diff --git a/.gitlab-ci.d/crossbuilds.yml b/.gitlab-ci.d/crossbuilds.yml index 17d6cb3e458ab75d6c1853988f8c2403256122a3..d06bf5f57d6f2a497cb2e060ac7e206f84b0938e 100644 --- a/.gitlab-ci.d/crossbuilds.yml +++ b/.gitlab-ci.d/crossbuilds.yml @@ -65,7 +65,7 @@ cross-i386-tci: variables: IMAGE: fedora-i386-cross ACCEL: tcg-interpreter - EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user + EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins MAKE_CHECK_ARGS: check check-tcg cross-mips-system: diff --git a/MAINTAINERS b/MAINTAINERS index 7543eb4d5971628d207b0d387a4c630619bf2a58..fbd6d0b174afa18243e912dae6ebf4ee9c3d8103 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1825,6 +1825,7 @@ F: hw/scsi/* F: tests/qtest/virtio-scsi-test.c F: tests/qtest/fuzz-virtio-scsi-test.c F: tests/qtest/am53c974-test.c +F: tests/qtest/fuzz-lsi53c895a-test.c T: git https://github.com/bonzini/qemu.git scsi-next SSI diff --git a/accel/kvm/kvm-all.c b/accel/kvm/kvm-all.c index eecd8031cf6c326f1816da179cf104c9c6dfb983..2eb0666bd7223c1ba2c5f24b6b1297917fdcacfa 100644 --- a/accel/kvm/kvm-all.c +++ b/accel/kvm/kvm-all.c @@ -45,9 +45,12 @@ #include "qemu/guest-random.h" #include "sysemu/hw_accel.h" #include "kvm-cpus.h" +#include "sysemu/dirtylimit.h" #include "hw/boards.h" +#include "sysemu/kvm.h" + /* This check must be after config-host.h is included */ #ifdef CONFIG_EVENTFD #include @@ -71,87 +74,16 @@ do { } while (0) #endif -#define KVM_MSI_HASHTAB_SIZE 256 - struct KVMParkedVcpu { unsigned long vcpu_id; int kvm_fd; QLIST_ENTRY(KVMParkedVcpu) node; }; -enum KVMDirtyRingReaperState { - KVM_DIRTY_RING_REAPER_NONE = 0, - /* The reaper is sleeping */ - KVM_DIRTY_RING_REAPER_WAIT, - /* The reaper is reaping for dirty pages */ - KVM_DIRTY_RING_REAPER_REAPING, -}; - -/* - * KVM reaper instance, responsible for collecting the KVM dirty bits - * via the dirty ring. - */ -struct KVMDirtyRingReaper { - /* The reaper thread */ - QemuThread reaper_thr; - volatile uint64_t reaper_iteration; /* iteration number of reaper thr */ - volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */ -}; +KVMState *kvm_state; -struct KVMState -{ - AccelState parent_obj; +bool virtcca_cvm_allowed = false; - int nr_slots; - int fd; - int vmfd; - int coalesced_mmio; - int coalesced_pio; - struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; - bool coalesced_flush_in_progress; - int vcpu_events; - int robust_singlestep; - int debugregs; -#ifdef KVM_CAP_SET_GUEST_DEBUG - QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints; -#endif - int max_nested_state_len; - int many_ioeventfds; - int intx_set_mask; - int kvm_shadow_mem; - bool kernel_irqchip_allowed; - bool kernel_irqchip_required; - OnOffAuto kernel_irqchip_split; - bool sync_mmu; - uint64_t manual_dirty_log_protect; - /* The man page (and posix) say ioctl numbers are signed int, but - * they're not. 
Linux, glibc and *BSD all treat ioctl numbers as - * unsigned, and treating them as signed here can break things */ - unsigned irq_set_ioctl; - unsigned int sigmask_len; - GHashTable *gsimap; -#ifdef KVM_CAP_IRQ_ROUTING - struct kvm_irq_routing *irq_routes; - int nr_allocated_irq_routes; - unsigned long *used_gsi_bitmap; - unsigned int gsi_count; - QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE]; -#endif - KVMMemoryListener memory_listener; - QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus; - - /* For "info mtree -f" to tell if an MR is registered in KVM */ - int nr_as; - struct KVMAs { - KVMMemoryListener *ml; - AddressSpace *as; - } *as; - uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */ - uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */ - struct KVMDirtyRingReaper reaper; -}; - -KVMState *kvm_state; bool kvm_kernel_irqchip; bool kvm_split_irqchip; bool kvm_async_interrupts_allowed; @@ -433,6 +365,29 @@ void kvm_destroy_vcpu(CPUState *cpu) } } +int kvm_create_parked_vcpu(unsigned long vcpu_id) +{ + KVMState *s = kvm_state; + struct KVMParkedVcpu *vcpu = NULL; + int ret; + + DPRINTF("kvm_create_parked_vcpu\n"); + + ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id); + if (ret < 0) { + DPRINTF("kvm_create_vcpu failed\n"); + goto err; + } + + vcpu = g_malloc0(sizeof(*vcpu)); + vcpu->vcpu_id = vcpu_id; + vcpu->kvm_fd = ret; + QLIST_INSERT_HEAD(&s->kvm_parked_vcpus, vcpu, node); + +err: + return ret; +} + static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id) { struct KVMParkedVcpu *cpu; @@ -470,6 +425,7 @@ int kvm_init_vcpu(CPUState *cpu, Error **errp) cpu->kvm_state = s; cpu->vcpu_dirty = true; cpu->dirty_pages = 0; + cpu->throttle_us_per_full = 0; mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0); if (mmap_size < 0) { @@ -710,12 +666,32 @@ static void kvm_dirty_ring_mark_page(KVMState *s, uint32_t as_id, static bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn) { - return gfn->flags == KVM_DIRTY_GFN_F_DIRTY; + /* + * Read the flags before the value. Pairs with barrier in + * KVM's kvm_dirty_ring_push() function. + */ + return qatomic_load_acquire(&gfn->flags) == KVM_DIRTY_GFN_F_DIRTY; } static void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn) { - gfn->flags = KVM_DIRTY_GFN_F_RESET; + /* + * Use a store-release so that the CPU that executes KVM_RESET_DIRTY_RINGS + * sees the full content of the ring: + * + * CPU0 CPU1 CPU2 + * ------------------------------------------------------------------------------ + * fill gfn0 + * store-rel flags for gfn0 + * load-acq flags for gfn0 + * store-rel RESET for gfn0 + * ioctl(RESET_RINGS) + * load-acq flags for gfn0 + * check if flags have RESET + * + * The synchronization goes from CPU2 to CPU0 to CPU1. + */ + qatomic_store_release(&gfn->flags, KVM_DIRTY_GFN_F_RESET); } /* @@ -750,17 +726,20 @@ static uint32_t kvm_dirty_ring_reap_one(KVMState *s, CPUState *cpu) } /* Must be with slots_lock held */ -static uint64_t kvm_dirty_ring_reap_locked(KVMState *s) +static uint64_t kvm_dirty_ring_reap_locked(KVMState *s, CPUState* cpu) { int ret; - CPUState *cpu; uint64_t total = 0; int64_t stamp; stamp = get_clock(); - CPU_FOREACH(cpu) { - total += kvm_dirty_ring_reap_one(s, cpu); + if (cpu) { + total = kvm_dirty_ring_reap_one(s, cpu); + } else { + CPU_FOREACH(cpu) { + total += kvm_dirty_ring_reap_one(s, cpu); + } } if (total) { @@ -781,7 +760,7 @@ static uint64_t kvm_dirty_ring_reap_locked(KVMState *s) * Currently for simplicity, we must hold BQL before calling this. 
We can * consider to drop the BQL if we're clear with all the race conditions. */ -static uint64_t kvm_dirty_ring_reap(KVMState *s) +static uint64_t kvm_dirty_ring_reap(KVMState *s, CPUState *cpu) { uint64_t total; @@ -801,7 +780,7 @@ static uint64_t kvm_dirty_ring_reap(KVMState *s) * reset below. */ kvm_slots_lock(); - total = kvm_dirty_ring_reap_locked(s); + total = kvm_dirty_ring_reap_locked(s, cpu); kvm_slots_unlock(); return total; @@ -848,7 +827,7 @@ static void kvm_dirty_ring_flush(void) * vcpus out in a synchronous way. */ kvm_cpu_synchronize_kick_all(); - kvm_dirty_ring_reap(kvm_state); + kvm_dirty_ring_reap(kvm_state, NULL); trace_kvm_dirty_ring_flush(1); } @@ -1392,7 +1371,7 @@ static void kvm_set_phys_mem(KVMMemoryListener *kml, * Not easy. Let's cross the fingers until it's fixed. */ if (kvm_state->kvm_dirty_ring_size) { - kvm_dirty_ring_reap_locked(kvm_state); + kvm_dirty_ring_reap_locked(kvm_state, NULL); } else { kvm_slot_get_dirty_log(kvm_state, mem); } @@ -1460,11 +1439,16 @@ static void *kvm_dirty_ring_reaper_thread(void *data) */ sleep(1); + /* keep sleeping so that dirtylimit not be interfered by reaper */ + if (dirtylimit_in_service()) { + continue; + } + trace_kvm_dirty_ring_reaper("wakeup"); r->reaper_state = KVM_DIRTY_RING_REAPER_REAPING; qemu_mutex_lock_iothread(); - kvm_dirty_ring_reap(s); + kvm_dirty_ring_reap(s, NULL); qemu_mutex_unlock_iothread(); r->reaper_iteration++; @@ -1477,15 +1461,13 @@ static void *kvm_dirty_ring_reaper_thread(void *data) return NULL; } -static int kvm_dirty_ring_reaper_init(KVMState *s) +static void kvm_dirty_ring_reaper_init(KVMState *s) { struct KVMDirtyRingReaper *r = &s->reaper; qemu_thread_create(&r->reaper_thr, "kvm-reaper", kvm_dirty_ring_reaper_thread, s, QEMU_THREAD_JOINABLE); - - return 0; } static void kvm_region_add(MemoryListener *listener, @@ -1751,7 +1733,10 @@ void kvm_irqchip_commit_routes(KVMState *s) s->irq_routes->flags = 0; trace_kvm_irqchip_commit_routes(); ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes); - assert(ret == 0); + if (ret < 0) { + error_report("Set GSI routing failed: %m"); + abort(); + } } static void kvm_add_routing_entry(KVMState *s, @@ -2303,6 +2288,16 @@ bool kvm_dirty_ring_enabled(void) return kvm_state->kvm_dirty_ring_size ? true : false; } +uint32_t kvm_dirty_ring_size(void) +{ + return kvm_state->kvm_dirty_ring_size; +} + +static inline bool kvm_is_virtcca_cvm_type(int type) +{ + return type & VIRTCCA_CVM_TYPE; +} + static int kvm_init(MachineState *ms) { MachineClass *mc = MACHINE_GET_CLASS(ms); @@ -2387,6 +2382,10 @@ static int kvm_init(MachineState *ms) type = mc->kvm_type(ms, NULL); } + if (kvm_is_virtcca_cvm_type(type)) { + virtcca_cvm_allowed = true; + } + do { ret = kvm_ioctl(s, KVM_CREATE_VM, type); } while (ret == -EINTR); @@ -2606,10 +2605,7 @@ static int kvm_init(MachineState *ms) } if (s->kvm_dirty_ring_size) { - ret = kvm_dirty_ring_reaper_init(s); - if (ret) { - goto err; - } + kvm_dirty_ring_reaper_init(s); } return 0; @@ -2622,6 +2618,7 @@ err: if (s->fd != -1) { close(s->fd); } + g_free(s->as); g_free(s->memory_listener.slots); return ret; @@ -2931,8 +2928,19 @@ int kvm_cpu_exec(CPUState *cpu) */ trace_kvm_dirty_ring_full(cpu->cpu_index); qemu_mutex_lock_iothread(); - kvm_dirty_ring_reap(kvm_state); + /* + * We throttle vCPU by making it sleep once it exit from kernel + * due to dirty ring full. In the dirtylimit scenario, reaping + * all vCPUs after a single vCPU dirty ring get full result in + * the miss of sleep, so just reap the ring-fulled vCPU. 
+ */ + if (dirtylimit_in_service()) { + kvm_dirty_ring_reap(kvm_state, cpu); + } else { + kvm_dirty_ring_reap(kvm_state, NULL); + } qemu_mutex_unlock_iothread(); + dirtylimit_vcpu_execute(cpu); ret = 0; break; case KVM_EXIT_SYSTEM_EVENT: @@ -2970,14 +2978,14 @@ int kvm_cpu_exec(CPUState *cpu) if (ret < 0) { cpu_dump_state(cpu, stderr, CPU_DUMP_CODE); - vm_stop(RUN_STATE_INTERNAL_ERROR); + qemu_system_guest_panicked(cpu_get_crash_info(cpu)); } qatomic_set(&cpu->exit_request, 0); return ret; } -int kvm_ioctl(KVMState *s, int type, ...) +int kvm_ioctl(KVMState *s, unsigned long type, ...) { int ret; void *arg; @@ -2995,7 +3003,7 @@ int kvm_ioctl(KVMState *s, int type, ...) return ret; } -int kvm_vm_ioctl(KVMState *s, int type, ...) +int kvm_vm_ioctl(KVMState *s, unsigned long type, ...) { int ret; void *arg; @@ -3013,7 +3021,7 @@ int kvm_vm_ioctl(KVMState *s, int type, ...) return ret; } -int kvm_vcpu_ioctl(CPUState *cpu, int type, ...) +int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...) { int ret; void *arg; @@ -3031,7 +3039,7 @@ int kvm_vcpu_ioctl(CPUState *cpu, int type, ...) return ret; } -int kvm_device_ioctl(int fd, int type, ...) +int kvm_device_ioctl(int fd, unsigned long type, ...) { int ret; void *arg; @@ -3477,6 +3485,28 @@ int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target) return r; } +int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size, + struct kvm_numa_info *numa_info) +{ + KVMState *state = kvm_state; + struct kvm_user_data data; + int ret; + + data.loader_start = loader_start; + data.image_end = image_end; + data.initrd_start = initrd_start; + data.dtb_end = dtb_end; + data.ram_size = ram_size; + memcpy(&data.numa_info, numa_info, sizeof(struct kvm_numa_info)); + + ret = kvm_vm_ioctl(state, KVM_LOAD_USER_DATA, &data); + if (ret < 0) { + error_report("%s: KVM_LOAD_USER_DATA failed!\n", __func__); + } + + return ret; +} + static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as, hwaddr start_addr, hwaddr size) { @@ -3624,6 +3654,8 @@ static void kvm_accel_instance_init(Object *obj) s->kernel_irqchip_split = ON_OFF_AUTO_AUTO; /* KVM dirty ring is by default off */ s->kvm_dirty_ring_size = 0; + s->notify_vmexit = NOTIFY_VMEXIT_OPTION_RUN; + s->notify_window = 0; } static void kvm_accel_class_init(ObjectClass *oc, void *data) @@ -3651,6 +3683,8 @@ static void kvm_accel_class_init(ObjectClass *oc, void *data) NULL, NULL); object_class_property_set_description(oc, "dirty-ring-size", "Size of KVM dirty page ring buffer (default: 0, i.e. use bitmap)"); + + kvm_arch_accel_class_init(oc); } static const TypeInfo kvm_accel_type = { diff --git a/accel/kvm/trace-events b/accel/kvm/trace-events index 399aaeb0ec757cf50ba44943d4ab4d30e02c1a0d..a1905fe985824b252afe8a58c800bf76f13420a5 100644 --- a/accel/kvm/trace-events +++ b/accel/kvm/trace-events @@ -1,11 +1,11 @@ # See docs/devel/tracing.rst for syntax documentation. 
# kvm-all.c -kvm_ioctl(int type, void *arg) "type 0x%x, arg %p" -kvm_vm_ioctl(int type, void *arg) "type 0x%x, arg %p" -kvm_vcpu_ioctl(int cpu_index, int type, void *arg) "cpu_index %d, type 0x%x, arg %p" +kvm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p" +kvm_vm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p" +kvm_vcpu_ioctl(int cpu_index, unsigned long type, void *arg) "cpu_index %d, type 0x%lx, arg %p" kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d" -kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p" +kvm_device_ioctl(int fd, unsigned long type, void *arg) "dev fd %d, type 0x%lx, arg %p" kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s" kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s" kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu" diff --git a/accel/stubs/kvm-stub.c b/accel/stubs/kvm-stub.c index 5319573e0032a336612a822cb15a60ec113c9785..1128cb2928a0aa375b2f474665b99f2fc5ad7665 100644 --- a/accel/stubs/kvm-stub.c +++ b/accel/stubs/kvm-stub.c @@ -152,4 +152,9 @@ bool kvm_dirty_ring_enabled(void) { return false; } + +uint32_t kvm_dirty_ring_size(void) +{ + return 0; +} #endif diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c index 409ec8c38c8bc5af98fde36960e9ad96ab57ae17..7fb87afedcb5e87e3d81ac58509b8a142b77d424 100644 --- a/accel/tcg/cpu-exec.c +++ b/accel/tcg/cpu-exec.c @@ -798,8 +798,12 @@ static inline bool cpu_handle_interrupt(CPUState *cpu, * raised when single-stepping so that GDB doesn't miss the * next instruction. */ - cpu->exception_index = - (cpu->singlestep_enabled ? EXCP_DEBUG : -1); + if (unlikely(cpu->singlestep_enabled)) { + cpu->exception_index = EXCP_DEBUG; + qemu_mutex_unlock_iothread(); + return true; + } + cpu->exception_index = -1; *last_tb = NULL; } /* The target hook may have updated the 'cpu->interrupt_request'; diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c index b69a9534473ef9d629ce15451efaaf15bd8d0bce..03526fa1abb1c76879cd199262137d6632735aec 100644 --- a/accel/tcg/cputlb.c +++ b/accel/tcg/cputlb.c @@ -783,6 +783,15 @@ static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, } qemu_spin_unlock(&env_tlb(env)->c.lock); + /* + * If the length is larger than the jump cache size, then it will take + * longer to clear each entry individually than it will to clear it all. + */ + if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { + cpu_tb_jmp_cache_clear(cpu); + return; + } + for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) { tb_flush_jmp_cache(cpu, d.addr + i); } diff --git a/audio/dsoundaudio.c b/audio/dsoundaudio.c index cfc79c129eee99be72196e6e84e3b094d2cbf30f..3dd2c4d4a60b2aac5dfa5b783861986049b53f81 100644 --- a/audio/dsoundaudio.c +++ b/audio/dsoundaudio.c @@ -536,13 +536,12 @@ static void *dsound_get_buffer_in(HWVoiceIn *hw, size_t *size) DSoundVoiceIn *ds = (DSoundVoiceIn *) hw; LPDIRECTSOUNDCAPTUREBUFFER dscb = ds->dsound_capture_buffer; HRESULT hr; - DWORD cpos, rpos, act_size; + DWORD rpos, act_size; size_t req_size; int err; void *ret; - hr = IDirectSoundCaptureBuffer_GetCurrentPosition( - dscb, &cpos, ds->first_time ? 
&rpos : NULL); + hr = IDirectSoundCaptureBuffer_GetCurrentPosition(dscb, NULL, &rpos); if (FAILED(hr)) { dsound_logerr(hr, "Could not get capture buffer position\n"); *size = 0; @@ -554,7 +553,7 @@ static void *dsound_get_buffer_in(HWVoiceIn *hw, size_t *size) ds->first_time = false; } - req_size = audio_ring_dist(cpos, hw->pos_emul, hw->size_emul); + req_size = audio_ring_dist(rpos, hw->pos_emul, hw->size_emul); req_size = MIN(*size, MIN(req_size, hw->size_emul - hw->pos_emul)); if (req_size == 0) { diff --git a/audio/jackaudio.c b/audio/jackaudio.c index e7de6d5433e97bbc0361012840426107aedb295d..317009e936603be4cd4ea35cd5c623e9d3efa4ea 100644 --- a/audio/jackaudio.c +++ b/audio/jackaudio.c @@ -622,6 +622,7 @@ static void qjack_enable_in(HWVoiceIn *hw, bool enable) ji->c.enabled = enable; } +#if !defined(WIN32) && defined(CONFIG_PTHREAD_SETNAME_NP_W_TID) static int qjack_thread_creator(jack_native_thread_t *thread, const pthread_attr_t *attr, void *(*function)(void *), void *arg) { @@ -635,6 +636,7 @@ static int qjack_thread_creator(jack_native_thread_t *thread, return ret; } +#endif static void *qjack_init(Audiodev *dev) { @@ -687,7 +689,9 @@ static void register_audio_jack(void) { qemu_mutex_init(&qjack_shutdown_lock); audio_driver_register(&jack_driver); +#if !defined(WIN32) && defined(CONFIG_PTHREAD_SETNAME_NP_W_TID) jack_set_thread_creator(qjack_thread_creator); +#endif jack_set_error_function(qjack_error); jack_set_info_function(qjack_info); } diff --git a/backends/cryptodev-vhost.c b/backends/cryptodev-vhost.c index bc13e466b4f017675cc1ea9095ce8a62208518ca..572f87b3bee1f561328fd7775f668b6041c4678a 100644 --- a/backends/cryptodev-vhost.c +++ b/backends/cryptodev-vhost.c @@ -94,7 +94,7 @@ cryptodev_vhost_start_one(CryptoDevBackendVhost *crypto, goto fail_notifiers; } - r = vhost_dev_start(&crypto->dev, dev); + r = vhost_dev_start(&crypto->dev, dev, false); if (r < 0) { goto fail_start; } @@ -111,7 +111,7 @@ static void cryptodev_vhost_stop_one(CryptoDevBackendVhost *crypto, VirtIODevice *dev) { - vhost_dev_stop(&crypto->dev, dev); + vhost_dev_stop(&crypto->dev, dev, false); vhost_dev_disable_notifiers(&crypto->dev, dev); } diff --git a/backends/dbus-vmstate.c b/backends/dbus-vmstate.c index 9cfd758c42f37a4341806833cf99dacae59d8f33..57369ec0f22b250454a779c55d2722ac405b2f44 100644 --- a/backends/dbus-vmstate.c +++ b/backends/dbus-vmstate.c @@ -114,14 +114,19 @@ dbus_get_proxies(DBusVMState *self, GError **err) "org.qemu.VMState1", NULL, err); if (!proxy) { - return NULL; + if (err != NULL && *err != NULL) { + warn_report("%s: Failed to create proxy: %s", + __func__, (*err)->message); + g_clear_error(err); + } + continue; } result = g_dbus_proxy_get_cached_property(proxy, "Id"); if (!result) { - g_set_error_literal(err, G_IO_ERROR, G_IO_ERROR_FAILED, - "VMState Id property is missing."); - return NULL; + warn_report("%s: VMState Id property is missing.", __func__); + g_clear_object(&proxy); + continue; } id = g_variant_dup_string(result, &size); diff --git a/backends/hostmem.c b/backends/hostmem.c index 4c05862ed5a89059f1dbca8b9285692a413dfa36..c9ddaec849b459b68c458fd014c90483b8dd405b 100644 --- a/backends/hostmem.c +++ b/backends/hostmem.c @@ -273,7 +273,7 @@ static void host_memory_backend_init(Object *obj) backend->merge = machine_mem_merge(machine); backend->dump = machine_dump_guest_core(machine); backend->reserve = true; - backend->prealloc_threads = 1; + backend->prealloc_threads = machine->smp.cpus; } static void host_memory_backend_post_init(Object *obj) diff --git 
a/backends/tpm/tpm_emulator.c b/backends/tpm/tpm_emulator.c index 87d061e9bbd61cb572dfb0aa2e1bb68aac04edf4..9b50c5b3e2ec122346d1e2dad3be546ed61c9984 100644 --- a/backends/tpm/tpm_emulator.c +++ b/backends/tpm/tpm_emulator.c @@ -32,6 +32,7 @@ #include "qemu/sockets.h" #include "qemu/lockable.h" #include "io/channel-socket.h" +#include "sysemu/runstate.h" #include "sysemu/tpm_backend.h" #include "sysemu/tpm_util.h" #include "tpm_int.h" @@ -383,6 +384,15 @@ err_exit: static int tpm_emulator_startup_tpm(TPMBackend *tb, size_t buffersize) { + /* TPM startup will be done from post_load hook */ + if (runstate_check(RUN_STATE_INMIGRATE)) { + if (buffersize != 0) { + return tpm_emulator_set_buffer_size(tb, buffersize, NULL); + } + + return 0; + } + return tpm_emulator_startup_tpm_resume(tb, buffersize, false); } diff --git a/backends/vhost-user.c b/backends/vhost-user.c index 10b39992d21dc20107d8ed41a0cf889cdf0fbea3..6632e2fe6f60a352dabeb6dba129699dd489064e 100644 --- a/backends/vhost-user.c +++ b/backends/vhost-user.c @@ -85,7 +85,7 @@ vhost_user_backend_start(VhostUserBackend *b) } b->dev.acked_features = b->vdev->guest_features; - ret = vhost_dev_start(&b->dev, b->vdev); + ret = vhost_dev_start(&b->dev, b->vdev, true); if (ret < 0) { error_report("Error start vhost dev"); goto err_guest_notifiers; @@ -120,7 +120,7 @@ vhost_user_backend_stop(VhostUserBackend *b) return; } - vhost_dev_stop(&b->dev, b->vdev); + vhost_dev_stop(&b->dev, b->vdev, true); if (k->set_guest_notifiers) { ret = k->set_guest_notifiers(qbus->parent, diff --git a/block.c b/block.c index 0ac5b163d2aa19368ff54f2bc04a1f76b626d2dd..a91117abda5ce327047cf487ceeb28caa3cc3682 100644 --- a/block.c +++ b/block.c @@ -67,6 +67,9 @@ #define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */ +#define DEFAULT_BIOS_BOOT_LOADER_DIR "/usr/share/edk2" +#define DEFAULT_NVRAM_TEMPLATE_DIR "/var/lib/libvirt/qemu/nvram" + static QTAILQ_HEAD(, BlockDriverState) graph_bdrv_states = QTAILQ_HEAD_INITIALIZER(graph_bdrv_states); @@ -82,6 +85,7 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, BlockDriverState *parent, const BdrvChildClass *child_class, BdrvChildRole child_role, + bool parse_filename, Error **errp); static bool bdrv_recurse_has_child(BlockDriverState *bs, @@ -1926,7 +1930,8 @@ static void parse_json_protocol(QDict *options, const char **pfilename, * block driver has been specified explicitly. 
*/ static int bdrv_fill_options(QDict **options, const char *filename, - int *flags, Error **errp) + int *flags, bool allow_parse_filename, + Error **errp) { const char *drvname; bool protocol = *flags & BDRV_O_PROTOCOL; @@ -1966,7 +1971,7 @@ static int bdrv_fill_options(QDict **options, const char *filename, if (protocol && filename) { if (!qdict_haskey(*options, "filename")) { qdict_put_str(*options, "filename", filename); - parse_filename = true; + parse_filename = allow_parse_filename; } else { error_setg(errp, "Can't specify 'file' and 'filename' options at " "the same time"); @@ -3439,7 +3444,8 @@ int bdrv_open_backing_file(BlockDriverState *bs, QDict *parent_options, } backing_hd = bdrv_open_inherit(backing_filename, reference, options, 0, bs, - &child_of_bds, bdrv_backing_role(bs), errp); + &child_of_bds, bdrv_backing_role(bs), true, + errp); if (!backing_hd) { bs->open_flags |= BDRV_O_NO_BACKING; error_prepend(errp, "Could not open backing file: "); @@ -3472,7 +3478,8 @@ free_exit: static BlockDriverState * bdrv_open_child_bs(const char *filename, QDict *options, const char *bdref_key, BlockDriverState *parent, const BdrvChildClass *child_class, - BdrvChildRole child_role, bool allow_none, Error **errp) + BdrvChildRole child_role, bool allow_none, + bool parse_filename, Error **errp) { BlockDriverState *bs = NULL; QDict *image_options; @@ -3503,7 +3510,8 @@ bdrv_open_child_bs(const char *filename, QDict *options, const char *bdref_key, } bs = bdrv_open_inherit(filename, reference, image_options, 0, - parent, child_class, child_role, errp); + parent, child_class, child_role, parse_filename, + errp); if (!bs) { goto done; } @@ -3513,6 +3521,26 @@ done: return bs; } +static BdrvChild *bdrv_open_child_common(const char *filename, + QDict *options, const char *bdref_key, + BlockDriverState *parent, + const BdrvChildClass *child_class, + BdrvChildRole child_role, + bool allow_none, bool parse_filename, + Error **errp) +{ + BlockDriverState *bs; + + bs = bdrv_open_child_bs(filename, options, bdref_key, parent, child_class, + child_role, allow_none, parse_filename, errp); + if (bs == NULL) { + return NULL; + } + + return bdrv_attach_child(parent, bs, bdref_key, child_class, child_role, + errp); +} + /* * Opens a disk image whose options are given as BlockdevRef in another block * device's options. @@ -3534,16 +3562,29 @@ BdrvChild *bdrv_open_child(const char *filename, BdrvChildRole child_role, bool allow_none, Error **errp) { - BlockDriverState *bs; + return bdrv_open_child_common(filename, options, bdref_key, parent, + child_class, child_role, allow_none, false, + errp); +} - bs = bdrv_open_child_bs(filename, options, bdref_key, parent, child_class, - child_role, allow_none, errp); - if (bs == NULL) { - return NULL; - } +/* + * This does mostly the same as bdrv_open_child(), but for opening the primary + * child of a node. A notable difference from bdrv_open_child() is that it + * enables filename parsing for protocol names (including json:). + */ +int bdrv_open_file_child(const char *filename, + QDict *options, const char *bdref_key, + BlockDriverState *parent, Error **errp) +{ + BdrvChildRole role; - return bdrv_attach_child(parent, bs, bdref_key, child_class, child_role, - errp); + role = parent->drv->is_filter ? + (BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY) : BDRV_CHILD_IMAGE; + + parent->file = bdrv_open_child_common(filename, options, bdref_key, parent, + &child_of_bds, role, false, true, errp); + + return parent->file ? 
0 : -EINVAL; } /* @@ -3581,7 +3622,8 @@ BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp) } - bs = bdrv_open_inherit(NULL, reference, qdict, 0, NULL, NULL, 0, errp); + bs = bdrv_open_inherit(NULL, reference, qdict, 0, NULL, NULL, 0, false, + errp); obj = NULL; qobject_unref(obj); visit_free(v); @@ -3672,6 +3714,7 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, BlockDriverState *parent, const BdrvChildClass *child_class, BdrvChildRole child_role, + bool parse_filename, Error **errp) { int ret; @@ -3715,9 +3758,11 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, } /* json: syntax counts as explicit options, as if in the QDict */ - parse_json_protocol(options, &filename, &local_err); - if (local_err) { - goto fail; + if (parse_filename) { + parse_json_protocol(options, &filename, &local_err); + if (local_err) { + goto fail; + } } bs->explicit_options = qdict_clone_shallow(options); @@ -3742,7 +3787,8 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, parent->open_flags, parent->options); } - ret = bdrv_fill_options(&options, filename, &flags, &local_err); + ret = bdrv_fill_options(&options, filename, &flags, parse_filename, + &local_err); if (ret < 0) { goto fail; } @@ -3811,7 +3857,7 @@ static BlockDriverState *bdrv_open_inherit(const char *filename, file_bs = bdrv_open_child_bs(filename, options, "file", bs, &child_of_bds, BDRV_CHILD_IMAGE, - true, &local_err); + true, true, &local_err); if (local_err) { goto fail; } @@ -3956,7 +4002,7 @@ BlockDriverState *bdrv_open(const char *filename, const char *reference, QDict *options, int flags, Error **errp) { return bdrv_open_inherit(filename, reference, options, flags, NULL, - NULL, 0, errp); + NULL, 0, true, errp); } /* Return true if the NULL-terminated @list contains @str */ @@ -6432,7 +6478,13 @@ int coroutine_fn bdrv_co_invalidate_cache(BlockDriverState *bs, Error **errp) return ret; } - if (bs->drv->bdrv_co_invalidate_cache) { + /* + * It's not necessary for bios bootloader and nvram template to drop cache + * when migration, skip this step for them to avoid dowtime increase. + */ + if (bs->drv->bdrv_co_invalidate_cache && + !strstr(bs->filename, DEFAULT_BIOS_BOOT_LOADER_DIR) && + !strstr(bs->filename, DEFAULT_NVRAM_TEMPLATE_DIR)) { bs->drv->bdrv_co_invalidate_cache(bs, &local_err); if (local_err) { bs->open_flags |= BDRV_O_INACTIVE; @@ -6692,6 +6744,22 @@ bool bdrv_op_is_blocked(BlockDriverState *bs, BlockOpType op, Error **errp) bdrv_get_device_or_node_name(bs)); return true; } + + /* + * When migration puts a BDRV_O_INACTIVE flag on driver's open_flags, + * we fake a blocker that doesn't exist. From now on, block jobs + * will not be permitted. 
+ */ + if ((op == BLOCK_OP_TYPE_RESIZE || op == BLOCK_OP_TYPE_COMMIT_SOURCE || + op == BLOCK_OP_TYPE_MIRROR_SOURCE || op == BLOCK_OP_TYPE_MIRROR_TARGET) && + (bs->open_flags & BDRV_O_INACTIVE)) { + if (errp) { + error_setg(errp, "block device is in use by migration with" + " a driver BDRV_O_INACTIVE flag setted"); + } + return true; + } + return false; } diff --git a/block/blkdebug.c b/block/blkdebug.c index bbf2948703083502cd2fa0ad872734e816dac6fa..5fcfc8ac6fab9b6c33803f951518cd6a6707668d 100644 --- a/block/blkdebug.c +++ b/block/blkdebug.c @@ -503,12 +503,9 @@ static int blkdebug_open(BlockDriverState *bs, QDict *options, int flags, } /* Open the image file */ - bs->file = bdrv_open_child(qemu_opt_get(opts, "x-image"), options, "image", - bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(qemu_opt_get(opts, "x-image"), options, "image", + bs, errp); + if (ret < 0) { goto out; } diff --git a/block/blklogwrites.c b/block/blklogwrites.c index f7a251e91f9eff96c434672711a74c7df0d56084..f66a617eb3e86fd6fe9a027676ebfd22cb23b420 100644 --- a/block/blklogwrites.c +++ b/block/blklogwrites.c @@ -155,11 +155,8 @@ static int blk_log_writes_open(BlockDriverState *bs, QDict *options, int flags, } /* Open the file */ - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, false, - errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { goto fail; } diff --git a/block/blkreplay.c b/block/blkreplay.c index dcbe780ddbd36b3c2071fb350ee7fdc8b5349cb7..76a0b8d12ae148a1d75b7047c0b5a15e59f28e56 100644 --- a/block/blkreplay.c +++ b/block/blkreplay.c @@ -26,11 +26,8 @@ static int blkreplay_open(BlockDriverState *bs, QDict *options, int flags, int ret; /* Open the image file */ - bs->file = bdrv_open_child(NULL, options, "image", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(NULL, options, "image", bs, errp); + if (ret < 0) { goto fail; } diff --git a/block/blkverify.c b/block/blkverify.c index d1facf5ba90bb8be08a0b601cb20499705719c78..920e8916841a1a26a1f317f473bd93e7dc93f44b 100644 --- a/block/blkverify.c +++ b/block/blkverify.c @@ -121,12 +121,9 @@ static int blkverify_open(BlockDriverState *bs, QDict *options, int flags, } /* Open the raw file */ - bs->file = bdrv_open_child(qemu_opt_get(opts, "x-raw"), options, "raw", - bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(qemu_opt_get(opts, "x-raw"), options, "raw", + bs, errp); + if (ret < 0) { goto fail; } diff --git a/block/block-backend.c b/block/block-backend.c index 12ef80ea170c04a15500b42bc5e798a07e2f4493..3a757fb7463ed73542ab12e377f7c6b52d7770c2 100644 --- a/block/block-backend.c +++ b/block/block-backend.c @@ -95,6 +95,15 @@ struct BlockBackend { * Accessed with atomic ops. */ unsigned int in_flight; + + /* Timer for retry on errors. */ + QEMUTimer *retry_timer; + /* Interval in ms to trigger next retry. */ + int64_t retry_interval; + /* Start time of the first error. Used to check timeout. */ + int64_t retry_start_time; + /* Retry timeout. 0 represents infinite retry. 
*/ + int64_t retry_timeout; }; typedef struct BlockBackendAIOCB { @@ -353,6 +362,11 @@ BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm) blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT; blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; + blk->retry_timer = NULL; + blk->retry_interval = BLOCK_BACKEND_DEFAULT_RETRY_INTERVAL; + blk->retry_start_time = 0; + blk->retry_timeout = 0; + block_acct_init(&blk->stats); qemu_co_queue_init(&blk->queued_requests); @@ -471,6 +485,10 @@ static void blk_delete(BlockBackend *blk) QTAILQ_REMOVE(&block_backends, blk, link); drive_info_del(blk->legacy_dinfo); block_acct_cleanup(&blk->stats); + if (blk->retry_timer) { + timer_del(blk->retry_timer); + timer_free(blk->retry_timer); + } g_free(blk); } @@ -822,16 +840,22 @@ BlockBackend *blk_by_public(BlockBackendPublic *public) void blk_remove_bs(BlockBackend *blk) { ThrottleGroupMember *tgm = &blk->public.throttle_group_member; - BlockDriverState *bs; BdrvChild *root; notifier_list_notify(&blk->remove_bs_notifiers, blk); if (tgm->throttle_state) { - bs = blk_bs(blk); + BlockDriverState *bs = blk_bs(blk); + + /* + * Take a ref in case blk_bs() changes across bdrv_drained_begin(), for + * example, if a temporary filter node is removed by a blockjob. + */ + bdrv_ref(bs); bdrv_drained_begin(bs); throttle_group_detach_aio_context(tgm); throttle_group_attach_aio_context(tgm, qemu_get_aio_context()); bdrv_drained_end(bs); + bdrv_unref(bs); } blk_update_root_state(blk); @@ -997,6 +1021,14 @@ void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops, blk->dev_ops = ops; blk->dev_opaque = opaque; + if ((blk->on_read_error == BLOCKDEV_ON_ERROR_RETRY || + blk->on_write_error == BLOCKDEV_ON_ERROR_RETRY) && + ops->retry_request_cb) { + blk->retry_timer = aio_timer_new(blk->ctx, QEMU_CLOCK_REALTIME, + SCALE_MS, ops->retry_request_cb, + opaque); + } + /* Are we currently quiesced? Should we enforce this right now? */ if (blk->quiesce_counter && ops->drained_begin) { ops->drained_begin(opaque); @@ -1705,6 +1737,7 @@ void blk_drain(BlockBackend *blk) BlockDriverState *bs = blk_bs(blk); if (bs) { + bdrv_ref(bs); bdrv_drained_begin(bs); } @@ -1714,6 +1747,7 @@ void blk_drain(BlockBackend *blk) if (bs) { bdrv_drained_end(bs); + bdrv_unref(bs); } } @@ -1737,6 +1771,39 @@ void blk_drain_all(void) bdrv_drain_all_end(); } +void blk_set_on_error_retry_interval(BlockBackend *blk, int64_t interval) +{ + blk->retry_interval = interval; +} + +void blk_set_on_error_retry_timeout(BlockBackend *blk, int64_t timeout) +{ + blk->retry_timeout = timeout; +} + +static bool blk_error_retry_timeout(BlockBackend *blk) +{ + /* No timeout set, infinite retries. */ + if (!blk->retry_timeout) { + return false; + } + + /* The first time an error occurs. */ + if (!blk->retry_start_time) { + blk->retry_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + return false; + } + + return qemu_clock_get_ms(QEMU_CLOCK_REALTIME) > (blk->retry_start_time + + blk->retry_timeout); +} + +void blk_error_retry_reset_timeout(BlockBackend *blk) +{ + if (blk->retry_timer && blk->retry_start_time) + blk->retry_start_time = 0; +} + void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error, BlockdevOnError on_write_error) { @@ -1764,6 +1831,9 @@ BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read, return BLOCK_ERROR_ACTION_REPORT; case BLOCKDEV_ON_ERROR_IGNORE: return BLOCK_ERROR_ACTION_IGNORE; + case BLOCKDEV_ON_ERROR_RETRY: + return (blk->retry_timer && !blk_error_retry_timeout(blk)) ? 
+ BLOCK_ERROR_ACTION_RETRY : BLOCK_ERROR_ACTION_REPORT; case BLOCKDEV_ON_ERROR_AUTO: default: abort(); @@ -1811,6 +1881,12 @@ void blk_error_action(BlockBackend *blk, BlockErrorAction action, qemu_system_vmstop_request_prepare(); send_qmp_error_event(blk, action, is_read, error); qemu_system_vmstop_request(RUN_STATE_IO_ERROR); + } else if (action == BLOCK_ERROR_ACTION_RETRY) { + if (!blk->quiesce_counter) { + timer_mod(blk->retry_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + + blk->retry_interval); + send_qmp_error_event(blk, action, is_read, error); + } } else { send_qmp_error_event(blk, action, is_read, error); } @@ -2044,10 +2120,13 @@ static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context, int ret; if (bs) { + bdrv_ref(bs); + if (update_root_node) { ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root, errp); if (ret < 0) { + bdrv_unref(bs); return ret; } } @@ -2057,6 +2136,8 @@ static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context, throttle_group_attach_aio_context(tgm, new_context); bdrv_drained_end(bs); } + + bdrv_unref(bs); } blk->ctx = new_context; @@ -2326,11 +2407,13 @@ void blk_io_limits_disable(BlockBackend *blk) ThrottleGroupMember *tgm = &blk->public.throttle_group_member; assert(tgm->throttle_state); if (bs) { + bdrv_ref(bs); bdrv_drained_begin(bs); } throttle_group_unregister_tgm(tgm); if (bs) { bdrv_drained_end(bs); + bdrv_unref(bs); } } diff --git a/block/bochs.c b/block/bochs.c index 4d68658087bdb5c4bab4a84dac76b2c84e1bcc32..b2dc06bbfdfa8e492beb5fabb73b441723e27554 100644 --- a/block/bochs.c +++ b/block/bochs.c @@ -110,10 +110,9 @@ static int bochs_open(BlockDriverState *bs, QDict *options, int flags, return ret; } - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } ret = bdrv_pread(bs->file, 0, &bochs, sizeof(bochs)); diff --git a/block/cloop.c b/block/cloop.c index b8c6d0eccdba8ac8134dede89509be1aa31add8f..bee87da1734499f30be320d5281ec423e8aedd0a 100644 --- a/block/cloop.c +++ b/block/cloop.c @@ -71,10 +71,9 @@ static int cloop_open(BlockDriverState *bs, QDict *options, int flags, return ret; } - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } /* read header */ diff --git a/block/copy-before-write.c b/block/copy-before-write.c index c30a5ff8dea933fce5b1913012d184c19aae6e83..8aa2cb6a853ec6a7fa5b50bef3365915228510db 100644 --- a/block/copy-before-write.c +++ b/block/copy-before-write.c @@ -150,12 +150,11 @@ static int cbw_open(BlockDriverState *bs, QDict *options, int flags, { BDRVCopyBeforeWriteState *s = bs->opaque; BdrvDirtyBitmap *copy_bitmap; + int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } s->target = bdrv_open_child(NULL, options, "target", bs, &child_of_bds, diff --git a/block/copy-on-read.c b/block/copy-on-read.c index 1fc7fb3333b1ef0e070eebdc873d55a24d3d5d45..815ac1d8356cd9c62e4a911fa54aa0b771b4100e 100644 --- a/block/copy-on-read.c +++ b/block/copy-on-read.c @@ -41,12 +41,11 @@ static int cor_open(BlockDriverState 
*bs, QDict *options, int flags, BDRVStateCOR *state = bs->opaque; /* Find a bottom node name, if any */ const char *bottom_node = qdict_get_try_str(options, "bottom"); + int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } bs->supported_read_flags = BDRV_REQ_PREFETCH; diff --git a/block/crypto.c b/block/crypto.c index c8ba4681e200c6980ea972c43cd104912ebb2f5f..abfce39230b745299d149921053e8998ad8e9529 100644 --- a/block/crypto.c +++ b/block/crypto.c @@ -260,15 +260,14 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, { BlockCrypto *crypto = bs->opaque; QemuOpts *opts = NULL; - int ret = -EINVAL; + int ret; QCryptoBlockOpenOptions *open_opts = NULL; unsigned int cflags = 0; QDict *cryptoopts = NULL; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } bs->supported_write_flags = BDRV_REQ_FUA & @@ -276,6 +275,7 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, opts = qemu_opts_create(opts_spec, NULL, 0, &error_abort); if (!qemu_opts_absorb_qdict(opts, options, errp)) { + ret = -EINVAL; goto cleanup; } @@ -284,6 +284,7 @@ static int block_crypto_open_generic(QCryptoBlockFormat format, open_opts = block_crypto_open_opts_init(cryptoopts, errp); if (!open_opts) { + ret = -EINVAL; goto cleanup; } diff --git a/block/curl.c b/block/curl.c index 4a8ae2b269867b9d41c6b0d4a308983e8baadaa1..5aebb080021d0a7a2700117bfbcaa14e4a208d5f 100644 --- a/block/curl.c +++ b/block/curl.c @@ -821,8 +821,10 @@ out_noclean: g_free(s->username); g_free(s->proxyusername); g_free(s->proxypassword); - curl_drop_all_sockets(s->sockets); - g_hash_table_destroy(s->sockets); + if (s->sockets) { + curl_drop_all_sockets(s->sockets); + g_hash_table_destroy(s->sockets); + } qemu_opts_del(opts); return -EINVAL; } diff --git a/block/dmg.c b/block/dmg.c index 447901fbb875a97595dc65d58cddc30f60dbdc9c..38c363dd39ae019f5bbdcb9928ec20e24f183ae7 100644 --- a/block/dmg.c +++ b/block/dmg.c @@ -439,10 +439,9 @@ static int dmg_open(BlockDriverState *bs, QDict *options, int flags, return ret; } - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } block_module_load_one("dmg-bz2"); diff --git a/block/file-posix.c b/block/file-posix.c index b283093e5b7b4a0643dc82e3f5cea896476a2b16..5180fd1d0b95d43c84f32a0dfebd5ddc2e01a33c 100644 --- a/block/file-posix.c +++ b/block/file-posix.c @@ -128,6 +128,10 @@ #define FTYPE_CD 1 #define MAX_BLOCKSIZE 4096 +#define DEFAULT_BUFFER_SIZE 65536 +#define BUFFER_ALIGN_SIZE 65536 +#define MIN_BUFFER_SIZE 65536 +#define MAX_BUFFER_SIZE 16777216 /* Posix file locking bytes. Libvirt takes byte 0, we start from higher bytes, * leaving a few more bytes for its future use. 
*/ @@ -206,6 +210,8 @@ typedef struct RawPosixAIOData { off_t aio_offset; uint64_t aio_nbytes; + size_t buffer_size; + union { struct { struct iovec *iov; @@ -804,7 +810,7 @@ static int raw_open_common(BlockDriverState *bs, QDict *options, } #endif - bs->supported_zero_flags = BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK; + bs->supported_zero_flags = s->discard_zeroes ? (BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK) : 0; if (S_ISREG(st.st_mode)) { /* When extending regular files, we get zeros from the OS */ bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE; @@ -2218,7 +2224,8 @@ static void raw_close(BlockDriverState *bs) */ static int coroutine_fn raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset, - PreallocMode prealloc, Error **errp) + PreallocMode prealloc, size_t buffer_size, + Error **errp) { RawPosixAIOData acb; @@ -2227,6 +2234,7 @@ raw_regular_truncate(BlockDriverState *bs, int fd, int64_t offset, .aio_fildes = fd, .aio_type = QEMU_AIO_TRUNCATE, .aio_offset = offset, + .buffer_size = buffer_size, .truncate = { .prealloc = prealloc, .errp = errp, @@ -2252,7 +2260,8 @@ static int coroutine_fn raw_co_truncate(BlockDriverState *bs, int64_t offset, if (S_ISREG(st.st_mode)) { /* Always resizes to the exact @offset */ - return raw_regular_truncate(bs, s->fd, offset, prealloc, errp); + return raw_regular_truncate(bs, s->fd, offset, prealloc, + DEFAULT_BUFFER_SIZE, errp); } if (prealloc != PREALLOC_MODE_OFF) { @@ -2465,6 +2474,8 @@ raw_co_create(BlockdevCreateOptions *options, Error **errp) int fd; uint64_t perm, shared; int result = 0; + int flags = O_RDWR | O_BINARY; + size_t buffer_size = DEFAULT_BUFFER_SIZE; /* Validate options and set default values */ assert(options->driver == BLOCKDEV_DRIVER_FILE); @@ -2484,9 +2495,19 @@ raw_co_create(BlockdevCreateOptions *options, Error **errp) error_setg(errp, "Extent size hint is too large"); goto out; } + if (!file_opts->has_cache) { + file_opts->cache = g_strdup("writeback"); + } + if (file_opts->preallocation == PREALLOC_MODE_FULL && + !strcmp(file_opts->cache, "none")) { + flags |= O_DIRECT; + } + if (file_opts->has_buffersize) { + buffer_size = file_opts->buffersize; + } /* Create file */ - fd = qemu_create(file_opts->filename, O_RDWR | O_BINARY, 0644, errp); + fd = qemu_create(file_opts->filename, flags, 0644, errp); if (fd < 0) { result = -errno; goto out; @@ -2521,7 +2542,8 @@ raw_co_create(BlockdevCreateOptions *options, Error **errp) } /* Clear the file by truncating it to 0 */ - result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, errp); + result = raw_regular_truncate(NULL, fd, 0, PREALLOC_MODE_OFF, + buffer_size, errp); if (result < 0) { goto out_unlock; } @@ -2565,7 +2587,8 @@ raw_co_create(BlockdevCreateOptions *options, Error **errp) /* Resize and potentially preallocate the file to the desired * final size */ result = raw_regular_truncate(NULL, fd, file_opts->size, - file_opts->preallocation, errp); + file_opts->preallocation, + buffer_size, errp); if (result < 0) { goto out_unlock; } @@ -2586,6 +2609,7 @@ out_close: error_setg_errno(errp, -result, "Could not close the new file"); } out: + g_free(file_opts->cache); return result; } @@ -2602,6 +2626,8 @@ static int coroutine_fn raw_co_create_opts(BlockDriver *drv, PreallocMode prealloc; char *buf = NULL; Error *local_err = NULL; + size_t buffersize = DEFAULT_BUFFER_SIZE; + char *cache = NULL; /* Skip file: protocol prefix */ strstart(filename, "file:", &filename); @@ -2624,6 +2650,21 @@ static int coroutine_fn raw_co_create_opts(BlockDriver *drv, return 
-EINVAL; } + buffersize = qemu_opt_get_size_del(opts, BLOCK_OPT_BUFFER_SIZE, + DEFAULT_BUFFER_SIZE); + if (buffersize < MIN_BUFFER_SIZE || buffersize > MAX_BUFFER_SIZE) { + error_setg_errno(errp, EINVAL, "Buffer size must be between %d " + "and %d", MIN_BUFFER_SIZE, MAX_BUFFER_SIZE); + return -EINVAL; + } + + cache = qemu_opt_get_del(opts, BLOCK_OPT_CACHE); + if (!cache) { + cache = g_strdup("writeback"); + } + + buffersize = ROUND_UP(buffersize, BUFFER_ALIGN_SIZE); + options = (BlockdevCreateOptions) { .driver = BLOCKDEV_DRIVER_FILE, .u.file = { @@ -2635,6 +2676,10 @@ static int coroutine_fn raw_co_create_opts(BlockDriver *drv, .nocow = nocow, .has_extent_size_hint = has_extent_size_hint, .extent_size_hint = extent_size_hint, + .has_buffersize = true, + .buffersize = buffersize, + .has_cache = true, + .cache = cache, }, }; return raw_co_create(&options, errp); @@ -3133,6 +3178,16 @@ static QemuOptsList raw_create_opts = { .type = QEMU_OPT_SIZE, .help = "Extent size hint for the image file, 0 to disable" }, + { + .name = BLOCK_OPT_CACHE, + .type = QEMU_OPT_STRING, + .help = "Cache mode (allowed values: writeback, none)" + }, + { + .name = BLOCK_OPT_BUFFER_SIZE, + .type = QEMU_OPT_SIZE, + .help = "write buffer size" + }, { /* end of list */ } } }; diff --git a/block/filter-compress.c b/block/filter-compress.c index d5be538619ae74d4f13ce6cc65c29144a88b4784..305716c86cb33bb744c7507e2d6aca64fe2fade5 100644 --- a/block/filter-compress.c +++ b/block/filter-compress.c @@ -30,11 +30,9 @@ static int compress_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + int ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } if (!bs->file->bs->drv || !block_driver_can_compress(bs->file->bs->drv)) { diff --git a/block/io_uring.c b/block/io_uring.c index dfa475cc874650e67d3d7c362cd83bf9f8a08253..e88d75d4624136007bd1653ff6e8889a103d805f 100644 --- a/block/io_uring.c +++ b/block/io_uring.c @@ -89,7 +89,7 @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, trace_luring_resubmit_short_read(s, luringcb, nread); /* Update read position */ - luringcb->total_read = nread; + luringcb->total_read += nread; remaining = luringcb->qiov->size - luringcb->total_read; /* Shorten qiov */ @@ -103,7 +103,7 @@ static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb, remaining); /* Update sqe */ - luringcb->sqeq.off = nread; + luringcb->sqeq.off += nread; luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov; luringcb->sqeq.len = luringcb->resubmit_qiov.niov; diff --git a/block/iscsi.c b/block/iscsi.c index 57aa07a40d7f4cafa26197afca05a862681c9b4d..61ccb58fc8c0c47e1054642108032ba0cafa329d 100644 --- a/block/iscsi.c +++ b/block/iscsi.c @@ -268,6 +268,7 @@ iscsi_co_generic_cb(struct iscsi_context *iscsi, int status, timer_mod(&iTask->retry_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + retry_time); iTask->do_retry = 1; + return; } else if (status == SCSI_STATUS_CHECK_CONDITION) { int error = iscsi_translate_sense(&task->sense); if (error == EAGAIN) { diff --git a/block/linux-aio.c b/block/linux-aio.c index f53ae72e21fdf7cecdf7c1556bccd1b87fbd9418..77f17ad5965760444bc6b9c80d8a3dfa9ae9b142 100644 --- a/block/linux-aio.c +++ b/block/linux-aio.c @@ -360,8 +360,10 @@ void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s, uint64_t dev_max_batch) 
{ assert(s->io_q.plugged); + s->io_q.plugged--; + if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch) || - (--s->io_q.plugged == 0 && + (!s->io_q.plugged && !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending))) { ioq_submit(s); } diff --git a/block/mirror.c b/block/mirror.c index efec2c7674b047cd906a02930ddcbdf36e1e1f13..de020bdb3e71877a5b610c679606da7e142b9560 100644 --- a/block/mirror.c +++ b/block/mirror.c @@ -72,7 +72,7 @@ typedef struct MirrorBlockJob { uint64_t last_pause_ns; unsigned long *in_flight_bitmap; - int in_flight; + unsigned in_flight; int64_t bytes_in_flight; QTAILQ_HEAD(, MirrorOp) ops_in_flight; int ret; @@ -1420,11 +1420,13 @@ static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, MirrorOp *op = NULL; MirrorBDSOpaque *s = bs->opaque; int ret = 0; - bool copy_to_target; + bool copy_to_target = false; - copy_to_target = s->job->ret >= 0 && - !job_is_cancelled(&s->job->common.job) && - s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; + if (s->job) { + copy_to_target = s->job->ret >= 0 && + !job_is_cancelled(&s->job->common.job) && + s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; + } if (copy_to_target) { op = active_write_prepare(s->job, offset, bytes); @@ -1469,11 +1471,13 @@ static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, QEMUIOVector bounce_qiov; void *bounce_buf; int ret = 0; - bool copy_to_target; + bool copy_to_target = false; - copy_to_target = s->job->ret >= 0 && - !job_is_cancelled(&s->job->common.job) && - s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; + if (s->job) { + copy_to_target = s->job->ret >= 0 && + !job_is_cancelled(&s->job->common.job) && + s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; + } if (copy_to_target) { /* The guest might concurrently modify the data to write; but @@ -1640,7 +1644,7 @@ static BlockJob *mirror_start_job( * reads on the top, while disabling it in the intermediate nodes, and make * the backing chain writable. 
*/ mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name, - BDRV_O_RDWR, errp); + BDRV_O_RDWR | BDRV_O_NOCACHE, errp); if (mirror_top_bs == NULL) { return NULL; } diff --git a/block/monitor/block-hmp-cmds.c b/block/monitor/block-hmp-cmds.c index 2ac4aedfff04228be376c05ff97815e4b3bfe2e5..bc4bd46b47a0c332882d8cd3b3873aae678da48b 100644 --- a/block/monitor/block-hmp-cmds.c +++ b/block/monitor/block-hmp-cmds.c @@ -213,15 +213,17 @@ void hmp_commit(Monitor *mon, const QDict *qdict) error_report("Device '%s' not found", device); return; } - if (!blk_is_available(blk)) { - error_report("Device '%s' has no medium", device); - return; - } bs = bdrv_skip_implicit_filters(blk_bs(blk)); aio_context = bdrv_get_aio_context(bs); aio_context_acquire(aio_context); + if (!blk_is_available(blk)) { + error_report("Device '%s' has no medium", device); + aio_context_release(aio_context); + return; + } + ret = bdrv_commit(bs); aio_context_release(aio_context); @@ -411,7 +413,8 @@ void hmp_nbd_server_start(Monitor *mon, const QDict *qdict) goto exit; } - nbd_server_start(addr, NULL, NULL, 0, &local_err); + nbd_server_start(addr, NULL, NULL, NBD_DEFAULT_MAX_CONNECTIONS, + &local_err); qapi_free_SocketAddress(addr); if (local_err != NULL) { goto exit; diff --git a/block/nbd.c b/block/nbd.c index 5ef462db1b7fd805d0cc886c395f80fe74623fe2..a543e68d2f4e5dc9a4a26bbb13337b7e1531bf04 100644 --- a/block/nbd.c +++ b/block/nbd.c @@ -76,10 +76,11 @@ typedef struct BDRVNBDState { CoQueue free_sema; CoMutex receive_mutex; - int in_flight; + unsigned in_flight; NBDClientState state; QEMUTimer *reconnect_delay_timer; + QEMUTimer *open_timer; NBDClientRequest requests[MAX_NBD_REQUESTS]; NBDReply reply; @@ -87,6 +88,7 @@ typedef struct BDRVNBDState { /* Connection parameters */ uint32_t reconnect_delay; + uint32_t open_timeout; SocketAddress *saddr; char *export, *tlscredsid; QCryptoTLSCreds *tlscreds; @@ -108,6 +110,10 @@ static void nbd_clear_bdrvstate(BlockDriverState *bs) yank_unregister_instance(BLOCKDEV_YANK_INSTANCE(bs->node_name)); + /* Must not leave timers behind that would access freed data */ + assert(!s->reconnect_delay_timer); + assert(!s->open_timer); + object_unref(OBJECT(s->tlscreds)); qapi_free_SocketAddress(s->saddr); s->saddr = NULL; @@ -218,6 +224,32 @@ static void nbd_teardown_connection(BlockDriverState *bs) s->state = NBD_CLIENT_QUIT; } +static void open_timer_del(BDRVNBDState *s) +{ + if (s->open_timer) { + timer_free(s->open_timer); + s->open_timer = NULL; + } +} + +static void open_timer_cb(void *opaque) +{ + BDRVNBDState *s = opaque; + + nbd_co_establish_connection_cancel(s->conn); + open_timer_del(s); +} + +static void open_timer_init(BDRVNBDState *s, uint64_t expire_time_ns) +{ + assert(!s->open_timer); + s->open_timer = aio_timer_new(bdrv_get_aio_context(s->bs), + QEMU_CLOCK_REALTIME, + SCALE_NS, + open_timer_cb, s); + timer_mod(s->open_timer, expire_time_ns); +} + static bool nbd_client_connecting(BDRVNBDState *s) { NBDClientState state = qatomic_load_acquire(&s->state); @@ -353,6 +385,13 @@ static coroutine_fn void nbd_reconnect_attempt(BDRVNBDState *s) } nbd_co_do_establish_connection(s->bs, NULL); + + /* + * The reconnect attempt is done (maybe successfully, maybe not), so + * we no longer need this timer. Delete it so it will not outlive + * this I/O request (so draining removes all timers). 
+ */ + reconnect_delay_timer_del(s); } static coroutine_fn int nbd_receive_replies(BDRVNBDState *s, uint64_t handle) @@ -490,8 +529,8 @@ err: if (i != -1) { s->requests[i].coroutine = NULL; s->in_flight--; - qemu_co_queue_next(&s->free_sema); } + qemu_co_queue_next(&s->free_sema); } qemu_co_mutex_unlock(&s->send_mutex); return rc; @@ -1742,6 +1781,15 @@ static QemuOptsList nbd_runtime_opts = { "future requests before a successful reconnect will " "immediately fail. Default 0", }, + { + .name = "open-timeout", + .type = QEMU_OPT_NUMBER, + .help = "In seconds. If zero, the nbd driver tries the connection " + "only once, and fails to open if the connection fails. " + "If non-zero, the nbd driver will repeat connection " + "attempts until successful or until @open-timeout seconds " + "have elapsed. Default 0", + }, { /* end of list */ } }, }; @@ -1797,6 +1845,7 @@ static int nbd_process_options(BlockDriverState *bs, QDict *options, } s->reconnect_delay = qemu_opt_get_number(opts, "reconnect-delay", 0); + s->open_timeout = qemu_opt_get_number(opts, "open-timeout", 0); ret = 0; @@ -1828,18 +1877,31 @@ static int nbd_open(BlockDriverState *bs, QDict *options, int flags, s->conn = nbd_client_connection_new(s->saddr, true, s->export, s->x_dirty_bitmap, s->tlscreds); - /* TODO: Configurable retry-until-timeout behaviour. */ + if (s->open_timeout) { + nbd_client_connection_enable_retry(s->conn); + open_timer_init(s, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + + s->open_timeout * NANOSECONDS_PER_SECOND); + } + s->state = NBD_CLIENT_CONNECTING_WAIT; ret = nbd_do_establish_connection(bs, errp); if (ret < 0) { goto fail; } + /* + * The connect attempt is done, so we no longer need this timer. + * Delete it, because we do not want it to be around when this node + * is drained or closed. + */ + open_timer_del(s); + nbd_client_connection_enable_retry(s->conn); return 0; fail: + open_timer_del(s); nbd_clear_bdrvstate(bs); return ret; } @@ -1993,6 +2055,42 @@ static void nbd_cancel_in_flight(BlockDriverState *bs) nbd_co_establish_connection_cancel(s->conn); } +static void nbd_attach_aio_context(BlockDriverState *bs, + AioContext *new_context) +{ + BDRVNBDState *s = bs->opaque; + + /* The open_timer is used only during nbd_open() */ + assert(!s->open_timer); + + /* + * The reconnect_delay_timer is scheduled in I/O paths when the + * connection is lost, to cancel the reconnection attempt after a + * given time. Once this attempt is done (successfully or not), + * nbd_reconnect_attempt() ensures the timer is deleted before the + * respective I/O request is resumed. + * Since the AioContext can only be changed when a node is drained, + * the reconnect_delay_timer cannot be active here. 
+ */ + assert(!s->reconnect_delay_timer); + + if (s->ioc) { + qio_channel_attach_aio_context(s->ioc, new_context); + } +} + +static void nbd_detach_aio_context(BlockDriverState *bs) +{ + BDRVNBDState *s = bs->opaque; + + assert(!s->open_timer); + assert(!s->reconnect_delay_timer); + + if (s->ioc) { + qio_channel_detach_aio_context(s->ioc); + } +} + static BlockDriver bdrv_nbd = { .format_name = "nbd", .protocol_name = "nbd", @@ -2016,6 +2114,9 @@ static BlockDriver bdrv_nbd = { .bdrv_dirname = nbd_dirname, .strong_runtime_opts = nbd_strong_runtime_opts, .bdrv_cancel_in_flight = nbd_cancel_in_flight, + + .bdrv_attach_aio_context = nbd_attach_aio_context, + .bdrv_detach_aio_context = nbd_detach_aio_context, }; static BlockDriver bdrv_nbd_tcp = { @@ -2041,6 +2142,9 @@ static BlockDriver bdrv_nbd_tcp = { .bdrv_dirname = nbd_dirname, .strong_runtime_opts = nbd_strong_runtime_opts, .bdrv_cancel_in_flight = nbd_cancel_in_flight, + + .bdrv_attach_aio_context = nbd_attach_aio_context, + .bdrv_detach_aio_context = nbd_detach_aio_context, }; static BlockDriver bdrv_nbd_unix = { @@ -2066,6 +2170,9 @@ static BlockDriver bdrv_nbd_unix = { .bdrv_dirname = nbd_dirname, .strong_runtime_opts = nbd_strong_runtime_opts, .bdrv_cancel_in_flight = nbd_cancel_in_flight, + + .bdrv_attach_aio_context = nbd_attach_aio_context, + .bdrv_detach_aio_context = nbd_detach_aio_context, }; static void bdrv_nbd_init(void) diff --git a/block/nfs.c b/block/nfs.c index 577aea1d222aeac63ed2bf4d75b4f1afb564a97a..56b25829cfe2618d5042002dfbf89b6159bddb91 100644 --- a/block/nfs.c +++ b/block/nfs.c @@ -418,7 +418,11 @@ static int64_t nfs_client_open(NFSClient *client, BlockdevOptionsNfs *opts, int flags, int open_flags, Error **errp) { int64_t ret = -EINVAL; +#ifdef _WIN32 + struct __stat64 st; +#else struct stat st; +#endif char *file = NULL, *strp = NULL; qemu_mutex_init(&client->mutex); @@ -781,7 +785,11 @@ static int nfs_reopen_prepare(BDRVReopenState *state, BlockReopenQueue *queue, Error **errp) { NFSClient *client = state->bs->opaque; +#ifdef _WIN32 + struct __stat64 st; +#else struct stat st; +#endif int ret = 0; if (state->flags & BDRV_O_RDWR && bdrv_is_read_only(state->bs)) { diff --git a/block/nvme.c b/block/nvme.c index e4f336d79c26c815f004460893ecc51adb9958f0..d8f4b04e19020a875dd12193d5c7236754c3bd9b 100644 --- a/block/nvme.c +++ b/block/nvme.c @@ -206,8 +206,9 @@ static void nvme_free_req_queue_cb(void *opaque) NVMeQueuePair *q = opaque; qemu_mutex_lock(&q->lock); - while (qemu_co_enter_next(&q->free_req_queue, &q->lock)) { - /* Retry all pending requests */ + while (q->free_req_head != -1 && + qemu_co_enter_next(&q->free_req_queue, &q->lock)) { + /* Retry waiting requests */ } qemu_mutex_unlock(&q->lock); } @@ -409,9 +410,10 @@ static bool nvme_process_completion(NVMeQueuePair *q) q->cq_phase = !q->cq_phase; } cid = le16_to_cpu(c->cid); - if (cid == 0 || cid > NVME_QUEUE_SIZE) { - warn_report("NVMe: Unexpected CID in completion queue: %"PRIu32", " - "queue size: %u", cid, NVME_QUEUE_SIZE); + if (cid == 0 || cid > NVME_NUM_REQS) { + warn_report("NVMe: Unexpected CID in completion queue: %" PRIu32 + ", should be within: 1..%u inclusively", cid, + NVME_NUM_REQS); continue; } trace_nvme_complete_command(s, q->index, cid); diff --git a/block/parallels-ext.c b/block/parallels-ext.c index e0dd0975c6f8936228d9df06b33c2a7ced8347a3..b0e1c1aa47c561d02d3671f5822d1f35ec2d855b 100644 --- a/block/parallels-ext.c +++ b/block/parallels-ext.c @@ -260,7 +260,7 @@ static int parallels_parse_format_extension(BlockDriverState *bs, break; 
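/* fh.magic matched none of the known extension magics */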
default: - error_setg(errp, "Unknown feature: 0x%" PRIu64, fh.magic); + error_setg(errp, "Unknown feature: 0x%" PRIx64, fh.magic); goto fail; } diff --git a/block/parallels.c b/block/parallels.c index 6ebad2a2bbc9129ee184b7a38ce66ef5346e33de..ae3f324bb5cb77822621f476bff4c4486b22b744 100644 --- a/block/parallels.c +++ b/block/parallels.c @@ -240,8 +240,8 @@ static int64_t allocate_clusters(BlockDriverState *bs, int64_t sector_num, return ret; } - ret = bdrv_co_pwritev(bs->file, s->data_end * BDRV_SECTOR_SIZE, - nb_cow_bytes, buf, 0); + ret = bdrv_co_pwrite(bs->file, s->data_end * BDRV_SECTOR_SIZE, + nb_cow_bytes, buf, 0); qemu_vfree(buf); if (ret < 0) { return ret; @@ -735,10 +735,9 @@ static int parallels_open(BlockDriverState *bs, QDict *options, int flags, Error *local_err = NULL; char *buf; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } ret = bdrv_pread(bs->file, 0, &ph, sizeof(ph)); diff --git a/block/preallocate.c b/block/preallocate.c index 1d4233f730082ae61f13b8bb55350ed8abf447d3..332408bdc9d07d225c6ebe7227378892ef7f7ace 100644 --- a/block/preallocate.c +++ b/block/preallocate.c @@ -134,6 +134,7 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags, Error **errp) { BDRVPreallocateState *s = bs->opaque; + int ret; /* * s->data_end and friends should be initialized on permission update. @@ -141,11 +142,9 @@ static int preallocate_open(BlockDriverState *bs, QDict *options, int flags, */ s->file_end = s->zero_start = s->data_end = -EINVAL; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } if (!preallocate_absorb_opts(&s->opts, options, bs->file->bs, errp)) { diff --git a/block/qcow.c b/block/qcow.c index c39940f33ebea70b48aa4f7cdc8ffe35084fb48f..544a17261f933806bbbd40c26e6b5f9c055af3b8 100644 --- a/block/qcow.c +++ b/block/qcow.c @@ -120,10 +120,8 @@ static int qcow_open(BlockDriverState *bs, QDict *options, int flags, qdict_extract_subqdict(options, &encryptopts, "encrypt."); encryptfmt = qdict_get_try_str(encryptopts, "format"); - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - ret = -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { goto fail; } diff --git a/block/qcow2.c b/block/qcow2.c index d50901675699ef3806bacb780ee0cf74e946cce5..7b1e870919ab6ea3242e59906fb6cf7a4e76b01e 100644 --- a/block/qcow2.c +++ b/block/qcow2.c @@ -272,6 +272,7 @@ static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset, void *feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature)); ret = bdrv_pread(bs->file, offset , feature_table, ext.len); if (ret < 0) { + g_free(feature_table); error_setg_errno(errp, -ret, "ERROR: ext_feature_table: " "Could not read table"); return ret; @@ -1295,7 +1296,8 @@ static int validate_compression_type(BDRVQcow2State *s, Error **errp) /* Called with s->lock held. 
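* When open_data_file is false, the external data file is neither opened here nor closed on the error path; qcow2_co_invalidate_cache() relies on this because it runs in the I/O path.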
*/ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, - int flags, Error **errp) + int flags, bool open_data_file, + Error **errp) { ERRP_GUARD(); BDRVQcow2State *s = bs->opaque; @@ -1613,50 +1615,67 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, goto fail; } - /* Open external data file */ - s->data_file = bdrv_open_child(NULL, options, "data-file", bs, - &child_of_bds, BDRV_CHILD_DATA, - true, errp); - if (*errp) { - ret = -EINVAL; - goto fail; - } + if (open_data_file && (flags & BDRV_O_NO_IO)) { + /* + * Don't open the data file for 'qemu-img info' so that it can be used + * to verify that an untrusted qcow2 image doesn't refer to external + * files. + * + * Note: This still makes has_data_file() return true. + */ + if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) { + s->data_file = NULL; + } else { + s->data_file = bs->file; + } + qdict_extract_subqdict(options, NULL, "data-file."); + qdict_del(options, "data-file"); + } else if (open_data_file) { + /* Open external data file */ + s->data_file = bdrv_open_child(NULL, options, "data-file", bs, + &child_of_bds, BDRV_CHILD_DATA, + true, errp); + if (*errp) { + ret = -EINVAL; + goto fail; + } - if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) { - if (!s->data_file && s->image_data_file) { - s->data_file = bdrv_open_child(s->image_data_file, options, - "data-file", bs, &child_of_bds, - BDRV_CHILD_DATA, false, errp); + if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) { + if (!s->data_file && s->image_data_file) { + s->data_file = bdrv_open_child(s->image_data_file, options, + "data-file", bs, &child_of_bds, + BDRV_CHILD_DATA, false, errp); + if (!s->data_file) { + ret = -EINVAL; + goto fail; + } + } if (!s->data_file) { + error_setg(errp, "'data-file' is required for this image"); ret = -EINVAL; goto fail; } - } - if (!s->data_file) { - error_setg(errp, "'data-file' is required for this image"); - ret = -EINVAL; - goto fail; - } - /* No data here */ - bs->file->role &= ~BDRV_CHILD_DATA; + /* No data here */ + bs->file->role &= ~BDRV_CHILD_DATA; - /* Must succeed because we have given up permissions if anything */ - bdrv_child_refresh_perms(bs, bs->file, &error_abort); - } else { - if (s->data_file) { - error_setg(errp, "'data-file' can only be set for images with an " - "external data file"); - ret = -EINVAL; - goto fail; - } + /* Must succeed because we have given up permissions if anything */ + bdrv_child_refresh_perms(bs, bs->file, &error_abort); + } else { + if (s->data_file) { + error_setg(errp, "'data-file' can only be set for images with " + "an external data file"); + ret = -EINVAL; + goto fail; + } - s->data_file = bs->file; + s->data_file = bs->file; - if (data_file_is_raw(bs)) { - error_setg(errp, "data-file-raw requires a data file"); - ret = -EINVAL; - goto fail; + if (data_file_is_raw(bs)) { + error_setg(errp, "data-file-raw requires a data file"); + ret = -EINVAL; + goto fail; + } } } @@ -1838,7 +1857,7 @@ static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options, fail: g_free(s->image_data_file); - if (has_data_file(bs)) { + if (open_data_file && has_data_file(bs)) { bdrv_unref_child(bs, s->data_file); s->data_file = NULL; } @@ -1875,7 +1894,8 @@ static void coroutine_fn qcow2_open_entry(void *opaque) BDRVQcow2State *s = qoc->bs->opaque; qemu_co_mutex_lock(&s->lock); - qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp); + qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, true, + qoc->errp); 
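+ /* the regular open path (qcow2_open) always opens the data file */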
qemu_co_mutex_unlock(&s->lock); } @@ -1890,11 +1910,11 @@ static int qcow2_open(BlockDriverState *bs, QDict *options, int flags, .errp = errp, .ret = -EINPROGRESS }; + int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } /* Initialise locks */ @@ -2713,7 +2733,7 @@ static int qcow2_inactivate(BlockDriverState *bs) return result; } -static void qcow2_close(BlockDriverState *bs) +static void qcow2_do_close(BlockDriverState *bs, bool close_data_file) { BDRVQcow2State *s = bs->opaque; qemu_vfree(s->l1_table); @@ -2739,7 +2759,7 @@ static void qcow2_close(BlockDriverState *bs) g_free(s->image_backing_file); g_free(s->image_backing_format); - if (has_data_file(bs)) { + if (close_data_file && has_data_file(bs)) { bdrv_unref_child(bs, s->data_file); s->data_file = NULL; } @@ -2748,11 +2768,17 @@ static void qcow2_close(BlockDriverState *bs) qcow2_free_snapshots(bs); } +static void qcow2_close(BlockDriverState *bs) +{ + qcow2_do_close(bs, true); +} + static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, Error **errp) { ERRP_GUARD(); BDRVQcow2State *s = bs->opaque; + BdrvChild *data_file; int flags = s->flags; QCryptoBlock *crypto = NULL; QDict *options; @@ -2766,14 +2792,24 @@ static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs, crypto = s->crypto; s->crypto = NULL; - qcow2_close(bs); + /* + * Do not reopen s->data_file (i.e., have qcow2_do_close() not close it, + * and then prevent qcow2_do_open() from opening it), because this function + * runs in the I/O path and as such we must not invoke global-state + * functions like bdrv_unref_child() and bdrv_open_child(). 
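+ * For the same reason, s->data_file is saved across the memset() below and restored afterwards.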
+ */ + qcow2_do_close(bs, false); + + data_file = s->data_file; memset(s, 0, sizeof(BDRVQcow2State)); + s->data_file = data_file; + options = qdict_clone_shallow(bs->options); flags &= ~BDRV_O_INACTIVE; qemu_co_mutex_lock(&s->lock); - ret = qcow2_do_open(bs, options, flags, errp); + ret = qcow2_do_open(bs, options, flags, false, errp); qemu_co_mutex_unlock(&s->lock); qobject_unref(options); if (ret < 0) { diff --git a/block/qed.c b/block/qed.c index 558d3646c4b2b8e2db6de13e11034c167d106e85..e3b06a3d009e9698a4bed08d1532347b78382022 100644 --- a/block/qed.c +++ b/block/qed.c @@ -558,11 +558,11 @@ static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags, .errp = errp, .ret = -EINPROGRESS }; + int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } bdrv_qed_init_state(bs); diff --git a/block/raw-format.c b/block/raw-format.c index bda757fd1954764c0b5e31127d4716b67302d8d1..a8185a3a2c584c13485e6a6e7440dbc9e638206d 100644 --- a/block/raw-format.c +++ b/block/raw-format.c @@ -109,7 +109,7 @@ static int raw_apply_options(BlockDriverState *bs, BDRVRawState *s, if (offset > real_size) { error_setg(errp, "Offset (%" PRIu64 ") cannot be greater than " "size of the containing file (%" PRId64 ")", - s->offset, real_size); + offset, real_size); return -EINVAL; } @@ -117,7 +117,7 @@ static int raw_apply_options(BlockDriverState *bs, BDRVRawState *s, error_setg(errp, "The sum of offset (%" PRIu64 ") and size " "(%" PRIu64 ") has to be smaller or equal to the " " actual size of the containing file (%" PRId64 ")", - s->offset, s->size, real_size); + offset, size, real_size); return -EINVAL; } diff --git a/block/rbd.c b/block/rbd.c index def96292e0eb17ab94c19e244652fe0568ac1062..6caf35cbbade2fe00e14a07cbb986e6ba2e537ef 100644 --- a/block/rbd.c +++ b/block/rbd.c @@ -1107,6 +1107,20 @@ static int coroutine_fn qemu_rbd_start_co(BlockDriverState *bs, assert(!qiov || qiov->size == bytes); + if (cmd == RBD_AIO_WRITE || cmd == RBD_AIO_WRITE_ZEROES) { + /* + * RBD APIs don't allow us to write more than actual size, so in order + * to support growing images, we resize the image before write + * operations that exceed the current size. + */ + if (offset + bytes > s->image_size) { + int r = qemu_rbd_resize(bs, offset + bytes); + if (r < 0) { + return r; + } + } + } + r = rbd_aio_create_completion(&task, (rbd_callback_t) qemu_rbd_completion_cb, &c); if (r < 0) { @@ -1182,18 +1196,6 @@ coroutine_fn qemu_rbd_co_pwritev(BlockDriverState *bs, int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) { - BDRVRBDState *s = bs->opaque; - /* - * RBD APIs don't allow us to write more than actual size, so in order - * to support growing images, we resize the image before write - * operations that exceed the current size. - */ - if (offset + bytes > s->image_size) { - int r = qemu_rbd_resize(bs, offset + bytes); - if (r < 0) { - return r; - } - } return qemu_rbd_start_co(bs, offset, bytes, qiov, flags, RBD_AIO_WRITE); } @@ -1279,11 +1281,11 @@ static int qemu_rbd_diff_iterate_cb(uint64_t offs, size_t len, RBDDiffIterateReq *req = opaque; assert(req->offs + req->bytes <= offs); - /* - * we do not diff against a snapshot so we should never receive a callback - * for a hole. 
- */ - assert(exists); + + /* treat a hole like an unallocated area and bail out */ + if (!exists) { + return 0; + } if (!req->exists && offs > req->offs) { /* @@ -1320,6 +1322,7 @@ static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs, int status, r; RBDDiffIterateReq req = { .offs = offset }; uint64_t features, flags; + uint64_t head = 0; assert(offset + bytes <= s->image_size); @@ -1347,7 +1350,43 @@ static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs, return status; } - r = rbd_diff_iterate2(s->image, NULL, offset, bytes, true, true, +#if LIBRBD_VERSION_CODE < LIBRBD_VERSION(1, 17, 0) + /* + * librbd had a bug until early 2022 that affected all versions of Ceph that + * supported fast-diff. This bug results in incorrect offsets being reported + * if the offset parameter to rbd_diff_iterate2 is not object aligned. + * Work around this bug by rounding down the offset to object boundaries. + * This is OK because we call rbd_diff_iterate2 with whole_object = true. + * However, this workaround only works for non-cloned images with default + * striping. + * + * See: https://tracker.ceph.com/issues/53784 + */ + + /* check if the RBD image has non-default striping enabled */ + if (features & RBD_FEATURE_STRIPINGV2) { + return status; + } + +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeprecated-declarations" + /* + * check if the RBD image is a clone (= has a parent). + * + * rbd_get_parent_info is deprecated from Nautilus onwards, but the + * replacement rbd_get_parent is not present in Luminous and Mimic. + */ + if (rbd_get_parent_info(s->image, NULL, 0, NULL, 0, NULL, 0) != -ENOENT) { + return status; + } +#pragma GCC diagnostic pop + + head = req.offs & (s->object_size - 1); + req.offs -= head; + bytes += head; +#endif + + r = rbd_diff_iterate2(s->image, NULL, req.offs, bytes, true, true, qemu_rbd_diff_iterate_cb, &req); if (r < 0 && r != QEMU_RBD_EXIT_DIFF_ITERATE2) { return status; @@ -1366,7 +1405,8 @@ static int coroutine_fn qemu_rbd_co_block_status(BlockDriverState *bs, status = BDRV_BLOCK_ZERO | BDRV_BLOCK_OFFSET_VALID; } - *pnum = req.bytes; + assert(req.bytes > head); + *pnum = req.bytes - head; return status; } diff --git a/block/replication.c b/block/replication.c index 55c8f894aa312e2555dda24a38ffed9fcfecad70..2f17397764179ed3d53b58e9fd747cd3d1f7a77c 100644 --- a/block/replication.c +++ b/block/replication.c @@ -88,11 +88,9 @@ static int replication_open(BlockDriverState *bs, QDict *options, const char *mode; const char *top_id; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } ret = -EINVAL; diff --git a/block/snapshot.c b/block/snapshot.c index ccacda8bd59bfa2e19c0707c2542bcc7d63f1c83..f5703aa28b9c67e4b24bbee87b069747ed96e11f 100644 --- a/block/snapshot.c +++ b/block/snapshot.c @@ -200,7 +200,7 @@ static BlockDriverState *bdrv_snapshot_fallback(BlockDriverState *bs) int bdrv_can_snapshot(BlockDriverState *bs) { BlockDriver *drv = bs->drv; - if (!drv || !bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) { + if (!drv || !bdrv_is_inserted(bs) || !bdrv_is_writable(bs)) { return 0; } diff --git a/block/throttle.c b/block/throttle.c index 6e8d52fa24511d6f115803f9ef7ae9cc2697633a..4fb5798c27af543426f867978c18945aaea4d2bf 100644 --- a/block/throttle.c +++ b/block/throttle.c @@ -78,11 +78,9 @@ static int throttle_open(BlockDriverState
*bs, QDict *options, char *group; int ret; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY, - false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } bs->supported_write_flags = bs->file->bs->supported_write_flags | BDRV_REQ_WRITE_UNCHANGED; diff --git a/block/vdi.c b/block/vdi.c index bdc58d726ee13a18c7c93e6a1a6bcc35e1f92465..c50c0ed61fde39eddc1c73980453e836dfcb7e96 100644 --- a/block/vdi.c +++ b/block/vdi.c @@ -376,10 +376,9 @@ static int vdi_open(BlockDriverState *bs, QDict *options, int flags, int ret; QemuUUID uuid_link, uuid_parent; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } logout("\n"); diff --git a/block/vhdx.c b/block/vhdx.c index 356ec4c455a42be6e5f68b62c712536e9f2f6310..e7d6d7509a72155919ea633ede1816dc2763b687 100644 --- a/block/vhdx.c +++ b/block/vhdx.c @@ -996,10 +996,9 @@ static int vhdx_open(BlockDriverState *bs, QDict *options, int flags, uint64_t signature; Error *local_err = NULL; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } s->bat = NULL; diff --git a/block/vmdk.c b/block/vmdk.c index 0dfab6e94130246a2dbc8d0646513f90487fbb6d..7d7e56b36c89f16c52a68d45c2918fa1911e9282 100644 --- a/block/vmdk.c +++ b/block/vmdk.c @@ -1262,10 +1262,9 @@ static int vmdk_open(BlockDriverState *bs, QDict *options, int flags, BDRVVmdkState *s = bs->opaque; uint32_t magic; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } buf = vmdk_read_desc(bs->file, 0, errp); diff --git a/block/vpc.c b/block/vpc.c index 297a26262ab1e58d6d42549decda77578319a73c..430cab1cbb9e28c7df46696f6882a6bd825e259c 100644 --- a/block/vpc.c +++ b/block/vpc.c @@ -232,10 +232,9 @@ static int vpc_open(BlockDriverState *bs, QDict *options, int flags, int ret; int64_t bs_size; - bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds, - BDRV_CHILD_IMAGE, false, errp); - if (!bs->file) { - return -EINVAL; + ret = bdrv_open_file_child(NULL, options, "file", bs, errp); + if (ret < 0) { + return ret; } opts = qemu_opts_create(&vpc_runtime_opts, NULL, 0, &error_abort); diff --git a/block/vvfat.c b/block/vvfat.c index 5dacc6cfac42decf61c04e15dc07dbc033316130..935a10bdd3e3d731d8d104eeb29bfc9ff018cd13 100644 --- a/block/vvfat.c +++ b/block/vvfat.c @@ -882,7 +882,7 @@ static int read_directory(BDRVVVFATState* s, int mapping_index) return 0; } -static inline uint32_t sector2cluster(BDRVVVFATState* s,off_t sector_num) +static inline int32_t sector2cluster(BDRVVVFATState* s,off_t sector_num) { return (sector_num - s->offset_to_root_dir) / s->sectors_per_cluster; } @@ -1230,6 +1230,7 @@ static int vvfat_open(BlockDriverState *bs, QDict *options, int flags, dirname, cyls, heads, secs)); s->sector_count = cyls * heads * secs - s->offset_to_bootsector; + bs->total_sectors = cyls * heads * secs; if (qemu_opt_get_bool(opts, "rw", false)) { if (!bdrv_is_read_only(bs)) { @@ -1250,8 +1251,6 @@ static int 
vvfat_open(BlockDriverState *bs, QDict *options, int flags, } } - bs->total_sectors = cyls * heads * secs; - if (init_directories(s, dirname, heads, secs, errp)) { ret = -EIO; goto fail; @@ -1368,8 +1367,9 @@ static int open_file(BDRVVVFATState* s,mapping_t* mapping) return -1; vvfat_close_current_file(s); s->current_fd = fd; - s->current_mapping = mapping; } + + s->current_mapping = mapping; return 0; } @@ -2521,8 +2521,9 @@ static int commit_one_file(BDRVVVFATState* s, return -1; } - for (i = s->cluster_size; i < offset; i += s->cluster_size) + for (i = 0; i < offset; i += s->cluster_size) { c = modified_fat_get(s, c); + } fd = qemu_open_old(mapping->path, O_RDWR | O_CREAT | O_BINARY, 0666); if (fd < 0) { @@ -2982,6 +2983,7 @@ static int vvfat_write(BlockDriverState *bs, int64_t sector_num, { BDRVVVFATState *s = bs->opaque; int i, ret; + int first_cluster, last_cluster; DLOG(checkpoint()); @@ -3000,9 +3002,20 @@ DLOG(checkpoint()); if (sector_num < s->offset_to_fat) return -1; - for (i = sector2cluster(s, sector_num); - i <= sector2cluster(s, sector_num + nb_sectors - 1);) { - mapping_t* mapping = find_mapping_for_cluster(s, i); + /* + * Values will be negative for writes to the FAT, which is located before + * the root directory. + */ + first_cluster = sector2cluster(s, sector_num); + last_cluster = sector2cluster(s, sector_num + nb_sectors - 1); + + for (i = first_cluster; i <= last_cluster;) { + mapping_t *mapping = NULL; + + if (i >= 0) { + mapping = find_mapping_for_cluster(s, i); + } + if (mapping) { if (mapping->read_only) { fprintf(stderr, "Tried to write to write-protected file %s\n", @@ -3042,8 +3055,9 @@ DLOG(checkpoint()); } } i = mapping->end; - } else + } else { i++; + } } /* @@ -3057,10 +3071,11 @@ DLOG(fprintf(stderr, "Write to qcow backend: %d + %d\n", (int)sector_num, nb_sec return ret; } - for (i = sector2cluster(s, sector_num); - i <= sector2cluster(s, sector_num + nb_sectors - 1); i++) - if (i >= 0) + for (i = first_cluster; i <= last_cluster; i++) { + if (i >= 0) { s->used_clusters[i] |= USED_ALLOCATED; + } + } DLOG(checkpoint()); /* TODO: add timeout */ @@ -3147,8 +3162,8 @@ static int enable_write_target(BlockDriverState *bs, Error **errp) } opts = qemu_opts_create(bdrv_qcow->create_opts, NULL, 0, &error_abort); - qemu_opt_set_number(opts, BLOCK_OPT_SIZE, s->sector_count * 512, - &error_abort); + qemu_opt_set_number(opts, BLOCK_OPT_SIZE, + bs->total_sectors * BDRV_SECTOR_SIZE, &error_abort); qemu_opt_set(opts, BLOCK_OPT_BACKING_FILE, "fat:", &error_abort); ret = bdrv_create(bdrv_qcow, s->qcow_filename, opts, errp); diff --git a/blockdev-nbd.c b/blockdev-nbd.c index bdfa7ed3a5a9afb819c87fffc416566247f7a448..94e9eddc3c6910b333d6ff80b36d6151c4cda494 100644 --- a/blockdev-nbd.c +++ b/blockdev-nbd.c @@ -21,12 +21,18 @@ #include "io/channel-socket.h" #include "io/net-listener.h" +typedef struct NBDConn { + QIOChannelSocket *cioc; + QLIST_ENTRY(NBDConn) next; +} NBDConn; + typedef struct NBDServerData { QIONetListener *listener; QCryptoTLSCreds *tlscreds; char *tlsauthz; uint32_t max_connections; uint32_t connections; + QLIST_HEAD(, NBDConn) conns; } NBDServerData; static NBDServerData *nbd_server; @@ -46,6 +52,14 @@ bool nbd_server_is_running(void) static void nbd_blockdev_client_closed(NBDClient *client, bool ignored) { + NBDConn *conn = nbd_client_owner(client); + + assert(qemu_in_main_thread() && nbd_server); + + object_unref(OBJECT(conn->cioc)); + QLIST_REMOVE(conn, next); + g_free(conn); + nbd_client_put(client); assert(nbd_server->connections > 0); 
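/* per-connection state is gone; drop the server's connection count */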
nbd_server->connections--; @@ -55,31 +69,56 @@ static void nbd_blockdev_client_closed(NBDClient *client, bool ignored) static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc, gpointer opaque) { + NBDConn *conn = g_new0(NBDConn, 1); + + assert(qemu_in_main_thread() && nbd_server); nbd_server->connections++; + object_ref(OBJECT(cioc)); + conn->cioc = cioc; + QLIST_INSERT_HEAD(&nbd_server->conns, conn, next); nbd_update_server_watch(nbd_server); qio_channel_set_name(QIO_CHANNEL(cioc), "nbd-server"); - nbd_client_new(cioc, nbd_server->tlscreds, nbd_server->tlsauthz, - nbd_blockdev_client_closed); + /* TODO - expose handshake timeout as QMP option */ + nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS, + nbd_server->tlscreds, nbd_server->tlsauthz, + nbd_blockdev_client_closed, conn); } static void nbd_update_server_watch(NBDServerData *s) { - if (!s->max_connections || s->connections < s->max_connections) { - qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, NULL); - } else { - qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL); + if (s->listener) { + if (!s->max_connections || s->connections < s->max_connections) { + qio_net_listener_set_client_func(s->listener, nbd_accept, NULL, + NULL); + } else { + qio_net_listener_set_client_func(s->listener, NULL, NULL, NULL); + } } } static void nbd_server_free(NBDServerData *server) { + NBDConn *conn, *tmp; + if (!server) { return; } + /* + * Forcefully close the listener socket, and any clients that have + * not yet disconnected on their own. + */ qio_net_listener_disconnect(server->listener); object_unref(OBJECT(server->listener)); + server->listener = NULL; + QLIST_FOREACH_SAFE(conn, &server->conns, next, tmp) { + qio_channel_shutdown(QIO_CHANNEL(conn->cioc), QIO_CHANNEL_SHUTDOWN_BOTH, + NULL); + } + + AIO_WAIT_WHILE_UNLOCKED(NULL, server->connections > 0); + if (server->tlscreds) { object_unref(OBJECT(server->tlscreds)); } @@ -169,6 +208,10 @@ void nbd_server_start(SocketAddress *addr, const char *tls_creds, void nbd_server_start_options(NbdServerOptions *arg, Error **errp) { + if (!arg->has_max_connections) { + arg->max_connections = NBD_DEFAULT_MAX_CONNECTIONS; + } + nbd_server_start(arg->addr, arg->tls_creds, arg->tls_authz, arg->max_connections, errp); } @@ -181,6 +224,10 @@ void qmp_nbd_server_start(SocketAddressLegacy *addr, { SocketAddress *addr_flat = socket_address_flatten(addr); + if (!has_max_connections) { + max_connections = NBD_DEFAULT_MAX_CONNECTIONS; + } + nbd_server_start(addr_flat, tls_creds, tls_authz, max_connections, errp); qapi_free_SocketAddress(addr_flat); } diff --git a/blockdev.c b/blockdev.c index b35072644eba92fa1291d1c270afcfd52240030e..3ce294ec4a7cb4c0b83dd1171bd8f78cd9732de8 100644 --- a/blockdev.c +++ b/blockdev.c @@ -333,6 +333,8 @@ static int parse_block_error_action(const char *buf, bool is_read, Error **errp) return BLOCKDEV_ON_ERROR_STOP; } else if (!strcmp(buf, "report")) { return BLOCKDEV_ON_ERROR_REPORT; + } else if (!strcmp(buf, "retry")) { + return BLOCKDEV_ON_ERROR_RETRY; } else { error_setg(errp, "'%s' invalid %s error action", buf, is_read ? 
"read" : "write"); @@ -478,6 +480,7 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, const char *buf; int bdrv_flags = 0; int on_read_error, on_write_error; + int64_t retry_interval, retry_timeout; bool account_invalid, account_failed; bool writethrough, read_only; BlockBackend *blk; @@ -489,6 +492,7 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, QDict *interval_dict = NULL; QList *interval_list = NULL; const char *id; + const char *cache; BlockdevDetectZeroesOptions detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF; const char *throttling_group = NULL; @@ -552,7 +556,7 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, qdict_put_str(bs_opts, "driver", buf); } - on_write_error = BLOCKDEV_ON_ERROR_ENOSPC; + on_write_error = BLOCKDEV_ON_ERROR_REPORT; if ((buf = qemu_opt_get(opts, "werror")) != NULL) { on_write_error = parse_block_error_action(buf, 0, &error); if (error) { @@ -570,12 +574,31 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, } } + retry_interval = qemu_opt_get_number(opts, "retry_interval", + BLOCK_BACKEND_DEFAULT_RETRY_INTERVAL); + retry_timeout = qemu_opt_get_number(opts, "retry_timeout", 0); + if (snapshot) { bdrv_flags |= BDRV_O_SNAPSHOT; } read_only = qemu_opt_get_bool(opts, BDRV_OPT_READ_ONLY, false); + if (!file || !*file) { + cache = qdict_get_try_str(bs_opts, BDRV_OPT_CACHE_NO_FLUSH); + if (cache && !strcmp(cache, "on")) { + bdrv_flags |= BDRV_O_NO_FLUSH; + } + + cache = qdict_get_try_str(bs_opts, BDRV_OPT_CACHE_DIRECT); + if (cache && !strcmp(cache, "on")) { + bdrv_flags |= BDRV_O_NOCACHE; + } + + qdict_del(bs_opts, BDRV_OPT_CACHE_NO_FLUSH); + qdict_del(bs_opts, BDRV_OPT_CACHE_DIRECT); + } + /* init */ if ((!file || !*file) && !qdict_size(bs_opts)) { BlockBackendRootState *blk_rs; @@ -633,6 +656,11 @@ static BlockBackend *blockdev_init(const char *file, QDict *bs_opts, blk_set_enable_write_cache(blk, !writethrough); blk_set_on_error(blk, on_read_error, on_write_error); + if (on_read_error == BLOCKDEV_ON_ERROR_RETRY || + on_write_error == BLOCKDEV_ON_ERROR_RETRY) { + blk_set_on_error_retry_interval(blk, retry_interval); + blk_set_on_error_retry_timeout(blk, retry_timeout); + } if (!monitor_add_blk(blk, id, errp)) { blk_unref(blk); @@ -759,6 +787,14 @@ QemuOptsList qemu_legacy_drive_opts = { .name = "werror", .type = QEMU_OPT_STRING, .help = "write error action", + },{ + .name = "retry_interval", + .type = QEMU_OPT_NUMBER, + .help = "interval for retry action in millisecond", + },{ + .name = "retry_timeout", + .type = QEMU_OPT_NUMBER, + .help = "timeout for retry action in millisecond", },{ .name = "copy-on-read", .type = QEMU_OPT_BOOL, @@ -781,6 +817,7 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type, BlockInterfaceType type; int max_devs, bus_id, unit_id, index; const char *werror, *rerror; + int64_t retry_interval, retry_timeout; bool read_only = false; bool copy_on_read; const char *filename; @@ -988,6 +1025,29 @@ DriveInfo *drive_new(QemuOpts *all_opts, BlockInterfaceType block_default_type, qdict_put_str(bs_opts, "rerror", rerror); } + if (qemu_opt_find(legacy_opts, "retry_interval")) { + if ((werror == NULL || strcmp(werror, "retry")) && + (rerror == NULL || strcmp(rerror, "retry"))) { + error_setg(errp, "retry_interval is only supported " + "by werror/rerror=retry"); + goto fail; + } + retry_interval = qemu_opt_get_number(legacy_opts, "retry_interval", + BLOCK_BACKEND_DEFAULT_RETRY_INTERVAL); + qdict_put_int(bs_opts, "retry_interval", 
retry_interval); + } + + if (qemu_opt_find(legacy_opts, "retry_timeout")) { + if ((werror == NULL || strcmp(werror, "retry")) && + (rerror == NULL || strcmp(rerror, "retry"))) { + error_setg(errp, "retry_timeout is only supported " + "with werror/rerror=retry"); + goto fail; + } + retry_timeout = qemu_opt_get_number(legacy_opts, "retry_timeout", 0); + qdict_put_int(bs_opts, "retry_timeout", retry_timeout); + } + /* Actual block device init: Functionality shared with blockdev-add */ blk = blockdev_init(filename, bs_opts, errp); bs_opts = NULL; @@ -3804,6 +3864,14 @@ QemuOptsList qemu_common_drive_opts = { .name = "werror", .type = QEMU_OPT_STRING, .help = "write error action", + },{ + .name = "retry_interval", + .type = QEMU_OPT_NUMBER, + .help = "interval for retry action, in milliseconds", + },{ + .name = "retry_timeout", + .type = QEMU_OPT_NUMBER, + .help = "timeout for retry action, in milliseconds", },{ .name = BDRV_OPT_READ_ONLY, .type = QEMU_OPT_BOOL, diff --git a/chardev/baum.c b/chardev/baum.c index 79d618e350450f43786353fe5d956a1e3e5e1c45..6a210ffd815677ae1abc3fb942c5dc58807312c5 100644 --- a/chardev/baum.c +++ b/chardev/baum.c @@ -87,6 +87,9 @@ #define BUF_SIZE 256 +#define X_MAX 84 +#define Y_MAX 1 + struct BaumChardev { Chardev parent; @@ -244,11 +247,11 @@ static int baum_deferred_init(BaumChardev *baum) brlapi_perror("baum: brlapi__getDisplaySize"); return 0; } - if (baum->y > 1) { - baum->y = 1; + if (baum->y > Y_MAX) { + baum->y = Y_MAX; } - if (baum->x > 84) { - baum->x = 84; + if (baum->x > X_MAX) { + baum->x = X_MAX; } con = qemu_console_lookup_by_index(0); @@ -380,9 +383,9 @@ static int baum_eat_packet(BaumChardev *baum, const uint8_t *buf, int len) switch (req) { case BAUM_REQ_DisplayData: { - uint8_t cells[baum->x * baum->y], c; - uint8_t text[baum->x * baum->y]; - uint8_t zero[baum->x * baum->y]; + uint8_t cells[X_MAX * Y_MAX], c; + uint8_t text[X_MAX * Y_MAX]; + uint8_t zero[X_MAX * Y_MAX]; int cursor = BRLAPI_CURSOR_OFF; int i; @@ -405,7 +408,7 @@ static int baum_eat_packet(BaumChardev *baum, const uint8_t *buf, int len) } timer_del(baum->cellCount_timer); - memset(zero, 0, sizeof(zero)); + memset(zero, 0, baum->x * baum->y); brlapi_writeArguments_t wa = { .displayNumber = BRLAPI_DISPLAY_DEFAULT, diff --git a/chardev/char-socket.c b/chardev/char-socket.c index 836cfa0bc21be39ea1bcc6ab4a48eafbd2ef0bdf..278190dd9332bf9ead255dd45fccc169b4ee4619 100644 --- a/chardev/char-socket.c +++ b/chardev/char-socket.c @@ -393,6 +393,28 @@ static GSource *tcp_chr_add_watch(Chardev *chr, GIOCondition cond) return qio_channel_create_watch(s->ioc, cond); } +static void tcp_chr_set_reconnect_time(Chardev *chr, + int64_t reconnect_time) +{ + SocketChardev *s = SOCKET_CHARDEV(chr); + s->reconnect_time = reconnect_time; +} + +void qemu_chr_set_reconnect_time(Chardev *chr, int64_t reconnect_time) +{ + ChardevClass *cc = CHARDEV_GET_CLASS(chr); + SocketChardev *s = SOCKET_CHARDEV(chr); + + /* if the socket chardev is listening, don't set a reconnect time */ + if (s->is_listen) { + return; + } + + if (cc->chr_set_reconnect_time) { + cc->chr_set_reconnect_time(chr, reconnect_time); + } +} + static void remove_hup_source(SocketChardev *s) { if (s->hup_source != NULL) { @@ -591,7 +613,7 @@ static int tcp_chr_sync_read(Chardev *chr, const uint8_t *buf, int len) if (s->state != TCP_CHARDEV_STATE_DISCONNECTED) { qio_channel_set_blocking(s->ioc, false, NULL); } - if (size == 0) { + if (size == 0 && chr->chr_for_flag != CHR_FOR_VHOST_USER) { /* connection closed */ tcp_chr_disconnect(chr); } @@ -610,12 +632,10 @@ static
char *qemu_chr_compute_filename(SocketChardev *s) const char *left = "", *right = ""; switch (ss->ss_family) { -#ifndef _WIN32 case AF_UNIX: return g_strdup_printf("unix:%s%s", ((struct sockaddr_un *)(ss))->sun_path, s->is_listen ? ",server=on" : ""); -#endif case AF_INET6: left = "["; right = "]"; @@ -797,8 +817,12 @@ static void tcp_chr_websock_handshake(QIOTask *task, gpointer user_data) { Chardev *chr = user_data; SocketChardev *s = user_data; + Error *err = NULL; - if (qio_task_propagate_error(task, NULL)) { + if (qio_task_propagate_error(task, &err)) { + error_reportf_err(err, + "websock handshake of character device %s failed: ", + chr->label); tcp_chr_disconnect(chr); } else { if (s->do_telnetopt) { @@ -833,8 +857,12 @@ static void tcp_chr_tls_handshake(QIOTask *task, { Chardev *chr = user_data; SocketChardev *s = user_data; + Error *err = NULL; - if (qio_task_propagate_error(task, NULL)) { + if (qio_task_propagate_error(task, &err)) { + error_reportf_err(err, + "TLS handshake of character device %s failed: ", + chr->label); tcp_chr_disconnect(chr); } else { if (s->is_websock) { @@ -1120,6 +1148,7 @@ static void char_socket_finalize(Object *obj) qio_net_listener_set_client_func_full(s->listener, NULL, NULL, NULL, chr->gcontext); object_unref(OBJECT(s->listener)); + s->listener = NULL; } if (s->tls_creds) { object_unref(OBJECT(s->tls_creds)); @@ -1419,10 +1448,12 @@ static void qmp_chardev_open_socket(Chardev *chr, } qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_RECONNECTABLE); +#ifndef _WIN32 /* TODO SOCKET_ADDRESS_FD where fd has AF_UNIX */ if (addr->type == SOCKET_ADDRESS_TYPE_UNIX) { qemu_chr_set_feature(chr, QEMU_CHAR_FEATURE_FD_PASS); } +#endif /* * In the chardev-change special-case, we shouldn't register a new yank @@ -1585,6 +1616,7 @@ static void char_socket_class_init(ObjectClass *oc, void *data) cc->set_msgfds = tcp_set_msgfds; cc->chr_add_client = tcp_chr_add_client; cc->chr_add_watch = tcp_chr_add_watch; + cc->chr_set_reconnect_time = tcp_chr_set_reconnect_time; cc->chr_update_read_handler = tcp_chr_update_read_handler; object_class_property_add(oc, "addr", "SocketAddress", diff --git a/chardev/char-stdio.c b/chardev/char-stdio.c index 403da308c980ebf24eaacb61cc2e77c6ffb5ad03..69bedca7f51b17813ace6221717bfa8997f38260 100644 --- a/chardev/char-stdio.c +++ b/chardev/char-stdio.c @@ -41,6 +41,7 @@ /* init terminal so that we can grab keys */ static struct termios oldtty; static int old_fd0_flags; +static int old_fd1_flags; static bool stdio_in_use; static bool stdio_allow_signal; static bool stdio_echo_state; @@ -50,6 +51,8 @@ static void term_exit(void) if (stdio_in_use) { tcsetattr(0, TCSANOW, &oldtty); fcntl(0, F_SETFL, old_fd0_flags); + fcntl(1, F_SETFL, old_fd1_flags); + stdio_in_use = false; } } @@ -102,6 +105,7 @@ static void qemu_chr_open_stdio(Chardev *chr, stdio_in_use = true; old_fd0_flags = fcntl(0, F_GETFL); + old_fd1_flags = fcntl(1, F_GETFL); tcgetattr(0, &oldtty); qemu_set_nonblock(0); atexit(term_exit); diff --git a/chardev/char.c b/chardev/char.c index 0169d8dde4b533c9cf851831b03c8adcac24cff5..b76bbe8019309d56c87820d3ff68ea6e4aac68ca 100644 --- a/chardev/char.c +++ b/chardev/char.c @@ -320,7 +320,7 @@ static bool qemu_chr_is_busy(Chardev *s) { if (CHARDEV_IS_MUX(s)) { MuxChardev *d = MUX_CHARDEV(s); - return d->mux_cnt >= 0; + return d->mux_cnt > 0; } else { return s->be != NULL; } @@ -519,7 +519,7 @@ static const ChardevClass *char_get_class(const char *driver, Error **errp) if (object_class_is_abstract(oc)) { error_setg(errp, 
QERR_INVALID_PARAMETER_VALUE, "driver", - "an abstract device type"); + "a non-abstract device type"); return NULL; } diff --git a/chardev/msmouse.c b/chardev/msmouse.c index eb9231dcdb988fa9c0ca2d61f2ae5ebfd932ab43..2cc1b16561dbb18954940cefa2e53a5ed8794c8d 100644 --- a/chardev/msmouse.c +++ b/chardev/msmouse.c @@ -146,7 +146,9 @@ static void char_msmouse_finalize(Object *obj) { MouseChardev *mouse = MOUSE_CHARDEV(obj); - qemu_input_handler_unregister(mouse->hs); + if (mouse->hs) { + qemu_input_handler_unregister(mouse->hs); + } } static QemuInputHandler msmouse_handler = { diff --git a/chardev/wctablet.c b/chardev/wctablet.c index e8b292c43ca782e9b9e7ce3a1f338733ac3fe725..43bdf6b608350e0cd3fd0595befe3bf3226f6023 100644 --- a/chardev/wctablet.c +++ b/chardev/wctablet.c @@ -319,7 +319,9 @@ static void wctablet_chr_finalize(Object *obj) { TabletChardev *tablet = WCTABLET_CHARDEV(obj); - qemu_input_handler_unregister(tablet->hs); + if (tablet->hs) { + qemu_input_handler_unregister(tablet->hs); + } } static void wctablet_chr_open(Chardev *chr, diff --git a/configs/devices/aarch64-softmmu/default.mak b/configs/devices/aarch64-softmmu/default.mak index cf43ac8da11658504d9637dca13c1f8e0ce97dc7..c7a710a0f123b3d5361726b5573a2ca9e4d9f3cc 100644 --- a/configs/devices/aarch64-softmmu/default.mak +++ b/configs/devices/aarch64-softmmu/default.mak @@ -6,3 +6,4 @@ include ../arm-softmmu/default.mak CONFIG_XLNX_ZYNQMP_ARM=y CONFIG_XLNX_VERSAL=y CONFIG_SBSA_REF=y +CONFIG_CPUFREQ=y diff --git a/configs/devices/loongarch64-softmmu/default.mak b/configs/devices/loongarch64-softmmu/default.mak new file mode 100644 index 0000000000000000000000000000000000000000..5f72bbfb89d9f685233d15b900e7faa41b7022a6 --- /dev/null +++ b/configs/devices/loongarch64-softmmu/default.mak @@ -0,0 +1,157 @@ +# Default configuration for loongarch-softmmu + +CONFIG_PCI=y +CONFIG_ACPI_PCI=y +# For now, CONFIG_IDE_CORE requires ISA, so we enable it here +CONFIG_ISA_BUS=y +CONFIG_VIRTIO_PCI=y + +CONFIG_VGA_PCI=y +CONFIG_ACPI_SMBUS=y +CONFIG_VHOST_USER_SCSI=y +CONFIG_VHOST_USER_BLK=y +CONFIG_VIRTIO=y +CONFIG_VIRTIO_BALLOON=y +CONFIG_VIRTIO_BLK=y +CONFIG_VIRTIO_CRYPTO=y +CONFIG_VIRTIO_GPU=y +CONFIG_VIRTIO_INPUT=y +CONFIG_VIRTIO_NET=y +CONFIG_VIRTIO_RNG=y +CONFIG_SCSI=y +CONFIG_VIRTIO_SCSI=y +CONFIG_VIRTIO_SERIAL=y + +CONFIG_USB_UHCI=y +CONFIG_USB_OHCI=y +CONFIG_USB_OHCI_PCI=y +CONFIG_USB_XHCI=y +CONFIG_USB_XHCI_NEC=y +CONFIG_NE2000_PCI=y +CONFIG_EEPRO100_PCI=y +CONFIG_PCNET_PCI=y +CONFIG_PCNET_COMMON=y +CONFIG_AC97=y +CONFIG_HDA=y +CONFIG_ES1370=y +CONFIG_SCSI=y +CONFIG_LSI_SCSI_PCI=y +CONFIG_VMW_PVSCSI_SCSI_PCI=y +CONFIG_MEGASAS_SCSI_PCI=y +CONFIG_MPTSAS_SCSI_PCI=y +CONFIG_RTL8139_PCI=y +CONFIG_E1000_PCI=y +CONFIG_IDE_CORE=y +CONFIG_IDE_QDEV=y +CONFIG_IDE_PCI=y +CONFIG_AHCI=y +CONFIG_AHCI_ICH9=y +CONFIG_ESP=y +CONFIG_ESP_PCI=y +CONFIG_SERIAL=y +CONFIG_SERIAL_ISA=y +CONFIG_SERIAL_PCI=y +CONFIG_CAN_BUS=y +CONFIG_CAN_SJA1000=y +CONFIG_CAN_PCI=y +CONFIG_USB_UHCI=y +CONFIG_USB_OHCI=y +CONFIG_USB_XHCI=y +CONFIG_USB_XHCI_NEC=y +CONFIG_NE2000_PCI=y +CONFIG_EEPRO100_PCI=y +CONFIG_PCNET_PCI=y +CONFIG_PCNET_COMMON=y +CONFIG_AC97=y +CONFIG_HDA=y +CONFIG_ES1370=y +CONFIG_SCSI=y +CONFIG_LSI_SCSI_PCI=y +CONFIG_VMW_PVSCSI_SCSI_PCI=y +CONFIG_MEGASAS_SCSI_PCI=y +CONFIG_MPTSAS_SCSI_PCI=y +CONFIG_RTL8139_PCI=y +CONFIG_E1000_PCI=y +CONFIG_IDE_CORE=y +CONFIG_IDE_QDEV=y +CONFIG_IDE_PCI=y +CONFIG_AHCI=y +CONFIG_ESP=y +CONFIG_ESP_PCI=y +CONFIG_SERIAL=y +CONFIG_SERIAL_ISA=y +CONFIG_SERIAL_PCI=y +CONFIG_CAN_BUS=y +CONFIG_CAN_SJA1000=y +CONFIG_CAN_PCI=y + 
+CONFIG_SPICE=y +CONFIG_QXL=y +CONFIG_ESP=y +CONFIG_SCSI=y +CONFIG_VGA_ISA=y +CONFIG_VGA_ISA_MM=y +CONFIG_VGA_CIRRUS=y +CONFIG_VMWARE_VGA=y +CONFIG_SERIAL=y +CONFIG_SERIAL_ISA=y +CONFIG_PARALLEL=y +CONFIG_I8254=y +CONFIG_PCSPK=y +CONFIG_PCKBD=y +CONFIG_FDC=y +CONFIG_ACPI=y +CONFIG_ACPI_MEMORY_HOTPLUG=y +CONFIG_ACPI_NVDIMM=y +CONFIG_ACPI_CPU_HOTPLUG=y +CONFIG_APM=y +CONFIG_I8257=y +CONFIG_PIIX4=y +CONFIG_IDE_ISA=y +CONFIG_IDE_PIIX=y +CONFIG_MIPSNET=y +CONFIG_PFLASH_CFI01=y +CONFIG_I8259=y +CONFIG_MC146818RTC=y +CONFIG_ISA_TESTDEV=y +CONFIG_EMPTY_SLOT=y +CONFIG_I2C=y +CONFIG_DIMM=y +CONFIG_MEM_DEVICE=y + +# Arch Specified CONFIG defines +CONFIG_IDE_VIA=y +CONFIG_VT82C686=y +CONFIG_RC4030=y +CONFIG_DP8393X=y +CONFIG_DS1225Y=y +CONFIG_FITLOADER=y +CONFIG_SMBIOS=y + +CONFIG_PCIE_PORT=y +CONFIG_I82801B11=y +CONFIG_XIO3130=y +CONFIG_PCI_EXPRESS=y +CONFIG_MSI_NONBROKEN=y +CONFIG_IOH3420=y +CONFIG_SD=y +CONFIG_SDHCI=y +CONFIG_VIRTFS=y +CONFIG_VIRTIO_9P=y +CONFIG_USB_EHCI=y +CONFIG_USB_EHCI_PCI=y +CONFIG_USB_EHCI_SYSBUS=y +CONFIG_USB_STORAGE_BOT=y +CONFIG_TPM_EMULATOR=y +CONFIG_TPM_TIS=y +CONFIG_PLATFORM_BUS=y +CONFIG_TPM_TIS_SYSBUS=y +CONFIG_ACPI_LOONGARCH=y +CONFIG_LS7A_RTC=y + +#vfio config +CONFIG_VFIO=y +CONFIG_VFIO_PCI=y +CONFIG_VFIO_PLATFORM=y +CONFIG_VFIO_XGMAC=y +CONFIG_VFIO_AMD_XGBE=y diff --git a/configs/devices/sh4-softmmu/default.mak b/configs/devices/sh4-softmmu/default.mak index 565e8b0b5df697ed047fe4c577d8431743b28f07..5e86578647c0325f012c5c7a7d409385a1fcd3d3 100644 --- a/configs/devices/sh4-softmmu/default.mak +++ b/configs/devices/sh4-softmmu/default.mak @@ -1,4 +1,4 @@ -# Default configuration for sh4eb-softmmu +# Default configuration for sh4-softmmu # Uncomment the following lines to disable these optional devices: # diff --git a/configs/devices/sw64-softmmu/default.mak b/configs/devices/sw64-softmmu/default.mak new file mode 100644 index 0000000000000000000000000000000000000000..0b4d56b43e42c2afc75fd5662ce4a42b2ce1d604 --- /dev/null +++ b/configs/devices/sw64-softmmu/default.mak @@ -0,0 +1,10 @@ +# Default configuration for sw64-softmmu + +# Uncomment the following lines to disable these optional devices: +# +#CONFIG_PCI_DEVICES=n +#CONFIG_TEST_DEVICES=n + +# Boards: +# +CONFIG_CORE3=y diff --git a/configs/targets/loongarch64-softmmu.mak b/configs/targets/loongarch64-softmmu.mak new file mode 100644 index 0000000000000000000000000000000000000000..c42dfbbd9cf4ead4da4f19f7544fb43993f1baee --- /dev/null +++ b/configs/targets/loongarch64-softmmu.mak @@ -0,0 +1,3 @@ +TARGET_ARCH=loongarch64 +TARGET_SUPPORTS_MTTCG=y +TARGET_XML_FILES= gdb-xml/loongarch-base64.xml gdb-xml/loongarch-fpu.xml diff --git a/configs/targets/sw64-linux-user.mak b/configs/targets/sw64-linux-user.mak new file mode 100644 index 0000000000000000000000000000000000000000..ae00665692470c2cf4352ec92e66785fa4b2c324 --- /dev/null +++ b/configs/targets/sw64-linux-user.mak @@ -0,0 +1,5 @@ +TARGET_ARCH=sw64 +TARGET_SYSTBL_ABI=common +TARGET_SYSTBL=syscall.tbl +TARGET_ALIGNED_ONLY=y +TARGET_XML_FILES= gdb-xml/sw64-core.xml diff --git a/configs/targets/sw64-softmmu.mak b/configs/targets/sw64-softmmu.mak new file mode 100644 index 0000000000000000000000000000000000000000..9cf002df8ca4568dadf69cc1ff5130c1872a7b3b --- /dev/null +++ b/configs/targets/sw64-softmmu.mak @@ -0,0 +1,9 @@ +# Default configuration for sw64-softmmu + +# Boards: +# +TARGET_ARCH=sw64 +TARGET_BASE_ARCH=sw64 +TARGET_ABI_DIR=sw64 +TARGET_SUPPORTS_MTTCG=y +TARGET_XML_FILES= gdb-xml/sw64-core.xml diff --git a/configure b/configure index 
48c21775f3a90c91631d90bd6e3ec6b060215465..7d508b2e72c2b9d19db0611cb11c6a3d41c2c522 100755 --- a/configure +++ b/configure @@ -57,7 +57,7 @@ GNUmakefile: ; EOF cd build - exec $source_path/configure "$@" + exec "$source_path/configure" "$@" fi # Temporary directory used for files created while @@ -67,8 +67,7 @@ fi # it when configure exits.) TMPDIR1="config-temp" rm -rf "${TMPDIR1}" -mkdir -p "${TMPDIR1}" -if [ $? -ne 0 ]; then +if ! mkdir -p "${TMPDIR1}"; then echo "ERROR: failed to create temporary directory" exit 1 fi @@ -330,8 +329,6 @@ qom_cast_debug="yes" trace_backends="log" trace_file="trace" opengl="$default_feature" -cpuid_h="no" -avx2_opt="$default_feature" guest_agent="$default_feature" guest_agent_with_vss="no" guest_agent_ntddscsi="no" @@ -361,7 +358,6 @@ plugins="$default_feature" rng_none="no" secret_keyring="$default_feature" meson="" -meson_args="" ninja="" gio="$default_feature" skip_meson=no @@ -581,6 +577,8 @@ elif check_define __arm__ ; then cpu="arm" elif check_define __aarch64__ ; then cpu="aarch64" +elif check_define __loongarch__ ; then + cpu="loongarch64" else cpu=$(uname -m) fi @@ -606,12 +604,18 @@ case "$cpu" in aarch64) cpu="aarch64" ;; + loongarch64) + cpu="loongarch64" + ;; mips*) cpu="mips" ;; sparc|sun4[cdmuv]) cpu="sparc" ;; + sw_64) + cpu="sw64" + ;; *) # This will result in either an error or falling back to TCI later ARCH=unknown @@ -626,7 +630,6 @@ fi case $targetos in MINGW32*) mingw32="yes" - supported_os="yes" plugins="no" pie="no" ;; @@ -668,7 +671,6 @@ SunOS) QEMU_CFLAGS="-D__EXTENSIONS__ $QEMU_CFLAGS" ;; Haiku) - haiku="yes" pie="no" QEMU_CFLAGS="-DB_USE_POSITIVE_POSIX_ERRORS -D_BSD_SOURCE -fPIC $QEMU_CFLAGS" ;; @@ -726,7 +728,7 @@ fi werror="" -. $source_path/scripts/meson-buildoptions.sh +. "$source_path/scripts/meson-buildoptions.sh" meson_options= meson_option_parse() { @@ -743,7 +745,7 @@ for opt do case "$opt" in --help|-h) show_help=yes ;; - --version|-V) exec cat $source_path/VERSION + --version|-V) exec cat "$source_path/VERSION" ;; --prefix=*) prefix="$optarg" ;; @@ -1047,14 +1049,6 @@ for opt do ;; --disable-tools) want_tools="no" ;; - --disable-avx2) avx2_opt="no" - ;; - --enable-avx2) avx2_opt="yes" - ;; - --disable-avx512f) avx512f_opt="no" - ;; - --enable-avx512f) avx512f_opt="yes" - ;; --disable-virtio-blk-data-plane|--enable-virtio-blk-data-plane) echo "$0: $opt is obsolete, virtio-blk data-plane is always on" >&2 ;; @@ -1450,8 +1444,6 @@ cat << EOF tpm TPM support libssh ssh block device support numa libnuma support - avx2 AVX2 optimization support - avx512f AVX512F optimization support replication replication support opengl opengl support xfsctl xfsctl support @@ -1477,7 +1469,7 @@ exit 0 fi # Remove old dependency files to make sure that they get properly regenerated -rm -f */config-devices.mak.d +rm -f ./*/config-devices.mak.d if test -z "$python" then @@ -1495,16 +1487,13 @@ if ! $python -c 'import sys; sys.exit(sys.version_info < (3,6))'; then "Use --python=/path/to/python to specify a supported Python." 
fi -# Preserve python version since some functionality is dependent on it -python_version=$($python -c 'import sys; print("%d.%d.%d" % (sys.version_info[0], sys.version_info[1], sys.version_info[2]))' 2>/dev/null) - # Suppress writing compiled files python="$python -B" if test -z "$meson"; then if test "$explicit_python" = no && has meson && version_ge "$(meson --version)" 0.59.3; then meson=meson - elif test $git_submodules_action != 'ignore' ; then + elif test "$git_submodules_action" != 'ignore' ; then meson=git elif test -e "${source_path}/meson/meson.py" ; then meson=internal @@ -2890,85 +2879,6 @@ else # "$safe_stack" = "" fi fi -######################################## -# check if cpuid.h is usable. - -cat > $TMPC << EOF -#include -int main(void) { - unsigned a, b, c, d; - int max = __get_cpuid_max(0, 0); - - if (max >= 1) { - __cpuid(1, a, b, c, d); - } - - if (max >= 7) { - __cpuid_count(7, 0, a, b, c, d); - } - - return 0; -} -EOF -if compile_prog "" "" ; then - cpuid_h=yes -fi - -########################################## -# avx2 optimization requirement check -# -# There is no point enabling this if cpuid.h is not usable, -# since we won't be able to select the new routines. - -if test "$cpuid_h" = "yes" && test "$avx2_opt" != "no"; then - cat > $TMPC << EOF -#pragma GCC push_options -#pragma GCC target("avx2") -#include -#include -static int bar(void *a) { - __m256i x = *(__m256i *)a; - return _mm256_testz_si256(x, x); -} -int main(int argc, char *argv[]) { return bar(argv[0]); } -EOF - if compile_object "-Werror" ; then - avx2_opt="yes" - else - avx2_opt="no" - fi -fi - -########################################## -# avx512f optimization requirement check -# -# There is no point enabling this if cpuid.h is not usable, -# since we won't be able to select the new routines. -# by default, it is turned off. -# if user explicitly want to enable it, check environment - -if test "$cpuid_h" = "yes" && test "$avx512f_opt" = "yes"; then - cat > $TMPC << EOF -#pragma GCC push_options -#pragma GCC target("avx512f") -#include -#include -static int bar(void *a) { - __m512i x = *(__m512i *)a; - return _mm512_test_epi64_mask(x, x); -} -int main(int argc, char *argv[]) -{ - return bar(argv[0]); -} -EOF - if ! compile_object "-Werror" ; then - avx512f_opt="no" - fi -else - avx512f_opt="no" -fi - ######################################## # check if __[u]int128_t is usable. @@ -3268,6 +3178,10 @@ alpha) # Ensure there's only a single GP QEMU_CFLAGS="-msmall-data $QEMU_CFLAGS" ;; +sw*) + # Ensure there's only a single GP + QEMU_CFLAGS="-msmall-data $QEMU_CFLAGS" +;; esac if test "$gprof" = "yes" ; then @@ -3349,7 +3263,7 @@ if test "$QEMU_GA_DISTRO" = ""; then QEMU_GA_DISTRO=Linux fi if test "$QEMU_GA_VERSION" = ""; then - QEMU_GA_VERSION=$(cat $source_path/VERSION) + QEMU_GA_VERSION=$(cat "$source_path"/VERSION) fi QEMU_GA_MSI_MINGW_DLL_PATH="$($pkg_config --variable=prefix glib-2.0)/bin" @@ -3421,7 +3335,7 @@ else cxx= fi -if !(GIT="$git" "$source_path/scripts/git-submodule.sh" "$git_submodules_action" "$git_submodules"); then +if ! 
(GIT="$git" "$source_path/scripts/git-submodule.sh" "$git_submodules_action" "$git_submodules"); then exit 1 fi @@ -3472,9 +3386,6 @@ fi if test "$solaris" = "yes" ; then echo "CONFIG_SOLARIS=y" >> $config_host_mak fi -if test "$haiku" = "yes" ; then - echo "CONFIG_HAIKU=y" >> $config_host_mak -fi if test "$static" = "yes" ; then echo "CONFIG_STATIC=y" >> $config_host_mak fi @@ -3580,14 +3491,6 @@ if test "$opengl" = "yes" ; then echo "OPENGL_LIBS=$opengl_libs" >> $config_host_mak fi -if test "$avx2_opt" = "yes" ; then - echo "CONFIG_AVX2_OPT=y" >> $config_host_mak -fi - -if test "$avx512f_opt" = "yes" ; then - echo "CONFIG_AVX512F_OPT=y" >> $config_host_mak -fi - # XXX: suppress that if [ "$bsd" = "yes" ] ; then echo "CONFIG_BSD=y" >> $config_host_mak @@ -3620,10 +3523,6 @@ if test "$have_tsan" = "yes" && test "$have_tsan_iface_fiber" = "yes" ; then echo "CONFIG_TSAN=y" >> $config_host_mak fi -if test "$cpuid_h" = "yes" ; then - echo "CONFIG_CPUID_H=y" >> $config_host_mak -fi - if test "$int128" = "yes" ; then echo "CONFIG_INT128=y" >> $config_host_mak fi @@ -3786,8 +3685,8 @@ fi for target in $target_list; do target_dir="$target" - target_name=$(echo $target | cut -d '-' -f 1) - mkdir -p $target_dir + target_name=$(echo $target | cut -d '-' -f 1)$EXESUF + mkdir -p "$target_dir" case $target in *-user) symlink "../qemu-$target_name" "$target_dir/qemu-$target_name" ;; *) symlink "../qemu-system-$target_name" "$target_dir/qemu-system-$target_name" ;; @@ -3854,7 +3753,9 @@ for bios_file in \ $source_path/pc-bios/u-boot.* \ $source_path/pc-bios/edk2-*.fd.bz2 \ $source_path/pc-bios/palcode-* \ - $source_path/pc-bios/qemu_vga.ndrv + $source_path/pc-bios/qemu_vga.ndrv \ + $source_path/pc-bios/core* \ + $source_path/pc-bios/uefi-bios-sw do LINKS="$LINKS pc-bios/$(basename $bios_file)" diff --git a/contrib/elf2dmp/main.c b/contrib/elf2dmp/main.c index 20b477d582a49eb7b8f8a9db6078f12e8a325629..3f0d1eb7091dc605a56f753786cc66ad9928578c 100644 --- a/contrib/elf2dmp/main.c +++ b/contrib/elf2dmp/main.c @@ -125,6 +125,7 @@ static KDDEBUGGER_DATA64 *get_kdbg(uint64_t KernBase, struct pdb_reader *pdb, if (va_space_rw(vs, KdDebuggerDataBlock, kdbg, kdbg_hdr.Size, 0)) { eprintf("Failed to extract entire KDBG\n"); + free(kdbg); return NULL; } diff --git a/contrib/vhost-user-blk/vhost-user-blk.c b/contrib/vhost-user-blk/vhost-user-blk.c index d14b2896bf00ecc819c9eeb3c1dbff5d9cd75595..91c4462659c68c0d6dc587ee15b9ffe7e0a354ff 100644 --- a/contrib/vhost-user-blk/vhost-user-blk.c +++ b/contrib/vhost-user-blk/vhost-user-blk.c @@ -106,10 +106,7 @@ static void vub_req_complete(VubReq *req) req->size + 1); vu_queue_notify(vu_dev, req->vq); - if (req->elem) { - free(req->elem); - } - + g_free(req->elem); g_free(req); } @@ -243,7 +240,7 @@ static int vub_virtio_process_req(VubDev *vdev_blk, /* refer to hw/block/virtio_blk.c */ if (elem->out_num < 1 || elem->in_num < 1) { fprintf(stderr, "virtio-blk request missing headers\n"); - free(elem); + g_free(elem); return -1; } @@ -325,7 +322,7 @@ static int vub_virtio_process_req(VubDev *vdev_blk, return 0; err: - free(elem); + g_free(elem); g_free(req); return -1; } diff --git a/cpus-common.c b/cpus-common.c index 6e73d3e58dac6b6f4e117d56f5020d8496f17439..31c6415f37c64ef62d4e9a9d41be6b0263e5716e 100644 --- a/cpus-common.c +++ b/cpus-common.c @@ -73,6 +73,12 @@ static int cpu_get_free_index(void) } CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus); +static unsigned int cpu_list_generation_id; + +unsigned int cpu_list_generation_id_get(void) +{ + return cpu_list_generation_id; +} 
void cpu_list_add(CPUState *cpu) { @@ -84,6 +90,7 @@ void cpu_list_add(CPUState *cpu) assert(!cpu_index_auto_assigned); } QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node); + cpu_list_generation_id++; } void cpu_list_remove(CPUState *cpu) @@ -96,6 +103,7 @@ void cpu_list_remove(CPUState *cpu) QTAILQ_REMOVE_RCU(&cpus, cpu, node); cpu->cpu_index = UNASSIGNED_CPU_INDEX; + cpu_list_generation_id++; } CPUState *qemu_get_cpu(int index) diff --git a/crypto/block-luks.c b/crypto/block-luks.c index fe8f04ffb294d80ce55957cab72f2dd14e0d496d..a14eaeba4f799bba809b7abe2152b039b9f1b34b 100644 --- a/crypto/block-luks.c +++ b/crypto/block-luks.c @@ -33,6 +33,7 @@ #include "qemu/coroutine.h" #include "qemu/bitmap.h" +#include "qemu/range.h" /* * Reference for the LUKS format implemented here is @@ -126,12 +127,23 @@ qcrypto_block_luks_cipher_size_map_twofish[] = { { 0, 0 }, }; +#ifdef CONFIG_CRYPTO_SM4 +static const QCryptoBlockLUKSCipherSizeMap +qcrypto_block_luks_cipher_size_map_sm4[] = { + { 16, QCRYPTO_CIPHER_ALG_SM4}, + { 0, 0 }, +}; +#endif + static const QCryptoBlockLUKSCipherNameMap qcrypto_block_luks_cipher_name_map[] = { { "aes", qcrypto_block_luks_cipher_size_map_aes }, { "cast5", qcrypto_block_luks_cipher_size_map_cast5 }, { "serpent", qcrypto_block_luks_cipher_size_map_serpent }, { "twofish", qcrypto_block_luks_cipher_size_map_twofish }, +#ifdef CONFIG_CRYPTO_SM4 + { "sm4", qcrypto_block_luks_cipher_size_map_sm4}, +#endif }; @@ -591,7 +603,7 @@ qcrypto_block_luks_check_header(const QCryptoBlockLUKS *luks, Error **errp) header_sectors, slot2->stripes); - if (start1 + len1 > start2 && start2 + len2 > start1) { + if (ranges_overlap(start1, len1, start2, len2)) { error_setg(errp, "Keyslots %zu and %zu are overlapping in the header", i, j); diff --git a/crypto/cipher-gcrypt.c.inc b/crypto/cipher-gcrypt.c.inc index a6a0117717f5bc1061e91612981ee26eaa8a1d3b..1377cbaf147a019e4b0a6899cb14d5046c15ef2c 100644 --- a/crypto/cipher-gcrypt.c.inc +++ b/crypto/cipher-gcrypt.c.inc @@ -35,6 +35,9 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_ALG_SERPENT_256: case QCRYPTO_CIPHER_ALG_TWOFISH_128: case QCRYPTO_CIPHER_ALG_TWOFISH_256: +#ifdef CONFIG_CRYPTO_SM4 + case QCRYPTO_CIPHER_ALG_SM4: +#endif break; default: return false; @@ -219,6 +222,11 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_ALG_TWOFISH_256: gcryalg = GCRY_CIPHER_TWOFISH; break; +#ifdef CONFIG_CRYPTO_SM4 + case QCRYPTO_CIPHER_ALG_SM4: + gcryalg = GCRY_CIPHER_SM4; + break; +#endif default: error_setg(errp, "Unsupported cipher algorithm %s", QCryptoCipherAlgorithm_str(alg)); diff --git a/crypto/cipher-nettle.c.inc b/crypto/cipher-nettle.c.inc index 24cc61f87bfc4ae7ab8ff523a4105a67bace67a1..42b39e18a23d49b1a5d86eaa5874f849f444b522 100644 --- a/crypto/cipher-nettle.c.inc +++ b/crypto/cipher-nettle.c.inc @@ -33,6 +33,9 @@ #ifndef CONFIG_QEMU_PRIVATE_XTS #include #endif +#ifdef CONFIG_CRYPTO_SM4 +#include +#endif static inline bool qcrypto_length_check(size_t len, size_t blocksize, Error **errp) @@ -426,6 +429,30 @@ DEFINE_ECB_CBC_CTR_XTS(qcrypto_nettle_twofish, QCryptoNettleTwofish, TWOFISH_BLOCK_SIZE, twofish_encrypt_native, twofish_decrypt_native) +#ifdef CONFIG_CRYPTO_SM4 +typedef struct QCryptoNettleSm4 { + QCryptoCipher base; + struct sm4_ctx key[2]; +} QCryptoNettleSm4; + +static void sm4_encrypt_native(void *ctx, size_t length, + uint8_t *dst, const uint8_t *src) +{ + struct sm4_ctx *keys = ctx; + sm4_crypt(&keys[0], length, dst, src); +} + +static void sm4_decrypt_native(void 
*ctx, size_t length, + uint8_t *dst, const uint8_t *src) +{ + struct sm4_ctx *keys = ctx; + sm4_crypt(&keys[1], length, dst, src); +} + +DEFINE_ECB(qcrypto_nettle_sm4, + QCryptoNettleSm4, SM4_BLOCK_SIZE, + sm4_encrypt_native, sm4_decrypt_native) +#endif bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, QCryptoCipherMode mode) @@ -443,6 +470,9 @@ bool qcrypto_cipher_supports(QCryptoCipherAlgorithm alg, case QCRYPTO_CIPHER_ALG_TWOFISH_128: case QCRYPTO_CIPHER_ALG_TWOFISH_192: case QCRYPTO_CIPHER_ALG_TWOFISH_256: +#ifdef CONFIG_CRYPTO_SM4 + case QCRYPTO_CIPHER_ALG_SM4: +#endif break; default: return false; @@ -701,6 +731,25 @@ static QCryptoCipher *qcrypto_cipher_ctx_new(QCryptoCipherAlgorithm alg, return &ctx->base; } +#ifdef CONFIG_CRYPTO_SM4 + case QCRYPTO_CIPHER_ALG_SM4: + { + QCryptoNettleSm4 *ctx = g_new0(QCryptoNettleSm4, 1); + + switch (mode) { + case QCRYPTO_CIPHER_MODE_ECB: + ctx->base.driver = &qcrypto_nettle_sm4_driver_ecb; + break; + default: + goto bad_cipher_mode; + } + + sm4_set_encrypt_key(&ctx->key[0], key); + sm4_set_decrypt_key(&ctx->key[1], key); + + return &ctx->base; + } +#endif default: error_setg(errp, "Unsupported cipher algorithm %s", diff --git a/crypto/cipher.c b/crypto/cipher.c index 74b09a5b261bae00c01afa788dbca99017f4110e..5f512768ea3b05852960a65fadcf38b3ca6265d1 100644 --- a/crypto/cipher.c +++ b/crypto/cipher.c @@ -38,6 +38,9 @@ static const size_t alg_key_len[QCRYPTO_CIPHER_ALG__MAX] = { [QCRYPTO_CIPHER_ALG_TWOFISH_128] = 16, [QCRYPTO_CIPHER_ALG_TWOFISH_192] = 24, [QCRYPTO_CIPHER_ALG_TWOFISH_256] = 32, +#ifdef CONFIG_CRYPTO_SM4 + [QCRYPTO_CIPHER_ALG_SM4] = 16, +#endif }; static const size_t alg_block_len[QCRYPTO_CIPHER_ALG__MAX] = { @@ -53,6 +56,9 @@ static const size_t alg_block_len[QCRYPTO_CIPHER_ALG__MAX] = { [QCRYPTO_CIPHER_ALG_TWOFISH_128] = 16, [QCRYPTO_CIPHER_ALG_TWOFISH_192] = 16, [QCRYPTO_CIPHER_ALG_TWOFISH_256] = 16, +#ifdef CONFIG_CRYPTO_SM4 + [QCRYPTO_CIPHER_ALG_SM4] = 16, +#endif }; static const bool mode_need_iv[QCRYPTO_CIPHER_MODE__MAX] = { diff --git a/crypto/tls-cipher-suites.c b/crypto/tls-cipher-suites.c index 5e4f59746450a746af7f8141c0f1eeccd401598a..d0df4badc0fe1730dc9d58dcc1f0aad3c0de6563 100644 --- a/crypto/tls-cipher-suites.c +++ b/crypto/tls-cipher-suites.c @@ -52,7 +52,6 @@ GByteArray *qcrypto_tls_cipher_suites_get_data(QCryptoTLSCipherSuites *obj, byte_array = g_byte_array_new(); for (i = 0;; i++) { - int ret; unsigned idx; const char *name; IANA_TLS_CIPHER cipher; diff --git a/crypto/tlscredspsk.c b/crypto/tlscredspsk.c index 752f2d92bee74438d8a91dc3164978b191eba6a8..9ab62b411dad79a9c170f613faa39bb5825c9880 100644 --- a/crypto/tlscredspsk.c +++ b/crypto/tlscredspsk.c @@ -245,6 +245,7 @@ qcrypto_tls_creds_psk_finalize(Object *obj) QCryptoTLSCredsPSK *creds = QCRYPTO_TLS_CREDS_PSK(obj); qcrypto_tls_creds_psk_unload(creds); + g_free(creds->username); } static void diff --git a/disas.c b/disas.c index 3dab4482d1a1eeef0393f3c3efb4b54637270136..897de1d9a98d251de466c388177c50c0b63c20f0 100644 --- a/disas.c +++ b/disas.c @@ -207,6 +207,8 @@ static void initialize_debug_host(CPUDebug *s) s->info.cap_insn_split = 6; #elif defined(__hppa__) s->info.print_insn = print_insn_hppa; +#elif defined(__sw_64__) + s->info.print_insn = print_insn_sw_64; #endif } diff --git a/disas/hppa.c b/disas/hppa.c index dcf9a47f348932ea7ddaafe810f2e1d04c146583..cce4f4aa374179bbfc95df14684a5df462af6f49 100644 --- a/disas/hppa.c +++ b/disas/hppa.c @@ -1968,6 +1968,10 @@ print_insn_hppa (bfd_vma memaddr, disassemble_info *info) insn = bfd_getb32 (buffer); + 
info->fprintf_func(info->stream, " %02x %02x %02x %02x ", + (insn >> 24) & 0xff, (insn >> 16) & 0xff, + (insn >> 8) & 0xff, insn & 0xff); + for (i = 0; i < NUMOPCODES; ++i) { const struct pa_opcode *opcode = &pa_opcodes[i]; @@ -2826,6 +2830,6 @@ print_insn_hppa (bfd_vma memaddr, disassemble_info *info) return sizeof (insn); } } - (*info->fprintf_func) (info->stream, "#%8x", insn); + info->fprintf_func(info->stream, ""); return sizeof (insn); } diff --git a/disas/loongarch.c b/disas/loongarch.c new file mode 100644 index 0000000000000000000000000000000000000000..b3f38e99ab20ae98a5ac48d2c9a7bf9c1d88c185 --- /dev/null +++ b/disas/loongarch.c @@ -0,0 +1,2736 @@ +/* + * QEMU Loongarch Disassembler + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include "qemu/osdep.h" +#include "disas/dis-asm.h" + +#define INSNLEN 4 + +/* types */ +typedef uint16_t la_opcode; + +/* enums */ +typedef enum { + la_op_illegal = 0, + la_op_gr2scr = 1, + la_op_scr2gr = 2, + la_op_clo_w = 3, + la_op_clz_w = 4, + la_op_cto_w = 5, + la_op_ctz_w = 6, + la_op_clo_d = 7, + la_op_clz_d = 8, + la_op_cto_d = 9, + la_op_ctz_d = 10, + la_op_revb_2h = 11, + la_op_revb_4h = 12, + la_op_revb_2w = 13, + la_op_revb_d = 14, + la_op_revh_2w = 15, + la_op_revh_d = 16, + la_op_bitrev_4b = 17, + la_op_bitrev_8b = 18, + la_op_bitrev_w = 19, + la_op_bitrev_d = 20, + la_op_ext_w_h = 21, + la_op_ext_w_b = 22, + la_op_rdtime_d = 23, + la_op_cpucfg = 24, + la_op_asrtle_d = 25, + la_op_asrtgt_d = 26, + la_op_alsl_w = 27, + la_op_alsl_wu = 28, + la_op_bytepick_w = 29, + la_op_bytepick_d = 30, + la_op_add_w = 31, + la_op_add_d = 32, + la_op_sub_w = 33, + la_op_sub_d = 34, + la_op_slt = 35, + la_op_sltu = 36, + la_op_maskeqz = 37, + la_op_masknez = 38, + la_op_nor = 39, + la_op_and = 40, + la_op_or = 41, + la_op_xor = 42, + la_op_orn = 43, + la_op_andn = 44, + la_op_sll_w = 45, + la_op_srl_w = 46, + la_op_sra_w = 47, + la_op_sll_d = 48, + la_op_srl_d = 49, + la_op_sra_d = 50, + la_op_rotr_w = 51, + la_op_rotr_d = 52, + la_op_mul_w = 53, + la_op_mulh_w = 54, + la_op_mulh_wu = 55, + la_op_mul_d = 56, + la_op_mulh_d = 57, + la_op_mulh_du = 58, + la_op_mulw_d_w = 59, + la_op_mulw_d_wu = 60, + la_op_div_w = 61, + la_op_mod_w = 62, + la_op_div_wu = 63, + la_op_mod_wu = 64, + la_op_div_d = 65, + la_op_mod_d = 66, + la_op_div_du = 67, + la_op_mod_du = 68, + la_op_crc_w_b_w = 69, + la_op_crc_w_h_w = 70, + la_op_crc_w_w_w = 71, + la_op_crc_w_d_w = 72, + la_op_crcc_w_b_w = 73, + la_op_crcc_w_h_w = 74, + la_op_crcc_w_w_w = 75, + la_op_crcc_w_d_w = 76, + la_op_break = 77, + la_op_dbcl = 78, + la_op_syscall = 79, + la_op_alsl_d = 80, + la_op_slli_w = 81, + la_op_slli_d = 82, + la_op_srli_w = 83, + la_op_srli_d = 84, + la_op_srai_w = 85, + la_op_srai_d = 86, + la_op_rotri_w = 87, + la_op_rotri_d = 88, + la_op_bstrins_w = 89, + la_op_bstrpick_w = 90, + la_op_bstrins_d = 91, + la_op_bstrpick_d = 92, + la_op_fadd_s = 93, + la_op_fadd_d = 94, + la_op_fsub_s = 95, + la_op_fsub_d = 96, 
+ la_op_fmul_s = 97, + la_op_fmul_d = 98, + la_op_fdiv_s = 99, + la_op_fdiv_d = 100, + la_op_fmax_s = 101, + la_op_fmax_d = 102, + la_op_fmin_s = 103, + la_op_fmin_d = 104, + la_op_fmaxa_s = 105, + la_op_fmaxa_d = 106, + la_op_fmina_s = 107, + la_op_fmina_d = 108, + la_op_fscaleb_s = 109, + la_op_fscaleb_d = 110, + la_op_fcopysign_s = 111, + la_op_fcopysign_d = 112, + la_op_fabs_s = 113, + la_op_fabs_d = 114, + la_op_fneg_s = 115, + la_op_fneg_d = 116, + la_op_flogb_s = 117, + la_op_flogb_d = 118, + la_op_fclass_s = 119, + la_op_fclass_d = 120, + la_op_fsqrt_s = 121, + la_op_fsqrt_d = 122, + la_op_frecip_s = 123, + la_op_frecip_d = 124, + la_op_frsqrt_s = 125, + la_op_frsqrt_d = 126, + la_op_fmov_s = 127, + la_op_fmov_d = 128, + la_op_movgr2fr_w = 129, + la_op_movgr2fr_d = 130, + la_op_movgr2frh_w = 131, + la_op_movfr2gr_s = 132, + la_op_movfr2gr_d = 133, + la_op_movfrh2gr_s = 134, + la_op_movgr2fcsr = 135, + la_op_movfcsr2gr = 136, + la_op_movfr2cf = 137, + la_op_movcf2fr = 138, + la_op_movgr2cf = 139, + la_op_movcf2gr = 140, + la_op_fcvt_s_d = 141, + la_op_fcvt_d_s = 142, + + la_op_ftintrm_w_s = 143, + la_op_ftintrm_w_d = 144, + la_op_ftintrm_l_s = 145, + la_op_ftintrm_l_d = 146, + la_op_ftintrp_w_s = 147, + la_op_ftintrp_w_d = 148, + la_op_ftintrp_l_s = 149, + la_op_ftintrp_l_d = 150, + la_op_ftintrz_w_s = 151, + la_op_ftintrz_w_d = 152, + la_op_ftintrz_l_s = 153, + la_op_ftintrz_l_d = 154, + la_op_ftintrne_w_s = 155, + la_op_ftintrne_w_d = 156, + la_op_ftintrne_l_s = 157, + la_op_ftintrne_l_d = 158, + la_op_ftint_w_s = 159, + la_op_ftint_w_d = 160, + la_op_ftint_l_s = 161, + la_op_ftint_l_d = 162, + la_op_ffint_s_w = 163, + la_op_ffint_s_l = 164, + la_op_ffint_d_w = 165, + la_op_ffint_d_l = 166, + la_op_frint_s = 167, + la_op_frint_d = 168, + + la_op_slti = 169, + la_op_sltui = 170, + la_op_addi_w = 171, + la_op_addi_d = 172, + la_op_lu52i_d = 173, + la_op_addi = 174, + la_op_ori = 175, + la_op_xori = 176, + + la_op_csrxchg = 177, + la_op_cacop = 178, + la_op_lddir = 179, + la_op_ldpte = 180, + la_op_iocsrrd_b = 181, + la_op_iocsrrd_h = 182, + la_op_iocsrrd_w = 183, + la_op_iocsrrd_d = 184, + la_op_iocsrwr_b = 185, + la_op_iocsrwr_h = 186, + la_op_iocsrwr_w = 187, + la_op_iocsrwr_d = 188, + la_op_tlbclr = 189, + la_op_tlbflush = 190, + la_op_tlbsrch = 191, + la_op_tlbrd = 192, + la_op_tlbwr = 193, + la_op_tlbfill = 194, + la_op_ertn = 195, + la_op_idle = 196, + la_op_invtlb = 197, + + la_op_fmadd_s = 198, + la_op_fmadd_d = 199, + la_op_fmsub_s = 200, + la_op_fmsub_d = 201, + la_op_fnmadd_s = 202, + la_op_fnmadd_d = 203, + la_op_fnmsub_s = 204, + la_op_fnmsub_d = 205, + la_op_fcmp_cond_s = 206, + la_op_fcmp_cond_d = 207, + la_op_fsel = 208, + la_op_addu16i_d = 209, + la_op_lu12i_w = 210, + la_op_lu32i_d = 211, + la_op_pcaddi = 212, + la_op_pcalau12i = 213, + la_op_pcaddu12i = 214, + la_op_pcaddu18i = 215, + + la_op_ll_w = 216, + la_op_sc_w = 217, + la_op_ll_d = 218, + la_op_sc_d = 219, + la_op_ldptr_w = 220, + la_op_stptr_w = 221, + la_op_ldptr_d = 222, + la_op_stptr_d = 223, + la_op_ld_b = 224, + la_op_ld_h = 225, + la_op_ld_w = 226, + la_op_ld_d = 227, + la_op_st_b = 228, + la_op_st_h = 229, + la_op_st_w = 230, + la_op_st_d = 231, + la_op_ld_bu = 232, + la_op_ld_hu = 233, + la_op_ld_wu = 234, + la_op_preld = 235, + la_op_fld_s = 236, + la_op_fst_s = 237, + la_op_fld_d = 238, + la_op_fst_d = 239, + la_op_ldl_w = 240, + la_op_ldr_w = 241, + la_op_ldl_d = 242, + la_op_ldr_d = 243, + la_op_stl_d = 244, + la_op_str_d = 245, + la_op_ldx_b = 246, + la_op_ldx_h = 247, + la_op_ldx_w = 248, + 
la_op_ldx_d = 249, + la_op_stx_b = 250, + la_op_stx_h = 251, + la_op_stx_w = 252, + la_op_stx_d = 253, + la_op_ldx_bu = 254, + la_op_ldx_hu = 255, + la_op_ldx_wu = 256, + la_op_fldx_s = 257, + la_op_fldx_d = 258, + la_op_fstx_s = 259, + la_op_fstx_d = 260, + + la_op_amswap_w = 261, + la_op_amswap_d = 262, + la_op_amadd_w = 263, + la_op_amadd_d = 264, + la_op_amand_w = 265, + la_op_amand_d = 266, + la_op_amor_w = 267, + la_op_amor_d = 268, + la_op_amxor_w = 269, + la_op_amxor_d = 270, + la_op_ammax_w = 271, + la_op_ammax_d = 272, + la_op_ammin_w = 273, + la_op_ammin_d = 274, + la_op_ammax_wu = 275, + la_op_ammax_du = 276, + la_op_ammin_wu = 277, + la_op_ammin_du = 278, + la_op_amswap_db_w = 279, + la_op_amswap_db_d = 280, + la_op_amadd_db_w = 281, + la_op_amadd_db_d = 282, + la_op_amand_db_w = 283, + la_op_amand_db_d = 284, + la_op_amor_db_w = 285, + la_op_amor_db_d = 286, + la_op_amxor_db_w = 287, + la_op_amxor_db_d = 288, + la_op_ammax_db_w = 289, + la_op_ammax_db_d = 290, + la_op_ammin_db_w = 291, + la_op_ammin_db_d = 292, + la_op_ammax_db_wu = 293, + la_op_ammax_db_du = 294, + la_op_ammin_db_wu = 295, + la_op_ammin_db_du = 296, + la_op_dbar = 297, + la_op_ibar = 298, + la_op_fldgt_s = 299, + la_op_fldgt_d = 300, + la_op_fldle_s = 301, + la_op_fldle_d = 302, + la_op_fstgt_s = 303, + la_op_fstgt_d = 304, + la_op_fstle_s = 305, + la_op_fstle_d = 306, + la_op_ldgt_b = 307, + la_op_ldgt_h = 308, + la_op_ldgt_w = 309, + la_op_ldgt_d = 310, + la_op_ldle_b = 311, + la_op_ldle_h = 312, + la_op_ldle_w = 313, + la_op_ldle_d = 314, + la_op_stgt_b = 315, + la_op_stgt_h = 316, + la_op_stgt_w = 317, + la_op_stgt_d = 318, + la_op_stle_b = 319, + la_op_stle_h = 320, + la_op_stle_w = 321, + la_op_stle_d = 322, + la_op_beqz = 323, + la_op_bnez = 324, + la_op_bceqz = 325, + la_op_bcnez = 326, + la_op_jirl = 327, + la_op_b = 328, + la_op_bl = 329, + la_op_beq = 330, + la_op_bne = 331, + la_op_blt = 332, + la_op_bge = 333, + la_op_bltu = 334, + la_op_bgeu = 335, + + /* vz insn */ + la_op_hvcl = 336, + +} la_op; + +typedef enum { + la_codec_illegal, + la_codec_empty, + la_codec_2r, + la_codec_2r_u5, + la_codec_2r_u6, + la_codec_2r_2bw, + la_codec_2r_2bd, + la_codec_3r, + la_codec_3r_rd0, + la_codec_3r_sa2, + la_codec_3r_sa3, + la_codec_4r, + la_codec_r_im20, + la_codec_2r_im16, + la_codec_2r_im14, + la_codec_2r_im12, + la_codec_im5_r_im12, + la_codec_2r_im8, + la_codec_r_sd, + la_codec_r_sj, + la_codec_r_cd, + la_codec_r_cj, + la_codec_r_seq, + la_codec_code, + la_codec_whint, + la_codec_invtlb, + la_codec_r_ofs21, + la_codec_cj_ofs21, + la_codec_ofs26, + la_codec_cond, + la_codec_sel, + +} la_codec; + +#define la_fmt_illegal "nte" +#define la_fmt_empty "nt" +#define la_fmt_sd_rj "ntA,1" +#define la_fmt_rd_sj "nt0,B" +#define la_fmt_rd_rj "nt0,1" +#define la_fmt_rj_rk "nt1,2" +#define la_fmt_rj_seq "nt1,x" +#define la_fmt_rd_si20 "nt0,i(x)" +#define la_fmt_rd_rj_ui5 "nt0,1,C" +#define la_fmt_rd_rj_ui6 "nt0,1,C" +#define la_fmt_rd_rj_level "nt0,1,x" +#define la_fmt_rd_rj_msbw_lsbw "nt0,1,C,D" +#define la_fmt_rd_rj_msbd_lsbd "nt0,1,C,D" +#define la_fmt_rd_rj_si12 "nt0,1,i(x)" +#define la_fmt_hint_rj_si12 "ntE,1,i(x)" +#define la_fmt_rd_rj_csr "nt0,1,x" +#define la_fmt_rd_rj_si14 "nt0,1,i(x)" +#define la_fmt_rd_rj_si16 "nt0,1,i(x)" +#define la_fmt_rd_rj_rk "nt0,1,2" +#define la_fmt_fd_rj_rk "nt3,1,2" +#define la_fmt_rd_rj_rk_sa2 "nt0,1,2,D" +#define la_fmt_rd_rj_rk_sa3 "nt0,1,2,D" +#define la_fmt_fd_rj "nt3,1" +#define la_fmt_rd_fj "nt0,4" +#define la_fmt_fd_fj "nt3,4" +#define la_fmt_fd_fj_si12 "nt3,1,i(x)" 
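+/* + * Key to the la_fmt template characters, gleaned from format_insn() + * further down in this file: 'n' mnemonic, 't' pad to the operand + * column, 'e' raw opcode word in hex, '0'/'1'/'2' rd/rj/rk as GPRs, + * '3'..'6' fd/fj/fk/fa as FPRs, 'A'/'B' scratch regs, 'H'/'I'/'L' + * condition-flag regs, 'F'/'G' FCSR operands, 'C'/'D'/'E' raw field + * values in hex, 'J' break/syscall code, 'K' fcmp condition name, + * 'M' cacop code, 'i' signed immediate in decimal, 'x' immediate in + * hex (width taken from dec->bit), 'o'/'X' branch offset scaled by 4, + * 'p' the computed branch target; 's', 'd' and punctuation are + * emitted literally. + */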
+#define la_fmt_fcsrd_rj "ntF,1" +#define la_fmt_rd_fcsrs "nt0,G" +#define la_fmt_cd_fj "ntH,4" +#define la_fmt_fd_cj "nt3,I" +#define la_fmt_fd_fj_fk "nt3,4,5" +#define la_fmt_code "ntJ" +#define la_fmt_whint "ntx" +#define la_fmt_invtlb "ntx,1,2" /* op,rj,rk */ +#define la_fmt_offs26 "nto(X)p" +#define la_fmt_rj_offs21 "nt1,o(X)p" +#define la_fmt_cj_offs21 "ntI,o(X)p" +#define la_fmt_rd_rj_offs16 "nt0,1,o(X)" +#define la_fmt_rj_rd_offs16 "nt1,0,o(X)p" +#define la_fmt_s_cd_fj_fk "K.stH,4,5" +#define la_fmt_d_cd_fj_fk "K.dtH,4,5" +#define la_fmt_fd_fj_fk_fa "nt3,4,5,6" +#define la_fmt_fd_fj_fk_ca "nt3,4,5,L" +#define la_fmt_cop_rj_si12 "ntM,1,i(x)" + +/* structures */ + +typedef struct { + uint32_t pc; + uint32_t insn; + int32_t imm; + int32_t imm2; + uint16_t op; + uint16_t code; + uint8_t codec; + uint8_t r1; + uint8_t r2; + uint8_t r3; + uint8_t r4; + uint8_t bit; +} la_decode; + +typedef struct { + const char *const name; + const la_codec codec; + const char *const format; +} la_opcode_data; + +/* reg names */ +const char *const loongarch_r_normal_name[32] = { + "$r0", "$r1", "$r2", "$r3", "$r4", "$r5", "$r6", "$r7", + "$r8", "$r9", "$r10", "$r11", "$r12", "$r13", "$r14", "$r15", + "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23", + "$r24", "$r25", "$r26", "$r27", "$r28", "$r29", "$r30", "$r31", +}; + +const char *const loongarch_f_normal_name[32] = { + "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", + "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", + "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", + "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31", +}; + +const char *const loongarch_cr_normal_name[4] = { + "$scr0", + "$scr1", + "$scr2", + "$scr3", +}; + +const char *const loongarch_c_normal_name[8] = { + "$fcc0", "$fcc1", "$fcc2", "$fcc3", "$fcc4", "$fcc5", "$fcc6", "$fcc7", +}; + +/* instruction data */ +const la_opcode_data opcode_la[] = { + { "illegal", la_codec_illegal, la_fmt_illegal }, + { "gr2scr", la_codec_r_sd, la_fmt_sd_rj }, + { "scr2gr", la_codec_r_sj, la_fmt_rd_sj }, + { "clo.w", la_codec_2r, la_fmt_rd_rj }, + { "clz.w", la_codec_2r, la_fmt_rd_rj }, + { "cto.w", la_codec_2r, la_fmt_rd_rj }, + { "ctz.w", la_codec_2r, la_fmt_rd_rj }, + { "clo.d", la_codec_2r, la_fmt_rd_rj }, + { "clz.d", la_codec_2r, la_fmt_rd_rj }, + { "cto.d", la_codec_2r, la_fmt_rd_rj }, + { "ctz.d", la_codec_2r, la_fmt_rd_rj }, + { "revb.2h", la_codec_2r, la_fmt_rd_rj }, + { "revb.4h", la_codec_2r, la_fmt_rd_rj }, + { "revb.2w", la_codec_2r, la_fmt_rd_rj }, + { "revb.d", la_codec_2r, la_fmt_rd_rj }, + { "revh.2w", la_codec_2r, la_fmt_rd_rj }, + { "revh.d", la_codec_2r, la_fmt_rd_rj }, + { "bitrev.4b", la_codec_2r, la_fmt_rd_rj }, + { "bitrev.8b", la_codec_2r, la_fmt_rd_rj }, + { "bitrev.w", la_codec_2r, la_fmt_rd_rj }, + { "bitrev.d", la_codec_2r, la_fmt_rd_rj }, + { "ext.w.h", la_codec_2r, la_fmt_rd_rj }, + { "ext.w.b", la_codec_2r, la_fmt_rd_rj }, + { "rdtime.d", la_codec_2r, la_fmt_rd_rj }, + { "cpucfg", la_codec_2r, la_fmt_rd_rj }, + { "asrtle.d", la_codec_3r_rd0, la_fmt_rj_rk }, + { "asrtgt.d", la_codec_3r_rd0, la_fmt_rj_rk }, + { "alsl.w", la_codec_3r_sa2, la_fmt_rd_rj_rk_sa2 }, + { "alsl.wu", la_codec_3r_sa2, la_fmt_rd_rj_rk_sa2 }, + { "bytepick.w", la_codec_3r_sa2, la_fmt_rd_rj_rk_sa2 }, + { "bytepick.d", la_codec_3r_sa3, la_fmt_rd_rj_rk_sa3 }, + { "add.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "add.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "sub.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "sub.d", la_codec_3r, la_fmt_rd_rj_rk }, + { 
"slt", la_codec_3r, la_fmt_rd_rj_rk }, + { "sltu", la_codec_3r, la_fmt_rd_rj_rk }, + { "maskeqz", la_codec_3r, la_fmt_rd_rj_rk }, + { "masknez", la_codec_3r, la_fmt_rd_rj_rk }, + { "nor", la_codec_3r, la_fmt_rd_rj_rk }, + { "and", la_codec_3r, la_fmt_rd_rj_rk }, + { "or", la_codec_3r, la_fmt_rd_rj_rk }, + { "xor", la_codec_3r, la_fmt_rd_rj_rk }, + { "orn", la_codec_3r, la_fmt_rd_rj_rk }, + { "andn", la_codec_3r, la_fmt_rd_rj_rk }, + { "sll.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "srl.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "sra.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "sll.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "srl.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "sra.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "rotr.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "rotr.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "mul.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "mulh.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "mulh.wu", la_codec_3r, la_fmt_rd_rj_rk }, + { "mul.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "mulh.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "mulh.du", la_codec_3r, la_fmt_rd_rj_rk }, + { "mulw.d.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "mulw.d.wu", la_codec_3r, la_fmt_rd_rj_rk }, + { "div.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "mod.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "div.wu", la_codec_3r, la_fmt_rd_rj_rk }, + { "mod.wu", la_codec_3r, la_fmt_rd_rj_rk }, + { "div.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "mod.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "div.du", la_codec_3r, la_fmt_rd_rj_rk }, + { "mod.du", la_codec_3r, la_fmt_rd_rj_rk }, + { "crc.w.b.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "crc.w.h.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "crc.w.w.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "crc.w.d.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "crcc.w.b.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "crcc.w.h.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "crcc.w.w.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "crcc.w.d.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "break", la_codec_code, la_fmt_code }, + { "dbcl", la_codec_code, la_fmt_code }, + { "syscall", la_codec_code, la_fmt_code }, + { "alsl.d", la_codec_3r_sa2, la_fmt_rd_rj_rk_sa2 }, + { "slli.w", la_codec_2r_u5, la_fmt_rd_rj_ui5 }, + { "slli.d", la_codec_2r_u6, la_fmt_rd_rj_ui6 }, + { "srli.w", la_codec_2r_u5, la_fmt_rd_rj_ui5 }, + { "srli.d", la_codec_2r_u6, la_fmt_rd_rj_ui6 }, + { "srai.w", la_codec_2r_u5, la_fmt_rd_rj_ui5 }, + { "srai.d", la_codec_2r_u6, la_fmt_rd_rj_ui6 }, + { "rotri.w", la_codec_2r_u5, la_fmt_rd_rj_ui5 }, + { "rotri.d", la_codec_2r_u6, la_fmt_rd_rj_ui6 }, + { "bstrins.w", la_codec_2r_2bw, la_fmt_rd_rj_msbw_lsbw }, + { "bstrpick.w", la_codec_2r_2bw, la_fmt_rd_rj_msbw_lsbw }, + { "bstrins.d", la_codec_2r_2bd, la_fmt_rd_rj_msbd_lsbd }, + { "bstrpick.d", la_codec_2r_2bd, la_fmt_rd_rj_msbd_lsbd }, + { "fadd.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fadd.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fsub.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fsub.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmul.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmul.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fdiv.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fdiv.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmax.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmax.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmin.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmin.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmaxa.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmaxa.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmina.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fmina.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fscaleb.s", la_codec_3r, la_fmt_fd_fj_fk }, + { 
"fscaleb.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fcopysign.s", la_codec_3r, la_fmt_fd_fj_fk }, + { "fcopysign.d", la_codec_3r, la_fmt_fd_fj_fk }, + { "fabs.s", la_codec_2r, la_fmt_fd_fj }, + { "fabs.d", la_codec_2r, la_fmt_fd_fj }, + { "fneg.s", la_codec_2r, la_fmt_fd_fj }, + { "fneg.d", la_codec_2r, la_fmt_fd_fj }, + { "flogb.s", la_codec_2r, la_fmt_fd_fj }, + { "flogb.d", la_codec_2r, la_fmt_fd_fj }, + { "fclass.s", la_codec_2r, la_fmt_fd_fj }, + { "fclass.d", la_codec_2r, la_fmt_fd_fj }, + { "fsqrt.s", la_codec_2r, la_fmt_fd_fj }, + { "fsqrt.d", la_codec_2r, la_fmt_fd_fj }, + { "frecip.s", la_codec_2r, la_fmt_fd_fj }, + { "frecip.d", la_codec_2r, la_fmt_fd_fj }, + { "frsqrt.s", la_codec_2r, la_fmt_fd_fj }, + { "frsqrt.d", la_codec_2r, la_fmt_fd_fj }, + { "fmov.s", la_codec_2r, la_fmt_fd_fj }, + { "fmov.d", la_codec_2r, la_fmt_fd_fj }, + { "movgr2fr.w", la_codec_2r, la_fmt_fd_rj }, + { "movgr2fr.d", la_codec_2r, la_fmt_fd_rj }, + { "movgr2frh.w", la_codec_2r, la_fmt_fd_rj }, + { "movfr2gr.s", la_codec_2r, la_fmt_rd_fj }, + { "movfr2gr.d", la_codec_2r, la_fmt_rd_fj }, + { "movfrh2gr.s", la_codec_2r, la_fmt_rd_fj }, + { "movgr2fcsr", la_codec_2r, la_fmt_fcsrd_rj }, + { "movfcsr2gr", la_codec_2r, la_fmt_rd_fcsrs }, + { "movfr2cf", la_codec_r_cd, la_fmt_cd_fj }, + { "movcf2fr", la_codec_r_cj, la_fmt_fd_cj }, + { "movgr2cf", la_codec_r_cd, la_fmt_cd_fj }, + { "movcf2gr", la_codec_r_cj, la_fmt_fd_cj }, + { "fcvt.s.d", la_codec_2r, la_fmt_fd_fj }, + { "fcvt.d.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrm.w.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrm.w.d", la_codec_2r, la_fmt_fd_fj }, + { "ftintrm.l.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrm.l.d", la_codec_2r, la_fmt_fd_fj }, + { "ftintrp.w.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrp.w.d", la_codec_2r, la_fmt_fd_fj }, + { "ftintrp.l.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrp.l.d", la_codec_2r, la_fmt_fd_fj }, + { "ftintrz.w.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrz.w.d", la_codec_2r, la_fmt_fd_fj }, + { "ftintrz.l.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrz.l.d", la_codec_2r, la_fmt_fd_fj }, + { "ftintrne.w.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrne.w.d", la_codec_2r, la_fmt_fd_fj }, + { "ftintrne.l.s", la_codec_2r, la_fmt_fd_fj }, + { "ftintrne.l.d", la_codec_2r, la_fmt_fd_fj }, + { "ftint.w.s", la_codec_2r, la_fmt_fd_fj }, + { "ftint.w.d", la_codec_2r, la_fmt_fd_fj }, + { "ftint.l.s", la_codec_2r, la_fmt_fd_fj }, + { "ftint.l.d", la_codec_2r, la_fmt_fd_fj }, + { "ffint.s.w", la_codec_2r, la_fmt_fd_fj }, + { "ffint.s.l", la_codec_2r, la_fmt_fd_fj }, + { "ffint.d.w", la_codec_2r, la_fmt_fd_fj }, + { "ffint.d.l", la_codec_2r, la_fmt_fd_fj }, + { "frint.s", la_codec_2r, la_fmt_fd_fj }, + { "frint.d", la_codec_2r, la_fmt_fd_fj }, + { "slti", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "sltui", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "addi.w", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "addi.d", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "lu52i.d", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "addi", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ori", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "xori", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "csrxchg", la_codec_2r_im14, la_fmt_rd_rj_csr }, + { "cacop", la_codec_im5_r_im12, la_fmt_cop_rj_si12 }, + { "lddir", la_codec_2r_im8, la_fmt_rd_rj_level }, + { "ldpte", la_codec_r_seq, la_fmt_rj_seq }, + { "iocsrrd.b", la_codec_2r, la_fmt_rd_rj }, + { "iocsrrd.h", la_codec_2r, la_fmt_rd_rj }, + { "iocsrrd.w", la_codec_2r, la_fmt_rd_rj }, + { "iocsrrd.d", la_codec_2r, la_fmt_rd_rj }, + { 
"iocsrwr.b", la_codec_2r, la_fmt_rd_rj }, + { "iocsrwr.h", la_codec_2r, la_fmt_rd_rj }, + { "iocsrwr.w", la_codec_2r, la_fmt_rd_rj }, + { "iocsrwr.d", la_codec_2r, la_fmt_rd_rj }, + { "tlbclr", la_codec_empty, la_fmt_empty }, + { "tlbflush", la_codec_empty, la_fmt_empty }, + { "tlbsrch", la_codec_empty, la_fmt_empty }, + { "tlbrd", la_codec_empty, la_fmt_empty }, + { "tlbwr", la_codec_empty, la_fmt_empty }, + { "tlbfill", la_codec_empty, la_fmt_empty }, + { "ertn", la_codec_empty, la_fmt_empty }, + { "idle", la_codec_whint, la_fmt_whint }, + { "invtlb", la_codec_invtlb, la_fmt_invtlb }, + { "fmadd.s", la_codec_4r, la_fmt_fd_fj_fk_fa }, + { "fmadd.d", la_codec_4r, la_fmt_fd_fj_fk_fa }, + { "fmsub.s", la_codec_4r, la_fmt_fd_fj_fk_fa }, + { "fmsub.d", la_codec_4r, la_fmt_fd_fj_fk_fa }, + { "fnmadd.s", la_codec_4r, la_fmt_fd_fj_fk_fa }, + { "fnmadd.d", la_codec_4r, la_fmt_fd_fj_fk_fa }, + { "fnmsub.s", la_codec_4r, la_fmt_fd_fj_fk_fa }, + { "fnmsub.d", la_codec_4r, la_fmt_fd_fj_fk_fa }, + { "fcmp.cond.s", la_codec_cond, la_fmt_s_cd_fj_fk }, + { "fcmp.cond.d", la_codec_cond, la_fmt_d_cd_fj_fk }, + { "fsel", la_codec_sel, la_fmt_fd_fj_fk_ca }, + { "addu16i.d", la_codec_2r_im16, la_fmt_rd_rj_si16 }, + { "lu12i.w", la_codec_r_im20, la_fmt_rd_si20 }, + { "lu32i.d", la_codec_r_im20, la_fmt_rd_si20 }, + { "pcaddi", la_codec_r_im20, la_fmt_rd_si20 }, + { "pcalau12i", la_codec_r_im20, la_fmt_rd_si20 }, + { "pcaddu12i", la_codec_r_im20, la_fmt_rd_si20 }, + { "pcaddu18i", la_codec_r_im20, la_fmt_rd_si20 }, + { "ll.w", la_codec_2r_im14, la_fmt_rd_rj_si14 }, + { "sc.w", la_codec_2r_im14, la_fmt_rd_rj_si14 }, + { "ll.d", la_codec_2r_im14, la_fmt_rd_rj_si14 }, + { "sc.d", la_codec_2r_im14, la_fmt_rd_rj_si14 }, + { "ldptr.w", la_codec_2r_im14, la_fmt_rd_rj_si14 }, + { "stptr.w", la_codec_2r_im14, la_fmt_rd_rj_si14 }, + { "ldptr.d", la_codec_2r_im14, la_fmt_rd_rj_si14 }, + { "stptr.d", la_codec_2r_im14, la_fmt_rd_rj_si14 }, + { "ld.b", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ld.h", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ld.w", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ld.d", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "st.b", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "st.h", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "st.w", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "st.d", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ld.bu", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ld.hu", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ld.wu", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "preld", la_codec_2r_im12, la_fmt_hint_rj_si12 }, + { "fld.s", la_codec_2r_im12, la_fmt_fd_fj_si12 }, + { "fst.s", la_codec_2r_im12, la_fmt_fd_fj_si12 }, + { "fld.d", la_codec_2r_im12, la_fmt_fd_fj_si12 }, + { "fst.d", la_codec_2r_im12, la_fmt_fd_fj_si12 }, + { "ldl.w", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ldr.w", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ldl.d", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ldr.d", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "stl.d", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "str.d", la_codec_2r_im12, la_fmt_rd_rj_si12 }, + { "ldx.b", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldx.h", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldx.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldx.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "stx.b", la_codec_3r, la_fmt_rd_rj_rk }, + { "stx.h", la_codec_3r, la_fmt_rd_rj_rk }, + { "stx.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "stx.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldx.bu", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldx.hu", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldx.wu", la_codec_3r, 
la_fmt_rd_rj_rk }, + { "fldx.s", la_codec_3r, la_fmt_fd_rj_rk }, + { "fldx.d", la_codec_3r, la_fmt_fd_rj_rk }, + { "fstx.s", la_codec_3r, la_fmt_fd_rj_rk }, + { "fstx.d", la_codec_3r, la_fmt_fd_rj_rk }, + { "amswap.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amswap.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "amadd.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amadd.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "amand.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amand.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "amor.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amor.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "amxor.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amxor.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammax.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammax.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammin.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammin.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammax.wu", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammax.du", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammin.wu", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammin.du", la_codec_3r, la_fmt_rd_rj_rk }, + { "amswap.db.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amswap.db.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "amadd.db.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amadd.db.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "amand.db.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amand.db.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "amor.db.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amor.db.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "amxor.db.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "amxor.db.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammax.db.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammax.db.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammin.db.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammin.db.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammax.db.wu", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammax.db.du", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammin.db.wu", la_codec_3r, la_fmt_rd_rj_rk }, + { "ammin.db.du", la_codec_3r, la_fmt_rd_rj_rk }, + { "dbar", la_codec_whint, la_fmt_whint }, + { "ibar", la_codec_whint, la_fmt_whint }, + { "fldgt.s", la_codec_3r, la_fmt_fd_rj_rk }, + { "fldgt.d", la_codec_3r, la_fmt_fd_rj_rk }, + { "fldle.s", la_codec_3r, la_fmt_fd_rj_rk }, + { "fldle.d", la_codec_3r, la_fmt_fd_rj_rk }, + { "fstgt.s", la_codec_3r, la_fmt_fd_rj_rk }, + { "fstgt.d", la_codec_3r, la_fmt_fd_rj_rk }, + { "fstle.s", la_codec_3r, la_fmt_fd_rj_rk }, + { "fstle.d", la_codec_3r, la_fmt_fd_rj_rk }, + { "ldgt.b", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldgt.h", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldgt.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldgt.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldle.b", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldle.h", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldle.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "ldle.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "stgt.b", la_codec_3r, la_fmt_rd_rj_rk }, + { "stgt.h", la_codec_3r, la_fmt_rd_rj_rk }, + { "stgt.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "stgt.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "stle.b", la_codec_3r, la_fmt_rd_rj_rk }, + { "stle.h", la_codec_3r, la_fmt_rd_rj_rk }, + { "stle.w", la_codec_3r, la_fmt_rd_rj_rk }, + { "stle.d", la_codec_3r, la_fmt_rd_rj_rk }, + { "beqz", la_codec_r_ofs21, la_fmt_rj_offs21 }, + { "bnez", la_codec_r_ofs21, la_fmt_rj_offs21 }, + { "bceqz", la_codec_cj_ofs21, la_fmt_cj_offs21 }, + { "bcnez", la_codec_cj_ofs21, la_fmt_cj_offs21 }, + { "jirl", la_codec_2r_im16, la_fmt_rd_rj_offs16 }, + { "b", la_codec_ofs26, la_fmt_offs26 }, + { "bl", la_codec_ofs26, la_fmt_offs26 }, + { "beq", la_codec_2r_im16, la_fmt_rj_rd_offs16 }, + { 
"bne", la_codec_2r_im16, la_fmt_rj_rd_offs16 }, + { "blt", la_codec_2r_im16, la_fmt_rj_rd_offs16 }, + { "bge", la_codec_2r_im16, la_fmt_rj_rd_offs16 }, + { "bltu", la_codec_2r_im16, la_fmt_rj_rd_offs16 }, + { "bgeu", la_codec_2r_im16, la_fmt_rj_rd_offs16 }, + + /* vz insn */ + { "hvcl", la_codec_code, la_fmt_code }, + +}; + +/* decode opcode */ +static void decode_insn_opcode(la_decode *dec) +{ + uint32_t insn = dec->insn; + uint16_t op = la_op_illegal; + switch ((insn >> 26) & 0x3f) { + case 0x0: + switch ((insn >> 22) & 0xf) { + case 0x0: + switch ((insn >> 18) & 0xf) { + case 0x0: + switch ((insn >> 15) & 0x7) { + case 0x0: + switch ((insn >> 10) & 0x1f) { + case 0x2: + switch ((insn >> 2) & 0x7) { + case 0x0: + op = la_op_gr2scr; + break; + } + break; + case 0x3: + switch ((insn >> 7) & 0x7) { + case 0x0: + op = la_op_scr2gr; + break; + } + break; + case 0x4: + op = la_op_clo_w; + break; + case 0x5: + op = la_op_clz_w; + break; + case 0x6: + op = la_op_cto_w; + break; + case 0x7: + op = la_op_ctz_w; + break; + case 0x8: + op = la_op_clo_d; + break; + case 0x9: + op = la_op_clz_d; + break; + case 0xa: + op = la_op_cto_d; + break; + case 0xb: + op = la_op_ctz_d; + break; + case 0xc: + op = la_op_revb_2h; + break; + case 0xd: + op = la_op_revb_4h; + break; + case 0xe: + op = la_op_revb_2w; + break; + case 0xf: + op = la_op_revb_d; + break; + case 0x10: + op = la_op_revh_2w; + break; + case 0x11: + op = la_op_revh_d; + break; + case 0x12: + op = la_op_bitrev_4b; + break; + case 0x13: + op = la_op_bitrev_8b; + break; + case 0x14: + op = la_op_bitrev_w; + break; + case 0x15: + op = la_op_bitrev_d; + break; + case 0x16: + op = la_op_ext_w_h; + break; + case 0x17: + op = la_op_ext_w_b; + break; + case 0x1a: + op = la_op_rdtime_d; + break; + case 0x1b: + op = la_op_cpucfg; + break; + } + break; + case 0x2: + switch (insn & 0x0000001f) { + case 0x00000000: + op = la_op_asrtle_d; + break; + } + break; + case 0x3: + switch (insn & 0x0000001f) { + case 0x00000000: + op = la_op_asrtgt_d; + break; + } + break; + } + break; + case 0x1: + switch ((insn >> 17) & 0x1) { + case 0x0: + op = la_op_alsl_w; + break; + case 0x1: + op = la_op_alsl_wu; + break; + } + break; + case 0x2: + switch ((insn >> 17) & 0x1) { + case 0x0: + op = la_op_bytepick_w; + break; + } + break; + case 0x3: + op = la_op_bytepick_d; + break; + case 0x4: + switch ((insn >> 15) & 0x7) { + case 0x0: + op = la_op_add_w; + break; + case 0x1: + op = la_op_add_d; + break; + case 0x2: + op = la_op_sub_w; + break; + case 0x3: + op = la_op_sub_d; + break; + case 0x4: + op = la_op_slt; + break; + case 0x5: + op = la_op_sltu; + break; + case 0x6: + op = la_op_maskeqz; + break; + case 0x7: + op = la_op_masknez; + break; + } + break; + case 0x5: + switch ((insn >> 15) & 0x7) { + case 0x0: + op = la_op_nor; + break; + case 0x1: + op = la_op_and; + break; + case 0x2: + op = la_op_or; + break; + case 0x3: + op = la_op_xor; + break; + case 0x4: + op = la_op_orn; + break; + case 0x5: + op = la_op_andn; + break; + case 0x6: + op = la_op_sll_w; + break; + case 0x7: + op = la_op_srl_w; + break; + } + break; + case 0x6: + switch ((insn >> 15) & 0x7) { + case 0x0: + op = la_op_sra_w; + break; + case 0x1: + op = la_op_sll_d; + break; + case 0x2: + op = la_op_srl_d; + break; + case 0x3: + op = la_op_sra_d; + break; + case 0x6: + op = la_op_rotr_w; + break; + case 0x7: + op = la_op_rotr_d; + break; + } + break; + case 0x7: + switch ((insn >> 15) & 0x7) { + case 0x0: + op = la_op_mul_w; + break; + case 0x1: + op = la_op_mulh_w; + break; + case 0x2: + op = 
la_op_mulh_wu; + break; + case 0x3: + op = la_op_mul_d; + break; + case 0x4: + op = la_op_mulh_d; + break; + case 0x5: + op = la_op_mulh_du; + break; + case 0x6: + op = la_op_mulw_d_w; + break; + case 0x7: + op = la_op_mulw_d_wu; + break; + } + break; + case 0x8: + switch ((insn >> 15) & 0x7) { + case 0x0: + op = la_op_div_w; + break; + case 0x1: + op = la_op_mod_w; + break; + case 0x2: + op = la_op_div_wu; + break; + case 0x3: + op = la_op_mod_wu; + break; + case 0x4: + op = la_op_div_d; + break; + case 0x5: + op = la_op_mod_d; + break; + case 0x6: + op = la_op_div_du; + break; + case 0x7: + op = la_op_mod_du; + break; + } + break; + case 0x9: + switch ((insn >> 15) & 0x7) { + case 0x0: + op = la_op_crc_w_b_w; + break; + case 0x1: + op = la_op_crc_w_h_w; + break; + case 0x2: + op = la_op_crc_w_w_w; + break; + case 0x3: + op = la_op_crc_w_d_w; + break; + case 0x4: + op = la_op_crcc_w_b_w; + break; + case 0x5: + op = la_op_crcc_w_h_w; + break; + case 0x6: + op = la_op_crcc_w_w_w; + break; + case 0x7: + op = la_op_crcc_w_d_w; + break; + } + break; + case 0xa: + switch ((insn >> 15) & 0x7) { + case 0x4: + op = la_op_break; + break; + case 0x5: + op = la_op_dbcl; + break; + case 0x6: + op = la_op_syscall; + break; + case 0x7: + op = la_op_hvcl; + break; + } + break; + case 0xb: + switch ((insn >> 17) & 0x1) { + case 0x0: + op = la_op_alsl_d; + break; + } + break; + } + break; + case 0x1: + switch ((insn >> 21) & 0x1) { + case 0x0: + switch ((insn >> 16) & 0x1f) { + case 0x0: + switch ((insn >> 15) & 0x1) { + case 0x1: + op = la_op_slli_w; + break; + } + break; + case 0x1: + op = la_op_slli_d; + break; + case 0x4: + switch ((insn >> 15) & 0x1) { + case 0x1: + op = la_op_srli_w; + break; + } + break; + case 0x5: + op = la_op_srli_d; + break; + case 0x8: + switch ((insn >> 15) & 0x1) { + case 0x1: + op = la_op_srai_w; + break; + } + break; + case 0x9: + op = la_op_srai_d; + break; + case 0xc: + switch ((insn >> 15) & 0x1) { + case 0x1: + op = la_op_rotri_w; + break; + } + break; + case 0xd: + op = la_op_rotri_d; + break; + } + break; + case 0x1: + switch ((insn >> 15) & 0x1) { + case 0x0: + op = la_op_bstrins_w; + break; + case 0x1: + op = la_op_bstrpick_w; + break; + } + break; + } + break; + case 0x2: + op = la_op_bstrins_d; + break; + case 0x3: + op = la_op_bstrpick_d; + break; + case 0x4: + switch ((insn >> 15) & 0x7f) { + case 0x1: + op = la_op_fadd_s; + break; + case 0x2: + op = la_op_fadd_d; + break; + case 0x5: + op = la_op_fsub_s; + break; + case 0x6: + op = la_op_fsub_d; + break; + case 0x9: + op = la_op_fmul_s; + break; + case 0xa: + op = la_op_fmul_d; + break; + case 0xd: + op = la_op_fdiv_s; + break; + case 0xe: + op = la_op_fdiv_d; + break; + case 0x11: + op = la_op_fmax_s; + break; + case 0x12: + op = la_op_fmax_d; + break; + case 0x15: + op = la_op_fmin_s; + break; + case 0x16: + op = la_op_fmin_d; + break; + case 0x19: + op = la_op_fmaxa_s; + break; + case 0x1a: + op = la_op_fmaxa_d; + break; + case 0x1d: + op = la_op_fmina_s; + break; + case 0x1e: + op = la_op_fmina_d; + break; + case 0x21: + op = la_op_fscaleb_s; + break; + case 0x22: + op = la_op_fscaleb_d; + break; + case 0x25: + op = la_op_fcopysign_s; + break; + case 0x26: + op = la_op_fcopysign_d; + break; + case 0x28: + switch ((insn >> 10) & 0x1f) { + case 0x1: + op = la_op_fabs_s; + break; + case 0x2: + op = la_op_fabs_d; + break; + case 0x5: + op = la_op_fneg_s; + break; + case 0x6: + op = la_op_fneg_d; + break; + case 0x9: + op = la_op_flogb_s; + break; + case 0xa: + op = la_op_flogb_d; + break; + case 0xd: + op = 
la_op_fclass_s; + break; + case 0xe: + op = la_op_fclass_d; + break; + case 0x11: + op = la_op_fsqrt_s; + break; + case 0x12: + op = la_op_fsqrt_d; + break; + case 0x15: + op = la_op_frecip_s; + break; + case 0x16: + op = la_op_frecip_d; + break; + case 0x19: + op = la_op_frsqrt_s; + break; + case 0x1a: + op = la_op_frsqrt_d; + break; + } + break; + case 0x29: + switch ((insn >> 10) & 0x1f) { + case 0x5: + op = la_op_fmov_s; + break; + case 0x6: + op = la_op_fmov_d; + break; + case 0x9: + op = la_op_movgr2fr_w; + break; + case 0xa: + op = la_op_movgr2fr_d; + break; + case 0xb: + op = la_op_movgr2frh_w; + break; + case 0xd: + op = la_op_movfr2gr_s; + break; + case 0xe: + op = la_op_movfr2gr_d; + break; + case 0xf: + op = la_op_movfrh2gr_s; + break; + case 0x10: + op = la_op_movgr2fcsr; + break; + case 0x12: + op = la_op_movfcsr2gr; + break; + case 0x14: + switch ((insn >> 3) & 0x3) { + case 0x0: + op = la_op_movfr2cf; + break; + } + break; + case 0x15: + switch ((insn >> 8) & 0x3) { + case 0x0: + op = la_op_movcf2fr; + break; + } + break; + case 0x16: + switch ((insn >> 3) & 0x3) { + case 0x0: + op = la_op_movgr2cf; + break; + } + break; + case 0x17: + switch ((insn >> 8) & 0x3) { + case 0x0: + op = la_op_movcf2gr; + break; + } + break; + } + break; + case 0x32: + switch ((insn >> 10) & 0x1f) { + case 0x6: + op = la_op_fcvt_s_d; + break; + case 0x9: + op = la_op_fcvt_d_s; + break; + } + break; + case 0x34: + switch ((insn >> 10) & 0x1f) { + case 0x1: + op = la_op_ftintrm_w_s; + break; + case 0x2: + op = la_op_ftintrm_w_d; + break; + case 0x9: + op = la_op_ftintrm_l_s; + break; + case 0xa: + op = la_op_ftintrm_l_d; + break; + case 0x11: + op = la_op_ftintrp_w_s; + break; + case 0x12: + op = la_op_ftintrp_w_d; + break; + case 0x19: + op = la_op_ftintrp_l_s; + break; + case 0x1a: + op = la_op_ftintrp_l_d; + break; + } + break; + case 0x35: + switch ((insn >> 10) & 0x1f) { + case 0x1: + op = la_op_ftintrz_w_s; + break; + case 0x2: + op = la_op_ftintrz_w_d; + break; + case 0x9: + op = la_op_ftintrz_l_s; + break; + case 0xa: + op = la_op_ftintrz_l_d; + break; + case 0x11: + op = la_op_ftintrne_w_s; + break; + case 0x12: + op = la_op_ftintrne_w_d; + break; + case 0x19: + op = la_op_ftintrne_l_s; + break; + case 0x1a: + op = la_op_ftintrne_l_d; + break; + } + break; + case 0x36: + switch ((insn >> 10) & 0x1f) { + case 0x1: + op = la_op_ftint_w_s; + break; + case 0x2: + op = la_op_ftint_w_d; + break; + case 0x9: + op = la_op_ftint_l_s; + break; + case 0xa: + op = la_op_ftint_l_d; + break; + } + break; + case 0x3a: + switch ((insn >> 10) & 0x1f) { + case 0x4: + op = la_op_ffint_s_w; + break; + case 0x6: + op = la_op_ffint_s_l; + break; + case 0x8: + op = la_op_ffint_d_w; + break; + case 0xa: + op = la_op_ffint_d_l; + break; + } + break; + case 0x3c: + switch ((insn >> 10) & 0x1f) { + case 0x11: + op = la_op_frint_s; + break; + case 0x12: + op = la_op_frint_d; + break; + } + break; + } + break; + case 0x8: + op = la_op_slti; + break; + case 0x9: + op = la_op_sltui; + break; + case 0xa: + op = la_op_addi_w; + break; + case 0xb: + op = la_op_addi_d; + break; + case 0xc: + op = la_op_lu52i_d; + break; + case 0xd: + op = la_op_addi; + break; + case 0xe: + op = la_op_ori; + break; + case 0xf: + op = la_op_xori; + break; + } + break; + case 0x1: + switch ((insn >> 24) & 0x3) { + case 0x0: + op = la_op_csrxchg; + break; + case 0x2: + switch ((insn >> 22) & 0x3) { + case 0x0: + op = la_op_cacop; + break; + case 0x1: + switch ((insn >> 18) & 0xf) { + case 0x0: + op = la_op_lddir; + break; + case 0x1: + switch 
(insn & 0x0000001f) { + case 0x00000000: + op = la_op_ldpte; + break; + } + break; + case 0x2: + switch ((insn >> 15) & 0x7) { + case 0x0: + switch ((insn >> 10) & 0x1f) { + case 0x0: + op = la_op_iocsrrd_b; + break; + case 0x1: + op = la_op_iocsrrd_h; + break; + case 0x2: + op = la_op_iocsrrd_w; + break; + case 0x3: + op = la_op_iocsrrd_d; + break; + case 0x4: + op = la_op_iocsrwr_b; + break; + case 0x5: + op = la_op_iocsrwr_h; + break; + case 0x6: + op = la_op_iocsrwr_w; + break; + case 0x7: + op = la_op_iocsrwr_d; + break; + case 0x8: + switch (insn & 0x000003ff) { + case 0x00000000: + op = la_op_tlbclr; + break; + } + break; + case 0x9: + switch (insn & 0x000003ff) { + case 0x00000000: + op = la_op_tlbflush; + break; + } + break; + case 0xa: + switch (insn & 0x000003ff) { + case 0x00000000: + op = la_op_tlbsrch; + break; + } + break; + case 0xb: + switch (insn & 0x000003ff) { + case 0x00000000: + op = la_op_tlbrd; + break; + } + break; + case 0xc: + switch (insn & 0x000003ff) { + case 0x00000000: + op = la_op_tlbwr; + break; + } + break; + case 0xd: + switch (insn & 0x000003ff) { + case 0x00000000: + op = la_op_tlbfill; + break; + } + break; + case 0xe: + switch (insn & 0x000003ff) { + case 0x00000000: + op = la_op_ertn; + break; + } + break; + } + break; + case 0x1: + op = la_op_idle; + break; + case 0x3: + op = la_op_invtlb; + break; + } + break; + } + break; + } + break; + } + break; + case 0x2: + switch ((insn >> 20) & 0x3f) { + case 0x1: + op = la_op_fmadd_s; + break; + case 0x2: + op = la_op_fmadd_d; + break; + case 0x5: + op = la_op_fmsub_s; + break; + case 0x6: + op = la_op_fmsub_d; + break; + case 0x9: + op = la_op_fnmadd_s; + break; + case 0xa: + op = la_op_fnmadd_d; + break; + case 0xd: + op = la_op_fnmsub_s; + break; + case 0xe: + op = la_op_fnmsub_d; + break; + } + break; + case 0x3: + switch ((insn >> 20) & 0x3f) { + case 0x1: + switch ((insn >> 3) & 0x3) { + case 0x0: + op = la_op_fcmp_cond_s; + break; + } + break; + case 0x2: + switch ((insn >> 3) & 0x3) { + case 0x0: + op = la_op_fcmp_cond_d; + break; + } + break; + case 0x10: + switch ((insn >> 18) & 0x3) { + case 0x0: + op = la_op_fsel; + break; + } + break; + } + break; + case 0x4: + op = la_op_addu16i_d; + break; + case 0x5: + switch ((insn >> 25) & 0x1) { + case 0x0: + op = la_op_lu12i_w; + break; + case 0x1: + op = la_op_lu32i_d; + break; + } + break; + case 0x6: + switch ((insn >> 25) & 0x1) { + case 0x0: + op = la_op_pcaddi; + break; + case 0x1: + op = la_op_pcalau12i; + break; + } + break; + case 0x7: + switch ((insn >> 25) & 0x1) { + case 0x0: + op = la_op_pcaddu12i; + break; + case 0x1: + op = la_op_pcaddu18i; + break; + } + break; + case 0x8: + switch ((insn >> 24) & 0x3) { + case 0x0: + op = la_op_ll_w; + break; + case 0x1: + op = la_op_sc_w; + break; + case 0x2: + op = la_op_ll_d; + break; + case 0x3: + op = la_op_sc_d; + break; + } + break; + case 0x9: + switch ((insn >> 24) & 0x3) { + case 0x0: + op = la_op_ldptr_w; + break; + case 0x1: + op = la_op_stptr_w; + break; + case 0x2: + op = la_op_ldptr_d; + break; + case 0x3: + op = la_op_stptr_d; + break; + } + break; + case 0xa: + switch ((insn >> 22) & 0xf) { + case 0x0: + op = la_op_ld_b; + break; + case 0x1: + op = la_op_ld_h; + break; + case 0x2: + op = la_op_ld_w; + break; + case 0x3: + op = la_op_ld_d; + break; + case 0x4: + op = la_op_st_b; + break; + case 0x5: + op = la_op_st_h; + break; + case 0x6: + op = la_op_st_w; + break; + case 0x7: + op = la_op_st_d; + break; + case 0x8: + op = la_op_ld_bu; + break; + case 0x9: + op = la_op_ld_hu; + break; + 
case 0xa: + op = la_op_ld_wu; + break; + case 0xb: + op = la_op_preld; + break; + case 0xc: + op = la_op_fld_s; + break; + case 0xd: + op = la_op_fst_s; + break; + case 0xe: + op = la_op_fld_d; + break; + case 0xf: + op = la_op_fst_d; + break; + } + break; + case 0xb: + switch ((insn >> 22) & 0xf) { + case 0x8: + op = la_op_ldl_w; + break; + case 0x9: + op = la_op_ldr_w; + break; + case 0xa: + op = la_op_ldl_d; + break; + case 0xb: + op = la_op_ldr_d; + break; + case 0xe: + op = la_op_stl_d; + break; + case 0xf: + op = la_op_str_d; + break; + } + break; + case 0xe: + switch ((insn >> 15) & 0x7ff) { + case 0x0: + op = la_op_ldx_b; + break; + case 0x8: + op = la_op_ldx_h; + break; + case 0x10: + op = la_op_ldx_w; + break; + case 0x18: + op = la_op_ldx_d; + break; + case 0x20: + op = la_op_stx_b; + break; + case 0x28: + op = la_op_stx_h; + break; + case 0x30: + op = la_op_stx_w; + break; + case 0x38: + op = la_op_stx_d; + break; + case 0x40: + op = la_op_ldx_bu; + break; + case 0x48: + op = la_op_ldx_hu; + break; + case 0x50: + op = la_op_ldx_wu; + break; + case 0x60: + op = la_op_fldx_s; + break; + case 0x68: + op = la_op_fldx_d; + break; + case 0x70: + op = la_op_fstx_s; + break; + case 0x78: + op = la_op_fstx_d; + break; + case 0xc0: + op = la_op_amswap_w; + break; + case 0xc1: + op = la_op_amswap_d; + break; + case 0xc2: + op = la_op_amadd_w; + break; + case 0xc3: + op = la_op_amadd_d; + break; + case 0xc4: + op = la_op_amand_w; + break; + case 0xc5: + op = la_op_amand_d; + break; + case 0xc6: + op = la_op_amor_w; + break; + case 0xc7: + op = la_op_amor_d; + break; + case 0xc8: + op = la_op_amxor_w; + break; + case 0xc9: + op = la_op_amxor_d; + break; + case 0xca: + op = la_op_ammax_w; + break; + case 0xcb: + op = la_op_ammax_d; + break; + case 0xcc: + op = la_op_ammin_w; + break; + case 0xcd: + op = la_op_ammin_d; + break; + case 0xce: + op = la_op_ammax_wu; + break; + case 0xcf: + op = la_op_ammax_du; + break; + case 0xd0: + op = la_op_ammin_wu; + break; + case 0xd1: + op = la_op_ammin_du; + break; + case 0xd2: + op = la_op_amswap_db_w; + break; + case 0xd3: + op = la_op_amswap_db_d; + break; + case 0xd4: + op = la_op_amadd_db_w; + break; + case 0xd5: + op = la_op_amadd_db_d; + break; + case 0xd6: + op = la_op_amand_db_w; + break; + case 0xd7: + op = la_op_amand_db_d; + break; + case 0xd8: + op = la_op_amor_db_w; + break; + case 0xd9: + op = la_op_amor_db_d; + break; + case 0xda: + op = la_op_amxor_db_w; + break; + case 0xdb: + op = la_op_amxor_db_d; + break; + case 0xdc: + op = la_op_ammax_db_w; + break; + case 0xdd: + op = la_op_ammax_db_d; + break; + case 0xde: + op = la_op_ammin_db_w; + break; + case 0xdf: + op = la_op_ammin_db_d; + break; + case 0xe0: + op = la_op_ammax_db_wu; + break; + case 0xe1: + op = la_op_ammax_db_du; + break; + case 0xe2: + op = la_op_ammin_db_wu; + break; + case 0xe3: + op = la_op_ammin_db_du; + break; + case 0xe4: + op = la_op_dbar; + break; + case 0xe5: + op = la_op_ibar; + break; + case 0xe8: + op = la_op_fldgt_s; + break; + case 0xe9: + op = la_op_fldgt_d; + break; + case 0xea: + op = la_op_fldle_s; + break; + case 0xeb: + op = la_op_fldle_d; + break; + case 0xec: + op = la_op_fstgt_s; + break; + case 0xed: + op = la_op_fstgt_d; + break; + case 0xee: + op = la_op_fstle_s; + break; + case 0xef: + op = la_op_fstle_d; + break; + case 0xf0: + op = la_op_ldgt_b; + break; + case 0xf1: + op = la_op_ldgt_h; + break; + case 0xf2: + op = la_op_ldgt_w; + break; + case 0xf3: + op = la_op_ldgt_d; + break; + case 0xf4: + op = la_op_ldle_b; + break; + case 0xf5: + op 
= la_op_ldle_h; + break; + case 0xf6: + op = la_op_ldle_w; + break; + case 0xf7: + op = la_op_ldle_d; + break; + case 0xf8: + op = la_op_stgt_b; + break; + case 0xf9: + op = la_op_stgt_h; + break; + case 0xfa: + op = la_op_stgt_w; + break; + case 0xfb: + op = la_op_stgt_d; + break; + case 0xfc: + op = la_op_stle_b; + break; + case 0xfd: + op = la_op_stle_h; + break; + case 0xfe: + op = la_op_stle_w; + break; + case 0xff: + op = la_op_stle_d; + break; + } + break; + case 0x10: + op = la_op_beqz; + break; + case 0x11: + op = la_op_bnez; + break; + case 0x12: + switch ((insn >> 8) & 0x3) { + case 0x0: + op = la_op_bceqz; + break; + case 0x1: + op = la_op_bcnez; + break; + } + break; + case 0x13: + op = la_op_jirl; + break; + case 0x14: + op = la_op_b; + break; + case 0x15: + op = la_op_bl; + break; + case 0x16: + op = la_op_beq; + break; + case 0x17: + op = la_op_bne; + break; + case 0x18: + op = la_op_blt; + break; + case 0x19: + op = la_op_bge; + break; + case 0x1a: + op = la_op_bltu; + break; + case 0x1b: + op = la_op_bgeu; + break; + default: + op = la_op_illegal; + break; + } + dec->op = op; +} + +/* operand extractors */ +#define IM_5 5 +#define IM_8 8 +#define IM_12 12 +#define IM_14 14 +#define IM_15 15 +#define IM_16 16 +#define IM_20 20 +#define IM_21 21 +#define IM_26 26 + +static uint32_t operand_r1(uint32_t insn) +{ + return insn & 0x1f; +} + +static uint32_t operand_r2(uint32_t insn) +{ + return (insn >> 5) & 0x1f; +} + +static uint32_t operand_r3(uint32_t insn) +{ + return (insn >> 10) & 0x1f; +} + +static uint32_t operand_r4(uint32_t insn) +{ + return (insn >> 15) & 0x1f; +} + +static uint32_t operand_u6(uint32_t insn) +{ + return (insn >> 10) & 0x3f; +} + +static uint32_t operand_bw1(uint32_t insn) +{ + return (insn >> 10) & 0x1f; +} + +static uint32_t operand_bw2(uint32_t insn) +{ + return (insn >> 16) & 0x1f; +} + +static uint32_t operand_bd1(uint32_t insn) +{ + return (insn >> 10) & 0x3f; +} + +static uint32_t operand_bd2(uint32_t insn) +{ + return (insn >> 16) & 0x3f; +} + +static uint32_t operand_sa2(uint32_t insn) +{ + return (insn >> 15) & 0x3; +} + +static uint32_t operand_sa3(uint32_t insn) +{ + /* sa3 is a 3-bit shift amount */ + return (insn >> 15) & 0x7; +} + +static int32_t operand_im20(uint32_t insn) +{ + int32_t imm = (int32_t)((insn >> 5) & 0xfffff); + return imm >= (1 << 19) ? imm - (1 << 20) : imm; +} + +static int32_t operand_im16(uint32_t insn) +{ + int32_t imm = (int32_t)((insn >> 10) & 0xffff); + return imm >= (1 << 15) ? imm - (1 << 16) : imm; +} + +static int32_t operand_im14(uint32_t insn) +{ + int32_t imm = (int32_t)((insn >> 10) & 0x3fff); + return imm >= (1 << 13) ? imm - (1 << 14) : imm; +} + +static int32_t operand_im12(uint32_t insn) +{ + int32_t imm = (int32_t)((insn >> 10) & 0xfff); + return imm >= (1 << 11) ? imm - (1 << 12) : imm; +} + +static int32_t operand_im8(uint32_t insn) +{ + int32_t imm = (int32_t)((insn >> 10) & 0xff); + return imm >= (1 << 7) ? imm - (1 << 8) : imm; +} + +static uint32_t operand_sd(uint32_t insn) +{ + return insn & 0x3; +} + +static uint32_t operand_sj(uint32_t insn) +{ + return (insn >> 5) & 0x3; +} + +static uint32_t operand_cd(uint32_t insn) +{ + return insn & 0x7; +} + +static uint32_t operand_cj(uint32_t insn) +{ + return (insn >> 5) & 0x7; +} + +static uint32_t operand_code(uint32_t insn) +{ + return insn & 0x7fff; +} + +static int32_t operand_whint(uint32_t insn) +{ + int32_t imm = (int32_t)(insn & 0x7fff); + return imm >= (1 << 14) ? imm - (1 << 15) : imm; +} + +static int32_t operand_invop(uint32_t insn) +{ + int32_t imm = (int32_t)(insn & 0x1f); + return imm >= (1 << 4) ? imm - (1 << 5) : imm; +} + +static int32_t operand_ofs21(uint32_t insn) +{ + int32_t imm = (((int32_t)insn & 0x1f) << 16) | ((insn >> 10) & 0xffff); + return imm >= (1 << 20) ? imm - (1 << 21) : imm; +} + +static int32_t operand_ofs26(uint32_t insn) +{ + int32_t imm = (((int32_t)insn & 0x3ff) << 16) | ((insn >> 10) & 0xffff); + return imm >= (1 << 25) ? imm - (1 << 26) : imm; +} + +static uint32_t operand_fcond(uint32_t insn) +{ + return (insn >> 15) & 0x1f; +} + +static uint32_t operand_sel(uint32_t insn) +{ + return (insn >> 15) & 0x7; +} + +/* decode operands */ +static void decode_insn_operands(la_decode *dec) +{ + uint32_t insn = dec->insn; + dec->codec = opcode_la[dec->op].codec; + switch (dec->codec) { + case la_codec_illegal: + case la_codec_empty: + break; + case la_codec_2r: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + break; + case la_codec_2r_u5: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + break; + case la_codec_2r_u6: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_u6(insn); + break; + case la_codec_2r_2bw: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_bw1(insn); + dec->r4 = operand_bw2(insn); + break; + case la_codec_2r_2bd: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_bd1(insn); + dec->r4 = operand_bd2(insn); + break; + case la_codec_3r: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + break; + case la_codec_3r_rd0: + dec->r1 = 0; + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + break; + case la_codec_3r_sa2: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + dec->r4 = operand_sa2(insn); + break; + case la_codec_3r_sa3: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + dec->r4 = operand_sa3(insn); + break; + case la_codec_4r: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + dec->r4 = operand_r4(insn); + break; + case la_codec_r_im20: + dec->r1 = operand_r1(insn); + dec->imm = operand_im20(insn); + dec->bit = IM_20; + break; + case la_codec_2r_im16: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->imm = operand_im16(insn); + dec->bit = IM_16; + break; + case la_codec_2r_im14: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->imm = operand_im14(insn); + dec->bit = IM_14; + break; + case la_codec_im5_r_im12: + dec->imm2 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->imm = operand_im12(insn); + dec->bit = IM_12; + break; + case la_codec_2r_im12: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->imm = operand_im12(insn); + dec->bit = IM_12; + break; + case la_codec_2r_im8: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->imm = operand_im8(insn); + dec->bit = IM_8; + break; + case la_codec_r_sd: + dec->r1 = operand_sd(insn); + dec->r2 = operand_r2(insn); + break; + case la_codec_r_sj: + dec->r1 = operand_r1(insn); + dec->r2 = operand_sj(insn); + break; + case la_codec_r_cd: + dec->r1 = operand_cd(insn); + dec->r2 = operand_r2(insn); + break; + case la_codec_r_cj: + dec->r1 = operand_r1(insn); + dec->r2 = operand_cj(insn); + break; + case la_codec_r_seq: + dec->r1 = 0; + dec->r2 = operand_r2(insn); + dec->imm = operand_im8(insn); + dec->bit = IM_8; + break; + case la_codec_code: + dec->code = operand_code(insn); + break; + case la_codec_whint: + dec->imm = operand_whint(insn); + dec->bit = IM_15; + break; + case la_codec_invtlb: + dec->imm = operand_invop(insn); + dec->bit = IM_5; + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + break; + case la_codec_r_ofs21: + dec->imm = operand_ofs21(insn); + dec->bit = IM_21; + dec->r2 = operand_r2(insn); + break; + case la_codec_cj_ofs21: + dec->imm = operand_ofs21(insn); + dec->bit = IM_21; + dec->r2 = operand_cj(insn); + break; + case la_codec_ofs26: + dec->imm = operand_ofs26(insn); + dec->bit = IM_26; + break; + case la_codec_cond: + dec->r1 = operand_cd(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + dec->r4 = operand_fcond(insn); + break; + case la_codec_sel: + dec->r1 = operand_r1(insn); + dec->r2 = operand_r2(insn); + dec->r3 = operand_r3(insn); + dec->r4 = operand_sel(insn); + break; + } +} + +/* format instruction */ + +static void append(char *s1, const char *s2, size_t n) +{ + size_t l1 = strlen(s1); + /* leave room for the trailing NUL; l1 + 1 < n also avoids size_t underflow */ + if (l1 + 1 < n) { + strncat(s1, s2, n - l1 - 1); + } +} + +static void format_insn(char *buf, size_t buflen, size_t tab, la_decode *dec) +{ + char tmp[16]; + const char *fmt; + + fmt = opcode_la[dec->op].format; + while (*fmt) { + switch (*fmt) { + case 'n': /* name */ + append(buf, opcode_la[dec->op].name, buflen); + break; + case 's': + append(buf, "s", buflen); + break; + case 'd': + append(buf, "d", buflen); + break; + case 'e': /* illegal */ + snprintf(tmp, sizeof(tmp), "%x", dec->insn); + append(buf, tmp, buflen); + break; + case 't': + while (strlen(buf) < tab) { + append(buf, " ", buflen); + } + break; + case '(': + append(buf, "(", buflen); + break; + case ',': + append(buf, ",", buflen); + break; + case '.': + append(buf, ".", buflen); + break; + case ')': + append(buf, ")", buflen); + break; + case '0': /* rd */ + append(buf, loongarch_r_normal_name[dec->r1], buflen); + break; + case '1': /* rj */ + append(buf, loongarch_r_normal_name[dec->r2], buflen); + break; + case '2': /* rk */ + append(buf, loongarch_r_normal_name[dec->r3], buflen); + break; + case '3': /* fd */ + append(buf, loongarch_f_normal_name[dec->r1], buflen); + break; + case '4': /* fj */ + append(buf, loongarch_f_normal_name[dec->r2], buflen); + break; + case '5': /* fk */ + append(buf, loongarch_f_normal_name[dec->r3], buflen); + break; + case '6': /* fa */ + append(buf, loongarch_f_normal_name[dec->r4], buflen); + break; + case 'A': /* sd */ + append(buf, loongarch_cr_normal_name[dec->r1], buflen); + break; + case 'B': /* sj */ + append(buf, loongarch_cr_normal_name[dec->r2], buflen); + break; + case 'C': /* r3 */ + snprintf(tmp, sizeof(tmp), "%x", dec->r3); + append(buf, tmp, buflen); + break; + case 'D': /* r4 */ + snprintf(tmp, sizeof(tmp), "%x", dec->r4); + append(buf, tmp, buflen); + break; + case 'E': /* r1 */ + snprintf(tmp, sizeof(tmp), "%x", dec->r1); + append(buf, tmp, buflen); + break; + case 'F': /* fcsrd */ + append(buf, loongarch_r_normal_name[dec->r1], buflen); + break; + case 'G': /* fcsrs */ + append(buf, loongarch_r_normal_name[dec->r2], buflen); + break; + case 'H': /* cd */ + append(buf, loongarch_c_normal_name[dec->r1], buflen); + break; + case 'I': /* cj */ + append(buf, loongarch_c_normal_name[dec->r2], buflen); + break; + case 'J': /* code */ + snprintf(tmp, sizeof(tmp), "0x%x", dec->code); + append(buf, tmp, buflen); + break; + case 'K': /* cond */ + switch (dec->r4) { + case 0x0: + append(buf, "caf", buflen); + 
break; + case 0x1: + append(buf, "saf", buflen); + break; + case 0x2: + append(buf, "clt", buflen); + break; + case 0x3: + append(buf, "slt", buflen); + break; + case 0x4: + append(buf, "ceq", buflen); + break; + case 0x5: + append(buf, "seq", buflen); + break; + case 0x6: + append(buf, "cle", buflen); + break; + case 0x7: + append(buf, "sle", buflen); + break; + case 0x8: + append(buf, "cun", buflen); + break; + case 0x9: + append(buf, "sun", buflen); + break; + case 0xA: + append(buf, "cult", buflen); + break; + case 0xB: + append(buf, "sult", buflen); + break; + case 0xC: + append(buf, "cueq", buflen); + break; + case 0xD: + append(buf, "sueq", buflen); + break; + case 0xE: + append(buf, "cule", buflen); + break; + case 0xF: + append(buf, "sule", buflen); + break; + case 0x10: + append(buf, "cne", buflen); + break; + case 0x11: + append(buf, "sne", buflen); + break; + case 0x14: + append(buf, "cor", buflen); + break; + case 0x15: + append(buf, "sor", buflen); + break; + case 0x18: + append(buf, "cune", buflen); + break; + case 0x19: + append(buf, "sune", buflen); + break; + } + break; + case 'L': /* ca */ + append(buf, loongarch_c_normal_name[dec->r4], buflen); + break; + case 'M': /* cop */ + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm2) & 0x1f); + append(buf, tmp, buflen); + break; + case 'i': /* sixx d */ + snprintf(tmp, sizeof(tmp), "%d", dec->imm); + append(buf, tmp, buflen); + break; + case 'o': /* offset */ + snprintf(tmp, sizeof(tmp), "%d", (dec->imm) << 2); + append(buf, tmp, buflen); + break; + case 'x': /* sixx x */ + switch (dec->bit) { + case IM_5: + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0x1f); + append(buf, tmp, buflen); + break; + case IM_8: + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0xff); + append(buf, tmp, buflen); + break; + case IM_12: + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0xfff); + append(buf, tmp, buflen); + break; + case IM_14: + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0x3fff); + append(buf, tmp, buflen); + break; + case IM_15: + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0x7fff); + append(buf, tmp, buflen); + break; + case IM_16: + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0xffff); + append(buf, tmp, buflen); + break; + case IM_20: + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) & 0xfffff); + append(buf, tmp, buflen); + break; + default: + snprintf(tmp, sizeof(tmp), "0x%x", dec->imm); + append(buf, tmp, buflen); + break; + } + break; + case 'X': /* offset x*/ + switch (dec->bit) { + case IM_16: + snprintf(tmp, sizeof(tmp), "0x%x", ((dec->imm) << 2) & 0xffff); + append(buf, tmp, buflen); + break; + case IM_21: + snprintf(tmp, sizeof(tmp), "0x%x", + ((dec->imm) << 2) & 0x1fffff); + append(buf, tmp, buflen); + break; + case IM_26: + snprintf(tmp, sizeof(tmp), "0x%x", + ((dec->imm) << 2) & 0x3ffffff); + append(buf, tmp, buflen); + break; + default: + snprintf(tmp, sizeof(tmp), "0x%x", (dec->imm) << 2); + append(buf, tmp, buflen); + break; + } + break; + case 'p': /* pc */ + snprintf(tmp, sizeof(tmp), " # 0x%" PRIx32 "", + dec->pc + ((dec->imm) << 2)); + append(buf, tmp, buflen); + break; + default: + break; + } + fmt++; + } +} + +/* disassemble instruction */ +static void disasm_insn(char *buf, size_t buflen, bfd_vma pc, + unsigned long int insn) +{ + la_decode dec = { 0 }; + dec.pc = pc; + dec.insn = insn; + decode_insn_opcode(&dec); + decode_insn_operands(&dec); + format_insn(buf, buflen, 16, &dec); +} + +int print_insn_loongarch(bfd_vma memaddr, struct disassemble_info *info) +{ + char buf[128] = { 0 }; + bfd_byte 
buffer[INSNLEN]; + unsigned long insn; + int status; + + status = (*info->read_memory_func)(memaddr, buffer, INSNLEN, info); + if (status == 0) { + insn = (uint32_t)bfd_getl32(buffer); + (*info->fprintf_func)(info->stream, "%08" PRIx64 " ", insn); + } else { + (*info->memory_error_func)(status, memaddr, info); + return -1; + } + disasm_insn(buf, sizeof(buf), memaddr, insn); + (*info->fprintf_func)(info->stream, "\t%s", buf); + return INSNLEN; +} diff --git a/disas/meson.build b/disas/meson.build index 449f99e1de63b3bab10da894b4d4e681226b6618..c337369cb1e8fde40b9f9d62ec973d02ddb1c951 100644 --- a/disas/meson.build +++ b/disas/meson.build @@ -12,6 +12,7 @@ common_ss.add(when: 'CONFIG_I386_DIS', if_true: files('i386.c')) common_ss.add(when: 'CONFIG_M68K_DIS', if_true: files('m68k.c')) common_ss.add(when: 'CONFIG_MICROBLAZE_DIS', if_true: files('microblaze.c')) common_ss.add(when: 'CONFIG_MIPS_DIS', if_true: files('mips.c')) +common_ss.add(when: 'CONFIG_LOONGARCH_DIS', if_true: files('loongarch.c')) common_ss.add(when: 'CONFIG_NANOMIPS_DIS', if_true: files('nanomips.cpp')) common_ss.add(when: 'CONFIG_NIOS2_DIS', if_true: files('nios2.c')) common_ss.add(when: 'CONFIG_PPC_DIS', if_true: files('ppc.c')) @@ -20,4 +21,5 @@ common_ss.add(when: 'CONFIG_S390_DIS', if_true: files('s390.c')) common_ss.add(when: 'CONFIG_SH4_DIS', if_true: files('sh4.c')) common_ss.add(when: 'CONFIG_SPARC_DIS', if_true: files('sparc.c')) common_ss.add(when: 'CONFIG_XTENSA_DIS', if_true: files('xtensa.c')) +common_ss.add(when: 'CONFIG_SW64_DIS', if_true: files('sw64.c')) common_ss.add(when: capstone, if_true: files('capstone.c')) diff --git a/disas/riscv.c b/disas/riscv.c index 793ad14c2700b1ccbf62825fe24bb8f37179f542..ad7b978815df4efd794141c1d850a39ed20f3f5f 100644 --- a/disas/riscv.c +++ b/disas/riscv.c @@ -1189,7 +1189,7 @@ const rv_opcode_data opcode_data[] = { { "max", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "maxu", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, - { "clzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, + { "ctzw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, { "cpopw", rv_codec_r, rv_fmt_rd_rs1, NULL, 0, 0, 0 }, { "slli.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, { "add.uw", rv_codec_r, rv_fmt_rd_rs1_rs2, NULL, 0, 0, 0 }, @@ -1307,8 +1307,8 @@ static const char *csr_name(int csrno) case 0x03ba: return "pmpaddr10"; case 0x03bb: return "pmpaddr11"; case 0x03bc: return "pmpaddr12"; - case 0x03bd: return "pmpaddr14"; - case 0x03be: return "pmpaddr13"; + case 0x03bd: return "pmpaddr13"; + case 0x03be: return "pmpaddr14"; case 0x03bf: return "pmpaddr15"; case 0x0780: return "mtohost"; case 0x0781: return "mfromhost"; diff --git a/disas/sw64.c b/disas/sw64.c new file mode 100755 index 0000000000000000000000000000000000000000..16504c673a948f075a13415bc9c820d2bdd5232c --- /dev/null +++ b/disas/sw64.c @@ -0,0 +1,1213 @@ +/* + * sw_64-dis.c -- Disassemble Sw_64 CORE3 instructions + * + * This file is part of libopcodes. + * + * This library is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 3, or (at your option) + * any later version. + * + * It is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public + * License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this file; see the file COPYING. If not, write to the Free + * Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA + * 02110-1301, USA. + */ + +#include "qemu/osdep.h" +#include "disas/dis-asm.h" + +#undef MAX + +struct sw_64_opcode { + /* The opcode name. */ + const char *name; + + /* The opcode itself. Those bits which will be filled in with + operands are zeroes. */ + unsigned opcode; + + /* The opcode mask. This is used by the disassembler. This is a + mask containing ones indicating those bits which must match the + opcode field, and zeroes indicating those bits which need not + match (and are presumably filled in by operands). */ + unsigned mask; + + /* One bit flags for the opcode. These are primarily used to + indicate specific processors and environments support the + instructions. The defined values are listed below. */ + unsigned flags; + + /* An array of operand codes. Each code is an index into the + operand table. They appear in the order which the operands must + appear in assembly code, and are terminated by a zero. */ + unsigned char operands[5]; +}; + +/* The table itself is sorted by major opcode number, and is otherwise + in the order in which the disassembler should consider + instructions. */ +extern const struct sw_64_opcode sw_64_opcodes[]; +extern const unsigned sw_64_num_opcodes; + +/* Values defined for the flags field of a struct sw_64_opcode. */ + +/* CPU Availability */ +#define SW_OPCODE_BASE 0x0001 /* Base architecture insns. */ +#define SW_OPCODE_CORE3 0x0002 /* Core3 private insns. */ +#define SW_LITOP(i) (((i) >> 26) & 0x3D) + +#define SW_OPCODE_NOHMCODE (~(SW_OPCODE_BASE|SW_OPCODE_CORE3)) + +/* A macro to extract the major opcode from an instruction. */ +#define SW_OP(i) (((i) >> 26) & 0x3F) + +/* The total number of major opcodes. */ +#define SW_NOPS 0x40 + +/* The operands table is an array of struct sw_64_operand. */ + +struct sw_64_operand { + /* The number of bits in the operand. */ + unsigned int bits : 5; + + /* How far the operand is left shifted in the instruction. */ + unsigned int shift : 5; + + /* The default relocation type for this operand. */ + signed int default_reloc : 16; + + /* One bit syntax flags. */ + unsigned int flags : 16; + + /* Insertion function. This is used by the assembler. To insert an + operand value into an instruction, check this field. + + If it is NULL, execute + i |= (op & ((1 << o->bits) - 1)) << o->shift; + (i is the instruction which we are filling in, o is a pointer to + this structure, and op is the opcode value; this assumes twos + complement arithmetic). + + If this field is not NULL, then simply call it with the + instruction and the operand value. It will return the new value + of the instruction. If the ERRMSG argument is not NULL, then if + the operand value is illegal, *ERRMSG will be set to a warning + string (the operand will be inserted in any case). If the + operand value is legal, *ERRMSG will be unchanged (most operands + can accept any value). */ + unsigned (*insert) (unsigned instruction, int op, const char **errmsg); + + /* Extraction function. This is used by the disassembler. To + extract this operand type from an instruction, check this field. 
+ + If it is NULL, compute + op = ((i) >> o->shift) & ((1 << o->bits) - 1); + if ((o->flags & SW_OPERAND_SIGNED) != 0 + && (op & (1 << (o->bits - 1))) != 0) + op -= 1 << o->bits; + (i is the instruction, o is a pointer to this structure, and op + is the result; this assumes twos complement arithmetic). + + If this field is not NULL, then simply call it with the + instruction value. It will return the value of the operand. If + the INVALID argument is not NULL, *INVALID will be set to + non-zero if this operand type cannot actually be extracted from + this operand (i.e., the instruction does not match). If the + operand is valid, *INVALID will not be changed. */ + int (*extract) (unsigned instruction, int *invalid); +}; + +/* Elements in the table are retrieved by indexing with values from + the operands field of the sw_64_opcodes table. */ + +extern const struct sw_64_operand sw_64_operands[]; +extern const unsigned sw_64_num_operands; +/* Values defined for the flags field of a struct sw_64_operand. */ + +/* Mask for selecting the type for typecheck purposes */ +#define SW_OPERAND_TYPECHECK_MASK \ + (SW_OPERAND_PARENS | SW_OPERAND_COMMA | SW_OPERAND_IR | \ + SW_OPERAND_FPR | SW_OPERAND_RELATIVE | SW_OPERAND_SIGNED | \ + SW_OPERAND_UNSIGNED) + +/* This operand does not actually exist in the assembler input. This + is used to support extended mnemonics, for which two operand fields + are identical. The assembler should call the insert function with + any op value. The disassembler should call the extract function, + ignore the return value, and check the value placed in the invalid + argument. */ +#define SW_OPERAND_FAKE 01 + +/* The operand should be wrapped in parentheses rather than separated + from the previous by a comma. This is used for the load and store + instructions which want their operands to look like "Ra,disp(Rb)". */ +#define SW_OPERAND_PARENS 02 + +/* Used in combination with PARENS, this suppresses the suppression of + the comma. This is used for "jmp Ra,(Rb),hint". */ +#define SW_OPERAND_COMMA 04 + +/* This operand names an integer register. */ +#define SW_OPERAND_IR 010 + +/* This operand names a floating point register. */ +#define SW_OPERAND_FPR 020 + +/* This operand is a relative branch displacement. The disassembler + prints these symbolically if possible. */ +#define SW_OPERAND_RELATIVE 040 + +/* This operand takes signed values. */ +#define SW_OPERAND_SIGNED 0100 + +/* This operand takes unsigned values. This exists primarily so that + a flags value of 0 can be treated as end-of-arguments. */ +#define SW_OPERAND_UNSIGNED 0200 + +/* Suppress overflow detection on this field. This is used for hints. */ +#define SW_OPERAND_NOOVERFLOW 0400 + +/* Mask for optional argument default value. */ +#define SW_OPERAND_OPTIONAL_MASK 07000 + +/* This operand defaults to zero. This is used for jump hints. */ +#define SW_OPERAND_DEFAULT_ZERO 01000 + +/* This operand should default to the first (real) operand and is used + in conjunction with SW_OPERAND_OPTIONAL. This allows + "and $0,3,$0" to be written as "and $0,3", etc. I don't like + it, but it's what DEC does. */ +#define SW_OPERAND_DEFAULT_FIRST 02000 + +/* Similarly, this operand should default to the second (real) operand. + This allows "negl $0" instead of "negl $0,$0".
*/ +#define SW_OPERAND_DEFAULT_SECOND 04000 + +/* Register common names */ + +#define SW_REG_V0 0 +#define SW_REG_T0 1 +#define SW_REG_T1 2 +#define SW_REG_T2 3 +#define SW_REG_T3 4 +#define SW_REG_T4 5 +#define SW_REG_T5 6 +#define SW_REG_T6 7 +#define SW_REG_T7 8 +#define SW_REG_S0 9 +#define SW_REG_S1 10 +#define SW_REG_S2 11 +#define SW_REG_S3 12 +#define SW_REG_S4 13 +#define SW_REG_S5 14 +#define SW_REG_FP 15 +#define SW_REG_A0 16 +#define SW_REG_A1 17 +#define SW_REG_A2 18 +#define SW_REG_A3 19 +#define SW_REG_A4 20 +#define SW_REG_A5 21 +#define SW_REG_T8 22 +#define SW_REG_T9 23 +#define SW_REG_T10 24 +#define SW_REG_T11 25 +#define SW_REG_RA 26 +#define SW_REG_PV 27 +#define SW_REG_T12 27 +#define SW_REG_AT 28 +#define SW_REG_GP 29 +#define SW_REG_SP 30 +#define SW_REG_ZERO 31 + +enum bfd_reloc_code_real { + BFD_RELOC_23_PCREL_S2, + BFD_RELOC_SW_64_HINT +}; + +static unsigned insert_rba(unsigned insn, int value ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | (((insn >> 21) & 0x1f) << 16); +} + +static int extract_rba(unsigned insn, int *invalid) +{ + if (invalid != (int *) NULL + && ((insn >> 21) & 0x1f) != ((insn >> 16) & 0x1f)) + *invalid = 1; + return 0; +} + +/* The same for the RC field. */ +static unsigned insert_rca(unsigned insn, int value ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((insn >> 21) & 0x1f); +} + +static unsigned insert_rdc(unsigned insn, int value ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | ((insn >> 5) & 0x1f); +} + +static int extract_rdc(unsigned insn, int *invalid) +{ + if (invalid != (int *) NULL + && ((insn >> 5) & 0x1f) != (insn & 0x1f)) + *invalid = 1; + return 0; +} + +static int extract_rca(unsigned insn, int *invalid) +{ + if (invalid != (int *) NULL + && ((insn >> 21) & 0x1f) != (insn & 0x1f)) + *invalid = 1; + return 0; +} + +/* Fake arguments in which the registers must be set to ZERO. */ +static unsigned insert_za(unsigned insn, int value ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | (31 << 21); +} + +static int extract_za(unsigned insn, int *invalid) +{ + if (invalid != (int *) NULL && ((insn >> 21) & 0x1f) != 31) + *invalid = 1; + return 0; +} + +static unsigned insert_zb(unsigned insn, int value ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | (31 << 16); +} + +static int extract_zb(unsigned insn, int *invalid) +{ + if (invalid != (int *) NULL && ((insn >> 16) & 0x1f) != 31) + *invalid = 1; + return 0; +} + +static unsigned insert_zc(unsigned insn, int value ATTRIBUTE_UNUSED, + const char **errmsg ATTRIBUTE_UNUSED) +{ + return insn | 31; +} + +static int extract_zc(unsigned insn, int *invalid) +{ + if (invalid != (int *) NULL && (insn & 0x1f) != 31) + *invalid = 1; + return 0; +} + + +/* The displacement field of a Branch format insn. */ + +static unsigned insert_bdisp(unsigned insn, int value, const char **errmsg) +{ + if (errmsg != (const char **)NULL && (value & 3)) + *errmsg = "branch operand unaligned"; + return insn | ((value / 4) & 0x1FFFFF); +} + +static int extract_bdisp(unsigned insn, int *invalid ATTRIBUTE_UNUSED) +{ + return 4 * (((insn & 0x1FFFFF) ^ 0x100000) - 0x100000); +} + +/* The hint field of a JMP/JSR insn. */ +/* sw use 16 bits hint disp. 
*/ +static unsigned insert_jhint(unsigned insn, int value, const char **errmsg) +{ + if (errmsg != (const char **)NULL && (value & 3)) + *errmsg = "jump hint unaligned"; + return insn | ((value / 4) & 0xFFFF); +} + +static int extract_jhint(unsigned insn, int *invalid ATTRIBUTE_UNUSED) +{ + return 4 * (((insn & 0xFFFF) ^ 0x8000) - 0x8000); +} + +/* The hint field of a CORE3 HW_JMP/JSR insn. */ + +static unsigned insert_sw4hwjhint(unsigned insn, int value, const char **errmsg) +{ + if (errmsg != (const char **)NULL && (value & 3)) + *errmsg = "jump hint unaligned"; + return insn | ((value / 4) & 0x1FFF); +} + +static int extract_sw4hwjhint(unsigned insn, int *invalid ATTRIBUTE_UNUSED) +{ + return 4 * (((insn & 0x1FFF) ^ 0x1000) - 0x1000); +} + +/* The operands table. */ + +const struct sw_64_operand sw_64_operands[] = { + /* The fields are bits, shift, default_reloc, flags, insert, extract */ + /* The zero index is used to indicate end-of-list */ +#define UNUSED 0 + { 0, 0, 0, 0, 0, 0 }, + + /* The plain integer register fields. */ +#define RA (UNUSED + 1) + { 5, 21, 0, SW_OPERAND_IR, 0, 0 }, +#define RB (RA + 1) + { 5, 16, 0, SW_OPERAND_IR, 0, 0 }, +#define RC (RB + 1) + { 5, 0, 0, SW_OPERAND_IR, 0, 0 }, + + /* The plain fp register fields. */ +#define FA (RC + 1) + { 5, 21, 0, SW_OPERAND_FPR, 0, 0 }, +#define FB (FA + 1) + { 5, 16, 0, SW_OPERAND_FPR, 0, 0 }, +#define FC (FB + 1) + { 5, 0, 0, SW_OPERAND_FPR, 0, 0 }, + + /* The integer registers when they are ZERO. */ +#define ZA (FC + 1) + { 5, 21, 0, SW_OPERAND_FAKE, insert_za, extract_za }, +#define ZB (ZA + 1) + { 5, 16, 0, SW_OPERAND_FAKE, insert_zb, extract_zb }, +#define ZC (ZB + 1) + { 5, 0, 0, SW_OPERAND_FAKE, insert_zc, extract_zc }, + + /* The RB field when it needs parentheses. */ +#define PRB (ZC + 1) + { 5, 16, 0, SW_OPERAND_IR | SW_OPERAND_PARENS, 0, 0 }, + + /* The RB field when it needs parentheses _and_ a preceding comma. */ +#define CPRB (PRB + 1) + { 5, 16, 0, + SW_OPERAND_IR | SW_OPERAND_PARENS | SW_OPERAND_COMMA, 0, 0 }, + + /* The RB field when it must be the same as the RA field. */ +#define RBA (CPRB + 1) + { 5, 16, 0, SW_OPERAND_FAKE, insert_rba, extract_rba }, + + /* The RC field when it must be the same as the RB field. */ +#define RCA (RBA + 1) + { 5, 0, 0, SW_OPERAND_FAKE, insert_rca, extract_rca }, + +#define RDC (RCA + 1) + { 5, 0, 0, SW_OPERAND_FAKE, insert_rdc, extract_rdc }, + + /* The RC field when it can *default* to RA. */ +#define DRC1 (RDC + 1) + { 5, 0, 0, + SW_OPERAND_IR | SW_OPERAND_DEFAULT_FIRST, 0, 0 }, + + /* The RC field when it can *default* to RB. */ +#define DRC2 (DRC1 + 1) + { 5, 0, 0, + SW_OPERAND_IR | SW_OPERAND_DEFAULT_SECOND, 0, 0 }, + + /* The FC field when it can *default* to RA. */ +#define DFC1 (DRC2 + 1) + { 5, 0, 0, + SW_OPERAND_FPR | SW_OPERAND_DEFAULT_FIRST, 0, 0 }, + + /* The FC field when it can *default* to RB. */ +#define DFC2 (DFC1 + 1) + { 5, 0, 0, + SW_OPERAND_FPR | SW_OPERAND_DEFAULT_SECOND, 0, 0 }, + + /* The unsigned 8-bit literal of Operate format insns. */ +#define LIT (DFC2 + 1) + { 8, 13, -LIT, SW_OPERAND_UNSIGNED, 0, 0 }, + + /* The signed 16-bit displacement of Memory format insns. From here + we can't tell what relocation should be used, so don't use a default. */ +#define MDISP (LIT + 1) + { 16, 0, -MDISP, SW_OPERAND_SIGNED, 0, 0 }, + + /* The signed "23-bit" aligned displacement of Branch format insns.
*/ +#define BDISP (MDISP + 1) + { 21, 0, BFD_RELOC_23_PCREL_S2, + SW_OPERAND_RELATIVE, insert_bdisp, extract_bdisp }, + + /* The 26-bit hmcode function for sys_call and sys_call / b. */ +#define HMFN (BDISP + 1) + { 25, 0, -HMFN, SW_OPERAND_UNSIGNED, 0, 0 }, + + /* sw jsr/ret instructions have no function bits. */ + /* The optional signed "16-bit" aligned displacement of the JMP/JSR hint. */ +#define JMPHINT (HMFN + 1) + { 16, 0, BFD_RELOC_SW_64_HINT, + SW_OPERAND_RELATIVE | SW_OPERAND_DEFAULT_ZERO | SW_OPERAND_NOOVERFLOW, + insert_jhint, extract_jhint }, + + /* The optional hint to RET/JSR_COROUTINE. */ +#define RETHINT (JMPHINT + 1) + { 16, 0, -RETHINT, + SW_OPERAND_UNSIGNED | SW_OPERAND_DEFAULT_ZERO, 0, 0 }, + + /* The 12-bit displacement for the core3 hw_{ld,st} (pal1b/pal1f) insns. */ +#define HWDISP (RETHINT + 1) + { 12, 0, -HWDISP, SW_OPERAND_SIGNED, 0, 0 }, + + /* The 16-bit combined index/scoreboard mask for the core3 + hw_m[ft]pr (pal19/pal1d) insns. */ +#define HWINDEX (HWDISP + 1) + { 16, 0, -HWINDEX, SW_OPERAND_UNSIGNED, 0, 0 }, + + /* The 13-bit branch hint for the core3 hw_jmp/jsr (pal1e) insn. */ + + /* for the third operand of ternary operands integer insn. */ +#define R3 (HWINDEX + 1) + { 5, 5, 0, SW_OPERAND_IR, 0, 0 }, + /* The plain fp register fields */ +#define F3 (R3 + 1) + { 5, 5, 0, SW_OPERAND_FPR, 0, 0 }, + /* sw simd settle instruction lit */ +#define FMALIT (F3 + 1) + { 5, 5, -FMALIT, SW_OPERAND_UNSIGNED, 0, 0 }, //V1.1 +#define RPIINDEX (FMALIT + 1) + { 8, 0, -RPIINDEX, SW_OPERAND_UNSIGNED, 0, 0 }, +#define ATMDISP (RPIINDEX + 1) + { 12, 0, -ATMDISP, SW_OPERAND_SIGNED, 0, 0 }, +}; + +const unsigned sw_64_num_operands = sizeof(sw_64_operands) / sizeof(*sw_64_operands); + +/* Macros used to form opcodes. */ + +/* The main opcode. */ +#define OP(x) (((x) & 0x3F) << 26) +#define OP_MASK 0xFC000000 + +/* Branch format instructions. */ +#define BRA_(oo) OP(oo) +#define BRA_MASK OP_MASK +#define BRA(oo) BRA_(oo), BRA_MASK + +#ifdef HUANGLM20171113 +/* Floating point format instructions. */ +#define FP_(oo,fff) (OP(oo) | (((fff) & 0x7FF) << 5)) +#define FP_MASK (OP_MASK | 0xFFE0) +#define FP(oo,fff) FP_(oo,fff), FP_MASK + +#else +/* Floating point format instructions. */ +#define FP_(oo,fff) (OP(oo) | (((fff) & 0xFF) << 5)) +#define FP_MASK (OP_MASK | 0x1FE0) +#define FP(oo,fff) FP_(oo,fff), FP_MASK + +#define FMA_(oo,fff) (OP(oo) | (((fff) & 0x3F) << 10 )) +#define FMA_MASK (OP_MASK | 0xFC00) +#define FMA(oo,fff) FMA_(oo,fff), FMA_MASK +#endif + +/* Memory format instructions. */ +#define MEM_(oo) OP(oo) +#define MEM_MASK OP_MASK +#define MEM(oo) MEM_(oo), MEM_MASK + +/* Memory/Func Code format instructions. */ +#define MFC_(oo,ffff) (OP(oo) | ((ffff) & 0xFFFF)) +#define MFC_MASK (OP_MASK | 0xFFFF) +#define MFC(oo,ffff) MFC_(oo,ffff), MFC_MASK + +/* Memory/Branch format instructions. */ +#define MBR_(oo,h) (OP(oo) | (((h) & 3) << 14)) +#define MBR_MASK (OP_MASK | 0xC000) +#define MBR(oo,h) MBR_(oo,h), MBR_MASK + +/* Now the sw Operate format instructions differ from SW1. */ +#define OPR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 5)) +#define OPRL_(oo,ff) (OPR_((oo), (ff)) ) +#define OPR_MASK (OP_MASK | 0x1FE0) +#define OPR(oo,ff) OPR_(oo,ff), OPR_MASK +#define OPRL(oo,ff) OPRL_(oo,ff), OPR_MASK + +/* sw ternary operands Operate format instructions.
*/ +#define TOPR_(oo,ff) (OP(oo) | (((ff) & 0x07) << 10)) +#define TOPRL_(oo,ff) (TOPR_((oo), (ff))) +#define TOPR_MASK (OP_MASK | 0x1C00) +#define TOPR(oo,ff) TOPR_(oo,ff), TOPR_MASK +#define TOPRL(oo,ff) TOPRL_(oo,ff), TOPR_MASK + +/* sw atom instructions. */ +#define ATMEM_(oo,h) (OP(oo) | (((h) & 0xF) << 12)) +#define ATMEM_MASK (OP_MASK | 0xF000) +#define ATMEM(oo,h) ATMEM_(oo,h), ATMEM_MASK + +/* sw privilege instructions. */ +#define PRIRET_(oo,h) (OP(oo) | (((h) & 0x1) << 20)) +#define PRIRET_MASK (OP_MASK | 0x100000) +#define PRIRET(oo,h) PRIRET_(oo,h), PRIRET_MASK + +/* sw pri_rcsr,pri_wcsr. */ +#define CSR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 8)) +#define CSR_MASK (OP_MASK | 0xFF00) +#define CSR(oo,ff) CSR_(oo,ff), CSR_MASK + +#define PCD_(oo,ff) (OP(oo) | (ff << 25)) +#define PCD_MASK OP_MASK +#define PCD(oo,ff) PCD_(oo,ff), PCD_MASK + +/* Hardware memory (hw_{ld,st}) instructions. */ +#define HWMEM_(oo,f) (OP(oo) | (((f) & 0xF) << 12)) +#define HWMEM_MASK (OP_MASK | 0xF000) +#define HWMEM(oo,f) HWMEM_(oo,f), HWMEM_MASK + +#define LOGX_(oo,ff) (OP(oo) | (((ff) & 0x3F) << 10)) +#define LOGX_MASK (0xF0000000) +#define LOGX(oo,ff) LOGX_(oo,ff), LOGX_MASK + +/* Abbreviations for instruction subsets. */ +#define BASE SW_OPCODE_BASE +#define CORE3 SW_OPCODE_CORE3 + +/* Common combinations of arguments. */ +#define ARG_NONE { 0 } +#define ARG_BRA { RA, BDISP } +#define ARG_FBRA { FA, BDISP } +#define ARG_FP { FA, FB, DFC1 } +#define ARG_FPZ1 { ZA, FB, DFC1 } +#define ARG_MEM { RA, MDISP, PRB } +#define ARG_FMEM { FA, MDISP, PRB } +#define ARG_OPR { RA, RB, DRC1 } + +#define ARG_OPRL { RA, LIT, DRC1 } +#define ARG_OPRZ1 { ZA, RB, DRC1 } +#define ARG_OPRLZ1 { ZA, LIT, RC } +#define ARG_PCD { HMFN } +#define ARG_HWMEM { RA, HWDISP, PRB } +#define ARG_FPL { FA,LIT, DFC1 } +#define ARG_FMA { FA,FB,F3, DFC1 } +#define ARG_PREFETCH { ZA, MDISP, PRB } +#define ARG_TOPR { RA, RB,R3, DRC1 } +#define ARG_TOPRL { RA, LIT, R3,DRC1 } +#define ARG_FMAL { FA,FB,FMALIT, DFC1 } +#define ARG_ATMEM { RA, ATMDISP, PRB } +#define ARG_VUAMEM { FA, ATMDISP, PRB } + +/* The opcode table. + + The format of the opcode table is: + + NAME OPCODE MASK { OPERANDS } + + NAME is the name of the instruction. + + OPCODE is the instruction opcode. + + MASK is the opcode mask; this is used to tell the disassembler + which bits in the actual opcode must match OPCODE. + + OPERANDS is the list of operands. + + The preceding macros merge the text of the OPCODE and MASK fields. + + The disassembler reads the table in order and prints the first + instruction which matches, so this table is sorted to put more + specific instructions before more general instructions. + + Otherwise, it is sorted by major opcode and minor function code. 
+ */ + +const struct sw_64_opcode sw_64_opcodes[] = { + { "sys_call/b", PCD(0x00,0x00), BASE, ARG_PCD }, + { "sys_call", PCD(0x00,0x01), BASE, ARG_PCD }, + + { "call", MEM(0x01), BASE, { RA, CPRB, JMPHINT } }, + { "ret", MEM(0x02), BASE, { RA, CPRB, RETHINT } }, + { "jmp", MEM(0x03), BASE, { RA, CPRB, JMPHINT } }, + { "br", BRA(0x04), BASE, { ZA, BDISP } }, + { "br", BRA(0x04), BASE, ARG_BRA }, + { "bsr", BRA(0x05), BASE, { ZA, BDISP } }, + { "bsr", BRA(0x05), BASE, ARG_BRA }, + { "memb", MFC(0x06,0x0000), BASE, ARG_NONE }, + { "imemb", MFC(0x06,0x0001), BASE, ARG_NONE }, + { "rtc", MFC(0x06,0x0020), BASE, { RA, ZB } }, + { "rtc", MFC(0x06,0x0020), BASE, { RA, RB } }, + { "rcid", MFC(0x06,0x0040), BASE, { RA , ZB} }, + { "halt", MFC(0x06,0x0080), BASE, { ZA, ZB } }, + { "rd_f", MFC(0x06,0x1000), CORE3, { RA, ZB } }, + { "wr_f", MFC(0x06,0x1020), CORE3, { RA, ZB } }, + { "rtid", MFC(0x06,0x1040), BASE, { RA } }, + { "pri_rcsr", CSR(0x06,0xFE), CORE3, { RA, RPIINDEX ,ZB } }, + { "pri_wcsr", CSR(0x06,0xFF), CORE3, { RA, RPIINDEX ,ZB } }, + { "pri_ret", PRIRET(0x07,0x0), BASE, { RA } }, + { "pri_ret/b", PRIRET(0x07,0x1), BASE, { RA } }, + { "lldw", ATMEM(0x08,0x0), BASE, ARG_ATMEM }, + { "lldl", ATMEM(0x08,0x1), BASE, ARG_ATMEM }, + { "ldw_inc", ATMEM(0x08,0x2), CORE3, ARG_ATMEM }, + { "ldl_inc", ATMEM(0x08,0x3), CORE3, ARG_ATMEM }, + { "ldw_dec", ATMEM(0x08,0x4), CORE3, ARG_ATMEM }, + { "ldl_dec", ATMEM(0x08,0x5), CORE3, ARG_ATMEM }, + { "ldw_set", ATMEM(0x08,0x6), CORE3, ARG_ATMEM }, + { "ldl_set", ATMEM(0x08,0x7), CORE3, ARG_ATMEM }, + { "lstw", ATMEM(0x08,0x8), BASE, ARG_ATMEM }, + { "lstl", ATMEM(0x08,0x9), BASE, ARG_ATMEM }, + { "ldw_nc", ATMEM(0x08,0xA), BASE, ARG_ATMEM }, + { "ldl_nc", ATMEM(0x08,0xB), BASE, ARG_ATMEM }, + { "ldd_nc", ATMEM(0x08,0xC), BASE, ARG_VUAMEM }, + { "stw_nc", ATMEM(0x08,0xD), BASE, ARG_ATMEM }, + { "stl_nc", ATMEM(0x08,0xE), BASE, ARG_ATMEM }, + { "std_nc", ATMEM(0x08,0xF), BASE, ARG_VUAMEM }, + { "fillcs", MEM(0x09), BASE, ARG_PREFETCH }, + { "ldwe", MEM(0x09), BASE, ARG_FMEM }, + { "e_fillcs", MEM(0x0A), BASE, ARG_PREFETCH }, + { "ldse", MEM(0x0A), BASE, ARG_FMEM }, + { "fillcs_e", MEM(0x0B), BASE, ARG_PREFETCH }, + { "ldde", MEM(0x0B), BASE, ARG_FMEM }, + { "vlds", MEM(0x0C), BASE, ARG_FMEM }, + { "vldd", MEM(0x0D), BASE, ARG_FMEM }, + { "vsts", MEM(0x0E), BASE, ARG_FMEM }, + { "vstd", MEM(0x0F), BASE, ARG_FMEM }, + { "addw", OPR(0x10,0x00), BASE, ARG_OPR }, + { "addw", OPRL(0x12,0x00), BASE, ARG_OPRL }, + { "subw", OPR(0x10,0x01), BASE, ARG_OPR }, + { "subw", OPRL(0x12,0x01), BASE, ARG_OPRL }, + { "s4addw", OPR(0x10,0x02), BASE, ARG_OPR }, + { "s4addw", OPRL(0x12,0x02), BASE, ARG_OPRL }, + { "s4subw", OPR(0x10,0x03), BASE, ARG_OPR }, + { "s4subw", OPRL(0x12,0x03), BASE, ARG_OPRL }, + { "s8addw", OPR(0x10,0x04), BASE, ARG_OPR }, + { "s8addw", OPRL(0x12,0x04), BASE, ARG_OPRL }, + { "s8subw", OPR(0x10,0x05), BASE, ARG_OPR }, + { "s8subw", OPRL(0x12,0x05), BASE, ARG_OPRL }, + { "addl", OPR(0x10,0x08), BASE, ARG_OPR }, + { "addl", OPRL(0x12,0x08), BASE, ARG_OPRL }, + { "subl", OPR(0x10,0x09), BASE, ARG_OPR }, + { "subl", OPRL(0x12,0x09), BASE, ARG_OPRL }, + { "s4addl", OPR(0x10,0x0A), BASE, ARG_OPR }, + { "s4addl", OPRL(0x12,0x0A), BASE, ARG_OPRL }, + { "s4subl", OPR(0x10,0x0B), BASE, ARG_OPR }, + { "s4subl", OPRL(0x12,0x0B), BASE, ARG_OPRL }, + { "s8addl", OPR(0x10,0x0C), BASE, ARG_OPR }, + { "s8addl", OPRL(0x12,0x0C), BASE, ARG_OPRL }, + { "s8subl", OPR(0x10,0x0D), BASE, ARG_OPR }, + { "s8subl", OPRL(0x12,0x0D), BASE, ARG_OPRL }, + { "mulw", OPR(0x10,0x10), 
BASE, ARG_OPR }, + { "mulw", OPRL(0x12,0x10), BASE, ARG_OPRL }, + { "mull", OPR(0x10,0x18), BASE, ARG_OPR }, + { "mull", OPRL(0x12,0x18), BASE, ARG_OPRL }, + { "umulh", OPR(0x10,0x19), BASE, ARG_OPR }, + { "umulh", OPRL(0x12,0x19), BASE, ARG_OPRL }, + { "cmpeq", OPR(0x10,0x28), BASE, ARG_OPR }, + { "cmpeq", OPRL(0x12,0x28), BASE, ARG_OPRL }, + { "cmplt", OPR(0x10,0x29), BASE, ARG_OPR }, + { "cmplt", OPRL(0x12,0x29), BASE, ARG_OPRL }, + { "cmple", OPR(0x10,0x2A), BASE, ARG_OPR }, + { "cmple", OPRL(0x12,0x2A), BASE, ARG_OPRL }, + { "cmpult", OPR(0x10,0x2B), BASE, ARG_OPR }, + { "cmpult", OPRL(0x12,0x2B), BASE, ARG_OPRL }, + { "cmpule", OPR(0x10,0x2C), BASE, ARG_OPR }, + { "cmpule", OPRL(0x12,0x2C), BASE, ARG_OPRL }, + + { "and", OPR(0x10,0x38), BASE, ARG_OPR }, + { "and", OPRL(0x12,0x38),BASE, ARG_OPRL }, + { "bic", OPR(0x10,0x39), BASE, ARG_OPR }, + { "bic", OPRL(0x12,0x39),BASE, ARG_OPRL }, + { "bis", OPR(0x10,0x3A), BASE, ARG_OPR }, + { "bis", OPRL(0x12,0x3A),BASE, ARG_OPRL }, + { "ornot", OPR(0x10,0x3B), BASE, ARG_OPR }, + { "ornot", OPRL(0x12,0x3B),BASE, ARG_OPRL }, + { "xor", OPR(0x10,0x3C), BASE, ARG_OPR }, + { "xor", OPRL(0x12,0x3C),BASE, ARG_OPRL }, + { "eqv", OPR(0x10,0x3D), BASE, ARG_OPR }, + { "eqv", OPRL(0x12,0x3D),BASE, ARG_OPRL }, + { "inslb", OPR(0x10,0x40), BASE, ARG_OPR }, + { "inslb", OPRL(0x12,0x40),BASE, ARG_OPRL }, + { "inslh", OPR(0x10,0x41), BASE, ARG_OPR }, + { "inslh", OPRL(0x12,0x41),BASE, ARG_OPRL }, + { "inslw", OPR(0x10,0x42), BASE, ARG_OPR }, + { "inslw", OPRL(0x12,0x42),BASE, ARG_OPRL }, + { "insll", OPR(0x10,0x43), BASE, ARG_OPR }, + { "insll", OPRL(0x12,0x43),BASE, ARG_OPRL }, + { "inshb", OPR(0x10,0x44), BASE, ARG_OPR }, + { "inshb", OPRL(0x12,0x44),BASE, ARG_OPRL }, + { "inshh", OPR(0x10,0x45), BASE, ARG_OPR }, + { "inshh", OPRL(0x12,0x45),BASE, ARG_OPRL }, + { "inshw", OPR(0x10,0x46), BASE, ARG_OPR }, + { "inshw", OPRL(0x12,0x46),BASE, ARG_OPRL }, + { "inshl", OPR(0x10,0x47), BASE, ARG_OPR }, + { "inshl", OPRL(0x12,0x47),BASE, ARG_OPRL }, + + { "sll", OPR(0x10,0x48), BASE, ARG_OPR }, + { "sll", OPRL(0x12,0x48),BASE, ARG_OPRL }, + { "srl", OPR(0x10,0x49), BASE, ARG_OPR }, + { "srl", OPRL(0x12,0x49),BASE, ARG_OPRL }, + { "sra", OPR(0x10,0x4A), BASE, ARG_OPR }, + { "sra", OPRL(0x12,0x4A),BASE, ARG_OPRL }, + { "extlb", OPR(0x10,0x50), BASE, ARG_OPR }, + { "extlb", OPRL(0x12,0x50),BASE, ARG_OPRL }, + { "extlh", OPR(0x10,0x51), BASE, ARG_OPR }, + { "extlh", OPRL(0x12,0x51),BASE, ARG_OPRL }, + { "extlw", OPR(0x10,0x52), BASE, ARG_OPR }, + { "extlw", OPRL(0x12,0x52),BASE, ARG_OPRL }, + { "extll", OPR(0x10,0x53), BASE, ARG_OPR }, + { "extll", OPRL(0x12,0x53),BASE, ARG_OPRL }, + { "exthb", OPR(0x10,0x54), BASE, ARG_OPR }, + { "exthb", OPRL(0x12,0x54),BASE, ARG_OPRL }, + { "exthh", OPR(0x10,0x55), BASE, ARG_OPR }, + { "exthh", OPRL(0x12,0x55),BASE, ARG_OPRL }, + { "exthw", OPR(0x10,0x56), BASE, ARG_OPR }, + { "exthw", OPRL(0x12,0x56),BASE, ARG_OPRL }, + { "exthl", OPR(0x10,0x57), BASE, ARG_OPR }, + { "exthl", OPRL(0x12,0x57),BASE, ARG_OPRL }, + { "ctpop", OPR(0x10,0x58), BASE, ARG_OPRZ1 }, + { "ctlz", OPR(0x10,0x59), BASE, ARG_OPRZ1 }, + { "cttz", OPR(0x10,0x5A), BASE, ARG_OPRZ1 }, + { "masklb", OPR(0x10,0x60), BASE, ARG_OPR }, + { "masklb", OPRL(0x12,0x60),BASE, ARG_OPRL }, + { "masklh", OPR(0x10,0x61), BASE, ARG_OPR }, + { "masklh", OPRL(0x12,0x61),BASE, ARG_OPRL }, + { "masklw", OPR(0x10,0x62), BASE, ARG_OPR }, + { "masklw", OPRL(0x12,0x62),BASE, ARG_OPRL }, + { "maskll", OPR(0x10,0x63), BASE, ARG_OPR }, + { "maskll", OPRL(0x12,0x63),BASE, ARG_OPRL }, + { 
"maskhb", OPR(0x10,0x64), BASE, ARG_OPR }, + { "maskhb", OPRL(0x12,0x64),BASE, ARG_OPRL }, + { "maskhh", OPR(0x10,0x65), BASE, ARG_OPR }, + { "maskhh", OPRL(0x12,0x65),BASE, ARG_OPRL }, + { "maskhw", OPR(0x10,0x66), BASE, ARG_OPR }, + { "maskhw", OPRL(0x12,0x66),BASE, ARG_OPRL }, + { "maskhl", OPR(0x10,0x67), BASE, ARG_OPR }, + { "maskhl", OPRL(0x12,0x67),BASE, ARG_OPRL }, + { "zap", OPR(0x10,0x68), BASE, ARG_OPR }, + { "zap", OPRL(0x12,0x68),BASE, ARG_OPRL }, + { "zapnot", OPR(0x10,0x69), BASE, ARG_OPR }, + { "zapnot", OPRL(0x12,0x69),BASE, ARG_OPRL }, + { "sextb", OPR(0x10,0x6A), BASE, ARG_OPRZ1}, + { "sextb", OPRL(0x12,0x6A),BASE, ARG_OPRLZ1 }, + { "sexth", OPR(0x10,0x6B), BASE, ARG_OPRZ1 }, + { "sexth", OPRL(0x12,0x6B),BASE, ARG_OPRLZ1 }, + { "cmpgeb", OPR(0x10,0x6C), BASE, ARG_OPR }, + { "cmpgeb", OPRL(0x12,0x6C),BASE, ARG_OPRL }, + { "fimovs", OPR(0x10,0x70), BASE, { FA, ZB, RC } }, + { "fimovd", OPR(0x10,0x78), BASE, { FA, ZB, RC } }, + { "seleq", TOPR(0x11,0x0), BASE, ARG_TOPR }, + { "seleq", TOPRL(0x13,0x0),BASE, ARG_TOPRL }, + { "selge", TOPR(0x11,0x1), BASE, ARG_TOPR }, + { "selge", TOPRL(0x13,0x1),BASE, ARG_TOPRL }, + { "selgt", TOPR(0x11,0x2), BASE, ARG_TOPR }, + { "selgt", TOPRL(0x13,0x2),BASE, ARG_TOPRL }, + { "selle", TOPR(0x11,0x3), BASE, ARG_TOPR }, + { "selle", TOPRL(0x13,0x3),BASE, ARG_TOPRL }, + { "sellt", TOPR(0x11,0x4), BASE, ARG_TOPR }, + { "sellt", TOPRL(0x13,0x4),BASE, ARG_TOPRL }, + { "selne", TOPR(0x11,0x5), BASE, ARG_TOPR }, + { "selne", TOPRL(0x13,0x5),BASE, ARG_TOPRL }, + { "sellbc", TOPR(0x11,0x6), BASE, ARG_TOPR }, + { "sellbc", TOPRL(0x13,0x6),BASE, ARG_TOPRL }, + { "sellbs", TOPR(0x11,0x7), BASE, ARG_TOPR }, + { "sellbs", TOPRL(0x13,0x7),BASE, ARG_TOPRL }, + { "vlog", LOGX(0x14,0x00), BASE, ARG_FMA }, + + { "fadds", FP(0x18,0x00), BASE, ARG_FP }, + { "faddd", FP(0x18,0x01), BASE, ARG_FP }, + { "fsubs", FP(0x18,0x02), BASE, ARG_FP }, + { "fsubd", FP(0x18,0x03), BASE, ARG_FP }, + { "fmuls", FP(0x18,0x04), BASE, ARG_FP }, + { "fmuld", FP(0x18,0x05), BASE, ARG_FP }, + { "fdivs", FP(0x18,0x06), BASE, ARG_FP }, + { "fdivd", FP(0x18,0x07), BASE, ARG_FP }, + { "fsqrts", FP(0x18,0x08), BASE, ARG_FPZ1 }, + { "fsqrtd", FP(0x18,0x09), BASE, ARG_FPZ1 }, + { "fcmpeq", FP(0x18,0x10), BASE, ARG_FP }, + { "fcmple", FP(0x18,0x11), BASE, ARG_FP }, + { "fcmplt", FP(0x18,0x12), BASE, ARG_FP }, + { "fcmpun", FP(0x18,0x13), BASE, ARG_FP }, + + { "fcvtsd", FP(0x18,0x20), BASE, ARG_FPZ1 }, + { "fcvtds", FP(0x18,0x21), BASE, ARG_FPZ1 }, + { "fcvtdl_g", FP(0x18,0x22), BASE, ARG_FPZ1 }, + { "fcvtdl_p", FP(0x18,0x23), BASE, ARG_FPZ1 }, + { "fcvtdl_z", FP(0x18,0x24), BASE, ARG_FPZ1 }, + { "fcvtdl_n", FP(0x18,0x25), BASE, ARG_FPZ1 }, + { "fcvtdl", FP(0x18,0x27), BASE, ARG_FPZ1 }, + { "fcvtwl", FP(0x18,0x28), BASE, ARG_FPZ1 }, + { "fcvtlw", FP(0x18,0x29), BASE, ARG_FPZ1 }, + { "fcvtls", FP(0x18,0x2d), BASE, ARG_FPZ1 }, + { "fcvtld", FP(0x18,0x2f), BASE, ARG_FPZ1 }, + { "fcpys", FP(0x18,0x30), BASE, ARG_FP }, + { "fcpyse", FP(0x18,0x31), BASE, ARG_FP }, + { "fcpysn", FP(0x18,0x32), BASE, ARG_FP }, + { "ifmovs", FP(0x18,0x40), BASE, { RA, ZB, FC } }, + { "ifmovd", FP(0x18,0x41), BASE, { RA, ZB, FC } }, + { "rfpcr", FP(0x18,0x50), BASE, { FA, RBA, RCA } }, + { "wfpcr", FP(0x18,0x51), BASE, { FA, RBA, RCA } }, + { "setfpec0", FP(0x18,0x54), BASE, ARG_NONE }, + { "setfpec1", FP(0x18,0x55), BASE, ARG_NONE }, + { "setfpec2", FP(0x18,0x56), BASE, ARG_NONE }, + { "setfpec3", FP(0x18,0x57), BASE, ARG_NONE }, + { "fmas", FMA(0x19,0x00), BASE, ARG_FMA }, + { "fmad", FMA(0x19,0x01), BASE, ARG_FMA }, 
+ { "fmss", FMA(0x19,0x02), BASE, ARG_FMA }, + { "fmsd", FMA(0x19,0x03), BASE, ARG_FMA }, + { "fnmas", FMA(0x19,0x04), BASE, ARG_FMA }, + { "fnmad", FMA(0x19,0x05), BASE, ARG_FMA }, + { "fnmss", FMA(0x19,0x06), BASE, ARG_FMA }, + { "fnmsd", FMA(0x19,0x07), BASE, ARG_FMA }, + { "fseleq", FMA(0x19,0x10), BASE, ARG_FMA }, + { "fselne", FMA(0x19,0x11), BASE, ARG_FMA }, + { "fsellt", FMA(0x19,0x12), BASE, ARG_FMA }, + { "fselle", FMA(0x19,0x13), BASE, ARG_FMA }, + { "fselgt", FMA(0x19,0x14), BASE, ARG_FMA }, + { "fselge", FMA(0x19,0x15), BASE, ARG_FMA }, + { "vaddw", FP(0x1A,0x00), BASE, ARG_FP }, + { "vaddw", FP(0x1A,0x20), BASE, ARG_FPL }, + { "vsubw", FP(0x1A,0x01), BASE, ARG_FP }, + { "vsubw", FP(0x1A,0x21), BASE, ARG_FPL }, + { "vcmpgew", FP(0x1A,0x02), BASE, ARG_FP }, + { "vcmpgew", FP(0x1A,0x22), BASE, ARG_FPL }, + { "vcmpeqw", FP(0x1A,0x03), BASE, ARG_FP }, + { "vcmpeqw", FP(0x1A,0x23), BASE, ARG_FPL }, + { "vcmplew", FP(0x1A,0x04), BASE, ARG_FP }, + { "vcmplew", FP(0x1A,0x24), BASE, ARG_FPL }, + { "vcmpltw", FP(0x1A,0x05), BASE, ARG_FP }, + { "vcmpltw", FP(0x1A,0x25), BASE, ARG_FPL }, + { "vcmpulew", FP(0x1A,0x06), BASE, ARG_FP }, + { "vcmpulew", FP(0x1A,0x26), BASE, ARG_FPL }, + { "vcmpultw", FP(0x1A,0x07), BASE, ARG_FP }, + { "vcmpultw", FP(0x1A,0x27), BASE, ARG_FPL }, + + { "vsllw", FP(0x1A,0x08), BASE, ARG_FP }, + { "vsllw", FP(0x1A,0x28), BASE, ARG_FPL }, + { "vsrlw", FP(0x1A,0x09), BASE, ARG_FP }, + { "vsrlw", FP(0x1A,0x29), BASE, ARG_FPL }, + { "vsraw", FP(0x1A,0x0A), BASE, ARG_FP }, + { "vsraw", FP(0x1A,0x2A), BASE, ARG_FPL }, + { "vrolw", FP(0x1A,0x0B), BASE, ARG_FP }, + { "vrolw", FP(0x1A,0x2B), BASE, ARG_FPL }, + { "sllow", FP(0x1A,0x0C), BASE, ARG_FP }, + { "sllow", FP(0x1A,0x2C), BASE, ARG_FPL }, + { "srlow", FP(0x1A,0x0D), BASE, ARG_FP }, + { "srlow", FP(0x1A,0x2D), BASE, ARG_FPL }, + { "vaddl", FP(0x1A,0x0E), BASE, ARG_FP }, + { "vaddl", FP(0x1A,0x2E), BASE, ARG_FPL }, + { "vsubl", FP(0x1A,0x0F), BASE, ARG_FP }, + { "vsubl", FP(0x1A,0x2F), BASE, ARG_FPL }, + { "ctpopow", FP(0x1A,0x18), BASE, { FA, ZB, DFC1 } }, + { "ctlzow", FP(0x1A,0x19), BASE, { FA, ZB, DFC1 } }, + { "vucaddw", FP(0x1A,0x40), BASE, ARG_FP }, + { "vucaddw", FP(0x1A,0x60), BASE, ARG_FPL }, + { "vucsubw", FP(0x1A,0x41), BASE, ARG_FP }, + { "vucsubw", FP(0x1A,0x61), BASE, ARG_FPL }, + { "vucaddh", FP(0x1A,0x42), BASE, ARG_FP }, + { "vucaddh", FP(0x1A,0x62), BASE, ARG_FPL }, + { "vucsubh", FP(0x1A,0x43), BASE, ARG_FP }, + { "vucsubh", FP(0x1A,0x63), BASE, ARG_FPL }, + { "vucaddb", FP(0x1A,0x44), BASE, ARG_FP }, + { "vucaddb", FP(0x1A,0x64), BASE, ARG_FPL }, + { "vucsubb", FP(0x1A,0x45), BASE, ARG_FP }, + { "vucsubb", FP(0x1A,0x65), BASE, ARG_FPL }, + { "vadds", FP(0x1A,0x80), BASE, ARG_FP }, + { "vaddd", FP(0x1A,0x81), BASE, ARG_FP }, + { "vsubs", FP(0x1A,0x82), BASE, ARG_FP }, + { "vsubd", FP(0x1A,0x83), BASE, ARG_FP }, + { "vmuls", FP(0x1A,0x84), BASE, ARG_FP }, + { "vmuld", FP(0x1A,0x85), BASE, ARG_FP }, + { "vdivs", FP(0x1A,0x86), BASE, ARG_FP }, + { "vdivd", FP(0x1A,0x87), BASE, ARG_FP }, + { "vsqrts", FP(0x1A,0x88), BASE, ARG_FPZ1 }, + { "vsqrtd", FP(0x1A,0x89), BASE, ARG_FPZ1 }, + { "vfcmpeq", FP(0x1A,0x8C), BASE, ARG_FP }, + { "vfcmple", FP(0x1A,0x8D), BASE, ARG_FP }, + { "vfcmplt", FP(0x1A,0x8E), BASE, ARG_FP }, + { "vfcmpun", FP(0x1A,0x8F), BASE, ARG_FP }, + { "vcpys", FP(0x1A,0x90), BASE, ARG_FP }, + { "vcpyse", FP(0x1A,0x91), BASE, ARG_FP }, + { "vcpysn", FP(0x1A,0x92), BASE, ARG_FP }, + { "vmas", FMA(0x1B,0x00), BASE, ARG_FMA }, + { "vmad", FMA(0x1B,0x01), BASE, ARG_FMA }, + { "vmss", 
FMA(0x1B,0x02), BASE, ARG_FMA }, + { "vmsd", FMA(0x1B,0x03), BASE, ARG_FMA }, + { "vnmas", FMA(0x1B,0x04), BASE, ARG_FMA }, + { "vnmad", FMA(0x1B,0x05), BASE, ARG_FMA }, + { "vnmss", FMA(0x1B,0x06), BASE, ARG_FMA }, + { "vnmsd", FMA(0x1B,0x07), BASE, ARG_FMA }, + { "vfseleq", FMA(0x1B,0x10), BASE, ARG_FMA }, + { "vfsellt", FMA(0x1B,0x12), BASE, ARG_FMA }, + { "vfselle", FMA(0x1B,0x13), BASE, ARG_FMA }, + { "vseleqw", FMA(0x1B,0x18), BASE, ARG_FMA }, + { "vseleqw", FMA(0x1B,0x38), BASE, ARG_FMAL }, + { "vsellbcw", FMA(0x1B,0x19), BASE, ARG_FMA }, + { "vsellbcw", FMA(0x1B,0x39), BASE, ARG_FMAL }, + { "vselltw", FMA(0x1B,0x1A), BASE, ARG_FMA }, + { "vselltw", FMA(0x1B,0x3A), BASE, ARG_FMAL }, + { "vsellew", FMA(0x1B,0x1B), BASE, ARG_FMA }, + { "vsellew", FMA(0x1B,0x3B), BASE, ARG_FMAL }, + { "vinsw", FMA(0x1B,0x20), BASE, ARG_FMAL }, + { "vinsf", FMA(0x1B,0x21), BASE, ARG_FMAL }, + { "vextw", FMA(0x1B,0x22), BASE, { FA, FMALIT, DFC1 }}, + { "vextf", FMA(0x1B,0x23), BASE, { FA, FMALIT, DFC1 }}, + { "vcpyw", FMA(0x1B,0x24), BASE, { FA, DFC1 }}, + { "vcpyf", FMA(0x1B,0x25), BASE, { FA, DFC1 }}, + { "vconw", FMA(0x1B,0x26), BASE, ARG_FMA }, + { "vshfw", FMA(0x1B,0x27), BASE, ARG_FMA }, + { "vcons", FMA(0x1B,0x28), BASE, ARG_FMA }, + { "vcond", FMA(0x1B,0x29), BASE, ARG_FMA }, + { "vldw_u", ATMEM(0x1C,0x0), BASE, ARG_VUAMEM }, + { "vstw_u", ATMEM(0x1C,0x1), BASE, ARG_VUAMEM }, + { "vlds_u", ATMEM(0x1C,0x2), BASE, ARG_VUAMEM }, + { "vsts_u", ATMEM(0x1C,0x3), BASE, ARG_VUAMEM }, + { "vldd_u", ATMEM(0x1C,0x4), BASE, ARG_VUAMEM }, + { "vstd_u", ATMEM(0x1C,0x5), BASE, ARG_VUAMEM }, + { "vstw_ul", ATMEM(0x1C,0x8), BASE, ARG_VUAMEM }, + { "vstw_uh", ATMEM(0x1C,0x9), BASE, ARG_VUAMEM }, + { "vsts_ul", ATMEM(0x1C,0xA), BASE, ARG_VUAMEM }, + { "vsts_uh", ATMEM(0x1C,0xB), BASE, ARG_VUAMEM }, + { "vstd_ul", ATMEM(0x1C,0xC), BASE, ARG_VUAMEM }, + { "vstd_uh", ATMEM(0x1C,0xD), BASE, ARG_VUAMEM }, + { "vldd_nc", ATMEM(0x1C,0xE), BASE, ARG_VUAMEM }, + { "vstd_nc", ATMEM(0x1C,0xF), BASE, ARG_VUAMEM }, + + { "flushd", MEM(0x20), BASE, ARG_PREFETCH }, + { "ldbu", MEM(0x20), BASE, ARG_MEM }, + { "evictdg", MEM(0x21), BASE, ARG_PREFETCH }, + { "ldhu", MEM(0x21), BASE, ARG_MEM }, + { "s_fillcs", MEM(0x22), BASE, ARG_PREFETCH }, + { "ldw", MEM(0x22), BASE, ARG_MEM }, + { "s_fillde", MEM(0x23), BASE, ARG_PREFETCH }, + { "ldl", MEM(0x23), BASE, ARG_MEM }, + { "evictdl", MEM(0x24), BASE, ARG_PREFETCH }, + { "ldl_u", MEM(0x24), BASE, ARG_MEM }, + { "pri_ldw/p", HWMEM(0x25,0x0), BASE, ARG_HWMEM }, + { "pri_ldw/v", HWMEM(0x25,0x8), BASE, ARG_HWMEM }, + { "pri_ldl/p", HWMEM(0x25,0x1), BASE, ARG_HWMEM }, + { "pri_ldl/v", HWMEM(0x25,0x9), BASE, ARG_HWMEM }, + { "fillde", MEM(0x26), BASE, ARG_PREFETCH }, + { "flds", MEM(0x26), BASE, ARG_FMEM }, + { "fillde_e", MEM(0x27), BASE, ARG_PREFETCH }, + { "fldd", MEM(0x27), BASE, ARG_FMEM }, + + { "stb", MEM(0x28), BASE, ARG_MEM }, + { "sth", MEM(0x29), BASE, ARG_MEM }, + { "stw", MEM(0x2A), BASE, ARG_MEM }, + { "stl", MEM(0x2B), BASE, ARG_MEM }, + { "stl_u", MEM(0x2C), BASE, ARG_MEM }, + { "pri_stw/p", HWMEM(0x2D,0x0), BASE, ARG_HWMEM }, + { "pri_stw/v", HWMEM(0x2D,0x8), BASE, ARG_HWMEM }, + { "pri_stl/p", HWMEM(0x2D,0x1), BASE, ARG_HWMEM }, + { "pri_stl/v", HWMEM(0x2D,0x9), BASE, ARG_HWMEM }, + { "fsts", MEM(0x2E), BASE, ARG_FMEM }, + { "fstd", MEM(0x2F), BASE, ARG_FMEM }, + { "beq", BRA(0x30), BASE, ARG_BRA }, + { "bne", BRA(0x31), BASE, ARG_BRA }, + { "blt", BRA(0x32), BASE, ARG_BRA }, + { "ble", BRA(0x33), BASE, ARG_BRA }, + { "bgt", BRA(0x34), BASE, ARG_BRA }, + { "bge", BRA(0x35), 
BASE, ARG_BRA }, + { "blbc", BRA(0x36), BASE, ARG_BRA }, + { "blbs", BRA(0x37), BASE, ARG_BRA }, + + { "fbeq", BRA(0x38), BASE, ARG_FBRA }, + { "fbne", BRA(0x39), BASE, ARG_FBRA }, + { "fblt", BRA(0x3A), BASE, ARG_FBRA }, + { "fble", BRA(0x3B), BASE, ARG_FBRA }, + { "fbgt", BRA(0x3C), BASE, ARG_FBRA }, + { "fbge", BRA(0x3D), BASE, ARG_FBRA }, + { "ldi", MEM(0x3E), BASE, ARG_MEM }, + { "ldih", MEM(0x3F), BASE, ARG_MEM }, +}; + +const unsigned sw_64_num_opcodes = sizeof(sw_64_opcodes) / sizeof(*sw_64_opcodes); + +/* OSF register names. */ + +static const char * const osf_regnames[64] = { + "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", + "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp", + "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", + "t10", "t11", "ra", "t12", "at", "gp", "sp", "zero", + "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", + "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", + "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23", + "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31" +}; + +/* VMS register names. */ + +static const char * const vms_regnames[64] = { + "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", + "R8", "R9", "R10", "R11", "R12", "R13", "R14", "R15", + "R16", "R17", "R18", "R19", "R20", "R21", "R22", "R23", + "R24", "AI", "RA", "PV", "AT", "FP", "SP", "RZ", + "F0", "F1", "F2", "F3", "F4", "F5", "F6", "F7", + "F8", "F9", "F10", "F11", "F12", "F13", "F14", "F15", + "F16", "F17", "F18", "F19", "F20", "F21", "F22", "F23", + "F24", "F25", "F26", "F27", "F28", "F29", "F30", "FZ" +}; + +int print_insn_sw_64(bfd_vma memaddr, struct disassemble_info *info) +{ + static const struct sw_64_opcode *opcode_index[SW_NOPS + 1]; + const char * const * regnames; + const struct sw_64_opcode *opcode, *opcode_end; + const unsigned char *opindex; + unsigned insn, op, isa_mask; + int need_comma; + + /* Initialize the majorop table the first time through */ + if (!opcode_index[0]) { + opcode = sw_64_opcodes; + opcode_end = opcode + sw_64_num_opcodes; + + for (op = 0; op < SW_NOPS; ++op) { + opcode_index[op] = opcode; + if ((SW_LITOP (opcode->opcode) != 0x10) && (SW_LITOP (opcode->opcode) != 0x11)) { + while (opcode < opcode_end && op == SW_OP (opcode->opcode)) + ++opcode; + } else { + while (opcode < opcode_end && op == SW_LITOP (opcode->opcode)) + ++opcode; + } + } + opcode_index[op] = opcode; + } + + if (info->flavour == bfd_target_evax_flavour) + regnames = vms_regnames; + else + regnames = osf_regnames; + isa_mask = SW_OPCODE_NOHMCODE; + switch (info->mach) { + case bfd_mach_sw_64_core3: + isa_mask |= SW_OPCODE_BASE | SW_OPCODE_CORE3; + break; + } + + /* Read the insn into a host word */ + { + bfd_byte buffer[4]; + int status = (*info->read_memory_func) (memaddr, buffer, 4, info); + if (status != 0) { + (*info->memory_error_func) (status, memaddr, info); + return -1; + } + insn = bfd_getl32 (buffer); + } + + /* Get the major opcode of the instruction. */ + if ((SW_LITOP (insn) == 0x10) || (SW_LITOP (insn) == 0x11)) + op = SW_LITOP (insn); + else if ((SW_OP(insn) & 0x3C) == 0x14 ) + op = 0x14; + else + op = SW_OP (insn); + + /* Find the first match in the opcode table. */ + opcode_end = opcode_index[op + 1]; + for (opcode = opcode_index[op]; opcode < opcode_end; ++opcode) { + if ((insn ^ opcode->opcode) & opcode->mask) + continue; + + if (!(opcode->flags & isa_mask)) + continue; + + /* Make two passes over the operands. First see if any of them + have extraction functions, and, if they do, make sure the + instruction is valid. 
*/ + { + int invalid = 0; + for (opindex = opcode->operands; *opindex != 0; opindex++) { + const struct sw_64_operand *operand = sw_64_operands + *opindex; + if (operand->extract) + (*operand->extract) (insn, &invalid); + } + if (invalid) + continue; + } + + /* The instruction is valid. */ + goto found; + } + + /* No instruction found */ + (*info->fprintf_func) (info->stream, ".long %#08x", insn); + + return 4; + +found: + if (!strncmp("sys_call",opcode->name,8)) { + if (insn & (0x1 << 25)) + (*info->fprintf_func) (info->stream, "%s", "sys_call"); + else + (*info->fprintf_func) (info->stream, "%s", "sys_call/b"); + } else + (*info->fprintf_func) (info->stream, "%s", opcode->name); + + /* get zz[7:6] and zz[5:0] to form truth for vlog */ + if (!strcmp(opcode->name, "vlog")) + { + unsigned int truth; + char tr[4]; + truth=(SW_OP(insn) & 3) << 6; + truth = truth | ((insn & 0xFC00) >> 10); + sprintf(tr,"%x",truth); + (*info->fprintf_func) (info->stream, "%s", tr); + } + if (opcode->operands[0] != 0) + (*info->fprintf_func) (info->stream, "\t"); + + /* Now extract and print the operands. */ + need_comma = 0; + for (opindex = opcode->operands; *opindex != 0; opindex++) { + const struct sw_64_operand *operand = sw_64_operands + *opindex; + int value; + + /* Operands that are marked FAKE are simply ignored. We + already made sure that the extract function considered + the instruction to be valid. */ + if ((operand->flags & SW_OPERAND_FAKE) != 0) + continue; + + /* Extract the value from the instruction. */ + if (operand->extract) + value = (*operand->extract) (insn, (int *) NULL); + else { + value = (insn >> operand->shift) & ((1 << operand->bits) - 1); + if (operand->flags & SW_OPERAND_SIGNED) { + int signbit = 1 << (operand->bits - 1); + value = (value ^ signbit) - signbit; + } + } + + if (need_comma && + ((operand->flags & (SW_OPERAND_PARENS | SW_OPERAND_COMMA)) + != SW_OPERAND_PARENS)) { + (*info->fprintf_func) (info->stream, ","); + } + if (operand->flags & SW_OPERAND_PARENS) + (*info->fprintf_func) (info->stream, "("); + + /* Print the operand as directed by the flags. */ + if (operand->flags & SW_OPERAND_IR) + (*info->fprintf_func) (info->stream, "%s", regnames[value]); + else if (operand->flags & SW_OPERAND_FPR) + (*info->fprintf_func) (info->stream, "%s", regnames[value + 32]); + else if (operand->flags & SW_OPERAND_RELATIVE) + (*info->print_address_func) (memaddr + 4 + value, info); + else if (operand->flags & SW_OPERAND_SIGNED) + (*info->fprintf_func) (info->stream, "%d", value); + else + (*info->fprintf_func) (info->stream, "%#x", value); + + if (operand->flags & SW_OPERAND_PARENS) + (*info->fprintf_func) (info->stream, ")"); + need_comma = 1; + } + + return 4; +} diff --git a/docs/about/build-platforms.rst b/docs/about/build-platforms.rst index c29a4b8fe6494842db82ce3df8a91ab4b11a2a50..d893a2be1cf6bd88b15f59cb6e7ca642bbd52217 100644 --- a/docs/about/build-platforms.rst +++ b/docs/about/build-platforms.rst @@ -67,7 +67,8 @@ Non-supported architectures may be removed in the future following the Linux OS, macOS, FreeBSD, NetBSD, OpenBSD ----------------------------------------- -The project aims to support the most recent major version at all times. Support +The project aims to support the most recent major version at all times for +up to five years after its initial release. Support for the previous major version will be dropped 2 years after the new major version is released or when the vendor itself drops support, whichever comes first. 
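Stepping back to the sw_64 disassembler completed above: the default operand decode that print_insn_sw_64 applies when an operand table entry has no extract hook is worth seeing in isolation. The following is a minimal, self-contained sketch, not part of the patch; struct field and extract_default are invented names standing in for the reduced pieces of struct sw_64_operand that the formula needs.

#include <stdint.h>

/* Reduced stand-in for struct sw_64_operand: only the decode fields. */
struct field {
    unsigned bits;     /* width of the operand field */
    unsigned shift;    /* position within the instruction word */
    int is_signed;     /* stands in for SW_OPERAND_SIGNED */
};

/* Default decode used when no custom extract hook is installed:
 * mask out the field, then sign-extend when the operand is signed. */
static int extract_default(uint32_t insn, const struct field *o)
{
    int op = (insn >> o->shift) & ((1u << o->bits) - 1);

    if (o->is_signed && (op & (1 << (o->bits - 1)))) {
        op -= 1 << o->bits;    /* two's-complement sign extension */
    }
    return op;
}

This is the same computation the print loop performs inline with the signbit XOR trick; subtracting 1 << bits when the sign bit is set is an equivalent formulation.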
In this context, third-party efforts to extend the lifetime of a distro diff --git a/docs/about/deprecated.rst b/docs/about/deprecated.rst index ff7488cb63b93830f75093030add22d6f7a086c4..33925edf450e739dfa45009e01f3d364af6efb18 100644 --- a/docs/about/deprecated.rst +++ b/docs/about/deprecated.rst @@ -270,6 +270,19 @@ accepted incorrect commands will return an error. Users should make sure that all arguments passed to ``device_add`` are consistent with the documented property types. +``query-sgx`` return value member ``section-size`` (since 7.0) +'''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Member ``section-size`` in return value elements with meta-type ``uint64`` is +deprecated. Use ``sections`` instead. + + +``query-sgx-capabilities`` return value member ``section-size`` (since 7.0) +''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''''' + +Member ``section-size`` in return value elements with meta-type ``uint64`` is +deprecated. Use ``sections`` instead. + System accelerators ------------------- diff --git a/docs/can.txt b/docs/can.txt index 0d310237dfaadaed4e69f01295e98b6c9dda527c..873c95a35de9aae589631c48d0d165b7aa3de887 100644 --- a/docs/can.txt +++ b/docs/can.txt @@ -166,7 +166,7 @@ and with bitrate switch cangen can0 -b -The test can be run viceversa, generate messages in the guest system and capture them +The test can be run vice-versa, generate messages in the guest system and capture them in the host one and much more combinations. Links to other resources diff --git a/docs/conf.py b/docs/conf.py index 763e7d2434487bb558111d34f07f0b03b1ab6027..84b593e12af8a17412b731ef4366ab65b0901105 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -120,7 +120,7 @@ # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. -language = None +language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. diff --git a/docs/devel/multiple-iothreads.txt b/docs/devel/multiple-iothreads.txt index aeb997bed5f0f38e70035af60144a3a0430494df..a11576bc744d339d11b79e5de54833eba760890b 100644 --- a/docs/devel/multiple-iothreads.txt +++ b/docs/devel/multiple-iothreads.txt @@ -61,6 +61,7 @@ There are several old APIs that use the main loop AioContext: * LEGACY qemu_aio_set_event_notifier() - monitor an event notifier * LEGACY timer_new_ms() - create a timer * LEGACY qemu_bh_new() - create a BH + * LEGACY qemu_bh_new_guarded() - create a BH with a device re-entrancy guard * LEGACY qemu_aio_wait() - run an event loop iteration Since they implicitly work on the main loop they cannot be used in code that @@ -72,8 +73,14 @@ Instead, use the AioContext functions directly (see include/block/aio.h): * aio_set_event_notifier() - monitor an event notifier * aio_timer_new() - create a timer * aio_bh_new() - create a BH + * aio_bh_new_guarded() - create a BH with a device re-entrancy guard * aio_poll() - run an event loop iteration +The qemu_bh_new_guarded/aio_bh_new_guarded APIs accept a "MemReentrancyGuard" +argument, which is used to check for and prevent re-entrancy problems. For +BHs associated with devices, the reentrancy-guard is contained in the +corresponding DeviceState and named "mem_reentrancy_guard". + The AioContext can be obtained from the IOThread using iothread_get_aio_context() or for the main loop using qemu_get_aio_context(). 
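To make the guarded-BH API described above concrete, here is a hedged sketch of how a device might use it. MyDeviceState, MY_DEVICE and my_device_bh_cb are invented for the illustration; qemu_bh_new_guarded() and the mem_reentrancy_guard field of DeviceState are the documented pieces.

/* Sketch only: a device hands its embedded re-entrancy guard to the BH. */
static void my_device_realize(DeviceState *dev, Error **errp)
{
    MyDeviceState *s = MY_DEVICE(dev);   /* hypothetical device type */

    /* The guard lives in the corresponding DeviceState, as noted above. */
    s->bh = qemu_bh_new_guarded(my_device_bh_cb, s,
                                &dev->mem_reentrancy_guard);
}

With the guard in place, a BH scheduled re-entrantly from the device's own MMIO handlers is deferred rather than run nested, which is the class of problem the guarded variants exist to prevent.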
Code that takes an AioContext argument works both in IOThreads or the main diff --git a/docs/devel/reset.rst b/docs/devel/reset.rst index abea1102dc4cccad153dedef0a35fb094edbda63..7cc6a6b314020a5c2c667fe3d9e534ba15f39988 100644 --- a/docs/devel/reset.rst +++ b/docs/devel/reset.rst @@ -210,9 +210,11 @@ Polling the reset state Resettable interface provides the ``resettable_is_in_reset()`` function. This function returns true if the object parameter is currently under reset. -An object is under reset from the beginning of the *init* phase to the end of -the *exit* phase. During all three phases, the function will return that the -object is in reset. +An object is under reset from the beginning of the *enter* phase (before +either its children or its own enter method is called) to the *exit* +phase. During the *enter* and *hold* phases only, the function will return that +the object is in reset. The state is changed after the *exit* is propagated to +its children and just before calling the object's own *exit* method. This function may be used if the object behavior has to be adapted while in reset state. For example if a device has an irq input, diff --git a/docs/multi-thread-compression.txt b/docs/multi-thread-compression.txt index bb88c6bdf11c25ad9ae9c16c5838e41162fe410f..d429963cb04462d3f734a7dfd529178dbf6e858d 100644 --- a/docs/multi-thread-compression.txt +++ b/docs/multi-thread-compression.txt @@ -33,14 +33,15 @@ thread compression can be used to accelerate the compression process. The decompression speed of Zlib is at least 4 times as quick as compression, if the source and destination CPU have equal speed, -keeping the compression thread count 4 times the decompression -thread count can avoid resource waste. +and you choose Zlib as the compression method, keeping the compression +thread count 4 times the decompression thread count can avoid resource waste. Compression level can be used to control the compression speed and the -compression ratio. High compression ratio will take more time, level 0 -stands for no compression, level 1 stands for the best compression -speed, and level 9 stands for the best compression ratio. Users can -select a level number between 0 and 9. +compression ratio. A high compression ratio will take more time; +level 1 stands for the best compression speed, and a higher level means a higher +compression ratio. For Zlib, users can select a level number between 0 and 9, +where level 0 stands for no compression. For Zstd, users can select a +level number between 1 and 22. When to use the multiple thread compression in live migration @@ -116,16 +117,19 @@ to support the multiple thread compression migration: 2. Activate compression on the source: {qemu} migrate_set_capability compress on -3. Set the compression thread count on source: +3. Set the compression method: + {qemu} migrate_set_parameter compress_method zstd + +4. Set the compression thread count on the source: + {qemu} migrate_set_parameter compress_threads 12 -4. Set the compression level on the source: +5. Set the compression level on the source: {qemu} migrate_set_parameter compress_level 1 -5. Set the decompression thread count on destination: +6. Set the decompression thread count on destination: {qemu} migrate_set_parameter decompress_threads 3 -6. Start outgoing migration: +7. Start outgoing migration: {qemu} migrate -d tcp:destination.host:4444 {qemu} info migrate Capabilities: ...
compress: on @@ -136,6 +140,7 @@ The following are the default settings: compress_threads: 8 decompress_threads: 2 compress_level: 1 (which means best speed) + compress_method: zlib So, only the first two steps are required to use the multiple thread compression in migration. You can do more if the default @@ -143,7 +148,7 @@ settings are not appropriate. TODO ==== -Some faster (de)compression method such as LZ4 and Quicklz can help -to reduce the CPU consumption when doing (de)compression. If using -these faster (de)compression method, less (de)compression threads +Compared to Zlib, some faster (de)compression methods such as LZ4 +and Quicklz can help to reduce the CPU consumption when doing (de)compression. +If using these faster (de)compression methods, fewer (de)compression threads are needed when doing the migration. diff --git a/docs/specs/acpi_hw_reduced_hotplug.rst b/docs/specs/acpi_hw_reduced_hotplug.rst index 0bd3f9399fee04bc7829f6f5936a398720dc7eeb..3acd6fcd8b8fd84492391f702643b29af935b2c7 100644 --- a/docs/specs/acpi_hw_reduced_hotplug.rst +++ b/docs/specs/acpi_hw_reduced_hotplug.rst @@ -64,7 +64,8 @@ GED IO interface (4 byte access) 0: Memory hotplug event 1: System power down event 2: NVDIMM hotplug event - 3-31: Reserved + 3: CPU hotplug event + 4-31: Reserved **write_access:** diff --git a/docs/system/device-emulation.rst b/docs/system/device-emulation.rst index 19944f526cecd4a0de7d56b8fc8ee11b74ddbf79..ef299a2fcdade2684d4e8d74cdb6edbc59a61e70 100644 --- a/docs/system/device-emulation.rst +++ b/docs/system/device-emulation.rst @@ -89,3 +89,4 @@ Emulated Devices devices/vhost-user.rst devices/virtio-pmem.rst devices/vhost-user-rng.rst + devices/vhost-vdpa-generic-device.rst diff --git a/docs/system/devices/vhost-vdpa-generic-device.rst b/docs/system/devices/vhost-vdpa-generic-device.rst new file mode 100644 index 0000000000000000000000000000000000000000..25fbcac60e64b7e7fcc50bc5105e623bfbe2fcc1 --- /dev/null +++ b/docs/system/devices/vhost-vdpa-generic-device.rst @@ -0,0 +1,46 @@ + +========================= +vhost-vDPA generic device +========================= + +This document explains the usage of the vhost-vDPA generic device. + +Description +----------- + +A vDPA (virtio data path acceleration) device is a device that uses a datapath +which complies with the virtio specifications, with a vendor-specific control +path. + +QEMU provides two types of vhost-vDPA devices to enable the vDPA device: one +is type sensitive, which means QEMU needs to know the actual device type +(e.g. net, blk, scsi), and the other is called the "vhost-vDPA generic device", which +is type insensitive. + +The vhost-vDPA generic device builds on the vhost-vdpa subsystem and virtio +subsystem. It is quite small, but it can support any type of virtio device. + +Examples +-------- + +Prepare the vhost-vDPA backends first: + +:: + host# ls -l /dev/vhost-vdpa-* + crw------- 1 root root 236, 0 Nov 2 00:49 /dev/vhost-vdpa-0 + +Start QEMU with virtio-mmio bus: + +:: + host# qemu-system \ + -M microvm -m 512 -smp 2 -kernel ... -initrd ... \ + -device vhost-vdpa-device,vhostdev=/dev/vhost-vdpa-0 \ + ... 
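As a quick illustration of the GED event-selector bits documented in the acpi_hw_reduced_hotplug.rst hunk above, the following standalone sketch decodes a selector value; the enum and function names are illustrative, and only the bit positions come from the table.

    #include <stdint.h>
    #include <stdio.h>

    /* Bit positions from the GED event table above; names are made up. */
    enum {
        GED_MEM_HOTPLUG    = 1u << 0,  /* Memory hotplug event */
        GED_PWR_DOWN       = 1u << 1,  /* System power down event */
        GED_NVDIMM_HOTPLUG = 1u << 2,  /* NVDIMM hotplug event */
        GED_CPU_HOTPLUG    = 1u << 3,  /* CPU hotplug event (added above) */
    };

    static void ged_decode(uint32_t sel)
    {
        if (sel & GED_MEM_HOTPLUG)    puts("memory hotplug event");
        if (sel & GED_PWR_DOWN)       puts("system power down event");
        if (sel & GED_NVDIMM_HOTPLUG) puts("NVDIMM hotplug event");
        if (sel & GED_CPU_HOTPLUG)    puts("CPU hotplug event");
        /* bits 4-31 are reserved */
    }

    int main(void)
    {
        ged_decode(1u << 3); /* prints "CPU hotplug event" */
        return 0;
    }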
+ +Start QEMU with virtio-pci bus: + +:: + host# qemu-system \ + -M pc -m 512 -smp 2 \ + -device vhost-vdpa-device-pci,vhostdev=/dev/vhost-vdpa-0 \ + ...\ diff --git a/docs/system/i386/sgx.rst b/docs/system/i386/sgx.rst index f8fade5ac2d44b7378a87e8da14d33ae732bb20a..0f0a73f7587c44e69e028f061fed1c39bb97bdd8 100644 --- a/docs/system/i386/sgx.rst +++ b/docs/system/i386/sgx.rst @@ -141,8 +141,7 @@ To launch a SGX guest: |qemu_system_x86| \\ -cpu host,+sgx-provisionkey \\ -object memory-backend-epc,id=mem1,size=64M,prealloc=on \\ - -object memory-backend-epc,id=mem2,size=28M \\ - -M sgx-epc.0.memdev=mem1,sgx-epc.1.memdev=mem2 + -M sgx-epc.0.memdev=mem1,sgx-epc.0.node=0 Utilizing SGX in the guest requires a kernel/OS with SGX support. The support can be determined in guest by:: @@ -152,8 +151,32 @@ The support can be determined in guest by:: and SGX epc info by:: $ dmesg | grep sgx - [ 1.242142] sgx: EPC section 0x180000000-0x181bfffff - [ 1.242319] sgx: EPC section 0x181c00000-0x1837fffff + [ 0.182807] sgx: EPC section 0x140000000-0x143ffffff + [ 0.183695] sgx: [Firmware Bug]: Unable to map EPC section to online node. Fallback to the NUMA node 0. + +To launch a SGX numa guest: + +.. parsed-literal:: + + |qemu_system_x86| \\ + -cpu host,+sgx-provisionkey \\ + -object memory-backend-ram,size=2G,host-nodes=0,policy=bind,id=node0 \\ + -object memory-backend-epc,id=mem0,size=64M,prealloc=on,host-nodes=0,policy=bind \\ + -numa node,nodeid=0,cpus=0-1,memdev=node0 \\ + -object memory-backend-ram,size=2G,host-nodes=1,policy=bind,id=node1 \\ + -object memory-backend-epc,id=mem1,size=28M,prealloc=on,host-nodes=1,policy=bind \\ + -numa node,nodeid=1,cpus=2-3,memdev=node1 \\ + -M sgx-epc.0.memdev=mem0,sgx-epc.0.node=0,sgx-epc.1.memdev=mem1,sgx-epc.1.node=1 + +and SGX epc numa info by:: + + $ dmesg | grep sgx + [ 0.369937] sgx: EPC section 0x180000000-0x183ffffff + [ 0.370259] sgx: EPC section 0x184000000-0x185bfffff + + $ dmesg | grep SRAT + [ 0.009981] ACPI: SRAT: Node 0 PXM 0 [mem 0x180000000-0x183ffffff] + [ 0.009982] ACPI: SRAT: Node 1 PXM 1 [mem 0x184000000-0x185bfffff] References ---------- diff --git a/docs/system/target-arm.rst b/docs/system/target-arm.rst index 91ebc26c6dbdd0b567f1bccbd9e71e64e8765465..d3d2c28417adb175928012f06cdf3367457f39f6 100644 --- a/docs/system/target-arm.rst +++ b/docs/system/target-arm.rst @@ -84,16 +84,16 @@ undocumented; you can get a complete list by running arm/vexpress arm/aspeed arm/sabrelite + arm/highbank arm/digic arm/cubieboard arm/emcraft-sf2 - arm/highbank arm/musicpal arm/gumstix arm/mainstone arm/kzm - arm/nrf arm/nseries + arm/nrf arm/nuvoton arm/imx25-pdk arm/orangepi diff --git a/docs/tools/qemu-img.rst b/docs/tools/qemu-img.rst index d663dd92bd71ec065945ee1419ec989a56fd3f5c..1d68f9238f7bdfc32d7f7763e20265f2ac51fafe 100644 --- a/docs/tools/qemu-img.rst +++ b/docs/tools/qemu-img.rst @@ -402,7 +402,7 @@ Command description: Compare exits with ``0`` in case the images are equal and with ``1`` in case the images differ. Other exit codes mean an error occurred during execution and standard error output should contain an error message. 
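The qemu-img.rst hunk just above describes the exit-code contract of ``qemu-img compare``; here is a minimal hedged sketch of consuming it from C. The image paths are placeholders, and only the meaning of codes 0, 1 and greater-than-1 is taken from the paragraph above.

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/wait.h>

    int main(void)
    {
        /* Placeholder image names; any two images work. */
        int status = system("qemu-img compare a.img b.img");

        if (status == -1 || !WIFEXITED(status)) {
            fprintf(stderr, "failed to run qemu-img\n");
            return 1;
        }
        switch (WEXITSTATUS(status)) {
        case 0:
            puts("images are identical");
            break;
        case 1:
            puts("images differ");
            break;
        default:
            puts("error occurred during comparison");
            break;
        }
        return 0;
    }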
- The following table sumarizes all exit codes of the compare subcommand: + The following table summarizes all exit codes of the compare subcommand: 0 Images are identical (or requested help was printed) diff --git a/docs/tools/qemu-storage-daemon.rst b/docs/tools/qemu-storage-daemon.rst index 3e5a9dc0320231dfb07e66e15b04c672746b3312..9b0eaba6e5c2f4ab57a3bb288f4770af13203906 100644 --- a/docs/tools/qemu-storage-daemon.rst +++ b/docs/tools/qemu-storage-daemon.rst @@ -201,7 +201,7 @@ Export raw image file ``disk.img`` over NBD UNIX domain socket ``nbd.sock``:: --nbd-server addr.type=unix,addr.path=nbd.sock \ --export type=nbd,id=export,node-name=disk,writable=on -Export a qcow2 image file ``disk.qcow2`` as a vhosts-user-blk device over UNIX +Export a qcow2 image file ``disk.qcow2`` as a vhost-user-blk device over UNIX domain socket ``vhost-user-blk.sock``:: $ qemu-storage-daemon \ diff --git a/ebpf/ebpf_rss.c b/ebpf/ebpf_rss.c index 118c68da831d998fe2469e86fbd39ebff06781da..cee658c158b253b5c539e828b391ed66536315ef 100644 --- a/ebpf/ebpf_rss.c +++ b/ebpf/ebpf_rss.c @@ -49,7 +49,7 @@ bool ebpf_rss_load(struct EBPFRSSContext *ctx) goto error; } - bpf_program__set_socket_filter(rss_bpf_ctx->progs.tun_rss_steering_prog); + bpf_program__set_type(rss_bpf_ctx->progs.tun_rss_steering_prog, BPF_PROG_TYPE_SOCKET_FILTER); if (rss_bpf__load(rss_bpf_ctx)) { trace_ebpf_error("eBPF RSS", "can not load RSS program"); diff --git a/fsdev/virtfs-proxy-helper.c b/fsdev/virtfs-proxy-helper.c index 15c0e79b067b89a6253a680dbf0126c89d74ce47..f9e4669a5bb0a8b2a3c0f2cf0a021442408a67f4 100644 --- a/fsdev/virtfs-proxy-helper.c +++ b/fsdev/virtfs-proxy-helper.c @@ -26,6 +26,7 @@ #include "qemu/xattr.h" #include "9p-iov-marshal.h" #include "hw/9pfs/9p-proxy.h" +#include "hw/9pfs/9p-util.h" #include "fsdev/9p-iov-marshal.h" #define PROGNAME "virtfs-proxy-helper" @@ -338,6 +339,28 @@ static void resetugid(int suid, int sgid) } } +/* + * Open regular file or directory. Attempts to open any special file are + * rejected. 
+ * + * returns file descriptor or -1 on error + */ +static int open_regular(const char *pathname, int flags, mode_t mode) +{ + int fd; + + fd = open(pathname, flags, mode); + if (fd < 0) { + return fd; + } + + if (close_if_special_file(fd) < 0) { + return -1; + } + + return fd; +} + /* * send response in two parts * 1) ProxyHeader @@ -682,7 +705,7 @@ static int do_create(struct iovec *iovec) if (ret < 0) { goto unmarshal_err_out; } - ret = open(path.data, flags, mode); + ret = open_regular(path.data, flags, mode); if (ret < 0) { ret = -errno; } @@ -707,7 +730,7 @@ static int do_open(struct iovec *iovec) if (ret < 0) { goto err_out; } - ret = open(path.data, flags); + ret = open_regular(path.data, flags, 0); if (ret < 0) { ret = -errno; } diff --git a/gdb-xml/arm-neon.xml b/gdb-xml/arm-neon.xml index 9dce0a996fcc8c78085d0c9ac1431b3c8bdfa88a..d61f6b8549df8491eb3c348262ac79491f985afb 100644 --- a/gdb-xml/arm-neon.xml +++ b/gdb-xml/arm-neon.xml @@ -76,7 +76,7 @@ [changed XML element lost in extraction] diff --git a/gdb-xml/i386-32bit.xml b/gdb-xml/i386-32bit.xml index 872fcea9c25a0d9880946882c3f3875b4715d2f3..7a66a02b67e3a050dcbd735a1a076a053f80856a 100644 --- a/gdb-xml/i386-32bit.xml +++ b/gdb-xml/i386-32bit.xml @@ -110,7 +110,7 @@ [changed XML element lost in extraction] diff --git a/gdb-xml/loongarch-base64.xml b/gdb-xml/loongarch-base64.xml new file mode 100644 index 0000000000000000000000000000000000000000..2e515e0e3612c7daca6d875a1563d460457c4dcc --- /dev/null +++ b/gdb-xml/loongarch-base64.xml @@ -0,0 +1,45 @@ [45 lines of new XML content lost in extraction] diff --git a/gdb-xml/loongarch-fpu.xml b/gdb-xml/loongarch-fpu.xml new file mode 100644 index 0000000000000000000000000000000000000000..d398fe3650ffac4c022ec0df80f56ea277c4c8e4 --- /dev/null +++ b/gdb-xml/loongarch-fpu.xml @@ -0,0 +1,50 @@ [50 lines of new XML content lost in extraction] diff --git a/gdb-xml/sw64-core.xml b/gdb-xml/sw64-core.xml new file mode 100644 index 0000000000000000000000000000000000000000..24527c175b3893f5ea6c8930e60cab47c93ac061 --- /dev/null +++ b/gdb-xml/sw64-core.xml @@ -0,0 +1,43 @@ [43 lines of new XML content lost in extraction] diff --git a/hmp-commands-info.hx b/hmp-commands-info.hx index 407a1da800cfc19376dc48e1941de9346462e7f7..5dd3001af09ead0966391cc36b7b2fb2e4389e3d 100644 --- a/hmp-commands-info.hx +++ b/hmp-commands-info.hx @@ -863,6 +863,19 @@ SRST Display the vcpu dirty rate information. ERST + { + .name = "vcpu_dirty_limit", + .args_type = "", + .params = "", + .help = "show dirty page limit information of all vCPUs", + .cmd = hmp_info_vcpu_dirty_limit, + }, + +SRST + ``info vcpu_dirty_limit`` + Display the vcpu dirty page limit information. +ERST + #if defined(TARGET_I386) { .name = "sgx", diff --git a/hmp-commands.hx b/hmp-commands.hx index 70a9136ac2935b0cb2b2f132266196b5df6cd556..5bedee2d49547acfcda639f7cb6fe8636d643e76 100644 --- a/hmp-commands.hx +++ b/hmp-commands.hx @@ -1744,3 +1744,35 @@ ERST "\n\t\t\t -b to specify dirty bitmap as method of calculation)", .cmd = hmp_calc_dirty_rate, }, + +SRST +``set_vcpu_dirty_limit`` + Set the dirty page rate limit on a virtual CPU. The information about all the + virtual CPU dirty limit status can be observed with the ``info vcpu_dirty_limit`` + command. 
+ERST + + { + .name = "set_vcpu_dirty_limit", + .args_type = "dirty_rate:l,cpu_index:l?", + .params = "dirty_rate [cpu_index]", + .help = "set dirty page rate limit, use cpu_index to set limit" + "\n\t\t\t\t\t on a specified virtual cpu", + .cmd = hmp_set_vcpu_dirty_limit, + }, + +SRST +``cancel_vcpu_dirty_limit`` + Cancel the dirty page rate limit on a virtual CPU. The information about all the + virtual CPU dirty limit status can be observed with the ``info vcpu_dirty_limit`` + command. +ERST + + { + .name = "cancel_vcpu_dirty_limit", + .args_type = "cpu_index:l?", + .params = "[cpu_index]", + .help = "cancel dirty page rate limit, use cpu_index to cancel" + "\n\t\t\t\t\t limit on a specified virtual cpu", + .cmd = hmp_cancel_vcpu_dirty_limit, + }, diff --git a/hw/9pfs/9p-util.h b/hw/9pfs/9p-util.h index 546f46dc7dc636aaf4dff762468b44b622bfcfd7..23000e917fe64d8e2ea5a3924d4bccacf1e92437 100644 --- a/hw/9pfs/9p-util.h +++ b/hw/9pfs/9p-util.h @@ -13,12 +13,16 @@ #ifndef QEMU_9P_UTIL_H #define QEMU_9P_UTIL_H +#include "qemu/error-report.h" + #ifdef O_PATH #define O_PATH_9P_UTIL O_PATH #else #define O_PATH_9P_UTIL 0 #endif +#define qemu_fstat fstat + static inline void close_preserve_errno(int fd) { int serrno = errno; @@ -26,6 +30,38 @@ static inline void close_preserve_errno(int fd) errno = serrno; } +/** + * close_if_special_file() - Close @fd if neither regular file nor directory. + * + * @fd: file descriptor of open file + * Return: 0 on regular file or directory, -1 otherwise + * + * CVE-2023-2861: Prohibit opening any special file directly on host + * (especially device files), as a compromised client could potentially gain + * access outside exported tree under certain, unsafe setups. We expect + * client to handle I/O on special files exclusively on guest side. + */ +static inline int close_if_special_file(int fd) +{ + struct stat stbuf; + + if (qemu_fstat(fd, &stbuf) < 0) { + close_preserve_errno(fd); + return -1; + } + if (!S_ISREG(stbuf.st_mode) && !S_ISDIR(stbuf.st_mode)) { + error_report_once( + "9p: broken or compromised client detected; attempt to open " + "special file (i.e. neither regular file, nor directory)" + ); + close(fd); + errno = ENXIO; + return -1; + } + + return 0; +} + static inline int openat_dir(int dirfd, const char *name) { return openat(dirfd, name, @@ -56,6 +92,10 @@ again: return -1; } + if (close_if_special_file(fd) < 0) { + return -1; + } + serrno = errno; /* O_NONBLOCK was only needed to open the file. Let's drop it. 
We don't * do that with O_PATH since fcntl(F_SETFL) isn't supported, and openat() diff --git a/hw/9pfs/9p-xattr-user.c b/hw/9pfs/9p-xattr-user.c index f2ae9582e610b5f022dd6a45502d2add5a2e2a0d..535677ed609b44df5d16a2e6bdff57e0a9252fc8 100644 --- a/hw/9pfs/9p-xattr-user.c +++ b/hw/9pfs/9p-xattr-user.c @@ -27,7 +27,7 @@ static ssize_t mp_user_getxattr(FsContext *ctx, const char *path, { if (strncmp(name, "user.virtfs.", 12) == 0) { /* - * Don't allow fetch of user.virtfs namesapce + * Don't allow fetch of user.virtfs namespace * in case of mapped security */ errno = ENOATTR; @@ -49,7 +49,7 @@ static ssize_t mp_user_listxattr(FsContext *ctx, const char *path, name_size -= 12; } else { /* - * Don't allow fetch of user.virtfs namesapce + * Don't allow fetch of user.virtfs namespace * in case of mapped security */ return 0; @@ -74,7 +74,7 @@ static int mp_user_setxattr(FsContext *ctx, const char *path, const char *name, { if (strncmp(name, "user.virtfs.", 12) == 0) { /* - * Don't allow fetch of user.virtfs namesapce + * Don't allow fetch of user.virtfs namespace * in case of mapped security */ errno = EACCES; @@ -88,7 +88,7 @@ static int mp_user_removexattr(FsContext *ctx, { if (strncmp(name, "user.virtfs.", 12) == 0) { /* - * Don't allow fetch of user.virtfs namesapce + * Don't allow fetch of user.virtfs namespace * in case of mapped security */ errno = EACCES; diff --git a/hw/9pfs/9p.c b/hw/9pfs/9p.c index 15b3f4d3853d8a81da003a5ad04401322cd34970..4e52f26afee14aee645427a9d3a264709b558431 100644 --- a/hw/9pfs/9p.c +++ b/hw/9pfs/9p.c @@ -2528,6 +2528,11 @@ static void coroutine_fn v9fs_readdir(void *opaque) retval = -EINVAL; goto out_nofid; } + if (fidp->fid_type != P9_FID_DIR) { + warn_report_once("9p: bad client: T_readdir on non-directory stream"); + retval = -ENOTDIR; + goto out; + } if (!fidp->fs.dir.stream) { retval = -EINVAL; goto out; diff --git a/hw/9pfs/xen-9p-backend.c b/hw/9pfs/xen-9p-backend.c index 65c4979c3c5b58e40bfb050efebe1960219816e6..09f7c135880972546d85fd832bda8a6660d138de 100644 --- a/hw/9pfs/xen-9p-backend.c +++ b/hw/9pfs/xen-9p-backend.c @@ -60,6 +60,7 @@ typedef struct Xen9pfsDev { int num_rings; Xen9pfsRing *rings; + MemReentrancyGuard mem_reentrancy_guard; } Xen9pfsDev; static void xen_9pfs_disconnect(struct XenLegacyDevice *xendev); @@ -441,7 +442,9 @@ static int xen_9pfs_connect(struct XenLegacyDevice *xendev) xen_9pdev->rings[i].ring.out = xen_9pdev->rings[i].data + XEN_FLEX_RING_SIZE(ring_order); - xen_9pdev->rings[i].bh = qemu_bh_new(xen_9pfs_bh, &xen_9pdev->rings[i]); + xen_9pdev->rings[i].bh = qemu_bh_new_guarded(xen_9pfs_bh, + &xen_9pdev->rings[i], + &xen_9pdev->mem_reentrancy_guard); xen_9pdev->rings[i].out_cons = 0; xen_9pdev->rings[i].out_size = 0; xen_9pdev->rings[i].inprogress = false; diff --git a/hw/Kconfig b/hw/Kconfig index ad20cce0a953ebba3f2ed0579d1305c4614a8193..5f3957be0f1812de7b78066558bd52a4266a342d 100644 --- a/hw/Kconfig +++ b/hw/Kconfig @@ -63,6 +63,7 @@ source sparc/Kconfig source sparc64/Kconfig source tricore/Kconfig source xtensa/Kconfig +source sw64/Kconfig # Symbols used by multiple targets config TEST_DEVICES diff --git a/hw/acpi/Kconfig b/hw/acpi/Kconfig index 622b0b50b7582888911375ec1f94d771f099a0c6..245c5554df1e155a397aac6e62d6dfebed4b3c5b 100644 --- a/hw/acpi/Kconfig +++ b/hw/acpi/Kconfig @@ -15,6 +15,14 @@ config ACPI_X86_ICH bool select ACPI_X86 +config ACPI_LOONGARCH + bool + select ACPI + select ACPI_CPU_HOTPLUG + select ACPI_MEMORY_HOTPLUG + select ACPI_PIIX4 + select ACPI_PCIHP + config ACPI_CPU_HOTPLUG bool diff --git 
a/hw/acpi/aml-build.c b/hw/acpi/aml-build.c index b3b3310df329630f76df888a4cb8e589ca1a59e8..0a9981acc1615eba026845e374a914f7e7be4b40 100644 --- a/hw/acpi/aml-build.c +++ b/hw/acpi/aml-build.c @@ -47,7 +47,7 @@ static void build_prepend_byte(GArray *array, uint8_t val) g_array_prepend_val(array, val); } -static void build_append_byte(GArray *array, uint8_t val) +void build_append_byte(GArray *array, uint8_t val) { g_array_append_val(array, val); } @@ -1554,6 +1554,28 @@ Aml *aml_sleep(uint64_t msec) return var; } +/* ACPI 5.0b: 6.4.3.7 Generic Register Descriptor */ +Aml *aml_generic_register(AmlRegionSpace rs, uint8_t reg_width, + uint8_t reg_offset, AmlAccessType type, uint64_t addr) +{ + int i; + Aml *var = aml_alloc(); + build_append_byte(var->buf, 0x82); /* Generic Register Descriptor */ + build_append_byte(var->buf, 0x0C); /* Length, bits[7:0] value = 0x0C */ + build_append_byte(var->buf, 0); /* Length, bits[15:8] value = 0 */ + build_append_byte(var->buf, rs); /* Address Space ID */ + build_append_byte(var->buf, reg_width); /* Register Bit Width */ + build_append_byte(var->buf, reg_offset); /* Register Bit Offset */ + build_append_byte(var->buf, type); /* Access Size */ + + /* Register address */ + for (i = 0; i < 8; i++) { + build_append_byte(var->buf, extract64(addr, i * 8, 8)); + } + + return var; +} + static uint8_t Hex2Byte(const char *src) { int hi, lo; @@ -1968,10 +1990,10 @@ void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, * ACPI spec, Revision 6.3 * 5.2.29.1 Processor hierarchy node structure (Type 0) */ -static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags, - uint32_t parent, uint32_t id, - uint32_t *priv_rsrc, - uint32_t priv_num) +void build_processor_hierarchy_node(GArray *tbl, uint32_t flags, + uint32_t parent, uint32_t id, + uint32_t *priv_rsrc, + uint32_t priv_num) { int i; @@ -2001,7 +2023,11 @@ static void build_processor_hierarchy_node(GArray *tbl, uint32_t flags, void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id) { - int pptt_start = table_data->len; + MachineClass *mc = MACHINE_GET_CLASS(ms); + GQueue *list = g_queue_new(); + guint pptt_start = table_data->len; + guint parent_offset; + guint length, i; int uid = 0; int socket; AcpiTable table = { .sig = "PPTT", .rev = 2, @@ -2010,9 +2036,8 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, acpi_table_begin(&table, table_data); for (socket = 0; socket < ms->smp.sockets; socket++) { - uint32_t socket_offset = table_data->len - pptt_start; - int core; - + g_queue_push_tail(list, + GUINT_TO_POINTER(table_data->len - pptt_start)); build_processor_hierarchy_node( table_data, /* @@ -2021,35 +2046,64 @@ void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, */ (1 << 0), 0, socket, NULL, 0); + } - for (core = 0; core < ms->smp.cores; core++) { - uint32_t core_offset = table_data->len - pptt_start; - int thread; + if (mc->smp_props.clusters_supported) { + length = g_queue_get_length(list); + for (i = 0; i < length; i++) { + int cluster; + parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list)); + for (cluster = 0; cluster < ms->smp.clusters; cluster++) { + g_queue_push_tail(list, + GUINT_TO_POINTER(table_data->len - pptt_start)); + build_processor_hierarchy_node( + table_data, + (0 << 0), /* not a physical package */ + parent_offset, cluster, NULL, 0); + } + } + } + + length = g_queue_get_length(list); + for (i = 0; i < length; i++) { + int core; + + parent_offset = 
GPOINTER_TO_UINT(g_queue_pop_head(list)); + for (core = 0; core < ms->smp.cores; core++) { if (ms->smp.threads > 1) { + g_queue_push_tail(list, + GUINT_TO_POINTER(table_data->len - pptt_start)); build_processor_hierarchy_node( table_data, (0 << 0), /* not a physical package */ - socket_offset, core, NULL, 0); - - for (thread = 0; thread < ms->smp.threads; thread++) { - build_processor_hierarchy_node( - table_data, - (1 << 1) | /* ACPI Processor ID valid */ - (1 << 2) | /* Processor is a Thread */ - (1 << 3), /* Node is a Leaf */ - core_offset, uid++, NULL, 0); - } + parent_offset, core, NULL, 0); } else { build_processor_hierarchy_node( table_data, (1 << 1) | /* ACPI Processor ID valid */ (1 << 3), /* Node is a Leaf */ - socket_offset, uid++, NULL, 0); + parent_offset, uid++, NULL, 0); } } } + length = g_queue_get_length(list); + for (i = 0; i < length; i++) { + int thread; + + parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list)); + for (thread = 0; thread < ms->smp.threads; thread++) { + build_processor_hierarchy_node( + table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 2) | /* Processor is a Thread */ + (1 << 3), /* Node is a Leaf */ + parent_offset, uid++, NULL, 0); + } + } + + g_queue_free(list); acpi_table_end(linker, &table); } diff --git a/hw/acpi/core.c b/hw/acpi/core.c index 1e004d0078d506147536b6ad6512af054fb70147..a2d790d432af13d10e2ccf4c5e3cdd77639ce716 100644 --- a/hw/acpi/core.c +++ b/hw/acpi/core.c @@ -24,6 +24,7 @@ #include "hw/acpi/acpi.h" #include "hw/nvram/fw_cfg.h" #include "qemu/config-file.h" +#include "qemu/log.h" #include "qapi/error.h" #include "qapi/opts-visitor.h" #include "qapi/qapi-events-run-state.h" @@ -345,8 +346,8 @@ int acpi_get_slic_oem(AcpiSlicOem *oem) struct acpi_table_header *hdr = (void *)(u - sizeof(hdr->_length)); if (memcmp(hdr->sig, "SLIC", 4) == 0) { - oem->id = hdr->oem_id; - oem->table_id = hdr->oem_table_id; + oem->id = g_strndup(hdr->oem_id, 6); + oem->table_id = g_strndup(hdr->oem_table_id, 8); return 0; } } @@ -560,13 +561,16 @@ static void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val) uint16_t sus_typ = (val >> 10) & 7; switch (sus_typ) { case 0: /* soft power off */ + qemu_log("VM will be soft power off\n"); qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); break; case 1: + qemu_log("VM will be suspend state\n"); qemu_system_suspend_request(); break; default: if (sus_typ == ar->pm1.cnt.s4_val) { /* S4 request */ + qemu_log("VM will be S4 state\n"); qapi_event_send_suspend_disk(); qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } diff --git a/hw/acpi/cpu.c b/hw/acpi/cpu.c index b20903ea303f27130fb3d570278ddf1322ae5a2c..f9ce0a7f41861aae1f6ac02f759ceaf49b643ff7 100644 --- a/hw/acpi/cpu.c +++ b/hw/acpi/cpu.c @@ -6,7 +6,6 @@ #include "trace.h" #include "sysemu/numa.h" -#define ACPI_CPU_HOTPLUG_REG_LEN 12 #define ACPI_CPU_SELECTOR_OFFSET_WR 0 #define ACPI_CPU_FLAGS_OFFSET_RW 4 #define ACPI_CPU_CMD_OFFSET_WR 5 @@ -343,7 +342,8 @@ const VMStateDescription vmstate_cpu_hotplug = { void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, hwaddr io_base, const char *res_root, - const char *event_handler_method) + const char *event_handler_method, + AmlRegionSpace rs) { Aml *ifctx; Aml *field; @@ -371,13 +371,18 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, aml_append(cpu_ctrl_dev, aml_mutex(CPU_LOCK, 0)); crs = aml_resource_template(); - aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1, - ACPI_CPU_HOTPLUG_REG_LEN)); + if (rs == 
AML_SYSTEM_IO) { + aml_append(crs, aml_io(AML_DECODE16, io_base, io_base, 1, + ACPI_CPU_HOTPLUG_REG_LEN)); + } else { + aml_append(crs, aml_memory32_fixed(io_base, + ACPI_CPU_HOTPLUG_REG_LEN, AML_READ_WRITE)); + } aml_append(cpu_ctrl_dev, aml_name_decl("_CRS", crs)); /* declare CPU hotplug MMIO region with related access fields */ aml_append(cpu_ctrl_dev, - aml_operation_region("PRST", AML_SYSTEM_IO, aml_int(io_base), + aml_operation_region("PRST", rs, aml_int(io_base), ACPI_CPU_HOTPLUG_REG_LEN)); field = aml_field("PRST", AML_BYTE_ACC, AML_NOLOCK, @@ -663,6 +668,11 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, aml_append(dev, aml_name_decl("_UID", uid)); } + assert(adevc); + if (adevc->cpu_cppc) { + adevc->cpu_cppc(adev, i, arch_ids->len, dev); + } + method = aml_method("_STA", 0, AML_SERIALIZED); aml_append(method, aml_return(aml_call1(CPU_STS_METHOD, uid))); aml_append(dev, method); @@ -703,9 +713,11 @@ void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, aml_append(sb_scope, cpus_dev); aml_append(table, sb_scope); - method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED); - aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD)); - aml_append(table, method); + if (event_handler_method) { + method = aml_method(event_handler_method, 0, AML_NOTSERIALIZED); + aml_append(method, aml_call0("\\_SB.CPUS." CPU_SCAN_METHOD)); + aml_append(table, method); + } g_free(cphp_res_path); } diff --git a/hw/acpi/cpu_hotplug.c b/hw/acpi/cpu_hotplug.c index 53654f8638306bda80874a5e7d850f5c70f99c47..ff14c3f4106f7db41702ef9a38b9a63fc23e7e45 100644 --- a/hw/acpi/cpu_hotplug.c +++ b/hw/acpi/cpu_hotplug.c @@ -52,6 +52,9 @@ static const MemoryRegionOps AcpiCpuHotplug_ops = { .endianness = DEVICE_LITTLE_ENDIAN, .valid = { .min_access_size = 1, + .max_access_size = 4, + }, + .impl = { .max_access_size = 1, }, }; diff --git a/hw/acpi/cpufreq.c b/hw/acpi/cpufreq.c new file mode 100644 index 0000000000000000000000000000000000000000..a76f7b8fa2c86e790f6e66d73093e388e9bd8a21 --- /dev/null +++ b/hw/acpi/cpufreq.c @@ -0,0 +1,280 @@ +/* + * ACPI CPPC register device + * + * Support for showing CPU frequency in guest OS. + * + * Copyright (c) 2019 HUAWEI TECHNOLOGIES CO.,LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "chardev/char.h" +#include "qemu/log.h" +#include "trace.h" +#include "qemu/option.h" +#include "sysemu/sysemu.h" +#include "hw/acpi/acpi-defs.h" +#include "qemu/cutils.h" +#include "qemu/error-report.h" +#include "hw/boards.h" + +#define TYPE_CPUFREQ "cpufreq" +#define CPUFREQ(obj) OBJECT_CHECK(CpuhzState, (obj), TYPE_CPUFREQ) +#define NOMINAL_FREQ_FILE "/sys/devices/system/cpu/cpu0/acpi_cppc/nominal_freq" +#define CPU_MAX_FREQ_FILE "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq" +#define HZ_MAX_LENGTH 1024 +#define MAX_SUPPORT_SPACE 0x10000 + +/* + * Since Hi1616 will not support CPPC, we simply use its nominal frequency as + * the default. + */ +#define DEFAULT_HZ 2400 + +int cppc_regs_offset[CPPC_REG_COUNT] = { + [HIGHEST_PERF] = 0, + [NOMINAL_PERF] = 4, + [LOW_NON_LINEAR_PERF] = 8, + [LOWEST_PERF] = 12, + [GUARANTEED_PERF] = 16, + [DESIRED_PERF] = 20, + [MIN_PERF] = -1, + [MAX_PERF] = -1, + [PERF_REDUC_TOLERANCE] = -1, + [TIME_WINDOW] = -1, + [CTR_WRAP_TIME] = -1, + [REFERENCE_CTR] = 24, + [DELIVERED_CTR] = 32, + [PERF_LIMITED] = 40, + [ENABLE] = -1, + [AUTO_SEL_ENABLE] = -1, + [AUTO_ACT_WINDOW] = -1, + [ENERGY_PERF] = -1, + [REFERENCE_PERF] = -1, + [LOWEST_FREQ] = 44, + [NOMINAL_FREQ] = 48, +}; + +typedef struct CpuhzState { + SysBusDevice parent_obj; + + MemoryRegion iomem; + uint32_t HighestPerformance; + uint32_t NominalPerformance; + uint32_t LowestNonlinearPerformance; + uint32_t LowestPerformance; + uint32_t GuaranteedPerformance; + uint32_t DesiredPerformance; + uint64_t ReferencePerformanceCounter; + uint64_t DeliveredPerformanceCounter; + uint32_t PerformanceLimited; + uint32_t LowestFreq; + uint32_t NominalFreq; + uint32_t num_cpu; + uint32_t reg_size; +} CpuhzState; + + +static uint64_t cpufreq_read(void *opaque, hwaddr offset, unsigned size) +{ + CpuhzState *s = (CpuhzState *)opaque; + uint64_t r; + uint64_t n; + + if (offset >= s->num_cpu * CPPC_REG_PER_CPU_STRIDE) { + warn_report("cpufreq_read: offset 0x%lx out of range", offset); + return 0; + } + + n = offset % CPPC_REG_PER_CPU_STRIDE; + switch (n) { + case 0: + r = s->HighestPerformance; + break; + case 4: + r = s->NominalPerformance; + break; + case 8: + r = s->LowestNonlinearPerformance; + break; + case 12: + r = s->LowestPerformance; + break; + case 16: + r = s->GuaranteedPerformance; + break; + case 20: + r = s->DesiredPerformance; + break; + /* + * We don't have real counters and it is hard to emulate, so always set the + * counter value to 1 to rely on Linux to use the DesiredPerformance value + * directly. + */ + case 24: + r = s->ReferencePerformanceCounter; + break; + /* + * The guest may still access the register 32 bits at a time; handle those + * offsets to eliminate unnecessary warnings. 
+ */ + case 28: + r = s->ReferencePerformanceCounter >> 32; + break; + case 32: + r = s->DeliveredPerformanceCounter; + break; + case 36: + r = s->DeliveredPerformanceCounter >> 32; + break; + + case 40: + r = s->PerformanceLimited; + break; + case 44: + r = s->LowestFreq; + break; + case 48: + r = s->NominalFreq; + break; + default: + error_printf("cpufreq_read: Bad offset 0x%lx\n", offset); + r = 0; + break; + } + return r; +} + +static void cpufreq_write(void *opaque, hwaddr offset, + uint64_t value, unsigned size) +{ + CpuhzState *s = CPUFREQ(opaque); + uint64_t n; + + if (offset >= s->num_cpu * CPPC_REG_PER_CPU_STRIDE) { + error_printf("cpufreq_write: offset 0x%lx out of range", offset); + return; + } + + n = offset % CPPC_REG_PER_CPU_STRIDE; + + switch (n) { + case 20: + break; + default: + error_printf("cpufreq_write: Bad offset 0x%lx\n", offset); + } +} + +static uint32_t CPPC_Read(const char *hostpath) +{ + int fd; + char buffer[HZ_MAX_LENGTH] = { 0 }; + uint64_t hz; + int len; + const char *endptr = NULL; + int ret; + + fd = qemu_open_old(hostpath, O_RDONLY); + if (fd < 0) { + return 0; + } + + len = read(fd, buffer, HZ_MAX_LENGTH); + qemu_close(fd); + if (len <= 0) { + return 0; + } + ret = qemu_strtoul(buffer, &endptr, 0, &hz); + if (ret < 0) { + return 0; + } + return (uint32_t)hz; +} + +static const MemoryRegionOps cpufreq_ops = { + .read = cpufreq_read, + .write = cpufreq_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static void hz_init(CpuhzState *s) +{ + uint32_t hz; + + hz = CPPC_Read(NOMINAL_FREQ_FILE); + if (hz == 0) { + hz = CPPC_Read(CPU_MAX_FREQ_FILE); + if (hz == 0) { + hz = DEFAULT_HZ; + } else { + /* Value in CpuMaxFrequency is in KHz unit; convert to MHz */ + hz = hz / 1000; + } + } + + s->HighestPerformance = hz; + s->NominalPerformance = hz; + s->LowestNonlinearPerformance = hz; + s->LowestPerformance = hz; + s->GuaranteedPerformance = hz; + s->DesiredPerformance = hz; + s->ReferencePerformanceCounter = 1; + s->DeliveredPerformanceCounter = 1; + s->PerformanceLimited = 0; + s->LowestFreq = hz; + s->NominalFreq = hz; +} + +static void cpufreq_init(Object *obj) +{ + SysBusDevice *sbd = SYS_BUS_DEVICE(obj); + CpuhzState *s = CPUFREQ(obj); + + MachineState *ms = MACHINE(qdev_get_machine()); + s->num_cpu = ms->smp.max_cpus; + + s->reg_size = s->num_cpu * CPPC_REG_PER_CPU_STRIDE; + if (s->reg_size > MAX_SUPPORT_SPACE) { + error_report("Required space 0x%x exceeds the max supported 0x%x", + s->reg_size, MAX_SUPPORT_SPACE); + goto err_end; + } + + memory_region_init_io(&s->iomem, OBJECT(s), &cpufreq_ops, s, "cpufreq", + s->reg_size); + sysbus_init_mmio(sbd, &s->iomem); + hz_init(s); + return; + +err_end: + /* Set desired perf register offset to -1 to indicate no support for CPPC */ + cppc_regs_offset[DESIRED_PERF] = -1; +} + +static const TypeInfo cpufreq_arm_info = { + .name = TYPE_CPUFREQ, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(CpuhzState), + .instance_init = cpufreq_init, +}; + +static void cpufreq_register_types(void) +{ + type_register_static(&cpufreq_arm_info); +} + +type_init(cpufreq_register_types) diff --git a/hw/acpi/generic_event_device.c b/hw/acpi/generic_event_device.c index e28457a7d103f58ce8f12b38b55af2a266fb1205..9daf4f9f13b980e7e8683d4a8dff31f257f917d2 100644 --- a/hw/acpi/generic_event_device.c +++ b/hw/acpi/generic_event_device.c @@ -25,6 +25,7 @@ static const uint32_t ged_supported_events[] = { ACPI_GED_MEM_HOTPLUG_EVT, ACPI_GED_PWR_DOWN_EVT, ACPI_GED_NVDIMM_HOTPLUG_EVT, + ACPI_GED_CPU_HOTPLUG_EVT, }; /* @@ -117,6 +118,9 @@ 
void build_ged_aml(Aml *table, const char *name, HotplugHandler *hotplug_dev, aml_notify(aml_name("\\_SB.NVDR"), aml_int(0x80))); break; + case ACPI_GED_CPU_HOTPLUG_EVT: + aml_append(if_ctx, aml_call0("\\_SB.CPUS.CSCN")); + break; default: /* * Please make sure all the events in ged_supported_events[] @@ -197,9 +201,9 @@ static void ged_regs_write(void *opaque, hwaddr addr, uint64_t data, switch (addr) { case ACPI_GED_REG_SLEEP_CTL: - slp_typ = (data >> 2) & 0x07; - slp_en = (data >> 5) & 0x01; - if (slp_en && slp_typ == 5) { + slp_typ = (data >> ACPI_GED_SLP_TYP_POS) & ACPI_GED_SLP_TYP_MASK; + slp_en = !!(data & ACPI_GED_SLP_EN); + if (slp_en && slp_typ == ACPI_GED_SLP_TYP_S5) { qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } return; @@ -234,6 +238,8 @@ static void acpi_ged_device_plug_cb(HotplugHandler *hotplug_dev, } else { acpi_memory_plug_cb(hotplug_dev, &s->memhp_state, dev, errp); } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + acpi_cpu_plug_cb(hotplug_dev, &s->cpuhp_state, dev, errp); } else { error_setg(errp, "virt: device plug request for unsupported device" " type: %s", object_get_typename(OBJECT(dev))); @@ -267,6 +273,14 @@ static void acpi_ged_unplug_cb(HotplugHandler *hotplug_dev, } } +static void acpi_ged_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list) +{ + AcpiGedState *s = ACPI_GED(adev); + + acpi_memory_ospm_status(&s->memhp_state, list); + acpi_cpu_ospm_status(&s->cpuhp_state, list); +} + static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev) { AcpiGedState *s = ACPI_GED(adev); @@ -279,6 +293,8 @@ static void acpi_ged_send_event(AcpiDeviceIf *adev, AcpiEventStatusBits ev) sel = ACPI_GED_PWR_DOWN_EVT; } else if (ev & ACPI_NVDIMM_HOTPLUG_STATUS) { sel = ACPI_GED_NVDIMM_HOTPLUG_EVT; + } else if (ev & ACPI_CPU_HOTPLUG_STATUS) { + sel = ACPI_GED_CPU_HOTPLUG_EVT; } else { /* Unknown event. Return without generating interrupt. */ warn_report("GED: Unsupported event %d. No irq injected", ev); @@ -311,6 +327,16 @@ static const VMStateDescription vmstate_memhp_state = { } }; +static const VMStateDescription vmstate_cpuhp_state = { + .name = "acpi-ged/cpuhp", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_CPU_HOTPLUG(cpuhp_state, AcpiGedState), + VMSTATE_END_OF_LIST() + } +}; + static const VMStateDescription vmstate_ged_state = { .name = "acpi-ged-state", .version_id = 1, @@ -360,6 +386,7 @@ static const VMStateDescription vmstate_acpi_ged = { .subsections = (const VMStateDescription * []) { &vmstate_memhp_state, &vmstate_ghes_state, + &vmstate_cpuhp_state, NULL } }; @@ -370,6 +397,7 @@ static void acpi_ged_initfn(Object *obj) AcpiGedState *s = ACPI_GED(dev); SysBusDevice *sbd = SYS_BUS_DEVICE(obj); GEDState *ged_st = &s->ged_state; + MachineClass *mc; memory_region_init_io(&ged_st->evt, obj, &ged_evt_ops, ged_st, TYPE_ACPI_GED, ACPI_GED_EVT_SEL_LEN); @@ -393,6 +421,21 @@ static void acpi_ged_initfn(Object *obj) memory_region_init_io(&ged_st->regs, obj, &ged_regs_ops, ged_st, TYPE_ACPI_GED "-regs", ACPI_GED_REG_COUNT); sysbus_init_mmio(sbd, &ged_st->regs); + + mc = MACHINE_GET_CLASS(qdev_get_machine()); + if (!mc->possible_cpu_arch_ids) { + /* + * MachineClass should support possible_cpu_arch_ids in + * cpu_hotplug_hw_init below. 
+ */ + return; + } + + memory_region_init(&s->container_cpuhp, OBJECT(dev), "cpuhp container", + ACPI_CPU_HOTPLUG_REG_LEN); + sysbus_init_mmio(sbd, &s->container_cpuhp); + cpu_hotplug_hw_init(&s->container_cpuhp, OBJECT(dev), + &s->cpuhp_state, 0); } static void acpi_ged_class_init(ObjectClass *class, void *data) @@ -409,6 +452,7 @@ static void acpi_ged_class_init(ObjectClass *class, void *data) hc->unplug_request = acpi_ged_unplug_request_cb; hc->unplug = acpi_ged_unplug_cb; + adevc->ospm_status = acpi_ged_ospm_status; adevc->send_event = acpi_ged_send_event; } diff --git a/hw/acpi/hmat.c b/hw/acpi/hmat.c index 6913ebf73088e0f285dfea294e0dd00fdd5dbebd..8af753c67b77c9a8e9824bab6a9e9c6d524e8ba4 100644 --- a/hw/acpi/hmat.c +++ b/hw/acpi/hmat.c @@ -77,6 +77,7 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, uint32_t *initiator_list) { int i, index; + uint32_t initiator_to_index[MAX_NODES] = {}; HMAT_LB_Data *lb_data; uint16_t *entry_list; uint32_t base; @@ -120,6 +121,8 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, /* Initiator Proximity Domain List */ for (i = 0; i < num_initiator; i++) { build_append_int_noprefix(table_data, initiator_list[i], 4); + /* Reverse mapping for array positions */ + initiator_to_index[initiator_list[i]] = i; } /* Target Proximity Domain List */ @@ -131,7 +134,8 @@ static void build_hmat_lb(GArray *table_data, HMAT_LB_Info *hmat_lb, entry_list = g_malloc0(num_initiator * num_target * sizeof(uint16_t)); for (i = 0; i < hmat_lb->list->len; i++) { lb_data = &g_array_index(hmat_lb->list, HMAT_LB_Data, i); - index = lb_data->initiator * num_target + lb_data->target; + index = initiator_to_index[lb_data->initiator] * num_target + + lb_data->target; entry_list[index] = (uint16_t)(lb_data->data / hmat_lb->base); } diff --git a/hw/acpi/larch_7a.c b/hw/acpi/larch_7a.c new file mode 100644 index 0000000000000000000000000000000000000000..59b43170ff03cf367af3acf2fc991dc07a89c33e --- /dev/null +++ b/hw/acpi/larch_7a.c @@ -0,0 +1,616 @@ +/* + * Loongarch acpi emulation + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "qemu/osdep.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" +#include "sysemu/reset.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/ls7a.h" +#include "hw/nvram/fw_cfg.h" +#include "qemu/config-file.h" +#include "qapi/opts-visitor.h" +#include "qapi/qapi-events-run-state.h" +#include "qapi/error.h" +#include "hw/loongarch/ls7a.h" +#include "hw/mem/pc-dimm.h" +#include "hw/mem/nvdimm.h" +#include "migration/vmstate.h" + +static void ls7a_pm_update_sci_fn(ACPIREGS *regs) +{ + LS7APCIPMRegs *pm = container_of(regs, LS7APCIPMRegs, acpi_regs); + acpi_update_sci(&pm->acpi_regs, pm->irq); +} + +static uint64_t ls7a_gpe_readb(void *opaque, hwaddr addr, unsigned width) +{ + LS7APCIPMRegs *pm = opaque; + return acpi_gpe_ioport_readb(&pm->acpi_regs, addr); +} + +static void ls7a_gpe_writeb(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + LS7APCIPMRegs *pm = opaque; + acpi_gpe_ioport_writeb(&pm->acpi_regs, addr, val); + acpi_update_sci(&pm->acpi_regs, pm->irq); +} + +static const MemoryRegionOps ls7a_gpe_ops = { + .read = ls7a_gpe_readb, + .write = ls7a_gpe_writeb, + .valid.min_access_size = 1, + .valid.max_access_size = 8, + .impl.min_access_size = 1, + .impl.max_access_size = 1, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +#define VMSTATE_GPE_ARRAY(_field, _state) \ +{ \ + .name = (stringify(_field)), .version_id = 0, .num = ACPI_GPE0_LEN, \ + .info = &vmstate_info_uint8, .size = sizeof(uint8_t), \ + .flags = VMS_ARRAY | VMS_POINTER, \ + .offset = vmstate_offset_pointer(_state, _field, uint8_t), \ +} + +static uint64_t ls7a_reset_readw(void *opaque, hwaddr addr, unsigned width) +{ + return 0; +} + +static void ls7a_reset_writew(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + if (val & 1) { + qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); + } +} + +static const MemoryRegionOps ls7a_reset_ops = { + .read = ls7a_reset_readw, + .write = ls7a_reset_writew, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static bool vmstate_test_use_memhp(void *opaque) +{ + LS7APCIPMRegs *s = opaque; + return s->acpi_memory_hotplug.is_enabled; +} + +static const VMStateDescription vmstate_memhp_state = { + .name = "ls7a_pm/memhp", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .needed = vmstate_test_use_memhp, + .fields = (VMStateField[]){ VMSTATE_MEMORY_HOTPLUG(acpi_memory_hotplug, + LS7APCIPMRegs), + VMSTATE_END_OF_LIST() } +}; + +static const VMStateDescription vmstate_cpuhp_state = { + .name = "ls7a_pm/cpuhp", + .version_id = 1, + .minimum_version_id = 1, + .minimum_version_id_old = 1, + .fields = + (VMStateField[]){ VMSTATE_CPU_HOTPLUG(cpuhp_state, LS7APCIPMRegs), + VMSTATE_END_OF_LIST() } +}; + +const VMStateDescription vmstate_ls7a_pm = { + .name = "ls7a_pm", + .version_id = 1, + .minimum_version_id = 1, + .fields = + (VMStateField[]){ + VMSTATE_UINT16(acpi_regs.pm1.evt.sts, LS7APCIPMRegs), + VMSTATE_UINT16(acpi_regs.pm1.evt.en, LS7APCIPMRegs), + VMSTATE_UINT16(acpi_regs.pm1.cnt.cnt, LS7APCIPMRegs), + VMSTATE_TIMER_PTR(acpi_regs.tmr.timer, LS7APCIPMRegs), + VMSTATE_INT64(acpi_regs.tmr.overflow_time, LS7APCIPMRegs), + VMSTATE_GPE_ARRAY(acpi_regs.gpe.sts, LS7APCIPMRegs), + VMSTATE_GPE_ARRAY(acpi_regs.gpe.en, LS7APCIPMRegs), + VMSTATE_END_OF_LIST() }, + .subsections = (const VMStateDescription *[]){ &vmstate_memhp_state, + &vmstate_cpuhp_state, NULL } +}; + +static inline int64_t acpi_pm_tmr_get_clock(void) +{ + return 
muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY, + NANOSECONDS_PER_SECOND); +} + +static uint32_t acpi_pm_tmr_get(ACPIREGS *ar) +{ + uint32_t d = acpi_pm_tmr_get_clock(); + return d & 0xffffff; +} + +static void acpi_pm_tmr_timer(void *opaque) +{ + ACPIREGS *ar = opaque; + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_PMTIMER, NULL); + ar->tmr.update_sci(ar); +} + +static uint64_t acpi_pm_tmr_read(void *opaque, hwaddr addr, unsigned width) +{ + return acpi_pm_tmr_get(opaque); +} + +static void acpi_pm_tmr_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + /* nothing */ +} + +static const MemoryRegionOps acpi_pm_tmr_ops = { + .read = acpi_pm_tmr_read, + .write = acpi_pm_tmr_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ls7a_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci, + MemoryRegion *parent, uint64_t offset) +{ + ar->tmr.update_sci = update_sci; + ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar); + memory_region_init_io(&ar->tmr.io, memory_region_owner(parent), + &acpi_pm_tmr_ops, ar, "acpi-tmr", 4); + memory_region_add_subregion(parent, offset, &ar->tmr.io); +} + +static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val) +{ + uint16_t pm1_sts = acpi_pm1_evt_get_sts(ar); + if (pm1_sts & val & ACPI_BITMASK_TIMER_STATUS) { + /* if TMRSTS is reset, then compute the new overflow time */ + acpi_pm_tmr_calc_overflow_time(ar); + } + ar->pm1.evt.sts &= ~val; +} + +static uint64_t acpi_pm_evt_read(void *opaque, hwaddr addr, unsigned width) +{ + ACPIREGS *ar = opaque; + switch (addr) { + case 0: + return acpi_pm1_evt_get_sts(ar); + case 4: + return ar->pm1.evt.en; + default: + return 0; + } +} + +static void acpi_pm1_evt_write_en(ACPIREGS *ar, uint16_t val) +{ + ar->pm1.evt.en = val; + qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC, + val & ACPI_BITMASK_RT_CLOCK_ENABLE); + qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER, + val & ACPI_BITMASK_TIMER_ENABLE); +} + +static void acpi_pm_evt_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + ACPIREGS *ar = opaque; + switch (addr) { + case 0: + acpi_pm1_evt_write_sts(ar, val); + ar->pm1.evt.update_sci(ar); + break; + case 4: + acpi_pm1_evt_write_en(ar, val); + ar->pm1.evt.update_sci(ar); + break; + default: + break; + } +} + +static const MemoryRegionOps acpi_pm_evt_ops = { + .read = acpi_pm_evt_read, + .write = acpi_pm_evt_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void ls7a_pm1_evt_init(ACPIREGS *ar, acpi_update_sci_fn update_sci, + MemoryRegion *parent, uint64_t offset) +{ + ar->pm1.evt.update_sci = update_sci; + memory_region_init_io(&ar->pm1.evt.io, memory_region_owner(parent), + &acpi_pm_evt_ops, ar, "acpi-evt", 8); + memory_region_add_subregion(parent, offset, &ar->pm1.evt.io); +} + +static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width) +{ + ACPIREGS *ar = opaque; + return ar->pm1.cnt.cnt; +} + +/* ACPI PM1aCNT */ +static void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val) +{ + ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE); + if (val & ACPI_BITMASK_SLEEP_ENABLE) { + /* change suspend type */ + uint16_t sus_typ = (val >> 10) & 7; + switch (sus_typ) { + /* s3,s4 not support */ + case 5: + case 6: + warn_report("acpi s3,s4 state not support"); + break; + /* s5: soft off */ + case 7: + qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); + break; + default: + 
break; + } + } +} + +static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val, + unsigned width) +{ + acpi_pm1_cnt_write(opaque, val); +} + +static const MemoryRegionOps acpi_pm_cnt_ops = { + .read = acpi_pm_cnt_read, + .write = acpi_pm_cnt_write, + .valid.min_access_size = 4, + .valid.max_access_size = 4, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void acpi_notify_wakeup(Notifier *notifier, void *data) +{ + ACPIREGS *ar = container_of(notifier, ACPIREGS, wakeup); + WakeupReason *reason = data; + + switch (*reason) { + case QEMU_WAKEUP_REASON_RTC: + ar->pm1.evt.sts |= + (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_RT_CLOCK_STATUS); + break; + case QEMU_WAKEUP_REASON_PMTIMER: + ar->pm1.evt.sts |= + (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_TIMER_STATUS); + break; + case QEMU_WAKEUP_REASON_OTHER: + /* + * ACPI_BITMASK_WAKE_STATUS should be set on resume. + * Pretend that resume was caused by power button + */ + ar->pm1.evt.sts |= + (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_POWER_BUTTON_STATUS); + break; + default: + break; + } +} + +static void ls7a_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent, + bool disable_s3, bool disable_s4, uint8_t s4_val, + uint64_t offset) +{ + FWCfgState *fw_cfg; + + ar->pm1.cnt.s4_val = s4_val; + ar->wakeup.notify = acpi_notify_wakeup; + qemu_register_wakeup_notifier(&ar->wakeup); + memory_region_init_io(&ar->pm1.cnt.io, memory_region_owner(parent), + &acpi_pm_cnt_ops, ar, "acpi-cnt", 4); + memory_region_add_subregion(parent, offset, &ar->pm1.cnt.io); + + fw_cfg = fw_cfg_find(); + if (fw_cfg) { + uint8_t suspend[6] = { 128, 0, 0, 129, 128, 128 }; + suspend[3] = 1 | ((!disable_s3) << 7); + suspend[4] = s4_val | ((!disable_s4) << 7); + fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup(suspend, 6), 6); + } +} + +static void ls7a_pm_reset(void *opaque) +{ + LS7APCIPMRegs *pm = opaque; + + acpi_pm1_evt_reset(&pm->acpi_regs); + acpi_pm1_cnt_reset(&pm->acpi_regs); + acpi_pm_tmr_reset(&pm->acpi_regs); + acpi_gpe_reset(&pm->acpi_regs); + + acpi_update_sci(&pm->acpi_regs, pm->irq); +} + +static void pm_powerdown_req(Notifier *n, void *opaque) +{ + LS7APCIPMRegs *pm = container_of(n, LS7APCIPMRegs, powerdown_notifier); + + acpi_pm1_evt_power_down(&pm->acpi_regs); +} + +void ls7a_pm_init(LS7APCIPMRegs *pm, qemu_irq *pic) +{ + unsigned long base, gpe_len, acpi_aci_irq; + + /* + * ls7a board acpi hardware info, including + * acpi system io base address + * acpi gpe length + * acpi sci irq number + */ + base = ACPI_IO_BASE; + gpe_len = ACPI_GPE0_LEN; + acpi_aci_irq = ACPI_SCI_IRQ; + + pm->irq = pic[acpi_aci_irq - 64]; + memory_region_init(&pm->iomem, NULL, "ls7a_pm", ACPI_IO_SIZE); + memory_region_add_subregion(get_system_memory(), base, &pm->iomem); + + cpu_hotplug_hw_init(get_system_memory(), NULL, &pm->cpuhp_state, + CPU_HOTPLUG_BASE); + + ls7a_pm_tmr_init(&pm->acpi_regs, ls7a_pm_update_sci_fn, &pm->iomem, + LS7A_PM_TMR_BLK); + ls7a_pm1_evt_init(&pm->acpi_regs, ls7a_pm_update_sci_fn, &pm->iomem, + LS7A_PM_EVT_BLK); + ls7a_pm1_cnt_init(&pm->acpi_regs, &pm->iomem, false, false, 2, + LS7A_PM_CNT_BLK); + + acpi_gpe_init(&pm->acpi_regs, gpe_len); + memory_region_init_io(&pm->iomem_gpe, NULL, &ls7a_gpe_ops, pm, "acpi-gpe0", + gpe_len); + memory_region_add_subregion(&pm->iomem, LS7A_GPE0_STS_REG, &pm->iomem_gpe); + + memory_region_init_io(&pm->iomem_reset, NULL, &ls7a_reset_ops, pm, + "acpi-reset", 4); + memory_region_add_subregion(&pm->iomem, LS7A_GPE0_RESET_REG, + &pm->iomem_reset); + + qemu_register_reset(ls7a_pm_reset, pm); + + pm->powerdown_notifier.notify 
= pm_powerdown_req; + qemu_register_powerdown_notifier(&pm->powerdown_notifier); + + if (pm->acpi_memory_hotplug.is_enabled) { + acpi_memory_hotplug_init(get_system_memory(), NULL, + &pm->acpi_memory_hotplug, + MEMORY_HOTPLUG_BASE); + } +} + +static void ls7a_pm_get_gpe0_blk(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + uint64_t value = ACPI_IO_BASE + LS7A_GPE0_STS_REG; + + visit_type_uint64(v, name, &value, errp); +} + +static bool ls7a_pm_get_memory_hotplug_support(Object *obj, Error **errp) +{ + LS7APCIState *ls7a = get_ls7a_type(obj); + + return ls7a->pm.acpi_memory_hotplug.is_enabled; +} + +static void ls7a_pm_set_memory_hotplug_support(Object *obj, bool value, + Error **errp) +{ + LS7APCIState *ls7a = get_ls7a_type(obj); + + ls7a->pm.acpi_memory_hotplug.is_enabled = value; +} + +static void ls7a_pm_get_disable_s3(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LS7APCIPMRegs *pm = opaque; + uint8_t value = pm->disable_s3; + + visit_type_uint8(v, name, &value, errp); +} + +static void ls7a_pm_set_disable_s3(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LS7APCIPMRegs *pm = opaque; + Error *local_err = NULL; + uint8_t value; + + visit_type_uint8(v, name, &value, &local_err); + if (local_err) { + goto out; + } + pm->disable_s3 = value; +out: + error_propagate(errp, local_err); +} + +static void ls7a_pm_get_disable_s4(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LS7APCIPMRegs *pm = opaque; + uint8_t value = pm->disable_s4; + + visit_type_uint8(v, name, &value, errp); +} + +static void ls7a_pm_set_disable_s4(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LS7APCIPMRegs *pm = opaque; + Error *local_err = NULL; + uint8_t value; + + visit_type_uint8(v, name, &value, &local_err); + if (local_err) { + goto out; + } + pm->disable_s4 = value; +out: + error_propagate(errp, local_err); +} + +static void ls7a_pm_get_s4_val(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LS7APCIPMRegs *pm = opaque; + uint8_t value = pm->s4_val; + + visit_type_uint8(v, name, &value, errp); +} + +static void ls7a_pm_set_s4_val(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LS7APCIPMRegs *pm = opaque; + Error *local_err = NULL; + uint8_t value; + + visit_type_uint8(v, name, &value, &local_err); + if (local_err) { + goto out; + } + pm->s4_val = value; +out: + error_propagate(errp, local_err); +} + +void ls7a_pm_add_properties(Object *obj, LS7APCIPMRegs *pm, Error **errp) +{ + static const uint32_t gpe0_len = ACPI_GPE0_LEN; + pm->acpi_memory_hotplug.is_enabled = true; + pm->disable_s3 = 0; + pm->disable_s4 = 0; + pm->s4_val = 2; + + object_property_add_uint32_ptr(obj, ACPI_PM_PROP_PM_IO_BASE, + &pm->pm_io_base, OBJ_PROP_FLAG_READ); + object_property_add(obj, ACPI_PM_PROP_GPE0_BLK, "uint32", + ls7a_pm_get_gpe0_blk, NULL, NULL, pm); + object_property_add_uint32_ptr(obj, ACPI_PM_PROP_GPE0_BLK_LEN, &gpe0_len, + OBJ_PROP_FLAG_READ); + object_property_add_bool(obj, "memory-hotplug-support", + ls7a_pm_get_memory_hotplug_support, + ls7a_pm_set_memory_hotplug_support); + object_property_add(obj, ACPI_PM_PROP_S3_DISABLED, "uint8", + ls7a_pm_get_disable_s3, ls7a_pm_set_disable_s3, NULL, + pm); + object_property_add(obj, ACPI_PM_PROP_S4_DISABLED, "uint8", + ls7a_pm_get_disable_s4, ls7a_pm_set_disable_s4, NULL, + pm); + object_property_add(obj, ACPI_PM_PROP_S4_VAL, "uint8", ls7a_pm_get_s4_val, + ls7a_pm_set_s4_val, NULL, 
pm); +} + +void ls7a_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev)); + + if (ls7a->pm.acpi_memory_hotplug.is_enabled && + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + if (object_dynamic_cast(OBJECT(dev), TYPE_NVDIMM)) { + nvdimm_acpi_plug_cb(hotplug_dev, dev); + } else { + acpi_memory_plug_cb(hotplug_dev, &ls7a->pm.acpi_memory_hotplug, + dev, errp); + } + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + acpi_cpu_plug_cb(hotplug_dev, &ls7a->pm.cpuhp_state, dev, errp); + } else { + error_setg(errp, + "acpi: device plug request for not supported device" + " type: %s", + object_get_typename(OBJECT(dev))); + } +} + +void ls7a_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev)); + + if (ls7a->pm.acpi_memory_hotplug.is_enabled && + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + acpi_memory_unplug_request_cb( + hotplug_dev, &ls7a->pm.acpi_memory_hotplug, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + acpi_cpu_unplug_request_cb(hotplug_dev, &ls7a->pm.cpuhp_state, dev, + errp); + } else { + error_setg(errp, + "acpi: device unplug request for not supported device" + " type: %s", + object_get_typename(OBJECT(dev))); + } +} + +void ls7a_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + LS7APCIState *ls7a = get_ls7a_type(OBJECT(hotplug_dev)); + + if (ls7a->pm.acpi_memory_hotplug.is_enabled && + object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + acpi_memory_unplug_cb(&ls7a->pm.acpi_memory_hotplug, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + acpi_cpu_unplug_cb(&ls7a->pm.cpuhp_state, dev, errp); + } else { + error_setg(errp, + "acpi: device unplug for not supported device" + " type: %s", + object_get_typename(OBJECT(dev))); + } +} + +void ls7a_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list) +{ + LS7APCIState *ls7a = get_ls7a_type(OBJECT(adev)); + + acpi_memory_ospm_status(&ls7a->pm.acpi_memory_hotplug, list); + acpi_cpu_ospm_status(&ls7a->pm.cpuhp_state, list); +} + +void ls7a_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev) +{ + LS7APCIState *ls7a = get_ls7a_type(OBJECT(adev)); + + acpi_send_gpe_event(&ls7a->pm.acpi_regs, ls7a->pm.irq, ev); +} diff --git a/hw/acpi/meson.build b/hw/acpi/meson.build index adf6347bc425801b02efda69ee40d6ab1bdf8895..4718d143fca582ab1d62867f6c7b37c521f20a78 100644 --- a/hw/acpi/meson.build +++ b/hw/acpi/meson.build @@ -6,6 +6,7 @@ acpi_ss.add(files( 'core.c', 'utils.c', )) +acpi_ss.add(when: 'CONFIG_ACPI_LOONGARCH', if_true: files('larch_7a.c')) acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_true: files('cpu.c', 'cpu_hotplug.c')) acpi_ss.add(when: 'CONFIG_ACPI_CPU_HOTPLUG', if_false: files('acpi-cpu-hotplug-stub.c')) acpi_ss.add(when: 'CONFIG_ACPI_MEMORY_HOTPLUG', if_true: files('memory_hotplug.c')) @@ -25,6 +26,7 @@ acpi_ss.add(when: 'CONFIG_ACPI_X86_ICH', if_true: files('ich9.c', 'tco.c')) acpi_ss.add(when: 'CONFIG_IPMI', if_true: files('ipmi.c'), if_false: files('ipmi-stub.c')) acpi_ss.add(when: 'CONFIG_PC', if_false: files('acpi-x86-stub.c')) acpi_ss.add(when: 'CONFIG_TPM', if_true: files('tpm.c')) +acpi_ss.add(when: 'CONFIG_CPUFREQ', if_true: files('cpufreq.c')) softmmu_ss.add(when: 'CONFIG_ACPI', if_false: files('acpi-stub.c', 'aml-build-stub.c', 'ghes-stub.c')) softmmu_ss.add_all(when: 'CONFIG_ACPI', if_true: acpi_ss) softmmu_ss.add(when: 
'CONFIG_ALL', if_true: files('acpi-stub.c', 'aml-build-stub.c', diff --git a/hw/acpi/nvdimm.c b/hw/acpi/nvdimm.c index 0d43da19ea4213defcf1f4778dd35663b24a4ec5..5f85b16327063379ec25b08fb48e3daa16dabddc 100644 --- a/hw/acpi/nvdimm.c +++ b/hw/acpi/nvdimm.c @@ -476,7 +476,7 @@ struct NvdimmFuncGetLabelDataOut { /* the size of buffer filled by QEMU. */ uint32_t len; uint32_t func_ret_status; /* return status code. */ - uint8_t out_buf[]; /* the data got via Get Namesapce Label function. */ + uint8_t out_buf[]; /* the data got via Get Namespace Label function. */ } QEMU_PACKED; typedef struct NvdimmFuncGetLabelDataOut NvdimmFuncGetLabelDataOut; QEMU_BUILD_BUG_ON(sizeof(NvdimmFuncGetLabelDataOut) > NVDIMM_DSM_MEMORY_SIZE); diff --git a/hw/acpi/pcihp.c b/hw/acpi/pcihp.c index 30405b5113d7fb5a259a9d9cbd761d59ae5cbe59..be0e846b3433199bd81459fe164e884510066366 100644 --- a/hw/acpi/pcihp.c +++ b/hw/acpi/pcihp.c @@ -32,6 +32,7 @@ #include "hw/pci/pci_bridge.h" #include "hw/pci/pci_host.h" #include "hw/pci/pcie_port.h" +#include "hw/pci-bridge/xio3130_downstream.h" #include "hw/i386/acpi-build.h" #include "hw/acpi/acpi.h" #include "hw/pci/pci_bus.h" @@ -341,6 +342,8 @@ void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, { PCIDevice *pdev = PCI_DEVICE(dev); int slot = PCI_SLOT(pdev->devfn); + PCIDevice *bridge; + PCIBus *bus; int bsel; /* Don't send event when device is enabled during qemu machine creation: @@ -370,7 +373,14 @@ void acpi_pcihp_device_plug_cb(HotplugHandler *hotplug_dev, AcpiPciHpState *s, return; } - bsel = acpi_pcihp_get_bsel(pci_get_bus(pdev)); + bus = pci_get_bus(pdev); + bridge = pci_bridge_get_device(bus); + if (object_dynamic_cast(OBJECT(bridge), TYPE_PCIE_ROOT_PORT) || + object_dynamic_cast(OBJECT(bridge), TYPE_XIO3130_DOWNSTREAM)) { + pcie_cap_slot_enable_power(bridge); + } + + bsel = acpi_pcihp_get_bsel(bus); g_assert(bsel >= 0); s->acpi_pcihp_pci_status[bsel].up |= (1U << slot); acpi_send_event(DEVICE(hotplug_dev), ACPI_PCI_HOTPLUG_STATUS); @@ -491,6 +501,9 @@ static void pci_write(void *opaque, hwaddr addr, uint64_t data, } bus = acpi_pcihp_find_hotplug_bus(s, s->hotplug_select); + if (!bus) { + break; + } QTAILQ_FOREACH_SAFE(kid, &bus->qbus.children, sibling, next) { Object *o = OBJECT(kid->child); PCIDevice *dev = PCI_DEVICE(o); diff --git a/hw/arm/Kconfig b/hw/arm/Kconfig index 2d37d29f02b409c00f27b8ec9da7ad6b9894fc5f..006a4b4c4b709a3ff2b984a7b531c749b2ceb53e 100644 --- a/hw/arm/Kconfig +++ b/hw/arm/Kconfig @@ -27,6 +27,7 @@ config ARM_VIRT select DIMM select ACPI_HW_REDUCED select ACPI_APEI + select ACPI_CPU_HOTPLUG config CHEETAH bool diff --git a/hw/arm/aspeed_ast2600.c b/hw/arm/aspeed_ast2600.c index 0384357a9510e087831647e726c354cd869be36e..5da2d5175e4104641094044b2fb4fc7d0320d348 100644 --- a/hw/arm/aspeed_ast2600.c +++ b/hw/arm/aspeed_ast2600.c @@ -19,15 +19,17 @@ #include "sysemu/sysemu.h" #define ASPEED_SOC_IOMEM_SIZE 0x00200000 +#define ASPEED_SOC_DPMCU_SIZE 0x00040000 static const hwaddr aspeed_soc_ast2600_memmap[] = { [ASPEED_DEV_SRAM] = 0x10000000, + [ASPEED_DEV_DPMCU] = 0x18000000, /* 0x16000000 0x17FFFFFF : AHB BUS do LPC Bus bridge */ [ASPEED_DEV_IOMEM] = 0x1E600000, [ASPEED_DEV_PWM] = 0x1E610000, [ASPEED_DEV_FMC] = 0x1E620000, [ASPEED_DEV_SPI1] = 0x1E630000, - [ASPEED_DEV_SPI2] = 0x1E641000, + [ASPEED_DEV_SPI2] = 0x1E631000, [ASPEED_DEV_EHCI1] = 0x1E6A1000, [ASPEED_DEV_EHCI2] = 0x1E6A3000, [ASPEED_DEV_MII1] = 0x1E650000, @@ -44,6 +46,7 @@ static const hwaddr aspeed_soc_ast2600_memmap[] = { [ASPEED_DEV_SCU] = 0x1E6E2000, 
[ASPEED_DEV_XDMA] = 0x1E6E7000, [ASPEED_DEV_ADC] = 0x1E6E9000, + [ASPEED_DEV_DP] = 0x1E6EB000, [ASPEED_DEV_VIDEO] = 0x1E700000, [ASPEED_DEV_SDHCI] = 0x1E740000, [ASPEED_DEV_EMMC] = 0x1E750000, @@ -104,6 +107,7 @@ static const int aspeed_soc_ast2600_irqmap[] = { [ASPEED_DEV_ETH3] = 32, [ASPEED_DEV_ETH4] = 33, [ASPEED_DEV_KCS] = 138, /* 138 -> 142 */ + [ASPEED_DEV_DP] = 62, }; static qemu_irq aspeed_soc_get_irq(AspeedSoCState *s, int ctrl) @@ -298,6 +302,10 @@ static void aspeed_soc_ast2600_realize(DeviceState *dev, Error **errp) memory_region_add_subregion(get_system_memory(), sc->memmap[ASPEED_DEV_SRAM], &s->sram); + /* DPMCU */ + create_unimplemented_device("aspeed.dpmcu", sc->memmap[ASPEED_DEV_DPMCU], + ASPEED_SOC_DPMCU_SIZE); + /* SCU */ if (!sysbus_realize(SYS_BUS_DEVICE(&s->scu), errp)) { return; diff --git a/hw/arm/boot.c b/hw/arm/boot.c index 74ad397b1ff907b11dd226c81b3289cc6aafbb30..db69ec648abb80b98a42b7fa6293b1cce626102c 100644 --- a/hw/arm/boot.c +++ b/hw/arm/boot.c @@ -12,6 +12,7 @@ #include "qemu/datadir.h" #include "qemu/error-report.h" #include "qapi/error.h" +#include "qemu/log.h" #include #include "hw/arm/boot.h" #include "hw/arm/linux-boot-if.h" @@ -26,6 +27,7 @@ #include "qemu/config-file.h" #include "qemu/option.h" #include "qemu/units.h" +#include "kvm_arm.h" /* Kernel boot protocol is specified in the kernel docs * Documentation/arm/Booting and Documentation/arm64/booting.txt @@ -813,6 +815,24 @@ static void do_cpu_reset(void *opaque) } } +void cpu_hotplug_register_reset(int ncpu) +{ + CPUState *cpu_0 = qemu_get_cpu(0); + CPUState *cpu = qemu_get_cpu(ncpu); + QEMUResetEntry *entry = qemu_get_reset_entry(do_cpu_reset, cpu_0); + + assert(entry); + /* Gather the reset handlers of all CPUs */ + qemu_register_reset_after(entry, do_cpu_reset, cpu); +} + +void cpu_hotplug_reset_manually(int ncpu) +{ + CPUState *cpu = qemu_get_cpu(ncpu); + + do_cpu_reset(cpu); +} + /** * load_image_to_fw_cfg() - Load an image file into an fw_cfg entry identified * by key. @@ -1236,6 +1256,16 @@ static void arm_setup_direct_kernel_boot(ARMCPU *cpu, for (cs = first_cpu; cs; cs = CPU_NEXT(cs)) { ARM_CPU(cs)->env.boot_info = info; } + + if (kvm_enabled() && virtcca_cvm_enabled()) { + if (info->dtb_limit == 0) { + info->dtb_limit = info->dtb_start + 0x200000; + } + kvm_load_user_data(info->loader_start, image_high_addr, info->initrd_start, + info->dtb_limit, info->ram_size, (struct kvm_numa_info *)info->numa_info); + tmm_add_ram_region(info->loader_start, image_high_addr - info->loader_start, + info->initrd_start, info->dtb_limit - info->initrd_start, true); + } } static void arm_setup_firmware_boot(ARMCPU *cpu, struct arm_boot_info *info) @@ -1317,11 +1347,47 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) * doesn't support secure. 
*/ assert(!(info->secure_board_setup && kvm_enabled())); + + qemu_log("load the kernel\n"); + info->kernel_filename = ms->kernel_filename; info->kernel_cmdline = ms->kernel_cmdline; info->initrd_filename = ms->initrd_filename; info->dtb_filename = ms->dtb; info->dtb_limit = 0; + if (kvm_enabled() && virtcca_cvm_enabled()) { + info->ram_size = ms->ram_size; + info->numa_info = g_malloc(sizeof(struct kvm_numa_info)); + struct kvm_numa_info *numa_info = (struct kvm_numa_info *) info->numa_info; + if (ms->numa_state != NULL && ms->numa_state->num_nodes > 0) { + numa_info->numa_cnt = ms->numa_state->num_nodes; + uint64_t mem_base = info->loader_start; + for (int64_t i = 0; i < ms->numa_state->num_nodes && i < MAX_NUMA_NODE; i++) { + uint64_t mem_len = ms->numa_state->nodes[i].node_mem; + numa_info->numa_nodes[i].numa_id = i; + numa_info->numa_nodes[i].ipa_start = mem_base; + numa_info->numa_nodes[i].ipa_size = mem_len; + memcpy(numa_info->numa_nodes[i].host_numa_nodes, ms->numa_state->nodes[i].node_memdev->host_nodes, + MAX_NODES / BITS_PER_LONG * sizeof(uint64_t)); + mem_base += mem_len; + } + } else { + numa_info->numa_cnt = 1; + numa_info->numa_nodes[0].numa_id = 0; + numa_info->numa_nodes[0].ipa_start = info->loader_start; + numa_info->numa_nodes[0].ipa_size = info->ram_size; + memset(numa_info->numa_nodes[0].host_numa_nodes, 0, MAX_NODES / BITS_PER_LONG * sizeof(uint64_t)); + } + + for (int cpu = ms->smp.cpus - 1; cpu >= 0; cpu--) { + ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); + CPUState *cs = CPU(armcpu); + uint64_t node_id = 0; + if (ms->possible_cpus->cpus[cs->cpu_index].props.has_node_id) + node_id = ms->possible_cpus->cpus[cs->cpu_index].props.node_id; + bitmap_set((unsigned long *)numa_info->numa_nodes[node_id].cpu_id, cpu, 1); + } + } /* Load the kernel. 
*/ if (!info->kernel_filename || info->firmware_loaded) { @@ -1330,6 +1396,11 @@ void arm_load_kernel(ARMCPU *cpu, MachineState *ms, struct arm_boot_info *info) arm_setup_direct_kernel_boot(cpu, info); } + if (kvm_enabled() && virtcca_cvm_enabled()) { + g_free(info->numa_info); + info->numa_info = NULL; + } + if (!info->skip_dtb_autoload && have_dtb(info)) { if (arm_load_dtb(info->dtb_start, info, info->dtb_limit, as, ms) < 0) { exit(1); diff --git a/hw/arm/fsl-imx25.c b/hw/arm/fsl-imx25.c index 24c437459033c26f3970fa2e956389e52242de84..9aabbf7f5870b60990b473b1e1d9973bcdc76161 100644 --- a/hw/arm/fsl-imx25.c +++ b/hw/arm/fsl-imx25.c @@ -169,7 +169,8 @@ static void fsl_imx25_realize(DeviceState *dev, Error **errp) epit_table[i].irq)); } - object_property_set_uint(OBJECT(&s->fec), "phy-num", s->phy_num, &err); + object_property_set_uint(OBJECT(&s->fec), "phy-num", s->phy_num, + &error_abort); qdev_set_nic_properties(DEVICE(&s->fec), &nd_table[0]); if (!sysbus_realize(SYS_BUS_DEVICE(&s->fec), errp)) { diff --git a/hw/arm/fsl-imx6.c b/hw/arm/fsl-imx6.c index 00dafe3f62dee011c9249807ae1894082728c97e..c4b95dc7a746188d23e77a5397f722734c52b05b 100644 --- a/hw/arm/fsl-imx6.c +++ b/hw/arm/fsl-imx6.c @@ -377,7 +377,8 @@ static void fsl_imx6_realize(DeviceState *dev, Error **errp) spi_table[i].irq)); } - object_property_set_uint(OBJECT(&s->eth), "phy-num", s->phy_num, &err); + object_property_set_uint(OBJECT(&s->eth), "phy-num", s->phy_num, + &error_abort); qdev_set_nic_properties(DEVICE(&s->eth), &nd_table[0]); if (!sysbus_realize(SYS_BUS_DEVICE(&s->eth), errp)) { return; diff --git a/hw/arm/musicpal.c b/hw/arm/musicpal.c index 2d612cc0c9bee1809e50e0a57173714d4e90c393..15fc7fee419bb634d35663f05be3e59358b68e84 100644 --- a/hw/arm/musicpal.c +++ b/hw/arm/musicpal.c @@ -185,13 +185,13 @@ static void eth_rx_desc_put(AddressSpace *dma_as, uint32_t addr, cpu_to_le16s(&desc->buffer_size); cpu_to_le32s(&desc->buffer); cpu_to_le32s(&desc->next); - dma_memory_write(dma_as, addr, desc, sizeof(*desc)); + dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED); } static void eth_rx_desc_get(AddressSpace *dma_as, uint32_t addr, mv88w8618_rx_desc *desc) { - dma_memory_read(dma_as, addr, desc, sizeof(*desc)); + dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED); le32_to_cpus(&desc->cmdstat); le16_to_cpus(&desc->bytes); le16_to_cpus(&desc->buffer_size); @@ -215,7 +215,7 @@ static ssize_t eth_receive(NetClientState *nc, const uint8_t *buf, size_t size) eth_rx_desc_get(&s->dma_as, desc_addr, &desc); if ((desc.cmdstat & MP_ETH_RX_OWN) && desc.buffer_size >= size) { dma_memory_write(&s->dma_as, desc.buffer + s->vlan_header, - buf, size); + buf, size, MEMTXATTRS_UNSPECIFIED); desc.bytes = size + s->vlan_header; desc.cmdstat &= ~MP_ETH_RX_OWN; s->cur_rx[i] = desc.next; @@ -241,13 +241,13 @@ static void eth_tx_desc_put(AddressSpace *dma_as, uint32_t addr, cpu_to_le16s(&desc->bytes); cpu_to_le32s(&desc->buffer); cpu_to_le32s(&desc->next); - dma_memory_write(dma_as, addr, desc, sizeof(*desc)); + dma_memory_write(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED); } static void eth_tx_desc_get(AddressSpace *dma_as, uint32_t addr, mv88w8618_tx_desc *desc) { - dma_memory_read(dma_as, addr, desc, sizeof(*desc)); + dma_memory_read(dma_as, addr, desc, sizeof(*desc), MEMTXATTRS_UNSPECIFIED); le32_to_cpus(&desc->cmdstat); le16_to_cpus(&desc->res); le16_to_cpus(&desc->bytes); @@ -269,7 +269,8 @@ static void eth_send(mv88w8618_eth_state *s, int queue_index) if (desc.cmdstat & 
MP_ETH_TX_OWN) { len = desc.bytes; if (len < 2048) { - dma_memory_read(&s->dma_as, desc.buffer, buf, len); + dma_memory_read(&s->dma_as, desc.buffer, buf, len, + MEMTXATTRS_UNSPECIFIED); qemu_send_packet(qemu_get_queue(s->nic), buf, len); } desc.cmdstat &= ~MP_ETH_TX_OWN; @@ -417,7 +418,8 @@ static void mv88w8618_eth_realize(DeviceState *dev, Error **errp) address_space_init(&s->dma_as, s->dma_mr, "emac-dma"); s->nic = qemu_new_nic(&net_mv88w8618_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); } static const VMStateDescription mv88w8618_eth_vmsd = { diff --git a/hw/arm/smmu-common.c b/hw/arm/smmu-common.c index 0459850a93d92f7d5dabaa5fce148f5d9793a489..d8fc81c1020bfa30c4be05ffe6124fdb70c4b321 100644 --- a/hw/arm/smmu-common.c +++ b/hw/arm/smmu-common.c @@ -193,7 +193,8 @@ static int get_pte(dma_addr_t baseaddr, uint32_t index, uint64_t *pte, dma_addr_t addr = baseaddr + index * sizeof(*pte); /* TODO: guarantee 64-bit single-copy atomicity */ - ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte)); + ret = dma_memory_read(&address_space_memory, addr, pte, sizeof(*pte), + MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { info->type = SMMU_PTW_ERR_WALK_EABT; @@ -529,6 +530,8 @@ static void smmu_base_reset(DeviceState *dev) { SMMUState *s = ARM_SMMU(dev); + memset(s->smmu_pcibus_by_bus_num, 0, sizeof(s->smmu_pcibus_by_bus_num)); + g_hash_table_remove_all(s->configs); g_hash_table_remove_all(s->iotlb); } diff --git a/hw/arm/smmuv3.c b/hw/arm/smmuv3.c index 01b60bee4950535b291511bc0ff146ecb066867f..3b43368be0fac2bc50137fa340ba0f21d6f3bd1c 100644 --- a/hw/arm/smmuv3.c +++ b/hw/arm/smmuv3.c @@ -102,7 +102,8 @@ static inline MemTxResult queue_read(SMMUQueue *q, void *data) { dma_addr_t addr = Q_CONS_ENTRY(q); - return dma_memory_read(&address_space_memory, addr, data, q->entry_size); + return dma_memory_read(&address_space_memory, addr, data, q->entry_size, + MEMTXATTRS_UNSPECIFIED); } static MemTxResult queue_write(SMMUQueue *q, void *data) @@ -110,7 +111,8 @@ static MemTxResult queue_write(SMMUQueue *q, void *data) dma_addr_t addr = Q_PROD_ENTRY(q); MemTxResult ret; - ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size); + ret = dma_memory_write(&address_space_memory, addr, data, q->entry_size, + MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { return ret; } @@ -285,7 +287,8 @@ static int smmu_get_ste(SMMUv3State *s, dma_addr_t addr, STE *buf, trace_smmuv3_get_ste(addr); /* TODO: guarantee 64-bit single-copy atomicity */ - ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf)); + ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf), + MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "Cannot fetch pte at address=0x%"PRIx64"\n", addr); @@ -306,7 +309,8 @@ static int smmu_get_cd(SMMUv3State *s, STE *ste, uint32_t ssid, trace_smmuv3_get_cd(addr); /* TODO: guarantee 64-bit single-copy atomicity */ - ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf)); + ret = dma_memory_read(&address_space_memory, addr, buf, sizeof(*buf), + MEMTXATTRS_UNSPECIFIED); if (ret != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "Cannot fetch pte at address=0x%"PRIx64"\n", addr); @@ -411,7 +415,7 @@ static int smmu_find_ste(SMMUv3State *s, uint32_t sid, STE *ste, l1ptr = (dma_addr_t)(strtab_base + l1_ste_offset * sizeof(l1std)); /* TODO: guarantee 64-bit single-copy atomicity */ ret = dma_memory_read(&address_space_memory, 
l1ptr, &l1std,
-                          sizeof(l1std));
+                          sizeof(l1std), MEMTXATTRS_UNSPECIFIED);
     if (ret != MEMTX_OK) {
         qemu_log_mask(LOG_GUEST_ERROR,
                       "Could not read L1PTR at 0X%"PRIx64"\n", l1ptr);
diff --git a/hw/arm/virt-acpi-build.c b/hw/arm/virt-acpi-build.c
index 674f902652556598f47e7850b67027cbee820b06..5ed23e627a46a7ec24cedf182c3c645c38301f36 100644
--- a/hw/arm/virt-acpi-build.c
+++ b/hw/arm/virt-acpi-build.c
@@ -60,6 +60,266 @@
 
 #define ACPI_BUILD_TABLE_SIZE 0x20000
 
+/*
+ * ACPI spec, Revision 6.3
+ * 5.2.29.2 Cache Type Structure (Type 1)
+ */
+static void build_cache_hierarchy_node(MachineState *ms, GArray *tbl,
+                                       uint32_t next_level, uint32_t cache_type)
+{
+    uint64_t cache_size;
+
+    build_append_byte(tbl, 1);                     /* Type 1: cache */
+    build_append_byte(tbl, 24);                    /* Length */
+    build_append_int_noprefix(tbl, 0, 2);          /* Reserved */
+    build_append_int_noprefix(tbl, 127, 4);        /* Flags: all fields valid */
+    build_append_int_noprefix(tbl, next_level, 4); /* Next Level of Cache */
+
+    switch (cache_type) {
+    case ARM_L1D_CACHE: /* L1 dcache info */
+        cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L1D);
+        build_append_int_noprefix(tbl,
+                                  cache_size > 0 ? cache_size : ARM_L1DCACHE_SIZE,
+                                  4);
+        build_append_int_noprefix(tbl, ARM_L1DCACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L1DCACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L1DCACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L1DCACHE_LINE_SIZE, 2);
+        break;
+    case ARM_L1I_CACHE: /* L1 icache info */
+        cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L1I);
+        build_append_int_noprefix(tbl,
+                                  cache_size > 0 ? cache_size : ARM_L1ICACHE_SIZE,
+                                  4);
+        build_append_int_noprefix(tbl, ARM_L1ICACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L1ICACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L1ICACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L1ICACHE_LINE_SIZE, 2);
+        break;
+    case ARM_L1_CACHE: /* L1 cache info */
+        cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L1);
+        build_append_int_noprefix(tbl,
+                                  cache_size > 0 ? cache_size : ARM_L1CACHE_SIZE,
+                                  4);
+        build_append_int_noprefix(tbl, ARM_L1CACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L1CACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L1CACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L1CACHE_LINE_SIZE, 2);
+        break;
+    case ARM_L2_CACHE: /* L2 cache info */
+        cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L2);
+        build_append_int_noprefix(tbl,
+                                  cache_size > 0 ? cache_size : ARM_L2CACHE_SIZE,
+                                  4);
+        build_append_int_noprefix(tbl, ARM_L2CACHE_SETS, 4);
+        build_append_byte(tbl, ARM_L2CACHE_ASSOCIATIVITY);
+        build_append_byte(tbl, ARM_L2CACHE_ATTRIBUTES);
+        build_append_int_noprefix(tbl, ARM_L2CACHE_LINE_SIZE, 2);
+        break;
+    case ARM_L3_CACHE: /* L3 cache info */
+        cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L3);
+        build_append_int_noprefix(tbl,
+                                  cache_size > 0 ? 
cache_size : ARM_L3CACHE_SIZE, + 4); + build_append_int_noprefix(tbl, ARM_L3CACHE_SETS, 4); + build_append_byte(tbl, ARM_L3CACHE_ASSOCIATIVITY); + build_append_byte(tbl, ARM_L3CACHE_ATTRIBUTES); + build_append_int_noprefix(tbl, ARM_L3CACHE_LINE_SIZE, 2); + break; + default: + build_append_int_noprefix(tbl, 0, 4); + build_append_int_noprefix(tbl, 0, 4); + build_append_byte(tbl, 0); + build_append_byte(tbl, 0); + build_append_int_noprefix(tbl, 0, 2); + } +} + +/* + * ACPI spec, Revision 6.3 + * 5.2.29 Processor Properties Topology Table (PPTT) + */ +static void build_pptt_arm(GArray *table_data, BIOSLinker *linker, MachineState *ms, + const char *oem_id, const char *oem_table_id) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + GQueue *list = g_queue_new(); + guint pptt_start = table_data->len; + guint parent_offset; + guint length, i; + int uid = 0; + int socket; + AcpiTable table = { .sig = "PPTT", .rev = 2, + .oem_id = oem_id, .oem_table_id = oem_table_id }; + bool unified_l1 = cpu_l1_cache_unified(0); + + acpi_table_begin(&table, table_data); + + for (socket = 0; socket < ms->smp.sockets; socket++) { + uint32_t l3_cache_offset = table_data->len - pptt_start; + build_cache_hierarchy_node(ms, table_data, 0, ARM_L3_CACHE); + + g_queue_push_tail(list, + GUINT_TO_POINTER(table_data->len - pptt_start)); + build_processor_hierarchy_node( + table_data, + /* + * Physical package - represents the boundary + * of a physical package + */ + (1 << 0), + 0, socket, &l3_cache_offset, 1); + } + + if (mc->smp_props.clusters_supported) { + length = g_queue_get_length(list); + for (i = 0; i < length; i++) { + int cluster; + + parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list)); + for (cluster = 0; cluster < ms->smp.clusters; cluster++) { + g_queue_push_tail(list, + GUINT_TO_POINTER(table_data->len - pptt_start)); + build_processor_hierarchy_node( + table_data, + (0 << 0), /* not a physical package */ + parent_offset, cluster, NULL, 0); + } + } + } + + length = g_queue_get_length(list); + for (i = 0; i < length; i++) { + int core; + + parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list)); + for (core = 0; core < ms->smp.cores; core++) { + uint32_t priv_rsrc[3] = {}; + priv_rsrc[0] = table_data->len - pptt_start; /* L2 cache offset */ + build_cache_hierarchy_node(ms, table_data, 0, ARM_L2_CACHE); + + if (unified_l1) { + priv_rsrc[1] = table_data->len - pptt_start; /* L1 cache offset */ + build_cache_hierarchy_node(ms, table_data, priv_rsrc[0], ARM_L1_CACHE); + } else { + priv_rsrc[1] = table_data->len - pptt_start; /* L1 dcache offset */ + build_cache_hierarchy_node(ms, table_data, priv_rsrc[0], ARM_L1D_CACHE); + priv_rsrc[2] = table_data->len - pptt_start; /* L1 icache offset */ + build_cache_hierarchy_node(ms, table_data, priv_rsrc[0], ARM_L1I_CACHE); + } + + if (ms->smp.threads > 1) { + g_queue_push_tail(list, + GUINT_TO_POINTER(table_data->len - pptt_start)); + build_processor_hierarchy_node( + table_data, + (0 << 0), /* not a physical package */ + parent_offset, core, priv_rsrc, 3); + } else { + build_processor_hierarchy_node( + table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 3), /* Node is a Leaf */ + parent_offset, uid++, priv_rsrc, 3); + } + } + } + + length = g_queue_get_length(list); + for (i = 0; i < length; i++) { + int thread; + + parent_offset = GPOINTER_TO_UINT(g_queue_pop_head(list)); + for (thread = 0; thread < ms->smp.threads; thread++) { + build_processor_hierarchy_node( + table_data, + (1 << 1) | /* ACPI Processor ID valid */ + (1 << 2) | /* Processor is a Thread */ 
+                (1 << 3), /* Node is a Leaf */
+                parent_offset, uid++, NULL, 0);
+        }
+    }
+
+    g_queue_free(list);
+    acpi_table_end(linker, &table);
+}
+
+static void acpi_dsdt_add_psd(Aml *dev, int cpus)
+{
+    Aml *pkg;
+    Aml *sub;
+
+    sub = aml_package(5);
+    aml_append(sub, aml_int(5));
+    aml_append(sub, aml_int(0));
+    /* Assume all vCPUs belong to the same domain */
+    aml_append(sub, aml_int(0));
+    /* SW_ANY: OSPM coordinate, initiate on any processor */
+    aml_append(sub, aml_int(0xFD));
+    aml_append(sub, aml_int(cpus));
+
+    pkg = aml_package(1);
+    aml_append(pkg, sub);
+
+    aml_append(dev, aml_name_decl("_PSD", pkg));
+}
+
+static void acpi_dsdt_add_cppc(Aml *dev, uint64_t cpu_base, int *regs_offset)
+{
+    Aml *cpc;
+    int i;
+
+    /* Use version 3 of CPPC table from ACPI 6.3 */
+    cpc = aml_package(23);
+    aml_append(cpc, aml_int(23));
+    aml_append(cpc, aml_int(3));
+
+    for (i = 0; i < CPPC_REG_COUNT; i++) {
+        Aml *res;
+        uint8_t reg_width;
+        uint8_t acc_type;
+        uint64_t addr;
+
+        if (regs_offset[i] == -1) {
+            reg_width = 0;
+            acc_type = AML_ANY_ACC;
+            addr = 0;
+        } else {
+            addr = cpu_base + regs_offset[i];
+            if (i == REFERENCE_CTR || i == DELIVERED_CTR) {
+                reg_width = 64;
+                acc_type = AML_QWORD_ACC;
+            } else {
+                reg_width = 32;
+                acc_type = AML_DWORD_ACC;
+            }
+        }
+
+        res = aml_resource_template();
+        aml_append(res, aml_generic_register(AML_SYSTEM_MEMORY, reg_width, 0,
+                                             acc_type, addr));
+        aml_append(cpc, res);
+    }
+
+    aml_append(dev, aml_name_decl("_CPC", cpc));
+}
+
+void virt_acpi_dsdt_cpu_cppc(AcpiDeviceIf *adev, int ncpu, int num_cpu, Aml *dev)
+{
+    VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine());
+    const MemMapEntry *cppc_memmap = &vms->memmap[VIRT_CPUFREQ];
+
+    /*
+     * Append _CPC and _PSD to support CPU frequency reporting.
+     * CPPC availability is probed via the DESIRED_PERF register.
+     */
+    if (cppc_regs_offset[DESIRED_PERF] != -1) {
+        acpi_dsdt_add_cppc(dev,
+                           cppc_memmap->base + ncpu * CPPC_REG_PER_CPU_STRIDE,
+                           cppc_regs_offset);
+        acpi_dsdt_add_psd(dev, num_cpu);
+    }
+}
+
 static void acpi_dsdt_add_cpus(Aml *scope, VirtMachineState *vms)
 {
     MachineState *ms = MACHINE(vms);
@@ -69,6 +329,9 @@ static void acpi_dsdt_add_cpus(Aml *scope, VirtMachineState *vms)
         Aml *dev = aml_device("C%.03X", i);
         aml_append(dev, aml_name_decl("_HID", aml_string("ACPI0007")));
         aml_append(dev, aml_name_decl("_UID", aml_int(i)));
+
+        virt_acpi_dsdt_cpu_cppc(NULL, i, ms->smp.cpus, dev);
+
         aml_append(scope, dev);
     }
 }
@@ -698,14 +961,69 @@ static void build_append_gicr(GArray *table_data, uint64_t base, uint32_t size)
     build_append_int_noprefix(table_data, size, 4); /* Discovery Range Length */
 }
+void virt_madt_cpu_entry(AcpiDeviceIf *adev, int i,
+                         const CPUArchIdList *possible_cpus, GArray *table_data,
+                         bool force_enabled)
+{
+    VirtMachineState *vms = VIRT_MACHINE(qdev_get_machine());
+    const MemMapEntry *memmap = vms->memmap;
+    ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i));
+    uint64_t physical_base_address = 0, gich = 0, gicv = 0;
+    uint32_t vgic_interrupt = vms->virt ? PPI(ARCH_GIC_MAINT_IRQ) : 0;
+    uint32_t pmu_interrupt, enabled;
+    static bool pmu;
+
+    if (i == 0) {
+        pmu = arm_feature(&armcpu->env, ARM_FEATURE_PMU);
+    }
+    /* ARM_FEATURE_PMU must be uniformly enabled or disabled across CPUs */
+    assert(!armcpu || arm_feature(&armcpu->env, ARM_FEATURE_PMU) == pmu);
+    pmu_interrupt = pmu ? PPI(VIRTUAL_PMU_IRQ) : 0;
+    enabled = armcpu || force_enabled ? 
1 /* Enabled */ : 0 /* Disabled */; + + if (vms->gic_version == 2) { + physical_base_address = memmap[VIRT_GIC_CPU].base; + gicv = memmap[VIRT_GIC_VCPU].base; + gich = memmap[VIRT_GIC_HYP].base; + } + + /* 5.2.12.14 GIC Structure */ + build_append_int_noprefix(table_data, 0xB, 1); /* Type */ + build_append_int_noprefix(table_data, 76, 1); /* Length */ + build_append_int_noprefix(table_data, 0, 2); /* Reserved */ + build_append_int_noprefix(table_data, i, 4); /* GIC ID */ + build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ + /* Flags */ + build_append_int_noprefix(table_data, enabled, 4); /* Enabled */ + /* Parking Protocol Version */ + build_append_int_noprefix(table_data, 0, 4); + /* Performance Interrupt GSIV */ + build_append_int_noprefix(table_data, pmu_interrupt, 4); + build_append_int_noprefix(table_data, 0, 8); /* Parked Address */ + /* Physical Base Address */ + build_append_int_noprefix(table_data, physical_base_address, 8); + build_append_int_noprefix(table_data, gicv, 8); /* GICV */ + build_append_int_noprefix(table_data, gich, 8); /* GICH */ + /* VGIC Maintenance interrupt */ + build_append_int_noprefix(table_data, vgic_interrupt, 4); + build_append_int_noprefix(table_data, 0, 8); /* GICR Base Address*/ + /* MPIDR */ + build_append_int_noprefix(table_data, possible_cpus->cpus[i].arch_id, 8); +} + static void build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) { int i; VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(vms); + MachineClass *mc = MACHINE_GET_CLASS(vms); + MachineState *ms = MACHINE(vms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); const MemMapEntry *memmap = vms->memmap; AcpiTable table = { .sig = "APIC", .rev = 3, .oem_id = vms->oem_id, .oem_table_id = vms->oem_table_id }; + /* The MADT GICC numbers */ + int num_cpu = ms->smp.cpus; acpi_table_begin(&table, table_data); /* Local Interrupt Controller Address */ @@ -724,41 +1042,11 @@ build_madt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) build_append_int_noprefix(table_data, vms->gic_version, 1); build_append_int_noprefix(table_data, 0, 3); /* Reserved */ - for (i = 0; i < MACHINE(vms)->smp.cpus; i++) { - ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(i)); - uint64_t physical_base_address = 0, gich = 0, gicv = 0; - uint32_t vgic_interrupt = vms->virt ? PPI(ARCH_GIC_MAINT_IRQ) : 0; - uint32_t pmu_interrupt = arm_feature(&armcpu->env, ARM_FEATURE_PMU) ? 
- PPI(VIRTUAL_PMU_IRQ) : 0; - - if (vms->gic_version == 2) { - physical_base_address = memmap[VIRT_GIC_CPU].base; - gicv = memmap[VIRT_GIC_VCPU].base; - gich = memmap[VIRT_GIC_HYP].base; - } - - /* 5.2.12.14 GIC Structure */ - build_append_int_noprefix(table_data, 0xB, 1); /* Type */ - build_append_int_noprefix(table_data, 76, 1); /* Length */ - build_append_int_noprefix(table_data, 0, 2); /* Reserved */ - build_append_int_noprefix(table_data, i, 4); /* GIC ID */ - build_append_int_noprefix(table_data, i, 4); /* ACPI Processor UID */ - /* Flags */ - build_append_int_noprefix(table_data, 1, 4); /* Enabled */ - /* Parking Protocol Version */ - build_append_int_noprefix(table_data, 0, 4); - /* Performance Interrupt GSIV */ - build_append_int_noprefix(table_data, pmu_interrupt, 4); - build_append_int_noprefix(table_data, 0, 8); /* Parked Address */ - /* Physical Base Address */ - build_append_int_noprefix(table_data, physical_base_address, 8); - build_append_int_noprefix(table_data, gicv, 8); /* GICV */ - build_append_int_noprefix(table_data, gich, 8); /* GICH */ - /* VGIC Maintenance interrupt */ - build_append_int_noprefix(table_data, vgic_interrupt, 4); - build_append_int_noprefix(table_data, 0, 8); /* GICR Base Address*/ - /* MPIDR */ - build_append_int_noprefix(table_data, armcpu->mp_affinity, 8); + if (vms->cpu_hotplug_enabled) { + num_cpu = ms->smp.max_cpus; + } + for (i = 0; i < num_cpu; i++) { + virt_madt_cpu_entry(NULL, i, possible_cpus, table_data, false); } if (vms->gic_version == 3) { @@ -848,6 +1136,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) const int *irqmap = vms->irqmap; AcpiTable table = { .sig = "DSDT", .rev = 2, .oem_id = vms->oem_id, .oem_table_id = vms->oem_table_id }; + bool cpu_aml_built = false; acpi_table_begin(&table, table_data); dsdt = init_aml_allocator(); @@ -858,7 +1147,6 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) * the RTC ACPI device at all when using UEFI. 
*/ scope = aml_scope("\\_SB"); - acpi_dsdt_add_cpus(scope, vms); acpi_dsdt_add_uart(scope, &memmap[VIRT_UART], (irqmap[VIRT_UART] + ARM_SPI_BASE)); if (vmc->acpi_expose_flash) { @@ -888,6 +1176,19 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, VirtMachineState *vms) AML_SYSTEM_MEMORY, memmap[VIRT_PCDIMM_ACPI].base); } + + if (event & ACPI_GED_CPU_HOTPLUG_EVT) { + CPUHotplugFeatures opts = { + .acpi_1_compatible = false, .has_legacy_cphp = false + }; + build_cpus_aml(dsdt, ms, opts, memmap[VIRT_CPU_ACPI].base, + "\\_SB", NULL, AML_SYSTEM_MEMORY); + cpu_aml_built = true; + } + } + + if (!cpu_aml_built) { + acpi_dsdt_add_cpus(scope, vms); } acpi_dsdt_add_power_button(scope); @@ -952,7 +1253,7 @@ void virt_acpi_build(VirtMachineState *vms, AcpiBuildTables *tables) if (!vmc->no_cpu_topology) { acpi_add_table(table_offsets, tables_blob); - build_pptt(tables_blob, tables->linker, ms, + build_pptt_arm(tables_blob, tables->linker, ms, vms->oem_id, vms->oem_table_id); } diff --git a/hw/arm/virt.c b/hw/arm/virt.c index 30da05dfe040e35b0fc7c2834c946cad0b6f728c..c581f65a22de60ffcf56985423d7ebc47df87e04 100644 --- a/hw/arm/virt.c +++ b/hw/arm/virt.c @@ -33,11 +33,13 @@ #include "qemu/datadir.h" #include "qemu/units.h" #include "qemu/option.h" +#include "qemu/log.h" #include "monitor/qdev.h" #include "qapi/error.h" #include "hw/sysbus.h" #include "hw/arm/boot.h" #include "hw/arm/primecell.h" +#include "hw/arm/topology.h" #include "hw/arm/virt.h" #include "hw/block/flash.h" #include "hw/vfio/vfio-calxeda-xgmac.h" @@ -50,6 +52,8 @@ #include "sysemu/tpm.h" #include "sysemu/kvm.h" #include "sysemu/hvf.h" +#include "sysemu/cpus.h" +#include "sysemu/hw_accel.h" #include "hw/loader.h" #include "qapi/error.h" #include "qemu/bitops.h" @@ -153,7 +157,9 @@ static const MemMapEntry base_memmap[] = { [VIRT_NVDIMM_ACPI] = { 0x09090000, NVDIMM_ACPI_IO_LEN}, [VIRT_PVTIME] = { 0x090a0000, 0x00010000 }, [VIRT_SECURE_GPIO] = { 0x090b0000, 0x00001000 }, + [VIRT_CPU_ACPI] = { 0x090c0000, ACPI_CPU_HOTPLUG_REG_LEN }, [VIRT_MMIO] = { 0x0a000000, 0x00000200 }, + [VIRT_CPUFREQ] = { 0x0b000000, 0x00010000 }, /* ...repeating for a total of NUM_VIRTIO_TRANSPORTS, each of that size */ [VIRT_PLATFORM_BUS] = { 0x0c000000, 0x02000000 }, [VIRT_SECURE_MEM] = { 0x0e000000, 0x01000000 }, @@ -201,11 +207,19 @@ static const char *valid_cpus[] = { ARM_CPU_TYPE_NAME("cortex-a53"), ARM_CPU_TYPE_NAME("cortex-a57"), ARM_CPU_TYPE_NAME("cortex-a72"), + ARM_CPU_TYPE_NAME("Kunpeng-920"), + ARM_CPU_TYPE_NAME("FT-2000+"), + ARM_CPU_TYPE_NAME("Tengyun-S2500"), + ARM_CPU_TYPE_NAME("Tengyun-S5000C"), ARM_CPU_TYPE_NAME("a64fx"), ARM_CPU_TYPE_NAME("host"), ARM_CPU_TYPE_NAME("max"), }; +static MemoryRegion *secure_sysmem; +static MemoryRegion *tag_sysmem; +static MemoryRegion *secure_tag_sysmem; + static bool cpu_type_valid(const char *cpu) { int i; @@ -248,7 +262,15 @@ static void create_fdt(VirtMachineState *vms) /* /chosen must exist for load_dtb to fill in necessary properties later */ qemu_fdt_add_subnode(fdt, "/chosen"); - create_kaslr_seed(ms, "/chosen"); + + g_autofree char *kvm_type = NULL; + if (object_property_find(OBJECT(current_machine), "kvm-type")) { + kvm_type = object_property_get_str(OBJECT(current_machine), + "kvm-type", &error_abort); + } + if (!(kvm_type && !strcmp(kvm_type, "cvm"))) { + create_kaslr_seed(ms, "/chosen"); + } if (vms->secure) { qemu_fdt_add_subnode(fdt, "/secure-chosen"); @@ -347,6 +369,130 @@ static void fdt_add_timer_nodes(const VirtMachineState *vms) GIC_FDT_IRQ_TYPE_PPI, ARCH_TIMER_NS_EL2_IRQ, irqflags); } +/* + 
* In the CLIDR_EL1 exposed to the guest by the hypervisor, the L1 cache
+ * may be unified or have separate instruction and data caches. We need
+ * to read the guest-visible CLIDR_EL1 and check the L1 cache type.
+ */
+bool cpu_l1_cache_unified(int cpu)
+{
+    bool unified = false;
+    uint64_t clidr;
+    ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu));
+    CPUState *cs = CPU(armcpu);
+    int ret;
+
+    if (kvm_enabled()) {
+        struct kvm_one_reg reg = {
+            .id = ARM64_REG_CLIDR_EL1,
+            .addr = (uintptr_t)&clidr
+        };
+
+        ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+        if (ret) {
+            error_setg(&error_fatal, "Get vCPU CLIDR_EL1 from KVM failed: %d",
+                       ret);
+            return unified;
+        }
+
+        if (CLIDR_CTYPE(clidr, 1) == CTYPE_UNIFIED) {
+            unified = true;
+        }
+    }
+
+    return unified;
+}
+
+static void fdt_add_l3cache_nodes(const VirtMachineState *vms)
+{
+    int i;
+    const MachineState *ms = MACHINE(vms);
+    int cpus_per_socket = ms->smp.clusters * ms->smp.cores * ms->smp.threads;
+    int sockets = (ms->smp.cpus + cpus_per_socket - 1) / cpus_per_socket;
+    uint64_t cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L3);
+
+    for (i = 0; i < sockets; i++) {
+        char *nodename = g_strdup_printf("/cpus/l3-cache%d", i);
+
+        qemu_fdt_add_subnode(ms->fdt, nodename);
+        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cache");
+        qemu_fdt_setprop_string(ms->fdt, nodename, "cache-unified", "true");
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-level", 3);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size",
+                              cache_size > 0 ? cache_size : ARM_L3CACHE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size",
+                              ARM_L3CACHE_LINE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L3CACHE_SETS);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
+                              qemu_fdt_alloc_phandle(ms->fdt));
+        g_free(nodename);
+    }
+}
+
+static void fdt_add_l2cache_nodes(const VirtMachineState *vms)
+{
+    const MachineState *ms = MACHINE(vms);
+    int cpus_per_socket = ms->smp.clusters * ms->smp.cores * ms->smp.threads;
+    int cpu;
+    uint64_t cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L2);
+
+    for (cpu = 0; cpu < ms->smp.cpus; cpu++) {
+        char *next_path = g_strdup_printf("/cpus/l3-cache%d",
+                                          cpu / cpus_per_socket);
+        char *nodename = g_strdup_printf("/cpus/l2-cache%d", cpu);
+
+        qemu_fdt_add_subnode(ms->fdt, nodename);
+        qemu_fdt_setprop_string(ms->fdt, nodename, "cache-unified", "true");
+        qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cache");
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size",
+                              cache_size > 0 ? cache_size : ARM_L2CACHE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size",
+                              ARM_L2CACHE_LINE_SIZE);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L2CACHE_SETS);
+        qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache",
+                                 next_path);
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle",
+                              qemu_fdt_alloc_phandle(ms->fdt));
+
+        g_free(next_path);
+        g_free(nodename);
+    }
+}
+
+static void fdt_add_l1cache_prop(const VirtMachineState *vms,
+                                 char *nodename, int cpu)
+{
+    const MachineState *ms = MACHINE(vms);
+    char *next_path = g_strdup_printf("/cpus/l2-cache%d", cpu);
+    bool unified_l1 = cpu_l1_cache_unified(0);
+    uint64_t l1d_cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L1D);
+    uint64_t l1i_cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L1I);
+    uint64_t l1_cache_size = machine_get_cache_size(ms, CACHE_LEVEL_AND_TYPE_L1);
+
+    if (unified_l1) {
+        qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-size",
+                              l1_cache_size > 0 ? 
l1_cache_size : ARM_L1CACHE_SIZE); + qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-line-size", + ARM_L1CACHE_LINE_SIZE); + qemu_fdt_setprop_cell(ms->fdt, nodename, "cache-sets", ARM_L1CACHE_SETS); + } else { + qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-size", + l1d_cache_size > 0 ? l1d_cache_size : ARM_L1DCACHE_SIZE); + qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-line-size", + ARM_L1DCACHE_LINE_SIZE); + qemu_fdt_setprop_cell(ms->fdt, nodename, "d-cache-sets", + ARM_L1DCACHE_SETS); + qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-size", + l1i_cache_size > 0 ? l1i_cache_size : ARM_L1ICACHE_SIZE); + qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-line-size", + ARM_L1ICACHE_LINE_SIZE); + qemu_fdt_setprop_cell(ms->fdt, nodename, "i-cache-sets", + ARM_L1ICACHE_SETS); + } + qemu_fdt_setprop_phandle(ms->fdt, nodename, "next-level-cache", next_path); + + g_free(next_path); +} + static void fdt_add_cpu_nodes(const VirtMachineState *vms) { int cpu; @@ -381,6 +527,11 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#address-cells", addr_cells); qemu_fdt_setprop_cell(ms->fdt, "/cpus", "#size-cells", 0x0); + if (!vmc->no_cpu_topology) { + fdt_add_l3cache_nodes(vms); + fdt_add_l2cache_nodes(vms); + } + for (cpu = smp_cpus - 1; cpu >= 0; cpu--) { char *nodename = g_strdup_printf("/cpus/cpu@%d", cpu); ARMCPU *armcpu = ARM_CPU(qemu_get_cpu(cpu)); @@ -410,6 +561,7 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) } if (!vmc->no_cpu_topology) { + fdt_add_l1cache_prop(vms, nodename, cpu); qemu_fdt_setprop_cell(ms->fdt, nodename, "phandle", qemu_fdt_alloc_phandle(ms->fdt)); } @@ -431,9 +583,8 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) * can contain several layers of clustering within a single physical * package and cluster nodes can be contained in parent cluster nodes. * - * Given that cluster is not yet supported in the vCPU topology, - * we currently generate one cluster node within each socket node - * by default. + * Note: currently we only support one layer of clustering within + * each physical package. 
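+     *
+     * A concrete sketch (assuming -smp 16,sockets=2,clusters=2,cores=2,
+     * threads=2): the paths generated below run from
+     * /cpus/cpu-map/socket0/cluster0/core0/thread0 for cpu 0 up to
+     * /cpus/cpu-map/socket1/cluster1/core1/thread1 for cpu 15.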
*/ qemu_fdt_add_subnode(ms->fdt, "/cpus/cpu-map"); @@ -443,14 +594,16 @@ static void fdt_add_cpu_nodes(const VirtMachineState *vms) if (ms->smp.threads > 1) { map_path = g_strdup_printf( - "/cpus/cpu-map/socket%d/cluster0/core%d/thread%d", - cpu / (ms->smp.cores * ms->smp.threads), + "/cpus/cpu-map/socket%d/cluster%d/core%d/thread%d", + cpu / (ms->smp.clusters * ms->smp.cores * ms->smp.threads), + (cpu / (ms->smp.cores * ms->smp.threads)) % ms->smp.clusters, (cpu / ms->smp.threads) % ms->smp.cores, cpu % ms->smp.threads); } else { map_path = g_strdup_printf( - "/cpus/cpu-map/socket%d/cluster0/core%d", - cpu / ms->smp.cores, + "/cpus/cpu-map/socket%d/cluster%d/core%d", + cpu / (ms->smp.clusters * ms->smp.cores), + (cpu / ms->smp.cores) % ms->smp.clusters, cpu % ms->smp.cores); } qemu_fdt_add_path(ms->fdt, map_path); @@ -606,6 +759,7 @@ static void fdt_add_pmu_nodes(const VirtMachineState *vms) static inline DeviceState *create_acpi_ged(VirtMachineState *vms) { DeviceState *dev; + AcpiDeviceIfClass *adevc; MachineState *ms = MACHINE(vms); int irq = vms->irqmap[VIRT_ACPI_GED]; uint32_t event = ACPI_GED_PWR_DOWN_EVT; @@ -618,11 +772,20 @@ static inline DeviceState *create_acpi_ged(VirtMachineState *vms) event |= ACPI_GED_NVDIMM_HOTPLUG_EVT; } + if (vms->cpu_hotplug_enabled) { + event |= ACPI_GED_CPU_HOTPLUG_EVT; + } + dev = qdev_new(TYPE_ACPI_GED); qdev_prop_set_uint32(dev, "ged-event", event); + adevc = ACPI_DEVICE_IF_GET_CLASS(dev); + adevc->madt_cpu = virt_madt_cpu_entry; + adevc->cpu_cppc = virt_acpi_dsdt_cpu_cppc; + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, vms->memmap[VIRT_ACPI_GED].base); sysbus_mmio_map(SYS_BUS_DEVICE(dev), 1, vms->memmap[VIRT_PCDIMM_ACPI].base); + sysbus_mmio_map(SYS_BUS_DEVICE(dev), 3, vms->memmap[VIRT_CPU_ACPI].base); sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, qdev_get_gpio_in(vms->gic, irq)); sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); @@ -678,6 +841,54 @@ static void create_v2m(VirtMachineState *vms) vms->msi_controller = VIRT_MSI_CTRL_GICV2M; } +static void connect_gic_cpu_irqs(VirtMachineState *vms, int i) +{ + DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); + SysBusDevice *gicbusdev = SYS_BUS_DEVICE(vms->gic); + int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; + int num_cpus = object_property_get_uint(OBJECT(vms->gic), "num-cpu", NULL); + int gic_type = vms->gic_version; + int irq; + /* Mapping from the output timer irq lines from the CPU to the + * GIC PPI inputs we use for the virt board. 
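+     *
+     * Worked example (assuming the usual virt values NUM_IRQS = 256,
+     * GIC_INTERNAL = 32 and GIC_NR_SGIS = 16): for CPU 1 this gives
+     * ppibase = 256 + 1 * 32 + 16 = 304, so e.g. its virtual timer PPI
+     * is wired to GIC input 304 + ARCH_TIMER_VIRT_IRQ.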
+     */
+    const int timer_irq[] = {
+        [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ,
+        [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ,
+        [GTIMER_HYP]  = ARCH_TIMER_NS_EL2_IRQ,
+        [GTIMER_SEC]  = ARCH_TIMER_S_EL1_IRQ,
+    };
+
+    for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) {
+        qdev_connect_gpio_out(cpudev, irq,
+                              qdev_get_gpio_in(vms->gic,
+                                               ppibase + timer_irq[irq]));
+    }
+
+    if (gic_type == 3) {
+        qemu_irq irq = qdev_get_gpio_in(vms->gic,
+                                        ppibase + ARCH_GIC_MAINT_IRQ);
+        qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt",
+                                    0, irq);
+    } else if (vms->virt) {
+        qemu_irq irq = qdev_get_gpio_in(vms->gic,
+                                        ppibase + ARCH_GIC_MAINT_IRQ);
+        sysbus_connect_irq(gicbusdev, i + 4 * num_cpus, irq);
+    }
+
+    qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0,
+                                qdev_get_gpio_in(vms->gic, ppibase
+                                                 + VIRTUAL_PMU_IRQ));
+
+    sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ));
+    sysbus_connect_irq(gicbusdev, i + num_cpus,
+                       qdev_get_gpio_in(cpudev, ARM_CPU_FIQ));
+    sysbus_connect_irq(gicbusdev, i + 2 * num_cpus,
+                       qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ));
+    sysbus_connect_irq(gicbusdev, i + 3 * num_cpus,
+                       qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ));
+}
+
 static void create_gic(VirtMachineState *vms, MemoryRegion *mem)
 {
     MachineState *ms = MACHINE(vms);
@@ -685,14 +896,22 @@
     SysBusDevice *gicbusdev;
     const char *gictype;
     int type = vms->gic_version, i;
+    /* The max number of CPUs supported by the GIC */
+    unsigned int num_cpus = ms->smp.cpus;
+    /* The number of CPUs present before boot */
     unsigned int smp_cpus = ms->smp.cpus;
     uint32_t nb_redist_regions = 0;
 
+    if (vms->cpu_hotplug_enabled) {
+        num_cpus = ms->smp.max_cpus;
+    }
+    assert(num_cpus >= smp_cpus);
+
     gictype = (type == 3) ? gicv3_class_name() : gic_class_name();
     vms->gic = qdev_new(gictype);
     qdev_prop_set_uint32(vms->gic, "revision", type);
-    qdev_prop_set_uint32(vms->gic, "num-cpu", smp_cpus);
+    qdev_prop_set_uint32(vms->gic, "num-cpu", num_cpus);
     /* Note that the num-irq property counts both internal and external
      * interrupts; there are always 32 of the former (mandated by GIC spec).
      */
@@ -704,7 +923,7 @@
     if (type == 3) {
         uint32_t redist0_capacity =
                     vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE;
-        uint32_t redist0_count = MIN(smp_cpus, redist0_capacity);
+        uint32_t redist0_count = MIN(num_cpus, redist0_capacity);
 
         nb_redist_regions = virt_gicv3_redist_region_count(vms);
 
@@ -725,7 +944,7 @@
                 vms->memmap[VIRT_HIGH_GIC_REDIST2].size / GICV3_REDIST_SIZE;
 
             qdev_prop_set_uint32(vms->gic, "redist-region-count[1]",
-                MIN(smp_cpus - redist0_count, redist1_capacity));
+                MIN(num_cpus - redist0_count, redist1_capacity));
         }
     } else {
         if (!kvm_irqchip_in_kernel()) {
@@ -752,50 +971,14 @@
 
     /* Wire the outputs from each CPU's generic timer and the GICv3
      * maintenance interrupt signal to the appropriate GIC PPI inputs,
-     * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's inputs.
+     * and the GIC's IRQ/FIQ/VIRQ/VFIQ interrupt outputs to the CPU's
+     * inputs.
+     *
+     * The IRQs of any remaining (not yet present) CPUs will be connected
+     * during hotplug.
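+     *
+     * For a hotplugged CPU n, virt_cpu_plug() further below performs,
+     * in sketch form:
+     *   agcc->cpu_hotplug_realize(gicv3, n);  (extend per-CPU GIC state)
+     *   connect_gic_cpu_irqs(vms, n);         (wire the timer/PMU/maintenance
+     *                                          PPIs exactly as done here)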
*/ for (i = 0; i < smp_cpus; i++) { - DeviceState *cpudev = DEVICE(qemu_get_cpu(i)); - int ppibase = NUM_IRQS + i * GIC_INTERNAL + GIC_NR_SGIS; - int irq; - /* Mapping from the output timer irq lines from the CPU to the - * GIC PPI inputs we use for the virt board. - */ - const int timer_irq[] = { - [GTIMER_PHYS] = ARCH_TIMER_NS_EL1_IRQ, - [GTIMER_VIRT] = ARCH_TIMER_VIRT_IRQ, - [GTIMER_HYP] = ARCH_TIMER_NS_EL2_IRQ, - [GTIMER_SEC] = ARCH_TIMER_S_EL1_IRQ, - }; - - for (irq = 0; irq < ARRAY_SIZE(timer_irq); irq++) { - qdev_connect_gpio_out(cpudev, irq, - qdev_get_gpio_in(vms->gic, - ppibase + timer_irq[irq])); - } - - if (type == 3) { - qemu_irq irq = qdev_get_gpio_in(vms->gic, - ppibase + ARCH_GIC_MAINT_IRQ); - qdev_connect_gpio_out_named(cpudev, "gicv3-maintenance-interrupt", - 0, irq); - } else if (vms->virt) { - qemu_irq irq = qdev_get_gpio_in(vms->gic, - ppibase + ARCH_GIC_MAINT_IRQ); - sysbus_connect_irq(gicbusdev, i + 4 * smp_cpus, irq); - } - - qdev_connect_gpio_out_named(cpudev, "pmu-interrupt", 0, - qdev_get_gpio_in(vms->gic, ppibase - + VIRTUAL_PMU_IRQ)); - - sysbus_connect_irq(gicbusdev, i, qdev_get_gpio_in(cpudev, ARM_CPU_IRQ)); - sysbus_connect_irq(gicbusdev, i + smp_cpus, - qdev_get_gpio_in(cpudev, ARM_CPU_FIQ)); - sysbus_connect_irq(gicbusdev, i + 2 * smp_cpus, - qdev_get_gpio_in(cpudev, ARM_CPU_VIRQ)); - sysbus_connect_irq(gicbusdev, i + 3 * smp_cpus, - qdev_get_gpio_in(cpudev, ARM_CPU_VFIQ)); + connect_gic_cpu_irqs(vms, i); } fdt_add_gic_node(vms); @@ -855,6 +1038,16 @@ static void create_uart(const VirtMachineState *vms, int uart, g_free(nodename); } +static void create_cpufreq(const VirtMachineState *vms, MemoryRegion *mem) +{ + hwaddr base = vms->memmap[VIRT_CPUFREQ].base; + DeviceState *dev = qdev_new("cpufreq"); + SysBusDevice *s = SYS_BUS_DEVICE(dev); + + sysbus_realize_and_unref(s, &error_fatal); + memory_region_add_subregion(mem, base, sysbus_mmio_get_region(s, 0)); +} + static void create_rtc(const VirtMachineState *vms) { char *nodename; @@ -884,6 +1077,7 @@ static void virt_powerdown_req(Notifier *n, void *opaque) { VirtMachineState *s = container_of(n, VirtMachineState, powerdown_notifier); + qemu_log("send powerdown to vm.\n"); if (s->acpi_dev) { acpi_send_event(s->acpi_dev, ACPI_POWER_DOWN_STATUS); } else { @@ -1343,14 +1537,15 @@ static void create_smmu(const VirtMachineState *vms, static void create_virtio_iommu_dt_bindings(VirtMachineState *vms) { - const char compat[] = "virtio,pci-iommu"; + const char compat[] = "virtio,pci-iommu\0pci1af4,1057"; uint16_t bdf = vms->virtio_iommu_bdf; MachineState *ms = MACHINE(vms); char *node; vms->iommu_phandle = qemu_fdt_alloc_phandle(ms->fdt); - node = g_strdup_printf("%s/virtio_iommu@%d", vms->pciehb_nodename, bdf); + node = g_strdup_printf("%s/virtio_iommu@%x,%x", vms->pciehb_nodename, + PCI_SLOT(bdf), PCI_FUNC(bdf)); qemu_fdt_add_subnode(ms->fdt, node); qemu_fdt_setprop(ms->fdt, node, "compatible", compat, sizeof(compat)); qemu_fdt_setprop_sized_cells(ms->fdt, node, "reg", @@ -1673,6 +1868,19 @@ static void virt_set_memmap(VirtMachineState *vms) vms->memmap[i] = base_memmap[i]; } + /* fix VIRT_MEM range */ + if (object_property_find(OBJECT(current_machine), "kvm-type")) { + g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), + "kvm-type", &error_abort); + + if (!strcmp(kvm_type, "cvm")) { + vms->memmap[VIRT_MEM].base = 3 * GiB; + vms->memmap[VIRT_MEM].size = ms->ram_size; + info_report("[qemu] fix VIRT_MEM range 0x%llx - 0x%llx\n", (unsigned long long)(vms->memmap[VIRT_MEM].base), + (unsigned 
long long)(vms->memmap[VIRT_MEM].base + ms->ram_size)); + } + } + if (ms->ram_slots > ACPI_MAX_RAM_SLOTS) { error_report("unsupported number of memory slots: %"PRIu64, ms->ram_slots); @@ -1886,18 +2094,17 @@ static void machvirt_init(MachineState *machine) { VirtMachineState *vms = VIRT_MACHINE(machine); VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(machine); + MachineState *ms = MACHINE(machine); MachineClass *mc = MACHINE_GET_CLASS(machine); const CPUArchIdList *possible_cpus; MemoryRegion *sysmem = get_system_memory(); - MemoryRegion *secure_sysmem = NULL; - MemoryRegion *tag_sysmem = NULL; - MemoryRegion *secure_tag_sysmem = NULL; int n, virt_max_cpus; bool firmware_loaded; bool aarch64 = true; bool has_ged = !vmc->no_ged; unsigned int smp_cpus = machine->smp.cpus; unsigned int max_cpus = machine->smp.max_cpus; + ObjectClass *cpu_class; /* * In accelerated mode, the memory map is computed earlier in kvm_type() @@ -1945,7 +2152,7 @@ static void machvirt_init(MachineState *machine) */ if (vms->secure && firmware_loaded) { vms->psci_conduit = QEMU_PSCI_CONDUIT_DISABLED; - } else if (vms->virt) { + } else if (vms->virt || virtcca_cvm_enabled()) { vms->psci_conduit = QEMU_PSCI_CONDUIT_SMC; } else { vms->psci_conduit = QEMU_PSCI_CONDUIT_HVC; @@ -1970,6 +2177,13 @@ static void machvirt_init(MachineState *machine) exit(1); } + if (vms->secure && (kvm_enabled() || hvf_enabled())) { + error_report("mach-virt: %s does not support providing " + "Security extensions (TrustZone) to the guest CPU", + kvm_enabled() ? "KVM" : "HVF"); + exit(1); + } + if (vms->virt && (kvm_enabled() || hvf_enabled())) { error_report("mach-virt: %s does not support providing " "Virtualization extensions to the guest CPU", @@ -1984,7 +2198,20 @@ static void machvirt_init(MachineState *machine) exit(1); } + if (virtcca_cvm_enabled()) { + int ret = kvm_arm_tmm_init(machine->cgs, &error_fatal); + if (ret != 0) { + error_report("fail to initialize TMM"); + exit(1); + } + } create_fdt(vms); + qemu_log("cpu init start\n"); + + cpu_class = object_class_by_name(ms->cpu_type); + vms->cpu_hotplug_enabled = has_ged && firmware_loaded && + virt_is_acpi_enabled(vms) && vms->gic_version == 3 && + !!object_class_dynamic_cast(cpu_class, TYPE_AARCH64_CPU); possible_cpus = mc->possible_cpu_arch_ids(machine); assert(possible_cpus->len == max_cpus); @@ -1992,105 +2219,23 @@ static void machvirt_init(MachineState *machine) Object *cpuobj; CPUState *cs; + if (kvm_enabled() && vms->cpu_hotplug_enabled) { + if (kvm_create_parked_vcpu(n) < 0) { + error_report("mach-virt: Create KVM parked vCPU failed"); + exit(1); + } + } + if (n >= smp_cpus) { - break; + continue; } cpuobj = object_new(possible_cpus->cpus[n].type); - object_property_set_int(cpuobj, "mp-affinity", - possible_cpus->cpus[n].arch_id, NULL); + aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL); cs = CPU(cpuobj); cs->cpu_index = n; - numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj), - &error_fatal); - - aarch64 &= object_property_get_bool(cpuobj, "aarch64", NULL); - - if (!vms->secure) { - object_property_set_bool(cpuobj, "has_el3", false, NULL); - } - - if (!vms->virt && object_property_find(cpuobj, "has_el2")) { - object_property_set_bool(cpuobj, "has_el2", false, NULL); - } - - if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) { - object_property_set_int(cpuobj, "psci-conduit", vms->psci_conduit, - NULL); - - /* Secondary CPUs start in PSCI powered-down state */ - if (n > 0) { - object_property_set_bool(cpuobj, "start-powered-off", true, - NULL); 
- } - } - - if (vmc->kvm_no_adjvtime && - object_property_find(cpuobj, "kvm-no-adjvtime")) { - object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL); - } - - if (vmc->no_kvm_steal_time && - object_property_find(cpuobj, "kvm-steal-time")) { - object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL); - } - - if (vmc->no_pmu && object_property_find(cpuobj, "pmu")) { - object_property_set_bool(cpuobj, "pmu", false, NULL); - } - - if (object_property_find(cpuobj, "reset-cbar")) { - object_property_set_int(cpuobj, "reset-cbar", - vms->memmap[VIRT_CPUPERIPHS].base, - &error_abort); - } - - object_property_set_link(cpuobj, "memory", OBJECT(sysmem), - &error_abort); - if (vms->secure) { - object_property_set_link(cpuobj, "secure-memory", - OBJECT(secure_sysmem), &error_abort); - } - - if (vms->mte) { - /* Create the memory region only once, but link to all cpus. */ - if (!tag_sysmem) { - /* - * The property exists only if MemTag is supported. - * If it is, we must allocate the ram to back that up. - */ - if (!object_property_find(cpuobj, "tag-memory")) { - error_report("MTE requested, but not supported " - "by the guest CPU"); - exit(1); - } - - tag_sysmem = g_new(MemoryRegion, 1); - memory_region_init(tag_sysmem, OBJECT(machine), - "tag-memory", UINT64_MAX / 32); - - if (vms->secure) { - secure_tag_sysmem = g_new(MemoryRegion, 1); - memory_region_init(secure_tag_sysmem, OBJECT(machine), - "secure-tag-memory", UINT64_MAX / 32); - - /* As with ram, secure-tag takes precedence over tag. */ - memory_region_add_subregion_overlap(secure_tag_sysmem, 0, - tag_sysmem, -1); - } - } - - object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem), - &error_abort); - if (vms->secure) { - object_property_set_link(cpuobj, "secure-tag-memory", - OBJECT(secure_tag_sysmem), - &error_abort); - } - } - qdev_realize(DEVICE(cpuobj), NULL, &error_fatal); object_unref(cpuobj); } @@ -2114,6 +2259,8 @@ static void machvirt_init(MachineState *machine) create_uart(vms, VIRT_UART, sysmem, serial_hd(0)); + create_cpufreq(vms, sysmem); + if (vms->secure) { create_secure_ram(vms, secure_sysmem, secure_tag_sysmem); create_uart(vms, VIRT_SECURE_UART, secure_sysmem, serial_hd(1)); @@ -2168,7 +2315,7 @@ static void machvirt_init(MachineState *machine) } vms->bootinfo.ram_size = machine->ram_size; - vms->bootinfo.nb_cpus = smp_cpus; + vms->bootinfo.nb_cpus = vms->cpu_hotplug_enabled ? 
max_cpus : smp_cpus;
     vms->bootinfo.board_id = -1;
     vms->bootinfo.loader_start = vms->memmap[VIRT_MEM].base;
     vms->bootinfo.get_dtb = machvirt_dtb;
@@ -2421,6 +2568,7 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
     int n;
     unsigned int max_cpus = ms->smp.max_cpus;
     VirtMachineState *vms = VIRT_MACHINE(ms);
+    ARMCPUTopoInfo topo;

     if (ms->possible_cpus) {
         assert(ms->possible_cpus->len == max_cpus);
@@ -2432,10 +2580,19 @@ static const CPUArchIdList *virt_possible_cpu_arch_ids(MachineState *ms)
     ms->possible_cpus->len = max_cpus;
     for (n = 0; n < ms->possible_cpus->len; n++) {
         ms->possible_cpus->cpus[n].type = ms->cpu_type;
+        ms->possible_cpus->cpus[n].vcpus_count = 1;
         ms->possible_cpus->cpus[n].arch_id =
             virt_cpu_mp_affinity(vms, n);
+
+        topo_ids_from_idx(n, ms->smp.clusters, ms->smp.cores, ms->smp.threads, &topo);
+        ms->possible_cpus->cpus[n].props.has_socket_id = true;
+        ms->possible_cpus->cpus[n].props.socket_id = topo.pkg_id;
+        ms->possible_cpus->cpus[n].props.has_cluster_id = true;
+        ms->possible_cpus->cpus[n].props.cluster_id = topo.cluster_id;
+        ms->possible_cpus->cpus[n].props.has_core_id = true;
+        ms->possible_cpus->cpus[n].props.core_id = topo.core_id;
         ms->possible_cpus->cpus[n].props.has_thread_id = true;
-        ms->possible_cpus->cpus[n].props.thread_id = n;
+        ms->possible_cpus->cpus[n].props.thread_id = topo.smt_id;
     }
     return ms->possible_cpus;
 }
@@ -2483,6 +2640,288 @@ static void virt_memory_plug(HotplugHandler *hotplug_dev,
                              dev, &error_abort);
 }

+static void virt_cpu_pre_plug(HotplugHandler *hotplug_dev,
+                              DeviceState *dev, Error **errp)
+{
+    ARMCPUTopoInfo topo;
+    Object *cpuobj = OBJECT(dev);
+    CPUState *cs = CPU(dev);
+    ARMCPU *cpu = ARM_CPU(dev);
+    MachineState *ms = MACHINE(hotplug_dev);
+    MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev);
+    VirtMachineState *vms = VIRT_MACHINE(hotplug_dev);
+    VirtMachineClass *vmc = VIRT_MACHINE_GET_CLASS(hotplug_dev);
+    const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms);
+    const CPUArchId *cpu_slot = NULL;
+    MemoryRegion *sysmem = get_system_memory();
+    int smp_clusters = ms->smp.clusters;
+    int smp_cores = ms->smp.cores;
+    int smp_threads = ms->smp.threads;
+
+    if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
+        error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
+                   ms->cpu_type);
+        return;
+    }
+
+    /*
+     * If the CPU index is not set, derive it from the
+     * socket/cluster/core/thread properties.
+     */
+    if (cs->cpu_index == UNASSIGNED_CPU_INDEX) {
+        int max_socket = ms->smp.max_cpus / smp_threads / smp_cores / smp_clusters;
+        if (cpu->socket_id < 0 || cpu->socket_id >= max_socket) {
+            error_setg(errp, "Invalid CPU socket-id: %u must be in range 0:%u",
+                       cpu->socket_id, max_socket - 1);
+            return;
+        }
+        if (cpu->cluster_id < 0 || cpu->cluster_id >= smp_clusters) {
+            error_setg(errp, "Invalid CPU cluster-id: %u must be in range 0:%u",
+                       cpu->cluster_id, smp_clusters - 1);
+            return;
+        }
+        if (cpu->core_id < 0 || cpu->core_id >= smp_cores) {
+            error_setg(errp, "Invalid CPU core-id: %u must be in range 0:%u",
+                       cpu->core_id, smp_cores - 1);
+            return;
+        }
+        if (cpu->thread_id < 0 || cpu->thread_id >= smp_threads) {
+            error_setg(errp, "Invalid CPU thread-id: %u must be in range 0:%u",
+                       cpu->thread_id, smp_threads - 1);
+            return;
+        }
+
+        topo.pkg_id = cpu->socket_id;
+        topo.cluster_id = cpu->cluster_id;
+        topo.core_id = cpu->core_id;
+        topo.smt_id = cpu->thread_id;
+        cs->cpu_index = idx_from_topo_ids(smp_clusters, smp_cores, smp_threads, &topo);
+    }
+
+    /* Some hotplug capability checks */
+    if (cs->cpu_index >= ms->smp.cpus) {
+        if (!vms->acpi_dev) {
+            error_setg(errp, "CPU cold/hot plug is disabled: "
+                             "missing ACPI device.");
+            return;
+        }
+        if (!vms->cpu_hotplug_enabled) {
+            error_setg(errp, "CPU cold/hot plug is disabled: "
+                             "an AArch64 CPU and GICv3 are required.");
+            return;
+        }
+    }
+
+    /*
+     * If the 'address' properties socket-id/cluster-id/core-id/thread-id
+     * are not set, set them so that machine_query_hotpluggable_cpus shows
+     * the correct values.
+     */
+    topo_ids_from_idx(cs->cpu_index, smp_clusters, smp_cores, smp_threads, &topo);
+    if (cpu->socket_id != -1 && cpu->socket_id != topo.pkg_id) {
+        error_setg(errp, "property socket-id: %u doesn't match set idx:"
+                   " 0x%x (socket-id: %u)", cpu->socket_id, cs->cpu_index, topo.pkg_id);
+        return;
+    }
+    cpu->socket_id = topo.pkg_id;
+
+    if (cpu->cluster_id != -1 && cpu->cluster_id != topo.cluster_id) {
+        error_setg(errp, "property cluster-id: %u doesn't match set idx:"
+                   " 0x%x (cluster-id: %u)", cpu->cluster_id, cs->cpu_index, topo.cluster_id);
+        return;
+    }
+    cpu->cluster_id = topo.cluster_id;
+
+    if (cpu->core_id != -1 && cpu->core_id != topo.core_id) {
+        error_setg(errp, "property core-id: %u doesn't match set idx:"
+                   " 0x%x (core-id: %u)", cpu->core_id, cs->cpu_index, topo.core_id);
+        return;
+    }
+    cpu->core_id = topo.core_id;
+
+    if (cpu->thread_id != -1 && cpu->thread_id != topo.smt_id) {
+        error_setg(errp, "property thread-id: %u doesn't match set idx:"
+                   " 0x%x (thread-id: %u)", cpu->thread_id, cs->cpu_index, topo.smt_id);
+        return;
+    }
+    cpu->thread_id = topo.smt_id;
+
+    /* Initialize some properties */
+
+    object_property_set_int(cpuobj, "mp-affinity",
+                            possible_cpus->cpus[cs->cpu_index].arch_id, NULL);
+
+    cpu_slot = &possible_cpus->cpus[cs->cpu_index];
+    if (cpu_slot->cpu) {
+        error_setg(errp, "CPU[%d] with mp_affinity %" PRIu64 " exists",
+                   cs->cpu_index, cpu->mp_affinity);
+        return;
+    }
+
+    numa_cpu_pre_plug(&possible_cpus->cpus[cs->cpu_index], DEVICE(cpuobj),
+                      &error_fatal);
+
+    if (!vms->secure) {
+        object_property_set_bool(cpuobj, "has_el3", false, NULL);
+    }
+
+    if (!vms->virt && object_property_find(cpuobj, "has_el2")) {
+        object_property_set_bool(cpuobj, "has_el2", false, NULL);
+    }
+
+    if (vms->psci_conduit != QEMU_PSCI_CONDUIT_DISABLED) {
+        object_property_set_int(cpuobj, "psci-conduit", vms->psci_conduit,
+                                NULL);
+
+        /* Secondary CPUs start in PSCI powered-down state */
+        if (cs->cpu_index > 0) {
+            object_property_set_bool(cpuobj, "start-powered-off", true,
+                                     NULL);
+        }
+    }
+
+    if (vmc->kvm_no_adjvtime &&
+        object_property_find(cpuobj, "kvm-no-adjvtime")) {
+        object_property_set_bool(cpuobj, "kvm-no-adjvtime", true, NULL);
+    }
+
+    if (vmc->no_kvm_steal_time &&
+        object_property_find(cpuobj, "kvm-steal-time")) {
+        object_property_set_bool(cpuobj, "kvm-steal-time", false, NULL);
+    }
+
+    if (vmc->no_pmu && object_property_find(cpuobj, "pmu")) {
+        object_property_set_bool(cpuobj, "pmu", false, NULL);
+    }
+
+    if (object_property_find(cpuobj, "reset-cbar")) {
+        object_property_set_int(cpuobj, "reset-cbar",
+                                vms->memmap[VIRT_CPUPERIPHS].base,
+                                &error_abort);
+    }
+
+    object_property_set_link(cpuobj, "memory", OBJECT(sysmem),
+                             &error_abort);
+    if (vms->secure) {
+        object_property_set_link(cpuobj, "secure-memory",
+                                 OBJECT(secure_sysmem), &error_abort);
+    }
+
+    if (vms->mte) {
+        /* Create the memory region only once, but link to all cpus. */
+        if (!tag_sysmem) {
+            /*
+             * The property exists only if MemTag is supported.
+             * If it is, we must allocate the ram to back that up.
+ */ + if (!object_property_find(cpuobj, "tag-memory")) { + error_report("MTE requested, but not supported " + "by the guest CPU"); + exit(1); + } + + tag_sysmem = g_new(MemoryRegion, 1); + memory_region_init(tag_sysmem, OBJECT(ms), + "tag-memory", UINT64_MAX / 32); + + if (vms->secure) { + secure_tag_sysmem = g_new(MemoryRegion, 1); + memory_region_init(secure_tag_sysmem, OBJECT(ms), + "secure-tag-memory", UINT64_MAX / 32); + + /* As with ram, secure-tag takes precedence over tag. */ + memory_region_add_subregion_overlap(secure_tag_sysmem, 0, + tag_sysmem, -1); + } + } + + object_property_set_link(cpuobj, "tag-memory", OBJECT(tag_sysmem), + &error_abort); + if (vms->secure) { + object_property_set_link(cpuobj, "secure-tag-memory", + OBJECT(secure_tag_sysmem), + &error_abort); + } + } +} + +static void virt_cpu_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + CPUArchId *cpu_slot; + CPUState *cs = CPU(dev); + int ncpu = cs->cpu_index; + MachineState *ms = MACHINE(hotplug_dev); + VirtMachineState *vms = VIRT_MACHINE(hotplug_dev); + bool pmu = object_property_get_bool(OBJECT(first_cpu), "pmu", NULL); + bool steal_time = object_property_get_bool(OBJECT(first_cpu), + "kvm-steal-time", NULL); + GICv3State *gicv3; + ARMGICv3CommonClass *agcc; + Error *local_err = NULL; + + /* For CPU that is cold/hot plugged */ + if (ncpu >= ms->smp.cpus) { + if (dev->hotplugged) { + pause_all_vcpus(); + } + + /* Realize GIC related parts of CPU */ + assert(vms->gic_version == 3); + gicv3 = ARM_GICV3_COMMON(vms->gic); + agcc = ARM_GICV3_COMMON_GET_CLASS(gicv3); + agcc->cpu_hotplug_realize(gicv3, ncpu); + connect_gic_cpu_irqs(vms, ncpu); + + /* Init PMU and steal_time part */ + if (kvm_enabled()) { + hwaddr pvtime_reg_base = vms->memmap[VIRT_PVTIME].base; + + if (pmu) { + assert(arm_feature(&ARM_CPU(cs)->env, ARM_FEATURE_PMU)); + if (kvm_irqchip_in_kernel()) { + kvm_arm_pmu_set_irq(cs, PPI(VIRTUAL_PMU_IRQ)); + } + kvm_arm_pmu_init(cs); + } + if (steal_time) { + kvm_arm_pvtime_init(cs, pvtime_reg_base + + ncpu * PVTIME_SIZE_PER_CPU); + } + } + + /* Register CPU reset and trigger it manually */ + cpu_synchronize_post_init(cs); + cpu_synchronize_state(cs); + cpu_hotplug_register_reset(ncpu); + cpu_hotplug_reset_manually(ncpu); + cpu_synchronize_post_reset(cs); + + if (dev->hotplugged) { + resume_all_vcpus(); + } + } + + if (dev->hotplugged && kvm_enabled()) { + resume_all_vcpus(); + } + + if (vms->acpi_dev) { + hotplug_handler_plug(HOTPLUG_HANDLER(vms->acpi_dev), dev, &local_err); + if (local_err) { + goto out; + } + } + + vms->boot_cpus++; + if (vms->fw_cfg) { + fw_cfg_modify_i16(vms->fw_cfg, FW_CFG_NB_CPUS, vms->boot_cpus); + } + + cpu_slot = &ms->possible_cpus->cpus[ncpu]; + cpu_slot->cpu = OBJECT(dev); +out: + error_propagate(errp, local_err); +} + static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, Error **errp) { @@ -2516,6 +2955,8 @@ static void virt_machine_device_pre_plug_cb(HotplugHandler *hotplug_dev, qdev_prop_set_uint32(dev, "len-reserved-regions", 1); qdev_prop_set_string(dev, "reserved-regions[0]", resv_prop_str); g_free(resv_prop_str); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + virt_cpu_pre_plug(hotplug_dev, dev, errp); } } @@ -2534,6 +2975,8 @@ static void virt_machine_device_plug_cb(HotplugHandler *hotplug_dev, } if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { virt_memory_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_CPU)) { + virt_cpu_plug(hotplug_dev, dev, errp); } if 
(object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { PCIDevice *pdev = PCI_DEVICE(dev); @@ -2614,7 +3057,8 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, MachineClass *mc = MACHINE_GET_CLASS(machine); if (device_is_dynamic_sysbus(mc, dev) || - (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM))) { + (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) || + (object_dynamic_cast(OBJECT(dev), TYPE_CPU))) { return HOTPLUG_HANDLER(machine); } if (object_dynamic_cast(OBJECT(dev), TYPE_VIRTIO_IOMMU_PCI)) { @@ -2634,6 +3078,15 @@ static HotplugHandler *virt_machine_get_hotplug_handler(MachineState *machine, static int virt_kvm_type(MachineState *ms, const char *type_str) { VirtMachineState *vms = VIRT_MACHINE(ms); + int virtcca_cvm_type = 0; + if (object_property_find(OBJECT(current_machine), "kvm-type")) { + g_autofree char *kvm_type = object_property_get_str(OBJECT(current_machine), + "kvm-type", &error_abort); + + if (!strcmp(kvm_type, "cvm")) { + virtcca_cvm_type = VIRTCCA_CVM_TYPE; + } + } int max_vm_pa_size, requested_pa_size; bool fixed_ipa; @@ -2663,7 +3116,9 @@ static int virt_kvm_type(MachineState *ms, const char *type_str) * the implicit legacy 40b IPA setting, in which case the kvm_type * must be 0. */ - return fixed_ipa ? 0 : requested_pa_size; + return strcmp(type_str, "cvm") == 0 ? + ((fixed_ipa ? 0 : requested_pa_size) | virtcca_cvm_type) : + (fixed_ipa ? 0 : requested_pa_size); } static void virt_machine_class_init(ObjectClass *oc, void *data) @@ -2694,6 +3149,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) mc->default_cpu_type = ARM_CPU_TYPE_NAME("cortex-a15"); mc->get_default_cpu_node_id = virt_get_default_cpu_node_id; mc->kvm_type = virt_kvm_type; + mc->has_hotpluggable_cpus = true; assert(!mc->get_hotplug_handler); mc->get_hotplug_handler = virt_machine_get_hotplug_handler; hc->pre_plug = virt_machine_device_pre_plug_cb; @@ -2701,6 +3157,7 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) hc->unplug_request = virt_machine_device_unplug_request_cb; hc->unplug = virt_machine_device_unplug_cb; mc->nvdimm_supported = true; + mc->smp_props.clusters_supported = true; mc->auto_enable_numa_with_memhp = true; mc->auto_enable_numa_with_memdev = true; mc->default_ram_id = "mach-virt.ram"; @@ -2784,6 +3241,19 @@ static void virt_machine_class_init(ObjectClass *oc, void *data) } +static char *virt_get_kvm_type(Object *obj, Error **errp G_GNUC_UNUSED) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + return g_strdup(vms->kvm_type); +} + +static void virt_set_kvm_type(Object *obj, const char *value, Error **errp G_GNUC_UNUSED) +{ + VirtMachineState *vms = VIRT_MACHINE(obj); + g_free(vms->kvm_type); + vms->kvm_type = g_strdup(value); +} + static void virt_instance_init(Object *obj) { VirtMachineState *vms = VIRT_MACHINE(obj); @@ -2835,6 +3305,9 @@ static void virt_instance_init(Object *obj) vms->oem_id = g_strndup(ACPI_BUILD_APPNAME6, 6); vms->oem_table_id = g_strndup(ACPI_BUILD_APPNAME8, 8); + + object_property_add_str(obj, "kvm-type", virt_get_kvm_type, virt_set_kvm_type); + object_property_set_description(obj, "kvm-type", "CVM or Normal VM"); } static const TypeInfo virt_machine_info = { @@ -2931,7 +3404,7 @@ static void virt_machine_4_1_options(MachineClass *mc) virt_machine_4_2_options(mc); compat_props_add(mc->compat_props, hw_compat_4_1, hw_compat_4_1_len); - vmc->no_ged = true; + vmc->no_ged = false; mc->auto_enable_numa_with_memhp = false; } DEFINE_VIRT_MACHINE(4, 1) diff --git a/hw/arm/xlnx-zynqmp.c 
b/hw/arm/xlnx-zynqmp.c index 1c52a575aadf095c8dec01cf5593d446de6d1770..2ffc6df70bb81653895d126039ee5730b62ec051 100644 --- a/hw/arm/xlnx-zynqmp.c +++ b/hw/arm/xlnx-zynqmp.c @@ -194,7 +194,7 @@ static void xlnx_zynqmp_create_rpu(MachineState *ms, XlnxZynqMPState *s, const char *boot_cpu, Error **errp) { int i; - int num_rpus = MIN(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS, + int num_rpus = MIN((int)(ms->smp.cpus - XLNX_ZYNQMP_NUM_APU_CPUS), XLNX_ZYNQMP_NUM_RPU_CPUS); if (num_rpus <= 0) { diff --git a/hw/audio/es1370.c b/hw/audio/es1370.c index 690458981471bd5ff7847ee8ca3cb3c0c8f6a212..72de110e0b7a30cfff844e14c5871baac8ee2309 100644 --- a/hw/audio/es1370.c +++ b/hw/audio/es1370.c @@ -702,8 +702,13 @@ static void es1370_transfer_audio (ES1370State *s, struct chan *d, int loop_sel, cnt += (transferred + d->leftover) >> 2; if (s->sctl & loop_sel) { - /* Bah, how stupid is that having a 0 represent true value? - i just spent few hours on this shit */ + /* + * loop_sel tells us which bit in the SCTL register to look at + * (either P1_LOOP_SEL, P2_LOOP_SEL or R1_LOOP_SEL). The sense + * of these bits is 0 for loop mode (set interrupt and keep recording + * when the sample count reaches zero) or 1 for stop mode (set + * interrupt and stop recording). + */ AUD_log ("es1370: warning", "non looping mode\n"); } else { diff --git a/hw/audio/intel-hda.c b/hw/audio/intel-hda.c index 8ce9df64e3ecf40e7ad1c39f25fbf48c84b040ce..78a47bc08c15825abdf27625b3eab424b48fd7e0 100644 --- a/hw/audio/intel-hda.c +++ b/hw/audio/intel-hda.c @@ -335,7 +335,7 @@ static void intel_hda_corb_run(IntelHDAState *d) rp = (d->corb_rp + 1) & 0xff; addr = intel_hda_addr(d->corb_lbase, d->corb_ubase); - verb = ldl_le_pci_dma(&d->pci, addr + 4*rp); + ldl_le_pci_dma(&d->pci, addr + 4 * rp, &verb, MEMTXATTRS_UNSPECIFIED); d->corb_rp = rp; dprint(d, 2, "%s: [rp 0x%x] verb 0x%08x\n", __func__, rp, verb); @@ -345,10 +345,12 @@ static void intel_hda_corb_run(IntelHDAState *d) static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t response) { + const MemTxAttrs attrs = { .memory = true }; HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus); IntelHDAState *d = container_of(bus, IntelHDAState, codecs); hwaddr addr; uint32_t wp, ex; + MemTxResult res = MEMTX_OK; if (d->ics & ICH6_IRS_BUSY) { dprint(d, 2, "%s: [irr] response 0x%x, cad 0x%x\n", @@ -367,8 +369,12 @@ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t res ex = (solicited ? 
0 : (1 << 4)) | dev->cad; wp = (d->rirb_wp + 1) & 0xff; addr = intel_hda_addr(d->rirb_lbase, d->rirb_ubase); - stl_le_pci_dma(&d->pci, addr + 8*wp, response); - stl_le_pci_dma(&d->pci, addr + 8*wp + 4, ex); + res |= stl_le_pci_dma(&d->pci, addr + 8 * wp, response, attrs); + res |= stl_le_pci_dma(&d->pci, addr + 8 * wp + 4, ex, attrs); + if (res != MEMTX_OK && (d->rirb_ctl & ICH6_RBCTL_OVERRUN_EN)) { + d->rirb_sts |= ICH6_RBSTS_OVERRUN; + intel_hda_update_irq(d); + } d->rirb_wp = wp; dprint(d, 2, "%s: [wp 0x%x] response 0x%x, extra 0x%x\n", @@ -394,6 +400,7 @@ static void intel_hda_response(HDACodecDevice *dev, bool solicited, uint32_t res static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, uint8_t *buf, uint32_t len) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; HDACodecBus *bus = HDA_BUS(dev->qdev.parent_bus); IntelHDAState *d = container_of(bus, IntelHDAState, codecs); hwaddr addr; @@ -427,7 +434,8 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, dprint(d, 3, "dma: entry %d, pos %d/%d, copy %d\n", st->be, st->bp, st->bpl[st->be].len, copy); - pci_dma_rw(&d->pci, st->bpl[st->be].addr + st->bp, buf, copy, !output); + pci_dma_rw(&d->pci, st->bpl[st->be].addr + st->bp, buf, copy, !output, + attrs); st->lpib += copy; st->bp += copy; buf += copy; @@ -450,7 +458,7 @@ static bool intel_hda_xfer(HDACodecDevice *dev, uint32_t stnr, bool output, if (d->dp_lbase & 0x01) { s = st - d->st; addr = intel_hda_addr(d->dp_lbase & ~0x01, d->dp_ubase); - stl_le_pci_dma(&d->pci, addr + 8*s, st->lpib); + stl_le_pci_dma(&d->pci, addr + 8 * s, st->lpib, attrs); } dprint(d, 3, "dma: --\n"); @@ -578,7 +586,7 @@ static void intel_hda_set_st_ctl(IntelHDAState *d, const IntelHDAReg *reg, uint3 if (st->ctl & 0x01) { /* reset */ dprint(d, 1, "st #%d: reset\n", reg->stream); - st->ctl = SD_STS_FIFO_READY << 24; + st->ctl = SD_STS_FIFO_READY << 24 | SD_CTL_STREAM_RESET; } if ((st->ctl & 0x02) != (old & 0x02)) { uint32_t stnr = (st->ctl >> 20) & 0x0f; diff --git a/hw/avr/atmega.c b/hw/avr/atmega.c index 0608e2d475ee3102a2c25ad805a65acd27cabb38..a34803e64272ba8f1ae1e69dbcb1c3ebc1094b62 100644 --- a/hw/avr/atmega.c +++ b/hw/avr/atmega.c @@ -233,7 +233,7 @@ static void atmega_realize(DeviceState *dev, Error **errp) /* CPU */ object_initialize_child(OBJECT(dev), "cpu", &s->cpu, mc->cpu_type); - object_property_set_bool(OBJECT(&s->cpu), "realized", true, &error_abort); + qdev_realize(DEVICE(&s->cpu), NULL, &error_abort); cpudev = DEVICE(&s->cpu); /* SRAM */ diff --git a/hw/block/block.c b/hw/block/block.c index d47ebf005adaaac2e3c5b5218e01c331b66a4335..2cfc93a68e58e39bc49a52b03ed9bdefc05a2435 100644 --- a/hw/block/block.c +++ b/hw/block/block.c @@ -206,6 +206,16 @@ bool blkconf_apply_backend_options(BlockConf *conf, bool readonly, blk_set_enable_write_cache(blk, wce); blk_set_on_error(blk, rerror, werror); + if (rerror == BLOCKDEV_ON_ERROR_RETRY || + werror == BLOCKDEV_ON_ERROR_RETRY) { + if (conf->retry_interval >= 0) { + blk_set_on_error_retry_interval(blk, conf->retry_interval); + } + if (conf->retry_timeout >= 0) { + blk_set_on_error_retry_timeout(blk, conf->retry_timeout); + } + } + return true; } @@ -214,9 +224,16 @@ bool blkconf_geometry(BlockConf *conf, int *ptrans, Error **errp) { if (!conf->cyls && !conf->heads && !conf->secs) { + AioContext *ctx = blk_get_aio_context(conf->blk); + + /* Callers may not expect this function to dispatch aio handlers, so + * disable external aio such as guest device emulation. 
+ */ + aio_disable_external(ctx); hd_geometry_guess(conf->blk, &conf->cyls, &conf->heads, &conf->secs, ptrans); + aio_enable_external(ctx); } else if (ptrans && *ptrans == BIOS_ATA_TRANSLATION_AUTO) { *ptrans = hd_bios_chs_auto_trans(conf->cyls, conf->heads, conf->secs); } diff --git a/hw/block/dataplane/virtio-blk.c b/hw/block/dataplane/virtio-blk.c index ee5a5352dc8a76b5f1525914d6a0b0b7182121ac..5f0de7da1e3f417e796503cbceab170af3ab4ed1 100644 --- a/hw/block/dataplane/virtio-blk.c +++ b/hw/block/dataplane/virtio-blk.c @@ -127,7 +127,8 @@ bool virtio_blk_data_plane_create(VirtIODevice *vdev, VirtIOBlkConf *conf, } else { s->ctx = qemu_get_aio_context(); } - s->bh = aio_bh_new(s->ctx, notify_guest_bh, s); + s->bh = aio_bh_new_guarded(s->ctx, notify_guest_bh, s, + &DEVICE(vdev)->mem_reentrancy_guard); s->batch_notify_vqs = bitmap_new(conf->num_queues); *dataplane = s; diff --git a/hw/block/dataplane/xen-block.c b/hw/block/dataplane/xen-block.c index 860787580a3d5787c813cfa7020d7e87f63f1ab2..07855feea6cb4b749d47137a9ddf2a49b209d1e7 100644 --- a/hw/block/dataplane/xen-block.c +++ b/hw/block/dataplane/xen-block.c @@ -631,8 +631,9 @@ XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev, } else { dataplane->ctx = qemu_get_aio_context(); } - dataplane->bh = aio_bh_new(dataplane->ctx, xen_block_dataplane_bh, - dataplane); + dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh, + dataplane, + &DEVICE(xendev)->mem_reentrancy_guard); return dataplane; } diff --git a/hw/block/fdc.c b/hw/block/fdc.c index 21d18ac2e360f936da10172a8fb39856677eb37e..24b05406e64b1d30786996a5e03d5928031bf5b6 100644 --- a/hw/block/fdc.c +++ b/hw/block/fdc.c @@ -1529,6 +1529,14 @@ static void fdctrl_start_transfer(FDCtrl *fdctrl, int direction) int tmp; fdctrl->data_len = 128 << (fdctrl->fifo[5] > 7 ? 7 : fdctrl->fifo[5]); tmp = (fdctrl->fifo[6] - ks + 1); + if (tmp < 0) { + FLOPPY_DPRINTF("invalid EOT: %d\n", tmp); + fdctrl_stop_transfer(fdctrl, FD_SR0_ABNTERM, FD_SR1_MA, 0x00); + fdctrl->fifo[3] = kt; + fdctrl->fifo[4] = kh; + fdctrl->fifo[5] = ks; + return; + } if (fdctrl->fifo[0] & 0x80) tmp += fdctrl->fifo[6]; fdctrl->data_len *= tmp; diff --git a/hw/block/hd-geometry.c b/hw/block/hd-geometry.c index dcbccee294cab287f422e8aaaa8ca0fbc58cfa82..67462f17523744a2b851a1d4d28c70de9591d6ae 100644 --- a/hw/block/hd-geometry.c +++ b/hw/block/hd-geometry.c @@ -150,7 +150,12 @@ void hd_geometry_guess(BlockBackend *blk, translation = BIOS_ATA_TRANSLATION_NONE; } if (ptrans) { - *ptrans = translation; + if (*ptrans == BIOS_ATA_TRANSLATION_AUTO) { + *ptrans = translation; + } else { + /* Defer to the translation specified by the user. 
*/ + translation = *ptrans; + } } trace_hd_geometry_guess(blk, *pcyls, *pheads, *psecs, translation); } diff --git a/hw/block/vhost-user-blk.c b/hw/block/vhost-user-blk.c index ba13cb87e5200336dc5d23631fe65b75f01fb035..512482d1a17f61d3e3388e8acfaafde4993082fd 100644 --- a/hw/block/vhost-user-blk.c +++ b/hw/block/vhost-user-blk.c @@ -95,12 +95,16 @@ static int vhost_user_blk_handle_config_change(struct vhost_dev *dev) VHostUserBlk *s = VHOST_USER_BLK(dev->vdev); Error *local_err = NULL; + if (!dev->started) { + return 0; + } + ret = vhost_dev_get_config(dev, (uint8_t *)&blkcfg, sizeof(struct virtio_blk_config), &local_err); if (ret < 0) { error_report_err(local_err); - return -1; + return ret; } /* valid for resize only */ @@ -163,7 +167,7 @@ static int vhost_user_blk_start(VirtIODevice *vdev, Error **errp) goto err_guest_notifiers; } - ret = vhost_dev_start(&s->dev, vdev); + ret = vhost_dev_start(&s->dev, vdev, true); if (ret < 0) { error_setg_errno(errp, -ret, "Error starting vhost"); goto err_guest_notifiers; @@ -203,7 +207,7 @@ static void vhost_user_blk_stop(VirtIODevice *vdev) return; } - vhost_dev_stop(&s->dev, vdev); + vhost_dev_stop(&s->dev, vdev, true); ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); if (ret < 0) { @@ -252,6 +256,7 @@ static uint64_t vhost_user_blk_get_features(VirtIODevice *vdev, VHostUserBlk *s = VHOST_USER_BLK(vdev); /* Turn on pre-defined features */ + virtio_add_feature(&features, VIRTIO_BLK_F_SIZE_MAX); virtio_add_feature(&features, VIRTIO_BLK_F_SEG_MAX); virtio_add_feature(&features, VIRTIO_BLK_F_GEOMETRY); virtio_add_feature(&features, VIRTIO_BLK_F_TOPOLOGY); @@ -511,7 +516,7 @@ static void vhost_user_blk_device_realize(DeviceState *dev, Error **errp) *errp = NULL; } ret = vhost_user_blk_realize_connect(s, errp); - } while (ret == -EPROTO && retries--); + } while (ret < 0 && retries--); if (ret < 0) { goto virtio_err; @@ -568,6 +573,12 @@ static void vhost_user_blk_instance_init(Object *obj) "/disk@0,0", DEVICE(obj)); } +static struct vhost_dev *vhost_user_blk_get_vhost(VirtIODevice *vdev) +{ + VHostUserBlk *s = VHOST_USER_BLK(vdev); + return &s->dev; +} + static const VMStateDescription vmstate_vhost_user_blk = { .name = "vhost-user-blk", .minimum_version_id = 1, @@ -602,6 +613,7 @@ static void vhost_user_blk_class_init(ObjectClass *klass, void *data) vdc->get_features = vhost_user_blk_get_features; vdc->set_status = vhost_user_blk_set_status; vdc->reset = vhost_user_blk_reset; + vdc->get_vhost = vhost_user_blk_get_vhost; } static const TypeInfo vhost_user_blk_info = { diff --git a/hw/block/virtio-blk.c b/hw/block/virtio-blk.c index f139cd7cc9cc3ae044829436222798c9da18fb30..c8d94a3dfb7b5bc50a46b2337e965a30caa5798c 100644 --- a/hw/block/virtio-blk.c +++ b/hw/block/virtio-blk.c @@ -108,6 +108,10 @@ static int virtio_blk_handle_rw_error(VirtIOBlockReq *req, int error, block_acct_failed(blk_get_stats(s->blk), &req->acct); } virtio_blk_free_request(req); + } else if (action == BLOCK_ERROR_ACTION_RETRY) { + req->mr_next = NULL; + req->next = s->rq; + s->rq = req; } blk_error_action(s->blk, action, is_read, error); @@ -149,6 +153,7 @@ static void virtio_blk_rw_complete(void *opaque, int ret) } } + blk_error_retry_reset_timeout(s->blk); virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); block_acct_done(blk_get_stats(s->blk), &req->acct); virtio_blk_free_request(req); @@ -168,6 +173,7 @@ static void virtio_blk_flush_complete(void *opaque, int ret) } } + blk_error_retry_reset_timeout(s->blk); virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); 
block_acct_done(blk_get_stats(s->blk), &req->acct); virtio_blk_free_request(req); @@ -190,6 +196,7 @@ static void virtio_blk_discard_write_zeroes_complete(void *opaque, int ret) } } + blk_error_retry_reset_timeout(s->blk); virtio_blk_req_complete(req, VIRTIO_BLK_S_OK); if (is_write_zeroes) { block_acct_done(blk_get_stats(s->blk), &req->acct); @@ -828,12 +835,12 @@ static void virtio_blk_handle_output(VirtIODevice *vdev, VirtQueue *vq) void virtio_blk_process_queued_requests(VirtIOBlock *s, bool is_bh) { - VirtIOBlockReq *req = s->rq; + VirtIOBlockReq *req; MultiReqBuffer mrb = {}; - s->rq = NULL; - aio_context_acquire(blk_get_aio_context(s->conf.conf.blk)); + req = s->rq; + s->rq = NULL; while (req) { VirtIOBlockReq *next = req->next; if (virtio_blk_handle_request(req, &mrb)) { @@ -1138,8 +1145,16 @@ static void virtio_blk_resize(void *opaque) aio_bh_schedule_oneshot(qemu_get_aio_context(), virtio_resize_cb, vdev); } +static void virtio_blk_retry_request(void *opaque) +{ + VirtIOBlock *s = VIRTIO_BLK(opaque); + + virtio_blk_process_queued_requests(s, false); +} + static const BlockDevOps virtio_block_ops = { .resize_cb = virtio_blk_resize, + .retry_request_cb = virtio_blk_retry_request, }; static void virtio_blk_device_realize(DeviceState *dev, Error **errp) diff --git a/hw/block/xen-block.c b/hw/block/xen-block.c index 674953f1adeeaec6a81d9857144e0333d211e588..6d90621e0256b0b28a1c8edadc07faee093364c9 100644 --- a/hw/block/xen-block.c +++ b/hw/block/xen-block.c @@ -760,14 +760,15 @@ static XenBlockDrive *xen_block_drive_create(const char *id, drive = g_new0(XenBlockDrive, 1); drive->id = g_strdup(id); - file_layer = qdict_new(); - driver_layer = qdict_new(); - rc = stat(filename, &st); if (rc) { error_setg_errno(errp, errno, "Could not stat file '%s'", filename); goto done; } + + file_layer = qdict_new(); + driver_layer = qdict_new(); + if (S_ISBLK(st.st_mode)) { qdict_put_str(file_layer, "driver", "host_device"); } else { @@ -775,7 +776,6 @@ static XenBlockDrive *xen_block_drive_create(const char *id, } qdict_put_str(file_layer, "filename", filename); - g_free(filename); if (mode && *mode != 'w') { qdict_put_bool(file_layer, "read-only", true); @@ -810,7 +810,6 @@ static XenBlockDrive *xen_block_drive_create(const char *id, qdict_put_str(file_layer, "locking", "off"); qdict_put_str(driver_layer, "driver", driver); - g_free(driver); qdict_put(driver_layer, "file", file_layer); @@ -821,6 +820,8 @@ static XenBlockDrive *xen_block_drive_create(const char *id, qobject_unref(driver_layer); done: + g_free(filename); + g_free(driver); if (*errp) { xen_block_drive_destroy(drive, NULL); return NULL; diff --git a/hw/char/Kconfig b/hw/char/Kconfig index 6b6cf2fc1dff949b1f454308391d74953624a207..335a60c2c14c5e248aed1377daf6f2e4991bd96c 100644 --- a/hw/char/Kconfig +++ b/hw/char/Kconfig @@ -71,3 +71,7 @@ config GOLDFISH_TTY config SHAKTI_UART bool + +config CPUFREQ + bool + default y diff --git a/hw/char/escc.c b/hw/char/escc.c index 8755d8d34f3f00a09f1edd62f9a8971754ce7fca..17a908c59b91e20c794162d28bf07652c48a8968 100644 --- a/hw/char/escc.c +++ b/hw/char/escc.c @@ -828,7 +828,7 @@ static void sunkbd_handle_event(DeviceState *dev, QemuConsole *src, } } - if (qcode > qemu_input_map_qcode_to_sun_len) { + if (qcode >= qemu_input_map_qcode_to_sun_len) { return; } diff --git a/hw/char/pl011.c b/hw/char/pl011.c index 6e2d7f75095c596dfc4725cb1692ee4662a9b49e..b24ccfeac73b949025efd645771be6ae39db3ce6 100644 --- a/hw/char/pl011.c +++ b/hw/char/pl011.c @@ -176,7 +176,7 @@ static unsigned int 
pl011_get_baudrate(const PL011State *s) { uint64_t clk; - if (s->fbrd == 0) { + if (s->ibrd == 0) { return 0; } @@ -255,6 +255,10 @@ static void pl011_write(void *opaque, hwaddr offset, case 17: /* UARTICR */ s->int_level &= ~value; pl011_update(s); + if (!s->int_enabled && !s->int_level) { + s->read_count = 0; + s->read_pos = 0; + } break; case 18: /* UARTDMACR */ s->dmacr = value; diff --git a/hw/char/stm32f2xx_usart.c b/hw/char/stm32f2xx_usart.c index 8df0832424c6f5add9b58024fa78847d9f173a8a..fde67f4f03f5a5a2e1bb7718b11545a9e0d2c5ff 100644 --- a/hw/char/stm32f2xx_usart.c +++ b/hw/char/stm32f2xx_usart.c @@ -103,10 +103,11 @@ static uint64_t stm32f2xx_usart_read(void *opaque, hwaddr addr, return retvalue; case USART_DR: DB_PRINT("Value: 0x%" PRIx32 ", %c\n", s->usart_dr, (char) s->usart_dr); + retvalue = s->usart_dr & 0x3FF; s->usart_sr &= ~USART_SR_RXNE; qemu_chr_fe_accept_input(&s->chr); qemu_set_irq(s->irq, 0); - return s->usart_dr & 0x3FF; + return retvalue; case USART_BRR: return s->usart_brr; case USART_CR1: diff --git a/hw/char/virtio-serial-bus.c b/hw/char/virtio-serial-bus.c index f01ec2137c95abcfa7fcf0efcef9e5d3ca792153..e7f3e1eb876ee952ded828fd6c5c810aa54ea8ff 100644 --- a/hw/char/virtio-serial-bus.c +++ b/hw/char/virtio-serial-bus.c @@ -257,6 +257,8 @@ static size_t send_control_event(VirtIOSerial *vser, uint32_t port_id, virtio_stw_p(vdev, &cpkt.value, value); trace_virtio_serial_send_control_event(port_id, event, value); + qemu_log("virtio serial port %d send control message" + " event = %d, value = %d\n", port_id, event, value); return send_control_msg(vser, &cpkt, sizeof(cpkt)); } @@ -364,6 +366,9 @@ static void handle_control_message(VirtIOSerial *vser, void *buf, size_t len) cpkt.value = virtio_lduw_p(vdev, &gcpkt->value); trace_virtio_serial_handle_control_message(cpkt.event, cpkt.value); + qemu_log("virtio serial port '%u' handle control message" + " event = %d, value = %d\n", + virtio_ldl_p(vdev, &gcpkt->id), cpkt.event, cpkt.value); if (cpkt.event == VIRTIO_CONSOLE_DEVICE_READY) { if (!cpkt.value) { @@ -985,7 +990,7 @@ static void virtser_port_device_realize(DeviceState *dev, Error **errp) return; } - port->bh = qemu_bh_new(flush_queued_data_bh, port); + port->bh = virtio_bh_new_guarded(dev, flush_queued_data_bh, port); port->elem = NULL; } diff --git a/hw/core/cpu-common.c b/hw/core/cpu-common.c index 9e3241b43085bd8e44f9b189d0ef36c726abdcba..92d15f2f493a85ebe0578be3449f3feaf04cfc8b 100644 --- a/hw/core/cpu-common.c +++ b/hw/core/cpu-common.c @@ -206,10 +206,12 @@ static void cpu_common_realizefn(DeviceState *dev, Error **errp) } } +#ifndef __aarch64__ if (dev->hotplugged) { cpu_synchronize_post_init(cpu); cpu_resume(cpu); } +#endif /* NOTE: latest generic point where the cpu is fully realized */ trace_init_vcpu(cpu); diff --git a/hw/core/generic-loader.c b/hw/core/generic-loader.c index d14f932eea2e1eb39d2f6dab3e54a6aca685aca8..504ed7ca72e08ba3c789dc7884c61d8331e03e4e 100644 --- a/hw/core/generic-loader.c +++ b/hw/core/generic-loader.c @@ -56,8 +56,9 @@ static void generic_loader_reset(void *opaque) } if (s->data_len) { - assert(s->data_len < sizeof(s->data)); - dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len); + assert(s->data_len <= sizeof(s->data)); + dma_memory_write(s->cpu->as, s->addr, &s->data, s->data_len, + MEMTXATTRS_UNSPECIFIED); } } diff --git a/hw/core/irq.c b/hw/core/irq.c index 8a9cbdd5561a0d0c35b5139df51ec980ac4c9d21..700a6373d840cf17bf80977430c22292d04e73ba 100644 --- a/hw/core/irq.c +++ b/hw/core/irq.c @@ -126,7 +126,10 @@ void 
qemu_irq_intercept_in(qemu_irq *gpio_in, qemu_irq_handler handler, int n) int i; qemu_irq *old_irqs = qemu_allocate_irqs(NULL, NULL, n); for (i = 0; i < n; i++) { - *old_irqs[i] = *gpio_in[i]; + old_irqs[i]->handler = gpio_in[i]->handler; + old_irqs[i]->opaque = gpio_in[i]->opaque; + old_irqs[i]->n = gpio_in[i]->n; + gpio_in[i]->handler = handler; gpio_in[i]->opaque = &old_irqs[i]; } diff --git a/hw/core/loader-fit.c b/hw/core/loader-fit.c index b7c7b3ba94d47c877dc9711b8384e139bcaea3d1..4a9a74cf3aa1de5e9ea3792d8224fd1f65a62c51 100644 --- a/hw/core/loader-fit.c +++ b/hw/core/loader-fit.c @@ -265,7 +265,7 @@ int load_fit(const struct fit_loader *ldr, const char *filename, void *opaque) const char *def_cfg_name; char path[FIT_LOADER_MAX_PATH]; int itb_size, configs, cfg_off, off; - hwaddr kernel_end; + hwaddr kernel_end = 0; int ret; itb = load_device_tree(filename, &itb_size); diff --git a/hw/core/loader.c b/hw/core/loader.c index 052a0fd7198bc070a2fb347632eea5ac04f9dee2..8389860679b3b9988a1fd78dccfdcee172503b33 100644 --- a/hw/core/loader.c +++ b/hw/core/loader.c @@ -605,6 +605,7 @@ ssize_t gunzip(void *dst, size_t dstlen, uint8_t *src, size_t srclen) r = inflate(&s, Z_FINISH); if (r != Z_OK && r != Z_STREAM_END) { printf ("Error: inflate() returned %d\n", r); + inflateEnd(&s); return -1; } dstbytes = s.next_out - (unsigned char *) dst; @@ -1164,9 +1165,13 @@ static void rom_reset(void *unused) if (rom->mr) { void *host = memory_region_get_ram_ptr(rom->mr); memcpy(host, rom->data, rom->datasize); + memset(host + rom->datasize, 0, rom->romsize - rom->datasize); } else { address_space_write_rom(rom->as, rom->addr, MEMTXATTRS_UNSPECIFIED, rom->data, rom->datasize); + address_space_set(rom->as, rom->addr + rom->datasize, 0, + rom->romsize - rom->datasize, + MEMTXATTRS_UNSPECIFIED); } if (rom->isrom) { /* rom needs to be written only once */ diff --git a/hw/core/machine-hmp-cmds.c b/hw/core/machine-hmp-cmds.c index 4e2f319aebdae7b077042c9a092a904678769e58..c4f63b1d63c3be4a6e5ff023f83ca47e3389a8bf 100644 --- a/hw/core/machine-hmp-cmds.c +++ b/hw/core/machine-hmp-cmds.c @@ -77,6 +77,9 @@ void hmp_hotpluggable_cpus(Monitor *mon, const QDict *qdict) if (c->has_die_id) { monitor_printf(mon, " die-id: \"%" PRIu64 "\"\n", c->die_id); } + if (c->has_cluster_id) { + monitor_printf(mon, " cluster-id: \"%" PRIu64 "\"\n", c->cluster_id); + } if (c->has_core_id) { monitor_printf(mon, " core-id: \"%" PRIu64 "\"\n", c->core_id); } diff --git a/hw/core/machine-smp.c b/hw/core/machine-smp.c index 116a0cbbfaba448c2f0c13ded91c2ca990629031..47922ec4aa8717e63823243e10b15412ea987fbf 100644 --- a/hw/core/machine-smp.c +++ b/hw/core/machine-smp.c @@ -37,6 +37,10 @@ static char *cpu_hierarchy_to_string(MachineState *ms) g_string_append_printf(s, " * dies (%u)", ms->smp.dies); } + if (mc->smp_props.clusters_supported) { + g_string_append_printf(s, " * clusters (%u)", ms->smp.clusters); + } + g_string_append_printf(s, " * cores (%u)", ms->smp.cores); g_string_append_printf(s, " * threads (%u)", ms->smp.threads); @@ -44,7 +48,8 @@ static char *cpu_hierarchy_to_string(MachineState *ms) } /* - * smp_parse - Generic function used to parse the given SMP configuration + * machine_parse_smp_config: Generic function used to parse the given + * SMP configuration * * Any missing parameter in "cpus/maxcpus/sockets/cores/threads" will be * automatically computed based on the provided ones. 
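Note on the arithmetic the following hunks implement: every topology member left unset defaults to 1, one missing level can be derived by dividing maxcpus by the product of the others, and the final product of all levels must equal maxcpus. A minimal standalone sketch of that logic follows; the struct and function names here are invented for this illustration and are not QEMU API.

#include <stdio.h>

/* Illustration only; invented names, not part of QEMU. */
struct topo {
    unsigned sockets, dies, clusters, cores, threads, maxcpus;
};

/* Default unset members to 1, derive one missing level, check the product. */
static int topo_complete(struct topo *t)
{
    t->dies = t->dies ? t->dies : 1;
    t->clusters = t->clusters ? t->clusters : 1;
    t->sockets = t->sockets ? t->sockets : 1;
    t->threads = t->threads ? t->threads : 1;
    if (!t->cores && t->maxcpus) {      /* mirrors "prefer cores over sockets" */
        t->cores = t->maxcpus /
                   (t->sockets * t->dies * t->clusters * t->threads);
    }
    t->cores = t->cores ? t->cores : 1;
    if (!t->maxcpus) {
        t->maxcpus = t->sockets * t->dies * t->clusters *
                     t->cores * t->threads;
    }
    /* Sanity check, as in the hunk below. */
    return t->sockets * t->dies * t->clusters *
           t->cores * t->threads == t->maxcpus;
}

int main(void)
{
    struct topo t = { .sockets = 2, .clusters = 2, .threads = 2, .maxcpus = 16 };
    int ok = topo_complete(&t);  /* cores derived as 16 / (2*1*2*2) = 2 */
    printf("cores=%u maxcpus=%u valid=%d\n", t.cores, t.maxcpus, ok);
    return 0;
}

With the equivalent of -smp maxcpus=16,sockets=2,clusters=2,threads=2 this derives cores=2, and the product check then passes, matching the behavior of machine_parse_smp_config() below.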
@@ -63,12 +68,14 @@ static char *cpu_hierarchy_to_string(MachineState *ms) * introduced topology members which are likely to be target specific should * be directly set as 1 if they are omitted (e.g. dies for PC since 4.1). */ -void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) +void machine_parse_smp_config(MachineState *ms, + const SMPConfiguration *config, Error **errp) { MachineClass *mc = MACHINE_GET_CLASS(ms); unsigned cpus = config->has_cpus ? config->cpus : 0; unsigned sockets = config->has_sockets ? config->sockets : 0; unsigned dies = config->has_dies ? config->dies : 0; + unsigned clusters = config->has_clusters ? config->clusters : 0; unsigned cores = config->has_cores ? config->cores : 0; unsigned threads = config->has_threads ? config->threads : 0; unsigned maxcpus = config->has_maxcpus ? config->maxcpus : 0; @@ -80,6 +87,7 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) if ((config->has_cpus && config->cpus == 0) || (config->has_sockets && config->sockets == 0) || (config->has_dies && config->dies == 0) || + (config->has_clusters && config->clusters == 0) || (config->has_cores && config->cores == 0) || (config->has_threads && config->threads == 0) || (config->has_maxcpus && config->maxcpus == 0)) { @@ -95,8 +103,13 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) error_setg(errp, "dies not supported by this machine's CPU topology"); return; } + if (!mc->smp_props.clusters_supported && clusters > 1) { + error_setg(errp, "clusters not supported by this machine's CPU topology"); + return; + } dies = dies > 0 ? dies : 1; + clusters = clusters > 0 ? clusters : 1; /* compute missing values based on the provided ones */ if (cpus == 0 && maxcpus == 0) { @@ -111,41 +124,42 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) if (sockets == 0) { cores = cores > 0 ? cores : 1; threads = threads > 0 ? threads : 1; - sockets = maxcpus / (dies * cores * threads); + sockets = maxcpus / (dies * clusters * cores * threads); } else if (cores == 0) { threads = threads > 0 ? threads : 1; - cores = maxcpus / (sockets * dies * threads); + cores = maxcpus / (sockets * dies * clusters * threads); } } else { /* prefer cores over sockets since 6.2 */ if (cores == 0) { sockets = sockets > 0 ? sockets : 1; threads = threads > 0 ? threads : 1; - cores = maxcpus / (sockets * dies * threads); + cores = maxcpus / (sockets * dies * clusters * threads); } else if (sockets == 0) { threads = threads > 0 ? threads : 1; - sockets = maxcpus / (dies * cores * threads); + sockets = maxcpus / (dies * clusters * cores * threads); } } /* try to calculate omitted threads at last */ if (threads == 0) { - threads = maxcpus / (sockets * dies * cores); + threads = maxcpus / (sockets * dies * clusters * cores); } } - maxcpus = maxcpus > 0 ? maxcpus : sockets * dies * cores * threads; + maxcpus = maxcpus > 0 ? maxcpus : sockets * dies * clusters * cores * threads; cpus = cpus > 0 ? 
cpus : maxcpus; ms->smp.cpus = cpus; ms->smp.sockets = sockets; ms->smp.dies = dies; + ms->smp.clusters = clusters; ms->smp.cores = cores; ms->smp.threads = threads; ms->smp.max_cpus = maxcpus; /* sanity-check of the computed topology */ - if (sockets * dies * cores * threads != maxcpus) { + if (sockets * dies * clusters * cores * threads != maxcpus) { g_autofree char *topo_msg = cpu_hierarchy_to_string(ms); error_setg(errp, "Invalid CPU topology: " "product of the hierarchy must match maxcpus: " @@ -179,3 +193,41 @@ void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp) return; } } + +bool machine_parse_smp_cache(MachineState *ms, + const SmpCachePropertiesList *caches, + Error **errp) +{ + const SmpCachePropertiesList *node; + DECLARE_BITMAP(caches_bitmap, CACHE_LEVEL_AND_TYPE__MAX); + + bitmap_zero(caches_bitmap, CACHE_LEVEL_AND_TYPE__MAX); + for (node = caches; node; node = node->next) { + /* Prohibit users from repeating settings. */ + if (test_bit(node->value->cache, caches_bitmap)) { + error_setg(errp, + "Invalid cache properties: %s. " + "The cache properties are duplicated", + CacheLevelAndType_str(node->value->cache)); + return false; + } + + machine_set_cache_size(ms, node->value->cache, + node->value->size); + set_bit(node->value->cache, caches_bitmap); + } + + return true; +} + +uint64_t machine_get_cache_size(const MachineState *ms, + CacheLevelAndType cache) +{ + return ms->smp_cache.props[cache].size; +} + +void machine_set_cache_size(MachineState *ms, CacheLevelAndType cache, + uint64_t size) +{ + ms->smp_cache.props[cache].size = size; +} \ No newline at end of file diff --git a/hw/core/machine.c b/hw/core/machine.c index 53a99abc5605a8e08d70675f86ba14db543c7640..35a7c1d328a572d86afc9288ffb03a0b65f6a3b8 100644 --- a/hw/core/machine.c +++ b/hw/core/machine.c @@ -121,6 +121,8 @@ const size_t hw_compat_4_0_len = G_N_ELEMENTS(hw_compat_4_0); GlobalProperty hw_compat_3_1[] = { { "pcie-root-port", "x-speed", "2_5" }, { "pcie-root-port", "x-width", "1" }, + { "pcie-root-port", "fast-plug", "0" }, + { "pcie-root-port", "fast-unplug", "0" }, { "memory-backend-file", "x-use-canonical-path-for-ramblock-id", "true" }, { "memory-backend-memfd", "x-use-canonical-path-for-ramblock-id", "true" }, { "tpm-crb", "ppi", "false" }, @@ -684,6 +686,11 @@ void machine_set_cpu_numa_node(MachineState *machine, return; } + if (props->has_cluster_id && !slot->props.has_cluster_id) { + error_setg(errp, "cluster-id is not supported"); + return; + } + /* skip slots with explicit mismatch */ if (props->has_thread_id && props->thread_id != slot->props.thread_id) { continue; @@ -693,6 +700,10 @@ void machine_set_cpu_numa_node(MachineState *machine, continue; } + if (props->has_cluster_id && props->cluster_id != slot->props.cluster_id) { + continue; + } + if (props->has_die_id && props->die_id != slot->props.die_id) { continue; } @@ -742,10 +753,12 @@ static void machine_get_smp(Object *obj, Visitor *v, const char *name, .has_cpus = true, .cpus = ms->smp.cpus, .has_sockets = true, .sockets = ms->smp.sockets, .has_dies = true, .dies = ms->smp.dies, + .has_clusters = true, .clusters = ms->smp.clusters, .has_cores = true, .cores = ms->smp.cores, .has_threads = true, .threads = ms->smp.threads, .has_maxcpus = true, .maxcpus = ms->smp.max_cpus, }; + if (!visit_type_SMPConfiguration(v, name, &config, &error_abort)) { return; } @@ -761,7 +774,41 @@ static void machine_set_smp(Object *obj, Visitor *v, const char *name, return; } - smp_parse(ms, config, errp); + machine_parse_smp_config(ms, 
config, errp); +} + +static void machine_get_smp_cache(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + SmpCache *cache = &ms->smp_cache; + SmpCachePropertiesList *head = NULL; + SmpCachePropertiesList **tail = &head; + + for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) { + SmpCacheProperties *node = g_new(SmpCacheProperties, 1); + + node->cache = cache->props[i].cache; + node->size = cache->props[i].size; + QAPI_LIST_APPEND(tail, node); + } + + visit_type_SmpCachePropertiesList(v, name, &head, errp); + qapi_free_SmpCachePropertiesList(head); +} + +static void machine_set_smp_cache(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + MachineState *ms = MACHINE(obj); + SmpCachePropertiesList *caches; + + if (!visit_type_SmpCachePropertiesList(v, name, &caches, errp)) { + return; + } + + machine_parse_smp_cache(ms, caches, errp); + qapi_free_SmpCachePropertiesList(caches); } static void machine_class_init(ObjectClass *oc, void *data) @@ -808,6 +855,11 @@ static void machine_class_init(ObjectClass *oc, void *data) object_class_property_set_description(oc, "smp", "CPU topology"); + object_class_property_add(oc, "smp-cache", "SmpCachePropertiesWrapper", + machine_get_smp_cache, machine_set_smp_cache, NULL, NULL); + object_class_property_set_description(oc, "smp-cache", + "Cache properties list for SMP machine"); + object_class_property_add(oc, "phandle-start", "int", machine_get_phandle_start, machine_set_phandle_start, NULL, NULL); @@ -932,8 +984,14 @@ static void machine_initfn(Object *obj) ms->smp.max_cpus = mc->default_cpus; ms->smp.sockets = 1; ms->smp.dies = 1; + ms->smp.clusters = 1; ms->smp.cores = 1; ms->smp.threads = 1; + + for (int i = 0; i < CACHE_LEVEL_AND_TYPE__MAX; i++) { + ms->smp_cache.props[i].cache = (CacheLevelAndType)i; + ms->smp_cache.props[i].size = 0; + } } static void machine_finalize(Object *obj) @@ -984,6 +1042,12 @@ static char *cpu_slot_to_string(const CPUArchId *cpu) } g_string_append_printf(s, "die-id: %"PRId64, cpu->props.die_id); } + if (cpu->props.has_cluster_id) { + if (s->len) { + g_string_append_printf(s, ", "); + } + g_string_append_printf(s, "cluster-id: %"PRId64, cpu->props.cluster_id); + } if (cpu->props.has_core_id) { if (s->len) { g_string_append_printf(s, ", "); diff --git a/hw/core/numa.c b/hw/core/numa.c index e6050b22739f44a0c9acbed5459970bee0b58404..1aa05dcf425f46eca6ac0468d56a96fe0d67bf13 100644 --- a/hw/core/numa.c +++ b/hw/core/numa.c @@ -784,9 +784,8 @@ static void numa_stat_memory_devices(NumaNodeMem node_mem[]) break; case MEMORY_DEVICE_INFO_KIND_SGX_EPC: se = value->u.sgx_epc.data; - /* TODO: once we support numa, assign to right node */ - node_mem[0].node_mem += se->size; - node_mem[0].node_plugged_mem += se->size; + node_mem[se->node].node_mem += se->size; + node_mem[se->node].node_plugged_mem = 0; break; default: g_assert_not_reached(); diff --git a/hw/core/platform-bus.c b/hw/core/platform-bus.c index b8487b26b674e46e445749118abc7b2cc6918f0f..dc58bf505aa2f5c0460eed9b41d9ee41d92b28f1 100644 --- a/hw/core/platform-bus.c +++ b/hw/core/platform-bus.c @@ -145,9 +145,12 @@ static void platform_bus_map_mmio(PlatformBusDevice *pbus, SysBusDevice *sbdev, * the target device's memory region */ for (off = 0; off < pbus->mmio_size; off += alignment) { - if (!memory_region_find(&pbus->mmio, off, size).mr) { + MemoryRegion *mr = memory_region_find(&pbus->mmio, off, size).mr; + if (!mr) { found_region = true; break; + } else { + memory_region_unref(mr); } } 
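The platform-bus change just above hinges on a reference-counting rule that is easy to miss: memory_region_find() returns a MemoryRegionSection whose .mr carries an extra reference, so every non-NULL result must be released with memory_region_unref(), which is exactly the leak being fixed. A minimal sketch of the correct pattern; it assumes the QEMU memory API headers and is illustrative only, not buildable outside the tree:

/* Return true if nothing is mapped in [off, off + size) under root. */
static bool mmio_range_is_free(MemoryRegion *root, hwaddr off, uint64_t size)
{
    MemoryRegionSection sec = memory_region_find(root, off, size);

    if (!sec.mr) {
        return true;                 /* nothing mapped there */
    }
    memory_region_unref(sec.mr);     /* balance the ref taken by find() */
    return false;
}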
diff --git a/hw/core/ptimer.c b/hw/core/ptimer.c index 6ba19fd96585e77bf9c8a9e4ed22fff4a6a3654e..25063deefebe065ee9c8a258211d94132a51ed83 100644 --- a/hw/core/ptimer.c +++ b/hw/core/ptimer.c @@ -84,7 +84,7 @@ static void ptimer_reload(ptimer_state *s, int delta_adjust) delta = s->delta = s->limit; } - if (s->period == 0) { + if (s->period == 0 && s->period_frac == 0) { if (!qtest_enabled()) { fprintf(stderr, "Timer with period zero, disabling\n"); } @@ -310,7 +310,7 @@ void ptimer_run(ptimer_state *s, int oneshot) assert(s->in_transaction); - if (was_disabled && s->period == 0) { + if (was_disabled && s->period == 0 && s->period_frac == 0) { if (!qtest_enabled()) { fprintf(stderr, "Timer with period zero, disabling\n"); } diff --git a/hw/core/qdev-prop-internal.h b/hw/core/qdev-prop-internal.h index d7b77844fe0b2fa14ee8db6728b4ce9fb005e4e5..68b1b9d10c86ab4195c245e7708442de7675e179 100644 --- a/hw/core/qdev-prop-internal.h +++ b/hw/core/qdev-prop-internal.h @@ -22,6 +22,8 @@ void qdev_propinfo_set_default_value_uint(ObjectProperty *op, void qdev_propinfo_get_int32(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp); +void qdev_propinfo_get_int64(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp); void qdev_propinfo_get_size32(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp); diff --git a/hw/core/qdev-properties-system.c b/hw/core/qdev-properties-system.c index a91f60567aa5750c5534c131a30598d8adf53b49..b93ed9b4dd4a7dffa33f6d0d7752c602085c7719 100644 --- a/hw/core/qdev-properties-system.c +++ b/hw/core/qdev-properties-system.c @@ -612,6 +612,51 @@ const PropertyInfo qdev_prop_blockdev_on_error = { .set_default_value = qdev_propinfo_set_default_value_enum, }; +static void set_retry_time(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + Property *prop = opaque; + int64_t value, *ptr = object_field_prop_ptr(obj, prop); + Error *local_err = NULL; + + if (dev->realized) { + qdev_prop_set_after_realize(dev, name, errp); + return; + } + + visit_type_int64(v, name, &value, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + /* value should not be negative */ + if (value < 0) { + error_setg(errp, QERR_PROPERTY_VALUE_OUT_OF_RANGE, + dev->id ? 
: "", name, (int64_t)value, 0L, LONG_MAX); + return; + } + + *ptr = value; +} + +const PropertyInfo qdev_prop_blockdev_retry_interval = { + .name = "BlockdevRetryInterval", + .description = "Interval for retry error handling policy", + .get = qdev_propinfo_get_int64, + .set = set_retry_time, + .set_default_value = qdev_propinfo_set_default_value_int, +}; + +const PropertyInfo qdev_prop_blockdev_retry_timeout = { + .name = "BlockdevRetryTimeout", + .description = "Timeout for retry error handling policy", + .get = qdev_propinfo_get_int64, + .set = set_retry_time, + .set_default_value = qdev_propinfo_set_default_value_int, +}; + /* --- BIOS CHS translation */ QEMU_BUILD_BUG_ON(sizeof(BiosAtaTranslation) != sizeof(int)); @@ -1119,3 +1164,14 @@ const PropertyInfo qdev_prop_uuid = { .set = set_uuid, .set_default_value = set_default_uuid_auto, }; + +/* --- CompressMethod --- */ +const PropertyInfo qdev_prop_compress_method = { + .name = "CompressMethod", + .description = "multi-thread compression method, " + "zlib/zstd", + .enum_table = &CompressMethod_lookup, + .get = qdev_propinfo_get_enum, + .set = qdev_propinfo_set_enum, + .set_default_value = qdev_propinfo_set_default_value_enum, +}; diff --git a/hw/core/qdev-properties.c b/hw/core/qdev-properties.c index c34aac6ebc91f0aff62089b3bdca035ee1607ff6..2d5f662663b6342ce55e31889e86979696fa4f5e 100644 --- a/hw/core/qdev-properties.c +++ b/hw/core/qdev-properties.c @@ -396,7 +396,7 @@ static void set_uint64(Object *obj, Visitor *v, const char *name, visit_type_uint64(v, name, ptr, errp); } -static void get_int64(Object *obj, Visitor *v, const char *name, +void qdev_propinfo_get_int64(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { Property *prop = opaque; @@ -423,7 +423,7 @@ const PropertyInfo qdev_prop_uint64 = { const PropertyInfo qdev_prop_int64 = { .name = "int64", - .get = get_int64, + .get = qdev_propinfo_get_int64, .set = set_int64, .set_default_value = qdev_propinfo_set_default_value_int, }; diff --git a/hw/core/reset.c b/hw/core/reset.c index 9c477f2bf5bdc4adb97b32e92d6493083cd4c969..314d33211172948fb6d0f00b9f68c6a1ead70f1d 100644 --- a/hw/core/reset.c +++ b/hw/core/reset.c @@ -25,6 +25,7 @@ #include "qemu/osdep.h" #include "qemu/queue.h" +#include "qemu/log.h" #include "sysemu/reset.h" /* reset/shutdown handler */ @@ -47,6 +48,31 @@ void qemu_register_reset(QEMUResetHandler *func, void *opaque) QTAILQ_INSERT_TAIL(&reset_handlers, re, entry); } +QEMUResetEntry *qemu_get_reset_entry(QEMUResetHandler *func, + void *opaque) +{ + QEMUResetEntry *re; + + QTAILQ_FOREACH(re, &reset_handlers, entry) { + if (re->func == func && re->opaque == opaque) { + return re; + } + } + + return NULL; +} + +void qemu_register_reset_after(QEMUResetEntry *entry, + QEMUResetHandler *func, + void *opaque) +{ + QEMUResetEntry *re = g_malloc0(sizeof(QEMUResetEntry)); + + re->func = func; + re->opaque = opaque; + QTAILQ_INSERT_AFTER(&reset_handlers, entry, re, entry); +} + void qemu_unregister_reset(QEMUResetHandler *func, void *opaque) { QEMUResetEntry *re; @@ -64,6 +90,7 @@ void qemu_devices_reset(void) { QEMUResetEntry *re, *nre; + qemu_log("reset all devices\n"); /* reset all devices */ QTAILQ_FOREACH_SAFE(re, &reset_handlers, entry, nre) { re->func(re->opaque); diff --git a/hw/core/resettable.c b/hw/core/resettable.c index 96a99ce39ea37364405d20686e0ed17f47c785d5..c3df75c6ba8574d9052cb433d02c9744c6462c41 100644 --- a/hw/core/resettable.c +++ b/hw/core/resettable.c @@ -201,12 +201,11 @@ static void resettable_phase_exit(Object *obj, void 
*opaque, ResetType type) resettable_child_foreach(rc, obj, resettable_phase_exit, NULL, type); assert(s->count > 0); - if (s->count == 1) { + if (--s->count == 0) { trace_resettable_phase_exit_exec(obj, obj_typename, !!rc->phases.exit); if (rc->phases.exit && !resettable_get_tr_func(rc, obj)) { rc->phases.exit(obj); } - s->count = 0; } s->exit_phase_in_progress = false; trace_resettable_phase_exit_end(obj, obj_typename, s->count); diff --git a/hw/display/artist.c b/hw/display/artist.c index 21b7fd1b440eadb1b44bcebf410c033671f59775..1767203477e46679f6c76ca89417312f959ffda9 100644 --- a/hw/display/artist.c +++ b/hw/display/artist.c @@ -1359,7 +1359,7 @@ static void artist_create_buffer(ARTISTState *s, const char *name, { struct vram_buffer *buf = s->vram_buffer + idx; - memory_region_init_ram(&buf->mr, NULL, name, width * height, + memory_region_init_ram(&buf->mr, OBJECT(s), name, width * height, &error_fatal); memory_region_add_subregion_overlap(&s->mem_as_root, *offset, &buf->mr, 0); diff --git a/hw/display/ati_2d.c b/hw/display/ati_2d.c index 4dc10ea79529b354f6bdeb92e0056def6ed69f30..692bec91de452070b1b8d6bce79baee394b06fe0 100644 --- a/hw/display/ati_2d.c +++ b/hw/display/ati_2d.c @@ -84,7 +84,7 @@ void ati_2d_blt(ATIVGAState *s) DPRINTF("%d %d %d, %d %d %d, (%d,%d) -> (%d,%d) %dx%d %c %c\n", s->regs.src_offset, s->regs.dst_offset, s->regs.default_offset, s->regs.src_pitch, s->regs.dst_pitch, s->regs.default_pitch, - s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y, + s->regs.src_x, s->regs.src_y, dst_x, dst_y, s->regs.dst_width, s->regs.dst_height, (s->regs.dp_cntl & DST_X_LEFT_TO_RIGHT ? '>' : '<'), (s->regs.dp_cntl & DST_Y_TOP_TO_BOTTOM ? 'v' : '^')); @@ -180,11 +180,11 @@ void ati_2d_blt(ATIVGAState *s) dst_stride /= sizeof(uint32_t); DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n", dst_bits, dst_stride, bpp, - s->regs.dst_x, s->regs.dst_y, + dst_x, dst_y, s->regs.dst_width, s->regs.dst_height, filler); pixman_fill((uint32_t *)dst_bits, dst_stride, bpp, - s->regs.dst_x, s->regs.dst_y, + dst_x, dst_y, s->regs.dst_width, s->regs.dst_height, filler); if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr && diff --git a/hw/display/bcm2835_fb.c b/hw/display/bcm2835_fb.c index 2be77bdd3a0637a0a7910fa112ef93f501ada0db..ac17c472a596f06d1ee150c530d303b09ea27444 100644 --- a/hw/display/bcm2835_fb.c +++ b/hw/display/bcm2835_fb.c @@ -279,8 +279,7 @@ static void bcm2835_fb_mbox_push(BCM2835FBState *s, uint32_t value) newconf.xoffset = ldl_le_phys(&s->dma_as, value + 24); newconf.yoffset = ldl_le_phys(&s->dma_as, value + 28); - newconf.base = s->vcram_base | (value & 0xc0000000); - newconf.base += BCM2835_FB_OFFSET; + newconf.base = s->vcram_base + BCM2835_FB_OFFSET; /* Copy fields which we don't want to change from the existing config */ newconf.pixo = s->config.pixo; diff --git a/hw/display/cirrus_vga.c b/hw/display/cirrus_vga.c index fdca6ca659f9147a630a64804d784ec890564aae..c66ed801ef5b1f01e8208f44464cd9174f9801e9 100644 --- a/hw/display/cirrus_vga.c +++ b/hw/display/cirrus_vga.c @@ -834,7 +834,7 @@ static void cirrus_bitblt_cputovideo_next(CirrusVGAState * s) word alignment, so we keep them for the next line */ /* XXX: keep alignment to speed up transfer */ end_ptr = s->cirrus_bltbuf + s->cirrus_blt_srcpitch; - copy_count = s->cirrus_srcptr_end - end_ptr; + copy_count = MIN(s->cirrus_srcptr_end - end_ptr, CIRRUS_BLTBUFSIZE); memmove(s->cirrus_bltbuf, end_ptr, copy_count); s->cirrus_srcptr = s->cirrus_bltbuf + copy_count; s->cirrus_srcptr_end = s->cirrus_bltbuf + 
s->cirrus_blt_srcpitch; diff --git a/hw/display/edid-generate.c b/hw/display/edid-generate.c index f2b874d5e358523aa9f380bbc9c566a5544f1e67..6f5ac6a38ad864c3a07e1286b3b9fc2a59e36c7f 100644 --- a/hw/display/edid-generate.c +++ b/hw/display/edid-generate.c @@ -401,10 +401,10 @@ void qemu_edid_generate(uint8_t *edid, size_t size, info->name = "QEMU Monitor"; } if (!info->prefx) { - info->prefx = 1024; + info->prefx = 1280; } if (!info->prefy) { - info->prefy = 768; + info->prefy = 800; } if (info->prefx >= 4096 || info->prefy >= 4096) { large_screen = 1; diff --git a/hw/display/framebuffer.c b/hw/display/framebuffer.c index 4485aa335bbce829db75110463e0fc36a85729c0..9fff1c754f5ac693ea0106ac0234cc43e339c4dd 100644 --- a/hw/display/framebuffer.c +++ b/hw/display/framebuffer.c @@ -99,6 +99,10 @@ void framebuffer_update_display( src += i * src_width; dest += i * dest_row_pitch; + addr += (uint64_t)i * src_width; + src += (uint64_t)i * src_width; + dest += (uint64_t)i * dest_row_pitch; + snap = memory_region_snapshot_and_clear_dirty(mem, addr, src_width * rows, DIRTY_MEMORY_VGA); for (; i < rows; i++) { diff --git a/hw/display/next-fb.c b/hw/display/next-fb.c index dd6a1aa8aeeb5be4f163fb16b1850451f3111935..8446ff3c00e23e45d73a8a9e7a267b579519e665 100644 --- a/hw/display/next-fb.c +++ b/hw/display/next-fb.c @@ -126,7 +126,7 @@ static void nextfb_class_init(ObjectClass *oc, void *data) set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories); dc->realize = nextfb_realize; - /* Note: This device does not any state that we have to reset or migrate */ + /* Note: This device does not have any state that we have to reset or migrate */ } static const TypeInfo nextfb_info = { diff --git a/hw/display/qxl-logger.c b/hw/display/qxl-logger.c index 68bfa47568025b292cf2f39e65a8dfe9a6b4be53..35c38f62525deb89f11fa7fda3a40a92e9923de0 100644 --- a/hw/display/qxl-logger.c +++ b/hw/display/qxl-logger.c @@ -106,7 +106,7 @@ static int qxl_log_image(PCIQXLDevice *qxl, QXLPHYSICAL addr, int group_id) QXLImage *image; QXLImageDescriptor *desc; - image = qxl_phys2virt(qxl, addr, group_id); + image = qxl_phys2virt(qxl, addr, group_id, sizeof(QXLImage)); if (!image) { return 1; } @@ -214,7 +214,8 @@ int qxl_log_cmd_cursor(PCIQXLDevice *qxl, QXLCursorCmd *cmd, int group_id) cmd->u.set.position.y, cmd->u.set.visible ? "yes" : "no", cmd->u.set.shape); - cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id); + cursor = qxl_phys2virt(qxl, cmd->u.set.shape, group_id, + sizeof(QXLCursor)); if (!cursor) { return 1; } @@ -236,6 +237,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext) { bool compat = ext->flags & QXL_COMMAND_FLAG_COMPAT; void *data; + size_t datasz; int ret; if (!qxl->cmdlog) { @@ -247,7 +249,20 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext) qxl_name(qxl_type, ext->cmd.type), compat ? "(compat)" : ""); - data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + switch (ext->cmd.type) { + case QXL_CMD_DRAW: + datasz = compat ? 
sizeof(QXLCompatDrawable) : sizeof(QXLDrawable); + break; + case QXL_CMD_SURFACE: + datasz = sizeof(QXLSurfaceCmd); + break; + case QXL_CMD_CURSOR: + datasz = sizeof(QXLCursorCmd); + break; + default: + goto out; + } + data = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, datasz); if (!data) { return 1; } @@ -269,6 +284,7 @@ int qxl_log_command(PCIQXLDevice *qxl, const char *ring, QXLCommandExt *ext) qxl_log_cmd_cursor(qxl, data, ext->group_id); break; } +out: fprintf(stderr, "\n"); return 0; } diff --git a/hw/display/qxl-render.c b/hw/display/qxl-render.c index d28849b121763600d219623213801ee02a847856..fcfd40c3ac1d622b6d27279e25be252c45ea0873 100644 --- a/hw/display/qxl-render.c +++ b/hw/display/qxl-render.c @@ -107,7 +107,9 @@ static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl) qxl->guest_primary.resized = 0; qxl->guest_primary.data = qxl_phys2virt(qxl, qxl->guest_primary.surface.mem, - MEMSLOT_GROUP_GUEST); + MEMSLOT_GROUP_GUEST, + qxl->guest_primary.abs_stride + * height); if (!qxl->guest_primary.data) { goto end; } @@ -228,7 +230,8 @@ static void qxl_unpack_chunks(void *dest, size_t size, PCIQXLDevice *qxl, if (offset == size) { return; } - chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id); + chunk = qxl_phys2virt(qxl, chunk->next_chunk, group_id, + sizeof(QXLDataChunk) + chunk->data_size); if (!chunk) { return; } @@ -247,6 +250,13 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor, size_t size; c = cursor_alloc(cursor->header.width, cursor->header.height); + + if (!c) { + qxl_set_guest_bug(qxl, "%s: cursor %ux%u alloc error", __func__, + cursor->header.width, cursor->header.height); + goto fail; + } + c->hot_x = cursor->header.hot_spot_x; c->hot_y = cursor->header.hot_spot_y; switch (cursor->header.type) { @@ -266,7 +276,7 @@ static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor, } break; case SPICE_CURSOR_TYPE_ALPHA: - size = sizeof(uint32_t) * cursor->header.width * cursor->header.height; + size = sizeof(uint32_t) * c->width * c->height; qxl_unpack_chunks(c->data, size, qxl, &cursor->chunk, group_id); if (qxl->debug > 2) { cursor_print_ascii_art(c, "qxl/alpha"); @@ -288,7 +298,8 @@ fail: /* called from spice server thread context only */ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) { - QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, + sizeof(QXLCursorCmd)); QXLCursor *cursor; QEMUCursor *c; @@ -307,7 +318,15 @@ int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext) } switch (cmd->type) { case QXL_CURSOR_SET: - cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id); + /* First read the QXLCursor to get QXLDataChunk::data_size ... */ + cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id, + sizeof(QXLCursor)); + if (!cursor) { + return 1; + } + /* Then read including the chunked data following QXLCursor. 
*/ + cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id, + sizeof(QXLCursor) + cursor->chunk.data_size); if (!cursor) { return 1; } diff --git a/hw/display/qxl.c b/hw/display/qxl.c index 29c80b4289b730b5c062bda783641745af22e244..d7673b8169443a61ac8ec11dfe524df03786a4f0 100644 --- a/hw/display/qxl.c +++ b/hw/display/qxl.c @@ -274,7 +274,8 @@ static void qxl_spice_monitors_config_async(PCIQXLDevice *qxl, int replay) QXL_IO_MONITORS_CONFIG_ASYNC)); } - cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST); + cfg = qxl_phys2virt(qxl, qxl->guest_monitors_config, MEMSLOT_GROUP_GUEST, + sizeof(QXLMonitorsConfig)); if (cfg != NULL && cfg->count == 1) { qxl->guest_primary.resized = 1; qxl->guest_head0_width = cfg->heads[0].width; @@ -459,7 +460,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext) switch (le32_to_cpu(ext->cmd.type)) { case QXL_CMD_SURFACE: { - QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + QXLSurfaceCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, + sizeof(QXLSurfaceCmd)); if (!cmd) { return 1; @@ -494,7 +496,8 @@ static int qxl_track_command(PCIQXLDevice *qxl, struct QXLCommandExt *ext) } case QXL_CMD_CURSOR: { - QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id); + QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id, + sizeof(QXLCursorCmd)); if (!cmd) { return 1; @@ -1369,6 +1372,7 @@ static int qxl_add_memslot(PCIQXLDevice *d, uint32_t slot_id, uint64_t delta, qxl_set_guest_bug(d, "%s: pci_region = %d", __func__, pci_region); return 1; } + assert(guest_end - pci_start <= memory_region_size(mr)); virt_start = (intptr_t)memory_region_get_ram_ptr(mr); memslot.slot_id = slot_id; @@ -1409,11 +1413,13 @@ static void qxl_reset_surfaces(PCIQXLDevice *d) /* can be also called from spice server thread context */ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, - uint32_t *s, uint64_t *o) + uint32_t *s, uint64_t *o, + size_t size_requested) { uint64_t phys = le64_to_cpu(pqxl); uint32_t slot = (phys >> (64 - 8)) & 0xff; uint64_t offset = phys & 0xffffffffffff; + uint64_t size_available; if (slot >= NUM_MEMSLOTS) { qxl_set_guest_bug(qxl, "slot too large %d >= %d", slot, @@ -1437,6 +1443,23 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, slot, offset, qxl->guest_slots[slot].size); return false; } + size_available = memory_region_size(qxl->guest_slots[slot].mr); + if (qxl->guest_slots[slot].offset + offset >= size_available) { + qxl_set_guest_bug(qxl, + "slot %d offset %"PRIu64" > region size %"PRIu64"\n", + slot, qxl->guest_slots[slot].offset + offset, + size_available); + return false; + } + size_available -= qxl->guest_slots[slot].offset + offset; + if (size_requested > size_available) { + qxl_set_guest_bug(qxl, + "slot %d offset %"PRIu64" size %zu: " + "overrun by %"PRIu64" bytes\n", + slot, offset, size_requested, + size_requested - size_available); + return false; + } *s = slot; *o = offset; @@ -1444,7 +1467,8 @@ static bool qxl_get_check_slot_offset(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, } /* can be also called from spice server thread context */ -void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id) +void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id, + size_t size) { uint64_t offset; uint32_t slot; @@ -1455,7 +1479,7 @@ void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, int group_id) offset = le64_to_cpu(pqxl) & 0xffffffffffff; return (void *)(intptr_t)offset; case 
MEMSLOT_GROUP_GUEST: - if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset)) { + if (!qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size)) { return NULL; } ptr = memory_region_get_ram_ptr(qxl->guest_slots[slot].mr); @@ -1577,7 +1601,10 @@ static void qxl_set_mode(PCIQXLDevice *d, unsigned int modenr, int loadvm) } d->guest_slots[0].slot = slot; - assert(qxl_add_memslot(d, 0, devmem, QXL_SYNC) == 0); + if (qxl_add_memslot(d, 0, devmem, QXL_SYNC) != 0) { + qxl_set_guest_bug(d, "device isn't initialized yet"); + return; + } d->guest_primary.surface = surface; qxl_create_guest_primary(d, 0, QXL_SYNC); @@ -1921,9 +1948,9 @@ static void qxl_dirty_one_surface(PCIQXLDevice *qxl, QXLPHYSICAL pqxl, uint32_t slot; bool rc; - rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset); - assert(rc == true); size = (uint64_t)height * abs(stride); + rc = qxl_get_check_slot_offset(qxl, pqxl, &slot, &offset, size); + assert(rc == true); trace_qxl_surfaces_dirty(qxl->id, offset, size); qxl_set_dirty(qxl->guest_slots[slot].mr, qxl->guest_slots[slot].offset + offset, @@ -1952,7 +1979,7 @@ static void qxl_dirty_surfaces(PCIQXLDevice *qxl) } cmd = qxl_phys2virt(qxl, qxl->guest_surfaces.cmds[i], - MEMSLOT_GROUP_GUEST); + MEMSLOT_GROUP_GUEST, sizeof(QXLSurfaceCmd)); assert(cmd); assert(cmd->type == QXL_SURFACE_CMD_CREATE); qxl_dirty_one_surface(qxl, cmd->u.surface_create.data, @@ -2182,11 +2209,14 @@ static void qxl_realize_common(PCIQXLDevice *qxl, Error **errp) qemu_add_vm_change_state_handler(qxl_vm_change_state_handler, qxl); - qxl->update_irq = qemu_bh_new(qxl_update_irq_bh, qxl); + qxl->update_irq = qemu_bh_new_guarded(qxl_update_irq_bh, qxl, + &DEVICE(qxl)->mem_reentrancy_guard); qxl_reset_state(qxl); - qxl->update_area_bh = qemu_bh_new(qxl_render_update_area_bh, qxl); - qxl->ssd.cursor_bh = qemu_bh_new(qemu_spice_cursor_refresh_bh, &qxl->ssd); + qxl->update_area_bh = qemu_bh_new_guarded(qxl_render_update_area_bh, qxl, + &DEVICE(qxl)->mem_reentrancy_guard); + qxl->ssd.cursor_bh = qemu_bh_new_guarded(qemu_spice_cursor_refresh_bh, &qxl->ssd, + &DEVICE(qxl)->mem_reentrancy_guard); } static void qxl_realize_primary(PCIDevice *dev, Error **errp) diff --git a/hw/display/qxl.h b/hw/display/qxl.h index 30d21f4d0bdcee78dd0bc97239e4426459fdce19..89ca832cf97eabe0507eec99ba91de8edc1e2841 100644 --- a/hw/display/qxl.h +++ b/hw/display/qxl.h @@ -147,7 +147,28 @@ OBJECT_DECLARE_SIMPLE_TYPE(PCIQXLDevice, PCI_QXL) #define QXL_DEFAULT_REVISION (QXL_REVISION_STABLE_V12 + 1) /* qxl.c */ -void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id); +/** + * qxl_phys2virt: Get a pointer within a PCI VRAM memory region. + * + * @qxl: QXL device + * @phys: physical offset of buffer within the VRAM + * @group_id: memory slot group + * @size: size of the buffer + * + * Returns a host pointer to a buffer placed at offset @phys within the + * active slot @group_id of the PCI VGA RAM memory region associated with + * the @qxl device. If the slot is inactive, or the offset + size are out + * of the memory region, returns NULL. + * + * Use with care; by the time this function returns, the returned pointer is + * not protected by RCU anymore. If the caller is not within an RCU critical + * section and does not hold the iothread lock, it must have other means of + * protecting the pointer, such as a reference to the region that includes + * the incoming ram_addr_t. 
+ * + */ +void *qxl_phys2virt(PCIQXLDevice *qxl, QXLPHYSICAL phys, int group_id, + size_t size); void qxl_set_guest_bug(PCIQXLDevice *qxl, const char *msg, ...) GCC_FMT_ATTR(2, 3); diff --git a/hw/display/vga.c b/hw/display/vga.c index 9d1f66af402e54103033dc90c6589339fd199d8e..5e5fdf67d7f34c6f6de8ebf2af0f08c54bd3848c 100644 --- a/hw/display/vga.c +++ b/hw/display/vga.c @@ -1514,9 +1514,10 @@ static void vga_draw_graphic(VGACommonState *s, int full_update) force_shadow = true; } + /* bits 5-6: 0 = 16-color mode, 1 = 4-color mode, 2 = 256-color mode. */ shift_control = (s->gr[VGA_GFX_MODE] >> 5) & 3; double_scan = (s->cr[VGA_CRTC_MAX_SCAN] >> 7); - if (shift_control != 1) { + if (s->cr[VGA_CRTC_MODE] & 1) { multi_scan = (((s->cr[VGA_CRTC_MAX_SCAN] & 0x1f) + 1) << double_scan) - 1; } else { @@ -1744,6 +1745,13 @@ static void vga_draw_blank(VGACommonState *s, int full_update) if (s->last_scr_width <= 0 || s->last_scr_height <= 0) return; + if (is_buffer_shared(surface)) { + /* unshare buffer, otherwise the blanking corrupts vga vram */ + surface = qemu_create_displaysurface(s->last_scr_width, + s->last_scr_height); + dpy_gfx_replace_surface(s->con, surface); + } + w = s->last_scr_width * surface_bytes_per_pixel(surface); d = surface_data(surface); for(i = 0; i < s->last_scr_height; i++) { diff --git a/hw/display/vhost-user-gpu.c b/hw/display/vhost-user-gpu.c index 49df56cd14e951c43408a7a87a1736698066dbaf..b2d89cd54e4469ba53efa6ddcecc60acc70ad542 100644 --- a/hw/display/vhost-user-gpu.c +++ b/hw/display/vhost-user-gpu.c @@ -334,7 +334,7 @@ vhost_user_gpu_chr_read(void *opaque) } msg->request = request; - msg->flags = size; + msg->flags = flags; msg->size = size; if (request == VHOST_USER_GPU_CURSOR_UPDATE || @@ -485,6 +485,15 @@ vhost_user_gpu_guest_notifier_pending(VirtIODevice *vdev, int idx) { VhostUserGPU *g = VHOST_USER_GPU(vdev); + /* + * Check for the configure interrupt: VIRTIO_CONFIG_IRQ_IDX (-1) is used + * as the index of the configure interrupt. This device does not support + * it, so report it as not pending. + */ + + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return false; + } return vhost_virtqueue_pending(&g->vhost->dev, idx); } @@ -493,6 +502,15 @@ vhost_user_gpu_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) { VhostUserGPU *g = VHOST_USER_GPU(vdev); + /* + * Check for the configure interrupt: VIRTIO_CONFIG_IRQ_IDX (-1) is used + * as the index of the configure interrupt. This device does not support + * it, so return without masking anything. + */ + + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return; + } vhost_virtqueue_mask(&g->vhost->dev, vdev, idx, mask); } @@ -565,6 +583,12 @@ vhost_user_gpu_device_realize(DeviceState *qdev, Error **errp) g->vhost_gpu_fd = -1; } +static struct vhost_dev *vhost_user_gpu_get_vhost(VirtIODevice *vdev) +{ + VhostUserGPU *g = VHOST_USER_GPU(vdev); + return &g->vhost->dev; +} + static Property vhost_user_gpu_properties[] = { VIRTIO_GPU_BASE_PROPERTIES(VhostUserGPU, parent_obj.conf), DEFINE_PROP_END_OF_LIST(), @@ -586,6 +610,7 @@ vhost_user_gpu_class_init(ObjectClass *klass, void *data) vdc->guest_notifier_pending = vhost_user_gpu_guest_notifier_pending; vdc->get_config = vhost_user_gpu_get_config; vdc->set_config = vhost_user_gpu_set_config; + vdc->get_vhost = vhost_user_gpu_get_vhost; device_class_set_props(dc, vhost_user_gpu_properties); } diff --git a/hw/display/virtio-gpu.c b/hw/display/virtio-gpu.c index d78b9700c7de50d0f08903121cff3141ae32f430..0e5f5045b5c9d66c235d324b0692d46e15fb7634 100644 --- a/hw/display/virtio-gpu.c +++ b/hw/display/virtio-gpu.c 
@@ -814,8 +814,9 @@ int virtio_gpu_create_mapping_iov(VirtIOGPU *g, do { len = l; - map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, - a, &len, DMA_DIRECTION_TO_DEVICE); + map = dma_memory_map(VIRTIO_DEVICE(g)->dma_as, a, &len, + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (!map) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to map MMIO memory for" " element %d\n", __func__, e); @@ -1252,8 +1253,9 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, for (i = 0; i < res->iov_cnt; i++) { hwaddr len = res->iov[i].iov_len; res->iov[i].iov_base = - dma_memory_map(VIRTIO_DEVICE(g)->dma_as, - res->addrs[i], &len, DMA_DIRECTION_TO_DEVICE); + dma_memory_map(VIRTIO_DEVICE(g)->dma_as, res->addrs[i], &len, + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (!res->iov[i].iov_base || len != res->iov[i].iov_len) { /* Clean up the half-a-mapping we just created... */ @@ -1282,6 +1284,7 @@ static int virtio_gpu_load(QEMUFile *f, void *opaque, size_t size, /* load & apply scanout state */ vmstate_load_state(f, &vmstate_virtio_gpu_scanouts, g, 1); for (i = 0; i < g->parent_obj.conf.max_outputs; i++) { + /* FIXME: should take scanout.r.{x,y} into account */ scanout = &g->parent_obj.scanout[i]; if (!scanout->resource_id) { continue; @@ -1332,8 +1335,8 @@ void virtio_gpu_device_realize(DeviceState *qdev, Error **errp) g->ctrl_vq = virtio_get_queue(vdev, 0); g->cursor_vq = virtio_get_queue(vdev, 1); - g->ctrl_bh = qemu_bh_new(virtio_gpu_ctrl_bh, g); - g->cursor_bh = qemu_bh_new(virtio_gpu_cursor_bh, g); + g->ctrl_bh = virtio_bh_new_guarded(qdev, virtio_gpu_ctrl_bh, g); + g->cursor_bh = virtio_bh_new_guarded(qdev, virtio_gpu_cursor_bh, g); QTAILQ_INIT(&g->reslist); QTAILQ_INIT(&g->cmdq); QTAILQ_INIT(&g->fenceq); diff --git a/hw/display/vmware_vga.c b/hw/display/vmware_vga.c index e2969a6c81c83190a334c35a6db10db209941e92..2b81d6122fc8fa2751c6a94bd60d7f466927f9d6 100644 --- a/hw/display/vmware_vga.c +++ b/hw/display/vmware_vga.c @@ -509,6 +509,8 @@ static inline void vmsvga_cursor_define(struct vmsvga_state_s *s, int i, pixels; qc = cursor_alloc(c->width, c->height); + assert(qc != NULL); + qc->hot_x = c->hot_x; qc->hot_y = c->hot_y; switch (c->bpp) { diff --git a/hw/dma/pl330.c b/hw/dma/pl330.c index 0cb46191c1921e266a3ac85493691ba6629e9014..31ce01b7c57c0a5a458351ca8e35d7aca35ddc03 100644 --- a/hw/dma/pl330.c +++ b/hw/dma/pl330.c @@ -1111,7 +1111,8 @@ static inline const PL330InsnDesc *pl330_fetch_insn(PL330Chan *ch) uint8_t opcode; int i; - dma_memory_read(ch->parent->mem_as, ch->pc, &opcode, 1); + dma_memory_read(ch->parent->mem_as, ch->pc, &opcode, 1, + MEMTXATTRS_UNSPECIFIED); for (i = 0; insn_desc[i].size; i++) { if ((opcode & insn_desc[i].opmask) == insn_desc[i].opcode) { return &insn_desc[i]; @@ -1125,7 +1126,8 @@ static inline void pl330_exec_insn(PL330Chan *ch, const PL330InsnDesc *insn) uint8_t buf[PL330_INSN_MAXSIZE]; assert(insn->size <= PL330_INSN_MAXSIZE); - dma_memory_read(ch->parent->mem_as, ch->pc, buf, insn->size); + dma_memory_read(ch->parent->mem_as, ch->pc, buf, insn->size, + MEMTXATTRS_UNSPECIFIED); insn->exec(ch, buf[0], &buf[1], insn->size - 1); } @@ -1189,7 +1191,8 @@ static int pl330_exec_cycle(PL330Chan *channel) if (q != NULL && q->len <= pl330_fifo_num_free(&s->fifo)) { int len = q->len - (q->addr & (q->len - 1)); - dma_memory_read(s->mem_as, q->addr, buf, len); + dma_memory_read(s->mem_as, q->addr, buf, len, + MEMTXATTRS_UNSPECIFIED); trace_pl330_exec_cycle(q->addr, len); if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) { pl330_hexdump(buf, len); 
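The DMA hunks in this series all make the same mechanical change: dma_memory_read(), dma_memory_write() and dma_memory_map() now take an explicit MemTxAttrs argument, and callers with no particular transaction requirements pass MEMTXATTRS_UNSPECIFIED. A minimal sketch of the updated calling convention (the helper below is hypothetical, not part of this patch):

static bool mydev_fetch_desc(AddressSpace *as, dma_addr_t addr, uint64_t *desc)
{
    /*
     * The trailing MemTxAttrs argument is the newly explicit parameter;
     * MEMTXATTRS_UNSPECIFIED keeps the previous default behaviour.
     */
    if (dma_memory_read(as, addr, desc, sizeof(*desc),
                        MEMTXATTRS_UNSPECIFIED)) {
        return false; /* non-zero MemTxResult: the transfer failed */
    }
    return true;
}

Making the attributes explicit at each call site lets a device model later supply real attributes (for example, a requester ID for IOMMU translation) without another tree-wide signature change.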
@@ -1220,7 +1223,8 @@ static int pl330_exec_cycle(PL330Chan *channel) fifo_res = pl330_fifo_get(&s->fifo, buf, len, q->tag); } if (fifo_res == PL330_FIFO_OK || q->z) { - dma_memory_write(s->mem_as, q->addr, buf, len); + dma_memory_write(s->mem_as, q->addr, buf, len, + MEMTXATTRS_UNSPECIFIED); trace_pl330_exec_cycle(q->addr, len); if (trace_event_get_state_backends(TRACE_PL330_HEXDUMP)) { pl330_hexdump(buf, len); diff --git a/hw/dma/sparc32_dma.c b/hw/dma/sparc32_dma.c index 03bc500878f639ec86fe97c5615593189973a99a..0ef13c5e9a8cc2ac826d622da43b3fa072230ba4 100644 --- a/hw/dma/sparc32_dma.c +++ b/hw/dma/sparc32_dma.c @@ -81,11 +81,11 @@ void ledma_memory_read(void *opaque, hwaddr addr, addr |= s->dmaregs[3]; trace_ledma_memory_read(addr, len); if (do_bswap) { - dma_memory_read(&is->iommu_as, addr, buf, len); + dma_memory_read(&is->iommu_as, addr, buf, len, MEMTXATTRS_UNSPECIFIED); } else { addr &= ~1; len &= ~1; - dma_memory_read(&is->iommu_as, addr, buf, len); + dma_memory_read(&is->iommu_as, addr, buf, len, MEMTXATTRS_UNSPECIFIED); for(i = 0; i < len; i += 2) { bswap16s((uint16_t *)(buf + i)); } @@ -103,7 +103,8 @@ void ledma_memory_write(void *opaque, hwaddr addr, addr |= s->dmaregs[3]; trace_ledma_memory_write(addr, len); if (do_bswap) { - dma_memory_write(&is->iommu_as, addr, buf, len); + dma_memory_write(&is->iommu_as, addr, buf, len, + MEMTXATTRS_UNSPECIFIED); } else { addr &= ~1; len &= ~1; @@ -114,7 +115,8 @@ void ledma_memory_write(void *opaque, hwaddr addr, for(i = 0; i < l; i += 2) { tmp_buf[i >> 1] = bswap16(*(uint16_t *)(buf + i)); } - dma_memory_write(&is->iommu_as, addr, tmp_buf, l); + dma_memory_write(&is->iommu_as, addr, tmp_buf, l, + MEMTXATTRS_UNSPECIFIED); len -= l; buf += l; addr += l; @@ -148,7 +150,8 @@ void espdma_memory_read(void *opaque, uint8_t *buf, int len) IOMMUState *is = (IOMMUState *)s->iommu; trace_espdma_memory_read(s->dmaregs[1], len); - dma_memory_read(&is->iommu_as, s->dmaregs[1], buf, len); + dma_memory_read(&is->iommu_as, s->dmaregs[1], buf, len, + MEMTXATTRS_UNSPECIFIED); s->dmaregs[1] += len; } @@ -158,7 +161,8 @@ void espdma_memory_write(void *opaque, uint8_t *buf, int len) IOMMUState *is = (IOMMUState *)s->iommu; trace_espdma_memory_write(s->dmaregs[1], len); - dma_memory_write(&is->iommu_as, s->dmaregs[1], buf, len); + dma_memory_write(&is->iommu_as, s->dmaregs[1], buf, len, + MEMTXATTRS_UNSPECIFIED); s->dmaregs[1] += len; } diff --git a/hw/dma/xilinx_axidma.c b/hw/dma/xilinx_axidma.c index bc383f53cca3294a9c3088225ec44650f913f5d7..5044fb146d48d1654ed6df056215d595f6b81c89 100644 --- a/hw/dma/xilinx_axidma.c +++ b/hw/dma/xilinx_axidma.c @@ -598,7 +598,7 @@ static void axidma_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); - dc->realize = xilinx_axidma_realize, + dc->realize = xilinx_axidma_realize; dc->reset = xilinx_axidma_reset; device_class_set_props(dc, axidma_properties); } diff --git a/hw/dma/xlnx-zynq-devcfg.c b/hw/dma/xlnx-zynq-devcfg.c index e33112b6f0ea94e50ba0e9e8170dd5301164cb47..f5ad1a0d22cd031d3633c9108c0716bad1347f6f 100644 --- a/hw/dma/xlnx-zynq-devcfg.c +++ b/hw/dma/xlnx-zynq-devcfg.c @@ -161,12 +161,14 @@ static void xlnx_zynq_devcfg_dma_go(XlnxZynqDevcfg *s) btt = MIN(btt, dmah->dest_len); } DB_PRINT("reading %x bytes from %x\n", btt, dmah->src_addr); - dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt); + dma_memory_read(&address_space_memory, dmah->src_addr, buf, btt, + MEMTXATTRS_UNSPECIFIED); dmah->src_len -= btt; dmah->src_addr += btt; if (loopback && (dmah->src_len || 
dmah->dest_len)) { DB_PRINT("writing %x bytes from %x\n", btt, dmah->dest_addr); - dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt); + dma_memory_write(&address_space_memory, dmah->dest_addr, buf, btt, + MEMTXATTRS_UNSPECIFIED); dmah->dest_len -= btt; dmah->dest_addr += btt; } diff --git a/hw/dma/xlnx_dpdma.c b/hw/dma/xlnx_dpdma.c index 967548abd3158ed687bc5abc2779849a06ca070b..2d7eae72cd232603b91bb829b0209ae89fa7f459 100644 --- a/hw/dma/xlnx_dpdma.c +++ b/hw/dma/xlnx_dpdma.c @@ -652,7 +652,7 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, } if (dma_memory_read(&address_space_memory, desc_addr, &desc, - sizeof(DPDMADescriptor))) { + sizeof(DPDMADescriptor), MEMTXATTRS_UNSPECIFIED)) { s->registers[DPDMA_EISR] |= ((1 << 1) << channel); xlnx_dpdma_update_irq(s); s->operation_finished[channel] = true; @@ -708,7 +708,8 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, if (dma_memory_read(&address_space_memory, source_addr[0], &s->data[channel][ptr], - line_size)) { + line_size, + MEMTXATTRS_UNSPECIFIED)) { s->registers[DPDMA_ISR] |= ((1 << 12) << channel); xlnx_dpdma_update_irq(s); DPRINTF("Can't get data.\n"); @@ -736,7 +737,8 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, if (dma_memory_read(&address_space_memory, source_addr[frag], &(s->data[channel][ptr]), - fragment_len)) { + fragment_len, + MEMTXATTRS_UNSPECIFIED)) { s->registers[DPDMA_ISR] |= ((1 << 12) << channel); xlnx_dpdma_update_irq(s); DPRINTF("Can't get data.\n"); @@ -754,7 +756,7 @@ size_t xlnx_dpdma_start_operation(XlnxDPDMAState *s, uint8_t channel, DPRINTF("update the descriptor with the done flag set.\n"); xlnx_dpdma_desc_set_done(&desc); dma_memory_write(&address_space_memory, desc_addr, &desc, - sizeof(DPDMADescriptor)); + sizeof(DPDMADescriptor), MEMTXATTRS_UNSPECIFIED); } if (xlnx_dpdma_desc_completion_interrupt(&desc)) { diff --git a/hw/hyperv/hyperv.c b/hw/hyperv/hyperv.c index cb1074f234c896a0544009ad2eb41202339fe467..220481a1ca7a7b68b92742794de5b900e7a213a3 100644 --- a/hw/hyperv/hyperv.c +++ b/hw/hyperv/hyperv.c @@ -150,7 +150,7 @@ void hyperv_synic_reset(CPUState *cs) SynICState *synic = get_synic(cs); if (synic) { - device_legacy_reset(DEVICE(synic)); + device_cold_reset(DEVICE(synic)); } } diff --git a/hw/hyperv/vmbus.c b/hw/hyperv/vmbus.c index dbce3b35fba7f1964f44f39a09103d540d33ce82..8aad29f1bb23609378020a6bbbf6903fbdb3505b 100644 --- a/hw/hyperv/vmbus.c +++ b/hw/hyperv/vmbus.c @@ -373,7 +373,8 @@ static ssize_t gpadl_iter_io(GpadlIter *iter, void *buf, uint32_t len) maddr = (iter->gpadl->gfns[idx] << TARGET_PAGE_BITS) | off_in_page; - iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir); + iter->map = dma_memory_map(iter->as, maddr, &mlen, iter->dir, + MEMTXATTRS_UNSPECIFIED); if (mlen != pgleft) { dma_memory_unmap(iter->as, iter->map, mlen, iter->dir, 0); iter->map = NULL; @@ -490,7 +491,8 @@ int vmbus_map_sgl(VMBusChanReq *req, DMADirection dir, struct iovec *iov, goto err; } - iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir); + iov[ret_cnt].iov_base = dma_memory_map(sgl->as, a, &l, dir, + MEMTXATTRS_UNSPECIFIED); if (!l) { ret = -EFAULT; goto err; @@ -566,7 +568,7 @@ static vmbus_ring_buffer *ringbuf_map_hdr(VMBusRingBufCommon *ringbuf) dma_addr_t mlen = sizeof(*rb); rb = dma_memory_map(ringbuf->as, ringbuf->rb_addr, &mlen, - DMA_DIRECTION_FROM_DEVICE); + DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED); if (mlen != sizeof(*rb)) { dma_memory_unmap(ringbuf->as, rb, mlen, 
DMA_DIRECTION_FROM_DEVICE, 0); diff --git a/hw/i2c/pmbus_device.c b/hw/i2c/pmbus_device.c index 24f8f522d9f19bdaf091ad6a212edf4330b939eb..f39cd532deca5e249424d3b75aba45e9924fecd5 100644 --- a/hw/i2c/pmbus_device.c +++ b/hw/i2c/pmbus_device.c @@ -166,15 +166,18 @@ static void pmbus_quick_cmd(SMBusDevice *smd, uint8_t read) } } -static void pmbus_pages_alloc(PMBusDevice *pmdev) +static uint8_t pmbus_pages_num(PMBusDevice *pmdev) { + const PMBusDeviceClass *k = PMBUS_DEVICE_GET_CLASS(pmdev); + /* some PMBus devices don't use the PAGE command, so they get 1 page */ - PMBusDeviceClass *k = PMBUS_DEVICE_GET_CLASS(pmdev); - if (k->device_num_pages == 0) { - k->device_num_pages = 1; - } - pmdev->num_pages = k->device_num_pages; - pmdev->pages = g_new0(PMBusPage, k->device_num_pages); + return k->device_num_pages ? : 1; +} + +static void pmbus_pages_alloc(PMBusDevice *pmdev) +{ + pmdev->num_pages = pmbus_pages_num(pmdev); + pmdev->pages = g_new0(PMBusPage, pmdev->num_pages); } void pmbus_check_limits(PMBusDevice *pmdev) diff --git a/hw/i386/acpi-build.c b/hw/i386/acpi-build.c index a99c6e4fe3fad88da568c9f738d7bd8f5900f126..1e33e1f3d0b88bd9682f8abb927f3f201b506f58 100644 --- a/hw/i386/acpi-build.c +++ b/hw/i386/acpi-build.c @@ -1513,7 +1513,7 @@ build_dsdt(GArray *table_data, BIOSLinker *linker, .fw_unplugs_cpu = pm->smi_on_cpu_unplug, }; build_cpus_aml(dsdt, machine, opts, pm->cpu_hp_io_base, - "\\_SB.PCI0", "\\_GPE._E02"); + "\\_SB.PCI0", "\\_GPE._E02", AML_SYSTEM_IO); } if (pcms->memhp_io_base && nr_mem) { @@ -2068,6 +2068,8 @@ build_srat(GArray *table_data, BIOSLinker *linker, MachineState *machine) nvdimm_build_srat(table_data); } + sgx_epc_build_srat(table_data); + /* * TODO: this part is not in ACPI spec and current linux kernel boots fine * without these entries. But I recall there were issues the last time I @@ -2721,6 +2723,8 @@ void acpi_build(AcpiBuildTables *tables, MachineState *machine) /* Cleanup memory that's no longer used. 
*/ g_array_free(table_offsets, true); + g_free(slic_oem.id); + g_free(slic_oem.table_id); } static void acpi_ram_update(MemoryRegion *mr, GArray *data) diff --git a/hw/i386/amd_iommu.c b/hw/i386/amd_iommu.c index 91fe34ae5899339e4114a6e0cac6d66ef0294003..dfb9a2d8e6b4f784771b23553f7edaab05b087d4 100644 --- a/hw/i386/amd_iommu.c +++ b/hw/i386/amd_iommu.c @@ -181,7 +181,7 @@ static void amdvi_log_event(AMDVIState *s, uint64_t *evt) } if (dma_memory_write(&address_space_memory, s->evtlog + s->evtlog_tail, - evt, AMDVI_EVENT_LEN)) { + evt, AMDVI_EVENT_LEN, MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_evntlog_fail(s->evtlog, s->evtlog_tail); } @@ -376,7 +376,8 @@ static void amdvi_completion_wait(AMDVIState *s, uint64_t *cmd) } if (extract64(cmd[0], 0, 1)) { if (dma_memory_write(&address_space_memory, addr, &data, - AMDVI_COMPLETION_DATA_SIZE)) { + AMDVI_COMPLETION_DATA_SIZE, + MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_completion_wait_fail(addr); } } @@ -502,7 +503,7 @@ static void amdvi_cmdbuf_exec(AMDVIState *s) uint64_t cmd[2]; if (dma_memory_read(&address_space_memory, s->cmdbuf + s->cmdbuf_head, - cmd, AMDVI_COMMAND_SIZE)) { + cmd, AMDVI_COMMAND_SIZE, MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_command_read_fail(s->cmdbuf, s->cmdbuf_head); amdvi_log_command_error(s, s->cmdbuf + s->cmdbuf_head); return; @@ -836,7 +837,7 @@ static bool amdvi_get_dte(AMDVIState *s, int devid, uint64_t *entry) uint32_t offset = devid * AMDVI_DEVTAB_ENTRY_SIZE; if (dma_memory_read(&address_space_memory, s->devtab + offset, entry, - AMDVI_DEVTAB_ENTRY_SIZE)) { + AMDVI_DEVTAB_ENTRY_SIZE, MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_dte_get_fail(s->devtab, offset); /* log error accessing dte */ amdvi_log_devtab_error(s, devid, s->devtab + offset, 0); @@ -881,7 +882,8 @@ static inline uint64_t amdvi_get_pte_entry(AMDVIState *s, uint64_t pte_addr, { uint64_t pte; - if (dma_memory_read(&address_space_memory, pte_addr, &pte, sizeof(pte))) { + if (dma_memory_read(&address_space_memory, pte_addr, + &pte, sizeof(pte), MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_get_pte_hwerror(pte_addr); amdvi_log_pagetab_error(s, devid, pte_addr, 0); pte = 0; @@ -1048,7 +1050,7 @@ static int amdvi_get_irte(AMDVIState *s, MSIMessage *origin, uint64_t *dte, trace_amdvi_ir_irte(irte_root, offset); if (dma_memory_read(&address_space_memory, irte_root + offset, - irte, sizeof(*irte))) { + irte, sizeof(*irte), MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_ir_err("failed to get irte"); return -AMDVI_IR_GET_IRTE; } @@ -1108,7 +1110,7 @@ static int amdvi_get_irte_ga(AMDVIState *s, MSIMessage *origin, uint64_t *dte, trace_amdvi_ir_irte(irte_root, offset); if (dma_memory_read(&address_space_memory, irte_root + offset, - irte, sizeof(*irte))) { + irte, sizeof(*irte), MEMTXATTRS_UNSPECIFIED)) { trace_amdvi_ir_err("failed to get irte_ga"); return -AMDVI_IR_GET_IRTE; } @@ -1243,13 +1245,8 @@ static int amdvi_int_remap_msi(AMDVIState *iommu, return -AMDVI_IR_ERR; } - if (origin->address & AMDVI_MSI_ADDR_HI_MASK) { - trace_amdvi_err("MSI address high 32 bits non-zero when " - "Interrupt Remapping enabled."); - return -AMDVI_IR_ERR; - } - - if ((origin->address & AMDVI_MSI_ADDR_LO_MASK) != APIC_DEFAULT_ADDRESS) { + if (origin->address < AMDVI_INT_ADDR_FIRST || + origin->address + sizeof(origin->data) > AMDVI_INT_ADDR_LAST + 1) { trace_amdvi_err("MSI is not from IOAPIC."); return -AMDVI_IR_ERR; } diff --git a/hw/i386/amd_iommu.h b/hw/i386/amd_iommu.h index 79d38a3e41843bc48180e3a4cc8779289c5d5e6e..210a37dfb17e23637576fd5ab493fb08e0415cd9 100644 --- a/hw/i386/amd_iommu.h +++ 
b/hw/i386/amd_iommu.h @@ -210,8 +210,6 @@ #define AMDVI_INT_ADDR_FIRST 0xfee00000 #define AMDVI_INT_ADDR_LAST 0xfeefffff #define AMDVI_INT_ADDR_SIZE (AMDVI_INT_ADDR_LAST - AMDVI_INT_ADDR_FIRST + 1) -#define AMDVI_MSI_ADDR_HI_MASK (0xffffffff00000000ULL) -#define AMDVI_MSI_ADDR_LO_MASK (0x00000000ffffffffULL) /* SB IOAPIC is always on this device in AMD systems */ #define AMDVI_IOAPIC_SB_DEVID PCI_BUILD_BDF(0, PCI_DEVFN(0x14, 0)) diff --git a/hw/i386/intel_iommu.c b/hw/i386/intel_iommu.c index f584449d8d1d72e061d8eead5cbdb98a8745a8f4..196d5b72d9acaa1747a45ee188258ce60f615420 100644 --- a/hw/i386/intel_iommu.c +++ b/hw/i386/intel_iommu.c @@ -569,7 +569,8 @@ static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, dma_addr_t addr; addr = s->root + index * sizeof(*re); - if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) { + if (dma_memory_read(&address_space_memory, addr, + re, sizeof(*re), MEMTXATTRS_UNSPECIFIED)) { re->lo = 0; return -VTD_FR_ROOT_TABLE_INV; } @@ -602,7 +603,8 @@ static int vtd_get_context_entry_from_root(IntelIOMMUState *s, } addr = addr + index * ce_size; - if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) { + if (dma_memory_read(&address_space_memory, addr, + ce, ce_size, MEMTXATTRS_UNSPECIFIED)) { return -VTD_FR_CONTEXT_TABLE_INV; } @@ -639,8 +641,8 @@ static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index) assert(index < VTD_SL_PT_ENTRY_NR); if (dma_memory_read(&address_space_memory, - base_addr + index * sizeof(slpte), &slpte, - sizeof(slpte))) { + base_addr + index * sizeof(slpte), + &slpte, sizeof(slpte), MEMTXATTRS_UNSPECIFIED)) { slpte = (uint64_t)-1; return slpte; } @@ -704,7 +706,8 @@ static int vtd_get_pdire_from_pdir_table(dma_addr_t pasid_dir_base, index = VTD_PASID_DIR_INDEX(pasid); entry_size = VTD_PASID_DIR_ENTRY_SIZE; addr = pasid_dir_base + index * entry_size; - if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) { + if (dma_memory_read(&address_space_memory, addr, + pdire, entry_size, MEMTXATTRS_UNSPECIFIED)) { return -VTD_FR_PASID_TABLE_INV; } @@ -728,7 +731,8 @@ static int vtd_get_pe_in_pasid_leaf_table(IntelIOMMUState *s, index = VTD_PASID_TABLE_INDEX(pasid); entry_size = VTD_PASID_ENTRY_SIZE; addr = addr + index * entry_size; - if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) { + if (dma_memory_read(&address_space_memory, addr, + pe, entry_size, MEMTXATTRS_UNSPECIFIED)) { return -VTD_FR_PASID_TABLE_INV; } @@ -1153,7 +1157,7 @@ static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info) return ret; } /* Drop any existing mapping */ - iova_tree_remove(as->iova_tree, &target); + iova_tree_remove(as->iova_tree, target); /* Recover the correct type */ event->type = IOMMU_NOTIFIER_MAP; entry->perm = cache_perm; @@ -1166,7 +1170,7 @@ static int vtd_page_walk_one(IOMMUTLBEvent *event, vtd_page_walk_info *info) trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); return 0; } - iova_tree_remove(as->iova_tree, &target); + iova_tree_remove(as->iova_tree, target); } trace_vtd_page_walk_one(info->domain_id, entry->iova, @@ -2275,7 +2279,8 @@ static bool vtd_get_inv_desc(IntelIOMMUState *s, uint32_t dw = s->iq_dw ? 
32 : 16; dma_addr_t addr = base_addr + offset * dw; - if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) { + if (dma_memory_read(&address_space_memory, addr, + inv_desc, dw, MEMTXATTRS_UNSPECIFIED)) { error_report_once("Read INV DESC failed."); return false; } @@ -2288,15 +2293,51 @@ static bool vtd_get_inv_desc(IntelIOMMUState *s, return true; } +static bool vtd_inv_desc_reserved_check(IntelIOMMUState *s, + VTDInvDesc *inv_desc, + uint64_t mask[4], bool dw, + const char *func_name, + const char *desc_type) +{ + if (s->iq_dw) { + if (inv_desc->val[0] & mask[0] || inv_desc->val[1] & mask[1] || + inv_desc->val[2] & mask[2] || inv_desc->val[3] & mask[3]) { + error_report("%s: invalid %s desc val[3]: 0x%"PRIx64 + " val[2]: 0x%"PRIx64" val[1]=0x%"PRIx64 + " val[0]=0x%"PRIx64" (reserved nonzero)", + func_name, desc_type, inv_desc->val[3], + inv_desc->val[2], inv_desc->val[1], + inv_desc->val[0]); + return false; + } + } else { + if (dw) { + error_report("%s: 256-bit %s desc in 128-bit invalidation queue", + func_name, desc_type); + return false; + } + + if (inv_desc->lo & mask[0] || inv_desc->hi & mask[1]) { + error_report("%s: invalid %s desc: hi=%"PRIx64", lo=%"PRIx64 + " (reserved nonzero)", func_name, desc_type, + inv_desc->hi, inv_desc->lo); + return false; + } + } + + return true; +} + static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { - if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) || - (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) { - error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 - " (reserved nonzero)", __func__, inv_desc->hi, - inv_desc->lo); + uint64_t mask[4] = {VTD_INV_DESC_WAIT_RSVD_LO, VTD_INV_DESC_WAIT_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "wait")) { return false; } + if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { /* Status Write */ uint32_t status_data = (uint32_t)(inv_desc->lo >> @@ -2308,8 +2349,9 @@ static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) dma_addr_t status_addr = inv_desc->hi; trace_vtd_inv_desc_wait_sw(status_addr, status_data); status_data = cpu_to_le32(status_data); - if (dma_memory_write(&address_space_memory, status_addr, &status_data, - sizeof(status_data))) { + if (dma_memory_write(&address_space_memory, status_addr, + &status_data, sizeof(status_data), + MEMTXATTRS_UNSPECIFIED)) { trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); return false; } @@ -2329,13 +2371,14 @@ static bool vtd_process_context_cache_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { uint16_t sid, fmask; + uint64_t mask[4] = {VTD_INV_DESC_CC_RSVD, VTD_INV_DESC_ALL_ONE, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; - if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) { - error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64 - " (reserved nonzero)", __func__, inv_desc->hi, - inv_desc->lo); + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "cc inv")) { return false; } + switch (inv_desc->lo & VTD_INV_DESC_CC_G) { case VTD_INV_DESC_CC_DOMAIN: trace_vtd_inv_desc_cc_domain( @@ -2365,12 +2408,11 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) uint16_t domain_id; uint8_t am; hwaddr addr; + uint64_t mask[4] = {VTD_INV_DESC_IOTLB_RSVD_LO, VTD_INV_DESC_IOTLB_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; - if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) || - (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) { - 
error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 - ", lo=0x%"PRIx64" (reserved bits unzero)", - __func__, inv_desc->hi, inv_desc->lo); + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "iotlb inv")) { return false; } @@ -2411,6 +2453,14 @@ static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) static bool vtd_process_inv_iec_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) { + uint64_t mask[4] = {VTD_INV_DESC_IEC_RSVD, VTD_INV_DESC_ALL_ONE, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "iec inv")) { + return false; + } + trace_vtd_inv_desc_iec(inv_desc->iec.granularity, inv_desc->iec.index, inv_desc->iec.index_mask); @@ -2433,6 +2483,14 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, uint8_t devfn; bool size; uint8_t bus_num; + uint64_t mask[4] = {VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO, + VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI, + VTD_INV_DESC_ALL_ONE, VTD_INV_DESC_ALL_ONE}; + + if (!vtd_inv_desc_reserved_check(s, inv_desc, mask, false, + __func__, "dev-iotlb inv")) { + return false; + } addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); @@ -2440,14 +2498,6 @@ static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, bus_num = sid >> 8; size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); - if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) || - (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) { - error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64 - ", lo=%"PRIx64" (reserved nonzero)", __func__, - inv_desc->hi, inv_desc->lo); - return false; - } - vtd_bus = vtd_find_as_from_bus_num(s, bus_num); if (!vtd_bus) { goto done; @@ -2496,7 +2546,7 @@ static bool vtd_process_inv_desc(IntelIOMMUState *s) return false; } - desc_type = inv_desc.lo & VTD_INV_DESC_TYPE; + desc_type = VTD_INV_DESC_TYPE(inv_desc.lo); /* FIXME: should update at first or at last? 
*/ s->iq_last_desc_type = desc_type; @@ -2599,6 +2649,7 @@ static void vtd_handle_iqt_write(IntelIOMMUState *s) if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { error_report_once("%s: RSV bit is set: val=0x%"PRIx64, __func__, val); + vtd_handle_inv_queue_error(s); return; } s->iq_tail = VTD_IQT_QT(s->iq_dw, val); @@ -3120,8 +3171,8 @@ static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index, } addr = iommu->intr_root + index * sizeof(*entry); - if (dma_memory_read(&address_space_memory, addr, entry, - sizeof(*entry))) { + if (dma_memory_read(&address_space_memory, addr, + entry, sizeof(*entry), MEMTXATTRS_UNSPECIFIED)) { error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64, __func__, index, addr); return -VTD_FR_IR_ROOT_INVAL; @@ -3510,7 +3561,7 @@ static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n) map.iova = n->start; map.size = size; - iova_tree_remove(as->iova_tree, &map); + iova_tree_remove(as->iova_tree, map); } static void vtd_address_space_unmap_all(IntelIOMMUState *s) diff --git a/hw/i386/intel_iommu_internal.h b/hw/i386/intel_iommu_internal.h index a6c788049ba1160bcd8eaa43b03e6a877904d088..6aa470149cd6a61daddabd48d65c30d0655e3015 100644 --- a/hw/i386/intel_iommu_internal.h +++ b/hw/i386/intel_iommu_internal.h @@ -340,7 +340,9 @@ union VTDInvDesc { typedef union VTDInvDesc VTDInvDesc; /* Masks for struct VTDInvDesc */ -#define VTD_INV_DESC_TYPE 0xf +#define VTD_INV_DESC_ALL_ONE -1ULL +#define VTD_INV_DESC_TYPE(val) ((((val) >> 5) & 0x70ULL) | \ + ((val) & 0xfULL)) #define VTD_INV_DESC_CC 0x1 /* Context-cache Invalidate Desc */ #define VTD_INV_DESC_IOTLB 0x2 #define VTD_INV_DESC_DEVICE 0x3 @@ -356,7 +358,7 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_WAIT_IF (1ULL << 4) #define VTD_INV_DESC_WAIT_FN (1ULL << 6) #define VTD_INV_DESC_WAIT_DATA_SHIFT 32 -#define VTD_INV_DESC_WAIT_RSVD_LO 0Xffffff80ULL +#define VTD_INV_DESC_WAIT_RSVD_LO 0Xfffff180ULL #define VTD_INV_DESC_WAIT_RSVD_HI 3ULL /* Masks for Context-cache Invalidation Descriptor */ @@ -367,7 +369,7 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_CC_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK) #define VTD_INV_DESC_CC_SID(val) (((val) >> 32) & 0xffffUL) #define VTD_INV_DESC_CC_FM(val) (((val) >> 48) & 3UL) -#define VTD_INV_DESC_CC_RSVD 0xfffc00000000ffc0ULL +#define VTD_INV_DESC_CC_RSVD 0xfffc00000000f1c0ULL /* Masks for IOTLB Invalidate Descriptor */ #define VTD_INV_DESC_IOTLB_G (3ULL << 4) @@ -377,7 +379,7 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_IOTLB_DID(val) (((val) >> 16) & VTD_DOMAIN_ID_MASK) #define VTD_INV_DESC_IOTLB_ADDR(val) ((val) & ~0xfffULL) #define VTD_INV_DESC_IOTLB_AM(val) ((val) & 0x3fULL) -#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000ff00ULL +#define VTD_INV_DESC_IOTLB_RSVD_LO 0xffffffff0000f100ULL #define VTD_INV_DESC_IOTLB_RSVD_HI 0xf80ULL /* Mask for Device IOTLB Invalidate Descriptor */ @@ -385,7 +387,10 @@ typedef union VTDInvDesc VTDInvDesc; #define VTD_INV_DESC_DEVICE_IOTLB_SIZE(val) ((val) & 0x1) #define VTD_INV_DESC_DEVICE_IOTLB_SID(val) (((val) >> 32) & 0xFFFFULL) #define VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI 0xffeULL -#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0fff8 +#define VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO 0xffff0000ffe0f1f0 + +/* Masks for Interrupt Entry Invalidate Descriptor */ +#define VTD_INV_DESC_IEC_RSVD 0xffff000007fff1e0ULL /* Rsvd field masks for spte */ #define VTD_SPTE_SNP 0x800ULL diff --git a/hw/i386/microvm.c b/hw/i386/microvm.c index 
4b3b1dd262f1f7f6a649dd34d6fa7c0e5eb64b9a..3ee95f9b4dada5442b7604b31730b5cb252c6c6c 100644 --- a/hw/i386/microvm.c +++ b/hw/i386/microvm.c @@ -486,7 +486,7 @@ static void microvm_machine_reset(MachineState *machine) cpu = X86_CPU(cs); if (cpu->apic_state) { - device_legacy_reset(cpu->apic_state); + device_cold_reset(cpu->apic_state); } } } diff --git a/hw/i386/multiboot.c b/hw/i386/multiboot.c index 0a10089f14b43e68b3b106c6197044262d9ec7f2..963e29362e4bfe308fb7f370169e3aa8cceee580 100644 --- a/hw/i386/multiboot.c +++ b/hw/i386/multiboot.c @@ -163,6 +163,7 @@ int load_multiboot(X86MachineState *x86ms, uint8_t *mb_bootinfo_data; uint32_t cmdline_len; GList *mods = NULL; + g_autofree char *kcmdline = NULL; /* Ok, let's see if it is a multiboot image. The header is 12x32bit long, so the latest entry may be 8192 - 48. */ @@ -362,9 +363,7 @@ int load_multiboot(X86MachineState *x86ms, } /* Commandline support */ - char kcmdline[strlen(kernel_filename) + strlen(kernel_cmdline) + 2]; - snprintf(kcmdline, sizeof(kcmdline), "%s %s", - kernel_filename, kernel_cmdline); + kcmdline = g_strdup_printf("%s %s", kernel_filename, kernel_cmdline); stl_p(bootinfo + MBI_CMDLINE, mb_add_cmdline(&mbs, kcmdline)); stl_p(bootinfo + MBI_BOOTLOADER, mb_add_bootloader(&mbs, bootloader_name)); diff --git a/hw/i386/pc.c b/hw/i386/pc.c index a2ef40ecbc2459145557e85f0873ba18d4139b74..7003ea1a05dd153c714b8fde52ec8e85bb77db16 100644 --- a/hw/i386/pc.c +++ b/hw/i386/pc.c @@ -736,14 +736,6 @@ void pc_machine_done(Notifier *notifier, void *data) /* update FW_CFG_NB_CPUS to account for -device added CPUs */ fw_cfg_modify_i16(x86ms->fw_cfg, FW_CFG_NB_CPUS, x86ms->boot_cpus); } - - - if (x86ms->apic_id_limit > 255 && !xen_enabled() && - !kvm_irqchip_in_kernel()) { - error_report("current -smp configuration requires kernel " - "irqchip support."); - exit(EXIT_FAILURE); - } } void pc_guest_info_init(PCMachineState *pcms) @@ -1642,7 +1634,7 @@ static void pc_machine_reset(MachineState *machine) cpu = X86_CPU(cs); if (cpu->apic_state) { - device_legacy_reset(cpu->apic_state); + device_cold_reset(cpu->apic_state); } } } @@ -1734,15 +1726,23 @@ static void pc_machine_class_init(ObjectClass *oc, void *data) object_class_property_add_bool(oc, PC_MACHINE_SMBUS, pc_machine_get_smbus, pc_machine_set_smbus); + object_class_property_set_description(oc, PC_MACHINE_SMBUS, + "Enable/disable system management bus"); object_class_property_add_bool(oc, PC_MACHINE_SATA, pc_machine_get_sata, pc_machine_set_sata); + object_class_property_set_description(oc, PC_MACHINE_SATA, + "Enable/disable Serial ATA bus"); object_class_property_add_bool(oc, PC_MACHINE_PIT, pc_machine_get_pit, pc_machine_set_pit); + object_class_property_set_description(oc, PC_MACHINE_PIT, + "Enable/disable Intel 8254 programmable interval timer emulation"); object_class_property_add_bool(oc, "hpet", pc_machine_get_hpet, pc_machine_set_hpet); + object_class_property_set_description(oc, "hpet", + "Enable/disable high precision event timer emulation"); object_class_property_add_bool(oc, "default-bus-bypass-iommu", pc_machine_get_default_bus_bypass_iommu, diff --git a/hw/i386/sgx-epc.c b/hw/i386/sgx-epc.c index e508827e787b7b7a38cf9785a030571cf22c6e54..96b2940d75eba983124effc9963f4d320ef0d7fb 100644 --- a/hw/i386/sgx-epc.c +++ b/hw/i386/sgx-epc.c @@ -21,6 +21,7 @@ static Property sgx_epc_properties[] = { DEFINE_PROP_UINT64(SGX_EPC_ADDR_PROP, SGXEPCDevice, addr, 0), + DEFINE_PROP_UINT32(SGX_EPC_NUMA_NODE_PROP, SGXEPCDevice, node, 0), DEFINE_PROP_LINK(SGX_EPC_MEMDEV_PROP, SGXEPCDevice, 
hostmem, TYPE_MEMORY_BACKEND_EPC, HostMemoryBackendEpc *), DEFINE_PROP_END_OF_LIST(), @@ -139,6 +140,8 @@ static void sgx_epc_md_fill_device_info(const MemoryDeviceState *md, se->memaddr = epc->addr; se->size = object_property_get_uint(OBJECT(epc), SGX_EPC_SIZE_PROP, NULL); + se->node = object_property_get_uint(OBJECT(epc), SGX_EPC_NUMA_NODE_PROP, + NULL); se->memdev = object_get_canonical_path(OBJECT(epc->hostmem)); info->u.sgx_epc.data = se; diff --git a/hw/i386/sgx-stub.c b/hw/i386/sgx-stub.c index c9b379e66519210337f03952359317de43f46901..26833eb233c5b6448831c76c31266babaa070265 100644 --- a/hw/i386/sgx-stub.c +++ b/hw/i386/sgx-stub.c @@ -6,6 +6,10 @@ #include "qapi/error.h" #include "qapi/qapi-commands-misc-target.h" +void sgx_epc_build_srat(GArray *table_data) +{ +} + SGXInfo *qmp_query_sgx(Error **errp) { error_setg(errp, "SGX support is not compiled in"); diff --git a/hw/i386/sgx.c b/hw/i386/sgx.c index 8fef3dd8fad4c20a4c9a3169f6e328707504bce9..a2b318dd9387d1343736ae50da1b7bfb6159e40f 100644 --- a/hw/i386/sgx.c +++ b/hw/i386/sgx.c @@ -23,6 +23,7 @@ #include "sysemu/hw_accel.h" #include "sysemu/reset.h" #include +#include "hw/acpi/aml-build.h" #define SGX_MAX_EPC_SECTIONS 8 #define SGX_CPUID_EPC_INVALID 0x0 @@ -36,17 +37,59 @@ #define RETRY_NUM 2 +static int sgx_epc_device_list(Object *obj, void *opaque) +{ + GSList **list = opaque; + + if (object_dynamic_cast(obj, TYPE_SGX_EPC)) { + *list = g_slist_append(*list, DEVICE(obj)); + } + + object_child_foreach(obj, sgx_epc_device_list, opaque); + return 0; +} + +static GSList *sgx_epc_get_device_list(void) +{ + GSList *list = NULL; + + object_child_foreach(qdev_get_machine(), sgx_epc_device_list, &list); + return list; +} + +void sgx_epc_build_srat(GArray *table_data) +{ + GSList *device_list = sgx_epc_get_device_list(); + + for (; device_list; device_list = device_list->next) { + DeviceState *dev = device_list->data; + Object *obj = OBJECT(dev); + uint64_t addr, size; + int node; + + node = object_property_get_uint(obj, SGX_EPC_NUMA_NODE_PROP, + &error_abort); + addr = object_property_get_uint(obj, SGX_EPC_ADDR_PROP, &error_abort); + size = object_property_get_uint(obj, SGX_EPC_SIZE_PROP, &error_abort); + + build_srat_memory(table_data, addr, size, node, MEM_AFFINITY_ENABLED); + } + g_slist_free(device_list); +} + static uint64_t sgx_calc_section_metric(uint64_t low, uint64_t high) { return (low & MAKE_64BIT_MASK(12, 20)) + ((high & MAKE_64BIT_MASK(0, 20)) << 32); } -static uint64_t sgx_calc_host_epc_section_size(void) +static SGXEPCSectionList *sgx_calc_host_epc_sections(uint64_t *size) { + SGXEPCSectionList *head = NULL, **tail = &head; + SGXEPCSection *section; uint32_t i, type; uint32_t eax, ebx, ecx, edx; - uint64_t size = 0; + uint32_t j = 0; for (i = 0; i < SGX_MAX_EPC_SECTIONS; i++) { host_cpuid(0x12, i + 2, &eax, &ebx, &ecx, &edx); @@ -60,10 +103,14 @@ static uint64_t sgx_calc_host_epc_section_size(void) break; } - size += sgx_calc_section_metric(ecx, edx); + section = g_new0(SGXEPCSection, 1); + section->node = j++; + section->size = sgx_calc_section_metric(ecx, edx); + *size += section->size; + QAPI_LIST_APPEND(tail, section); } - return size; + return head; } static void sgx_epc_reset(void *opaque) @@ -110,6 +157,7 @@ SGXInfo *qmp_query_sgx_capabilities(Error **errp) { SGXInfo *info = NULL; uint32_t eax, ebx, ecx, edx; + uint64_t size = 0; int fd = qemu_open_old("/dev/sgx_vepc", O_RDWR); if (fd < 0) { @@ -127,13 +175,36 @@ SGXInfo *qmp_query_sgx_capabilities(Error **errp) info->sgx1 = eax & (1U << 0) ? 
true : false; info->sgx2 = eax & (1U << 1) ? true : false; - info->section_size = sgx_calc_host_epc_section_size(); + info->sections = sgx_calc_host_epc_sections(&size); + info->section_size = size; close(fd); return info; } +static SGXEPCSectionList *sgx_get_epc_sections_list(void) +{ + GSList *device_list = sgx_epc_get_device_list(); + SGXEPCSectionList *head = NULL, **tail = &head; + SGXEPCSection *section; + + for (; device_list; device_list = device_list->next) { + DeviceState *dev = device_list->data; + Object *obj = OBJECT(dev); + + section = g_new0(SGXEPCSection, 1); + section->node = object_property_get_uint(obj, SGX_EPC_NUMA_NODE_PROP, + &error_abort); + section->size = object_property_get_uint(obj, SGX_EPC_SIZE_PROP, + &error_abort); + QAPI_LIST_APPEND(tail, section); + } + g_slist_free(device_list); + + return head; +} + SGXInfo *qmp_query_sgx(Error **errp) { SGXInfo *info = NULL; @@ -160,6 +231,7 @@ SGXInfo *qmp_query_sgx(Error **errp) info->sgx2 = true; info->flc = true; info->section_size = sgx_epc->size; + info->sections = sgx_get_epc_sections_list(); return info; } @@ -167,6 +239,7 @@ SGXInfo *qmp_query_sgx(Error **errp) void hmp_info_sgx(Monitor *mon, const QDict *qdict) { Error *err = NULL; + SGXEPCSectionList *section_list, *section; g_autoptr(SGXInfo) info = qmp_query_sgx(&err); if (err) { @@ -183,6 +256,14 @@ void hmp_info_sgx(Monitor *mon, const QDict *qdict) info->flc ? "enabled" : "disabled"); monitor_printf(mon, "size: %" PRIu64 "\n", info->section_size); + + section_list = info->sections; + for (section = section_list; section; section = section->next) { + monitor_printf(mon, "NUMA node #%" PRId64 ": ", + section->value->node); + monitor_printf(mon, "size=%" PRIu64 "\n", + section->value->size); + } } bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size) @@ -226,6 +307,9 @@ void pc_machine_init_sgx_epc(PCMachineState *pcms) /* set the memdev link with memory backend */ object_property_parse(obj, SGX_EPC_MEMDEV_PROP, list->value->memdev, &error_fatal); + /* set the numa node property for sgx epc object */ + object_property_set_uint(obj, SGX_EPC_NUMA_NODE_PROP, list->value->node, + &error_fatal); object_property_set_bool(obj, "realized", true, &error_fatal); object_unref(obj); } diff --git a/hw/i386/vmmouse.c b/hw/i386/vmmouse.c index 3d663682863cfcb8328e52fcd213f96a630fe1bf..a56c185f159c383667e87eb8fde9fa82b2da8560 100644 --- a/hw/i386/vmmouse.c +++ b/hw/i386/vmmouse.c @@ -286,6 +286,10 @@ static void vmmouse_realizefn(DeviceState *dev, Error **errp) DPRINTF("vmmouse_init\n"); + if (!s->i8042) { + error_setg(errp, "'i8042' link is not set"); + return; + } if (!object_resolve_path_type("", TYPE_VMPORT, NULL)) { error_setg(errp, "vmmouse needs a machine with vmport"); return; diff --git a/hw/i386/x86.c b/hw/i386/x86.c index b84840a1bb99a9072375f9784dc5068c23fb00ca..a3258d78facf41c63fdad6c8c6f21da82ad1938e 100644 --- a/hw/i386/x86.c +++ b/hw/i386/x86.c @@ -39,6 +39,7 @@ #include "sysemu/replay.h" #include "sysemu/sysemu.h" #include "sysemu/cpu-timers.h" +#include "sysemu/xen.h" #include "trace.h" #include "hw/i386/x86.h" @@ -136,6 +137,25 @@ void x86_cpus_init(X86MachineState *x86ms, int default_cpu_version) */ x86ms->apic_id_limit = x86_cpu_apic_id_from_index(x86ms, ms->smp.max_cpus - 1) + 1; + + /* + * Can we support APIC ID 255 or higher? + * + * Under Xen: yes. + * With userspace emulated lapic: no + * With KVM's in-kernel lapic: only if X2APIC API is enabled. 
+ */ + if (x86ms->apic_id_limit > 255 && !xen_enabled() && + (!kvm_irqchip_in_kernel() || !kvm_enable_x2apic())) { + error_report("current -smp configuration requires kernel " + "irqchip and X2APIC API support."); + exit(EXIT_FAILURE); + } + + if (kvm_enabled()) { + kvm_set_max_apic_id(x86ms->apic_id_limit); + } + possible_cpus = mc->possible_cpu_arch_ids(ms); for (i = 0; i < ms->smp.cpus; i++) { x86_cpu_new(x86ms, possible_cpus->cpus[i].arch_id, &error_fatal); diff --git a/hw/ide/ahci.c b/hw/ide/ahci.c index a94c6e26fb0d69b5275a985966ff8681c00449b2..36d050aafcf75ef4f85204c59d42c039a2faf325 100644 --- a/hw/ide/ahci.c +++ b/hw/ide/ahci.c @@ -249,7 +249,8 @@ static void map_page(AddressSpace *as, uint8_t **ptr, uint64_t addr, dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); } - *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE); + *ptr = dma_memory_map(as, addr, &len, DMA_DIRECTION_FROM_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (len < wanted && *ptr) { dma_memory_unmap(as, *ptr, len, DMA_DIRECTION_FROM_DEVICE, len); *ptr = NULL; @@ -939,7 +940,8 @@ static int ahci_populate_sglist(AHCIDevice *ad, QEMUSGList *sglist, /* map PRDT */ if (!(prdt = dma_memory_map(ad->hba->as, prdt_addr, &prdt_len, - DMA_DIRECTION_TO_DEVICE))){ + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED))){ trace_ahci_populate_sglist_no_map(ad->hba, ad->port_no); return -1; } @@ -1301,7 +1303,7 @@ static int handle_cmd(AHCIState *s, int port, uint8_t slot) tbl_addr = le64_to_cpu(cmd->tbl_addr); cmd_len = 0x80; cmd_fis = dma_memory_map(s->as, tbl_addr, &cmd_len, - DMA_DIRECTION_TO_DEVICE); + DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED); if (!cmd_fis) { trace_handle_cmd_badfis(s, port); return -1; @@ -1379,10 +1381,12 @@ static void ahci_pio_transfer(const IDEDMA *dma) has_sglist ? 
"" : "o"); if (has_sglist && size) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + if (is_write) { - dma_buf_write(s->data_ptr, size, &s->sg); + dma_buf_write(s->data_ptr, size, &s->sg, attrs); } else { - dma_buf_read(s->data_ptr, size, &s->sg); + dma_buf_read(s->data_ptr, size, &s->sg, attrs); } } @@ -1459,8 +1463,10 @@ static void ahci_commit_buf(const IDEDMA *dma, uint32_t tx_bytes) { AHCIDevice *ad = DO_UPCAST(AHCIDevice, dma, dma); - tx_bytes += le32_to_cpu(ad->cur_cmd->status); - ad->cur_cmd->status = cpu_to_le32(tx_bytes); + if (ad->cur_cmd) { + tx_bytes += le32_to_cpu(ad->cur_cmd->status); + ad->cur_cmd->status = cpu_to_le32(tx_bytes); + } } static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write) @@ -1475,9 +1481,9 @@ static int ahci_dma_rw_buf(const IDEDMA *dma, bool is_write) } if (is_write) { - dma_buf_read(p, l, &s->sg); + dma_buf_read(p, l, &s->sg, MEMTXATTRS_UNSPECIFIED); } else { - dma_buf_write(p, l, &s->sg); + dma_buf_write(p, l, &s->sg, MEMTXATTRS_UNSPECIFIED); } /* free sglist, update byte count */ @@ -1504,7 +1510,8 @@ static void ahci_cmd_done(const IDEDMA *dma) ahci_write_fis_d2h(ad); if (ad->port_regs.cmd_issue && !ad->check_bh) { - ad->check_bh = qemu_bh_new(ahci_check_cmd_bh, ad); + ad->check_bh = qemu_bh_new_guarded(ahci_check_cmd_bh, ad, + &ad->mem_reentrancy_guard); qemu_bh_schedule(ad->check_bh); } } diff --git a/hw/ide/ahci_internal.h b/hw/ide/ahci_internal.h index 109de9e2d1128bd220463b0c9547c87a81192a5c..a7768dd69e48cd4b5e52e51134bc2039aeca0de2 100644 --- a/hw/ide/ahci_internal.h +++ b/hw/ide/ahci_internal.h @@ -321,6 +321,7 @@ struct AHCIDevice { bool init_d2h_sent; AHCICmdHdr *cur_cmd; NCQTransferState ncq_tfs[AHCI_MAX_CMDS]; + MemReentrancyGuard mem_reentrancy_guard; }; struct AHCIPCIState { diff --git a/hw/ide/atapi.c b/hw/ide/atapi.c index b626199e3def404b27288f65370d272d8070233a..88b2890fafe4dec0776c6283a0e82ae40e9f4299 100644 --- a/hw/ide/atapi.c +++ b/hw/ide/atapi.c @@ -318,7 +318,7 @@ static void ide_atapi_cmd_reply(IDEState *s, int size, int max_size) } } -/* start a CD-CDROM read command */ +/* start a CD-ROM read command */ static void ide_atapi_cmd_read_pio(IDEState *s, int lba, int nb_sectors, int sector_size) { @@ -417,7 +417,7 @@ eot: ide_set_inactive(s, false); } -/* start a CD-CDROM read command with DMA */ +/* start a CD-ROM read command with DMA */ /* XXX: test if DMA is available */ static void ide_atapi_cmd_read_dma(IDEState *s, int lba, int nb_sectors, int sector_size) diff --git a/hw/ide/core.c b/hw/ide/core.c index e28f8aad6111f080b1aa9fec16c7b8d1d3d93cd0..5e5c6e2cf3128bfbf31d4453d534aa51e117e07f 100644 --- a/hw/ide/core.c +++ b/hw/ide/core.c @@ -433,12 +433,16 @@ static const AIOCBInfo trim_aiocb_info = { static void ide_trim_bh_cb(void *opaque) { TrimAIOCB *iocb = opaque; + BlockBackend *blk = iocb->s->blk; iocb->common.cb(iocb->common.opaque, iocb->ret); qemu_bh_delete(iocb->bh); iocb->bh = NULL; qemu_aio_unref(iocb); + + /* Paired with an increment in ide_issue_trim() */ + blk_dec_in_flight(blk); } static void ide_issue_trim_cb(void *opaque, int ret) @@ -506,11 +510,16 @@ BlockAIOCB *ide_issue_trim( BlockCompletionFunc *cb, void *cb_opaque, void *opaque) { IDEState *s = opaque; + IDEDevice *dev = s->unit ? 
s->bus->slave : s->bus->master; TrimAIOCB *iocb; + /* Paired with a decrement in ide_trim_bh_cb() */ + blk_inc_in_flight(s->blk); + iocb = blk_aio_get(&trim_aiocb_info, s->blk, cb, cb_opaque); iocb->s = s; - iocb->bh = qemu_bh_new(ide_trim_bh_cb, iocb); + iocb->bh = qemu_bh_new_guarded(ide_trim_bh_cb, iocb, + &DEVICE(dev)->mem_reentrancy_guard); iocb->ret = 0; iocb->qiov = qiov; iocb->i = -1; @@ -521,9 +530,9 @@ BlockAIOCB *ide_issue_trim( void ide_abort_command(IDEState *s) { - ide_transfer_stop(s); s->status = READY_STAT | ERR_STAT; s->error = ABRT_ERR; + ide_transfer_stop(s); } static void ide_set_retry(IDEState *s) @@ -2447,19 +2456,19 @@ static void ide_dummy_transfer_stop(IDEState *s) void ide_bus_reset(IDEBus *bus) { - bus->unit = 0; - bus->cmd = 0; - ide_reset(&bus->ifs[0]); - ide_reset(&bus->ifs[1]); - ide_clear_hob(bus); - - /* pending async DMA */ + /* pending async DMA - needs the IDEState before it is reset */ if (bus->dma->aiocb) { trace_ide_bus_reset_aio(); blk_aio_cancel(bus->dma->aiocb); bus->dma->aiocb = NULL; } + bus->unit = 0; + bus->cmd = 0; + ide_reset(&bus->ifs[0]); + ide_reset(&bus->ifs[1]); + ide_clear_hob(bus); + /* reset dma provider too */ if (bus->dma->ops->reset) { bus->dma->ops->reset(bus->dma); diff --git a/hw/ide/macio.c b/hw/ide/macio.c index b03d401ceb5ffafdb16f24f342920e490df6ada9..f23ce15459c2a20d744b0598f14c2b476f943874 100644 --- a/hw/ide/macio.c +++ b/hw/ide/macio.c @@ -97,7 +97,7 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) /* Non-block ATAPI transfer - just copy to RAM */ s->io_buffer_size = MIN(s->io_buffer_size, io->len); dma_memory_write(&address_space_memory, io->addr, s->io_buffer, - s->io_buffer_size); + s->io_buffer_size, MEMTXATTRS_UNSPECIFIED); io->len = 0; ide_atapi_cmd_ok(s); m->dma_active = false; @@ -119,9 +119,6 @@ static void pmac_ide_atapi_transfer_cb(void *opaque, int ret) return; done: - dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len, - io->dir, io->dma_len); - if (ret < 0) { block_acct_failed(blk_get_stats(s->blk), &s->acct); } else { @@ -202,9 +199,6 @@ static void pmac_ide_transfer_cb(void *opaque, int ret) return; done: - dma_memory_unmap(&address_space_memory, io->dma_mem, io->dma_len, - io->dir, io->dma_len); - if (s->dma_cmd == IDE_DMA_READ || s->dma_cmd == IDE_DMA_WRITE) { if (ret < 0) { block_acct_failed(blk_get_stats(s->blk), &s->acct); diff --git a/hw/ide/microdrive.c b/hw/ide/microdrive.c index 6df9b4cbbe1d7a92b923c3f91b1e605134b29344..56c5be365514446f5e63d059f586da8c02b04a7e 100644 --- a/hw/ide/microdrive.c +++ b/hw/ide/microdrive.c @@ -175,7 +175,7 @@ static void md_attr_write(PCMCIACardState *card, uint32_t at, uint8_t value) case 0x00: /* Configuration Option Register */ s->opt = value & 0xcf; if (value & OPT_SRESET) { - device_legacy_reset(DEVICE(s)); + device_cold_reset(DEVICE(s)); } md_interrupt_update(s); break; @@ -318,7 +318,7 @@ static void md_common_write(PCMCIACardState *card, uint32_t at, uint16_t value) case 0xe: /* Device Control */ s->ctrl = value; if (value & CTRL_SRST) { - device_legacy_reset(DEVICE(s)); + device_cold_reset(DEVICE(s)); } md_interrupt_update(s); break; @@ -543,7 +543,7 @@ static int dscm1xxxx_attach(PCMCIACardState *card) md->attr_base = pcc->cis[0x74] | (pcc->cis[0x76] << 8); md->io_base = 0x0; - device_legacy_reset(DEVICE(md)); + device_cold_reset(DEVICE(md)); md_interrupt_update(md); return 0; @@ -553,7 +553,7 @@ static int dscm1xxxx_detach(PCMCIACardState *card) { MicroDriveState *md = MICRODRIVE(card); - device_legacy_reset(DEVICE(md)); + 
device_cold_reset(DEVICE(md)); return 0; } diff --git a/hw/input/ps2.c b/hw/input/ps2.c index 9376a8f4ce53abb15fe57fc929d98407244eb89d..5d82ee3cdf076b4598e483e459b04106a9e6a671 100644 --- a/hw/input/ps2.c +++ b/hw/input/ps2.c @@ -205,7 +205,7 @@ void ps2_queue_noirq(PS2State *s, int b) } q->data[q->wptr] = b; - if (++q->wptr == PS2_BUFFER_SIZE) { + if (++q->wptr >= PS2_BUFFER_SIZE) { q->wptr = 0; } q->count++; @@ -578,7 +578,7 @@ uint32_t ps2_read_data(PS2State *s) val = q->data[index]; } else { val = q->data[q->rptr]; - if (++q->rptr == PS2_BUFFER_SIZE) { + if (++q->rptr >= PS2_BUFFER_SIZE) { q->rptr = 0; } q->count--; diff --git a/hw/input/vhost-user-input.c b/hw/input/vhost-user-input.c index 273e96a7b12d09d8ced4e51fbf5e1b0e92724dd5..43d2ff381652950d51324dd224c2f7952b86aaa1 100644 --- a/hw/input/vhost-user-input.c +++ b/hw/input/vhost-user-input.c @@ -79,6 +79,12 @@ static void vhost_input_set_config(VirtIODevice *vdev, virtio_notify_config(vdev); } +static struct vhost_dev *vhost_input_get_vhost(VirtIODevice *vdev) +{ + VHostUserInput *vhi = VHOST_USER_INPUT(vdev); + return &vhi->vhost->dev; +} + static const VMStateDescription vmstate_vhost_input = { .name = "vhost-user-input", .unmigratable = 1, @@ -93,6 +99,7 @@ static void vhost_input_class_init(ObjectClass *klass, void *data) dc->vmsd = &vmstate_vhost_input; vdc->get_config = vhost_input_get_config; vdc->set_config = vhost_input_set_config; + vdc->get_vhost = vhost_input_get_vhost; vic->realize = vhost_input_realize; vic->change_active = vhost_input_change_active; } diff --git a/hw/intc/arm_gicv3.c b/hw/intc/arm_gicv3.c index 9f5f815db9bc599f5d45c463a5ad4c6eeb0f0d57..864d4e40347bf76e82492542af8d6b4e7c904f79 100644 --- a/hw/intc/arm_gicv3.c +++ b/hw/intc/arm_gicv3.c @@ -19,6 +19,7 @@ #include "qapi/error.h" #include "qemu/module.h" #include "hw/intc/arm_gicv3.h" +#include "hw/core/cpu.h" #include "gicv3_internal.h" static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio) @@ -217,7 +218,9 @@ static void gicv3_update_noirqset(GICv3State *s, int start, int len) assert(len > 0); for (i = 0; i < s->num_cpu; i++) { - s->cpu[i].seenbetter = false; + if (qemu_get_cpu(i)) { + s->cpu[i].seenbetter = false; + } } /* Find the highest priority pending interrupt in this range. */ @@ -259,16 +262,18 @@ static void gicv3_update_noirqset(GICv3State *s, int start, int len) * now be the new best one). 
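* (CPU slots with no CPUState have not been hotplugged yet; the qemu_get_cpu() checks below skip them.)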
*/ for (i = 0; i < s->num_cpu; i++) { - GICv3CPUState *cs = &s->cpu[i]; + if (qemu_get_cpu(i)) { + GICv3CPUState *cs = &s->cpu[i]; - if (cs->seenbetter) { - cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq); - } + if (cs->seenbetter) { + cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq); + } - if (!cs->seenbetter && cs->hppi.prio != 0xff && - cs->hppi.irq >= start && cs->hppi.irq < start + len) { - gicv3_full_update_noirqset(s); - break; + if (!cs->seenbetter && cs->hppi.prio != 0xff && + cs->hppi.irq >= start && cs->hppi.irq < start + len) { + gicv3_full_update_noirqset(s); + break; + } } } } @@ -279,7 +284,9 @@ void gicv3_update(GICv3State *s, int start, int len) gicv3_update_noirqset(s, start, len); for (i = 0; i < s->num_cpu; i++) { - gicv3_cpuif_update(&s->cpu[i]); + if (qemu_get_cpu(i)) { + gicv3_cpuif_update(&s->cpu[i]); + } } } @@ -291,7 +298,9 @@ void gicv3_full_update_noirqset(GICv3State *s) int i; for (i = 0; i < s->num_cpu; i++) { - s->cpu[i].hppi.prio = 0xff; + if (qemu_get_cpu(i)) { + s->cpu[i].hppi.prio = 0xff; + } } /* Note that we can guarantee that these functions will not @@ -302,7 +311,9 @@ void gicv3_full_update_noirqset(GICv3State *s) gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL); for (i = 0; i < s->num_cpu; i++) { - gicv3_redist_update_noirqset(&s->cpu[i]); + if (qemu_get_cpu(i)) { + gicv3_redist_update_noirqset(&s->cpu[i]); + } } } @@ -315,7 +326,9 @@ void gicv3_full_update(GICv3State *s) gicv3_full_update_noirqset(s); for (i = 0; i < s->num_cpu; i++) { - gicv3_cpuif_update(&s->cpu[i]); + if (qemu_get_cpu(i)) { + gicv3_cpuif_update(&s->cpu[i]); + } } } @@ -376,12 +389,26 @@ static const MemoryRegionOps gic_ops[] = { } }; +static void gicv3_cpu_realize(GICv3State *s, int i) +{ + gicv3_init_one_cpuif(s, i); +} + +static void arm_gicv3_cpu_hotplug_realize(GICv3State *s, int ncpu) +{ + ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s); + + agc->parent_cpu_hotplug_realize(s, ncpu); + gicv3_cpu_realize(s, ncpu); +} + static void arm_gic_realize(DeviceState *dev, Error **errp) { /* Device instance realize function for the GIC sysbus device */ GICv3State *s = ARM_GICV3(dev); ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s); Error *local_err = NULL; + int i; agc->parent_realize(dev, &local_err); if (local_err) { @@ -391,7 +418,11 @@ static void arm_gic_realize(DeviceState *dev, Error **errp) gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops); - gicv3_init_cpuif(s); + for (i = 0; i < s->num_cpu; i++) { + if (qemu_get_cpu(i)) { + gicv3_cpu_realize(s, i); + } + } } static void arm_gicv3_class_init(ObjectClass *klass, void *data) @@ -400,6 +431,8 @@ static void arm_gicv3_class_init(ObjectClass *klass, void *data) ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass); ARMGICv3Class *agc = ARM_GICV3_CLASS(klass); + agc->parent_cpu_hotplug_realize = agcc->cpu_hotplug_realize; + agcc->cpu_hotplug_realize = arm_gicv3_cpu_hotplug_realize; agcc->post_load = arm_gicv3_post_load; device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize); } diff --git a/hw/intc/arm_gicv3_common.c b/hw/intc/arm_gicv3_common.c index 9884d2e39b9ab938f78e26219054cd04755691df..a4976b2ba023d5bf04b0ec7fe328c7c6668c73b1 100644 --- a/hw/intc/arm_gicv3_common.c +++ b/hw/intc/arm_gicv3_common.c @@ -24,12 +24,14 @@ #include "qemu/osdep.h" #include "qapi/error.h" #include "qemu/module.h" +#include "qemu/error-report.h" #include "hw/core/cpu.h" #include "hw/intc/arm_gicv3_common.h" #include "hw/qdev-properties.h" #include "migration/vmstate.h" #include "gicv3_internal.h" 
#include "hw/arm/linux-boot-if.h" +#include "hw/boards.h" #include "sysemu/kvm.h" @@ -301,6 +303,21 @@ void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler, } } +static void arm_gicv3_common_cpu_realize(GICv3State *s, int ncpu) +{ + CPUState *cpu = qemu_get_cpu(ncpu); + + s->cpu[ncpu].cpu = cpu; + s->cpu[ncpu].gic = s; + /* Store GICv3CPUState in CPUARMState gicv3state pointer */ + gicv3_set_gicv3state(cpu, &s->cpu[ncpu]); +} + +static void arm_gicv3_common_cpu_hotplug_realize(GICv3State *s, int ncpu) +{ + arm_gicv3_common_cpu_realize(s, ncpu); +} + static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) { GICv3State *s = ARM_GICV3_COMMON(dev); @@ -361,12 +378,15 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) for (i = 0; i < s->num_cpu; i++) { CPUState *cpu = qemu_get_cpu(i); + + MachineState *ms = MACHINE(qdev_get_machine()); + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = NULL; uint64_t cpu_affid; - s->cpu[i].cpu = cpu; - s->cpu[i].gic = s; - /* Store GICv3CPUState in CPUARMState gicv3state pointer */ - gicv3_set_gicv3state(cpu, &s->cpu[i]); + if (cpu) { + arm_gicv3_common_cpu_realize(s, i); + } /* Pre-construct the GICR_TYPER: * For our implementation: @@ -380,7 +400,18 @@ static void arm_gicv3_common_realize(DeviceState *dev, Error **errp) * VLPIS == 0 (virtual LPIs not supported) * PLPIS == 0 (physical LPIs not supported) */ - cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL); + if (cpu) { + cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL); + } else { + if (!mc->possible_cpu_arch_ids) { + error_report("MachineClass must implement possible_cpu_arch_ids " + "hook to support pre-sizing GICv3"); + exit(1); + } + + possible_cpus = mc->possible_cpu_arch_ids(ms); + cpu_affid = possible_cpus->cpus[i].arch_id; + } /* The CPU mp-affinity property is in MPIDR register format; squash * the affinity bytes into 32 bits as the GICR_TYPER has them. @@ -530,12 +561,14 @@ static Property arm_gicv3_common_properties[] = { static void arm_gicv3_common_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); + ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass); ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass); dc->reset = arm_gicv3_common_reset; dc->realize = arm_gicv3_common_realize; device_class_set_props(dc, arm_gicv3_common_properties); dc->vmsd = &vmstate_gicv3; + agcc->cpu_hotplug_realize = arm_gicv3_common_cpu_hotplug_realize; albifc->arm_linux_init = arm_gic_common_linux_init; } diff --git a/hw/intc/arm_gicv3_cpuif.c b/hw/intc/arm_gicv3_cpuif.c index 85fc369e55019bc332af7a974f5b60ebfcf33d59..eaa1381b3d2b1ab28c299812e18642b221fd9e30 100644 --- a/hw/intc/arm_gicv3_cpuif.c +++ b/hw/intc/arm_gicv3_cpuif.c @@ -137,7 +137,7 @@ static uint32_t icv_fullprio_mask(GICv3CPUState *cs) * with the group priority, whose mask depends on the value of VBPR * for the interrupt group.) 
*/ - return ~0U << (8 - cs->vpribits); + return (~0U << (8 - cs->vpribits)) & 0xff; } static int ich_highest_active_virt_prio(GICv3CPUState *cs) @@ -1676,6 +1676,10 @@ static void icc_generate_sgi(CPUARMState *env, GICv3CPUState *cs, aff, targetlist); for (i = 0; i < s->num_cpu; i++) { + if (!qemu_get_cpu(i)) { + continue; + } + GICv3CPUState *ocs = &s->cpu[i]; if (irm) { @@ -2625,76 +2629,72 @@ static void gicv3_cpuif_el_change_hook(ARMCPU *cpu, void *opaque) gicv3_cpuif_update(cs); } -void gicv3_init_cpuif(GICv3State *s) +void gicv3_init_one_cpuif(GICv3State *s, int ncpu) { /* Called from the GICv3 realize function; register our system * registers with the CPU */ - int i; - - for (i = 0; i < s->num_cpu; i++) { - ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); - GICv3CPUState *cs = &s->cpu[i]; - - /* Note that we can't just use the GICv3CPUState as an opaque pointer - * in define_arm_cp_regs_with_opaque(), because when we're called back - * it might be with code translated by CPU 0 but run by CPU 1, in - * which case we'd get the wrong value. - * So instead we define the regs with no ri->opaque info, and - * get back to the GICv3CPUState from the CPUARMState. + ARMCPU *cpu = ARM_CPU(qemu_get_cpu(ncpu)); + GICv3CPUState *cs = &s->cpu[ncpu]; + + /* Note that we can't just use the GICv3CPUState as an opaque pointer + * in define_arm_cp_regs_with_opaque(), because when we're called back + * it might be with code translated by CPU 0 but run by CPU 1, in + * which case we'd get the wrong value. + * So instead we define the regs with no ri->opaque info, and + * get back to the GICv3CPUState from the CPUARMState. + */ + define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); + if (arm_feature(&cpu->env, ARM_FEATURE_EL2) + && cpu->gic_num_lrs) { + int j; + + cs->num_list_regs = cpu->gic_num_lrs; + cs->vpribits = cpu->gic_vpribits; + cs->vprebits = cpu->gic_vprebits; + + /* Check against architectural constraints: getting these + * wrong would be a bug in the CPU code defining these, + * and the implementation relies on them holding. */ - define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); - if (arm_feature(&cpu->env, ARM_FEATURE_EL2) - && cpu->gic_num_lrs) { - int j; - - cs->num_list_regs = cpu->gic_num_lrs; - cs->vpribits = cpu->gic_vpribits; - cs->vprebits = cpu->gic_vprebits; - - /* Check against architectural constraints: getting these - * wrong would be a bug in the CPU code defining these, - * and the implementation relies on them holding. - */ - g_assert(cs->vprebits <= cs->vpribits); - g_assert(cs->vprebits >= 5 && cs->vprebits <= 7); - g_assert(cs->vpribits >= 5 && cs->vpribits <= 8); + g_assert(cs->vprebits <= cs->vpribits); + g_assert(cs->vprebits >= 5 && cs->vprebits <= 7); + g_assert(cs->vpribits >= 5 && cs->vpribits <= 8); - define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo); + define_arm_cp_regs(cpu, gicv3_cpuif_hcr_reginfo); - for (j = 0; j < cs->num_list_regs; j++) { - /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs - * are split into two cp15 regs, LR (the low part, with the - * same encoding as the AArch64 LR) and LRC (the high part). 
- */ - ARMCPRegInfo lr_regset[] = { - { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH, - .opc0 = 3, .opc1 = 4, .crn = 12, - .crm = 12 + (j >> 3), .opc2 = j & 7, - .type = ARM_CP_IO | ARM_CP_NO_RAW, - .access = PL2_RW, - .readfn = ich_lr_read, - .writefn = ich_lr_write, - }, - { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32, - .cp = 15, .opc1 = 4, .crn = 12, - .crm = 14 + (j >> 3), .opc2 = j & 7, - .type = ARM_CP_IO | ARM_CP_NO_RAW, - .access = PL2_RW, - .readfn = ich_lr_read, - .writefn = ich_lr_write, - }, - REGINFO_SENTINEL - }; - define_arm_cp_regs(cpu, lr_regset); - } - if (cs->vprebits >= 6) { - define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo); - } - if (cs->vprebits == 7) { - define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo); - } + for (j = 0; j < cs->num_list_regs; j++) { + /* Note that the AArch64 LRs are 64-bit; the AArch32 LRs + * are split into two cp15 regs, LR (the low part, with the + * same encoding as the AArch64 LR) and LRC (the high part). + */ + ARMCPRegInfo lr_regset[] = { + { .name = "ICH_LRn_EL2", .state = ARM_CP_STATE_BOTH, + .opc0 = 3, .opc1 = 4, .crn = 12, + .crm = 12 + (j >> 3), .opc2 = j & 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_lr_read, + .writefn = ich_lr_write, + }, + { .name = "ICH_LRCn_EL2", .state = ARM_CP_STATE_AA32, + .cp = 15, .opc1 = 4, .crn = 12, + .crm = 14 + (j >> 3), .opc2 = j & 7, + .type = ARM_CP_IO | ARM_CP_NO_RAW, + .access = PL2_RW, + .readfn = ich_lr_read, + .writefn = ich_lr_write, + }, + REGINFO_SENTINEL + }; + define_arm_cp_regs(cpu, lr_regset); + } + if (cs->vprebits >= 6) { + define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr1_reginfo); + } + if (cs->vprebits == 7) { + define_arm_cp_regs(cpu, gicv3_cpuif_ich_apxr23_reginfo); } - arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs); } + arm_register_el_change_hook(cpu, gicv3_cpuif_el_change_hook, cs); } diff --git a/hw/intc/arm_gicv3_its.c b/hw/intc/arm_gicv3_its.c index c929a9cb5c3b64a3b11d56e970f142961ad476df..b99e63d58f7cc09eec6d35a0e1c5fd3caf0f005f 100644 --- a/hw/intc/arm_gicv3_its.c +++ b/hw/intc/arm_gicv3_its.c @@ -274,21 +274,36 @@ static bool process_its_cmd(GICv3ITSState *s, uint64_t value, uint32_t offset, if (res != MEMTX_OK) { return result; } + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: " + "invalid dte: %"PRIx64" for %d (MEM_TX: %d)\n", + __func__, dte, devid, res); + return result; } - if ((devid > s->dt.maxids.max_devids) || !dte_valid || !ite_valid || - !cte_valid || (eventid > max_eventid)) { + + /* + * In this implementation, in case of guest errors we ignore the + * command and move onto the next command in the queue. + */ + if (devid > s->dt.maxids.max_devids) { qemu_log_mask(LOG_GUEST_ERROR, - "%s: invalid command attributes " - "devid %d or eventid %d or invalid dte %d or" - "invalid cte %d or invalid ite %d\n", - __func__, devid, eventid, dte_valid, cte_valid, - ite_valid); - /* - * in this implementation, in case of error - * we ignore this command and move onto the next - * command in the queue - */ + "%s: invalid command attributes: devid %d>%d", + __func__, devid, s->dt.maxids.max_devids); + + } else if (!dte_valid || !ite_valid || !cte_valid) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: " + "dte: %s, ite: %s, cte: %s\n", + __func__, + dte_valid ? "valid" : "invalid", + ite_valid ? "valid" : "invalid", + cte_valid ? 
"valid" : "invalid"); + } else if (eventid > max_eventid) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: invalid command attributes: eventid %d > %d\n", + __func__, eventid, max_eventid); } else { /* * Current implementation only supports rdbase == procnum diff --git a/hw/intc/arm_gicv3_kvm.c b/hw/intc/arm_gicv3_kvm.c index 5ec5ff9ef6e8deecb91c5207d5b5aa1718919faf..2e2b08e31f7ac3dccd95f0009c3472880e901a9f 100644 --- a/hw/intc/arm_gicv3_kvm.c +++ b/hw/intc/arm_gicv3_kvm.c @@ -76,6 +76,7 @@ struct KVMARMGICv3Class { ARMGICv3CommonClass parent_class; DeviceRealize parent_realize; void (*parent_reset)(DeviceState *dev); + CPUHotplugRealize parent_cpu_hotplug_realize; }; static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level) @@ -341,6 +342,10 @@ static void kvm_arm_gicv3_put(GICv3State *s) for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { GICv3CPUState *c = &s->cpu[ncpu]; + if (!qemu_get_cpu(ncpu)) { + continue; + } + reg64 = c->gicr_propbaser; regl = (uint32_t)reg64; kvm_gicr_access(s, GICR_PROPBASER, ncpu, ®l, true); @@ -360,6 +365,10 @@ static void kvm_arm_gicv3_put(GICv3State *s) for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { GICv3CPUState *c = &s->cpu[ncpu]; + if (!qemu_get_cpu(ncpu)) { + continue; + } + reg = c->gicr_ctlr; kvm_gicr_access(s, GICR_CTLR, ncpu, ®, true); @@ -456,6 +465,10 @@ static void kvm_arm_gicv3_put(GICv3State *s) GICv3CPUState *c = &s->cpu[ncpu]; int num_pri_bits; + if (!qemu_get_cpu(ncpu)) { + continue; + } + kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, true); kvm_gicc_access(s, ICC_CTLR_EL1, ncpu, &c->icc_ctlr_el1[GICV3_NS], true); @@ -523,6 +536,10 @@ static void kvm_arm_gicv3_get(GICv3State *s) /* Redistributor state (one per CPU) */ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + if (!qemu_get_cpu(ncpu)) { + continue; + } + GICv3CPUState *c = &s->cpu[ncpu]; kvm_gicr_access(s, GICR_CTLR, ncpu, ®, false); @@ -558,6 +575,10 @@ static void kvm_arm_gicv3_get(GICv3State *s) if (redist_typer & GICR_TYPER_PLPIS) { for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + if (!qemu_get_cpu(ncpu)) { + continue; + } + GICv3CPUState *c = &s->cpu[ncpu]; kvm_gicr_access(s, GICR_PROPBASER, ncpu, ®l, false); @@ -611,6 +632,10 @@ static void kvm_arm_gicv3_get(GICv3State *s) */ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) { + if (!qemu_get_cpu(ncpu)) { + continue; + } + GICv3CPUState *c = &s->cpu[ncpu]; int num_pri_bits; @@ -764,6 +789,20 @@ static void vm_change_state_handler(void *opaque, bool running, } } +static void kvm_arm_gicv3_cpu_realize(GICv3State *s, int ncpu) +{ + ARMCPU *cpu = ARM_CPU(qemu_get_cpu(ncpu)); + + define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); +} + +static void kvm_arm_gicv3_cpu_hotplug_realize(GICv3State *s, int ncpu) +{ + KVMARMGICv3Class *kagcc = KVM_ARM_GICV3_GET_CLASS(s); + + kagcc->parent_cpu_hotplug_realize(s, ncpu); + kvm_arm_gicv3_cpu_realize(s, ncpu); +} static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) { @@ -790,9 +829,9 @@ static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp) gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL); for (i = 0; i < s->num_cpu; i++) { - ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i)); - - define_arm_cp_regs(cpu, gicv3_cpuif_reginfo); + if (qemu_get_cpu(i)) { + kvm_arm_gicv3_cpu_realize(s, i); + } } /* Try to create the device via the device control API */ @@ -877,6 +916,8 @@ static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data) ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass); KVMARMGICv3Class *kgc = KVM_ARM_GICV3_CLASS(klass); + kgc->parent_cpu_hotplug_realize = 
agcc->cpu_hotplug_realize; + agcc->cpu_hotplug_realize = kvm_arm_gicv3_cpu_hotplug_realize; agcc->pre_save = kvm_arm_gicv3_get; agcc->post_load = kvm_arm_gicv3_put; device_class_set_parent_realize(dc, kvm_arm_gicv3_realize, diff --git a/hw/intc/arm_gicv3_redist.c b/hw/intc/arm_gicv3_redist.c index c8ff3eca085c413c1e73fcb13c632d56036d4b61..99b11ca5eeebd0b89d639f67e4fdf1a3d89593aa 100644 --- a/hw/intc/arm_gicv3_redist.c +++ b/hw/intc/arm_gicv3_redist.c @@ -462,7 +462,7 @@ MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data, break; } - if (r == MEMTX_ERROR) { + if (r != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid guest read at offset " TARGET_FMT_plx " size %u\n", __func__, offset, size); @@ -521,7 +521,7 @@ MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data, break; } - if (r == MEMTX_ERROR) { + if (r != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid guest write at offset " TARGET_FMT_plx " size %u\n", __func__, offset, size); diff --git a/hw/intc/armv7m_nvic.c b/hw/intc/armv7m_nvic.c index 13df002ce4dbc708cc326c8776a39163d2b680de..4b12b209b79fc8dcb3942d16771c3180625e8509 100644 --- a/hw/intc/armv7m_nvic.c +++ b/hw/intc/armv7m_nvic.c @@ -1273,17 +1273,17 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_pfr0; + return cpu->isar.regs[ID_PFR0]; case 0xd44: /* PFR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_pfr1; + return cpu->isar.regs[ID_PFR1]; case 0xd48: /* DFR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_dfr0; + return cpu->isar.regs[ID_DFR0]; case 0xd4c: /* AFR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; @@ -1293,52 +1293,52 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr0; + return cpu->isar.regs[ID_MMFR0]; case 0xd54: /* MMFR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr1; + return cpu->isar.regs[ID_MMFR1]; case 0xd58: /* MMFR2. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr2; + return cpu->isar.regs[ID_MMFR2]; case 0xd5c: /* MMFR3. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_mmfr3; + return cpu->isar.regs[ID_MMFR3]; case 0xd60: /* ISAR0. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar0; + return cpu->isar.regs[ID_ISAR0]; case 0xd64: /* ISAR1. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar1; + return cpu->isar.regs[ID_ISAR1]; case 0xd68: /* ISAR2. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar2; + return cpu->isar.regs[ID_ISAR2]; case 0xd6c: /* ISAR3. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar3; + return cpu->isar.regs[ID_ISAR3]; case 0xd70: /* ISAR4. */ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar4; + return cpu->isar.regs[ID_ISAR4]; case 0xd74: /* ISAR5. 
*/ if (!arm_feature(&cpu->env, ARM_FEATURE_M_MAIN)) { goto bad_offset; } - return cpu->isar.id_isar5; + return cpu->isar.regs[ID_ISAR5]; case 0xd78: /* CLIDR */ return cpu->clidr; case 0xd7c: /* CTR */ @@ -1548,11 +1548,11 @@ static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs) } return cpu->env.v7m.fpdscr[attrs.secure]; case 0xf40: /* MVFR0 */ - return cpu->isar.mvfr0; + return cpu->isar.regs[MVFR0]; case 0xf44: /* MVFR1 */ - return cpu->isar.mvfr1; + return cpu->isar.regs[MVFR1]; case 0xf48: /* MVFR2 */ - return cpu->isar.mvfr2; + return cpu->isar.regs[MVFR2]; default: bad_offset: qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset); diff --git a/hw/intc/gic_internal.h b/hw/intc/gic_internal.h index 8d29b40ca1012bf8b5a6b667bf8160b024d2c516..8ddbf554c69924ea087ddaca7d3d937816c4604c 100644 --- a/hw/intc/gic_internal.h +++ b/hw/intc/gic_internal.h @@ -280,6 +280,8 @@ static inline void gic_set_active(GICState *s, int irq, int cpu) static inline void gic_clear_active(GICState *s, int irq, int cpu) { + unsigned int cm; + if (gic_is_vcpu(cpu)) { uint32_t *entry = gic_get_lr_entry(s, irq, cpu); GICH_LR_CLEAR_ACTIVE(*entry); @@ -301,11 +303,13 @@ static inline void gic_clear_active(GICState *s, int irq, int cpu) * the GIC is secure. */ if (!s->security_extn || GIC_DIST_TEST_GROUP(phys_irq, 1 << rcpu)) { - GIC_DIST_CLEAR_ACTIVE(phys_irq, 1 << rcpu); + cm = phys_irq < GIC_INTERNAL ? 1 << rcpu : ALL_CPU_MASK; + GIC_DIST_CLEAR_ACTIVE(phys_irq, cm); } } } else { - GIC_DIST_CLEAR_ACTIVE(irq, 1 << cpu); + cm = irq < GIC_INTERNAL ? 1 << cpu : ALL_CPU_MASK; + GIC_DIST_CLEAR_ACTIVE(irq, cm); } } diff --git a/hw/intc/gicv3_internal.h b/hw/intc/gicv3_internal.h index b9c37453b0426a03f45124817eab5ae86dc4a34d..65db0126005269755b864ea4270b2da5f91a70a6 100644 --- a/hw/intc/gicv3_internal.h +++ b/hw/intc/gicv3_internal.h @@ -495,7 +495,7 @@ void gicv3_redist_update_lpi(GICv3CPUState *cs); */ void gicv3_redist_update_lpi_only(GICv3CPUState *cs); void gicv3_redist_send_sgi(GICv3CPUState *cs, int grp, int irq, bool ns); -void gicv3_init_cpuif(GICv3State *s); +void gicv3_init_one_cpuif(GICv3State *s, int ncpu); /** * gicv3_cpuif_update: diff --git a/hw/intc/pnv_xive.c b/hw/intc/pnv_xive.c index ad43483612e586fd21a85f7088afe57ee37a2ef5..bb207514f2dd83cb6bc4cf98da75499535a34e4b 100644 --- a/hw/intc/pnv_xive.c +++ b/hw/intc/pnv_xive.c @@ -172,7 +172,7 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, /* Get the page size of the indirect table. */ vsd_addr = vsd & VSD_ADDRESS_MASK; - vsd = ldq_be_dma(&address_space_memory, vsd_addr); + ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED); if (!(vsd & VSD_ADDRESS_MASK)) { #ifdef XIVE_DEBUG @@ -195,7 +195,8 @@ static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, /* Load the VSD we are looking for, if not already done */ if (vsd_idx) { vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; - vsd = ldq_be_dma(&address_space_memory, vsd_addr); + ldq_be_dma(&address_space_memory, vsd_addr, &vsd, + MEMTXATTRS_UNSPECIFIED); if (!(vsd & VSD_ADDRESS_MASK)) { #ifdef XIVE_DEBUG @@ -542,7 +543,7 @@ static uint64_t pnv_xive_vst_per_subpage(PnvXive *xive, uint32_t type) /* Get the page size of the indirect table. 
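* (ldq_be_dma now stores the loaded value through a pointer and returns a MemTxResult, which this caller does not check.)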
*/ vsd_addr = vsd & VSD_ADDRESS_MASK; - vsd = ldq_be_dma(&address_space_memory, vsd_addr); + ldq_be_dma(&address_space_memory, vsd_addr, &vsd, MEMTXATTRS_UNSPECIFIED); if (!(vsd & VSD_ADDRESS_MASK)) { #ifdef XIVE_DEBUG diff --git a/hw/intc/sifive_plic.c b/hw/intc/sifive_plic.c index 877e76877ccf245d6e920790f3a7d7365e295dde..cbbe6372f9158e58882140fbec39db1c3d794297 100644 --- a/hw/intc/sifive_plic.c +++ b/hw/intc/sifive_plic.c @@ -414,8 +414,10 @@ static void sifive_plic_irq_request(void *opaque, int irq, int level) { SiFivePLICState *s = opaque; - sifive_plic_set_pending(s, irq, level > 0); - sifive_plic_update(s); + if (level > 0) { + sifive_plic_set_pending(s, irq, true); + sifive_plic_update(s); + } } static void sifive_plic_realize(DeviceState *dev, Error **errp) diff --git a/hw/intc/spapr_xive.c b/hw/intc/spapr_xive.c index 4ec659b93e13a7f64334dcddd2a016b167b01539..eae95c716f149d3fc67176bcb38d5db8ddb0d9d6 100644 --- a/hw/intc/spapr_xive.c +++ b/hw/intc/spapr_xive.c @@ -1684,7 +1684,8 @@ static target_ulong h_int_esb(PowerPCCPU *cpu, mmio_addr = xive->vc_base + xive_source_esb_mgmt(xsrc, lisn) + offset; if (dma_memory_rw(&address_space_memory, mmio_addr, &data, 8, - (flags & SPAPR_XIVE_ESB_STORE))) { + (flags & SPAPR_XIVE_ESB_STORE), + MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to access ESB @0x%" HWADDR_PRIx "\n", mmio_addr); return H_HARDWARE; diff --git a/hw/intc/xics.c b/hw/intc/xics.c index 48a835eab7c573b3edd5b8e9efc1cf78c8d5e568..9215ef1fb5d4e6e567e4a78e955c8db000880f36 100644 --- a/hw/intc/xics.c +++ b/hw/intc/xics.c @@ -565,8 +565,8 @@ static void ics_reset_irq(ICSIRQState *irq) static void ics_reset(DeviceState *dev) { ICSState *ics = ICS(dev); + g_autofree uint8_t *flags = g_malloc(ics->nr_irqs); int i; - uint8_t flags[ics->nr_irqs]; for (i = 0; i < ics->nr_irqs; i++) { flags[i] = ics->irqs[i].flags; diff --git a/hw/intc/xive.c b/hw/intc/xive.c index 190194d27f84ce1e2a64f4e0f22f9defb67e2dce..f15f98588a7186983bdfc861812e15fcdfef7d38 100644 --- a/hw/intc/xive.c +++ b/hw/intc/xive.c @@ -1246,8 +1246,8 @@ void xive_end_queue_pic_print_info(XiveEND *end, uint32_t width, Monitor *mon) uint64_t qaddr = qaddr_base + (qindex << 2); uint32_t qdata = -1; - if (dma_memory_read(&address_space_memory, qaddr, &qdata, - sizeof(qdata))) { + if (dma_memory_read(&address_space_memory, qaddr, + &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to read EQ @0x%" HWADDR_PRIx "\n", qaddr); return; @@ -1311,7 +1311,8 @@ static void xive_end_enqueue(XiveEND *end, uint32_t data) uint32_t qdata = cpu_to_be32((qgen << 31) | (data & 0x7fffffff)); uint32_t qentries = 1 << (qsize + 10); - if (dma_memory_write(&address_space_memory, qaddr, &qdata, sizeof(qdata))) { + if (dma_memory_write(&address_space_memory, qaddr, + &qdata, sizeof(qdata), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "XIVE: failed to write END data @0x%" HWADDR_PRIx "\n", qaddr); return; diff --git a/hw/ipmi/smbus_ipmi.c b/hw/ipmi/smbus_ipmi.c index 1fdf0a66b698512afe2635020905c9d65a04aff0..1591211a8671dcf062454441aa3c59b5e80230d1 100644 --- a/hw/ipmi/smbus_ipmi.c +++ b/hw/ipmi/smbus_ipmi.c @@ -280,7 +280,9 @@ static int ipmi_write_data(SMBusDevice *dev, uint8_t *buf, uint8_t len) */ send = true; } - memcpy(sid->inmsg + sid->inlen, buf, len); + if (len > 0) { + memcpy(sid->inmsg + sid->inlen, buf, len); + } sid->inlen += len; break; } diff --git a/hw/loongarch/Kconfig b/hw/loongarch/Kconfig new file mode 100644 index 
0000000000000000000000000000000000000000..3fe2677fda7e689fe97c0a9cb82aebcb01eb1e3e --- /dev/null +++ b/hw/loongarch/Kconfig @@ -0,0 +1,17 @@ +config LS7A_APIC + bool + +config LS7A_RTC + bool + +config LOONGSON3A + bool + +config MEM_HOTPLUG + bool + +config ACPI_LOONGARCH + bool + +config E1000E_PCI + bool diff --git a/hw/loongarch/acpi-build.c b/hw/loongarch/acpi-build.c new file mode 100644 index 0000000000000000000000000000000000000000..4dd128a05e4d1d702239fdf75ad5263b1ae4b52a --- /dev/null +++ b/hw/loongarch/acpi-build.c @@ -0,0 +1,827 @@ +/* + * Support for generating ACPI tables and passing them to Guests + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/qmp/qnum.h" +#include "acpi-build.h" +#include "qemu-common.h" +#include "qemu/bitmap.h" +#include "qemu/error-report.h" +#include "hw/pci/pci.h" +#include "hw/boards.h" +#include "hw/core/cpu.h" +#include "target/loongarch64/cpu.h" +#include "hw/misc/pvpanic.h" +#include "hw/timer/hpet.h" +#include "hw/acpi/acpi-defs.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/cpu.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/acpi/bios-linker-loader.h" +#include "hw/loader.h" +#include "hw/isa/isa.h" +#include "hw/block/fdc.h" +#include "hw/acpi/memory_hotplug.h" +#include "sysemu/tpm.h" +#include "hw/acpi/tpm.h" +#include "hw/acpi/vmgenid.h" +#include "sysemu/tpm_backend.h" +#include "hw/rtc/mc146818rtc_regs.h" +#include "sysemu/numa.h" +#include "sysemu/runstate.h" +#include "sysemu/reset.h" +#include "migration/vmstate.h" +#include "hw/mem/memory-device.h" +#include "hw/acpi/utils.h" +#include "hw/acpi/pci.h" +/* Supported chipsets: */ +#include "hw/acpi/aml-build.h" +#include "hw/loongarch/larch.h" +#include "hw/loongarch/ls7a.h" +#include "hw/platform-bus.h" + +#include "hw/acpi/ipmi.h" +#include "hw/acpi/ls7a.h" + +/* + * These are used to size the ACPI tables for -M pc-i440fx-1.7 and + * -M pc-i440fx-2.0. Even if the actual amount of AML generated grows + * a little bit, there should be plenty of free space since the DSDT + * shrunk by ~1.5k between QEMU 2.0 and QEMU 2.1. + */ +#define ACPI_BUILD_ALIGN_SIZE 0x1000 +#define ACPI_BUILD_TABLE_SIZE 0x20000 + +/* #define DEBUG_ACPI_BUILD */ +#ifdef DEBUG_ACPI_BUILD +#define ACPI_BUILD_DPRINTF(fmt, ...) \ + do { \ + printf("ACPI_BUILD: " fmt, ##__VA_ARGS__); \ + } while (0) +#else +#define ACPI_BUILD_DPRINTF(fmt, ...) +#endif + +/* Default IOAPIC ID */ +#define ACPI_BUILD_IOAPIC_ID 0x0 + +/* PCI fw r3.0 MCFG table. 
*/ +/* Subtable */ + +typedef struct AcpiMiscInfo { + bool is_piix4; + bool has_hpet; + TPMVersion tpm_version; + const unsigned char *dsdt_code; + unsigned dsdt_size; + uint16_t pvpanic_port; + uint16_t applesmc_io_base; +} AcpiMiscInfo; + +typedef struct AcpiBuildPciBusHotplugState { + GArray *device_table; + GArray *notify_table; + struct AcpiBuildPciBusHotplugState *parent; + bool pcihp_bridge_en; +} AcpiBuildPciBusHotplugState; + +static void init_common_fadt_data(AcpiFadtData *data) +{ + AmlAddressSpace as = AML_AS_SYSTEM_MEMORY; + uint64_t base = LS7A_ACPI_REG_BASE; + AcpiFadtData fadt = { + .rev = 3, + .flags = (1 << ACPI_FADT_F_WBINVD) | (1 << ACPI_FADT_F_PROC_C1) | + (1 << ACPI_FADT_F_SLP_BUTTON) | + (1 << ACPI_FADT_F_TMR_VAL_EXT) | + (1 << ACPI_FADT_F_RESET_REG_SUP), + /* C2 state not supported */ + .plvl2_lat = 0xfff, + /* C3 state not supported */ + .plvl3_lat = 0xfff, + .smi_cmd = 0x00, + .sci_int = ACPI_SCI_IRQ, + .acpi_enable_cmd = 0x00, + .acpi_disable_cmd = 0x00, + .pm1a_evt = { .space_id = as, + .bit_width = 8 * 8, + .address = base + LS7A_PM_EVT_BLK }, + .pm1a_cnt = { .space_id = as, + .bit_width = 4 * 8, + .address = base + LS7A_PM_CNT_BLK }, + .pm_tmr = { .space_id = as, + .bit_width = 4 * 8, + .address = base + LS7A_PM_TMR_BLK }, + .gpe0_blk = { .space_id = as, + .bit_width = 8 * 8, + .address = base + LS7A_GPE0_STS_REG }, + .reset_reg = { .space_id = as, + .bit_width = 4 * 8, + .address = base + LS7A_GPE0_RESET_REG }, + .reset_val = 0x1, + }; + *data = fadt; +} + +static void acpi_align_size(GArray *blob, unsigned align) +{ + /* + * Align size to multiple of given size. This reduces the chance + * we need to change size in the future (breaking cross version migration). + */ + g_array_set_size(blob, ROUND_UP(acpi_data_len(blob), align)); +} + +/* FACS */ +static void build_facs(GArray *table_data) +{ + const char *sig = "FACS"; + const uint8_t reserved[40] = {}; + + g_array_append_vals(table_data, sig, 4); /* Signature */ + build_append_int_noprefix(table_data, 64, 4); /* Length */ + build_append_int_noprefix(table_data, 0, 4); /* Hardware Signature */ + build_append_int_noprefix(table_data, 0, 4); /* Firmware Waking Vector */ + build_append_int_noprefix(table_data, 0, 4); /* Global Lock */ + build_append_int_noprefix(table_data, 0, 4); /* Flags */ + g_array_append_vals(table_data, reserved, 40); /* Reserved */ +} + +void ls7a_madt_cpu_entry(AcpiDeviceIf *adev, int uid, + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled) +{ + uint32_t apic_id = apic_ids->cpus[uid].arch_id; + /* Flags – Local APIC Flags */ + uint32_t flags = apic_ids->cpus[uid].cpu != NULL || force_enabled ? 
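+ /* enabled if a vCPU object exists for this slot or the caller forces it */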
1 : 0; + + /* Rev 1.0b, Table 5-13 Processor Local APIC Structure */ + build_append_int_noprefix(entry, 0, 1); /* Type */ + build_append_int_noprefix(entry, 8, 1); /* Length */ + build_append_int_noprefix(entry, uid, 1); /* ACPI Processor ID */ + build_append_int_noprefix(entry, apic_id, 1); /* APIC ID */ + build_append_int_noprefix(entry, flags, 4); /* Flags */ +} + +static void build_ioapic(GArray *entry, uint8_t id, uint32_t addr, + uint32_t irq) +{ + /* Rev 1.0b, 5.2.8.2 IO APIC */ + build_append_int_noprefix(entry, 1, 1); /* Type */ + build_append_int_noprefix(entry, 12, 1); /* Length */ + build_append_int_noprefix(entry, id, 1); /* IO APIC ID */ + build_append_int_noprefix(entry, 0, 1); /* Reserved */ + build_append_int_noprefix(entry, addr, 4); /* IO APIC Address */ + build_append_int_noprefix(entry, irq, 4); /* System Vector Base */ +} + +static void build_madt(GArray *table_data, BIOSLinker *linker, + LoongarchMachineState *lsms) +{ + LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms); + MachineClass *mc = MACHINE_GET_CLASS(lsms); + const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(MACHINE(lsms)); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_GET_CLASS(lsms->acpi_dev); + AcpiDeviceIf *adev = ACPI_DEVICE_IF(lsms->acpi_dev); + int i; + AcpiTable table = { .sig = "APIC", + .rev = 1, + .oem_id = lsms->oem_id, + .oem_table_id = lsms->oem_table_id }; + + acpi_table_begin(&table, table_data); + + /* Local APIC Address */ + build_append_int_noprefix(table_data, 0, 4); + build_append_int_noprefix(table_data, 1 /* PCAT_COMPAT */, 4); /* Flags */ + + for (i = 0; i < apic_ids->len; i++) { + adevc->madt_cpu(adev, i, apic_ids, table_data, false); + } + + build_ioapic(table_data, ACPI_BUILD_IOAPIC_ID, lsmc->ls7a_ioapic_reg_base, + LOONGARCH_PCH_IRQ_BASE); + + /* Rev 1.0b, 5.2.8.3.3 Local APIC NMI */ + build_append_int_noprefix(table_data, 3, 1); /* Type */ + build_append_int_noprefix(table_data, 6, 1); /* Length */ + /* ACPI Processor ID */ + build_append_int_noprefix(table_data, 0xFF, 1); /* all processors */ + build_append_int_noprefix(table_data, 0, 2); /* Flags */ + /* Local APIC INTI# */ + build_append_int_noprefix(table_data, 1, 1); /* ACPI_LINT1 */ + + /* Rev 1.0b, 5.2.8.3.3 Local APIC NMI */ + build_append_int_noprefix(table_data, 4, 1); /* Type */ + build_append_int_noprefix(table_data, 6, 1); /* Length */ + /* ACPI Processor ID */ + build_append_int_noprefix(table_data, 0xFF, 1); /* all processors */ + build_append_int_noprefix(table_data, 0, 2); /* Flags */ + /* Local APIC INTI# */ + build_append_int_noprefix(table_data, 1, 1); /* ACPI_LINT1 */ + + acpi_table_end(linker, &table); +} + +static void build_srat(GArray *table_data, BIOSLinker *linker, + MachineState *machine) +{ + uint64_t i, mem_len, mem_base; + MachineClass *mc = MACHINE_GET_CLASS(machine); + LoongarchMachineState *lsms = LoongarchMACHINE(machine); + const CPUArchIdList *apic_ids = mc->possible_cpu_arch_ids(machine); + int nb_numa_nodes = machine->numa_state->num_nodes; + NodeInfo *numa_info = machine->numa_state->nodes; + AcpiTable table = { .sig = "SRAT", + .rev = 1, + .oem_id = lsms->oem_id, + .oem_table_id = lsms->oem_table_id }; + + acpi_table_begin(&table, table_data); + build_append_int_noprefix(table_data, 1, 4); /* Reserved */ + build_append_int_noprefix(table_data, 0, 8); /* Reserved */ + + for (i = 0; i < apic_ids->len; ++i) { + /* 5.2.15.1 Processor Local APIC/SAPIC Affinity Structure */ + build_append_int_noprefix(table_data, 0, 1); /* Type */ + build_append_int_noprefix(table_data, 16, 
1); /* Length */ + /* Proximity Domain [7:0] */ + build_append_int_noprefix(table_data, apic_ids->cpus[i].props.node_id, + 1); + build_append_int_noprefix(table_data, apic_ids->cpus[i].arch_id, + 1); /* APIC ID */ + /* Flags, Table 5-36 */ + build_append_int_noprefix(table_data, 1, 4); + build_append_int_noprefix(table_data, 0, 1); /* Local SAPIC EID */ + /* Proximity Domain [31:8] */ + build_append_int_noprefix(table_data, 0, 3); + build_append_int_noprefix(table_data, 0, 4); /* Reserved */ + } + + /* node0 */ + mem_base = (uint64_t)0; + mem_len = 0x10000000; + build_srat_memory(table_data, mem_base, mem_len, 0, MEM_AFFINITY_ENABLED); + mem_base = 0x90000000; + if (!nb_numa_nodes) { + mem_len = machine->ram_size - 0x10000000; + } else { + mem_len = numa_info[0].node_mem - 0x10000000; + } + + build_srat_memory(table_data, mem_base, mem_len, 0, MEM_AFFINITY_ENABLED); + mem_base += mem_len; + + /* node1 ~ nodemax */ + for (i = 1; i < nb_numa_nodes; ++i) { + mem_len = numa_info[i].node_mem; + build_srat_memory(table_data, mem_base, mem_len, i, + MEM_AFFINITY_ENABLED); + mem_base += mem_len; + } + + if (lsms->hotplug_memory_size) { + build_srat_memory(table_data, machine->device_memory->base, + lsms->hotplug_memory_size, 0, + MEM_AFFINITY_HOTPLUGGABLE | MEM_AFFINITY_ENABLED); + } + + acpi_table_end(linker, &table); +} + +typedef struct AcpiBuildState { + /* Copy of table in RAM (for patching). */ + MemoryRegion *table_mr; + /* Is table patched? */ + uint8_t patched; + void *rsdp; + MemoryRegion *rsdp_mr; + MemoryRegion *linker_mr; +} AcpiBuildState; + +static void build_ls7a_pci0_int(Aml *table) +{ + Aml *sb_scope = aml_scope("_SB"); + Aml *pci0_scope = aml_scope("PCI0"); + Aml *prt_pkg = aml_varpackage(128); + int slot, pin; + + for (slot = 0; slot < PCI_SLOT_MAX; slot++) { + for (pin = 0; pin < PCI_NUM_PINS; pin++) { + Aml *pkg = aml_package(4); + aml_append(pkg, aml_int((slot << 16) | 0xFFFF)); + aml_append(pkg, aml_int(pin)); + aml_append(pkg, aml_int(0)); + aml_append(pkg, aml_int(LOONGARCH_PCH_IRQ_BASE + 16 + + (slot * 4 + pin) % 16)); + aml_append(prt_pkg, pkg); + } + } + aml_append(pci0_scope, aml_name_decl("_PRT", prt_pkg)); + + aml_append(sb_scope, pci0_scope); + + aml_append(table, sb_scope); +} + +static void build_dbg_aml(Aml *table) +{ + Aml *field; + Aml *method; + Aml *while_ctx; + Aml *scope = aml_scope("\\"); + Aml *buf = aml_local(0); + Aml *len = aml_local(1); + Aml *idx = aml_local(2); + + aml_append(scope, aml_operation_region("DBG", AML_SYSTEM_IO, + aml_int(0x0402), 0x01)); + field = aml_field("DBG", AML_BYTE_ACC, AML_NOLOCK, AML_PRESERVE); + aml_append(field, aml_named_field("DBGB", 8)); + aml_append(scope, field); + + method = aml_method("DBUG", 1, AML_NOTSERIALIZED); + + aml_append(method, aml_to_hexstring(aml_arg(0), buf)); + aml_append(method, aml_to_buffer(buf, buf)); + aml_append(method, aml_subtract(aml_sizeof(buf), aml_int(1), len)); + aml_append(method, aml_store(aml_int(0), idx)); + + while_ctx = aml_while(aml_lless(idx, len)); + aml_append(while_ctx, + aml_store(aml_derefof(aml_index(buf, idx)), aml_name("DBGB"))); + aml_append(while_ctx, aml_increment(idx)); + aml_append(method, while_ctx); + + aml_append(method, aml_store(aml_int(0x0A), aml_name("DBGB"))); + aml_append(scope, method); + + aml_append(table, scope); +} + +static Aml *build_ls7a_osc_method(void) +{ + Aml *if_ctx; + Aml *if_ctx2; + Aml *else_ctx; + Aml *method; + Aml *a_cwd1 = aml_name("CDW1"); + Aml *a_ctrl = aml_local(0); + + method = aml_method("_OSC", 4, AML_NOTSERIALIZED); + 
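+    /*
+     * _OSC arguments: Arg0 is the UUID, Arg1 the revision and Arg3 the
+     * capabilities buffer; CDW1 is the first DWORD of that buffer and
+     * carries status/error bits back to the OSPM.
+     */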
aml_append(method, aml_create_dword_field(aml_arg(3), aml_int(0), "CDW1")); + + if_ctx = aml_if(aml_equal( + aml_arg(0), aml_touuid("33DB4D5B-1FF7-401C-9657-7441C03DD766"))); + aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(4), "CDW2")); + aml_append(if_ctx, aml_create_dword_field(aml_arg(3), aml_int(8), "CDW3")); + + aml_append(if_ctx, aml_store(aml_name("CDW3"), a_ctrl)); + + /* + * Always allow native PME, AER (no dependencies) + * Allow SHPC (PCI bridges can have SHPC controller) + */ + aml_append(if_ctx, aml_and(a_ctrl, aml_int(0x1F), a_ctrl)); + + if_ctx2 = aml_if(aml_lnot(aml_equal(aml_arg(1), aml_int(1)))); + /* Unknown revision */ + aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x08), a_cwd1)); + aml_append(if_ctx, if_ctx2); + + if_ctx2 = aml_if(aml_lnot(aml_equal(aml_name("CDW3"), a_ctrl))); + /* Capabilities bits were masked */ + aml_append(if_ctx2, aml_or(a_cwd1, aml_int(0x10), a_cwd1)); + aml_append(if_ctx, if_ctx2); + + /* Update DWORD3 in the buffer */ + aml_append(if_ctx, aml_store(a_ctrl, aml_name("CDW3"))); + aml_append(method, if_ctx); + + else_ctx = aml_else(); + /* Unrecognized UUID */ + aml_append(else_ctx, aml_or(a_cwd1, aml_int(4), a_cwd1)); + aml_append(method, else_ctx); + + aml_append(method, aml_return(aml_arg(3))); + return method; +} + +static void build_ls7a_rtc_device_aml(Aml *table) +{ + Aml *dev; + Aml *crs; + uint32_t rtc_irq = LS7A_RTC_IRQ; + + Aml *scope = aml_scope("_SB"); + dev = aml_device("RTC"); + aml_append(dev, aml_name_decl("_HID", aml_string("LOON0001"))); + crs = aml_resource_template(); + aml_append(crs, aml_qword_memory(AML_POS_DECODE, AML_MIN_FIXED, + AML_MAX_FIXED, AML_NON_CACHEABLE, + AML_READ_WRITE, 0, LS7A_RTC_REG_BASE, + LS7A_RTC_REG_BASE + LS7A_RTC_LEN - 1, 0, + LS7A_RTC_LEN)); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &rtc_irq, 1)); + + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + aml_append(table, scope); +} + +static void build_ls7a_uart_device_aml(Aml *table) +{ + Aml *dev; + Aml *crs; + Aml *pkg0, *pkg1, *pkg2; + uint32_t uart_irq = LS7A_UART_IRQ; + + Aml *scope = aml_scope("_SB"); + dev = aml_device("COMA"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0501"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + aml_append(dev, aml_name_decl("_CCA", aml_int(1))); + crs = aml_resource_template(); + aml_append(crs, aml_qword_memory( + AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_NON_CACHEABLE, AML_READ_WRITE, 0, LS7A_UART_BASE, + LS7A_UART_BASE + LS7A_UART_LEN - 1, 0, 0x8)); + aml_append(crs, aml_interrupt(AML_CONSUMER, AML_LEVEL, AML_ACTIVE_HIGH, + AML_EXCLUSIVE, &uart_irq, 1)); + aml_append(dev, aml_name_decl("_CRS", crs)); + pkg0 = aml_package(0x2); + aml_append(pkg0, aml_int(0x01F78A40)); + aml_append(pkg0, aml_string("clock-frequency")); + pkg1 = aml_package(0x1); + aml_append(pkg1, pkg0); + pkg2 = aml_package(0x2); + aml_append(pkg2, aml_touuid("DAFFD814-6EBA-4D8C-8A91-BC9BBF4AA301")); + aml_append(pkg2, pkg1); + + aml_append(dev, aml_name_decl("_DSD", pkg2)); + + aml_append(scope, dev); + aml_append(table, scope); +} + +#ifdef CONFIG_TPM +static void acpi_dsdt_add_tpm(Aml *scope, LoongarchMachineState *vms) +{ + PlatformBusDevice *pbus = PLATFORM_BUS_DEVICE(vms->platform_bus_dev); + hwaddr pbus_base = VIRT_PLATFORM_BUS_BASEADDRESS; + SysBusDevice *sbdev = SYS_BUS_DEVICE(tpm_find()); + MemoryRegion *sbdev_mr; + hwaddr tpm_base; + + if (!sbdev) { + return; + } + + tpm_base = platform_bus_get_mmio_addr(pbus, sbdev, 
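+ /* MMIO region index */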
0); + assert(tpm_base != -1); + + tpm_base += pbus_base; + + sbdev_mr = sysbus_mmio_get_region(sbdev, 0); + + Aml *dev = aml_device("TPM0"); + aml_append(dev, aml_name_decl("_HID", aml_string("MSFT0101"))); + aml_append(dev, aml_name_decl("_STR", aml_string("TPM 2.0 Device"))); + aml_append(dev, aml_name_decl("_UID", aml_int(0))); + + Aml *crs = aml_resource_template(); + aml_append(crs, aml_memory32_fixed(tpm_base, + (uint32_t)memory_region_size(sbdev_mr), + AML_READ_WRITE)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); +} +#endif + +static void build_dsdt(GArray *table_data, BIOSLinker *linker, + MachineState *machine) +{ + Aml *dsdt, *sb_scope, *scope, *dev, *crs, *pkg; + LoongarchMachineState *lsms = LoongarchMACHINE(machine); + uint32_t nr_mem = machine->ram_slots; + uint64_t base = LS7A_ACPI_REG_BASE; + int root_bus_limit = PCIE_MMCFG_BUS(LS_PCIECFG_SIZE - 1); + AcpiTable table = { .sig = "DSDT", + .rev = 1, + .oem_id = lsms->oem_id, + .oem_table_id = lsms->oem_table_id }; + + acpi_table_begin(&table, table_data); + dsdt = init_aml_allocator(); + + build_dbg_aml(dsdt); + + sb_scope = aml_scope("_SB"); + dev = aml_device("PCI0"); + aml_append(dev, aml_name_decl("_HID", aml_eisaid("PNP0A08"))); + aml_append(dev, aml_name_decl("_CID", aml_eisaid("PNP0A03"))); + aml_append(dev, aml_name_decl("_ADR", aml_int(0))); + aml_append(dev, aml_name_decl("_BBN", aml_int(0))); + aml_append(dev, aml_name_decl("_UID", aml_int(1))); + aml_append(dev, build_ls7a_osc_method()); + aml_append(sb_scope, dev); + +#ifdef CONFIG_TPM + acpi_dsdt_add_tpm(sb_scope, lsms); +#endif + aml_append(dsdt, sb_scope); + + build_ls7a_pci0_int(dsdt); + build_ls7a_rtc_device_aml(dsdt); + build_ls7a_uart_device_aml(dsdt); + + if (lsms->acpi_dev) { + CPUHotplugFeatures opts = { .acpi_1_compatible = true, + .has_legacy_cphp = false }; + build_cpus_aml(dsdt, machine, opts, CPU_HOTPLUG_BASE, "\\_SB.PCI0", + "\\_GPE._E02", AML_SYSTEM_MEMORY); + + build_memory_hotplug_aml(dsdt, nr_mem, "\\_SB.PCI0", "\\_GPE._E03", + AML_SYSTEM_MEMORY, MEMORY_HOTPLUG_BASE); + } + + scope = aml_scope("_GPE"); + { + aml_append(scope, aml_name_decl("_HID", aml_string("ACPI0006"))); + } + aml_append(dsdt, scope); + + scope = aml_scope("\\_SB.PCI0"); + /* build PCI0._CRS */ + crs = aml_resource_template(); + aml_append(crs, aml_word_bus_number( + AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, 0x0000, + 0x0, root_bus_limit, 0x0000, root_bus_limit + 1)); + aml_append(crs, aml_word_io(AML_MIN_FIXED, AML_MAX_FIXED, AML_POS_DECODE, + AML_ENTIRE_RANGE, 0x0000, 0x4000, 0xFFFF, + 0x0000, 0xC000)); + aml_append(crs, + aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_CACHEABLE, AML_READ_WRITE, 0, 0x40000000, + 0x7FFFFFFF, 0, 0x40000000)); + aml_append(scope, aml_name_decl("_CRS", crs)); + + /* reserve GPE0 block resources */ + dev = aml_device("GPE0"); + aml_append(dev, aml_name_decl("_HID", aml_string("PNP0A06"))); + aml_append(dev, aml_name_decl("_UID", aml_string("GPE0 resources"))); + /* device present, functioning, decoding, not shown in UI */ + aml_append(dev, aml_name_decl("_STA", aml_int(0xB))); + crs = aml_resource_template(); + aml_append(crs, + aml_dword_memory(AML_POS_DECODE, AML_MIN_FIXED, AML_MAX_FIXED, + AML_CACHEABLE, AML_READ_WRITE, 0, + base + LS7A_GPE0_STS_REG, + base + LS7A_GPE0_STS_REG + 0x3, 0, 0x4)); + aml_append(dev, aml_name_decl("_CRS", crs)); + aml_append(scope, dev); + aml_append(dsdt, scope); + + scope = aml_scope("\\"); + pkg = aml_package(4); + aml_append(pkg, aml_int(7)); /* 
PM1a_CNT.SLP_TYP */ + aml_append(pkg, aml_int(7)); /* PM1b_CNT.SLP_TYP not impl. */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(pkg, aml_int(0)); /* reserved */ + aml_append(scope, aml_name_decl("_S5", pkg)); + aml_append(dsdt, scope); + + /* copy AML table into ACPI tables blob and patch header there */ + g_array_append_vals(table_data, dsdt->buf->data, dsdt->buf->len); + acpi_table_end(linker, &table); + free_aml_allocator(); +} + +static void acpi_build(AcpiBuildTables *tables, MachineState *machine) +{ + LoongarchMachineState *lsms = LoongarchMACHINE(machine); + GArray *table_offsets; + AcpiFadtData fadt_data; + unsigned facs, rsdt, fadt, dsdt; + uint8_t *u; + size_t aml_len = 0; + GArray *tables_blob = tables->table_data; + + init_common_fadt_data(&fadt_data); + + table_offsets = g_array_new(false, true, sizeof(uint32_t)); /* clear */ + ACPI_BUILD_DPRINTF("init ACPI tables\n"); + + bios_linker_loader_alloc(tables->linker, ACPI_BUILD_TABLE_FILE, + tables_blob, 64, false /* high memory */); + + /* + * FACS is pointed to by FADT. + * We place it first since it's the only table that has alignment + * requirements. + */ + facs = tables_blob->len; + build_facs(tables_blob); + + /* DSDT is pointed to by FADT */ + dsdt = tables_blob->len; + build_dsdt(tables_blob, tables->linker, MACHINE(qdev_get_machine())); + + /* + * Count the size of the DSDT and SSDT, we will need it for legacy + * sizing of ACPI tables. + */ + aml_len += tables_blob->len - dsdt; + + /* ACPI tables pointed to by RSDT */ + fadt = tables_blob->len; + acpi_add_table(table_offsets, tables_blob); + fadt_data.facs_tbl_offset = &facs; + fadt_data.dsdt_tbl_offset = &dsdt; + fadt_data.xdsdt_tbl_offset = &dsdt; + build_fadt(tables_blob, tables->linker, &fadt_data, "LOONGS", "TP-R00"); + aml_len += tables_blob->len - fadt; + + acpi_add_table(table_offsets, tables_blob); + build_madt(tables_blob, tables->linker, lsms); + + acpi_add_table(table_offsets, tables_blob); + build_srat(tables_blob, tables->linker, machine); + if (machine->numa_state->have_numa_distance) { + acpi_add_table(table_offsets, tables_blob); + build_slit(tables_blob, tables->linker, machine, lsms->oem_id, + lsms->oem_table_id); + } + + if (tpm_get_version(tpm_find()) == TPM_VERSION_2_0) { + acpi_add_table(table_offsets, tables_blob); + build_tpm2(tables_blob, tables->linker, tables->tcpalog, lsms->oem_id, + lsms->oem_table_id); + } + + /* Build mcfg */ + acpi_add_table(table_offsets, tables_blob); + { + AcpiMcfgInfo mcfg = { + .base = LS_PCIECFG_BASE, + .size = LS_PCIECFG_SIZE, + }; + build_mcfg(tables_blob, tables->linker, &mcfg, lsms->oem_id, + lsms->oem_table_id); + } + + /* Add tables supplied by user (if any) */ + for (u = acpi_table_first(); u; u = acpi_table_next(u)) { + unsigned len = acpi_table_len(u); + + acpi_add_table(table_offsets, tables_blob); + g_array_append_vals(tables_blob, u, len); + } + + /* RSDT is pointed to by RSDP */ + rsdt = tables_blob->len; + build_rsdt(tables_blob, tables->linker, table_offsets, "LOONGS", "TP-R00"); + + /* RSDP is in FSEG memory, so allocate it separately */ + { + AcpiRsdpData rsdp_data = { + .revision = 0, + .oem_id = lsms->oem_id, + .xsdt_tbl_offset = NULL, + .rsdt_tbl_offset = &rsdt, + }; + build_rsdp(tables->rsdp, tables->linker, &rsdp_data); + } + acpi_align_size(tables->linker->cmd_blob, ACPI_BUILD_ALIGN_SIZE); + + /* Cleanup memory that's no longer used. 
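* (table_offsets was only needed while the RSDT entries were being collected.)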
*/ + g_array_free(table_offsets, true); +} + +static void acpi_ram_update(MemoryRegion *mr, GArray *data) +{ + uint32_t size = acpi_data_len(data); + + /* + * Make sure RAM size is correct - + * in case it got changed e.g. by migration + */ + memory_region_ram_resize(mr, size, &error_abort); + + memcpy(memory_region_get_ram_ptr(mr), data->data, size); + memory_region_set_dirty(mr, 0, size); +} + +static void acpi_build_update(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + AcpiBuildTables tables; + + /* No state to update or already patched? Nothing to do. */ + if (!build_state || build_state->patched) { + return; + } + build_state->patched = 1; + + acpi_build_tables_init(&tables); + + acpi_build(&tables, MACHINE(qdev_get_machine())); + + acpi_ram_update(build_state->table_mr, tables.table_data); + + if (build_state->rsdp) { + memcpy(build_state->rsdp, tables.rsdp->data, + acpi_data_len(tables.rsdp)); + } else { + acpi_ram_update(build_state->rsdp_mr, tables.rsdp); + } + + acpi_ram_update(build_state->linker_mr, tables.linker->cmd_blob); + acpi_build_tables_cleanup(&tables, true); +} + +static void acpi_build_reset(void *build_opaque) +{ + AcpiBuildState *build_state = build_opaque; + build_state->patched = 0; +} + +static const VMStateDescription vmstate_acpi_build = { + .name = "acpi_build", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]){ VMSTATE_UINT8(patched, AcpiBuildState), + VMSTATE_END_OF_LIST() }, +}; + +void loongarch_acpi_setup(void) +{ + LoongarchMachineState *lsms = LoongarchMACHINE(qdev_get_machine()); + AcpiBuildTables tables; + AcpiBuildState *build_state; + + if (!lsms->fw_cfg) { + ACPI_BUILD_DPRINTF("No fw cfg. Bailing out.\n"); + return; + } + + if (!lsms->acpi_build_enabled) { + ACPI_BUILD_DPRINTF("ACPI build disabled. Bailing out.\n"); + return; + } + + if (!loongarch_is_acpi_enabled(lsms)) { + ACPI_BUILD_DPRINTF("ACPI disabled. Bailing out.\n"); + return; + } + + build_state = g_malloc0(sizeof *build_state); + + acpi_build_tables_init(&tables); + acpi_build(&tables, MACHINE(lsms)); + + /* Now expose it all to Guest */ + build_state->table_mr = + acpi_add_rom_blob(acpi_build_update, build_state, tables.table_data, + ACPI_BUILD_TABLE_FILE); + assert(build_state->table_mr != NULL); + + build_state->linker_mr = + acpi_add_rom_blob(acpi_build_update, build_state, + tables.linker->cmd_blob, "etc/table-loader"); + + fw_cfg_add_file(lsms->fw_cfg, ACPI_BUILD_TPMLOG_FILE, tables.tcpalog->data, + acpi_data_len(tables.tcpalog)); + + build_state->rsdp = NULL; + build_state->rsdp_mr = acpi_add_rom_blob( + acpi_build_update, build_state, tables.rsdp, ACPI_BUILD_RSDP_FILE); + + qemu_register_reset(acpi_build_reset, build_state); + acpi_build_reset(build_state); + vmstate_register(NULL, 0, &vmstate_acpi_build, build_state); + + /* + * Cleanup tables but don't free the memory: we track it + * in build_state. + */ + acpi_build_tables_cleanup(&tables, false); +} diff --git a/hw/loongarch/acpi-build.h b/hw/loongarch/acpi-build.h new file mode 100644 index 0000000000000000000000000000000000000000..97d53a9258bee4a2781eccc39055815dd8e75152 --- /dev/null +++ b/hw/loongarch/acpi-build.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#ifndef HW_LARCH_ACPI_BUILD_H +#define HW_LARCH_ACPI_BUILD_H + +#define EFI_ACPI_OEM_ID "LARCH" +#define EFI_ACPI_OEM_TABLE_ID "LARCH" /* OEM table id 8 bytes long */ +#define EFI_ACPI_OEM_REVISION 0x00000002 +#define EFI_ACPI_CREATOR_ID "LINUX" +#define EFI_ACPI_CREATOR_REVISION 0x01000013 + +#define ACPI_COMPATIBLE_1_0 0 +#define ACPI_COMPATIBLE_2_0 1 + +void loongarch_acpi_setup(void); + +#endif diff --git a/hw/loongarch/apic.c b/hw/loongarch/apic.c new file mode 100644 index 0000000000000000000000000000000000000000..9e762cf0fe03a6b7153adc9700ccf995694beb62 --- /dev/null +++ b/hw/loongarch/apic.c @@ -0,0 +1,689 @@ +/* + * Loongarch 3A5000 interrupt controller emulation + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "hw/boards.h" +#include "hw/irq.h" +#include "hw/loongarch/cpudevs.h" +#include "hw/sysbus.h" +#include "qemu/host-utils.h" +#include "qemu/error-report.h" +#include "sysemu/kvm.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "target/loongarch64/cpu.h" +#include "exec/address-spaces.h" +#include "hw/loongarch/larch.h" +#include "migration/vmstate.h" + +#define DEBUG_APIC 0 + +#define DPRINTF(fmt, ...) 
\ + do { \ + if (DEBUG_APIC) { \ + fprintf(stderr, "APIC: " fmt, ##__VA_ARGS__); \ + } \ + } while (0) + +#define APIC_OFFSET 0x400 +#define APIC_BASE (0x1f010000ULL) +#define EXTIOI_NODETYPE_START (0x4a0 - APIC_OFFSET) +#define EXTIOI_NODETYPE_END (0x4c0 - APIC_OFFSET) +#define EXTIOI_IPMAP_START (0x4c0 - APIC_OFFSET) +#define EXTIOI_IPMAP_END (0x4c8 - APIC_OFFSET) +#define EXTIOI_ENABLE_START (0x600 - APIC_OFFSET) +#define EXTIOI_ENABLE_END (0x620 - APIC_OFFSET) +#define EXTIOI_BOUNCE_START (0x680 - APIC_OFFSET) +#define EXTIOI_BOUNCE_END (0x6a0 - APIC_OFFSET) +#define EXTIOI_ISR_START (0x700 - APIC_OFFSET) +#define EXTIOI_ISR_END (0x720 - APIC_OFFSET) +#define EXTIOI_COREMAP_START (0xC00 - APIC_OFFSET) +#define EXTIOI_COREMAP_END (0xD00 - APIC_OFFSET) +#define EXTIOI_COREISR_START (0x10000) +#define EXTIOI_COREISR_END (EXTIOI_COREISR_START + 0x10000) + +static int ext_irq_pre_save(void *opaque) +{ +#ifdef CONFIG_KVM + apicState *apic = opaque; + struct loongarch_kvm_irqchip *chip; + struct kvm_loongarch_ls3a_extirq_state *kstate; + int ret, length, i, vcpuid; +#endif + if ((!kvm_enabled()) || (!kvm_irqchip_in_kernel())) { + return 0; + } +#ifdef CONFIG_KVM + length = sizeof(struct loongarch_kvm_irqchip) + + sizeof(struct kvm_loongarch_ls3a_extirq_state); + chip = g_malloc0(length); + memset(chip, 0, length); + chip->chip_id = KVM_IRQCHIP_LS3A_EXTIRQ; + chip->len = length; + + ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip); + if (ret < 0) { + fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(ret)); + abort(); + } + + kstate = (struct kvm_loongarch_ls3a_extirq_state *)chip->data; + for (i = 0; i < EXTIOI_IRQS_BITMAP_SIZE; i++) { + apic->ext_en[i] = kstate->ext_en_r.reg_u8[i]; + apic->ext_bounce[i] = kstate->bounce_r.reg_u8[i]; + apic->ext_isr[i] = kstate->ext_isr_r.reg_u8[i]; + for (vcpuid = 0; vcpuid < MAX_CORES; vcpuid++) { + apic->ext_coreisr[vcpuid][i] = + kstate->ext_core_isr_r.reg_u8[vcpuid][i]; + } + } + for (i = 0; i < EXTIOI_IRQS_IPMAP_SIZE; i++) { + apic->ext_ipmap[i] = kstate->ip_map_r.reg_u8[i]; + } + for (i = 0; i < EXTIOI_IRQS; i++) { + apic->ext_coremap[i] = kstate->core_map_r.reg_u8[i]; + } + for (i = 0; i < 16; i++) { + apic->ext_nodetype[i] = kstate->node_type_r.reg_u16[i]; + } + g_free(chip); +#endif + return 0; +} + +static int ext_irq_post_load(void *opaque, int version) +{ +#ifdef CONFIG_KVM + apicState *apic = opaque; + struct loongarch_kvm_irqchip *chip; + struct kvm_loongarch_ls3a_extirq_state *kstate; + int ret, length, i, vcpuid; +#endif + if ((!kvm_enabled()) || (!kvm_irqchip_in_kernel())) { + return 0; + } +#ifdef CONFIG_KVM + length = sizeof(struct loongarch_kvm_irqchip) + + sizeof(struct kvm_loongarch_ls3a_extirq_state); + chip = g_malloc0(length); + + chip->chip_id = KVM_IRQCHIP_LS3A_EXTIRQ; + chip->len = length; + + kstate = (struct kvm_loongarch_ls3a_extirq_state *)chip->data; + for (i = 0; i < EXTIOI_IRQS_BITMAP_SIZE; i++) { + kstate->ext_en_r.reg_u8[i] = apic->ext_en[i]; + kstate->bounce_r.reg_u8[i] = apic->ext_bounce[i]; + kstate->ext_isr_r.reg_u8[i] = apic->ext_isr[i]; + for (vcpuid = 0; vcpuid < MAX_CORES; vcpuid++) { + kstate->ext_core_isr_r.reg_u8[vcpuid][i] = + apic->ext_coreisr[vcpuid][i]; + } + } + for (i = 0; i < EXTIOI_IRQS_IPMAP_SIZE; i++) { + kstate->ip_map_r.reg_u8[i] = apic->ext_ipmap[i]; + } + for (i = 0; i < EXTIOI_IRQS; i++) { + kstate->core_map_r.reg_u8[i] = apic->ext_coremap[i]; + } + for (i = 0; i < 16; i++) { + kstate->node_type_r.reg_u16[i] = apic->ext_nodetype[i]; + } + + ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, 
chip); + if (ret < 0) { + fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(ret)); + abort(); + } + g_free(chip); +#endif + return 0; +} + +typedef struct nodeApicState { + unsigned long addr; + int nodeid; + apicState *apic; +} nodeApicState; + +static void ioapic_update_irq(void *opaque, int irq, int level) +{ + apicState *s = opaque; + uint8_t ipnum, cpu, cpu_ipnum; + unsigned long found1, found2; + uint8_t reg_count, reg_bit; + + reg_count = irq / 32; + reg_bit = irq % 32; + + ipnum = s->ext_sw_ipmap[irq]; + cpu = s->ext_sw_coremap[irq]; + cpu_ipnum = cpu * LS3A_INTC_IP + ipnum; + if (level == 1) { + if (test_bit(reg_bit, ((void *)s->ext_en + 0x4 * reg_count)) == + false) { + return; + } + + if (test_bit(reg_bit, ((void *)s->ext_isr + 0x4 * reg_count)) == + false) { + return; + } + bitmap_set(((void *)s->ext_coreisr[cpu] + 0x4 * reg_count), reg_bit, + 1); + found1 = + find_next_bit(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count), + EXTIOI_IRQS, 0); + bitmap_set(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count), + reg_bit, 1); + if (found1 >= EXTIOI_IRQS) { + qemu_set_irq(s->parent_irq[cpu][ipnum], level); + } + } else { + bitmap_clear(((void *)s->ext_isr + 0x4 * reg_count), reg_bit, 1); + bitmap_clear(((void *)s->ext_coreisr[cpu] + 0x4 * reg_count), reg_bit, + 1); + found1 = + find_next_bit(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count), + EXTIOI_IRQS, 0); + found1 += reg_count * 32; + bitmap_clear(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count), + reg_bit, 1); + found2 = + find_next_bit(((void *)s->ext_ipisr[cpu_ipnum] + 0x4 * reg_count), + EXTIOI_IRQS, 0); + if ((found1 < EXTIOI_IRQS) && (found2 >= EXTIOI_IRQS)) { + qemu_set_irq(s->parent_irq[cpu][ipnum], level); + } + } +} + +static void ioapic_setirq(void *opaque, int irq, int level) +{ + apicState *s = opaque; + uint8_t reg_count, reg_bit; + + reg_count = irq / 32; + reg_bit = irq % 32; + + if (level) { + bitmap_set(((void *)s->ext_isr + 0x4 * reg_count), reg_bit, 1); + } else { + bitmap_clear(((void *)s->ext_isr + 0x4 * reg_count), reg_bit, 1); + } + + ioapic_update_irq(s, irq, level); +} + +static uint32_t apic_readb(void *opaque, hwaddr addr) +{ + nodeApicState *node; + apicState *state; + unsigned long off; + uint8_t ret; + int cpu; + + node = (nodeApicState *)opaque; + state = node->apic; + off = addr & 0xfffff; + ret = 0; + if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) { + off -= EXTIOI_ENABLE_START; + ret = *(uint8_t *)((void *)state->ext_en + off); + } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) { + off -= EXTIOI_BOUNCE_START; + ret = *(uint8_t *)((void *)state->ext_bounce + off); + } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) { + off -= EXTIOI_ISR_START; + ret = *(uint8_t *)((void *)state->ext_isr + off); + } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) { + off -= EXTIOI_COREISR_START; + cpu = (off >> 8) & 0xff; + ret = *(uint8_t *)((void *)state->ext_coreisr[cpu] + (off & 0x1f)); + } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) { + off -= EXTIOI_IPMAP_START; + ret = *(uint8_t *)((void *)state->ext_ipmap + off); + } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) { + off -= EXTIOI_COREMAP_START; + ret = *(uint8_t *)((void *)state->ext_coremap + off); + } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) { + off -= EXTIOI_NODETYPE_START; + ret = *(uint8_t *)((void *)state->ext_nodetype + off); + } + + DPRINTF("readb reg 0x" TARGET_FMT_plx " = %x\n", 
node->addr + addr, ret); + return ret; +} + +static uint32_t apic_readw(void *opaque, hwaddr addr) +{ + nodeApicState *node; + apicState *state; + unsigned long off; + uint16_t ret; + int cpu; + + node = (nodeApicState *)opaque; + state = node->apic; + off = addr & 0xfffff; + ret = 0; + if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) { + off -= EXTIOI_ENABLE_START; + ret = *(uint16_t *)((void *)state->ext_en + off); + } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) { + off -= EXTIOI_BOUNCE_START; + ret = *(uint16_t *)((void *)state->ext_bounce + off); + } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) { + off -= EXTIOI_ISR_START; + ret = *(uint16_t *)((void *)state->ext_isr + off); + } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) { + off -= EXTIOI_COREISR_START; + cpu = (off >> 8) & 0xff; + ret = *(uint16_t *)((void *)state->ext_coreisr[cpu] + (off & 0x1f)); + } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) { + off -= EXTIOI_IPMAP_START; + ret = *(uint16_t *)((void *)state->ext_ipmap + off); + } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) { + off -= EXTIOI_COREMAP_START; + ret = *(uint16_t *)((void *)state->ext_coremap + off); + } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) { + off -= EXTIOI_NODETYPE_START; + ret = *(uint16_t *)((void *)state->ext_nodetype + off); + } + + DPRINTF("readw reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, ret); + return ret; +} + +static uint32_t apic_readl(void *opaque, hwaddr addr) +{ + nodeApicState *node; + apicState *state; + unsigned long off; + uint32_t ret; + int cpu; + + node = (nodeApicState *)opaque; + state = node->apic; + off = addr & 0xfffff; + ret = 0; + if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) { + off -= EXTIOI_ENABLE_START; + ret = *(uint32_t *)((void *)state->ext_en + off); + } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) { + off -= EXTIOI_BOUNCE_START; + ret = *(uint32_t *)((void *)state->ext_bounce + off); + } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) { + off -= EXTIOI_ISR_START; + ret = *(uint32_t *)((void *)state->ext_isr + off); + } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) { + off -= EXTIOI_COREISR_START; + cpu = (off >> 8) & 0xff; + ret = *(uint32_t *)((void *)state->ext_coreisr[cpu] + (off & 0x1f)); + } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) { + off -= EXTIOI_IPMAP_START; + ret = *(uint32_t *)((void *)state->ext_ipmap + off); + } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) { + off -= EXTIOI_COREMAP_START; + ret = *(uint32_t *)((void *)state->ext_coremap + off); + } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) { + off -= EXTIOI_NODETYPE_START; + ret = *(uint32_t *)((void *)state->ext_nodetype + off); + } + + DPRINTF("readl reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, ret); + return ret; +} + +static void apic_writeb(void *opaque, hwaddr addr, uint32_t val) +{ + nodeApicState *node; + apicState *state; + unsigned long off; + uint8_t old; + int cpu, i, ipnum, level, mask; + + node = (nodeApicState *)opaque; + state = node->apic; + off = addr & 0xfffff; + if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) { + off -= EXTIOI_ENABLE_START; + old = *(uint8_t *)((void *)state->ext_en + off); + if (old != val) { + *(uint8_t *)((void *)state->ext_en + off) = val; + old = old ^ val; + mask = 0x1; + for (i 
= 0; i < 8; i++) { + if (old & mask) { + level = !!(val & (0x1 << i)); + ioapic_update_irq(state, i + off * 8, level); + } + mask = mask << 1; + } + } + } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) { + off -= EXTIOI_BOUNCE_START; + *(uint8_t *)((void *)state->ext_bounce + off) = val; + } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) { + off -= EXTIOI_ISR_START; + old = *(uint8_t *)((void *)state->ext_isr + off); + *(uint8_t *)((void *)state->ext_isr + off) = old & ~val; + mask = 0x1; + for (i = 0; i < 8; i++) { + if ((old & mask) && (val & mask)) { + ioapic_update_irq(state, i + off * 8, 0); + } + mask = mask << 1; + } + } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) { + off -= EXTIOI_COREISR_START; + cpu = (off >> 8) & 0xff; + off = off & 0x1f; + old = *(uint8_t *)((void *)state->ext_coreisr[cpu] + off); + *(uint8_t *)((void *)state->ext_coreisr[cpu] + off) = old & ~val; + mask = 0x1; + for (i = 0; i < 8; i++) { + if ((old & mask) && (val & mask)) { + ioapic_update_irq(state, i + off * 8, 0); + } + mask = mask << 1; + } + } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) { + off -= EXTIOI_IPMAP_START; + val = val & 0xf; + *(uint8_t *)((void *)state->ext_ipmap + off) = val; + ipnum = 0; + for (i = 0; i < 4; i++) { + if (val & (0x1 << i)) { + ipnum = i; + break; + } + } + if (val) { + for (i = 0; i < 32; i++) { + cpu = off * 32 + i; + state->ext_sw_ipmap[cpu] = ipnum; + } + } + } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) { + off -= EXTIOI_COREMAP_START; + val = val & 0xff; + *(uint8_t *)((void *)state->ext_coremap + off) = val; + state->ext_sw_coremap[off] = val; + } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) { + off -= EXTIOI_NODETYPE_START; + *(uint8_t *)((void *)state->ext_nodetype + off) = val; + } + + DPRINTF("writeb reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, val); +} + +static void apic_writew(void *opaque, hwaddr addr, uint32_t val) +{ + nodeApicState *node; + apicState *state; + unsigned long off; + uint16_t old; + int cpu, i, level, mask; + + node = (nodeApicState *)opaque; + state = node->apic; + off = addr & 0xfffff; + if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) { + off -= EXTIOI_ENABLE_START; + old = *(uint16_t *)((void *)state->ext_en + off); + if (old != val) { + *(uint16_t *)((void *)state->ext_en + off) = val; + old = old ^ val; + mask = 0x1; + for (i = 0; i < 16; i++) { + if (old & mask) { + level = !!(val & (0x1 << i)); + ioapic_update_irq(state, i + off * 8, level); + } + mask = mask << 1; + } + } + } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) { + off -= EXTIOI_BOUNCE_START; + *(uint16_t *)((void *)state->ext_bounce + off) = val; + } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) { + off -= EXTIOI_ISR_START; + old = *(uint16_t *)((void *)state->ext_isr + off); + *(uint16_t *)((void *)state->ext_isr + off) = old & ~val; + mask = 0x1; + for (i = 0; i < 16; i++) { + if ((old & mask) && (val & mask)) { + ioapic_update_irq(state, i + off * 8, 0); + } + mask = mask << 1; + } + } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) { + off -= EXTIOI_COREISR_START; + cpu = (off >> 8) & 0xff; + off = off & 0x1f; + old = *(uint16_t *)((void *)state->ext_coreisr[cpu] + off); + *(uint16_t *)((void *)state->ext_coreisr[cpu] + off) = old & ~val; + mask = 0x1; + for (i = 0; i < 16; i++) { + if ((old & mask) && (val & mask)) { + ioapic_update_irq(state, i + off * 
8, 0); + } + mask = mask << 1; + } + } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) { + apic_writeb(opaque, addr, val & 0xff); + apic_writeb(opaque, addr + 1, (val >> 8) & 0xff); + + } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) { + apic_writeb(opaque, addr, val & 0xff); + apic_writeb(opaque, addr + 1, (val >> 8) & 0xff); + + } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) { + off -= EXTIOI_NODETYPE_START; + *(uint16_t *)((void *)state->ext_nodetype + off) = val; + } + + DPRINTF("writew reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, val); +} + +static void apic_writel(void *opaque, hwaddr addr, uint32_t val) +{ + nodeApicState *node; + apicState *state; + unsigned long off; + uint32_t old; + int cpu, i, level, mask; + + node = (nodeApicState *)opaque; + state = node->apic; + off = addr & 0xfffff; + if ((off >= EXTIOI_ENABLE_START) && (off < EXTIOI_ENABLE_END)) { + off -= EXTIOI_ENABLE_START; + old = *(uint32_t *)((void *)state->ext_en + off); + if (old != val) { + *(uint32_t *)((void *)state->ext_en + off) = val; + old = old ^ val; + mask = 0x1; + for (i = 0; i < 32; i++) { + if (old & mask) { + level = !!(val & (0x1 << i)); + ioapic_update_irq(state, i + off * 8, level); + } + mask = mask << 1; + } + } + } else if ((off >= EXTIOI_BOUNCE_START) && (off < EXTIOI_BOUNCE_END)) { + off -= EXTIOI_BOUNCE_START; + *(uint32_t *)((void *)state->ext_bounce + off) = val; + } else if ((off >= EXTIOI_ISR_START) && (off < EXTIOI_ISR_END)) { + off -= EXTIOI_ISR_START; + old = *(uint32_t *)((void *)state->ext_isr + off); + *(uint32_t *)((void *)state->ext_isr + off) = old & ~val; + mask = 0x1; + for (i = 0; i < 32; i++) { + if ((old & mask) && (val & mask)) { + ioapic_update_irq(state, i + off * 8, 0); + } + mask = mask << 1; + } + } else if ((off >= EXTIOI_COREISR_START) && (off < EXTIOI_COREISR_END)) { + off -= EXTIOI_COREISR_START; + cpu = (off >> 8) & 0xff; + off = off & 0x1f; + old = *(uint32_t *)((void *)state->ext_coreisr[cpu] + off); + *(uint32_t *)((void *)state->ext_coreisr[cpu] + off) = old & ~val; + mask = 0x1; + for (i = 0; i < 32; i++) { + if ((old & mask) && (val & mask)) { + ioapic_update_irq(state, i + off * 8, 0); + } + mask = mask << 1; + } + } else if ((off >= EXTIOI_IPMAP_START) && (off < EXTIOI_IPMAP_END)) { + apic_writeb(opaque, addr, val & 0xff); + apic_writeb(opaque, addr + 1, (val >> 8) & 0xff); + apic_writeb(opaque, addr + 2, (val >> 16) & 0xff); + apic_writeb(opaque, addr + 3, (val >> 24) & 0xff); + + } else if ((off >= EXTIOI_COREMAP_START) && (off < EXTIOI_COREMAP_END)) { + apic_writeb(opaque, addr, val & 0xff); + apic_writeb(opaque, addr + 1, (val >> 8) & 0xff); + apic_writeb(opaque, addr + 2, (val >> 16) & 0xff); + apic_writeb(opaque, addr + 3, (val >> 24) & 0xff); + + } else if ((off >= EXTIOI_NODETYPE_START) && (off < EXTIOI_NODETYPE_END)) { + off -= EXTIOI_NODETYPE_START; + *(uint32_t *)((void *)state->ext_nodetype + off) = val; + } + + DPRINTF("writel reg 0x" TARGET_FMT_plx " = %x\n", node->addr + addr, val); +} + +static uint64_t apic_readfn(void *opaque, hwaddr addr, unsigned size) +{ + switch (size) { + case 1: + return apic_readb(opaque, addr); + case 2: + return apic_readw(opaque, addr); + case 4: + return apic_readl(opaque, addr); + default: + g_assert_not_reached(); + } +} + +static void apic_writefn(void *opaque, hwaddr addr, uint64_t value, + unsigned size) +{ + switch (size) { + case 1: + apic_writeb(opaque, addr, value); + break; + case 2: + apic_writew(opaque, addr, value); + 
break; + case 4: + apic_writel(opaque, addr, value); + break; + default: + g_assert_not_reached(); + } +} + +static const VMStateDescription vmstate_apic = { + .name = "apic", + .version_id = 1, + .minimum_version_id = 1, + .pre_save = ext_irq_pre_save, + .post_load = ext_irq_post_load, + .fields = + (VMStateField[]){ + VMSTATE_UINT8_ARRAY(ext_en, apicState, EXTIOI_IRQS_BITMAP_SIZE), + VMSTATE_UINT8_ARRAY(ext_bounce, apicState, + EXTIOI_IRQS_BITMAP_SIZE), + VMSTATE_UINT8_ARRAY(ext_isr, apicState, EXTIOI_IRQS_BITMAP_SIZE), + VMSTATE_UINT8_2DARRAY(ext_coreisr, apicState, MAX_CORES, + EXTIOI_IRQS_BITMAP_SIZE), + VMSTATE_UINT8_ARRAY(ext_ipmap, apicState, EXTIOI_IRQS_IPMAP_SIZE), + VMSTATE_UINT8_ARRAY(ext_coremap, apicState, EXTIOI_IRQS), + VMSTATE_UINT16_ARRAY(ext_nodetype, apicState, 16), + VMSTATE_UINT64(ext_control, apicState), + VMSTATE_UINT8_ARRAY(ext_sw_ipmap, apicState, EXTIOI_IRQS), + VMSTATE_UINT8_ARRAY(ext_sw_coremap, apicState, EXTIOI_IRQS), + VMSTATE_UINT8_2DARRAY(ext_ipisr, apicState, + MAX_CORES *LS3A_INTC_IP, + EXTIOI_IRQS_BITMAP_SIZE), + VMSTATE_END_OF_LIST() } +}; + +static const MemoryRegionOps apic_ops = { + .read = apic_readfn, + .write = apic_writefn, + .impl.min_access_size = 1, + .impl.max_access_size = 4, + .valid.min_access_size = 1, + .valid.max_access_size = 4, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +int cpu_init_apic(LoongarchMachineState *ms, CPULOONGARCHState *env, int cpu) +{ + apicState *apic; + nodeApicState *node; + MemoryRegion *iomem; + unsigned long base; + int pin; + char str[32]; + + if (ms->apic == NULL) { + apic = g_malloc0(sizeof(apicState)); + vmstate_register(NULL, 0, &vmstate_apic, apic); + apic->irq = qemu_allocate_irqs(ioapic_setirq, apic, EXTIOI_IRQS); + + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + /* cpu_pin[9:2] <= intc_pin[7:0] */ + apic->parent_irq[cpu][pin] = env->irq[pin + 2]; + } + ms->apic = apic; + + if (cpu == 0) { + base = APIC_BASE; + node = g_malloc0(sizeof(nodeApicState)); + node->apic = ms->apic; + node->addr = base; + + iomem = g_new(MemoryRegion, 1); + sprintf(str, "apic%d", cpu); + /* extioi addr 0x1f010000~0x1f02ffff */ + memory_region_init_io(iomem, NULL, &apic_ops, node, str, 0x20000); + memory_region_add_subregion(get_system_memory(), base, iomem); + } + + } else { + if (cpu != 0) { + for (pin = 0; pin < LS3A_INTC_IP; pin++) { + ms->apic->parent_irq[cpu][pin] = env->irq[pin + 2]; + } + } + } + return 0; +} diff --git a/hw/loongarch/ioapic.c b/hw/loongarch/ioapic.c new file mode 100644 index 0000000000000000000000000000000000000000..102102781fde01f01eb012b7f40841a4546de72e --- /dev/null +++ b/hw/loongarch/ioapic.c @@ -0,0 +1,419 @@ +/* + * LS7A1000 Northbridge IOAPIC support + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "qemu/log.h"
+#include "sysemu/kvm.h"
+#include "linux/kvm.h"
+#include "migration/vmstate.h"
+
+#define DEBUG_LS7A_APIC 0
+
+#define DPRINTF(fmt, ...) \
+    do { \
+        if (DEBUG_LS7A_APIC) { \
+            fprintf(stderr, "IOAPIC: " fmt, ##__VA_ARGS__); \
+        } \
+    } while (0)
+
+#define TYPE_LS7A_APIC "ioapic"
+#define LS7A_APIC(obj) OBJECT_CHECK(LS7AApicState, (obj), TYPE_LS7A_APIC)
+
+#define LS7A_IOAPIC_ROUTE_ENTRY_OFFSET 0x100
+#define LS7A_IOAPIC_INT_ID_OFFSET 0x00
+#define LS7A_INT_ID_VAL 0x7000000UL
+#define LS7A_INT_ID_VER 0x1f0001UL
+#define LS7A_IOAPIC_INT_MASK_OFFSET 0x20
+#define LS7A_IOAPIC_INT_EDGE_OFFSET 0x60
+#define LS7A_IOAPIC_INT_CLEAR_OFFSET 0x80
+#define LS7A_IOAPIC_INT_STATUS_OFFSET 0x3a0
+#define LS7A_IOAPIC_INT_POL_OFFSET 0x3e0
+#define LS7A_IOAPIC_HTMSI_EN_OFFSET 0x40
+#define LS7A_IOAPIC_HTMSI_VEC_OFFSET 0x200
+#define LS7A_AUTO_CTRL0_OFFSET 0xc0
+#define LS7A_AUTO_CTRL1_OFFSET 0xe0
+
+typedef struct LS7AApicState {
+    SysBusDevice parent_obj;
+    qemu_irq parent_irq[257];
+    uint64_t int_id;
+    uint64_t int_mask;   /* 0x020 interrupt mask register */
+    uint64_t htmsi_en;   /* 0x040 1 = deliver as HT MSI */
+    uint64_t intedge;    /* 0x060 edge = 1, level = 0 */
+    uint64_t intclr;     /* 0x080 write 1 to clear an edge interrupt,
+                            writing 0 has no effect */
+    uint64_t auto_crtl0; /* 0x0c0 */
+    uint64_t auto_crtl1; /* 0x0e0 */
+    uint8_t route_entry[64];  /* 0x100 - 0x140 */
+    uint8_t htmsi_vector[64]; /* 0x200 - 0x240 */
+    uint64_t intisr_chip0; /* 0x300 */
+    uint64_t intisr_chip1; /* 0x320 */
+    uint64_t last_intirr;  /* edge detection */
+    uint64_t intirr;       /* 0x380 interrupt request register */
+    uint64_t intisr;       /* 0x3a0 interrupt service register */
+    /*
+     * 0x3e0 interrupt level polarity selection
+     * register, 0 selects high-level trigger
+     */
+    uint64_t int_polarity;
+    MemoryRegion iomem;
+} LS7AApicState;
+
+static void update_irq(LS7AApicState *s)
+{
+    int i;
+    if ((s->intirr & (~s->int_mask)) & (~s->htmsi_en)) {
+        DPRINTF("7a update irqline up\n");
+        s->intisr = (s->intirr & (~s->int_mask) & (~s->htmsi_en));
+        qemu_set_irq(s->parent_irq[256], 1);
+    } else {
+        DPRINTF("7a update irqline down\n");
+        s->intisr &= (~s->htmsi_en);
+        qemu_set_irq(s->parent_irq[256], 0);
+    }
+    if (s->htmsi_en) {
+        for (i = 0; i < 64; i++) {
+            if ((((~s->intisr) & s->intirr) & s->htmsi_en) & (1ULL << i)) {
+                s->intisr |= 1ULL << i;
+                qemu_set_irq(s->parent_irq[s->htmsi_vector[i]], 1);
+            } else if (((~(s->intisr | s->intirr)) & s->htmsi_en) &
+                       (1ULL << i)) {
+                qemu_set_irq(s->parent_irq[s->htmsi_vector[i]], 0);
+            }
+        }
+    }
+}
+
+static void irq_handler(void *opaque, int irq, int level)
+{
+    LS7AApicState *s = opaque;
+    uint64_t mask;
+
+    assert(irq < 64);
+    mask = 1ULL << irq;
+    DPRINTF("------ %s irq %d %d\n", __func__, irq, level);
+
+    if (s->intedge & mask) {
+        /* edge triggered */
+        /* TODO */
+    } else {
+        /* level triggered */
+        if (level) {
+            s->intirr |= mask;
+        } else {
+            s->intirr &= ~mask;
+        }
+    }
+    update_irq(s);
+}
+
+static uint64_t ls7a_apic_reg_read(void *opaque, hwaddr addr, unsigned size)
+{
+    LS7AApicState *a = opaque;
+    uint64_t val = 0;
+    uint64_t offset;
+    int64_t offset_tmp;
+    offset = addr & 0xfff;
+    if (8 == size) {
+        switch (offset) {
+        case LS7A_IOAPIC_INT_ID_OFFSET:
+            val = LS7A_INT_ID_VER;
+            val = (val << 32) + LS7A_INT_ID_VAL;
+            break;
+        case LS7A_IOAPIC_INT_MASK_OFFSET:
+            val = a->int_mask;
+            break;
+        case LS7A_IOAPIC_INT_STATUS_OFFSET:
+            val = a->intisr & (~a->int_mask);
+            break;
+        case LS7A_IOAPIC_INT_EDGE_OFFSET:
+            val = a->intedge;
+            break;
+        case 
LS7A_IOAPIC_INT_POL_OFFSET: + val = a->int_polarity; + break; + case LS7A_IOAPIC_HTMSI_EN_OFFSET: + val = a->htmsi_en; + break; + case LS7A_AUTO_CTRL0_OFFSET: + case LS7A_AUTO_CTRL1_OFFSET: + break; + default: + break; + } + } else if (1 == size) { + if (offset >= LS7A_IOAPIC_HTMSI_VEC_OFFSET) { + offset_tmp = offset - LS7A_IOAPIC_HTMSI_VEC_OFFSET; + if (offset_tmp >= 0 && offset_tmp < 64) { + val = a->htmsi_vector[offset_tmp]; + } + } else if (offset >= LS7A_IOAPIC_ROUTE_ENTRY_OFFSET) { + offset_tmp = offset - LS7A_IOAPIC_ROUTE_ENTRY_OFFSET; + if (offset_tmp >= 0 && offset_tmp < 64) { + val = a->route_entry[offset_tmp]; + DPRINTF("addr %lx val %lx\n", addr, val); + } + } + } + DPRINTF(TARGET_FMT_plx " val %lx\n", addr, val); + return val; +} + +static void ls7a_apic_reg_write(void *opaque, hwaddr addr, uint64_t data, + unsigned size) +{ + LS7AApicState *a = opaque; + int64_t offset_tmp; + uint64_t offset; + offset = addr & 0xfff; + DPRINTF(TARGET_FMT_plx " size %d val %lx\n", addr, size, data); + if (8 == size) { + switch (offset) { + case LS7A_IOAPIC_INT_MASK_OFFSET: + a->int_mask = data; + update_irq(a); + break; + case LS7A_IOAPIC_INT_STATUS_OFFSET: + a->intisr = data; + break; + case LS7A_IOAPIC_INT_EDGE_OFFSET: + a->intedge = data; + break; + case LS7A_IOAPIC_INT_CLEAR_OFFSET: + a->intisr &= (~data); + update_irq(a); + break; + case LS7A_IOAPIC_INT_POL_OFFSET: + a->int_polarity = data; + break; + case LS7A_IOAPIC_HTMSI_EN_OFFSET: + a->htmsi_en = data; + break; + case LS7A_AUTO_CTRL0_OFFSET: + case LS7A_AUTO_CTRL1_OFFSET: + break; + default: + break; + } + } else if (1 == size) { + if (offset >= LS7A_IOAPIC_HTMSI_VEC_OFFSET) { + offset_tmp = offset - LS7A_IOAPIC_HTMSI_VEC_OFFSET; + if (offset_tmp >= 0 && offset_tmp < 64) { + a->htmsi_vector[offset_tmp] = (uint8_t)(data & 0xff); + } + } else if (offset >= LS7A_IOAPIC_ROUTE_ENTRY_OFFSET) { + offset_tmp = offset - LS7A_IOAPIC_ROUTE_ENTRY_OFFSET; + if (offset_tmp >= 0 && offset_tmp < 64) { + a->route_entry[offset_tmp] = (uint8_t)(data & 0xff); + } + } + } +} + +static const MemoryRegionOps ls7a_apic_ops = { + .read = ls7a_apic_reg_read, + .write = ls7a_apic_reg_write, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static int kvm_ls7a_pre_save(void *opaque) +{ +#ifdef CONFIG_KVM + LS7AApicState *s = opaque; + struct loongarch_kvm_irqchip *chip; + struct ls7a_ioapic_state *state; + int ret, i, length; + + if ((!kvm_enabled()) || (!kvm_irqchip_in_kernel())) { + return 0; + } + + length = sizeof(struct loongarch_kvm_irqchip) + + sizeof(struct ls7a_ioapic_state); + chip = g_malloc0(length); + memset(chip, 0, length); + chip->chip_id = KVM_IRQCHIP_LS7A_IOAPIC; + chip->len = length; + ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip); + if (ret < 0) { + fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(ret)); + abort(); + } + state = (struct ls7a_ioapic_state *)chip->data; + s->int_id = state->int_id; + s->int_mask = state->int_mask; + s->htmsi_en = state->htmsi_en; + s->intedge = state->intedge; + s->intclr = state->intclr; + s->auto_crtl0 = state->auto_crtl0; + s->auto_crtl1 = state->auto_crtl1; + for (i = 0; i < 64; i++) { + s->route_entry[i] = state->route_entry[i]; + s->htmsi_vector[i] = state->htmsi_vector[i]; + } + s->intisr_chip0 = state->intisr_chip0; + s->intisr_chip1 = state->intisr_chip1; + s->intirr = state->intirr; + s->intisr = state->intisr; + s->int_polarity = state->int_polarity; + 
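+    /*
+     * The QEMU-side copy now mirrors the in-kernel irqchip, so the
+     * vmstate fields serialized after pre_save reflect the state the
+     * guest currently observes.
+     */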
+    g_free(chip);
+#endif
+    return 0;
+}
+
+static int kvm_ls7a_post_load(void *opaque, int version)
+{
+#ifdef CONFIG_KVM
+    LS7AApicState *s = opaque;
+    struct loongarch_kvm_irqchip *chip;
+    struct ls7a_ioapic_state *state;
+    int ret, i, length;
+
+    if ((!kvm_enabled()) || (!kvm_irqchip_in_kernel())) {
+        return 0;
+    }
+    length = sizeof(struct loongarch_kvm_irqchip) +
+             sizeof(struct ls7a_ioapic_state);
+    chip = g_malloc0(length);
+    memset(chip, 0, length);
+    chip->chip_id = KVM_IRQCHIP_LS7A_IOAPIC;
+    chip->len = length;
+
+    state = (struct ls7a_ioapic_state *)chip->data;
+    state->int_id = s->int_id;
+    state->int_mask = s->int_mask;
+    state->htmsi_en = s->htmsi_en;
+    state->intedge = s->intedge;
+    state->intclr = s->intclr;
+    state->auto_crtl0 = s->auto_crtl0;
+    state->auto_crtl1 = s->auto_crtl1;
+    for (i = 0; i < 64; i++) {
+        state->route_entry[i] = s->route_entry[i];
+        state->htmsi_vector[i] = s->htmsi_vector[i];
+    }
+    state->intisr_chip0 = s->intisr_chip0;
+    state->intisr_chip1 = s->intisr_chip1;
+    state->last_intirr = 0;
+    state->intirr = s->intirr;
+    state->intisr = s->intisr;
+    state->int_polarity = s->int_polarity;
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
+    if (ret < 0) {
+        fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(ret));
+        abort();
+    }
+    g_free(chip);
+#endif
+    return 0;
+}
+
+static void ls7a_apic_reset(DeviceState *d)
+{
+    LS7AApicState *s = LS7A_APIC(d);
+    int i;
+
+    s->int_id = 0x001f000107000000;
+    s->int_mask = 0xffffffffffffffff;
+    s->htmsi_en = 0x0;
+    s->intedge = 0x0;
+    s->intclr = 0x0;
+    s->auto_crtl0 = 0x0;
+    s->auto_crtl1 = 0x0;
+    for (i = 0; i < 64; i++) {
+        s->route_entry[i] = 0x1;
+        s->htmsi_vector[i] = 0x0;
+    }
+    s->intisr_chip0 = 0x0;
+    s->intisr_chip1 = 0x0;
+    s->intirr = 0x0;
+    s->intisr = 0x0;
+    s->int_polarity = 0x0;
+    kvm_ls7a_post_load(s, 0);
+}
+
+static void ls7a_apic_init(Object *obj)
+{
+    DeviceState *dev = DEVICE(obj);
+    LS7AApicState *s = LS7A_APIC(obj);
+    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
+    int tmp;
+    memory_region_init_io(&s->iomem, obj, &ls7a_apic_ops, s, TYPE_LS7A_APIC,
+                          0x1000);
+    sysbus_init_mmio(sbd, &s->iomem);
+    for (tmp = 0; tmp < 257; tmp++) {
+        sysbus_init_irq(sbd, &s->parent_irq[tmp]);
+    }
+    qdev_init_gpio_in(dev, irq_handler, 64);
+}
+
+static const VMStateDescription vmstate_ls7a_apic = {
+    .name = TYPE_LS7A_APIC,
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = kvm_ls7a_pre_save,
+    .post_load = kvm_ls7a_post_load,
+    .fields = (VMStateField[]){
+        VMSTATE_UINT64(int_mask, LS7AApicState),
+        VMSTATE_UINT64(htmsi_en, LS7AApicState),
+        VMSTATE_UINT64(intedge, LS7AApicState),
+        VMSTATE_UINT64(intclr, LS7AApicState),
+        VMSTATE_UINT64(auto_crtl0, LS7AApicState),
+        VMSTATE_UINT64(auto_crtl1, LS7AApicState),
+        VMSTATE_UINT8_ARRAY(route_entry, LS7AApicState, 64),
+        VMSTATE_UINT8_ARRAY(htmsi_vector, LS7AApicState, 64),
+        VMSTATE_UINT64(intisr_chip0, LS7AApicState),
+        VMSTATE_UINT64(intisr_chip1, LS7AApicState),
+        VMSTATE_UINT64(last_intirr, LS7AApicState),
+        VMSTATE_UINT64(intirr, LS7AApicState),
+        VMSTATE_UINT64(intisr, LS7AApicState),
+        VMSTATE_UINT64(int_polarity, LS7AApicState),
+        VMSTATE_END_OF_LIST() }
+};
+
+static void ls7a_apic_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->reset = ls7a_apic_reset;
+    dc->vmsd = &vmstate_ls7a_apic;
+}
+
+static const TypeInfo ls7a_apic_info = {
+    .name = TYPE_LS7A_APIC,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(LS7AApicState),
+    .instance_init = ls7a_apic_init,
+    .class_init = ls7a_apic_class_init,
+}; + +static void ls7a_apic_register_types(void) +{ + type_register_static(&ls7a_apic_info); +} + +type_init(ls7a_apic_register_types) diff --git a/hw/loongarch/iocsr.c b/hw/loongarch/iocsr.c new file mode 100644 index 0000000000000000000000000000000000000000..a1eb54bdd2b1ccbc75861ddb33fee89200a5bdd9 --- /dev/null +++ b/hw/loongarch/iocsr.c @@ -0,0 +1,227 @@ +/* + * LOONGARCH IOCSR support + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "qemu/log.h" +#include "sysemu/kvm.h" +#include "linux/kvm.h" +#include "migration/vmstate.h" +#include "hw/boards.h" +#include "hw/loongarch/larch.h" + +#define BIT_ULL(nr) (1ULL << (nr)) +#define LOONGARCH_IOCSR_FEATURES 0x8 +#define IOCSRF_TEMP BIT_ULL(0) +#define IOCSRF_NODECNT BIT_ULL(1) +#define IOCSRF_MSI BIT_ULL(2) +#define IOCSRF_EXTIOI BIT_ULL(3) +#define IOCSRF_CSRIPI BIT_ULL(4) +#define IOCSRF_FREQCSR BIT_ULL(5) +#define IOCSRF_FREQSCALE BIT_ULL(6) +#define IOCSRF_DVFSV1 BIT_ULL(7) +#define IOCSRF_GMOD BIT_ULL(9) +#define IOCSRF_VM BIT_ULL(11) +#define LOONGARCH_IOCSR_VENDOR 0x10 +#define LOONGARCH_IOCSR_CPUNAME 0x20 +#define LOONGARCH_IOCSR_NODECNT 0x408 +#define LOONGARCH_IOCSR_MISC_FUNC 0x420 +#define IOCSR_MISC_FUNC_TIMER_RESET BIT_ULL(21) +#define IOCSR_MISC_FUNC_EXT_IOI_EN BIT_ULL(48) + +enum { + IOCSR_FEATURES, + IOCSR_VENDOR, + IOCSR_CPUNAME, + IOCSR_NODECNT, + IOCSR_MISC_FUNC, + IOCSR_MAX +}; + +#ifdef CONFIG_KVM +static uint32_t iocsr_array[IOCSR_MAX] = { + [IOCSR_FEATURES] = LOONGARCH_IOCSR_FEATURES, + [IOCSR_VENDOR] = LOONGARCH_IOCSR_VENDOR, + [IOCSR_CPUNAME] = LOONGARCH_IOCSR_CPUNAME, + [IOCSR_NODECNT] = LOONGARCH_IOCSR_NODECNT, + [IOCSR_MISC_FUNC] = LOONGARCH_IOCSR_MISC_FUNC, +}; +#endif + +#define TYPE_IOCSR "iocsr" +#define IOCSR(obj) OBJECT_CHECK(IOCSRState, (obj), TYPE_IOCSR) + +typedef struct IOCSRState { + SysBusDevice parent_obj; + uint64_t iocsr_val[IOCSR_MAX]; +} IOCSRState; + +IOCSRState iocsr_init = { .iocsr_val = { + IOCSRF_NODECNT | IOCSRF_MSI | IOCSRF_EXTIOI | + IOCSRF_CSRIPI | IOCSRF_GMOD | IOCSRF_VM, + 0x6e6f73676e6f6f4c, /* Loongson */ + 0x303030354133, /*3A5000*/ + 0x4, + 0x0, + } }; + +static int kvm_iocsr_pre_save(void *opaque) +{ +#ifdef CONFIG_KVM + IOCSRState *s = opaque; + struct kvm_iocsr_entry entry; + int i = 0; + + if ((!kvm_enabled())) { + return 0; + } + + for (i = 0; i < IOCSR_MAX; i++) { + entry.addr = iocsr_array[i]; + kvm_vm_ioctl(kvm_state, KVM_LOONGARCH_GET_IOCSR, &entry); + s->iocsr_val[i] = entry.data; + } +#endif + return 0; +} + +static int kvm_iocsr_post_load(void *opaque, int version) +{ +#ifdef CONFIG_KVM + IOCSRState *s = opaque; + struct kvm_iocsr_entry entry; + int i = 0; + + if (!kvm_enabled()) { + return 0; + } + + for (i = 0; i < IOCSR_MAX; i++) { + entry.addr = iocsr_array[i]; + entry.data = s->iocsr_val[i]; + kvm_vm_ioctl(kvm_state, KVM_LOONGARCH_SET_IOCSR, &entry); + } +#endif + return 0; +} + +static void iocsr_reset(DeviceState *d) +{ 
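+    /*
+     * Reset restores the firmware-visible IOCSR defaults; under KVM,
+     * kvm_iocsr_post_load() pushes them back into the kernel so
+     * subsequent guest reads return the reset values.
+     */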
+ IOCSRState *s = IOCSR(d); + int i; + + for (i = 0; i < IOCSR_MAX; i++) { + s->iocsr_val[i] = iocsr_init.iocsr_val[i]; + } + kvm_iocsr_post_load(s, 0); +} + +static void init_vendor_cpuname(uint64_t *vendor, uint64_t *cpu_name, + char *cpuname) +{ + int i = 0, len = 0; + char *index = NULL, *index_end = NULL; + char *vendor_c = (char *)vendor; + char *cpu_name_c = (char *)cpu_name; + + index = strstr(cpuname, "-"); + len = strlen(cpuname); + if ((index == NULL) || (len <= 0)) { + return; + } + + *vendor = 0; + *cpu_name = 0; + index_end = cpuname + len; + + while (((cpuname + i) < index) && (i < sizeof(uint64_t))) { + vendor_c[i] = cpuname[i]; + i++; + } + + index += 1; + i = 0; + + while (((index + i) < index_end) && (i < sizeof(uint64_t))) { + cpu_name_c[i] = index[i]; + i++; + } + + return; +} + +static void iocsr_instance_init(Object *obj) +{ + IOCSRState *s = IOCSR(obj); + int i; + LoongarchMachineState *lsms; + LoongarchMachineClass *lsmc; + Object *machine = qdev_get_machine(); + ObjectClass *mc = object_get_class(machine); + + /* 'lams' should be initialized */ + if (!strcmp(MACHINE_CLASS(mc)->name, "none")) { + return; + } + + lsms = LoongarchMACHINE(machine); + lsmc = LoongarchMACHINE_GET_CLASS(lsms); + + init_vendor_cpuname((uint64_t *)&iocsr_init.iocsr_val[IOCSR_VENDOR], + (uint64_t *)&iocsr_init.iocsr_val[IOCSR_CPUNAME], + lsmc->cpu_name); + + for (i = 0; i < IOCSR_MAX; i++) { + s->iocsr_val[i] = iocsr_init.iocsr_val[i]; + } +} + +static const VMStateDescription vmstate_iocsr = { + .name = TYPE_IOCSR, + .version_id = 1, + .minimum_version_id = 1, + .pre_save = kvm_iocsr_pre_save, + .post_load = kvm_iocsr_post_load, + .fields = (VMStateField[]){ VMSTATE_UINT64_ARRAY(iocsr_val, IOCSRState, + IOCSR_MAX), + VMSTATE_END_OF_LIST() } +}; + +static void iocsr_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = iocsr_reset; + dc->vmsd = &vmstate_iocsr; +} + +static const TypeInfo iocsr_info = { + .name = TYPE_IOCSR, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(IOCSRState), + .instance_init = iocsr_instance_init, + .class_init = iocsr_class_init, +}; + +static void iocsr_register_types(void) +{ + type_register_static(&iocsr_info); +} + +type_init(iocsr_register_types) diff --git a/hw/loongarch/ipi.c b/hw/loongarch/ipi.c new file mode 100644 index 0000000000000000000000000000000000000000..affa97392e86df66c277c7a65d5a931b26da2eee --- /dev/null +++ b/hw/loongarch/ipi.c @@ -0,0 +1,284 @@ +/* + * LOONGARCH IPI support + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/units.h"
+#include "qapi/error.h"
+#include "hw/hw.h"
+#include "hw/irq.h"
+#include "hw/loongarch/cpudevs.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/cpus.h"
+#include "sysemu/kvm.h"
+#include "hw/core/cpu.h"
+#include "qemu/log.h"
+#include "hw/loongarch/bios.h"
+#include "elf.h"
+#include "linux/kvm.h"
+#include "hw/loongarch/larch.h"
+#include "hw/loongarch/ls7a.h"
+#include "migration/vmstate.h"
+
+static int gipi_pre_save(void *opaque)
+{
+#ifdef CONFIG_KVM
+    gipiState *state = opaque;
+    struct loongarch_gipiState *kstate;
+    struct loongarch_kvm_irqchip *chip;
+    int ret, i, j, length;
+#endif
+
+    if ((!kvm_enabled()) || (!kvm_irqchip_in_kernel())) {
+        return 0;
+    }
+
+#ifdef CONFIG_KVM
+    length = sizeof(struct loongarch_kvm_irqchip) +
+             sizeof(struct loongarch_gipiState);
+    chip = g_malloc0(length);
+    memset(chip, 0, length);
+    chip->chip_id = KVM_IRQCHIP_LS3A_GIPI;
+    chip->len = length;
+    ret = kvm_vm_ioctl(kvm_state, KVM_GET_IRQCHIP, chip);
+    if (ret < 0) {
+        fprintf(stderr, "KVM_GET_IRQCHIP failed: %s\n", strerror(ret));
+        abort();
+    }
+
+    kstate = (struct loongarch_gipiState *)chip->data;
+
+    for (i = 0; i < MAX_GIPI_CORE_NUM; i++) {
+        state->core[i].status = kstate->core[i].status;
+        state->core[i].en = kstate->core[i].en;
+        state->core[i].set = kstate->core[i].set;
+        state->core[i].clear = kstate->core[i].clear;
+        for (j = 0; j < MAX_GIPI_MBX_NUM; j++) {
+            state->core[i].buf[j] = kstate->core[i].buf[j];
+        }
+    }
+    g_free(chip);
+#endif
+
+    return 0;
+}
+
+static int gipi_post_load(void *opaque, int version)
+{
+#ifdef CONFIG_KVM
+    gipiState *state = opaque;
+    struct loongarch_gipiState *kstate;
+    struct loongarch_kvm_irqchip *chip;
+    int ret, i, j, length;
+#endif
+
+    if ((!kvm_enabled()) || (!kvm_irqchip_in_kernel())) {
+        return 0;
+    }
+
+#ifdef CONFIG_KVM
+    length = sizeof(struct loongarch_kvm_irqchip) +
+             sizeof(struct loongarch_gipiState);
+    chip = g_malloc0(length);
+    memset(chip, 0, length);
+    chip->chip_id = KVM_IRQCHIP_LS3A_GIPI;
+    chip->len = length;
+    kstate = (struct loongarch_gipiState *)chip->data;
+
+    for (i = 0; i < MAX_GIPI_CORE_NUM; i++) {
+        kstate->core[i].status = state->core[i].status;
+        kstate->core[i].en = state->core[i].en;
+        kstate->core[i].set = state->core[i].set;
+        kstate->core[i].clear = state->core[i].clear;
+        for (j = 0; j < MAX_GIPI_MBX_NUM; j++) {
+            kstate->core[i].buf[j] = state->core[i].buf[j];
+        }
+    }
+
+    ret = kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip);
+    if (ret < 0) {
+        fprintf(stderr, "KVM_SET_IRQCHIP failed: %s\n", strerror(ret));
+        abort();
+    }
+    g_free(chip);
+#endif
+    return 0;
+}
+
+static const VMStateDescription vmstate_gipi_core = {
+    .name = "gipi-single",
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]){
+        VMSTATE_UINT32(status, gipi_core),
+        VMSTATE_UINT32(en, gipi_core),
+        VMSTATE_UINT32(set, gipi_core),
+        VMSTATE_UINT32(clear, gipi_core),
+        VMSTATE_UINT64_ARRAY(buf, gipi_core, MAX_GIPI_MBX_NUM),
+        VMSTATE_END_OF_LIST() }
+};
+
+static const VMStateDescription vmstate_gipi = {
+    .name = "gipi",
+    .pre_save = gipi_pre_save,
+    .post_load = gipi_post_load,
+    .version_id = 0,
+    .minimum_version_id = 0,
+    .fields = (VMStateField[]){
+        VMSTATE_STRUCT_ARRAY(core, gipiState, MAX_GIPI_CORE_NUM, 0,
+                             vmstate_gipi_core, gipi_core),
+        VMSTATE_END_OF_LIST() }
+};
+
+static void gipi_writel(void *opaque, hwaddr addr, uint64_t val, unsigned size)
+{
+    gipi_core *s = opaque;
+    gipi_core *ss;
+    void *pbuf;
+    uint32_t cpu, action_data, mailaddr;
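+    /*
+     * Register dispatch below: STATUS is read-only, EN is simply
+     * latched for read-back, IPI_SEND and MAIL_SEND address a peer
+     * core through bits [25:16] of the value, and CLEAR xors the
+     * written bits out of status, lowering the IRQ line once no
+     * bits remain set.
+     */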
+    LoongarchMachineState *ms = LoongarchMACHINE(qdev_get_machine());
+
+    if ((size != 4) && (size != 8)) {
+        hw_error("gipi: invalid access size %u\n", size);
+    }
+    addr &= 0xff;
+    switch (addr) {
+    case CORE0_STATUS_OFF:
+        hw_error("CORE0_STATUS_OFF cannot be written\n");
+        break;
+    case CORE0_EN_OFF:
+        s->en = val;
+        break;
+    case CORE0_IPI_SEND:
+        cpu = (val >> 16) & 0x3ff;
+        action_data = 1UL << (val & 0x1f);
+        ss = &ms->gipi->core[cpu];
+        ss->status |= action_data;
+        if (ss->status != 0) {
+            qemu_irq_raise(ss->irq);
+        }
+        break;
+    case CORE0_MAIL_SEND:
+        cpu = (val >> 16) & 0x3ff;
+        mailaddr = (val >> 2) & 0x7;
+        ss = &ms->gipi->core[cpu];
+        pbuf = (void *)ss->buf + mailaddr * 4;
+        *(unsigned int *)pbuf = (val >> 32);
+        break;
+    case CORE0_SET_OFF:
+        hw_error("CORE0_SET_OFF cannot be written\n");
+        break;
+    case CORE0_CLEAR_OFF:
+        s->status ^= val;
+        if (s->status == 0) {
+            qemu_irq_lower(s->irq);
+        }
+        break;
+    case 0x20 ... 0x3c:
+        pbuf = (void *)s->buf + (addr - 0x20);
+        if (size == 1) {
+            *(unsigned char *)pbuf = (unsigned char)val;
+        } else if (size == 2) {
+            *(unsigned short *)pbuf = (unsigned short)val;
+        } else if (size == 4) {
+            *(unsigned int *)pbuf = (unsigned int)val;
+        } else if (size == 8) {
+            *(unsigned long *)pbuf = (unsigned long)val;
+        }
+        break;
+    default:
+        break;
+    }
+}
+
+static uint64_t gipi_readl(void *opaque, hwaddr addr, unsigned size)
+{
+    gipi_core *s = opaque;
+    uint64_t ret = 0;
+    void *pbuf;
+
+    addr &= 0xff;
+    if ((size != 4) && (size != 8)) {
+        hw_error("gipi: invalid access size %u\n", size);
+    }
+    switch (addr) {
+    case CORE0_STATUS_OFF:
+        ret = s->status;
+        break;
+    case CORE0_EN_OFF:
+        ret = s->en;
+        break;
+    case CORE0_SET_OFF:
+        ret = 0;
+        break;
+    case CORE0_CLEAR_OFF:
+        ret = 0;
+        break;
+    case 0x20 ... 0x3c:
+        pbuf = (void *)s->buf + (addr - 0x20);
+        if (size == 1) {
+            ret = *(unsigned char *)pbuf;
+        } else if (size == 2) {
+            ret = *(unsigned short *)pbuf;
+        } else if (size == 4) {
+            ret = *(unsigned int *)pbuf;
+        } else if (size == 8) {
+            ret = *(unsigned long *)pbuf;
+        }
+        break;
+    default:
+        break;
+    }
+
+    return ret;
+}
+
+static const MemoryRegionOps gipi_ops = {
+    .read = gipi_readl,
+    .write = gipi_writel,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 8,
+    },
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+int cpu_init_ipi(LoongarchMachineState *ms, qemu_irq parent, int cpu)
+{
+    hwaddr addr;
+    MemoryRegion *region;
+    char str[32];
+
+    if (ms->gipi == NULL) {
+        ms->gipi = g_malloc0(sizeof(gipiState));
+        vmstate_register(NULL, 0, &vmstate_gipi, ms->gipi);
+    }
+
+    ms->gipi->core[cpu].irq = parent;
+
+    addr = SMP_GIPI_MAILBOX | (cpu << 8);
+    region = g_new(MemoryRegion, 1);
+    sprintf(str, "gipi%d", cpu);
+    memory_region_init_io(region, NULL, &gipi_ops, &ms->gipi->core[cpu], str,
+                          0x100);
+    memory_region_add_subregion(get_system_memory(), addr, region);
+    return 0;
+}
diff --git a/hw/loongarch/larch_3a.c b/hw/loongarch/larch_3a.c
new file mode 100644
index 0000000000000000000000000000000000000000..e2317ba581842a7c3cb76778fc6ae0a78eda9d85
--- /dev/null
+++ b/hw/loongarch/larch_3a.c
@@ -0,0 +1,2049 @@
+/*
+ * QEMU loongarch 3a development board emulation
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu/units.h" +#include "qapi/error.h" +#include "qemu/datadir.h" +#include "hw/hw.h" +#include "hw/loongarch/cpudevs.h" +#include "hw/i386/pc.h" +#include "hw/char/serial.h" +#include "hw/isa/isa.h" +#include "hw/qdev-core.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" +#include "sysemu/reset.h" +#include "migration/vmstate.h" +#include "sysemu/cpus.h" +#include "hw/boards.h" +#include "qemu/log.h" +#include "hw/loongarch/bios.h" +#include "hw/loader.h" +#include "elf.h" +#include "exec/address-spaces.h" +#include "hw/ide.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/msi.h" +#include "linux/kvm.h" +#include "sysemu/kvm.h" +#include "sysemu/numa.h" +#include "hw/rtc/mc146818rtc.h" +#include "hw/irq.h" +#include "net/net.h" +#include "hw/platform-bus.h" +#include "hw/timer/i8254.h" +#include "hw/loongarch/larch.h" +#include "hw/loongarch/ls7a.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/firmware/smbios.h" +#include "acpi-build.h" +#include +#include +#include "sysemu/block-backend.h" +#include "hw/block/flash.h" +#include "sysemu/device_tree.h" +#include "qapi/visitor.h" +#include "qapi/qapi-visit-common.h" +#include "sysemu/tpm.h" +#include "hw/loongarch/sysbus-fdt.h" + +#include + +#define DMA64_SUPPORTED 0x2 +#define MAX_IDE_BUS 2 + +#define BOOTPARAM_PHYADDR 0x0ff00000ULL +#define BOOTPARAM_ADDR (0x9000000000000000ULL + BOOTPARAM_PHYADDR) +#define SMBIOS_PHYSICAL_ADDRESS 0x0fe00000 +#define SMBIOS_SIZE_LIMIT 0x200000 +#define RESERVED_SIZE_LIMIT 0x1100000 +#define COMMAND_LINE_SIZE 4096 +#define FW_CONF_ADDR 0x0fff0000 + +#define PHYS_TO_VIRT(x) ((x) | 0x9000000000000000ULL) + +#define TARGET_REALPAGE_MASK (TARGET_PAGE_MASK << 2) + +#ifdef CONFIG_KVM +#define align(x) (((x) + 63) & ~63) +#else +#define align(x) (((x) + 15) & ~15) +#endif + +#define DEBUG_LOONGARCH3A 0 +#define FLASH_SECTOR_SIZE 4096 + +#define DPRINTF(fmt, ...) \ + do { \ + if (DEBUG_LOONGARCH3A) { \ + fprintf(stderr, fmt, ##__VA_ARGS__); \ + } \ + } while (0) + +#define DEFINE_LS3A5K_MACHINE(suffix, name, optionfn) \ + static void ls3a5k_init_##suffix(MachineState *machine) \ + { \ + ls3a5k_init(machine); \ + } \ + DEFINE_LOONGARCH_MACHINE(suffix, name, ls3a5k_init_##suffix, optionfn) + +struct efi_memory_map_loongarch { + uint16_t vers; /* version of efi_memory_map */ + uint32_t nr_map; /* number of memory_maps */ + uint32_t mem_freq; /* memory frequence */ + struct mem_map { + uint32_t node_id; /* node_id which memory attached to */ + uint32_t mem_type; /* system memory, pci memory, pci io, etc. 
*/ + uint64_t mem_start; /* memory map start address */ + uint32_t mem_size; /* each memory_map size, not the total size */ + } map[128]; +} __attribute__((packed)); + +enum loongarch_cpu_type { Loongson3 = 0x1, Loongson3_comp = 0x2 }; + +struct GlobalProperty loongarch_compat[] = { + { + .driver = "rtl8139", + .property = "romfile", + .value = "", + }, + { + .driver = "e1000", + .property = "romfile", + .value = "", + }, + { + .driver = "virtio-net-pci", + .property = "romfile", + .value = "", + }, + { + .driver = "qxl-vga", + .property = "romfile", + .value = "", + }, + { + .driver = "VGA", + .property = "romfile", + .value = "", + }, + { + .driver = "cirrus-vga", + .property = "romfile", + .value = "", + }, + { + .driver = "virtio-vga", + .property = "romfile", + .value = "", + }, + { + .driver = "vmware-svga", + .property = "romfile", + .value = "", + }, +}; +const size_t loongarch_compat_len = G_N_ELEMENTS(loongarch_compat); + +/* + * Capability and feature descriptor structure for LOONGARCH CPU + */ +struct efi_cpuinfo_loongarch { + uint16_t vers; /* version of efi_cpuinfo_loongarch */ + uint32_t processor_id; /* PRID, e.g. 6305, 6306 */ + enum loongarch_cpu_type cputype; /* 3A, 3B, etc. */ + uint32_t total_node; /* num of total numa nodes */ + uint16_t cpu_startup_core_id; /* Core id */ + uint16_t reserved_cores_mask; + uint32_t cpu_clock_freq; /* cpu_clock */ + uint32_t nr_cpus; +} __attribute__((packed)); + +#define MAX_UARTS 64 +struct uart_device { + uint32_t iotype; /* see include/linux/serial_core.h */ + uint32_t uartclk; + uint32_t int_offset; + uint64_t uart_base; +} __attribute__((packed)); + +#define MAX_SENSORS 64 +#define SENSOR_TEMPER 0x00000001 +#define SENSOR_VOLTAGE 0x00000002 +#define SENSOR_FAN 0x00000004 +struct sensor_device { + char name[32]; /* a formal name */ + char label[64]; /* a flexible description */ + uint32_t type; /* SENSOR_* */ + uint32_t id; /* instance id of a sensor-class */ + /* + * see arch/loongarch/include/ + * asm/mach-loongarch/loongarch_hwmon.h + */ + uint32_t fan_policy; + uint32_t fan_percent; /* only for constant speed policy */ + uint64_t base_addr; /* base address of device registers */ +} __attribute__((packed)); + +struct system_loongarch { + uint16_t vers; /* version of system_loongarch */ + uint32_t ccnuma_smp; /* 0: no numa; 1: has numa */ + uint32_t sing_double_channel; /* 1:single; 2:double */ + uint32_t nr_uarts; + struct uart_device uarts[MAX_UARTS]; + uint32_t nr_sensors; + struct sensor_device sensors[MAX_SENSORS]; + char has_ec; + char ec_name[32]; + uint64_t ec_base_addr; + char has_tcm; + char tcm_name[32]; + uint64_t tcm_base_addr; + uint64_t workarounds; /* see workarounds.h */ +} __attribute__((packed)); + +struct irq_source_routing_table { + uint16_t vers; + uint16_t size; + uint16_t rtr_bus; + uint16_t rtr_devfn; + uint32_t vendor; + uint32_t device; + uint32_t PIC_type; /* conform use HT or PCI to route to CPU-PIC */ + uint64_t ht_int_bit; /* 3A: 1<<24; 3B: 1<<16 */ + uint64_t ht_enable; /* irqs used in this PIC */ + uint32_t node_id; /* node id: 0x0-0; 0x1-1; 0x10-2; 0x11-3 */ + uint64_t pci_mem_start_addr; + uint64_t pci_mem_end_addr; + uint64_t pci_io_start_addr; + uint64_t pci_io_end_addr; + uint64_t pci_config_addr; + uint32_t dma_mask_bits; + uint16_t dma_noncoherent; +} __attribute__((packed)); + +struct interface_info { + uint16_t vers; /* version of the specificition */ + uint16_t size; + uint8_t flag; + char description[64]; +} __attribute__((packed)); + +#define MAX_RESOURCE_NUMBER 128 +struct 
resource_loongarch { + uint64_t start; /* resource start address */ + uint64_t end; /* resource end address */ + char name[64]; + uint32_t flags; +}; + +struct archdev_data { +}; /* arch specific additions */ + +struct board_devices { + char name[64]; /* hold the device name */ + uint32_t num_resources; /* number of device_resource */ + /* for each device's resource */ + struct resource_loongarch resource[MAX_RESOURCE_NUMBER]; + /* arch specific additions */ + struct archdev_data archdata; +}; + +struct loongarch_special_attribute { + uint16_t vers; /* version of this special */ + char special_name[64]; /* special_atribute_name */ + uint32_t loongarch_special_type; /* type of special device */ + /* for each device's resource */ + struct resource_loongarch resource[MAX_RESOURCE_NUMBER]; +}; + +struct loongarch_params { + uint64_t memory_offset; /* efi_memory_map_loongarch struct offset */ + uint64_t cpu_offset; /* efi_cpuinfo_loongarch struct offset */ + uint64_t system_offset; /* system_loongarch struct offset */ + uint64_t irq_offset; /* irq_source_routing_table struct offset */ + uint64_t interface_offset; /* interface_info struct offset */ + uint64_t special_offset; /* loongarch_special_attribute struct offset */ + uint64_t boarddev_table_offset; /* board_devices offset */ +}; + +struct smbios_tables { + uint16_t vers; /* version of smbios */ + uint64_t vga_bios; /* vga_bios address */ + struct loongarch_params lp; +}; + +struct efi_reset_system_t { + uint64_t ResetCold; + uint64_t ResetWarm; + uint64_t ResetType; + uint64_t Shutdown; + uint64_t DoSuspend; /* NULL if not support */ +}; + +struct efi_loongarch { + uint64_t mps; /* MPS table */ + uint64_t acpi; /* ACPI table (IA64 ext 0.71) */ + uint64_t acpi20; /* ACPI table (ACPI 2.0) */ + struct smbios_tables smbios; /* SM BIOS table */ + uint64_t sal_systab; /* SAL system table */ + uint64_t boot_info; /* boot info table */ +}; + +struct boot_params { + struct efi_loongarch efi; + struct efi_reset_system_t reset_system; +}; + +static struct _loaderparams { + unsigned long ram_size; + const char *kernel_filename; + const char *kernel_cmdline; + const char *initrd_filename; + unsigned long a0, a1, a2; +} loaderparams; + +static struct _firmware_config { + unsigned long ram_size; + unsigned int mem_freq; + unsigned int cpu_nr; + unsigned int cpu_clock_freq; +} fw_config; + +struct la_memmap_entry { + uint64_t address; + uint64_t length; + uint32_t type; + uint32_t reserved; +}; + +static void *boot_params_buf; +static void *boot_params_p; +static struct la_memmap_entry *la_memmap_table; +static unsigned la_memmap_entries; + +CPULOONGARCHState *cpu_states[LOONGARCH_MAX_VCPUS]; + +struct kvm_cpucfg ls3a5k_cpucfgs = { + .cpucfg[LOONGARCH_CPUCFG0] = CPUCFG0_3A5000_PRID, + .cpucfg[LOONGARCH_CPUCFG1] = + CPUCFG1_ISGR64 | CPUCFG1_PAGING | CPUCFG1_IOCSR | CPUCFG1_PABITS | + CPUCFG1_VABITS | CPUCFG1_UAL | CPUCFG1_RI | CPUCFG1_XI | CPUCFG1_RPLV | + CPUCFG1_HUGEPG | CPUCFG1_IOCSRBRD, + .cpucfg[LOONGARCH_CPUCFG2] = + CPUCFG2_FP | CPUCFG2_FPSP | CPUCFG2_FPDP | CPUCFG2_FPVERS | + CPUCFG2_LSX | CPUCFG2_LASX | CPUCFG2_COMPLEX | CPUCFG2_CRYPTO | + CPUCFG2_LLFTP | CPUCFG2_LLFTPREV | CPUCFG2_X86BT | CPUCFG2_ARMBT | + CPUCFG2_MIPSBT | CPUCFG2_LSPW | CPUCFG2_LAM, + .cpucfg[LOONGARCH_CPUCFG3] = + CPUCFG3_CCDMA | CPUCFG3_SFB | CPUCFG3_UCACC | CPUCFG3_LLEXC | + CPUCFG3_SCDLY | CPUCFG3_LLDBAR | CPUCFG3_ITLBT | CPUCFG3_ICACHET | + CPUCFG3_SPW_LVL | CPUCFG3_SPW_HG_HF | CPUCFG3_RVA | CPUCFG3_RVAMAX, + .cpucfg[LOONGARCH_CPUCFG4] = CCFREQ_100M, + 
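+    /*
+     * CPUCFG4 advertises the constant-counter base frequency (100MHz,
+     * per CCFREQ_100M); CPUCFG5 supplies the matching multiplier and
+     * divisor fields of the LoongArch CPUCFG layout.
+     */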
.cpucfg[LOONGARCH_CPUCFG5] = CPUCFG5_CCMUL | CPUCFG5_CCDIV, + .cpucfg[LOONGARCH_CPUCFG6] = CPUCFG6_PMP | CPUCFG6_PAMVER | CPUCFG6_PMNUM | + CPUCFG6_PMBITS | CPUCFG6_UPM, + .cpucfg[LOONGARCH_CPUCFG16] = CPUCFG16_L1_IUPRE | CPUCFG16_L1_DPRE | + CPUCFG16_L2_IUPRE | CPUCFG16_L2_IUUNIFY | + CPUCFG16_L2_IUPRIV | CPUCFG16_L3_IUPRE | + CPUCFG16_L3_IUUNIFY | CPUCFG16_L3_IUINCL, + .cpucfg[LOONGARCH_CPUCFG17] = + CPUCFG17_L1I_WAYS_M | CPUCFG17_L1I_SETS_M | CPUCFG17_L1I_SIZE_M, + .cpucfg[LOONGARCH_CPUCFG18] = + CPUCFG18_L1D_WAYS_M | CPUCFG18_L1D_SETS_M | CPUCFG18_L1D_SIZE_M, + .cpucfg[LOONGARCH_CPUCFG19] = + CPUCFG19_L2_WAYS_M | CPUCFG19_L2_SETS_M | CPUCFG19_L2_SIZE_M, + .cpucfg[LOONGARCH_CPUCFG20] = + CPUCFG20_L3_WAYS_M | CPUCFG20_L3_SETS_M | CPUCFG20_L3_SIZE_M, +}; + +bool loongarch_is_acpi_enabled(LoongarchMachineState *vms) +{ + if (vms->acpi == ON_OFF_AUTO_OFF) { + return false; + } + return true; +} + +static void loongarch_get_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LoongarchMachineState *lsms = LoongarchMACHINE(obj); + OnOffAuto acpi = lsms->acpi; + + visit_type_OnOffAuto(v, name, &acpi, errp); +} + +static void loongarch_set_acpi(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + LoongarchMachineState *lsms = LoongarchMACHINE(obj); + + visit_type_OnOffAuto(v, name, &lsms->acpi, errp); +} + +int la_memmap_add_entry(uint64_t address, uint64_t length, uint32_t type) +{ + int i; + + for (i = 0; i < la_memmap_entries; i++) { + if (la_memmap_table[i].address == address) { + fprintf(stderr, "%s address:0x%lx length:0x%lx already exists\n", + __func__, address, length); + return 0; + } + } + + la_memmap_table = g_renew(struct la_memmap_entry, la_memmap_table, + la_memmap_entries + 1); + la_memmap_table[la_memmap_entries].address = cpu_to_le64(address); + la_memmap_table[la_memmap_entries].length = cpu_to_le64(length); + la_memmap_table[la_memmap_entries].type = cpu_to_le32(type); + la_memmap_entries++; + + return la_memmap_entries; +} + +static ram_addr_t get_hotplug_membase(ram_addr_t ram_size) +{ + ram_addr_t sstart; + + if (ram_size <= 0x10000000) { + sstart = 0x90000000; + } else { + sstart = 0x90000000 + ROUND_UP((ram_size - 0x10000000), + LOONGARCH_HOTPLUG_MEM_ALIGN); + } + return sstart; +} + +static struct efi_memory_map_loongarch *init_memory_map(void *g_map) +{ + struct efi_memory_map_loongarch *emap = g_map; + + emap->nr_map = 4; + emap->mem_freq = 266000000; + + emap->map[0].node_id = 0; + emap->map[0].mem_type = 1; + emap->map[0].mem_start = 0x0; +#ifdef CONFIG_KVM + emap->map[0].mem_size = + (loaderparams.ram_size > 0x10000000 ? 256 + : (loaderparams.ram_size >> 20)) - + 18; +#else + emap->map[0].mem_size = atoi(getenv("memsize")); +#endif + + emap->map[1].node_id = 0; + emap->map[1].mem_type = 2; + emap->map[1].mem_start = 0x90000000; +#ifdef CONFIG_KVM + emap->map[1].mem_size = (loaderparams.ram_size > 0x10000000 + ? 
(loaderparams.ram_size >> 20) - 256
+                                 : 0);
+#else
+    emap->map[1].mem_size = atoi(getenv("highmemsize"));
+#endif
+
+    /* support for smbios */
+    emap->map[2].node_id = 0;
+    emap->map[2].mem_type = 10;
+    emap->map[2].mem_start = SMBIOS_PHYSICAL_ADDRESS;
+    emap->map[2].mem_size = SMBIOS_SIZE_LIMIT >> 20;
+
+    emap->map[3].node_id = 0;
+    emap->map[3].mem_type = 3;
+    emap->map[3].mem_start = 0xee00000;
+    emap->map[3].mem_size = RESERVED_SIZE_LIMIT >> 20;
+
+    return emap;
+}
+
+static uint64_t get_host_cpu_freq(void)
+{
+    int fd = 0;
+    char buf[1024];
+    uint64_t freq = 0;
+    ssize_t size = 0;
+    char *buf_p;
+
+    fd = open("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+              O_RDONLY);
+    if (fd == -1) {
+        fprintf(stderr, "/sys/devices/system/cpu/cpu0/cpufreq/"
+                        "cpuinfo_max_freq does not exist!\n");
+        fprintf(stderr, "Trying /proc/cpuinfo...\n");
+    } else {
+        size = read(fd, buf, 16);
+        if (size < 0) {
+            fprintf(stderr, "read error\n");
+            size = 0;
+        }
+        close(fd);
+        buf[size] = '\0';
+        freq = (uint64_t)atoi(buf);
+        return freq * 1000;
+    }
+
+    fd = open("/proc/cpuinfo", O_RDONLY);
+    if (fd == -1) {
+        fprintf(stderr, "Failed to open /proc/cpuinfo!\n");
+        return 0;
+    }
+
+    size = read(fd, buf, sizeof(buf) - 1);
+    if (size < 0) {
+        fprintf(stderr, "read error\n");
+        size = 0;
+    }
+    close(fd);
+    buf[size] = '\0';
+
+    buf_p = strstr(buf, "MHz");
+    if (buf_p) {
+        while (*buf_p != ':') {
+            buf_p++;
+        }
+        buf_p += 2;
+    } else {
+        buf_p = strstr(buf, "name");
+        if (!buf_p) {
+            return 0;
+        }
+        while (*buf_p != '@') {
+            buf_p++;
+        }
+        buf_p += 2;
+    }
+
+    /* source and destination overlap inside buf, so use memmove */
+    memmove(buf, buf_p, 12);
+    buf_p = buf;
+    while ((*buf_p >= '0') && (*buf_p <= '9')) {
+        buf_p++;
+    }
+    *buf_p = '\0';
+
+    freq = (uint64_t)atoi(buf);
+    return freq * 1000 * 1000;
+}
+
+static char *get_host_cpu_model_name(void)
+{
+    int fd = 0;
+    ssize_t size = 0;
+    static char buf[1024];
+    char *buf_p;
+
+    fd = open("/proc/cpuinfo", O_RDONLY);
+    if (fd == -1) {
+        fprintf(stderr, "Failed to open /proc/cpuinfo!\n");
+        return NULL;
+    }
+
+    size = read(fd, buf, sizeof(buf) - 1);
+    if (size < 0) {
+        fprintf(stderr, "read error\n");
+        size = 0;
+    }
+    close(fd);
+    buf[size] = '\0';
+    buf_p = strstr(buf, "Name");
+    if (!buf_p) {
+        buf_p = strstr(buf, "name");
+    }
+    if (!buf_p) {
+        fprintf(stderr, "Can't find cpu name\n");
+        return NULL;
+    }
+
+    while (*buf_p != ':') {
+        buf_p++;
+    }
+    buf_p = buf_p + 2;
+    /* source and destination overlap inside buf, so use memmove */
+    memmove(buf, buf_p, 40);
+    buf_p = buf;
+    while (*buf_p != '\n') {
+        buf_p++;
+    }
+
+    *(buf_p) = '\0';
+
+    return buf;
+}
+
+static void fw_conf_init(unsigned long ramsize)
+{
+    MachineState *ms = MACHINE(qdev_get_machine());
+    int smp_cpus = ms->smp.cpus;
+
+    fw_config.ram_size = ramsize;
+    fw_config.mem_freq = 266000000;
+    fw_config.cpu_nr = smp_cpus;
+    fw_config.cpu_clock_freq = get_host_cpu_freq();
+}
+
+static struct efi_cpuinfo_loongarch *init_cpu_info(void *g_cpuinfo_loongarch)
+{
+    struct efi_cpuinfo_loongarch *c = g_cpuinfo_loongarch;
+    MachineState *ms = MACHINE(qdev_get_machine());
+    int smp_cpus = ms->smp.cpus;
+    LoongarchMachineState *lsms = LoongarchMACHINE(qdev_get_machine());
+    LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+
+    if (strstr(lsmc->cpu_name, "5000")) {
+        c->processor_id = 0x14c010;
+    }
+    c->cputype = Loongson3_comp;
+    c->cpu_clock_freq = get_host_cpu_freq();
+    if (!c->cpu_clock_freq) {
+        c->cpu_clock_freq = 200000000;
+    }
+    c->total_node = 1;
+    c->nr_cpus = smp_cpus;
+    c->cpu_startup_core_id = 0;
+    c->reserved_cores_mask = 0xffff & (0xffff << smp_cpus);
+
+    return c;
+}
+
+static struct system_loongarch *init_system_loongarch(void *g_sysitem)
+{
+    struct system_loongarch *s = g_sysitem;
+
+    s->ccnuma_smp = 0;
+    s->sing_double_channel = 1;
+
+    return s;
+}
+
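+/*
+ * Illustrative sketch (not part of the original patch): the 256 MiB
+ * low/high RAM split computed above is open-coded again in
+ * set_bootparam_uefi() and ls3a5k_init() below.  Written as helpers
+ * (hypothetical names, sizes in MiB), the shared rule is:
+ *
+ *     static unsigned long low_mem_mb(unsigned long ram_size)
+ *     {
+ *         return ram_size > 0x10000000 ? 256 : ram_size >> 20;
+ *     }
+ *
+ *     static unsigned long high_mem_mb(unsigned long ram_size)
+ *     {
+ *         return ram_size > 0x10000000 ? (ram_size >> 20) - 256 : 0;
+ *     }
+ *
+ * e.g. a 4 GiB guest gets 256 MiB mapped at 0x0 and 3840 MiB at
+ * 0x90000000, matching init_memory_map() above.
+ */
+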
+enum loongarch_irq_source_enum { HT, I8259, UNKNOWN }; + +static struct irq_source_routing_table *init_irq_source(void *g_irq_source) +{ + struct irq_source_routing_table *irq_info = g_irq_source; + LoongarchMachineState *lsms = LoongarchMACHINE(qdev_get_machine()); + LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms); + + irq_info->PIC_type = HT; + irq_info->ht_int_bit = 1 << 24; + irq_info->ht_enable = 0x0000d17b; + irq_info->node_id = 0; + + irq_info->pci_mem_start_addr = PCIE_MEMORY_BASE; + irq_info->pci_mem_end_addr = + irq_info->pci_mem_start_addr + PCIE_MEMORY_SIZE - 1; + + if (strstr(lsmc->cpu_name, "5000")) { + irq_info->pci_io_start_addr = LS3A5K_ISA_IO_BASE; + } + irq_info->dma_noncoherent = 1; + return irq_info; +} + +static struct interface_info *init_interface_info(void *g_interface) +{ + struct interface_info *inter = g_interface; + int flashsize = 0x80000; + + inter->vers = 0x0001; + inter->size = flashsize / 0x400; + inter->flag = 1; + + strcpy(inter->description, "PMON_Version_v2.1"); + + return inter; +} + +static struct board_devices *board_devices_info(void *g_board) +{ + struct board_devices *bd = g_board; + LoongarchMachineState *lsms = LoongarchMACHINE(qdev_get_machine()); + LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms); + + if (!strcmp(lsmc->bridge_name, "ls7a")) { + strcpy(bd->name, "Loongarch-3A-7A-1w-V1.03-demo"); + } + bd->num_resources = 10; + + return bd; +} + +static struct loongarch_special_attribute *init_special_info(void *g_special) +{ + struct loongarch_special_attribute *special = g_special; + char update[11] = "2013-01-01"; + int VRAM_SIZE = 0x20000; + + strcpy(special->special_name, update); + special->resource[0].flags = 0; + special->resource[0].start = 0; + special->resource[0].end = VRAM_SIZE; + strcpy(special->resource[0].name, "SPMODULE"); + special->resource[0].flags |= DMA64_SUPPORTED; + + return special; +} + +static void init_loongarch_params(struct loongarch_params *lp) +{ + void *p = boot_params_p; + + lp->memory_offset = + (unsigned long long)init_memory_map(p) - (unsigned long long)lp; + p += align(sizeof(struct efi_memory_map_loongarch)); + + lp->cpu_offset = + (unsigned long long)init_cpu_info(p) - (unsigned long long)lp; + p += align(sizeof(struct efi_cpuinfo_loongarch)); + + lp->system_offset = + (unsigned long long)init_system_loongarch(p) - (unsigned long long)lp; + p += align(sizeof(struct system_loongarch)); + + lp->irq_offset = + (unsigned long long)init_irq_source(p) - (unsigned long long)lp; + p += align(sizeof(struct irq_source_routing_table)); + + lp->interface_offset = + (unsigned long long)init_interface_info(p) - (unsigned long long)lp; + p += align(sizeof(struct interface_info)); + + lp->boarddev_table_offset = + (unsigned long long)board_devices_info(p) - (unsigned long long)lp; + p += align(sizeof(struct board_devices)); + + lp->special_offset = + (unsigned long long)init_special_info(p) - (unsigned long long)lp; + p += align(sizeof(struct loongarch_special_attribute)); + + boot_params_p = p; +} + +static void init_smbios(struct smbios_tables *smbios) +{ + smbios->vers = 1; + smbios->vga_bios = 1; + init_loongarch_params(&(smbios->lp)); +} + +static void init_efi(struct efi_loongarch *efi) +{ + init_smbios(&(efi->smbios)); +} + +static int init_boot_param(struct boot_params *bp) +{ + init_efi(&(bp->efi)); + + return 0; +} + +static unsigned int ls3a5k_aui_boot_code[] = { + 0x0380200d, /* ori $r13,$r0,0x8 */ + 0x0400002d, /* csrwr $r13,0x0 */ + 0x0401000e, /* csrrd $r14,0x40 */ + 
0x0343fdce, /* andi $r14,$r14,0xff */ + 0x143fc02c, /* lu12i.w $r12,261889(0x1fe01) */ + 0x1600000c, /* lu32i.d $r12,0 */ + 0x0320018c, /* lu52i.d $r12,$r12,-1792(0x800) */ + 0x03400dcf, /* andi $r15,$r14,0x3 */ + 0x004121ef, /* slli.d $r15,$r15,0x8 */ + 0x00153d8c, /* or $r12,$r12,$r15 */ + 0x034031d0, /* andi $r16,$r14,0xc */ + 0x0041aa10, /* slli.d $r16,$r16,0x2a */ + 0x0015418c, /* or $r12,$r12,$r16 */ + 0x28808184, /* ld.w $r4,$r12,32(0x20) */ + 0x43fffc9f, /* beqz $r4,0 -4 */ + 0x28c08184, /* ld.d $r4,$r12,32(0x20) */ + 0x28c0a183, /* ld.d $r3,$r12,40(0x28) */ + 0x28c0c182, /* ld.d $r2,$r12,48(0x30) */ + 0x28c0e185, /* ld.d $r5,$r12,56(0x38) */ + 0x4c000080, /* jirl $r0,$r4,0 */ +}; + +static int set_bootparam_uefi(ram_addr_t initrd_offset, long initrd_size) +{ + long params_size; + char memenv[32]; + char highmemenv[32]; + void *params_buf; + unsigned long *parg_env; + int ret = 0; + + /* Allocate params_buf for command line. */ + params_size = 0x100000; + params_buf = g_malloc0(params_size); + + /* + * Layout of params_buf looks like this: + * argv[0], argv[1], 0, env[0], env[1], ...env[i], 0, + * argv[0]'s data, argv[1]'s data, env[0]'data, ..., env[i]'s data, 0 + */ + parg_env = (void *)params_buf; + + ret = (3 + 1) * sizeof(target_ulong); + *parg_env++ = (BOOTPARAM_ADDR + ret); + ret += (1 + snprintf(params_buf + ret, COMMAND_LINE_SIZE - ret, "g")); + + /* argv1 */ + *parg_env++ = BOOTPARAM_ADDR + ret; + if (initrd_size > 0) + ret += (1 + snprintf(params_buf + ret, COMMAND_LINE_SIZE - ret, + "rd_start=0x%llx rd_size=%li %s", + PHYS_TO_VIRT((uint32_t)initrd_offset), + initrd_size, loaderparams.kernel_cmdline)); + else + ret += (1 + snprintf(params_buf + ret, COMMAND_LINE_SIZE - ret, "%s", + loaderparams.kernel_cmdline)); + + /* argv2 */ + *parg_env++ = 0; + + /* env */ + sprintf(memenv, "%lu", + loaderparams.ram_size > 0x10000000 + ? 256 + : (loaderparams.ram_size >> 20)); + sprintf(highmemenv, "%lu", + loaderparams.ram_size > 0x10000000 + ? 
(loaderparams.ram_size >> 20) - 256
+                : 0);
+
+    setenv("memsize", memenv, 1);
+    setenv("highmemsize", highmemenv, 1);
+
+    ret = ((ret + 32) & ~31);
+
+    boot_params_buf = (void *)(params_buf + ret);
+    boot_params_p = boot_params_buf + align(sizeof(struct boot_params));
+    init_boot_param(boot_params_buf);
+    rom_add_blob_fixed("params", params_buf, params_size, BOOTPARAM_PHYADDR);
+    loaderparams.a0 = 2;
+    loaderparams.a1 = BOOTPARAM_ADDR;
+    loaderparams.a2 = BOOTPARAM_ADDR + ret;
+
+    return 0;
+}
+
+static uint64_t cpu_loongarch_virt_to_phys(void *opaque, uint64_t addr)
+{
+    return addr & 0x1fffffffll;
+}
+
+static void fw_cfg_add_kernel_info(FWCfgState *fw_cfg, uint64_t highram_size,
+                                   uint64_t phyAddr_initrd)
+{
+    int64_t entry, kernel_low, kernel_high;
+    long initrd_size = 0;
+    uint64_t initrd_offset = 0;
+    void *cmdline_buf;
+    int ret = 0;
+
+    ret = load_elf(loaderparams.kernel_filename, NULL,
+                   cpu_loongarch_virt_to_phys, NULL, (uint64_t *)&entry,
+                   (uint64_t *)&kernel_low, (uint64_t *)&kernel_high, NULL, 0,
+                   EM_LOONGARCH, 1, 0);
+
+    if (ret < 0) {
+        error_report("kernel image load error");
+        exit(1);
+    }
+
+    fw_cfg_add_i64(fw_cfg, FW_CFG_KERNEL_ENTRY, entry);
+
+    if (loaderparams.initrd_filename) {
+        initrd_size = get_image_size(loaderparams.initrd_filename);
+        if (initrd_size > 0) {
+            if (initrd_size > highram_size) {
+                error_report("initrd size is too big, it should be below %ld MB",
+                             highram_size / MiB);
+                /* prevent writing into the I/O memory address space */
+                exit(1);
+            }
+            initrd_offset =
+                (phyAddr_initrd - initrd_size) & TARGET_REALPAGE_MASK;
+            initrd_size = load_image_targphys(
+                loaderparams.initrd_filename, initrd_offset,
+                loaderparams.ram_size - initrd_offset);
+            fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_ADDR, initrd_offset);
+            fw_cfg_add_i64(fw_cfg, FW_CFG_INITRD_SIZE, initrd_size);
+        } else {
+            error_report("failed to get initrd image size");
+        }
+    }
+
+    cmdline_buf = g_malloc0(COMMAND_LINE_SIZE);
+    if (initrd_size > 0) {
+        ret = (1 + snprintf(cmdline_buf, COMMAND_LINE_SIZE,
+                            "rd_start=0x%llx rd_size=%li %s",
+                            PHYS_TO_VIRT(initrd_offset), initrd_size,
+                            loaderparams.kernel_cmdline));
+    } else {
+        ret = (1 + snprintf(cmdline_buf, COMMAND_LINE_SIZE, "%s",
+                            loaderparams.kernel_cmdline));
+    }
+
+    fw_cfg_add_i32(fw_cfg, FW_CFG_CMDLINE_SIZE, ret);
+    fw_cfg_add_string(fw_cfg, FW_CFG_CMDLINE_DATA, (const char *)cmdline_buf);
+
+    return;
+}
+
+static int64_t load_kernel(void)
+{
+    int64_t entry, kernel_low, kernel_high;
+    long initrd_size = 0;
+    ram_addr_t initrd_offset = 0;
+
+    load_elf(loaderparams.kernel_filename, NULL, cpu_loongarch_virt_to_phys,
+             NULL, (uint64_t *)&entry, (uint64_t *)&kernel_low,
+             (uint64_t *)&kernel_high, NULL, 0, EM_LOONGARCH, 1, 0);
+
+    if (loaderparams.initrd_filename) {
+        initrd_size = get_image_size(loaderparams.initrd_filename);
+
+        if (initrd_size > 0) {
+            initrd_offset = (kernel_high * 4 + ~TARGET_REALPAGE_MASK) &
+                            TARGET_REALPAGE_MASK;
+            initrd_size = load_image_targphys(
+                loaderparams.initrd_filename, initrd_offset,
+                loaderparams.ram_size - initrd_offset);
+        }
+    }
+    set_bootparam_uefi(initrd_offset, initrd_size);
+
+    return entry;
+}
+
+static void main_cpu_reset(void *opaque)
+{
+    ResetData *s = (ResetData *)opaque;
+    CPULOONGARCHState *env = &s->cpu->env;
+
+    cpu_reset(CPU(s->cpu));
+    env->active_tc.PC = s->vector;
+    env->active_tc.gpr[4] = loaderparams.a0;
+    env->active_tc.gpr[5] = loaderparams.a1;
+    env->active_tc.gpr[6] = loaderparams.a2;
+}
+
+void slave_cpu_reset(void *opaque)
+{
+    ResetData *s = (ResetData *)opaque;
+
+    cpu_reset(CPU(s->cpu));
+}
+
+/* KVM_IRQ_LINE 
irq field index values */ +#define KVM_LOONGARCH_IRQ_TYPE_SHIFT 24 +#define KVM_LOONGARCH_IRQ_TYPE_MASK 0xff +#define KVM_LOONGARCH_IRQ_VCPU_SHIFT 16 +#define KVM_LOONGARCH_IRQ_VCPU_MASK 0xff +#define KVM_LOONGARCH_IRQ_NUM_SHIFT 0 +#define KVM_LOONGARCH_IRQ_NUM_MASK 0xffff + +/* irq_type field */ +#define KVM_LOONGARCH_IRQ_TYPE_CPU_IP 0 +#define KVM_LOONGARCH_IRQ_TYPE_CPU_IO 1 +#define KVM_LOONGARCH_IRQ_TYPE_HT 2 +#define KVM_LOONGARCH_IRQ_TYPE_MSI 3 +#define KVM_LOONGARCH_IRQ_TYPE_IOAPIC 4 + +static void legacy_set_irq(void *opaque, int irq, int level) +{ + qemu_irq *pic = opaque; + + qemu_set_irq(pic[irq], level); +} + +typedef struct ls3a_intctlstate { + uint8_t nodecounter_reg[0x100]; + uint8_t pm_reg[0x100]; + uint8_t msi_reg[0x8]; + CPULOONGARCHState **env; + DeviceState *apicdev; + qemu_irq *ioapic_irq; +#ifdef CONFIG_KVM + struct loongarch_kvm_irqchip chip; +#endif +} ls3a_intctlstate; + +typedef struct ls3a_func_args { + ls3a_intctlstate *state; + uint64_t base; + uint32_t mask; + uint8_t *mem; +} ls3a_func_args; + +static uint64_t ls3a_msi_mem_read(void *opaque, hwaddr addr, unsigned size) +{ + return 0; +} + +static void ls3a_msi_mem_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + struct kvm_msi msi; + apicState *apic; + + apic = (apicState *)opaque; + msi.address_lo = 0; + msi.address_hi = 0; + msi.data = val & 0xff; + msi.flags = 0; + memset(msi.pad, 0, sizeof(msi.pad)); + + if (kvm_irqchip_in_kernel()) { + kvm_vm_ioctl(kvm_state, KVM_SIGNAL_MSI, &msi); + } else { + qemu_set_irq(apic->irq[msi.data], 1); + } +} + +static const MemoryRegionOps ls3a_msi_ops = { + .read = ls3a_msi_mem_read, + .write = ls3a_msi_mem_write, + .endianness = DEVICE_NATIVE_ENDIAN, +}; + +static const VMStateDescription vmstate_ls3a_msi = { + .name = "ls3a-msi", + .version_id = 0, + .minimum_version_id = 0, + .fields = + (VMStateField[]){ VMSTATE_UINT8_ARRAY(msi_reg, ls3a_intctlstate, 0x8), + VMSTATE_END_OF_LIST() } +}; + +static void ioapic_handler(void *opaque, int irq, int level) +{ + apicState *apic; + int kvm_irq; + + apic = (apicState *)opaque; + + if (kvm_irqchip_in_kernel()) { + kvm_irq = + (KVM_LOONGARCH_IRQ_TYPE_IOAPIC << KVM_LOONGARCH_IRQ_TYPE_SHIFT) | + (0 << KVM_LOONGARCH_IRQ_VCPU_SHIFT) | irq; + kvm_set_irq(kvm_state, kvm_irq, !!level); + } else { + qemu_set_irq(apic->irq[irq], level); + } +} + +static void *ls3a_intctl_init(MachineState *machine, CPULOONGARCHState *env[]) +{ + qemu_irq *irqhandler; + ls3a_intctlstate *s; + LoongarchMachineState *lsms = LoongarchMACHINE(machine); + LoongarchMachineClass *mc = LoongarchMACHINE_GET_CLASS(lsms); + DeviceState *dev; + SysBusDevice *busdev; + MemoryRegion *address_space_mem = get_system_memory(); + MemoryRegion *iomem = NULL; +#ifdef CONFIG_KVM + int i; +#endif + + s = g_malloc0(sizeof(ls3a_intctlstate)); + + if (!s) { + return NULL; + } + + /*Add MSI mmio memory*/ + iomem = g_new(MemoryRegion, 1); + memory_region_init_io(iomem, NULL, &ls3a_msi_ops, lsms->apic, "ls3a_msi", + 0x8); + memory_region_add_subregion(address_space_mem, MSI_ADDR_LOW, iomem); + vmstate_register(NULL, 0, &vmstate_ls3a_msi, s); + + s->env = env; + + if (!strcmp(mc->bridge_name, "ls7a")) { + if (lsms->apic_xrupt_override) { + DPRINTF("irqchip in kernel %d\n", kvm_irqchip_in_kernel()); +#ifdef CONFIG_KVM + if (kvm_has_gsi_routing()) { + for (i = 0; i < 32; ++i) { + kvm_irqchip_add_irq_route(kvm_state, i, 0, i); + } + kvm_gsi_routing_allowed = true; + } + kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled(); +#endif + } + + irqhandler = 
qemu_allocate_irqs(ioapic_handler, lsms->apic, 64); + dev = qdev_new("ioapic"); + busdev = SYS_BUS_DEVICE(dev); + sysbus_realize_and_unref(busdev, &error_fatal); + sysbus_mmio_map(busdev, 0, mc->ls7a_ioapic_reg_base); + s->ioapic_irq = irqhandler; + s->apicdev = dev; + return s->ioapic_irq; + } + return NULL; +} + +/* Network support */ +static void network_init(PCIBus *pci_bus) +{ + int i; + + for (i = 0; i < nb_nics; i++) { + NICInfo *nd = &nd_table[i]; + + if (!nd->model) { + nd->model = g_strdup("virtio-net-pci"); + } + + pci_nic_init_nofail(nd, pci_bus, nd->model, NULL); + } +} + +void loongarch_cpu_destroy(MachineState *machine, LOONGARCHCPU *cpu) +{ + LoongarchMachineState *lsms = LoongarchMACHINE(machine); + unsigned int id; + int smp_cpus = machine->smp.cpus; + id = cpu->id; + qemu_unregister_reset(slave_cpu_reset, lsms->reset_info[id]); + g_free(lsms->reset_info[id]); + lsms->reset_info[id] = NULL; + + smp_cpus -= 1; + if (lsms->fw_cfg) { + fw_cfg_modify_i16(lsms->fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + } + + qemu_del_vm_change_state_handler(cpu->cpuStateEntry); +} + +LOONGARCHCPU *loongarch_cpu_create(MachineState *machine, LOONGARCHCPU *cpu, + Error **errp) +{ + CPULOONGARCHState *env; + unsigned int id; + LoongarchMachineState *lsms = LoongarchMACHINE(machine); + int smp_cpus = machine->smp.cpus; + id = cpu->id; + env = &cpu->env; + cpu_states[id] = env; + env->CSR_TMID |= id; + + lsms = LoongarchMACHINE(machine); + lsms->reset_info[id] = g_malloc0(sizeof(ResetData)); + lsms->reset_info[id]->cpu = cpu; + lsms->reset_info[id]->vector = env->active_tc.PC; + qemu_register_reset(slave_cpu_reset, lsms->reset_info[id]); + + /* Init CPU internal devices */ + cpu_init_irq(cpu); + cpu_loongarch_clock_init(cpu); + + smp_cpus += 1; + if (lsms->fw_cfg) { + fw_cfg_modify_i16(lsms->fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + } + cpu_init_ipi(lsms, env->irq[12], id); + cpu_init_apic(lsms, env, id); + + return cpu; +} + +static void fw_cfg_boot_set(void *opaque, const char *boot_device, + Error **errp) +{ + fw_cfg_modify_i16(opaque, FW_CFG_BOOT_DEVICE, boot_device[0]); +} + +static FWCfgState *loongarch_fw_cfg_init(ram_addr_t ram_size, + LoongarchMachineState *lsms) +{ + FWCfgState *fw_cfg; + uint64_t *numa_fw_cfg; + int i; + const CPUArchIdList *cpus; + MachineClass *mc = MACHINE_GET_CLASS(lsms); + MachineState *ms = MACHINE(OBJECT(lsms)); + int max_cpus = ms->smp.max_cpus; + int smp_cpus = ms->smp.cpus; + int nb_numa_nodes = ms->numa_state->num_nodes; + NodeInfo *numa_info = ms->numa_state->nodes; + + fw_cfg = fw_cfg_init_mem_wide(FW_CFG_ADDR + 8, FW_CFG_ADDR, 8, 0, NULL); + fw_cfg_add_i16(fw_cfg, FW_CFG_MAX_CPUS, (uint16_t)max_cpus); + fw_cfg_add_i64(fw_cfg, FW_CFG_RAM_SIZE, (uint64_t)ram_size); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, (uint16_t)smp_cpus); + + /* + * allocate memory for the NUMA channel: one (64bit) word for the number + * of nodes, one word for each VCPU->node and one word for each node to + * hold the amount of memory. 
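+ *
+ * For example, with max_cpus = 4 and two NUMA nodes the blob holds
+ * 1 + 4 + 2 = 7 little-endian 64-bit words:
+ *   [2][node(cpu0)]..[node(cpu3)][mem(node0)][mem(node1)]
+ * (illustrative layout, derived from the loop below; not in the
+ * original comment).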
+ */ + numa_fw_cfg = g_new0(uint64_t, 1 + max_cpus + nb_numa_nodes); + numa_fw_cfg[0] = cpu_to_le64(nb_numa_nodes); + cpus = mc->possible_cpu_arch_ids(MACHINE(lsms)); + for (i = 0; i < cpus->len; i++) { + unsigned int apic_id = cpus->cpus[i].arch_id; + assert(apic_id < max_cpus); + numa_fw_cfg[apic_id + 1] = cpu_to_le64(cpus->cpus[i].props.node_id); + } + for (i = 0; i < nb_numa_nodes; i++) { + numa_fw_cfg[max_cpus + 1 + i] = cpu_to_le64(numa_info[i].node_mem); + } + fw_cfg_add_bytes(fw_cfg, FW_CFG_NUMA, numa_fw_cfg, + (1 + max_cpus + nb_numa_nodes) * sizeof(*numa_fw_cfg)); + + qemu_register_boot_set(fw_cfg_boot_set, fw_cfg); + return fw_cfg; +} + +static void loongarch_build_smbios(LoongarchMachineState *lsms) +{ + LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms); + MachineState *ms = MACHINE(OBJECT(lsms)); + uint8_t *smbios_tables, *smbios_anchor; + size_t smbios_tables_len, smbios_anchor_len; + const char *product = "QEMU Virtual Machine"; + + if (!lsms->fw_cfg) { + return; + } + + if (kvm_enabled()) { + if (strstr(lsmc->cpu_name, "5000")) { + product = "KVM"; + } + } else { + product = "Loongarch-3A5K-7A1000-TCG"; + } + + smbios_set_defaults("Loongson", product, lsmc->cpu_name, false, true, + SMBIOS_ENTRY_POINT_30); + + smbios_get_tables(ms, NULL, 0, &smbios_tables, &smbios_tables_len, + &smbios_anchor, &smbios_anchor_len, &error_fatal); + + if (smbios_anchor) { + fw_cfg_add_file(lsms->fw_cfg, "etc/smbios/smbios-tables", + smbios_tables, smbios_tables_len); + fw_cfg_add_file(lsms->fw_cfg, "etc/smbios/smbios-anchor", + smbios_anchor, smbios_anchor_len); + } +} + +static void loongarch_machine_done(Notifier *notifier, void *data) +{ + LoongarchMachineState *lsms = + container_of(notifier, LoongarchMachineState, machine_done); + + platform_bus_add_all_fdt_nodes( + lsms->fdt, NULL, VIRT_PLATFORM_BUS_BASEADDRESS, VIRT_PLATFORM_BUS_SIZE, + VIRT_PLATFORM_BUS_IRQ); + + qemu_fdt_dumpdtb(lsms->fdt, lsms->fdt_size); + /* load fdt */ + MemoryRegion *fdt_rom = g_new(MemoryRegion, 1); + memory_region_init_rom(fdt_rom, NULL, "fdt", LS_FDT_SIZE, &error_fatal); + memory_region_add_subregion(get_system_memory(), LS_FDT_BASE, fdt_rom); + rom_add_blob_fixed("fdt", lsms->fdt, lsms->fdt_size, LS_FDT_BASE); + + loongarch_acpi_setup(); + loongarch_build_smbios(lsms); +} + +#ifdef CONFIG_TCG +#define FEATURE_REG 0x1fe00008 +#define VENDOR_REG 0x1fe00010 +#define CPUNAME_REG 0x1fe00020 +#define OTHER_FUNC_REG 0x1fe00420 +#define _str(x) #x +#define str(x) _str(x) +#define SIMPLE_OPS(ADDR, SIZE) \ + ({ \ + MemoryRegion *iomem = g_new(MemoryRegion, 1); \ + memory_region_init_io(iomem, NULL, &loongarch_qemu_ops, (void *)ADDR, \ + str(ADDR), SIZE); \ + memory_region_add_subregion_overlap(address_space_mem, ADDR, iomem, \ + 1); \ + }) + +static int reg180; + +static void loongarch_qemu_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + addr = ((hwaddr)(long)opaque) + addr; + addr = addr & 0xffffffff; + switch (addr) { + case 0x1fe00180: + reg180 = val; + break; + } +} + +static uint64_t loongarch_qemu_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t feature = 0UL; + addr = ((hwaddr)(long)opaque) + addr; + addr = addr & 0xffffffff; + switch (addr) { + case 0x1fe00180: + return reg180; + case 0x1001041c: + return 0xa800; + case FEATURE_REG: + feature |= 1UL << 2 | 1UL << 3 | 1UL << 4 | 1UL << 11; + return feature; + case VENDOR_REG: + return *(uint64_t *)"Loongson-3A5000"; + case CPUNAME_REG: + return *(uint64_t *)"3A5000"; + case 0x10013ffc: + return 0x80; + } + return 0; 
+}
+
+static const MemoryRegionOps loongarch_qemu_ops = {
+    .read = loongarch_qemu_read,
+    .write = loongarch_qemu_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
+        .min_access_size = 4,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 4,
+        .max_access_size = 8,
+    },
+};
+#endif
+
+static void loongarch_system_flash_cleanup_unused(LoongarchMachineState *lsms)
+{
+    char *prop_name;
+    int i;
+    Object *dev_obj;
+
+    for (i = 0; i < ARRAY_SIZE(lsms->flash); i++) {
+        dev_obj = OBJECT(lsms->flash[i]);
+        if (!object_property_get_bool(dev_obj, "realized", &error_abort)) {
+            prop_name = g_strdup_printf("pflash%d", i);
+            object_property_del(OBJECT(lsms), prop_name);
+            g_free(prop_name);
+            object_unparent(dev_obj);
+            lsms->flash[i] = NULL;
+        }
+    }
+}
+
+static bool loongarch_system_flash_init(LoongarchMachineState *lsms)
+{
+    int i = 0;
+    int64_t size = 0;
+    PFlashCFI01 *pflash = NULL;
+    BlockBackend *pflash_blk;
+
+    for (i = 0; i < ARRAY_SIZE(lsms->flash); i++) {
+        pflash_blk = NULL;
+        pflash = NULL;
+
+        pflash = lsms->flash[i];
+        pflash_cfi01_legacy_drive(pflash, drive_get(IF_PFLASH, 0, i));
+
+        pflash_blk = pflash_cfi01_get_blk(pflash);
+        /* pflash0 must exist; otherwise booting from pflash is not supported */
+        if (pflash_blk == NULL) {
+            if (i == 0) {
+                return false;
+            } else {
+                break;
+            }
+        }
+
+        size = blk_getlength(pflash_blk);
+        if (size == 0 || size % FLASH_SECTOR_SIZE != 0) {
+            error_report("system firmware block device %s has invalid size "
+                         "%" PRId64,
+                         blk_name(pflash_blk), size);
+            error_report("its size must be a non-zero multiple of 0x%x",
+                         FLASH_SECTOR_SIZE);
+            exit(1);
+        }
+        qdev_prop_set_uint32(DEVICE(pflash), "num-blocks",
+                             size / FLASH_SECTOR_SIZE);
+        sysbus_realize_and_unref(SYS_BUS_DEVICE(pflash), &error_fatal);
+        if (i == 0) {
+            sysbus_mmio_map(SYS_BUS_DEVICE(pflash), 0, LS_BIOS_BASE);
+        } else {
+            sysbus_mmio_map_overlap(SYS_BUS_DEVICE(pflash), 0,
+                                    LS_BIOS_VAR_BASE, 1);
+        }
+    }
+
+    return true;
+}
+
+static void ls3a5k_bios_init(LoongarchMachineState *lsms, ram_addr_t ram_size,
+                             uint64_t highram_size, uint64_t phyAddr_initrd,
+                             const char *kernel_filename,
+                             const char *kernel_cmdline,
+                             const char *initrd_filename)
+{
+    MemoryRegion *bios;
+    bool fw_cfg_used = false;
+    LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+    char *filename;
+    int bios_size;
+    const char *bios_name;
+
+    bios_name = MACHINE(lsms)->firmware;
+    if (kernel_filename) {
+        loaderparams.ram_size = ram_size;
+        loaderparams.kernel_filename = kernel_filename;
+        loaderparams.kernel_cmdline = kernel_cmdline;
+        loaderparams.initrd_filename = initrd_filename;
+    }
+
+    if (loongarch_system_flash_init(lsms)) {
+        fw_cfg_used = true;
+    } else {
+        bios = g_new(MemoryRegion, 1);
+        memory_region_init_ram(bios, NULL, "loongarch.bios", LS_BIOS_SIZE,
+                               &error_fatal);
+        memory_region_set_readonly(bios, true);
+        memory_region_add_subregion(get_system_memory(), LS_BIOS_BASE, bios);
+
+        /* BIOS load */
+        if (bios_name) {
+            if (access(bios_name, R_OK) == 0) {
+                load_image_targphys(bios_name, LS_BIOS_BASE, LS_BIOS_SIZE);
+            } else {
+                filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
+                if (!filename) {
+                    error_report("could not find firmware image '%s'",
+                                 bios_name);
+                    exit(1);
+                }
+                load_image_targphys(filename, LS_BIOS_BASE, LS_BIOS_SIZE);
+                g_free(filename);
+            }
+            fw_cfg_used = true;
+        } else {
+            if (strstr(lsmc->cpu_name, "5000")) {
+                bios_size = sizeof(ls3a5k_aui_boot_code);
+                rom_add_blob_fixed("bios", ls3a5k_aui_boot_code, bios_size,
+                                   LS_BIOS_BASE);
+            }
+
+            if (kernel_filename) {
+                lsms->reset_info[0]->vector = load_kernel();
+            }
+        }
+    }
+
+    
loongarch_system_flash_cleanup_unused(lsms); + + if (fw_cfg_used) { + lsms->fw_cfg = loongarch_fw_cfg_init(ram_size, lsms); + rom_set_fw(lsms->fw_cfg); + fw_conf_init(ram_size); + rom_add_blob_fixed("fw_conf", (void *)&fw_config, sizeof(fw_config), + FW_CONF_ADDR); + + if (kernel_filename) { + fw_cfg_add_kernel_info(lsms->fw_cfg, highram_size, phyAddr_initrd); + } + } + + if (lsms->fw_cfg != NULL) { + fw_cfg_add_file(lsms->fw_cfg, "etc/memmap", la_memmap_table, + sizeof(struct la_memmap_entry) * (la_memmap_entries)); + } + + return; +} + +static void create_fdt(LoongarchMachineState *lsms) +{ + lsms->fdt = create_device_tree(&lsms->fdt_size); + if (!lsms->fdt) { + error_report("create_device_tree() failed"); + exit(1); + } + + /* Header */ + qemu_fdt_setprop_string(lsms->fdt, "/", "compatible", + "linux,dummy-loongson3"); + qemu_fdt_setprop_cell(lsms->fdt, "/", "#address-cells", 0x2); + qemu_fdt_setprop_cell(lsms->fdt, "/", "#size-cells", 0x2); +} + +static void fdt_add_cpu_nodes(const LoongarchMachineState *lsms) +{ + int num; + const MachineState *ms = MACHINE(lsms); + int smp_cpus = ms->smp.cpus; + + qemu_fdt_add_subnode(lsms->fdt, "/cpus"); + qemu_fdt_setprop_cell(lsms->fdt, "/cpus", "#address-cells", 0x1); + qemu_fdt_setprop_cell(lsms->fdt, "/cpus", "#size-cells", 0x0); + + /* cpu nodes */ + for (num = smp_cpus - 1; num >= 0; num--) { + char *nodename = g_strdup_printf("/cpus/cpu@%d", num); + LOONGARCHCPU *cpu = LOONGARCH_CPU(qemu_get_cpu(num)); + + qemu_fdt_add_subnode(lsms->fdt, nodename); + qemu_fdt_setprop_string(lsms->fdt, nodename, "device_type", "cpu"); + qemu_fdt_setprop_string(lsms->fdt, nodename, "compatible", + cpu->dtb_compatible); + qemu_fdt_setprop_cell(lsms->fdt, nodename, "reg", num); + qemu_fdt_setprop_cell(lsms->fdt, nodename, "phandle", + qemu_fdt_alloc_phandle(lsms->fdt)); + g_free(nodename); + } + + /*cpu map */ + qemu_fdt_add_subnode(lsms->fdt, "/cpus/cpu-map"); + + for (num = smp_cpus - 1; num >= 0; num--) { + char *cpu_path = g_strdup_printf("/cpus/cpu@%d", num); + char *map_path; + + if (ms->smp.threads > 1) { + map_path = + g_strdup_printf("/cpus/cpu-map/socket%d/core%d/thread%d", + num / (ms->smp.cores * ms->smp.threads), + (num / ms->smp.threads) % ms->smp.cores, + num % ms->smp.threads); + } else { + map_path = + g_strdup_printf("/cpus/cpu-map/socket%d/core%d", + num / ms->smp.cores, num % ms->smp.cores); + } + qemu_fdt_add_path(lsms->fdt, map_path); + qemu_fdt_setprop_phandle(lsms->fdt, map_path, "cpu", cpu_path); + + g_free(map_path); + g_free(cpu_path); + } +} + +static void fdt_add_fw_cfg_node(const LoongarchMachineState *lsms) +{ + char *nodename; + hwaddr base = FW_CFG_ADDR; + + nodename = g_strdup_printf("/fw_cfg@%" PRIx64, base); + qemu_fdt_add_subnode(lsms->fdt, nodename); + qemu_fdt_setprop_string(lsms->fdt, nodename, "compatible", + "qemu,fw-cfg-mmio"); + qemu_fdt_setprop_sized_cells(lsms->fdt, nodename, "reg", 2, base, 2, 0x8); + qemu_fdt_setprop(lsms->fdt, nodename, "dma-coherent", NULL, 0); + g_free(nodename); +} + +static void fdt_add_pcie_node(const LoongarchMachineState *lsms) +{ + char *nodename; + hwaddr base_mmio = PCIE_MEMORY_BASE; + hwaddr size_mmio = PCIE_MEMORY_SIZE; + hwaddr base_pio = LS3A5K_ISA_IO_BASE; + hwaddr size_pio = LS_ISA_IO_SIZE; + hwaddr base_pcie = LS_PCIECFG_BASE; + hwaddr size_pcie = LS_PCIECFG_SIZE; + hwaddr base = base_pcie; + + nodename = g_strdup_printf("/pcie@%" PRIx64, base); + qemu_fdt_add_subnode(lsms->fdt, nodename); + qemu_fdt_setprop_string(lsms->fdt, nodename, "compatible", + "pci-host-ecam-generic"); + 
qemu_fdt_setprop_string(lsms->fdt, nodename, "device_type", "pci");
+    qemu_fdt_setprop_cell(lsms->fdt, nodename, "#address-cells", 3);
+    qemu_fdt_setprop_cell(lsms->fdt, nodename, "#size-cells", 2);
+    qemu_fdt_setprop_cell(lsms->fdt, nodename, "linux,pci-domain", 0);
+    qemu_fdt_setprop_cells(lsms->fdt, nodename, "bus-range", 0,
+                           PCIE_MMCFG_BUS(LS_PCIECFG_SIZE - 1));
+    qemu_fdt_setprop(lsms->fdt, nodename, "dma-coherent", NULL, 0);
+    qemu_fdt_setprop_sized_cells(lsms->fdt, nodename, "reg", 2, base_pcie, 2,
+                                 size_pcie);
+    qemu_fdt_setprop_sized_cells(lsms->fdt, nodename, "ranges", 1,
+                                 FDT_PCI_RANGE_IOPORT, 2, 0, 2, base_pio, 2,
+                                 size_pio, 1, FDT_PCI_RANGE_MMIO, 2, base_mmio,
+                                 2, base_mmio, 2, size_mmio);
+    g_free(nodename);
+}
+
+static void create_platform_bus(LoongarchMachineState *s, qemu_irq *pic)
+{
+    DeviceState *dev;
+    SysBusDevice *sysbus;
+    int i;
+    MemoryRegion *sysmem = get_system_memory();
+
+    dev = qdev_new(TYPE_PLATFORM_BUS_DEVICE);
+    dev->id = g_strdup(TYPE_PLATFORM_BUS_DEVICE);
+    qdev_prop_set_uint32(dev, "num_irqs", VIRT_PLATFORM_BUS_NUM_IRQS);
+    qdev_prop_set_uint32(dev, "mmio_size", VIRT_PLATFORM_BUS_SIZE);
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);
+    s->platform_bus_dev = dev;
+
+    sysbus = SYS_BUS_DEVICE(dev);
+    for (i = 0; i < VIRT_PLATFORM_BUS_NUM_IRQS; i++) {
+        int irq = VIRT_PLATFORM_BUS_IRQ + i;
+        sysbus_connect_irq(sysbus, i, pic[irq - LOONGARCH_PCH_IRQ_BASE]);
+    }
+
+    memory_region_add_subregion(sysmem, VIRT_PLATFORM_BUS_BASEADDRESS,
+                                sysbus_mmio_get_region(sysbus, 0));
+}
+
+static void ls3a5k_init(MachineState *args)
+{
+    int i;
+    const char *cpu_model = args->cpu_type;
+    const char *kernel_filename = args->kernel_filename;
+    const char *kernel_cmdline = args->kernel_cmdline;
+    const char *initrd_filename = args->initrd_filename;
+
+    ram_addr_t ram_size = args->ram_size;
+    MemoryRegion *address_space_mem = get_system_memory();
+    ram_addr_t offset = 0;
+    MachineState *machine = args;
+    MachineClass *mc = MACHINE_GET_CLASS(machine);
+    LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+    LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms);
+    int smp_cpus = machine->smp.cpus;
+    int nb_numa_nodes = machine->numa_state->num_nodes;
+    NodeInfo *numa_info = machine->numa_state->nodes;
+    LOONGARCHCPU *cpu;
+    CPULOONGARCHState *env;
+    qemu_irq *ls7a_apic = NULL;
+    qemu_irq *pirq = NULL;
+    PCIBus *pci_bus = NULL;
+    char *ramName = NULL;
+    uint64_t lowram_size = 0, highram_size = 0, phyAddr = 0, memmap_size = 0,
+             highram_end_addr = 0;
+
+    CPUArchIdList *possible_cpus;
+    if (strstr(lsmc->cpu_name, "5000")) {
+        if (strcmp(cpu_model, LOONGARCH_CPU_TYPE_NAME("Loongson-3A5000")) &&
+            strcmp(cpu_model, LOONGARCH_CPU_TYPE_NAME("host"))) {
+            error_report("machine type %s does not match cpu type %s",
+                         lsmc->cpu_name, cpu_model);
+            exit(1);
+        }
+        if (kvm_enabled()) {
+            /* the ioctl argument must be passed by pointer */
+            kvm_vm_ioctl(kvm_state, KVM_LARCH_SET_CPUCFG, &ls3a5k_cpucfgs);
+        }
+    }
+
+    create_fdt(lsms);
+
+    DPRINTF("isa 0x%lx\n", lsmc->isa_io_base);
+    DPRINTF("cpu_name %s bridge_name %s\n", lsmc->cpu_name, lsmc->bridge_name);
+
+    /* init CPUs */
+    mc->possible_cpu_arch_ids(machine);
+    possible_cpus = machine->possible_cpus;
+
+    for (i = 0; i < smp_cpus; i++) {
+        Object *obj = NULL;
+        Error *local_err = NULL;
+
+        obj = object_new(possible_cpus->cpus[i].type);
+
+        object_property_set_uint(obj, "id", possible_cpus->cpus[i].arch_id,
+                                 &local_err);
+        object_property_set_bool(obj, "realized", true, &local_err);
+
+        object_unref(obj);
+        error_propagate(&error_fatal, local_err);
+
+        cpu = 
LOONGARCH_CPU(CPU(obj)); + if (cpu == NULL) { + fprintf(stderr, "Unable to find CPU definition\n"); + exit(1); + } + + env = &cpu->env; + cpu_states[i] = env; + env->CSR_TMID |= i; + + lsms->reset_info[i] = g_malloc0(sizeof(ResetData)); + lsms->reset_info[i]->cpu = cpu; + lsms->reset_info[i]->vector = env->active_tc.PC; + if (i == 0) { + qemu_register_reset(main_cpu_reset, lsms->reset_info[i]); + } else { + qemu_register_reset(slave_cpu_reset, lsms->reset_info[i]); + } + + /* Init CPU internal devices */ + cpu_init_irq(cpu); + cpu_loongarch_clock_init(cpu); + cpu_init_ipi(lsms, env->irq[12], i); + cpu_init_apic(lsms, env, i); + } + + lsms->hotpluged_cpu_num = 0; + fdt_add_cpu_nodes(lsms); + env = cpu_states[0]; + + /* node0 mem*/ + phyAddr = (uint64_t)0; + MemoryRegion *lowmem = g_new(MemoryRegion, 1); + ramName = g_strdup_printf("loongarch_ls3a.node%d.lowram", 0); + + lowram_size = MIN(ram_size, 256 * 0x100000); + memory_region_init_alias(lowmem, NULL, ramName, machine->ram, 0, + lowram_size); + memory_region_add_subregion(address_space_mem, phyAddr, lowmem); + + offset += lowram_size; + if (nb_numa_nodes > 0) { + highram_size = numa_info[0].node_mem - 256 * MiB; + if (numa_info[0].node_mem > GiB) { + memmap_size = numa_info[0].node_mem - GiB; + la_memmap_add_entry(0xc0000000ULL, memmap_size, SYSTEM_RAM); + } + } else { + highram_size = ram_size - 256 * MiB; + if (ram_size > GiB) { + memmap_size = ram_size - GiB; + la_memmap_add_entry(0xc0000000ULL, memmap_size, SYSTEM_RAM); + } + } + + phyAddr = (uint64_t)0x90000000; + MemoryRegion *highmem = g_new(MemoryRegion, 1); + ramName = g_strdup_printf("loongarch_ls3a.node%d.highram", 0); + memory_region_init_alias(highmem, NULL, ramName, machine->ram, offset, + highram_size); + memory_region_add_subregion(address_space_mem, phyAddr, highmem); + offset += highram_size; + phyAddr += highram_size; + + /* initrd address use high mem from high to low */ + highram_end_addr = phyAddr; + /* node1~ nodemax */ + for (i = 1; i < nb_numa_nodes; i++) { + MemoryRegion *nodemem = g_new(MemoryRegion, 1); + ramName = g_strdup_printf("loongarch_ls3a.node%d.ram", i); + memory_region_init_alias(nodemem, NULL, ramName, machine->ram, offset, + numa_info[i].node_mem); + memory_region_add_subregion(address_space_mem, phyAddr, nodemem); + la_memmap_add_entry(phyAddr, numa_info[i].node_mem, SYSTEM_RAM); + offset += numa_info[i].node_mem; + phyAddr += numa_info[i].node_mem; + } + + fdt_add_fw_cfg_node(lsms); + ls3a5k_bios_init(lsms, ram_size, highram_size, highram_end_addr, + kernel_filename, kernel_cmdline, initrd_filename); + + lsms->machine_done.notify = loongarch_machine_done; + qemu_add_machine_init_done_notifier(&lsms->machine_done); + /*vmstate_register_ram_global(bios);*/ + + /* initialize hotplug memory address space */ + lsms->hotplug_memory_size = 0; + + /* always allocate the device memory information */ + machine->device_memory = g_malloc0(sizeof(*machine->device_memory)); + if (machine->ram_size < machine->maxram_size) { + int max_memslots; + + lsms->hotplug_memory_size = machine->maxram_size - machine->ram_size; + /* + * Limit the number of hotpluggable memory slots to half the number + * slots that KVM supports, leaving the other half for PCI and other + * devices. However ensure that number of slots doesn't drop below 32. 
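+ *
+ * For example, if KVM reported 512 memory slots, up to 256 would remain
+ * available for DIMM hotplug here (illustrative figure, not from the
+ * original comment; the real bound comes from kvm_get_max_memslots()).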
+ */ + max_memslots = LOONGARCH_MAX_RAM_SLOTS; + if (kvm_enabled()) { + max_memslots = kvm_get_max_memslots() / 2; + } + + if (machine->ram_slots == 0) + machine->ram_slots = + lsms->hotplug_memory_size / LOONGARCH_HOTPLUG_MEM_ALIGN; + + if (machine->ram_slots > max_memslots) { + error_report("Specified number of memory slots %" PRIu64 + " exceeds max supported %d", + machine->ram_slots, max_memslots); + exit(1); + } + + lsms->ram_slots = machine->ram_slots; + + machine->device_memory->base = get_hotplug_membase(machine->ram_size); + memory_region_init(&machine->device_memory->mr, OBJECT(lsms), + "device-memory", lsms->hotplug_memory_size); + memory_region_add_subregion(get_system_memory(), + machine->device_memory->base, + &machine->device_memory->mr); + } + + if (!strcmp(lsmc->bridge_name, "ls7a")) { + /*Initialize the 7A IO interrupt subsystem*/ + DeviceState *ls7a_dev; + lsms->apic_xrupt_override = kvm_irqchip_in_kernel(); + ls7a_apic = ls3a_intctl_init(machine, cpu_states); + if (!ls7a_apic) { + perror("Init 7A APIC failed\n"); + exit(1); + } + pci_bus = ls7a_init(machine, ls7a_apic, &ls7a_dev); + + object_property_add_link( + OBJECT(machine), LOONGARCH_MACHINE_ACPI_DEVICE_PROP, + TYPE_HOTPLUG_HANDLER, (Object **)&lsms->acpi_dev, + object_property_allow_set_link, OBJ_PROP_LINK_STRONG); + object_property_set_link(OBJECT(machine), + LOONGARCH_MACHINE_ACPI_DEVICE_PROP, + OBJECT(ls7a_dev), &error_abort); + + create_platform_bus(lsms, ls7a_apic); + +#ifdef CONFIG_KVM + if (kvm_enabled()) { + kvm_direct_msi_allowed = + (kvm_check_extension(kvm_state, KVM_CAP_SIGNAL_MSI) > 0); + } else { + kvm_direct_msi_allowed = 0; + } + msi_nonbroken = kvm_direct_msi_allowed; +#else + msi_nonbroken = true; +#endif + sysbus_create_simple("ls7a_rtc", LS7A_RTC_REG_BASE, + ls7a_apic[LS7A_RTC_IRQ - LOONGARCH_PCH_IRQ_BASE]); + } + + /*Initialize the CPU serial device*/ + + if (serial_hd(0)) { + pirq = qemu_allocate_irqs( + legacy_set_irq, + ls7a_apic + (LS7A_UART_IRQ - LOONGARCH_PCH_IRQ_BASE), 1); + serial_mm_init(address_space_mem, LS7A_UART_BASE, 0, pirq[0], 115200, + serial_hd(0), DEVICE_NATIVE_ENDIAN); + } + + /*network card*/ + network_init(pci_bus); + + sysbus_realize_and_unref(SYS_BUS_DEVICE(qdev_new("iocsr")), &error_fatal); + +#ifdef CONFIG_TCG + int nb_nodes = (smp_cpus - 1) / 4; + for (i = 0; i <= nb_nodes; i++) { + uint64_t off = (uint64_t)i << 44; + SIMPLE_OPS(((hwaddr)0x1fe00180 | off), 0x8); + SIMPLE_OPS(((hwaddr)0x1fe0019c | off), 0x8); + SIMPLE_OPS(((hwaddr)0x1fe001d0 | off), 0x8); + SIMPLE_OPS(((hwaddr)FEATURE_REG | off), 0x8); + SIMPLE_OPS(((hwaddr)VENDOR_REG | off), 0x8); + SIMPLE_OPS(((hwaddr)CPUNAME_REG | off), 0x8); + SIMPLE_OPS(((hwaddr)OTHER_FUNC_REG | off), 0x8); + } + + SIMPLE_OPS(0x1001041c, 0x4); + SIMPLE_OPS(0x10002000, 0x14); + SIMPLE_OPS(0x10013ffc, 0x4); +#endif + + fdt_add_pcie_node(lsms); +} + +static const CPUArchIdList *loongarch_possible_cpu_arch_ids(MachineState *ms) +{ + int i; + int max_cpus = ms->smp.max_cpus; + + if (ms->possible_cpus) { + /* + * make sure that max_cpus hasn't changed since the first use, i.e. 
+ * -smp hasn't been parsed after it + */ + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = + g_malloc0(sizeof(CPUArchIdList) + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (i = 0; i < ms->possible_cpus->len; i++) { + ms->possible_cpus->cpus[i].type = ms->cpu_type; + ms->possible_cpus->cpus[i].vcpus_count = 1; + ms->possible_cpus->cpus[i].props.has_core_id = true; + ms->possible_cpus->cpus[i].props.core_id = i; + ms->possible_cpus->cpus[i].arch_id = i; + } + return ms->possible_cpus; +} + +static PFlashCFI01 *loongarch_pflash_create(LoongarchMachineState *lsms, + const char *name, + const char *alias_prop_name) +{ + DeviceState *dev = qdev_new(TYPE_PFLASH_CFI01); + + qdev_prop_set_uint64(dev, "sector-length", FLASH_SECTOR_SIZE); + qdev_prop_set_uint8(dev, "width", 1); + qdev_prop_set_string(dev, "name", name); + object_property_add_child(OBJECT(lsms), name, OBJECT(dev)); + object_property_add_alias(OBJECT(lsms), alias_prop_name, OBJECT(dev), + "drive"); + return PFLASH_CFI01(dev); +} + +static void loongarch_system_flash_create(LoongarchMachineState *lsms) +{ + lsms->flash[0] = loongarch_pflash_create(lsms, "system.flash0", "pflash0"); + lsms->flash[1] = loongarch_pflash_create(lsms, "system.flash1", "pflash1"); +} + +static void loongarch_machine_initfn(Object *obj) +{ + LoongarchMachineState *lsms = LoongarchMACHINE(obj); + LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms); + lsms->acpi_build_enabled = lsmc->has_acpi_build; + loongarch_system_flash_create(lsms); + lsms->oem_id = g_strndup(EFI_ACPI_OEM_ID, 6); + lsms->oem_table_id = g_strndup(EFI_ACPI_OEM_TABLE_ID, 6); +} + +static void ls3a5k_ls7a_machine_options(MachineClass *m) +{ + char *cpu_name = get_host_cpu_model_name(); + LoongarchMachineClass *lsmc = LoongarchMACHINE_CLASS(m); + m->desc = "Loongarch3a5k LS7A1000 machine"; + m->max_cpus = LOONGARCH_MAX_VCPUS; + m->alias = "loongson7a"; + m->is_default = 1; + lsmc->isa_io_base = LS3A5K_ISA_IO_BASE; + lsmc->pciecfg_base = LS_PCIECFG_BASE; + lsmc->ls7a_ioapic_reg_base = LS3A5K_LS7A_IOAPIC_REG_BASE; + lsmc->node_shift = 44; + strncpy(lsmc->cpu_name, cpu_name, sizeof(lsmc->cpu_name) - 1); + lsmc->cpu_name[sizeof(lsmc->cpu_name) - 1] = 0; + strncpy(lsmc->bridge_name, "ls7a", sizeof(lsmc->bridge_name) - 1); + lsmc->bridge_name[sizeof(lsmc->bridge_name) - 1] = 0; + compat_props_add(m->compat_props, loongarch_compat, loongarch_compat_len); +} + +static void ls3a_board_reset(MachineState *ms) +{ + qemu_devices_reset(); +#ifdef CONFIG_KVM + struct loongarch_kvm_irqchip *chip; + int length; + + if (!kvm_enabled()) { + return; + } + length = sizeof(struct loongarch_kvm_irqchip) + + sizeof(struct loongarch_gipiState); + chip = g_malloc0(length); + memset(chip, 0, length); + chip->chip_id = KVM_IRQCHIP_LS3A_GIPI; + chip->len = length; + kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip); + + length = sizeof(struct loongarch_kvm_irqchip) + + sizeof(struct ls7a_ioapic_state); + chip = g_realloc(chip, length); + memset(chip, 0, length); + chip->chip_id = KVM_IRQCHIP_LS7A_IOAPIC; + chip->len = length; + kvm_vm_ioctl(kvm_state, KVM_SET_IRQCHIP, chip); + + g_free(chip); +#endif +} + +static CpuInstanceProperties ls3a_cpu_index_to_props(MachineState *ms, + unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +static int64_t 
ls3a_get_default_cpu_node_id(const MachineState *ms, int idx)
+{
+    int nb_numa_nodes = ms->numa_state->num_nodes;
+    int smp_cores = ms->smp.cores;
+
+    if (nb_numa_nodes == 0) {
+        nb_numa_nodes = 1;
+    }
+    return idx / smp_cores % nb_numa_nodes;
+}
+
+static void loongarch_class_init(ObjectClass *oc, void *data)
+{
+    MachineClass *mc = MACHINE_CLASS(oc);
+    HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(oc);
+    LoongarchMachineClass *lsmc = LoongarchMACHINE_CLASS(oc);
+
+    lsmc->get_hotplug_handler = mc->get_hotplug_handler;
+    lsmc->has_acpi_build = true;
+    mc->get_hotplug_handler = loongarch_get_hotpug_handler;
+    mc->has_hotpluggable_cpus = true;
+    mc->cpu_index_to_instance_props = ls3a_cpu_index_to_props;
+    mc->possible_cpu_arch_ids = loongarch_possible_cpu_arch_ids;
+    mc->get_default_cpu_node_id = ls3a_get_default_cpu_node_id;
+    mc->default_ram_size = 1 * GiB;
+    mc->default_cpu_type = LOONGARCH_CPU_TYPE_NAME("Loongson-3A5000");
+    mc->default_ram_id = "loongarch_ls3a.ram";
+
+#ifdef CONFIG_TPM
+    machine_class_allow_dynamic_sysbus_dev(mc, TYPE_TPM_TIS_SYSBUS);
+#endif
+
+    mc->reset = ls3a_board_reset;
+    mc->max_cpus = LOONGARCH_MAX_VCPUS;
+    hc->pre_plug = loongarch_machine_device_pre_plug;
+    hc->plug = loongarch_machine_device_plug;
+    hc->unplug = longson_machine_device_unplug;
+    hc->unplug_request = loongarch_machine_device_unplug_request;
+
+    object_class_property_add(oc, "acpi", "OnOffAuto", loongarch_get_acpi,
+                              loongarch_set_acpi, NULL, NULL);
+    object_class_property_set_description(oc, "acpi", "Enable ACPI");
+}
+
+static const TypeInfo loongarch_info = {
+    .name = TYPE_LOONGARCH_MACHINE,
+    .parent = TYPE_MACHINE,
+    .abstract = true,
+    .instance_size = sizeof(LoongarchMachineState),
+    .instance_init = loongarch_machine_initfn,
+    .class_size = sizeof(LoongarchMachineClass),
+    .class_init = loongarch_class_init,
+    .interfaces = (InterfaceInfo[]){ { TYPE_HOTPLUG_HANDLER }, {} },
+};
+
+static void loongarch_machine_register_types(void)
+{
+    type_register_static(&loongarch_info);
+}
+
+type_init(loongarch_machine_register_types)
+
+DEFINE_LS3A5K_MACHINE(loongson7a_v1_0, "loongson7a_v1.0",
+                      ls3a5k_ls7a_machine_options);
diff --git a/hw/loongarch/larch_hotplug.c b/hw/loongarch/larch_hotplug.c
new file mode 100644
index 0000000000000000000000000000000000000000..52f13af7b3a869631bcace96d3ad61f27cf0a009
--- /dev/null
+++ b/hw/loongarch/larch_hotplug.c
@@ -0,0 +1,377 @@
+/*
+ * Hotplug emulation on Loongarch system.
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu-common.h" +#include "qemu/queue.h" +#include "qemu/units.h" +#include "qemu/cutils.h" +#include "qemu/bcd.h" +#include "hw/hotplug.h" +#include "hw/loongarch/cpudevs.h" +#include "hw/mem/memory-device.h" +#include "sysemu/numa.h" +#include "sysemu/cpus.h" +#include "hw/loongarch/larch.h" +#include "hw/cpu/core.h" +#include "hw/nvram/fw_cfg.h" +#include "hw/platform-bus.h" + +/* find cpu slot in machine->possible_cpus by core_id */ +static CPUArchId *loongarch_find_cpu_slot(MachineState *ms, uint32_t id, + int *idx) +{ + int index = id; + + if (index >= ms->possible_cpus->len) { + return NULL; + } + if (idx) { + *idx = index; + } + return &ms->possible_cpus->cpus[index]; +} + +static void loongarch_memory_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + Error *local_err = NULL; + LoongarchMachineState *lsms = LoongarchMACHINE(hotplug_dev); + HotplugHandlerClass *hhc; + uint64_t size; + + size = memory_device_get_region_size(MEMORY_DEVICE(dev), &error_abort); + if (size % LOONGARCH_HOTPLUG_MEM_ALIGN) { + error_setg(&local_err, + "Hotplugged memory size must be a multiple of " + "%lld MB", + LOONGARCH_HOTPLUG_MEM_ALIGN / MiB); + goto out; + } + + pc_dimm_plug(PC_DIMM(dev), MACHINE(lsms)); + + hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev); + hhc->plug(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &error_abort); +out: + error_propagate(errp, local_err); +} + +static void loongarch_memory_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + Error *local_err = NULL; + HotplugHandlerClass *hhc; + LoongarchMachineState *lsms = LoongarchMACHINE(hotplug_dev); + + if (!lsms->acpi_dev || !loongarch_is_acpi_enabled(lsms)) { + error_setg( + &local_err, + "memory hotplug is not enabled: missing acpi device or acpi disabled"); + goto out; + } + hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev); + hhc->unplug_request(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err); + +out: + error_propagate(errp, local_err); +} + +static void loongarch_cpu_unplug(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp) +{ + CPUArchId *found_cpu; + HotplugHandlerClass *hhc; + Error *local_err = NULL; + LOONGARCHCPU *cpu = LOONGARCH_CPU(dev); + MachineState *machine = MACHINE(OBJECT(hotplug_dev)); + LoongarchMachineState *lsms = LoongarchMACHINE(machine); + + hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev); + hhc->unplug(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err); + + if (local_err) { + goto out; + } + + loongarch_cpu_destroy(machine, cpu); + + found_cpu = loongarch_find_cpu_slot(MACHINE(lsms), cpu->id, NULL); + found_cpu->cpu = NULL; + object_unparent(OBJECT(dev)); + lsms->hotpluged_cpu_num -= 1; +out: + error_propagate(errp, local_err); +} + +static void loongarch_memory_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + Error *local_err = NULL; + HotplugHandlerClass *hhc; + LoongarchMachineState *lsms = LoongarchMACHINE(hotplug_dev); + + hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev); + hhc->unplug(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err); + + if (local_err) { + goto out; + } + + pc_dimm_unplug(PC_DIMM(dev), MACHINE(hotplug_dev)); + object_unparent(OBJECT(dev)); + +out: + error_propagate(errp, local_err); +} + +static void loongarch_cpu_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + MachineState *ms = MACHINE(OBJECT(hotplug_dev)); + MachineClass *mc = MACHINE_GET_CLASS(hotplug_dev); + LoongarchMachineState *lsms = 
LoongarchMACHINE(ms);
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
+    CPUArchId *cpu_slot;
+    Error *local_err = NULL;
+    int index;
+    int free_index = lsms->hotpluged_cpu_num + ms->smp.cpus;
+    int max_cpus = ms->smp.max_cpus;
+
+    if (dev->hotplugged && !mc->has_hotpluggable_cpus) {
+        error_setg(&local_err, "CPU hotplug not supported for this machine");
+        goto out;
+    }
+
+    if (!object_dynamic_cast(OBJECT(cpu), ms->cpu_type)) {
+        error_setg(errp, "Invalid CPU type, expected cpu type: '%s'",
+                   ms->cpu_type);
+        return;
+    }
+
+    /* if ID is not set, set it based on core properties */
+    if (cpu->id == UNASSIGNED_CPU_ID) {
+        if ((cpu->core_id) > (max_cpus - 1)) {
+            error_setg(errp, "Invalid CPU core-id: %u must be in range 0:%u",
+                       cpu->core_id, max_cpus - 1);
+            return;
+        }
+
+        if (free_index > (max_cpus - 1)) {
+            error_setg(errp, "The maximum number of CPUs cannot exceed %u.",
+                       max_cpus);
+            return;
+        }
+
+        if (cpu->core_id != free_index) {
+            error_setg(errp, "Invalid CPU core-id: %u, it must be %u",
+                       cpu->core_id, free_index);
+            return;
+        }
+
+        cpu->id = cpu->core_id;
+    }
+
+    cpu_slot = loongarch_find_cpu_slot(MACHINE(hotplug_dev), cpu->id, &index);
+    if (!cpu_slot) {
+        error_setg(&local_err, "core id %d out of range", cpu->id);
+        goto out;
+    }
+
+    if (cpu_slot->cpu) {
+        error_setg(&local_err, "core %d already populated", cpu->id);
+        goto out;
+    }
+
+    numa_cpu_pre_plug(cpu_slot, dev, &local_err);
+
+out:
+    error_propagate(errp, local_err);
+}
+
+static void loongarch_memory_pre_plug(HotplugHandler *hotplug_dev,
+                                      DeviceState *dev, Error **errp)
+{
+    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+    LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+    PCDIMMDevice *dimm = PC_DIMM(dev);
+    Error *local_err = NULL;
+    uint64_t size;
+
+    if (!lsms->acpi_dev || !loongarch_is_acpi_enabled(lsms)) {
+        error_setg(
+            errp,
+            "memory hotplug is not enabled: missing acpi device or acpi disabled");
+        return;
+    }
+
+    size = memory_device_get_region_size(MEMORY_DEVICE(dimm), &local_err);
+    if (local_err) {
+        error_propagate(errp, local_err);
+        return;
+    }
+
+    if (size % LOONGARCH_HOTPLUG_MEM_ALIGN) {
+        error_setg(errp,
+                   "Hotplugged memory size must be a multiple of "
+                   "%lld MB",
+                   LOONGARCH_HOTPLUG_MEM_ALIGN / MiB);
+        return;
+    }
+
+    pc_dimm_pre_plug(dimm, MACHINE(hotplug_dev), NULL, errp);
+}
+
+static void loongarch_cpu_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
+                               Error **errp)
+{
+    CPUArchId *found_cpu;
+    HotplugHandlerClass *hhc;
+    Error *local_err = NULL;
+    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+    LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
+
+    if (lsms->acpi_dev) {
+        loongarch_cpu_create(machine, cpu, errp);
+        hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev);
+        hhc->plug(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err);
+        if (local_err) {
+            goto out;
+        }
+    }
+
+    found_cpu = loongarch_find_cpu_slot(MACHINE(lsms), cpu->id, NULL);
+    found_cpu->cpu = OBJECT(dev);
+    lsms->hotpluged_cpu_num += 1;
+out:
+    error_propagate(errp, local_err);
+}
+
+static void loongarch_cpu_unplug_request(HotplugHandler *hotplug_dev,
+                                         DeviceState *dev, Error **errp)
+{
+    MachineState *machine = MACHINE(OBJECT(hotplug_dev));
+    LoongarchMachineState *lsms = LoongarchMACHINE(machine);
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
+    Error *local_err = NULL;
+    HotplugHandlerClass *hhc;
+    int idx = -1;
+
+    if (!lsms->acpi_dev) {
+        error_setg(&local_err, "CPU hot unplug not supported without ACPI");
+        goto out;
+    }
+
+    
loongarch_find_cpu_slot(MACHINE(lsms), cpu->id, &idx); + assert(idx != -1); + if (idx == 0) { + error_setg(&local_err, "Boot CPU is unpluggable"); + goto out; + } + + hhc = HOTPLUG_HANDLER_GET_CLASS(lsms->acpi_dev); + hhc->unplug_request(HOTPLUG_HANDLER(lsms->acpi_dev), dev, &local_err); + + if (local_err) { + goto out; + } + +out: + error_propagate(errp, local_err); +} + +void longson_machine_device_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine()); + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + loongarch_memory_unplug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) { + if (!mc->has_hotpluggable_cpus) { + error_setg(errp, "CPU hot unplug not supported on this machine"); + return; + } + loongarch_cpu_unplug(hotplug_dev, dev, errp); + } else { + error_setg(errp, + "acpi: device unplug for not supported device" + " type: %s", + object_get_typename(OBJECT(dev))); + } + + return; +} + +void loongarch_machine_device_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + loongarch_memory_unplug_request(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) { + loongarch_cpu_unplug_request(hotplug_dev, dev, errp); + } +} + +HotplugHandler *loongarch_get_hotpug_handler(MachineState *machine, + DeviceState *dev) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM) || + object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU) || + object_dynamic_cast(OBJECT(dev), TYPE_SYS_BUS_DEVICE)) { + return HOTPLUG_HANDLER(machine); + } + return NULL; +} + +void loongarch_machine_device_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + loongarch_memory_pre_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) { + loongarch_cpu_pre_plug(hotplug_dev, dev, errp); + } +} + +void loongarch_machine_device_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp) +{ + LoongarchMachineState *lsms = LoongarchMACHINE(hotplug_dev); + + if (lsms->platform_bus_dev) { + MachineClass *mc = MACHINE_GET_CLASS(lsms); + + if (device_is_dynamic_sysbus(mc, dev)) { + platform_bus_link_device( + PLATFORM_BUS_DEVICE(lsms->platform_bus_dev), + SYS_BUS_DEVICE(dev)); + } + } + + if (object_dynamic_cast(OBJECT(dev), TYPE_PC_DIMM)) { + loongarch_memory_plug(hotplug_dev, dev, errp); + } else if (object_dynamic_cast(OBJECT(dev), TYPE_LOONGARCH_CPU)) { + loongarch_cpu_plug(hotplug_dev, dev, errp); + } +} diff --git a/hw/loongarch/larch_int.c b/hw/loongarch/larch_int.c new file mode 100644 index 0000000000000000000000000000000000000000..ff3750e9827c7797e10be9bb9d57f900e400a2ea --- /dev/null +++ b/hw/loongarch/larch_int.c @@ -0,0 +1,87 @@ +/* + * QEMU LOONGARCH interrupt support + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/loongarch/cpudevs.h" +#include "cpu.h" +#include "sysemu/kvm.h" +#include "kvm_larch.h" +#ifdef CONFIG_KVM +#include <linux/kvm.h> +#endif + +static void cpu_irq_request(void *opaque, int irq, int level) +{ + LOONGARCHCPU *cpu = opaque; + CPULOONGARCHState *env = &cpu->env; + CPUState *cs = CPU(cpu); + bool locked = false; + + if (irq < 0 || irq > 13) { + return; + } + + /* Make sure locking works even if BQL is already held by the caller */ + if (!qemu_mutex_iothread_locked()) { + locked = true; + qemu_mutex_lock_iothread(); + } + + if (level) { + env->CSR_ESTAT |= 1 << irq; + } else { + env->CSR_ESTAT &= ~(1 << irq); + } + + if (kvm_enabled()) { + if (irq == 2) { + kvm_loongarch_set_interrupt(cpu, irq, level); + } else if (irq == 3) { + kvm_loongarch_set_interrupt(cpu, irq, level); + } else if (irq == 12) { + kvm_loongarch_set_ipi_interrupt(cpu, irq, level); + } + } + + if (env->CSR_ESTAT & CSR_ESTAT_IPMASK) { + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + } else { + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } + + if (locked) { + qemu_mutex_unlock_iothread(); + } +} + +void cpu_init_irq(LOONGARCHCPU *cpu) +{ + CPULOONGARCHState *env = &cpu->env; + qemu_irq *qi; + int i; + + qi = qemu_allocate_irqs(cpu_irq_request, loongarch_env_get_cpu(env), + N_IRQS); + for (i = 0; i < N_IRQS; i++) { + env->irq[i] = qi[i]; + } +} diff --git a/hw/loongarch/ls7a_nb.c b/hw/loongarch/ls7a_nb.c new file mode 100644 index 0000000000000000000000000000000000000000..7a3613bf56a3bc236cbfe7ca587e33b655148782 --- /dev/null +++ b/hw/loongarch/ls7a_nb.c @@ -0,0 +1,314 @@ +/* + * Loongarch 7A1000 north bridge support + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. + * + */ + +#include "qemu/osdep.h" + +#include "hw/hw.h" +#include "hw/irq.h" +#include "hw/sysbus.h" +#include "hw/pci/pci.h" +#include "hw/i386/pc.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/pcie_host.h" +#include "sysemu/sysemu.h" +#include "exec/address-spaces.h" +#include "qapi/error.h" +#include "hw/loongarch/cpudevs.h" +#include "hw/acpi/ls7a.h" +#include "hw/i386/pc.h" +#include "hw/isa/isa.h" +#include "hw/boards.h" +#include "qemu/log.h" +#include "hw/loongarch/bios.h" +#include "hw/loader.h" +#include "elf.h" +#include "exec/address-spaces.h" +#include "exec/memory.h" +#include "hw/pci/pci_bridge.h" +#include "hw/pci/pci_bus.h" +#include "linux/kvm.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "sysemu/reset.h" +#include "migration/vmstate.h" +#include "hw/loongarch/larch.h" +#include "hw/loongarch/ls7a.h" + +#undef DEBUG_LS7A + +#ifdef DEBUG_LS7A +#define DPRINTF(fmt, ...) fprintf(stderr, "%s: " fmt, __func__, ##__VA_ARGS__) +#else +#define DPRINTF(fmt, ...) 
+#endif + +static void ls7a_reset(void *opaque) +{ + PCIDevice *dev = opaque; + uint64_t wmask = ~(-1); /* ~(-1) == 0: the registers below are read-only */ + + pci_set_word(dev->config + PCI_VENDOR_ID, 0x0014); + pci_set_word(dev->wmask + PCI_VENDOR_ID, wmask & 0xffff); + pci_set_word(dev->cmask + PCI_VENDOR_ID, 0xffff); + pci_set_word(dev->config + PCI_DEVICE_ID, 0x7a00); + pci_set_word(dev->wmask + PCI_DEVICE_ID, wmask & 0xffff); + pci_set_word(dev->cmask + PCI_DEVICE_ID, 0xffff); + pci_set_word(dev->config + 0x4, 0x0000); + pci_set_word(dev->config + PCI_STATUS, 0x0010); + pci_set_word(dev->wmask + PCI_STATUS, wmask & 0xffff); + pci_set_word(dev->cmask + PCI_STATUS, 0xffff); + pci_set_byte(dev->config + PCI_REVISION_ID, 0x0); + pci_set_byte(dev->wmask + PCI_REVISION_ID, wmask & 0xff); + pci_set_byte(dev->cmask + PCI_REVISION_ID, 0xff); + pci_set_byte(dev->config + 0x9, 0x00); + pci_set_byte(dev->wmask + 0x9, wmask & 0xff); + pci_set_byte(dev->cmask + 0x9, 0xff); + pci_set_byte(dev->config + 0xa, 0x00); + pci_set_byte(dev->wmask + 0xa, wmask & 0xff); + pci_set_byte(dev->cmask + 0xa, 0xff); + pci_set_byte(dev->config + 0xb, 0x06); + pci_set_byte(dev->wmask + 0xb, wmask & 0xff); + pci_set_byte(dev->cmask + 0xb, 0xff); + pci_set_byte(dev->config + 0xc, 0x00); + pci_set_byte(dev->wmask + 0xc, wmask & 0xff); + pci_set_byte(dev->cmask + 0xc, 0xff); + pci_set_byte(dev->config + 0xe, 0x80); + pci_set_byte(dev->wmask + 0xe, wmask & 0xff); + pci_set_byte(dev->cmask + 0xe, 0xff); +} + +static const VMStateDescription vmstate_ls7a_pcie = { + .name = "LS7A_PCIE", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]){ VMSTATE_PCI_DEVICE(dev, LS7APCIState), + VMSTATE_STRUCT(pm, LS7APCIState, 0, + vmstate_ls7a_pm, LS7APCIPMRegs), + VMSTATE_END_OF_LIST() } +}; + +static PCIINTxRoute ls7a_route_intx_pin_to_irq(void *opaque, int pin) +{ + PCIINTxRoute route; + + route.irq = pin; + route.mode = PCI_INTX_ENABLED; + return route; +} + +static int pci_ls7a_map_irq(PCIDevice *d, int irq_num) +{ + int irq; + + irq = 16 + ((PCI_SLOT(d->devfn) * 4 + irq_num) & 0xf); + return irq; +} + +static void pci_ls7a_set_irq(void *opaque, int irq_num, int level) +{ + qemu_irq *pic = opaque; + DPRINTF("------ %s irq %d %d\n", __func__, irq_num, level); + qemu_set_irq(pic[irq_num], level); +} + +static void ls7a_pcie_realize(PCIDevice *dev, Error **errp) +{ + LS7APCIState *s = PCIE_LS7A(dev); + /* Ls7a North Bridge, built on FPGA, VENDOR_ID/DEVICE_ID are "undefined" */ + pci_config_set_prog_interface(dev->config, 0x00); + + /* set the default value of north bridge pci config */ + qemu_register_reset(ls7a_reset, s); +} + +static AddressSpace *ls7a_pci_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + return &address_space_memory; +} + +static PCIBus *pci_ls7a_init(MachineState *machine, DeviceState *dev, + qemu_irq *pic) +{ + LoongarchMachineState *lsms = LoongarchMACHINE(machine); + LoongarchMachineClass *lsmc = LoongarchMACHINE_GET_CLASS(lsms); + LS7APCIEHost *pciehost = LS7A_PCIE_HOST_BRIDGE(dev); + PCIExpressHost *e; + SysBusDevice *sysbus; + PCIHostState *phb; + MemoryRegion *mmio_alias; + + e = PCIE_HOST_BRIDGE(dev); + sysbus = SYS_BUS_DEVICE(e); + phb = PCI_HOST_BRIDGE(e); + + sysbus_init_mmio(sysbus, &e->mmio); + + memory_region_init(&pciehost->io_mmio, OBJECT(pciehost), + "pciehost-mmio", UINT64_MAX); + sysbus_init_mmio(sysbus, &pciehost->io_mmio); + mmio_alias = g_new0(MemoryRegion, 1); + memory_region_init_alias(mmio_alias, OBJECT(dev), "pcie-mmio", + &pciehost->io_mmio, PCIE_MEMORY_BASE, + PCIE_MEMORY_SIZE); + 
memory_region_add_subregion(get_system_memory(), + PCIE_MEMORY_BASE, mmio_alias); + + memory_region_init(&pciehost->io_ioport, OBJECT(pciehost), + "pciehost-ioport", LS_ISA_IO_SIZE); + sysbus_init_mmio(sysbus, &pciehost->io_ioport); + + sysbus_mmio_map(sysbus, 2, LS3A5K_ISA_IO_BASE); + + + phb->bus = pci_register_root_bus(dev, "pcie.0", pci_ls7a_set_irq, + pci_ls7a_map_irq, pic, + &pciehost->io_mmio, &pciehost->io_ioport, + (1 << 3), 128, TYPE_PCIE_BUS); + /* update PCIe config memory */ + pcie_host_mmcfg_update(e, true, lsmc->pciecfg_base, LS_PCIECFG_SIZE); + + pci_bus_set_route_irq_fn(phb->bus, ls7a_route_intx_pin_to_irq); + + return phb->bus; +} + +PCIBus *ls7a_init(MachineState *machine, qemu_irq *pic, DeviceState **ls7a_dev) +{ + DeviceState *dev; + PCIHostState *phb; + LS7APCIState *pbs; + PCIDevice *pcid; + PCIBus *pci_bus; + PCIExpressHost *e; + + /* 1. init the HT PCI CFG */ + DPRINTF("------ %d\n", __LINE__); + dev = qdev_new(TYPE_LS7A_PCIE_HOST_BRIDGE); + e = PCIE_HOST_BRIDGE(dev); + phb = PCI_HOST_BRIDGE(e); + + DPRINTF("------ %d\n", __LINE__); + pci_bus = pci_ls7a_init(machine, dev, pic); + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + phb->bus = pci_bus; + /* set the pcihost pointer after rs780_pcihost_initfn is called */ + DPRINTF("------ %d\n", __LINE__); + pcid = pci_new(PCI_DEVFN(0, 0), TYPE_PCIE_LS7A); + pbs = PCIE_LS7A(pcid); + pbs->pciehost = LS7A_PCIE_HOST_BRIDGE(dev); + pbs->pciehost->pci_dev = pbs; + + if (ls7a_dev) { + *ls7a_dev = DEVICE(pcid); + } + + pci_realize_and_unref(pcid, phb->bus, &error_fatal); + + /* IOMMU */ + pci_setup_iommu(phb->bus, ls7a_pci_dma_iommu, NULL); + + ls7a_pm_init(&pbs->pm, pic); + DPRINTF("------ %d\n", __LINE__); + /* 3. init the north bridge VGA, not done for now */ + return pci_bus; +} + +LS7APCIState *get_ls7a_type(Object *obj) +{ + LS7APCIState *pbs; + + pbs = PCIE_LS7A(obj); + return pbs; +} + +static void ls7a_pcie_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); + HotplugHandlerClass *hc = HOTPLUG_HANDLER_CLASS(klass); + AcpiDeviceIfClass *adevc = ACPI_DEVICE_IF_CLASS(klass); + + k->realize = ls7a_pcie_realize; + k->vendor_id = 0x0014; + k->device_id = 0x7a00; + k->revision = 0x00; + k->class_id = PCI_CLASS_BRIDGE_HOST; + dc->desc = "LS7A1000 PCIE Host bridge"; + dc->vmsd = &vmstate_ls7a_pcie; + /* + * PCI-facing part of the host bridge, not usable without the + * host-facing part, which can't be device_add'ed, yet. 
+ */ + dc->user_creatable = false; + hc->plug = ls7a_pm_device_plug_cb; + hc->unplug_request = ls7a_pm_device_unplug_request_cb; + hc->unplug = ls7a_pm_device_unplug_cb; + adevc->ospm_status = ls7a_pm_ospm_status; + adevc->send_event = ls7a_send_gpe; + adevc->madt_cpu = ls7a_madt_cpu_entry; +} + +static void ls7a_pci_add_properties(LS7APCIState *ls7a) +{ + ls7a_pm_add_properties(OBJECT(ls7a), &ls7a->pm, NULL); +} + +static void ls7a_pci_initfn(Object *obj) +{ + LS7APCIState *ls7a = get_ls7a_type(obj); + + ls7a_pci_add_properties(ls7a); +} + +static const TypeInfo ls7a_pcie_device_info = { + .name = TYPE_PCIE_LS7A, + .parent = TYPE_PCI_DEVICE, + .instance_size = sizeof(LS7APCIState), + .class_init = ls7a_pcie_class_init, + .instance_init = ls7a_pci_initfn, + .interfaces = + (InterfaceInfo[]){ + { TYPE_HOTPLUG_HANDLER }, + { TYPE_ACPI_DEVICE_IF }, + { INTERFACE_CONVENTIONAL_PCI_DEVICE }, + {}, + }, +}; + +static void ls7a_pciehost_class_init(ObjectClass *klass, void *data) +{ + SysBusDeviceClass *k = SYS_BUS_DEVICE_CLASS(klass); + k->parent_class.fw_name = "pci"; +} + +static const TypeInfo ls7a_pciehost_info = { + .name = TYPE_LS7A_PCIE_HOST_BRIDGE, + .parent = TYPE_PCIE_HOST_BRIDGE, + .instance_size = sizeof(LS7APCIEHost), + .class_init = ls7a_pciehost_class_init, +}; + +static void ls7a_register_types(void) +{ + type_register_static(&ls7a_pciehost_info); + type_register_static(&ls7a_pcie_device_info); +} + +type_init(ls7a_register_types) diff --git a/hw/loongarch/meson.build b/hw/loongarch/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..ca4d5567b5d35b52365494cd430759b37cb7ede2 --- /dev/null +++ b/hw/loongarch/meson.build @@ -0,0 +1,15 @@ +loongarch_ss = ss.source_set() +loongarch_ss.add(files('larch_3a.c'), fdt) +loongarch_ss.add(files( + 'larch_int.c', + 'larch_hotplug.c', + 'ls7a_nb.c', + 'ioapic.c', + 'acpi-build.c', + 'ipi.c', + 'apic.c', + 'iocsr.c', + 'sysbus-fdt.c', +)) + +hw_arch += {'loongarch64': loongarch_ss} diff --git a/hw/loongarch/sysbus-fdt.c b/hw/loongarch/sysbus-fdt.c new file mode 100644 index 0000000000000000000000000000000000000000..05b4dda33a05f9ee024b1d57cd76f0cbdfe1d7b2 --- /dev/null +++ b/hw/loongarch/sysbus-fdt.c @@ -0,0 +1,178 @@ +/* + * Loongarch Platform Bus device tree generation helpers + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include <libfdt.h> +#include "qemu/error-report.h" +#include "sysemu/device_tree.h" +#include "hw/platform-bus.h" +#include "hw/display/ramfb.h" +#include "hw/loongarch/sysbus-fdt.h" +#include "sysemu/tpm.h" + +/* + * internal struct that contains the information to create dynamic + * sysbus device node + */ +typedef struct PlatformBusFDTData { + void *fdt; /* device tree handle */ + int irq_start; /* index of the first IRQ usable by platform bus devices */ + const char *pbus_node_name; /* name of the platform bus node */ + PlatformBusDevice *pbus; +} PlatformBusFDTData; + +/* struct that allows to match a device and create its FDT node */ +typedef struct BindingEntry { + const char *typename; + const char *compat; + int (*add_fn)(SysBusDevice *sbdev, void *opaque); + bool (*match_fn)(SysBusDevice *sbdev, const struct BindingEntry *combo); +} BindingEntry; + +static int no_fdt_node(SysBusDevice *sbdev, void *opaque) +{ + return 0; +} + +/* Device type based matching */ +static bool type_match(SysBusDevice *sbdev, const BindingEntry *entry) +{ + return !strcmp(object_get_typename(OBJECT(sbdev)), entry->typename); +} + +#define TYPE_BINDING(type, add_fn) \ + { \ + (type), NULL, (add_fn), NULL \ + } + +#ifdef CONFIG_TPM +/* + * add_tpm_tis_fdt_node: Create a DT node for TPM TIS + * + * See kernel documentation: + * Documentation/devicetree/bindings/security/tpm/tpm_tis_mmio.txt + * Optional interrupt for command completion is not exposed + */ +static int add_tpm_tis_fdt_node(SysBusDevice *sbdev, void *opaque) +{ + PlatformBusFDTData *data = opaque; + PlatformBusDevice *pbus = data->pbus; + void *fdt = data->fdt; + const char *parent_node = data->pbus_node_name; + char *nodename; + uint32_t reg_attr[2]; + uint64_t mmio_base; + + mmio_base = platform_bus_get_mmio_addr(pbus, sbdev, 0); + nodename = g_strdup_printf("%s/tpm_tis@%" PRIx64, parent_node, mmio_base); + qemu_fdt_add_subnode(fdt, nodename); + + qemu_fdt_setprop_string(fdt, nodename, "compatible", "tcg,tpm-tis-mmio"); + + reg_attr[0] = cpu_to_be32(mmio_base); + reg_attr[1] = cpu_to_be32(0x5000); + qemu_fdt_setprop(fdt, nodename, "reg", reg_attr, 2 * sizeof(uint32_t)); + + g_free(nodename); + + return 0; +} +#endif + +/* list of supported dynamic sysbus bindings */ +static const BindingEntry bindings[] = { +#ifdef CONFIG_TPM + TYPE_BINDING(TYPE_TPM_TIS_SYSBUS, add_tpm_tis_fdt_node), +#endif + TYPE_BINDING(TYPE_RAMFB_DEVICE, no_fdt_node), + TYPE_BINDING("", NULL), /* last element */ +}; + +/** + * add_fdt_node - add the device tree node of a dynamic sysbus device + * + * @sbdev: handle to the sysbus device + * @opaque: handle to the PlatformBusFDTData + * + * Checks whether the sysbus device type belongs to the list of + * dynamically instantiable device types and, if so, calls the node + * creation function. 
+ */ +static void add_fdt_node(SysBusDevice *sbdev, void *opaque) +{ + int i, ret; + + for (i = 0; i < ARRAY_SIZE(bindings); i++) { + const BindingEntry *iter = &bindings[i]; + + if (type_match(sbdev, iter)) { + if (!iter->match_fn || iter->match_fn(sbdev, iter)) { + ret = iter->add_fn(sbdev, opaque); + assert(!ret); + return; + } + } + } + error_report("Device %s can not be dynamically instantiated", + qdev_fw_name(DEVICE(sbdev))); + exit(1); +} + +void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr, + hwaddr bus_size, int irq_start) +{ + const char platcomp[] = "qemu,platform\0simple-bus"; + PlatformBusDevice *pbus; + DeviceState *dev; + gchar *node; + + assert(fdt); + + node = g_strdup_printf("/platform@%" PRIx64, addr); + + /* Create a /platform node that we can put all devices into */ + qemu_fdt_add_subnode(fdt, node); + qemu_fdt_setprop(fdt, node, "compatible", platcomp, sizeof(platcomp)); + + /* + * Our platform bus region is less than 32bits, so 1 cell is enough for + * address and size + */ + qemu_fdt_setprop_cells(fdt, node, "#size-cells", 1); + qemu_fdt_setprop_cells(fdt, node, "#address-cells", 1); + qemu_fdt_setprop_cells(fdt, node, "ranges", 0, addr >> 32, addr, bus_size); + if (intc != NULL) { + qemu_fdt_setprop_phandle(fdt, node, "interrupt-parent", intc); + } + dev = qdev_find_recursive(sysbus_get_default(), TYPE_PLATFORM_BUS_DEVICE); + pbus = PLATFORM_BUS_DEVICE(dev); + + PlatformBusFDTData data = { + .fdt = fdt, + .irq_start = irq_start, + .pbus_node_name = node, + .pbus = pbus, + }; + + /* Loop through all dynamic sysbus devices and create their node */ + foreach_dynamic_sysbus_device(add_fdt_node, &data); + + g_free(node); +} diff --git a/hw/mem/nvdimm.c b/hw/mem/nvdimm.c index 7397b671565276ac4b88644944d75e6381f7eab3..8df1d7e088da936a4e9f25f18bbbb8e867b41dbf 100644 --- a/hw/mem/nvdimm.c +++ b/hw/mem/nvdimm.c @@ -149,7 +149,7 @@ static void nvdimm_prepare_memory_region(NVDIMMDevice *nvdimm, Error **errp) if (!nvdimm->unarmed && memory_region_is_rom(mr)) { HostMemoryBackend *hostmem = dimm->hostmem; - error_setg(errp, "'unarmed' property must be off since memdev %s " + error_setg(errp, "'unarmed' property must be 'on' since memdev %s " "is read-only", object_get_canonical_path_component(OBJECT(hostmem))); return; diff --git a/hw/meson.build b/hw/meson.build index b3366c888ef61b3093091c260d23926c0524ca6e..a9a078ec3392aed25eb253a6a1a67821eff4b0fc 100644 --- a/hw/meson.build +++ b/hw/meson.build @@ -17,6 +17,7 @@ subdir('intc') subdir('ipack') subdir('ipmi') subdir('isa') +subdir('loongarch') subdir('mem') subdir('misc') subdir('net') @@ -62,5 +63,6 @@ subdir('s390x') subdir('sh4') subdir('sparc') subdir('sparc64') +subdir('sw64') subdir('tricore') subdir('xtensa') diff --git a/hw/microblaze/boot.c b/hw/microblaze/boot.c index 8821d009f1a9c59bd170d0e8c8ce2ea9921980df..26f315aaeaa8baab62560303ab2619be1d8fdeea 100644 --- a/hw/microblaze/boot.c +++ b/hw/microblaze/boot.c @@ -31,6 +31,7 @@ #include "qemu/option.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/guest-random.h" #include "sysemu/device_tree.h" #include "sysemu/reset.h" #include "hw/boards.h" @@ -76,6 +77,7 @@ static int microblaze_load_dtb(hwaddr addr, int fdt_size; void *fdt = NULL; int r; + uint8_t rng_seed[32]; if (dtb_filename) { fdt = load_device_tree(dtb_filename, &fdt_size); @@ -84,6 +86,9 @@ static int microblaze_load_dtb(hwaddr addr, return 0; } + qemu_guest_getrandom_nofail(rng_seed, sizeof(rng_seed)); + qemu_fdt_setprop(fdt, "/chosen", "rng-seed", 
rng_seed, sizeof(rng_seed)); + if (kernel_cmdline) { r = qemu_fdt_setprop_string(fdt, "/chosen", "bootargs", kernel_cmdline); diff --git a/hw/mips/meson.build b/hw/mips/meson.build index dd0101ad4d832be71a4a82c0b8bc865e1114d675..1e57c633748d8a5b949abc0fe47e54d505e92daa 100644 --- a/hw/mips/meson.build +++ b/hw/mips/meson.build @@ -1,6 +1,6 @@ mips_ss = ss.source_set() mips_ss.add(files('bootloader.c', 'mips_int.c')) -mips_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c')) +common_ss.add(when: 'CONFIG_FW_CFG_MIPS', if_true: files('fw_cfg.c')) mips_ss.add(when: 'CONFIG_LOONGSON3V', if_true: files('loongson3_bootp.c', 'loongson3_virt.c')) mips_ss.add(when: 'CONFIG_MALTA', if_true: files('gt64xxx_pci.c', 'malta.c')) mips_ss.add(when: 'CONFIG_MIPS_CPS', if_true: files('cps.c')) diff --git a/hw/misc/applesmc.c b/hw/misc/applesmc.c index 1b9acaf1d38de119e8a88ae34d0d8d54addff0f1..e17229025fe7955e10589ad0a64d73e24a226a41 100644 --- a/hw/misc/applesmc.c +++ b/hw/misc/applesmc.c @@ -269,6 +269,7 @@ static void qdev_applesmc_isa_reset(DeviceState *dev) /* Remove existing entries */ QLIST_FOREACH_SAFE(d, &s->data_def, node, next) { QLIST_REMOVE(d, node); + g_free(d); } s->status = 0x00; s->status_1e = 0x00; diff --git a/hw/misc/aspeed_hace.c b/hw/misc/aspeed_hace.c index 10f00e65f4e1c9d7b2b902e6165e11550b48e12e..1192322f0a568e7fefbf7ddbcc308ca4416ec27b 100644 --- a/hw/misc/aspeed_hace.c +++ b/hw/misc/aspeed_hace.c @@ -97,7 +97,7 @@ static int hash_algo_lookup(uint32_t reg) static void do_hash_operation(AspeedHACEState *s, int algo, bool sg_mode) { struct iovec iov[ASPEED_HACE_MAX_SG]; - g_autofree uint8_t *digest_buf; + g_autofree uint8_t *digest_buf = NULL; size_t digest_len = 0; int i; diff --git a/hw/misc/bcm2835_property.c b/hw/misc/bcm2835_property.c index 73941bdae977d65abc457c99fb23883b0bb331a2..76ea511d53d0e783623861eebf08d901e6c8ce89 100644 --- a/hw/misc/bcm2835_property.c +++ b/hw/misc/bcm2835_property.c @@ -69,7 +69,8 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value) break; case 0x00010003: /* Get board MAC address */ resplen = sizeof(s->macaddr.a); - dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen); + dma_memory_write(&s->dma_as, value + 12, s->macaddr.a, resplen, + MEMTXATTRS_UNSPECIFIED); break; case 0x00010004: /* Get board serial */ qemu_log_mask(LOG_UNIMP, diff --git a/hw/misc/edu.c b/hw/misc/edu.c index e935c418d40066b600a434c3bd9b04babbad96ea..a9f6421aa5f8961e5ff82926b4a9edc4643c3e70 100644 --- a/hw/misc/edu.c +++ b/hw/misc/edu.c @@ -115,7 +115,7 @@ static void edu_check_range(uint64_t addr, uint64_t size1, uint64_t start, uint64_t end2 = start + size2; if (within(addr, start, end2) && - end1 > addr && within(end1, start, end2)) { + end1 > addr && end1 <= end2) { return; } diff --git a/hw/misc/imx_rngc.c b/hw/misc/imx_rngc.c index 632c03779cbbf4bab04b138150bf66de809639d4..082c6980ad541db2fec2d72f3db048e9439aa124 100644 --- a/hw/misc/imx_rngc.c +++ b/hw/misc/imx_rngc.c @@ -228,8 +228,10 @@ static void imx_rngc_realize(DeviceState *dev, Error **errp) sysbus_init_mmio(sbd, &s->iomem); sysbus_init_irq(sbd, &s->irq); - s->self_test_bh = qemu_bh_new(imx_rngc_self_test, s); - s->seed_bh = qemu_bh_new(imx_rngc_seed, s); + s->self_test_bh = qemu_bh_new_guarded(imx_rngc_self_test, s, + &dev->mem_reentrancy_guard); + s->seed_bh = qemu_bh_new_guarded(imx_rngc_seed, s, + &dev->mem_reentrancy_guard); } static void imx_rngc_reset(DeviceState *dev) diff --git a/hw/misc/ivshmem.c b/hw/misc/ivshmem.c index 
1ba4a98377c6bfc12fd31d3afd4d1c447c2be8b2..4093c59079f2cc77ee59f5437ce746de2509a4f7 100644 --- a/hw/misc/ivshmem.c +++ b/hw/misc/ivshmem.c @@ -243,7 +243,7 @@ static uint64_t ivshmem_io_read(void *opaque, hwaddr addr, static const MemoryRegionOps ivshmem_mmio_ops = { .read = ivshmem_io_read, .write = ivshmem_io_write, - .endianness = DEVICE_NATIVE_ENDIAN, + .endianness = DEVICE_LITTLE_ENDIAN, .impl = { .min_access_size = 4, .max_access_size = 4, @@ -400,6 +400,7 @@ static void close_peer_eventfds(IVShmemState *s, int posn) } g_free(s->peers[posn].eventfds); + s->peers[posn].eventfds = NULL; s->peers[posn].nb_eventfds = 0; } @@ -530,6 +531,10 @@ static void process_msg_connect(IVShmemState *s, uint16_t posn, int fd, close(fd); return; } + if (peer->eventfds == NULL) { + peer->eventfds = g_new0(EventNotifier, s->vectors); + peer->nb_eventfds = 0; + } vector = peer->nb_eventfds++; IVSHMEM_DPRINTF("eventfds[%d][%d] = %d\n", posn, vector, fd); diff --git a/hw/misc/macio/mac_dbdma.c b/hw/misc/macio/mac_dbdma.c index e220f1a92771dc9378bc2f61174701b2e14d709f..cc7e02203d699c31c88a21c0b91354e8fd1f8d10 100644 --- a/hw/misc/macio/mac_dbdma.c +++ b/hw/misc/macio/mac_dbdma.c @@ -94,7 +94,7 @@ static void dbdma_cmdptr_load(DBDMA_channel *ch) DBDMA_DPRINTFCH(ch, "dbdma_cmdptr_load 0x%08x\n", ch->regs[DBDMA_CMDPTR_LO]); dma_memory_read(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], - &ch->current, sizeof(dbdma_cmd)); + &ch->current, sizeof(dbdma_cmd), MEMTXATTRS_UNSPECIFIED); } static void dbdma_cmdptr_save(DBDMA_channel *ch) @@ -104,7 +104,7 @@ static void dbdma_cmdptr_save(DBDMA_channel *ch) le16_to_cpu(ch->current.xfer_status), le16_to_cpu(ch->current.res_count)); dma_memory_write(&address_space_memory, ch->regs[DBDMA_CMDPTR_LO], - &ch->current, sizeof(dbdma_cmd)); + &ch->current, sizeof(dbdma_cmd), MEMTXATTRS_UNSPECIFIED); } static void kill_channel(DBDMA_channel *ch) @@ -371,7 +371,8 @@ static void load_word(DBDMA_channel *ch, int key, uint32_t addr, return; } - dma_memory_read(&address_space_memory, addr, &current->cmd_dep, len); + dma_memory_read(&address_space_memory, addr, &current->cmd_dep, len, + MEMTXATTRS_UNSPECIFIED); if (conditional_wait(ch)) goto wait; @@ -403,7 +404,8 @@ static void store_word(DBDMA_channel *ch, int key, uint32_t addr, return; } - dma_memory_write(&address_space_memory, addr, &current->cmd_dep, len); + dma_memory_write(&address_space_memory, addr, &current->cmd_dep, len, + MEMTXATTRS_UNSPECIFIED); if (conditional_wait(ch)) goto wait; @@ -912,7 +914,7 @@ static void mac_dbdma_realize(DeviceState *dev, Error **errp) { DBDMAState *s = MAC_DBDMA(dev); - s->bh = qemu_bh_new(DBDMA_run_bh, s); + s->bh = qemu_bh_new_guarded(DBDMA_run_bh, s, &dev->mem_reentrancy_guard); } static void mac_dbdma_class_init(ObjectClass *oc, void *data) diff --git a/hw/misc/mps2-scc.c b/hw/misc/mps2-scc.c index b3b42a792cd23e91d8ac93c912719f58fdc5edad..fe5034db1404a0a759cf7f3dfe5742a4dd4d779d 100644 --- a/hw/misc/mps2-scc.c +++ b/hw/misc/mps2-scc.c @@ -329,6 +329,13 @@ static void mps2_scc_realize(DeviceState *dev, Error **errp) s->oscclk = g_new0(uint32_t, s->num_oscclk); } +static void mps2_scc_finalize(Object *obj) +{ + MPS2SCC *s = MPS2_SCC(obj); + + g_free(s->oscclk_reset); +} + static const VMStateDescription mps2_scc_vmstate = { .name = "mps2-scc", .version_id = 3, @@ -385,6 +392,7 @@ static const TypeInfo mps2_scc_info = { .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(MPS2SCC), .instance_init = mps2_scc_init, + .instance_finalize = mps2_scc_finalize, .class_init = mps2_scc_class_init, }; diff --git 
a/hw/misc/nrf51_rng.c b/hw/misc/nrf51_rng.c index fc86e1b697905d2ae2fb6c5c69a54181a53b06f0..e911b3a3a30114309f1869e4f8e33774c1f072c1 100644 --- a/hw/misc/nrf51_rng.c +++ b/hw/misc/nrf51_rng.c @@ -107,25 +107,25 @@ static void rng_write(void *opaque, hwaddr offset, break; case NRF51_RNG_REG_SHORTS: s->shortcut_stop_on_valrdy = - (value & BIT_MASK(NRF51_RNG_REG_SHORTS_VALRDY_STOP)) ? 1 : 0; + (value & BIT(NRF51_RNG_REG_SHORTS_VALRDY_STOP)) ? 1 : 0; break; case NRF51_RNG_REG_INTEN: s->interrupt_enabled = - (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) ? 1 : 0; + (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) ? 1 : 0; break; case NRF51_RNG_REG_INTENSET: - if (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) { + if (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) { s->interrupt_enabled = 1; } break; case NRF51_RNG_REG_INTENCLR: - if (value & BIT_MASK(NRF51_RNG_REG_INTEN_VALRDY)) { + if (value & BIT(NRF51_RNG_REG_INTEN_VALRDY)) { s->interrupt_enabled = 0; } break; case NRF51_RNG_REG_CONFIG: s->filter_enabled = - (value & BIT_MASK(NRF51_RNG_REG_CONFIG_DECEN)) ? 1 : 0; + (value & BIT(NRF51_RNG_REG_CONFIG_DECEN)) ? 1 : 0; break; default: diff --git a/hw/net/allwinner-sun8i-emac.c b/hw/net/allwinner-sun8i-emac.c index ff611f18fbd5bb9250c0633eaf66b635297a16d5..cf93b2fdacb74828403f14d029805c1b138730cd 100644 --- a/hw/net/allwinner-sun8i-emac.c +++ b/hw/net/allwinner-sun8i-emac.c @@ -350,7 +350,8 @@ static void allwinner_sun8i_emac_get_desc(AwSun8iEmacState *s, FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc)); + dma_memory_read(&s->dma_as, phys_addr, desc, sizeof(*desc), + MEMTXATTRS_UNSPECIFIED); } static uint32_t allwinner_sun8i_emac_next_desc(AwSun8iEmacState *s, @@ -402,7 +403,8 @@ static void allwinner_sun8i_emac_flush_desc(AwSun8iEmacState *s, FrameDescriptor *desc, uint32_t phys_addr) { - dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc)); + dma_memory_write(&s->dma_as, phys_addr, desc, sizeof(*desc), + MEMTXATTRS_UNSPECIFIED); } static bool allwinner_sun8i_emac_can_receive(NetClientState *nc) @@ -460,7 +462,8 @@ static ssize_t allwinner_sun8i_emac_receive(NetClientState *nc, << RX_DESC_STATUS_FRM_LEN_SHIFT; } - dma_memory_write(&s->dma_as, desc.addr, buf, desc_bytes); + dma_memory_write(&s->dma_as, desc.addr, buf, desc_bytes, + MEMTXATTRS_UNSPECIFIED); allwinner_sun8i_emac_flush_desc(s, &desc, s->rx_desc_curr); trace_allwinner_sun8i_emac_receive(s->rx_desc_curr, desc.addr, desc_bytes); @@ -512,7 +515,8 @@ static void allwinner_sun8i_emac_transmit(AwSun8iEmacState *s) desc.status |= TX_DESC_STATUS_LENGTH_ERR; break; } - dma_memory_read(&s->dma_as, desc.addr, packet_buf + packet_bytes, bytes); + dma_memory_read(&s->dma_as, desc.addr, packet_buf + packet_bytes, + bytes, MEMTXATTRS_UNSPECIFIED); packet_bytes += bytes; desc.status &= ~DESC_STATUS_CTL; allwinner_sun8i_emac_flush_desc(s, &desc, s->tx_desc_curr); @@ -634,7 +638,8 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_TX_CUR_BUF: /* Transmit Current Buffer */ if (s->tx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc)); + dma_memory_read(&s->dma_as, s->tx_desc_curr, &desc, sizeof(desc), + MEMTXATTRS_UNSPECIFIED); value = desc.addr; } else { value = 0; @@ -647,7 +652,8 @@ static uint64_t allwinner_sun8i_emac_read(void *opaque, hwaddr offset, break; case REG_RX_CUR_BUF: /* Receive Current Buffer */ if (s->rx_desc_curr != 0) { - dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc)); + 
dma_memory_read(&s->dma_as, s->rx_desc_curr, &desc, sizeof(desc), + MEMTXATTRS_UNSPECIFIED); value = desc.addr; } else { value = 0; @@ -810,7 +816,8 @@ static void allwinner_sun8i_emac_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_allwinner_sun8i_emac_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/allwinner_emac.c b/hw/net/allwinner_emac.c index ddddf35c45d156d57a26356df630900d39463dfc..b3d73143bf10e282bad1cb99624eaff6cecb5cc2 100644 --- a/hw/net/allwinner_emac.c +++ b/hw/net/allwinner_emac.c @@ -453,7 +453,8 @@ static void aw_emac_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_aw_emac_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); fifo8_create(&s->rx_fifo, RX_FIFO_SIZE); diff --git a/hw/net/cadence_gem.c b/hw/net/cadence_gem.c index 24b3a0ff667b5e3228a777e8ee827c784366dee8..bd7585c0180a2a7c7d739633d98cc2d349cda575 100644 --- a/hw/net/cadence_gem.c +++ b/hw/net/cadence_gem.c @@ -81,8 +81,8 @@ #define GEM_IPGSTRETCH (0x000000BC / 4) /* IPG Stretch reg */ #define GEM_SVLAN (0x000000C0 / 4) /* Stacked VLAN reg */ #define GEM_MODID (0x000000FC / 4) /* Module ID reg */ -#define GEM_OCTTXLO (0x00000100 / 4) /* Octects transmitted Low reg */ -#define GEM_OCTTXHI (0x00000104 / 4) /* Octects transmitted High reg */ +#define GEM_OCTTXLO (0x00000100 / 4) /* Octets transmitted Low reg */ +#define GEM_OCTTXHI (0x00000104 / 4) /* Octets transmitted High reg */ #define GEM_TXCNT (0x00000108 / 4) /* Error-free Frames transmitted */ #define GEM_TXBCNT (0x0000010C / 4) /* Error-free Broadcast Frames */ #define GEM_TXMCNT (0x00000110 / 4) /* Error-free Multicast Frame */ @@ -101,8 +101,8 @@ #define GEM_LATECOLLCNT (0x00000144 / 4) /* Late Collision Frames */ #define GEM_DEFERTXCNT (0x00000148 / 4) /* Deferred Transmission Frames */ #define GEM_CSENSECNT (0x0000014C / 4) /* Carrier Sense Error Counter */ -#define GEM_OCTRXLO (0x00000150 / 4) /* Octects Received register Low */ -#define GEM_OCTRXHI (0x00000154 / 4) /* Octects Received register High */ +#define GEM_OCTRXLO (0x00000150 / 4) /* Octets Received register Low */ +#define GEM_OCTRXHI (0x00000154 / 4) /* Octets Received register High */ #define GEM_RXCNT (0x00000158 / 4) /* Error-free Frames Received */ #define GEM_RXBROADCNT (0x0000015C / 4) /* Error-free Broadcast Frames RX */ #define GEM_RXMULTICNT (0x00000160 / 4) /* Error-free Multicast Frames RX */ @@ -1633,7 +1633,8 @@ static void gem_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_gem_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); if (s->jumbo_max_len > MAX_FRAME_SIZE) { error_setg(errp, "jumbo-max-len is greater than %d", diff --git a/hw/net/can/can_sja1000.c b/hw/net/can/can_sja1000.c index 34eea684ced278738bdb26327100b285fa941bd0..5eefb8407e87d28c2fa98e36a8edd7e655a9dade 100644 --- a/hw/net/can/can_sja1000.c +++ b/hw/net/can/can_sja1000.c @@ -108,7 +108,7 @@ void can_sja_single_filter(struct qemu_can_filter *filter, } filter->can_mask = 
(uint32_t)amr[0] << 3; - filter->can_mask |= (uint32_t)amr[1] << 5; + filter->can_mask |= (uint32_t)amr[1] >> 5; filter->can_mask = ~filter->can_mask & QEMU_CAN_SFF_MASK; if (!(amr[1] & 0x10)) { filter->can_mask |= QEMU_CAN_RTR_FLAG; diff --git a/hw/net/can/xlnx-zynqmp-can.c b/hw/net/can/xlnx-zynqmp-can.c index 22bb8910fa8c1ffaf5a44631b835938eff83d775..78a76a8ce24686b02ea59307288fe5c44c01359c 100644 --- a/hw/net/can/xlnx-zynqmp-can.c +++ b/hw/net/can/xlnx-zynqmp-can.c @@ -696,30 +696,30 @@ static void update_rx_fifo(XlnxZynqMPCANState *s, const qemu_can_frame *frame) timestamp)); /* First 32 bit of the data. */ - fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA1_DB3_SHIFT, - R_TXFIFO_DATA1_DB3_LENGTH, + fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DATA1_DB3_SHIFT, + R_RXFIFO_DATA1_DB3_LENGTH, frame->data[0]) | - deposit32(0, R_TXFIFO_DATA1_DB2_SHIFT, - R_TXFIFO_DATA1_DB2_LENGTH, + deposit32(0, R_RXFIFO_DATA1_DB2_SHIFT, + R_RXFIFO_DATA1_DB2_LENGTH, frame->data[1]) | - deposit32(0, R_TXFIFO_DATA1_DB1_SHIFT, - R_TXFIFO_DATA1_DB1_LENGTH, + deposit32(0, R_RXFIFO_DATA1_DB1_SHIFT, + R_RXFIFO_DATA1_DB1_LENGTH, frame->data[2]) | - deposit32(0, R_TXFIFO_DATA1_DB0_SHIFT, - R_TXFIFO_DATA1_DB0_LENGTH, + deposit32(0, R_RXFIFO_DATA1_DB0_SHIFT, + R_RXFIFO_DATA1_DB0_LENGTH, frame->data[3])); /* Last 32 bit of the data. */ - fifo32_push(&s->rx_fifo, deposit32(0, R_TXFIFO_DATA2_DB7_SHIFT, - R_TXFIFO_DATA2_DB7_LENGTH, + fifo32_push(&s->rx_fifo, deposit32(0, R_RXFIFO_DATA2_DB7_SHIFT, + R_RXFIFO_DATA2_DB7_LENGTH, frame->data[4]) | - deposit32(0, R_TXFIFO_DATA2_DB6_SHIFT, - R_TXFIFO_DATA2_DB6_LENGTH, + deposit32(0, R_RXFIFO_DATA2_DB6_SHIFT, + R_RXFIFO_DATA2_DB6_LENGTH, frame->data[5]) | - deposit32(0, R_TXFIFO_DATA2_DB5_SHIFT, - R_TXFIFO_DATA2_DB5_LENGTH, + deposit32(0, R_RXFIFO_DATA2_DB5_SHIFT, + R_RXFIFO_DATA2_DB5_LENGTH, frame->data[6]) | - deposit32(0, R_TXFIFO_DATA2_DB4_SHIFT, - R_TXFIFO_DATA2_DB4_LENGTH, + deposit32(0, R_RXFIFO_DATA2_DB4_SHIFT, + R_RXFIFO_DATA2_DB4_LENGTH, frame->data[7])); ARRAY_FIELD_DP32(s->regs, INTERRUPT_STATUS_REGISTER, RXOK, 1); diff --git a/hw/net/dp8393x.c b/hw/net/dp8393x.c index 45b954e46c25987ee3604e00ab277ba05b938d5e..abfcc6f69fee19554372098896c950c8dd7ab05b 100644 --- a/hw/net/dp8393x.c +++ b/hw/net/dp8393x.c @@ -943,7 +943,8 @@ static void dp8393x_realize(DeviceState *dev, Error **errp) "dp8393x-regs", SONIC_REG_COUNT << s->it_shift); s->nic = qemu_new_nic(&net_dp83932_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); s->watchdog = timer_new_ns(QEMU_CLOCK_VIRTUAL, dp8393x_watchdog, s); diff --git a/hw/net/e1000.c b/hw/net/e1000.c index f5bc81296d1a07f65176a7f71b22ea851fae4879..33cd33a8ff5cfd94570147e3bd38dcfc286e566a 100644 --- a/hw/net/e1000.c +++ b/hw/net/e1000.c @@ -979,7 +979,7 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) base = rx_desc_base(s) + sizeof(desc) * s->mac_reg[RDH]; pci_dma_read(d, base, &desc, sizeof(desc)); desc.special = vlan_special; - desc.status |= (vlan_status | E1000_RXD_STAT_DD); + desc.status &= ~E1000_RXD_STAT_DD; if (desc.buffer_addr) { if (desc_offset < size) { size_t iov_copy; @@ -1013,6 +1013,9 @@ e1000_receive_iov(NetClientState *nc, const struct iovec *iov, int iovcnt) DBGOUT(RX, "Null RX descriptor!!\n"); } pci_dma_write(d, base, &desc, sizeof(desc)); + desc.status |= (vlan_status | E1000_RXD_STAT_DD); + pci_dma_write(d, base + offsetof(struct 
e1000_rx_desc, status), + &desc.status, sizeof(desc.status)); if (++s->mac_reg[RDH] * sizeof(desc) >= s->mac_reg[RDLEN]) s->mac_reg[RDH] = 0; @@ -1733,7 +1736,8 @@ static void pci_e1000_realize(PCIDevice *pci_dev, Error **errp) macaddr); d->nic = qemu_new_nic(&net_e1000_info, &d->conf, - object_get_typename(OBJECT(d)), dev->id, d); + object_get_typename(OBJECT(d)), dev->id, + &dev->mem_reentrancy_guard, d); qemu_format_nic_info_str(qemu_get_queue(d->nic), macaddr); diff --git a/hw/net/e1000e.c b/hw/net/e1000e.c index ac96f7665afcd50c75400fca0963869018022ceb..b6e9b0e178485905d545d8c0dcae1d4361316c34 100644 --- a/hw/net/e1000e.c +++ b/hw/net/e1000e.c @@ -328,7 +328,7 @@ e1000e_init_net_peer(E1000EState *s, PCIDevice *pci_dev, uint8_t *macaddr) int i; s->nic = qemu_new_nic(&net_e1000e_info, &s->conf, - object_get_typename(OBJECT(s)), dev->id, s); + object_get_typename(OBJECT(s)), dev->id, &dev->mem_reentrancy_guard, s); s->core.max_queue_num = s->conf.peers.queues ? s->conf.peers.queues - 1 : 0; diff --git a/hw/net/e1000e_core.c b/hw/net/e1000e_core.c index 8ae6fb7e1459f31a8b6649cd5e374c3009ed7ae3..32c3507795ed55c8916e42a1227be59fcab515db 100644 --- a/hw/net/e1000e_core.c +++ b/hw/net/e1000e_core.c @@ -1620,15 +1620,16 @@ e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt) } } +/* Min. octets in an ethernet frame sans FCS */ +#define MIN_BUF_SIZE 60 + ssize_t e1000e_receive_iov(E1000ECore *core, const struct iovec *iov, int iovcnt) { static const int maximum_ethernet_hdr_len = (14 + 4); - /* Min. octets in an ethernet frame sans FCS */ - static const int min_buf_size = 60; uint32_t n = 0; - uint8_t min_buf[min_buf_size]; + uint8_t min_buf[MIN_BUF_SIZE]; struct iovec min_iov; uint8_t *filter_buf; size_t size, orig_size; diff --git a/hw/net/eepro100.c b/hw/net/eepro100.c index 16e95ef9cc99290d0576e8eef21d313eeb4e08f8..074d54ec97355d7b8d45d1a2cc737ef5ee373623 100644 --- a/hw/net/eepro100.c +++ b/hw/net/eepro100.c @@ -279,6 +279,9 @@ typedef struct { /* Quasi static device properties (no need to save them). */ uint16_t stats_size; bool has_extended_tcb_support; + + /* Flag to avoid recursions. */ + bool busy; } EEPRO100State; /* Word indices in EEPROM. */ @@ -700,6 +703,8 @@ static void set_ru_state(EEPRO100State * s, ru_state_t state) static void dump_statistics(EEPRO100State * s) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; + /* Dump statistical data. Most data is never changed by the emulation * and always 0, so we first just copy the whole block and then those * values which really matter. 
@@ -707,16 +712,18 @@ static void dump_statistics(EEPRO100State * s) */ pci_dma_write(&s->dev, s->statsaddr, &s->statistics, s->stats_size); stl_le_pci_dma(&s->dev, s->statsaddr + 0, - s->statistics.tx_good_frames); + s->statistics.tx_good_frames, attrs); stl_le_pci_dma(&s->dev, s->statsaddr + 36, - s->statistics.rx_good_frames); + s->statistics.rx_good_frames, attrs); stl_le_pci_dma(&s->dev, s->statsaddr + 48, - s->statistics.rx_resource_errors); + s->statistics.rx_resource_errors, attrs); stl_le_pci_dma(&s->dev, s->statsaddr + 60, - s->statistics.rx_short_frame_errors); + s->statistics.rx_short_frame_errors, attrs); #if 0 - stw_le_pci_dma(&s->dev, s->statsaddr + 76, s->statistics.xmt_tco_frames); - stw_le_pci_dma(&s->dev, s->statsaddr + 78, s->statistics.rcv_tco_frames); + stw_le_pci_dma(&s->dev, s->statsaddr + 76, + s->statistics.xmt_tco_frames, attrs); + stw_le_pci_dma(&s->dev, s->statsaddr + 78, + s->statistics.rcv_tco_frames, attrs); missing("CU dump statistical counters"); #endif } @@ -733,6 +740,7 @@ static void read_cb(EEPRO100State *s) static void tx_command(EEPRO100State *s) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; uint32_t tbd_array = s->tx.tbd_array_addr; uint16_t tcb_bytes = s->tx.tcb_bytes & 0x3fff; /* Sends larger than MAX_ETH_FRAME_SIZE are allowed, up to 2600 bytes. */ @@ -764,15 +772,16 @@ static void tx_command(EEPRO100State *s) } else { /* Flexible mode. */ uint8_t tbd_count = 0; + uint32_t tx_buffer_address; + uint16_t tx_buffer_size; + uint16_t tx_buffer_el; + if (s->has_extended_tcb_support && !(s->configuration[6] & BIT(4))) { /* Extended Flexible TCB. */ for (; tbd_count < 2; tbd_count++) { - uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, - tbd_address); - uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, - tbd_address + 4); - uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, - tbd_address + 6); + ldl_le_pci_dma(&s->dev, tbd_address, &tx_buffer_address, attrs); + lduw_le_pci_dma(&s->dev, tbd_address + 4, &tx_buffer_size, attrs); + lduw_le_pci_dma(&s->dev, tbd_address + 6, &tx_buffer_el, attrs); tbd_address += 8; TRACE(RXTX, logout ("TBD (extended flexible mode): buffer address 0x%08x, size 0x%04x\n", @@ -788,9 +797,9 @@ static void tx_command(EEPRO100State *s) } tbd_address = tbd_array; for (; tbd_count < s->tx.tbd_count; tbd_count++) { - uint32_t tx_buffer_address = ldl_le_pci_dma(&s->dev, tbd_address); - uint16_t tx_buffer_size = lduw_le_pci_dma(&s->dev, tbd_address + 4); - uint16_t tx_buffer_el = lduw_le_pci_dma(&s->dev, tbd_address + 6); + ldl_le_pci_dma(&s->dev, tbd_address, &tx_buffer_address, attrs); + lduw_le_pci_dma(&s->dev, tbd_address + 4, &tx_buffer_size, attrs); + lduw_le_pci_dma(&s->dev, tbd_address + 6, &tx_buffer_el, attrs); tbd_address += 8; TRACE(RXTX, logout ("TBD (flexible mode): buffer address 0x%08x, size 0x%04x\n", @@ -833,10 +842,19 @@ static void set_multicast_list(EEPRO100State *s) static void action_command(EEPRO100State *s) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; /* The loop below won't stop if it gets special handcrafted data. Therefore we limit the number of iterations. */ unsigned max_loop_count = 16; + if (s->busy) { + /* Prevent recursions. */ + logout("recursion in %s:%u\n", __FILE__, __LINE__); + return; + } + + s->busy = true; + for (;;) { bool bit_el; bool bit_s; @@ -911,7 +929,7 @@ static void action_command(EEPRO100State *s) } /* Write new status. 
*/ stw_le_pci_dma(&s->dev, s->cb_address, - s->tx.status | ok_status | STATUS_C); + s->tx.status | ok_status | STATUS_C, attrs); if (bit_i) { /* CU completed action. */ eepro100_cx_interrupt(s); @@ -933,10 +951,12 @@ static void action_command(EEPRO100State *s) } TRACE(OTHER, logout("CU list empty\n")); /* List is empty. Now CU is idle or suspended. */ + s->busy = false; } static void eepro100_cu_command(EEPRO100State * s, uint8_t val) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; cu_state_t cu_state; switch (val) { case CU_NOP: @@ -986,7 +1006,7 @@ static void eepro100_cu_command(EEPRO100State * s, uint8_t val) /* Dump statistical counters. */ TRACE(OTHER, logout("val=0x%02x (dump stats)\n", val)); dump_statistics(s); - stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa005); + stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa005, attrs); break; case CU_CMD_BASE: /* Load CU base. */ @@ -997,7 +1017,7 @@ static void eepro100_cu_command(EEPRO100State * s, uint8_t val) /* Dump and reset statistical counters. */ TRACE(OTHER, logout("val=0x%02x (dump stats and reset)\n", val)); dump_statistics(s); - stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa007); + stl_le_pci_dma(&s->dev, s->statsaddr + s->stats_size, 0xa007, attrs); memset(&s->statistics, 0, sizeof(s->statistics)); break; case CU_SRESUME: @@ -1612,6 +1632,7 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size) * - Magic packets should set bit 30 in power management driver register. * - Interesting packets should set bit 29 in power management driver register. */ + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; EEPRO100State *s = qemu_get_nic_opaque(nc); uint16_t rfd_status = 0xa000; #if defined(CONFIG_PAD_RECEIVED_FRAMES) @@ -1726,9 +1747,9 @@ static ssize_t nic_receive(NetClientState *nc, const uint8_t * buf, size_t size) TRACE(OTHER, logout("command 0x%04x, link 0x%08x, addr 0x%08x, size %u\n", rfd_command, rx.link, rx.rx_buf_addr, rfd_size)); stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset + - offsetof(eepro100_rx_t, status), rfd_status); + offsetof(eepro100_rx_t, status), rfd_status, attrs); stw_le_pci_dma(&s->dev, s->ru_base + s->ru_offset + - offsetof(eepro100_rx_t, count), size); + offsetof(eepro100_rx_t, count), size, attrs); /* Early receive interrupt not supported. 
*/ #if 0 eepro100_er_interrupt(s); @@ -1865,7 +1886,9 @@ static void e100_nic_realize(PCIDevice *pci_dev, Error **errp) nic_reset(s); s->nic = qemu_new_nic(&net_eepro100_info, &s->conf, - object_get_typename(OBJECT(pci_dev)), pci_dev->qdev.id, s); + object_get_typename(OBJECT(pci_dev)), + pci_dev->qdev.id, + &pci_dev->qdev.mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); TRACE(OTHER, logout("%s\n", qemu_get_queue(s->nic)->info_str)); diff --git a/hw/net/etraxfs_eth.c b/hw/net/etraxfs_eth.c index 1b82aec7943d26b7850acf39f50eec3104bb78e1..ba57a978d15767a7c39bf2eea475df3642ee9a34 100644 --- a/hw/net/etraxfs_eth.c +++ b/hw/net/etraxfs_eth.c @@ -618,7 +618,8 @@ static void etraxfs_eth_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_etraxfs_info, &s->conf, - object_get_typename(OBJECT(s)), dev->id, s); + object_get_typename(OBJECT(s)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); s->phy.read = tdk_read; diff --git a/hw/net/fsl_etsec/etsec.c b/hw/net/fsl_etsec/etsec.c index bd9d62b5593d253b4db32b530cac9c94c5f1de78..f790613b5256722f05f6d718b5da9514d2273c16 100644 --- a/hw/net/fsl_etsec/etsec.c +++ b/hw/net/fsl_etsec/etsec.c @@ -391,7 +391,8 @@ static void etsec_realize(DeviceState *dev, Error **errp) eTSEC *etsec = ETSEC_COMMON(dev); etsec->nic = qemu_new_nic(&net_etsec_info, &etsec->conf, - object_get_typename(OBJECT(dev)), dev->id, etsec); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, etsec); qemu_format_nic_info_str(qemu_get_queue(etsec->nic), etsec->conf.macaddr.a); etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_DEFAULT); diff --git a/hw/net/ftgmac100.c b/hw/net/ftgmac100.c index 25685ba3a954737c4aa0d493bdf36176ae249f54..be2cf63c085a8ad236d0690dfa6616ae8cbc3a11 100644 --- a/hw/net/ftgmac100.c +++ b/hw/net/ftgmac100.c @@ -453,7 +453,8 @@ static void do_phy_ctl(FTGMAC100State *s) static int ftgmac100_read_bd(FTGMAC100Desc *bd, dma_addr_t addr) { - if (dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd))) { + if (dma_memory_read(&address_space_memory, addr, + bd, sizeof(*bd), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to read descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -473,7 +474,8 @@ static int ftgmac100_write_bd(FTGMAC100Desc *bd, dma_addr_t addr) lebd.des1 = cpu_to_le32(bd->des1); lebd.des2 = cpu_to_le32(bd->des2); lebd.des3 = cpu_to_le32(bd->des3); - if (dma_memory_write(&address_space_memory, addr, &lebd, sizeof(lebd))) { + if (dma_memory_write(&address_space_memory, addr, + &lebd, sizeof(lebd), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to write descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -554,7 +556,8 @@ static void ftgmac100_do_tx(FTGMAC100State *s, uint32_t tx_ring, len = sizeof(s->frame) - frame_size; } - if (dma_memory_read(&address_space_memory, bd.des3, ptr, len)) { + if (dma_memory_read(&address_space_memory, bd.des3, + ptr, len, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: failed to read packet @ 0x%x\n", __func__, bd.des3); s->isr |= FTGMAC100_INT_AHB_ERR; @@ -977,9 +980,9 @@ static ssize_t ftgmac100_receive(NetClientState *nc, const uint8_t *buf, return size; } - /* 4 bytes for the CRC. */ - size += 4; crc = cpu_to_be32(crc32(~0, buf, size)); + /* Increase size by 4, loop below reads the last 4 bytes from crc_ptr. 
*/ + size += 4; crc_ptr = (uint8_t *) &crc; /* Huge frames are truncated. */ @@ -1030,20 +1033,24 @@ static ssize_t ftgmac100_receive(NetClientState *nc, const uint8_t *buf, bd.des1 = lduw_be_p(buf + 14) | FTGMAC100_RXDES1_VLANTAG_AVAIL; if (s->maccr & FTGMAC100_MACCR_RM_VLAN) { - dma_memory_write(&address_space_memory, buf_addr, buf, 12); - dma_memory_write(&address_space_memory, buf_addr + 12, buf + 16, - buf_len - 16); + dma_memory_write(&address_space_memory, buf_addr, buf, 12, + MEMTXATTRS_UNSPECIFIED); + dma_memory_write(&address_space_memory, buf_addr + 12, + buf + 16, buf_len - 16, + MEMTXATTRS_UNSPECIFIED); } else { - dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + dma_memory_write(&address_space_memory, buf_addr, buf, + buf_len, MEMTXATTRS_UNSPECIFIED); } } else { bd.des1 = 0; - dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len, + MEMTXATTRS_UNSPECIFIED); } buf += buf_len; if (size < 4) { dma_memory_write(&address_space_memory, buf_addr + buf_len, - crc_ptr, 4 - size); + crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED); crc_ptr += 4 - size; } @@ -1111,7 +1118,8 @@ static void ftgmac100_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_ftgmac100_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/i82596.c b/hw/net/i82596.c index ec21e2699a189e0372277b37109c60159edf2497..dc64246f7549a7f9e47da6a42ba6682d3cccf881 100644 --- a/hw/net/i82596.c +++ b/hw/net/i82596.c @@ -743,7 +743,7 @@ void i82596_common_init(DeviceState *dev, I82596State *s, NetClientInfo *info) qemu_macaddr_default_if_unset(&s->conf.macaddr); } s->nic = qemu_new_nic(info, &s->conf, object_get_typename(OBJECT(dev)), - dev->id, s); + dev->id, &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); if (USE_TIMER) { diff --git a/hw/net/imx_fec.c b/hw/net/imx_fec.c index 9c7035bc948e4d4d6ea4d8b081e69e44fb409aa1..74e7e0d12220b0da5991d12b3be96f3a96327f14 100644 --- a/hw/net/imx_fec.c +++ b/hw/net/imx_fec.c @@ -387,19 +387,22 @@ static void imx_phy_write(IMXFECState *s, int reg, uint32_t val) static void imx_fec_read_bd(IMXFECBufDesc *bd, dma_addr_t addr) { - dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd)); + dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd), + MEMTXATTRS_UNSPECIFIED); trace_imx_fec_read_bd(addr, bd->flags, bd->length, bd->data); } static void imx_fec_write_bd(IMXFECBufDesc *bd, dma_addr_t addr) { - dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd)); + dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd), + MEMTXATTRS_UNSPECIFIED); } static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr) { - dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd)); + dma_memory_read(&address_space_memory, addr, bd, sizeof(*bd), + MEMTXATTRS_UNSPECIFIED); trace_imx_enet_read_bd(addr, bd->flags, bd->length, bd->data, bd->option, bd->status); @@ -407,7 +410,8 @@ static void imx_enet_read_bd(IMXENETBufDesc *bd, dma_addr_t addr) static void imx_enet_write_bd(IMXENETBufDesc *bd, dma_addr_t addr) { - dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd)); + dma_memory_write(&address_space_memory, addr, bd, sizeof(*bd), + MEMTXATTRS_UNSPECIFIED); } static void 
imx_eth_update(IMXFECState *s) @@ -474,7 +478,8 @@ static void imx_fec_do_tx(IMXFECState *s) len = ENET_MAX_FRAME_SIZE - frame_size; s->regs[ENET_EIR] |= ENET_INT_BABT; } - dma_memory_read(&address_space_memory, bd.data, ptr, len); + dma_memory_read(&address_space_memory, bd.data, ptr, len, + MEMTXATTRS_UNSPECIFIED); ptr += len; frame_size += len; if (bd.flags & ENET_BD_L) { @@ -555,7 +560,8 @@ static void imx_enet_do_tx(IMXFECState *s, uint32_t index) len = ENET_MAX_FRAME_SIZE - frame_size; s->regs[ENET_EIR] |= ENET_INT_BABT; } - dma_memory_read(&address_space_memory, bd.data, ptr, len); + dma_memory_read(&address_space_memory, bd.data, ptr, len, + MEMTXATTRS_UNSPECIFIED); ptr += len; frame_size += len; if (bd.flags & ENET_BD_L) { @@ -1103,11 +1109,12 @@ static ssize_t imx_fec_receive(NetClientState *nc, const uint8_t *buf, buf_len += size - 4; } buf_addr = bd.data; - dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len, + MEMTXATTRS_UNSPECIFIED); buf += buf_len; if (size < 4) { dma_memory_write(&address_space_memory, buf_addr + buf_len, - crc_ptr, 4 - size); + crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED); crc_ptr += 4 - size; } bd.flags &= ~ENET_BD_E; @@ -1210,8 +1217,8 @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf, */ const uint8_t zeros[2] = { 0 }; - dma_memory_write(&address_space_memory, buf_addr, - zeros, sizeof(zeros)); + dma_memory_write(&address_space_memory, buf_addr, zeros, + sizeof(zeros), MEMTXATTRS_UNSPECIFIED); buf_addr += sizeof(zeros); buf_len -= sizeof(zeros); @@ -1220,11 +1227,12 @@ static ssize_t imx_enet_receive(NetClientState *nc, const uint8_t *buf, shift16 = false; } - dma_memory_write(&address_space_memory, buf_addr, buf, buf_len); + dma_memory_write(&address_space_memory, buf_addr, buf, buf_len, + MEMTXATTRS_UNSPECIFIED); buf += buf_len; if (size < 4) { dma_memory_write(&address_space_memory, buf_addr + buf_len, - crc_ptr, 4 - size); + crc_ptr, 4 - size, MEMTXATTRS_UNSPECIFIED); crc_ptr += 4 - size; } bd.flags &= ~ENET_BD_E; @@ -1310,7 +1318,7 @@ static void imx_eth_realize(DeviceState *dev, Error **errp) s->nic = qemu_new_nic(&imx_eth_net_info, &s->conf, object_get_typename(OBJECT(dev)), - dev->id, s); + dev->id, &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/lan9118.c b/hw/net/lan9118.c index 6aff424cbe54d33c4a0ee35a95c4ca005454d360..9897296c21d8c99882f3b9260d64b9abfffbf7a2 100644 --- a/hw/net/lan9118.c +++ b/hw/net/lan9118.c @@ -155,6 +155,12 @@ do { fprintf(stderr, "lan9118: error: " fmt , ## __VA_ARGS__);} while (0) #define GPT_TIMER_EN 0x20000000 +/* + * The MAC Interface Layer (MIL), within the MAC, contains a 2K Byte transmit + * and a 128 Byte receive FIFO which is separate from the TX and RX FIFOs. 
+ */ +#define MIL_TXFIFO_SIZE 2048 + enum tx_state { TX_IDLE, TX_B, @@ -171,7 +177,7 @@ typedef struct { int32_t pad; int32_t fifo_used; int32_t len; - uint8_t data[2048]; + uint8_t data[MIL_TXFIFO_SIZE]; } LAN9118Packet; static const VMStateDescription vmstate_lan9118_packet = { @@ -187,7 +193,7 @@ static const VMStateDescription vmstate_lan9118_packet = { VMSTATE_INT32(pad, LAN9118Packet), VMSTATE_INT32(fifo_used, LAN9118Packet), VMSTATE_INT32(len, LAN9118Packet), - VMSTATE_UINT8_ARRAY(data, LAN9118Packet, 2048), + VMSTATE_UINT8_ARRAY(data, LAN9118Packet, MIL_TXFIFO_SIZE), VMSTATE_END_OF_LIST() } }; @@ -549,7 +555,7 @@ static ssize_t lan9118_receive(NetClientState *nc, const uint8_t *buf, return -1; } - if (size >= 2048 || size < 14) { + if (size >= MIL_TXFIFO_SIZE || size < 14) { return -1; } @@ -696,6 +702,14 @@ static void do_tx_packet(lan9118_state *s) n = (s->tx_status_fifo_head + s->tx_status_fifo_used) & 511; s->tx_status_fifo[n] = status; s->tx_status_fifo_used++; + + /* + * Generate TSFL interrupt if TX FIFO level exceeds the level + * specified in the FIFO_INT TX Status Level field. + */ + if (s->tx_status_fifo_used > ((s->fifo_int >> 16) & 0xff)) { + s->int_sts |= TSFL_INT; + } if (s->tx_status_fifo_used == 512) { s->int_sts |= TSFF_INT; /* TODO: Stop transmission. */ @@ -1354,7 +1368,8 @@ static void lan9118_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_lan9118_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); s->eeprom[0] = 0xa5; for (i = 0; i < 6; i++) { diff --git a/hw/net/mcf_fec.c b/hw/net/mcf_fec.c index 25e3e453ab12dff8912ca57a7865cbd37fa94b5e..a6be7bf4130dc16c19b735555b94e05f2d4e9f91 100644 --- a/hw/net/mcf_fec.c +++ b/hw/net/mcf_fec.c @@ -643,7 +643,8 @@ static void mcf_fec_realize(DeviceState *dev, Error **errp) mcf_fec_state *s = MCF_FEC_NET(dev); s->nic = qemu_new_nic(&net_mcf_fec_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/mipsnet.c b/hw/net/mipsnet.c index 2ade72dea081f5895056591587bd0e9e3ed06729..8e925de867c2aba1fba9bc6b23cca9247ea55a6b 100644 --- a/hw/net/mipsnet.c +++ b/hw/net/mipsnet.c @@ -255,7 +255,8 @@ static void mipsnet_realize(DeviceState *dev, Error **errp) sysbus_init_irq(sbd, &s->irq); s->nic = qemu_new_nic(&net_mipsnet_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/msf2-emac.c b/hw/net/msf2-emac.c index 9278fdce0b3d06a524c0562d22310cd6a619d8de..1efa3dbf0128de621620412c77ce5a62b1db7f5a 100644 --- a/hw/net/msf2-emac.c +++ b/hw/net/msf2-emac.c @@ -527,7 +527,8 @@ static void msf2_emac_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_msf2_emac_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/ne2000-isa.c b/hw/net/ne2000-isa.c index 
dd6f6e34d3cfb262b7dfd8e28056000c7077927e..30bd20c293909bd443ca22281a8cb774214c9cea 100644 --- a/hw/net/ne2000-isa.c +++ b/hw/net/ne2000-isa.c @@ -74,7 +74,8 @@ static void isa_ne2000_realizefn(DeviceState *dev, Error **errp) ne2000_reset(s); s->nic = qemu_new_nic(&net_ne2000_isa_info, &s->c, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a); } diff --git a/hw/net/ne2000-pci.c b/hw/net/ne2000-pci.c index 9e5d10859aca49959697a6e4234b3e90c44a0122..4f8a69908137a9e20537991336804e45220ffce6 100644 --- a/hw/net/ne2000-pci.c +++ b/hw/net/ne2000-pci.c @@ -71,7 +71,8 @@ static void pci_ne2000_realize(PCIDevice *pci_dev, Error **errp) s->nic = qemu_new_nic(&net_ne2000_info, &s->c, object_get_typename(OBJECT(pci_dev)), - pci_dev->qdev.id, s); + pci_dev->qdev.id, + &pci_dev->qdev.mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a); } diff --git a/hw/net/npcm7xx_emc.c b/hw/net/npcm7xx_emc.c index 7c892f820fb1a5814213dfca5589d3584020eb78..cafda7877290154b5f87b1d520e272efc4e8203c 100644 --- a/hw/net/npcm7xx_emc.c +++ b/hw/net/npcm7xx_emc.c @@ -200,7 +200,8 @@ static void emc_update_irq_from_reg_change(NPCM7xxEMCState *emc) static int emc_read_tx_desc(dma_addr_t addr, NPCM7xxEMCTxDesc *desc) { - if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) { + if (dma_memory_read(&address_space_memory, addr, desc, + sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -221,7 +222,7 @@ static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr) le_desc.status_and_length = cpu_to_le32(desc->status_and_length); le_desc.ntxdsa = cpu_to_le32(desc->ntxdsa); if (dma_memory_write(&address_space_memory, addr, &le_desc, - sizeof(le_desc))) { + sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -231,7 +232,8 @@ static int emc_write_tx_desc(const NPCM7xxEMCTxDesc *desc, dma_addr_t addr) static int emc_read_rx_desc(dma_addr_t addr, NPCM7xxEMCRxDesc *desc) { - if (dma_memory_read(&address_space_memory, addr, desc, sizeof(*desc))) { + if (dma_memory_read(&address_space_memory, addr, desc, + sizeof(*desc), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -252,7 +254,7 @@ static int emc_write_rx_desc(const NPCM7xxEMCRxDesc *desc, dma_addr_t addr) le_desc.reserved = cpu_to_le32(desc->reserved); le_desc.nrxdsa = cpu_to_le32(desc->nrxdsa); if (dma_memory_write(&address_space_memory, addr, &le_desc, - sizeof(le_desc))) { + sizeof(le_desc), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to write descriptor @ 0x%" HWADDR_PRIx "\n", __func__, addr); return -1; @@ -284,6 +286,12 @@ static void emc_halt_rx(NPCM7xxEMCState *emc, uint32_t mista_flag) emc_set_mista(emc, mista_flag); } +static void emc_enable_rx_and_flush(NPCM7xxEMCState *emc) +{ + emc->rx_active = true; + qemu_flush_queued_packets(qemu_get_queue(emc->nic)); +} + static void emc_set_next_tx_descriptor(NPCM7xxEMCState *emc, const NPCM7xxEMCTxDesc *tx_desc, uint32_t desc_addr) @@ -360,7 +368,8 @@ static void emc_try_send_next_packet(NPCM7xxEMCState *emc) buf = malloced_buf; } - if (dma_memory_read(&address_space_memory, next_buf_addr, 
buf, length)) { + if (dma_memory_read(&address_space_memory, next_buf_addr, buf, + length, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Failed to read packet @ 0x%x\n", __func__, next_buf_addr); emc_set_mista(emc, REG_MISTA_TXBERR); @@ -545,10 +554,11 @@ static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1) buf_addr = rx_desc.rxbsa; emc->regs[REG_CRXBSA] = buf_addr; - if (dma_memory_write(&address_space_memory, buf_addr, buf, len) || + if (dma_memory_write(&address_space_memory, buf_addr, buf, + len, MEMTXATTRS_UNSPECIFIED) || (!(emc->regs[REG_MCMDR] & REG_MCMDR_SPCRC) && - dma_memory_write(&address_space_memory, buf_addr + len, crc_ptr, - 4))) { + dma_memory_write(&address_space_memory, buf_addr + len, + crc_ptr, 4, MEMTXATTRS_UNSPECIFIED))) { qemu_log_mask(LOG_GUEST_ERROR, "%s: Bus error writing packet\n", __func__); emc_set_mista(emc, REG_MISTA_RXBERR); @@ -581,13 +591,6 @@ static ssize_t emc_receive(NetClientState *nc, const uint8_t *buf, size_t len1) return len; } -static void emc_try_receive_next_packet(NPCM7xxEMCState *emc) -{ - if (emc_can_receive(qemu_get_queue(emc->nic))) { - qemu_flush_queued_packets(qemu_get_queue(emc->nic)); - } -} - static uint64_t npcm7xx_emc_read(void *opaque, hwaddr offset, unsigned size) { NPCM7xxEMCState *emc = opaque; @@ -703,7 +706,7 @@ static void npcm7xx_emc_write(void *opaque, hwaddr offset, emc->regs[REG_MGSTA] |= REG_MGSTA_RXHA; } if (value & REG_MCMDR_RXON) { - emc->rx_active = true; + emc_enable_rx_and_flush(emc); } else { emc_halt_rx(emc, 0); } @@ -739,8 +742,7 @@ static void npcm7xx_emc_write(void *opaque, hwaddr offset, break; case REG_RSDR: if (emc->regs[REG_MCMDR] & REG_MCMDR_RXON) { - emc->rx_active = true; - emc_try_receive_next_packet(emc); + emc_enable_rx_and_flush(emc); } break; case REG_MIIDA: @@ -802,7 +804,8 @@ static void npcm7xx_emc_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&emc->conf.macaddr); emc->nic = qemu_new_nic(&net_npcm7xx_emc_info, &emc->conf, - object_get_typename(OBJECT(dev)), dev->id, emc); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, emc); qemu_format_nic_info_str(qemu_get_queue(emc->nic), emc->conf.macaddr.a); } diff --git a/hw/net/opencores_eth.c b/hw/net/opencores_eth.c index 0b3dc3146e61f73056ff29ce8964f77cba0950a1..f96d6ea2ccfe6b0f8d200ca9f3fe7c90577081b9 100644 --- a/hw/net/opencores_eth.c +++ b/hw/net/opencores_eth.c @@ -732,7 +732,8 @@ static void sysbus_open_eth_realize(DeviceState *dev, Error **errp) sysbus_init_irq(sbd, &s->irq); s->nic = qemu_new_nic(&net_open_eth_info, &s->conf, - object_get_typename(OBJECT(s)), dev->id, s); + object_get_typename(OBJECT(s)), dev->id, + &dev->mem_reentrancy_guard, s); } static void qdev_open_eth_reset(DeviceState *dev) diff --git a/hw/net/pcnet.c b/hw/net/pcnet.c index dcd3fc49481b46a6d4bb7c726572975d65266e02..9c475d5721bf45816769643743abdd411a77afb5 100644 --- a/hw/net/pcnet.c +++ b/hw/net/pcnet.c @@ -632,7 +632,7 @@ static inline int ladr_match(PCNetState *s, const uint8_t *buf, int size) { struct qemu_ether_header *hdr = (void *)buf; if ((*(hdr->ether_dhost)&0x01) && - ((uint64_t *)&s->csr[8])[0] != 0LL) { + (s->csr[8] | s->csr[9] | s->csr[10] | s->csr[11]) != 0) { uint8_t ladr[8] = { s->csr[8] & 0xff, s->csr[8] >> 8, s->csr[9] & 0xff, s->csr[9] >> 8, @@ -1718,7 +1718,8 @@ void pcnet_common_init(DeviceState *dev, PCNetState *s, NetClientInfo *info) s->poll_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, pcnet_poll_timer, s); qemu_macaddr_default_if_unset(&s->conf.macaddr); - 
s->nic = qemu_new_nic(info, &s->conf, object_get_typename(OBJECT(dev)), dev->id, s); + s->nic = qemu_new_nic(info, &s->conf, object_get_typename(OBJECT(dev)), + dev->id, &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); /* Initialize the PROM */ diff --git a/hw/net/rocker/rocker.c b/hw/net/rocker/rocker.c index 31f2340fb910963bbfb77cf7e41980b36a28b67c..d8f3f16fe87463e140ec78fd8e6892eb989a8cba 100644 --- a/hw/net/rocker/rocker.c +++ b/hw/net/rocker/rocker.c @@ -1010,7 +1010,7 @@ static uint64_t rocker_port_phys_link_status(Rocker *r) FpPort *port = r->fp_port[i]; if (fp_port_get_link_up(port)) { - status |= 1 << (i + 1); + status |= 1ULL << (i + 1); } } return status; @@ -1025,7 +1025,7 @@ static uint64_t rocker_port_phys_enable_read(Rocker *r) FpPort *port = r->fp_port[i]; if (fp_port_enabled(port)) { - ret |= 1 << (i + 1); + ret |= 1ULL << (i + 1); } } return ret; diff --git a/hw/net/rocker/rocker_fp.c b/hw/net/rocker/rocker_fp.c index cbeed65bd5ec4fa6e24cf0f4b171c08628018b1f..0d21948adab320c6c299684e551884c26bd09fc3 100644 --- a/hw/net/rocker/rocker_fp.c +++ b/hw/net/rocker/rocker_fp.c @@ -241,8 +241,8 @@ FpPort *fp_port_alloc(Rocker *r, char *sw_name, port->conf.bootindex = -1; port->conf.peers = *peers; - port->nic = qemu_new_nic(&fp_port_info, &port->conf, - sw_name, NULL, port); + port->nic = qemu_new_nic(&fp_port_info, &port->conf, sw_name, NULL, + &DEVICE(r)->mem_reentrancy_guard, port); qemu_format_nic_info_str(qemu_get_queue(port->nic), port->conf.macaddr.a); diff --git a/hw/net/rocker/rocker_of_dpa.c b/hw/net/rocker/rocker_of_dpa.c index b3b8c5bb6d4b5700e4d22b72955af759f60be184..8ac26e6bebda1ae85907a9d9902201780a925d4c 100644 --- a/hw/net/rocker/rocker_of_dpa.c +++ b/hw/net/rocker/rocker_of_dpa.c @@ -2070,6 +2070,7 @@ static int of_dpa_cmd_add_l2_flood(OfDpa *of_dpa, OfDpaGroup *group, err_out: group->l2_flood.group_count = 0; g_free(group->l2_flood.group_ids); + group->l2_flood.group_ids = NULL; g_free(tlvs); return err; diff --git a/hw/net/rtl8139.c b/hw/net/rtl8139.c index 90b4fc63ce64e07ab495bb477f5dc83371fc4ce3..7bcc05e2a5f0af4bff507a05652776cd856d976c 100644 --- a/hw/net/rtl8139.c +++ b/hw/net/rtl8139.c @@ -2156,7 +2156,6 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) ip_data_len, saved_size - ETH_HLEN, large_send_mss); int tcp_send_offset = 0; - int send_count = 0; /* maximum IP header length is 60 bytes */ uint8_t saved_ip_header[60]; @@ -2261,7 +2260,6 @@ static int rtl8139_cplus_transmit_one(RTL8139State *s) /* add transferred count to TCP sequence number */ stl_be_p(&p_tcp_hdr->th_seq, chunk_size + ldl_be_p(&p_tcp_hdr->th_seq)); - ++send_count; } /* Stop sending this frame */ @@ -2748,7 +2746,11 @@ static void rtl8139_io_writeb(void *opaque, uint8_t addr, uint32_t val) } break; - + case RxConfig: + DPRINTF("RxConfig write(b) val=0x%02x\n", val); + rtl8139_RxConfig_write(s, + (rtl8139_RxConfig_read(s) & 0xFFFFFF00) | val); + break; default: DPRINTF("not implemented write(b) addr=0x%x val=0x%02x\n", addr, val); @@ -3398,7 +3400,8 @@ static void pci_rtl8139_realize(PCIDevice *dev, Error **errp) s->eeprom.contents[9] = s->conf.macaddr.a[4] | s->conf.macaddr.a[5] << 8; s->nic = qemu_new_nic(&net_rtl8139_info, &s->conf, - object_get_typename(OBJECT(dev)), d->id, s); + object_get_typename(OBJECT(dev)), d->id, + &d->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); s->cplus_txbuffer = NULL; diff --git a/hw/net/smc91c111.c b/hw/net/smc91c111.c index 
ad778cd8fc791ff5990744c063ce895c2d758050..4eda971ef3ed209437a68492b3525b5ddfa1aeb6 100644 --- a/hw/net/smc91c111.c +++ b/hw/net/smc91c111.c @@ -783,7 +783,8 @@ static void smc91c111_realize(DeviceState *dev, Error **errp) sysbus_init_irq(sbd, &s->irq); qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_smc91c111_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); /* ??? Save/restore. */ } diff --git a/hw/net/spapr_llan.c b/hw/net/spapr_llan.c index a6876a936db5551b864364900c427be3d6144b33..475d5f3a348d0c6f35d6d36f12be7e3caa06f9fd 100644 --- a/hw/net/spapr_llan.c +++ b/hw/net/spapr_llan.c @@ -325,7 +325,8 @@ static void spapr_vlan_realize(SpaprVioDevice *sdev, Error **errp) memcpy(&dev->perm_mac.a, &dev->nicconf.macaddr.a, sizeof(dev->perm_mac.a)); dev->nic = qemu_new_nic(&net_spapr_vlan_info, &dev->nicconf, - object_get_typename(OBJECT(sdev)), sdev->qdev.id, dev); + object_get_typename(OBJECT(sdev)), sdev->qdev.id, + &sdev->qdev.mem_reentrancy_guard, dev); qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a); dev->rxp_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, spapr_vlan_flush_rx_queue, diff --git a/hw/net/stellaris_enet.c b/hw/net/stellaris_enet.c index 8dd60783d81ddcada047227034a8ffc6972ae9cf..6768a6912f02836a97ce0a17134f73ddeb74947b 100644 --- a/hw/net/stellaris_enet.c +++ b/hw/net/stellaris_enet.c @@ -492,7 +492,8 @@ static void stellaris_enet_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_stellaris_enet_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/sungem.c b/hw/net/sungem.c index 3684a4d733b6ec8bea39b4487e189d20cccf9eb6..c12d44e9dc9ac4a5031fbbeb2d13efe0cda4a82a 100644 --- a/hw/net/sungem.c +++ b/hw/net/sungem.c @@ -1361,7 +1361,7 @@ static void sungem_realize(PCIDevice *pci_dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_sungem_info, &s->conf, object_get_typename(OBJECT(dev)), - dev->id, s); + dev->id, &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/sunhme.c b/hw/net/sunhme.c index fc34905f87590b4e5f516e8ba45c3275b127f588..fa98528d71328239e155c8e45f23bf960796b861 100644 --- a/hw/net/sunhme.c +++ b/hw/net/sunhme.c @@ -892,7 +892,8 @@ static void sunhme_realize(PCIDevice *pci_dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_sunhme_info, &s->conf, - object_get_typename(OBJECT(d)), d->id, s); + object_get_typename(OBJECT(d)), d->id, + &d->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/net/tulip.c b/hw/net/tulip.c index ca69f7ea5e17785afda221ddb6fe07242d16a149..956093abd764dcb1b95f146e0fb53cbcb5580448 100644 --- a/hw/net/tulip.c +++ b/hw/net/tulip.c @@ -70,32 +70,36 @@ static const VMStateDescription vmstate_pci_tulip = { static void tulip_desc_read(TULIPState *s, hwaddr p, struct tulip_descriptor *desc) { + const MemTxAttrs attrs = { .memory = true }; + if (s->csr[0] & CSR0_DBO) { - desc->status = ldl_be_pci_dma(&s->dev, p); - desc->control = ldl_be_pci_dma(&s->dev, p + 4); - desc->buf_addr1 = 
ldl_be_pci_dma(&s->dev, p + 8); - desc->buf_addr2 = ldl_be_pci_dma(&s->dev, p + 12); + ldl_be_pci_dma(&s->dev, p, &desc->status, attrs); + ldl_be_pci_dma(&s->dev, p + 4, &desc->control, attrs); + ldl_be_pci_dma(&s->dev, p + 8, &desc->buf_addr1, attrs); + ldl_be_pci_dma(&s->dev, p + 12, &desc->buf_addr2, attrs); } else { - desc->status = ldl_le_pci_dma(&s->dev, p); - desc->control = ldl_le_pci_dma(&s->dev, p + 4); - desc->buf_addr1 = ldl_le_pci_dma(&s->dev, p + 8); - desc->buf_addr2 = ldl_le_pci_dma(&s->dev, p + 12); + ldl_le_pci_dma(&s->dev, p, &desc->status, attrs); + ldl_le_pci_dma(&s->dev, p + 4, &desc->control, attrs); + ldl_le_pci_dma(&s->dev, p + 8, &desc->buf_addr1, attrs); + ldl_le_pci_dma(&s->dev, p + 12, &desc->buf_addr2, attrs); } } static void tulip_desc_write(TULIPState *s, hwaddr p, struct tulip_descriptor *desc) { + const MemTxAttrs attrs = { .memory = true }; + if (s->csr[0] & CSR0_DBO) { - stl_be_pci_dma(&s->dev, p, desc->status); - stl_be_pci_dma(&s->dev, p + 4, desc->control); - stl_be_pci_dma(&s->dev, p + 8, desc->buf_addr1); - stl_be_pci_dma(&s->dev, p + 12, desc->buf_addr2); + stl_be_pci_dma(&s->dev, p, desc->status, attrs); + stl_be_pci_dma(&s->dev, p + 4, desc->control, attrs); + stl_be_pci_dma(&s->dev, p + 8, desc->buf_addr1, attrs); + stl_be_pci_dma(&s->dev, p + 12, desc->buf_addr2, attrs); } else { - stl_le_pci_dma(&s->dev, p, desc->status); - stl_le_pci_dma(&s->dev, p + 4, desc->control); - stl_le_pci_dma(&s->dev, p + 8, desc->buf_addr1); - stl_le_pci_dma(&s->dev, p + 12, desc->buf_addr2); + stl_le_pci_dma(&s->dev, p, desc->status, attrs); + stl_le_pci_dma(&s->dev, p + 4, desc->control, attrs); + stl_le_pci_dma(&s->dev, p + 8, desc->buf_addr1, attrs); + stl_le_pci_dma(&s->dev, p + 12, desc->buf_addr2, attrs); } } @@ -866,11 +870,10 @@ static const MemoryRegionOps tulip_ops = { static void tulip_idblock_crc(TULIPState *s, uint16_t *srom) { - int word, n; + int word; int bit; unsigned char bitval, crc; const int len = 9; - n = 0; crc = -1; for (word = 0; word < len; word++) { @@ -883,7 +886,6 @@ static void tulip_idblock_crc(TULIPState *s, uint16_t *srom) srom[len - 1] = (srom[len - 1] & 0xff00) | (unsigned short)crc; break; } - n++; bitval = ((srom[word] >> bit) & 1) ^ ((crc >> 7) & 1); crc = crc << 1; if (bitval == 1) { @@ -963,6 +965,8 @@ static void pci_tulip_realize(PCIDevice *pci_dev, Error **errp) pci_conf = s->dev.config; pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */ + qemu_macaddr_default_if_unset(&s->c.macaddr); + s->eeprom = eeprom93xx_new(&pci_dev->qdev, 64); tulip_fill_eeprom(s); @@ -977,11 +981,10 @@ static void pci_tulip_realize(PCIDevice *pci_dev, Error **errp) s->irq = pci_allocate_irq(&s->dev); - qemu_macaddr_default_if_unset(&s->c.macaddr); - s->nic = qemu_new_nic(&net_tulip_info, &s->c, object_get_typename(OBJECT(pci_dev)), - pci_dev->qdev.id, s); + pci_dev->qdev.id, + &pci_dev->qdev.mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->c.macaddr.a); } diff --git a/hw/net/vhost_net-stub.c b/hw/net/vhost_net-stub.c index 89d71cfb8e130ec4618a673cd88f55aa04a37930..db171829b475264e90290f68b63f9db7973e84b5 100644 --- a/hw/net/vhost_net-stub.c +++ b/hw/net/vhost_net-stub.c @@ -82,6 +82,15 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev, { } +bool vhost_net_config_pending(VHostNetState *net) +{ + return false; +} + +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask) +{ +} + int vhost_net_notify_migration_done(struct vhost_net *net, char* mac_addr) { return -1; @@ -101,3 
+110,8 @@ int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu) { return 0; } + +void vhost_net_save_acked_features(NetClientState *nc) +{ + +} diff --git a/hw/net/vhost_net.c b/hw/net/vhost_net.c index 30379d2ca4105fcf30803dcf242a823604c9b354..d226dba83c67930f76aa1e2ef2ed34d3525219ad 100644 --- a/hw/net/vhost_net.c +++ b/hw/net/vhost_net.c @@ -141,6 +141,13 @@ uint64_t vhost_net_get_acked_features(VHostNetState *net) return net->dev.acked_features; } +void vhost_net_save_acked_features(NetClientState *nc) +{ + if (nc->info->type == NET_CLIENT_DRIVER_VHOST_USER) { + vhost_user_save_acked_features(nc); + } +} + static int vhost_net_get_fd(NetClientState *backend) { switch (backend->info->type) { @@ -152,9 +159,26 @@ static int vhost_net_get_fd(NetClientState *backend) } } +static uint64_t vhost_get_mask_features(const int *feature_bits, uint64_t features) +{ + const int *bit = feature_bits; + uint64_t out_features = 0; + + while (*bit != VHOST_INVALID_FEATURE_BIT) { + uint64_t bit_mask = (1ULL << *bit); + if (features & bit_mask) { + out_features |= bit_mask; + } + bit++; + } + return out_features; +} + struct vhost_net *vhost_net_init(VhostNetOptions *options) { int r; + VirtIONet *n; + VirtIODevice *vdev; bool backend_kernel = options->backend_type == VHOST_BACKEND_TYPE_KERNEL; struct vhost_net *net = g_new0(struct vhost_net, 1); uint64_t features = 0; @@ -180,7 +204,46 @@ struct vhost_net *vhost_net_init(VhostNetOptions *options) net->backend = r; net->dev.protocol_features = 0; } else { - net->dev.backend_features = 0; + /* Preserve acked features across an OVS restart during VM start. + * Normal situation: + * 1. VM starts. + * 2. vhost_net_init() succeeds; dev.acked_features is 0x40000000. + * 3. The guest virtio-net module loads; QEMU calls virtio_net_set_features(), + * setting dev.acked_features to 0x40408000. + * 4. The features (0x40408000) are set on OVS's vhostuser port. + * 5. OVS restarts. + * 6. vhost_user_stop() saves net->dev.acked_features (0x40408000) into + * VhostUserState's acked_features (0x40408000). + * 7. The restart completes. + * 8. vhost_net_init() calls vhost_user_get_acked_features() to fetch the + * saved features and stores them in net->dev.acked_features. + * Abnormal situation: + * 1. VM starts. + * 2. vhost_net_init() succeeds; dev.acked_features is 0x40000000. + * 3. OVS restarts. + * 4. vhost_user_stop() saves net->dev.acked_features (0x40000000) into + * VhostUserState's acked_features (0x40000000). + * 5. The guest virtio-net module loads; QEMU calls virtio_net_set_features(), + * setting dev.acked_features to 0x40408000. + * 6. The restart completes. + * 7. vhost_net_init() calls vhost_user_get_acked_features(), fetches the + * saved features (0x40000000), and stores them in net->dev.acked_features + * (0x40000000). + * 8. The features (0x40000000) are set on OVS's vhostuser port. + * + * In the abnormal situation QEMU hands the wrong features to OVS's + * vhostuser port and the VM's network goes down. All that is lost there is + * the guest features in acked_features, so here we set acked_features from + * the VM's features, exactly as a guest virtio-net module load would.
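vhost_get_mask_features() above simply intersects a feature word with the bits enumerated in the backend's feature table. A standalone sketch, with hypothetical bit numbers (15, 22 and 30) chosen so the result reproduces the 0x40408000 value from the comment; the real table comes from vhost_net_get_feature_bits():

#include <stdint.h>
#include <stdio.h>

#define END_OF_TABLE (-1)   /* stands in for VHOST_INVALID_FEATURE_BIT */

static uint64_t mask_features(const int *bits, uint64_t features)
{
    uint64_t out = 0;

    for (; *bits != END_OF_TABLE; bits++) {
        out |= features & (1ULL << *bits);
    }
    return out;
}

int main(void)
{
    /* Hypothetical backend table covering bits 15, 22 and 30. */
    const int table[] = { 15, 22, 30, END_OF_TABLE };

    /* 0x40408000 = bits 30, 22 and 15 -- the value from the comment above. */
    printf("0x%llx\n",
           (unsigned long long)mask_features(table, 0x40408000ULL));
    return 0;
}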
+ */ + if (options->net_backend->peer) { + n = qemu_get_nic_opaque(options->net_backend->peer); + vdev = VIRTIO_DEVICE(n); + net->dev.backend_features = vhost_get_mask_features(vhost_net_get_feature_bits(net), + vdev->guest_features); + } else { + net->dev.backend_features = 0; + } net->dev.protocol_features = 0; net->backend = -1; @@ -244,12 +307,19 @@ static int vhost_net_start_one(struct vhost_net *net, struct vhost_vring_file file = { }; int r; + if (net->nc->info->start) { + r = net->nc->info->start(net->nc); + if (r < 0) { + return r; + } + } + r = vhost_dev_enable_notifiers(&net->dev, dev); if (r < 0) { goto fail_notifiers; } - r = vhost_dev_start(&net->dev, dev); + r = vhost_dev_start(&net->dev, dev, false); if (r < 0) { goto fail_start; } @@ -274,6 +344,13 @@ static int vhost_net_start_one(struct vhost_net *net, } } } + + if (net->nc->info->load) { + r = net->nc->info->load(net->nc); + if (r < 0) { + goto fail; + } + } return 0; fail: file.fd = -1; @@ -291,7 +368,7 @@ fail: if (net->nc->info->poll) { net->nc->info->poll(net->nc, true); } - vhost_dev_stop(&net->dev, dev); + vhost_dev_stop(&net->dev, dev, false); fail_start: vhost_dev_disable_notifiers(&net->dev, dev); fail_notifiers: @@ -312,7 +389,10 @@ static void vhost_net_stop_one(struct vhost_net *net, if (net->nc->info->poll) { net->nc->info->poll(net->nc, true); } - vhost_dev_stop(&net->dev, dev); + vhost_dev_stop(&net->dev, dev, false); + if (net->nc->info->stop) { + net->nc->info->stop(net->nc); + } vhost_dev_disable_notifiers(&net->dev, dev); } @@ -376,11 +456,14 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, goto err_start; } - if (peer->vring_enable) { + /* ovs needs to restore all states of vring */ + if (peer->vring_enable || + ncs[i].peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { /* restore vring enable state */ r = vhost_set_vring_enable(peer, peer->vring_enable); if (r < 0) { + vhost_net_stop_one(get_vhost_net(peer), dev); goto err_start; } } @@ -390,7 +473,8 @@ int vhost_net_start(VirtIODevice *dev, NetClientState *ncs, err_start: while (--i >= 0) { - peer = qemu_get_peer(ncs , i); + peer = qemu_get_peer(ncs, i < data_queue_pairs ? 
+ i : n->max_queue_pairs); vhost_net_stop_one(get_vhost_net(peer), dev); } e = k->set_guest_notifiers(qbus->parent, total_notifiers, false); @@ -457,6 +541,15 @@ void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev, vhost_virtqueue_mask(&net->dev, dev, idx, mask); } +bool vhost_net_config_pending(VHostNetState *net) +{ + return vhost_config_pending(&net->dev); +} + +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask) +{ + vhost_config_mask(&net->dev, dev, mask); +} VHostNetState *get_vhost_net(NetClientState *nc) { VHostNetState *vhost_net = 0; diff --git a/hw/net/virtio-net.c b/hw/net/virtio-net.c index f2014d5ea0b30ceed3b422aeecca3ed8b043fd46..7c87aa28ef19793082994e87ffb6aea7d0a4eb15 100644 --- a/hw/net/virtio-net.c +++ b/hw/net/virtio-net.c @@ -14,6 +14,7 @@ #include "qemu/osdep.h" #include "qemu/atomic.h" #include "qemu/iov.h" +#include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/module.h" #include "hw/virtio/virtio.h" @@ -47,16 +48,14 @@ #define VIRTIO_NET_VM_VERSION 11 -#define MAC_TABLE_ENTRIES 64 #define MAX_VLAN (1 << 12) /* Per 802.1Q definition */ /* previously fixed value */ -#define VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE 256 -#define VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE 256 +#define VIRTIO_NET_VHOST_USER_DEFAULT_SIZE 2048 /* for now, only allow larger queue_pairs; with virtio-1, guest can downsize */ -#define VIRTIO_NET_RX_QUEUE_MIN_SIZE VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE -#define VIRTIO_NET_TX_QUEUE_MIN_SIZE VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE +#define VIRTIO_NET_RX_QUEUE_MIN_SIZE 256 +#define VIRTIO_NET_TX_QUEUE_MIN_SIZE 256 #define VIRTIO_NET_IP4_ADDR_SIZE 8 /* ipv4 saddr + daddr */ @@ -244,7 +243,8 @@ static void virtio_net_vhost_status(VirtIONet *n, uint8_t status) VirtIODevice *vdev = VIRTIO_DEVICE(n); NetClientState *nc = qemu_get_queue(n->nic); int queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; - int cvq = n->max_ncs - n->max_queue_pairs; + int cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ? + n->max_ncs - n->max_queue_pairs : 0; if (!get_vhost_net(nc->peer)) { return; @@ -600,6 +600,11 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, n->mergeable_rx_bufs = mergeable_rx_bufs; + /* + * Note: when extending the vnet header, please make sure to + * change the vnet header copying logic in virtio_net_flush_tx() + * as well. + */ if (version_1) { n->guest_hdr_len = hash_report ? sizeof(struct virtio_net_hdr_v1_hash) : @@ -622,6 +627,28 @@ static void virtio_net_set_mrg_rx_bufs(VirtIONet *n, int mergeable_rx_bufs, } } +static void virtio_net_set_default_queue_size(VirtIONet *n) +{ + NetClientState *peer = n->nic_conf.peers.ncs[0]; + + /* Default value is 0 if not set */ + if (n->net_conf.rx_queue_size == 0) { + if (peer && peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { + n->net_conf.rx_queue_size = VIRTIO_NET_VHOST_USER_DEFAULT_SIZE; + } else { + n->net_conf.rx_queue_size = VIRTIO_NET_VQ_MAX_SIZE; + } + } + + if (n->net_conf.tx_queue_size == 0) { + if (peer && peer->info->type == NET_CLIENT_DRIVER_VHOST_USER) { + n->net_conf.tx_queue_size = VIRTIO_NET_VHOST_USER_DEFAULT_SIZE; + } else { + n->net_conf.tx_queue_size = VIRTIO_NET_VQ_MAX_SIZE; + } + } +} + static int virtio_net_max_tx_queue_size(VirtIONet *n) { NetClientState *peer = n->nic_conf.peers.ncs[0]; @@ -630,14 +657,14 @@ static int virtio_net_max_tx_queue_size(VirtIONet *n) * Backends other than vhost-user don't support max queue size. 
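With virtio_net_set_default_queue_size() above, a rx/tx queue_size property of 0 now means "choose per backend": vhost-user peers default to the 2048-entry rings, everything else to the virtio-net maximum. A sketch of the rule; 1024 is an assumed stand-in for VIRTIO_NET_VQ_MAX_SIZE, whose value this hunk does not show:

#include <stdbool.h>
#include <stdint.h>

static uint16_t default_queue_size(uint16_t requested, bool peer_is_vhost_user)
{
    if (requested != 0) {
        return requested;              /* an explicit property always wins */
    }
    /* 2048 mirrors VIRTIO_NET_VHOST_USER_DEFAULT_SIZE above; 1024 is an
     * assumed stand-in for VIRTIO_NET_VQ_MAX_SIZE. */
    return peer_is_vhost_user ? 2048 : 1024;
}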
*/ if (!peer) { - return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; + return VIRTIO_NET_VQ_MAX_SIZE; } if (peer->info->type != NET_CLIENT_DRIVER_VHOST_USER) { - return VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE; + return VIRTIO_NET_VQ_MAX_SIZE; } - return VIRTQUEUE_MAX_SIZE; + return VIRTIO_NET_VQ_MAX_SIZE; } static int peer_attach(VirtIONet *n, int index) @@ -749,6 +776,21 @@ static uint64_t virtio_net_get_features(VirtIODevice *vdev, uint64_t features, features |= (1ULL << VIRTIO_NET_F_MTU); } + /* + * Since GUEST_ANNOUNCE is emulated, the feature bit could be set without + * being enabled. This happens in the vDPA case. + * + * Make sure the feature set is not incoherent, as the driver could refuse + * to start. + * + * TODO: QEMU is able to emulate a CVQ just for guest_announce purposes, + * helping the guest announce its new location to vDPA devices that do not + * support it. + */ + if (!virtio_has_feature(vdev->backend_features, VIRTIO_NET_F_CTRL_VQ)) { + virtio_clear_feature(&features, VIRTIO_NET_F_GUEST_ANNOUNCE); + } + return features; } @@ -789,7 +831,7 @@ static uint64_t virtio_net_guest_offloads_by_features(uint32_t features) return guest_offloads_mask & features; } -static inline uint64_t virtio_net_supported_guest_offloads(VirtIONet *n) +static inline uint64_t virtio_net_supported_guest_offloads(const VirtIONet *n) { VirtIODevice *vdev = VIRTIO_DEVICE(n); return virtio_net_guest_offloads_by_features(vdev->guest_features); @@ -913,6 +955,12 @@ static void virtio_net_set_features(VirtIODevice *vdev, uint64_t features) continue; } vhost_net_ack_features(get_vhost_net(nc->peer), features); + + /* + * Keep acked_features in NetVhostUserState up to date so it + * cannot miss any features configured by the guest virtio driver. + */ + vhost_net_save_acked_features(nc->peer); } if (virtio_has_feature(features, VIRTIO_NET_F_CTRL_VLAN)) { @@ -1371,6 +1419,7 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, { VirtIODevice *vdev = VIRTIO_DEVICE(n); uint16_t queue_pairs; + NetClientState *nc = qemu_get_queue(n->nic); virtio_net_disable_rss(n); if (cmd == VIRTIO_NET_CTRL_MQ_HASH_CONFIG) { @@ -1403,6 +1452,13 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, } n->curr_queue_pairs = queue_pairs; + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_VHOST_VDPA) { + /* + * Avoid updating the backend for a vdpa device: we're only interested + * in updating the device model queues.
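The virtio_net_get_features() hunk above enforces a dependency between feature bits: GUEST_ANNOUNCE is emulated via the control virtqueue, so it has to be dropped when the backend did not offer CTRL_VQ. A self-contained sketch of that sanitation; the bit positions follow the virtio spec but should be treated as assumptions here:

#include <stdint.h>

#define F_CTRL_VQ        17   /* VIRTIO_NET_F_CTRL_VQ (assumed) */
#define F_GUEST_ANNOUNCE 21   /* VIRTIO_NET_F_GUEST_ANNOUNCE (assumed) */

static uint64_t sanitize_features(uint64_t features, uint64_t backend_features)
{
    /* Announcing needs the control VQ; drop the bit if the backend lacks it. */
    if (!(backend_features & (1ULL << F_CTRL_VQ))) {
        features &= ~(1ULL << F_GUEST_ANNOUNCE);
    }
    return features;
}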
+ */ + return VIRTIO_NET_OK; + } /* stop the backend before changing the number of queue_pairs to avoid handling a * disabled queue */ virtio_net_set_status(vdev, vdev->status); @@ -1411,56 +1467,71 @@ static int virtio_net_handle_mq(VirtIONet *n, uint8_t cmd, return VIRTIO_NET_OK; } -static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, + const struct iovec *in_sg, unsigned in_num, + const struct iovec *out_sg, + unsigned out_num) { VirtIONet *n = VIRTIO_NET(vdev); struct virtio_net_ctrl_hdr ctrl; virtio_net_ctrl_ack status = VIRTIO_NET_ERR; - VirtQueueElement *elem; size_t s; struct iovec *iov, *iov2; - unsigned int iov_cnt; + + if (iov_size(in_sg, in_num) < sizeof(status) || + iov_size(out_sg, out_num) < sizeof(ctrl)) { + virtio_error(vdev, "virtio-net ctrl missing headers"); + return 0; + } + + iov2 = iov = g_memdup(out_sg, sizeof(struct iovec) * out_num); + s = iov_to_buf(iov, out_num, 0, &ctrl, sizeof(ctrl)); + iov_discard_front(&iov, &out_num, sizeof(ctrl)); + if (s != sizeof(ctrl)) { + status = VIRTIO_NET_ERR; + } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { + status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { + status = virtio_net_handle_mac(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { + status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { + status = virtio_net_handle_announce(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { + status = virtio_net_handle_mq(n, ctrl.cmd, iov, out_num); + } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { + status = virtio_net_handle_offloads(n, ctrl.cmd, iov, out_num); + } + + s = iov_from_buf(in_sg, in_num, 0, &status, sizeof(status)); + assert(s == sizeof(status)); + + g_free(iov2); + return sizeof(status); +} + +static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq) +{ + VirtQueueElement *elem; for (;;) { + size_t written; elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); if (!elem) { break; } - if (iov_size(elem->in_sg, elem->in_num) < sizeof(status) || - iov_size(elem->out_sg, elem->out_num) < sizeof(ctrl)) { - virtio_error(vdev, "virtio-net ctrl missing headers"); + + written = virtio_net_handle_ctrl_iov(vdev, elem->in_sg, elem->in_num, + elem->out_sg, elem->out_num); + if (written > 0) { + virtqueue_push(vq, elem, written); + virtio_notify(vdev, vq); + g_free(elem); + } else { virtqueue_detach_element(vq, elem, 0); g_free(elem); break; } - - iov_cnt = elem->out_num; - iov2 = iov = g_memdup(elem->out_sg, sizeof(struct iovec) * elem->out_num); - s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl)); - iov_discard_front(&iov, &iov_cnt, sizeof(ctrl)); - if (s != sizeof(ctrl)) { - status = VIRTIO_NET_ERR; - } else if (ctrl.class == VIRTIO_NET_CTRL_RX) { - status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_MAC) { - status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) { - status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) { - status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_MQ) { - status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt); - } else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) { - status = 
virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt); - } - - s = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, sizeof(status)); - assert(s == sizeof(status)); - - virtqueue_push(vq, elem, sizeof(status)); - virtio_notify(vdev, vq); - g_free(iov2); - g_free(elem); } } @@ -1747,7 +1818,8 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, if (!no_rss && n->rss_data.enabled && n->rss_data.enabled_software_rss) { int index = virtio_net_process_rss(nc, buf, size); if (index >= 0) { - NetClientState *nc2 = qemu_get_subqueue(n->nic, index); + NetClientState *nc2 = + qemu_get_subqueue(n->nic, index % n->curr_queue_pairs); return virtio_net_receive_rcu(nc2, buf, size, true); } } @@ -1862,6 +1934,7 @@ static ssize_t virtio_net_receive_rcu(NetClientState *nc, const uint8_t *buf, err: for (j = 0; j < i; j++) { + virtqueue_detach_element(q->rx_vq, elems[j], lens[j]); g_free(elems[j]); } @@ -2519,7 +2592,7 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q) ssize_t ret; unsigned int out_num; struct iovec sg[VIRTQUEUE_MAX_SIZE], sg2[VIRTQUEUE_MAX_SIZE + 1], *out_sg; - struct virtio_net_hdr_mrg_rxbuf mhdr; + struct virtio_net_hdr_v1_hash vhdr; elem = virtqueue_pop(q->tx_vq, sizeof(VirtQueueElement)); if (!elem) { @@ -2530,22 +2603,18 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q) out_sg = elem->out_sg; if (out_num < 1) { virtio_error(vdev, "virtio-net header not in first element"); - virtqueue_detach_element(q->tx_vq, elem, 0); - g_free(elem); - return -EINVAL; + goto detach; } if (n->has_vnet_hdr) { - if (iov_to_buf(out_sg, out_num, 0, &mhdr, n->guest_hdr_len) < + if (iov_to_buf(out_sg, out_num, 0, &vhdr, n->guest_hdr_len) < n->guest_hdr_len) { virtio_error(vdev, "virtio-net header incorrect"); - virtqueue_detach_element(q->tx_vq, elem, 0); - g_free(elem); - return -EINVAL; + goto detach; } if (n->needs_vnet_hdr_swap) { - virtio_net_hdr_swap(vdev, (void *) &mhdr); - sg2[0].iov_base = &mhdr; + virtio_net_hdr_swap(vdev, (void *) &vhdr); + sg2[0].iov_base = &vhdr; sg2[0].iov_len = n->guest_hdr_len; out_num = iov_copy(&sg2[1], ARRAY_SIZE(sg2) - 1, out_sg, out_num, @@ -2572,6 +2641,11 @@ static int32_t virtio_net_flush_tx(VirtIONetQueue *q) n->guest_hdr_len, -1); out_num = sg_num; out_sg = sg; + + if (out_num < 1) { + virtio_error(vdev, "virtio-net nothing to send"); + goto detach; + } } ret = qemu_sendv_packet_async(qemu_get_subqueue(n->nic, queue_index), @@ -2592,6 +2666,11 @@ drop: } } return num_packets; + +detach: + virtqueue_detach_element(q->tx_vq, elem, 0); + g_free(elem); + return -EINVAL; } static void virtio_net_handle_tx_timer(VirtIODevice *vdev, VirtQueue *vq) @@ -2630,6 +2709,10 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) VirtIONet *n = VIRTIO_NET(vdev); VirtIONetQueue *q = &n->vqs[vq2q(virtio_get_queue_index(vq))]; + if (unlikely(n->vhost_started)) { + return; + } + if (unlikely((n->status & VIRTIO_NET_S_LINK_UP) == 0)) { virtio_net_drop_tx_queue_data(vdev, vq); return; @@ -2644,7 +2727,10 @@ static void virtio_net_handle_tx_bh(VirtIODevice *vdev, VirtQueue *vq) return; } virtio_queue_set_notification(vq, 0); - qemu_bh_schedule(q->tx_bh); + + if (q->tx_bh) { + qemu_bh_schedule(q->tx_bh); + } } static void virtio_net_tx_timer(void *opaque) @@ -2737,7 +2823,8 @@ static void virtio_net_add_queue(VirtIONet *n, int index) n->vqs[index].tx_vq = virtio_add_queue(vdev, n->net_conf.tx_queue_size, virtio_net_handle_tx_bh); - n->vqs[index].tx_bh = qemu_bh_new(virtio_net_tx_bh, &n->vqs[index]); + n->vqs[index].tx_bh = 
qemu_bh_new_guarded(virtio_net_tx_bh, &n->vqs[index], + &DEVICE(vdev)->mem_reentrancy_guard); } n->vqs[index].tx_waiting = 0; @@ -3161,8 +3248,31 @@ static NetClientInfo net_virtio_info = { static bool virtio_net_guest_notifier_pending(VirtIODevice *vdev, int idx) { VirtIONet *n = VIRTIO_NET(vdev); - NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); + NetClientState *nc; assert(n->vhost_started); + if (!n->multiqueue && idx == 2) { + /* Must guard against an invalid feature set or a bogus queue index + * being set by a malicious guest, or arriving through a buggy + * migration stream. + */ + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bogus vq index ignored\n", __func__); + return false; + } + nc = qemu_get_subqueue(n->nic, n->max_queue_pairs); + } else { + nc = qemu_get_subqueue(n->nic, vq2q(idx)); + } + /* + * Check for the configure interrupt: VIRTIO_CONFIG_IRQ_IDX (-1) is used + * as the index of the configure interrupt. If the driver does not + * support the configure interrupt, the function returns false. + */ + + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return vhost_net_config_pending(get_vhost_net(nc->peer)); + } return vhost_net_virtqueue_pending(get_vhost_net(nc->peer), idx); } @@ -3170,10 +3280,33 @@ static void virtio_net_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) { VirtIONet *n = VIRTIO_NET(vdev); - NetClientState *nc = qemu_get_subqueue(n->nic, vq2q(idx)); + NetClientState *nc; assert(n->vhost_started); - vhost_net_virtqueue_mask(get_vhost_net(nc->peer), - vdev, idx, mask); + if (!n->multiqueue && idx == 2) { + /* Must guard against an invalid feature set or a bogus queue index + * being set by a malicious guest, or arriving through a buggy + * migration stream. + */ + if (!virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: bogus vq index ignored\n", __func__); + return; + } + nc = qemu_get_subqueue(n->nic, n->max_queue_pairs); + } else { + nc = qemu_get_subqueue(n->nic, vq2q(idx)); + } + /* + * Check for the configure interrupt: VIRTIO_CONFIG_IRQ_IDX (-1) is used + * as the index of the configure interrupt. If the driver does not + * support the configure interrupt, the function simply returns. + */ + + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + vhost_net_config_mask(get_vhost_net(nc->peer), vdev, mask); + return; + } + vhost_net_virtqueue_mask(get_vhost_net(nc->peer), vdev, idx, mask); } static void virtio_net_set_config_size(VirtIONet *n, uint64_t host_features) @@ -3385,29 +3518,31 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) virtio_net_set_config_size(n, n->host_features); virtio_init(vdev, "virtio-net", VIRTIO_ID_NET, n->config_size); + virtio_net_set_default_queue_size(n); + /* * We set a lower limit on RX queue size to what it always was. * Guests that want a smaller ring can always resize it without * help from us (using virtio 1 and up).
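The notifier hooks above now route one special index, VIRTIO_CONFIG_IRQ_IDX (-1), to the configuration-change interrupt instead of a virtqueue. A toy sketch of the dispatch shape (state variables are illustrative only):

#include <stdbool.h>

#define CONFIG_IRQ_IDX (-1)          /* stands in for VIRTIO_CONFIG_IRQ_IDX */
#define NUM_QUEUES 4

static bool config_irq_pending;      /* illustrative state only */
static bool vq_irq_pending[NUM_QUEUES];

/* idx == -1 selects the config-change interrupt, anything else a virtqueue. */
static bool notifier_pending(int idx)
{
    if (idx == CONFIG_IRQ_IDX) {
        return config_irq_pending;
    }
    return idx >= 0 && idx < NUM_QUEUES && vq_irq_pending[idx];
}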
*/ if (n->net_conf.rx_queue_size < VIRTIO_NET_RX_QUEUE_MIN_SIZE || - n->net_conf.rx_queue_size > VIRTQUEUE_MAX_SIZE || + n->net_conf.rx_queue_size > VIRTIO_NET_VQ_MAX_SIZE || !is_power_of_2(n->net_conf.rx_queue_size)) { error_setg(errp, "Invalid rx_queue_size (= %" PRIu16 "), " "must be a power of 2 between %d and %d.", n->net_conf.rx_queue_size, VIRTIO_NET_RX_QUEUE_MIN_SIZE, - VIRTQUEUE_MAX_SIZE); + VIRTIO_NET_VQ_MAX_SIZE); virtio_cleanup(vdev); return; } if (n->net_conf.tx_queue_size < VIRTIO_NET_TX_QUEUE_MIN_SIZE || - n->net_conf.tx_queue_size > VIRTQUEUE_MAX_SIZE || + n->net_conf.tx_queue_size > VIRTIO_NET_VQ_MAX_SIZE || !is_power_of_2(n->net_conf.tx_queue_size)) { error_setg(errp, "Invalid tx_queue_size (= %" PRIu16 "), " "must be a power of 2 between %d and %d", n->net_conf.tx_queue_size, VIRTIO_NET_TX_QUEUE_MIN_SIZE, - VIRTQUEUE_MAX_SIZE); + VIRTIO_NET_VQ_MAX_SIZE); virtio_cleanup(vdev); return; } @@ -3467,10 +3602,12 @@ static void virtio_net_device_realize(DeviceState *dev, Error **errp) * Happen when virtio_net_set_netclient_name has been called. */ n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, - n->netclient_type, n->netclient_name, n); + n->netclient_type, n->netclient_name, + &dev->mem_reentrancy_guard, n); } else { n->nic = qemu_new_nic(&net_virtio_info, &n->nic_conf, - object_get_typename(OBJECT(dev)), dev->id, n); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, n); } for (i = 0; i < n->max_queue_pairs; i++) { @@ -3610,6 +3747,14 @@ static bool dev_unplug_pending(void *opaque) return vdc->primary_unplug_pending(dev); } +static struct vhost_dev *virtio_net_get_vhost(VirtIODevice *vdev) +{ + VirtIONet *n = VIRTIO_NET(vdev); + NetClientState *nc = qemu_get_queue(n->nic); + struct vhost_net *net = get_vhost_net(nc->peer); + return &net->dev; +} + static const VMStateDescription vmstate_virtio_net = { .name = "virtio-net", .minimum_version_id = VIRTIO_NET_VM_VERSION, @@ -3676,10 +3821,8 @@ static Property virtio_net_properties[] = { TX_TIMER_INTERVAL), DEFINE_PROP_INT32("x-txburst", VirtIONet, net_conf.txburst, TX_BURST), DEFINE_PROP_STRING("tx", VirtIONet, net_conf.tx), - DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size, - VIRTIO_NET_RX_QUEUE_DEFAULT_SIZE), - DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size, - VIRTIO_NET_TX_QUEUE_DEFAULT_SIZE), + DEFINE_PROP_UINT16("rx_queue_size", VirtIONet, net_conf.rx_queue_size, 0), + DEFINE_PROP_UINT16("tx_queue_size", VirtIONet, net_conf.tx_queue_size, 0), DEFINE_PROP_UINT16("host_mtu", VirtIONet, net_conf.mtu, 0), DEFINE_PROP_BOOL("x-mtu-bypass-backend", VirtIONet, mtu_bypass_backend, true), @@ -3689,6 +3832,46 @@ static Property virtio_net_properties[] = { DEFINE_PROP_END_OF_LIST(), }; +static void virtio_net_print_features(uint64_t features) +{ + Property *props = virtio_net_properties; + int feature_cnt = 0; + + if (!features) { + return; + } + printf("virtio_net_feature: "); + + for (; features && props->name; props++) { + /* A property's bitnr may simply be the default (0); only 'csum' really uses bit 0. */ + if (props->bitnr == 0 && strcmp(props->name, "csum")) { + continue; + } + + /* Feature words are only 64 bits wide. */ + if (props->bitnr > 63) { + continue; + } + + if (virtio_has_feature(features, props->bitnr)) { + virtio_clear_feature(&features, props->bitnr); + if (feature_cnt != 0) { + printf(", "); + } + printf("%s", props->name); + feature_cnt++; + } + } + + if (features) { + if (feature_cnt != 0) { + printf(", "); + } + printf("unknown bits 0x%" 
PRIx64, features); + } + printf("\n"); +} + static void virtio_net_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -3703,6 +3886,7 @@ static void virtio_net_class_init(ObjectClass *klass, void *data) vdc->set_config = virtio_net_set_config; vdc->get_features = virtio_net_get_features; vdc->set_features = virtio_net_set_features; + vdc->print_features = virtio_net_print_features; vdc->bad_features = virtio_net_bad_features; vdc->reset = virtio_net_reset; vdc->set_status = virtio_net_set_status; @@ -3712,6 +3896,7 @@ static void virtio_net_class_init(ObjectClass *klass, void *data) vdc->post_load = virtio_net_post_load_virtio; vdc->vmsd = &vmstate_virtio_net_device; vdc->primary_unplug_pending = primary_unplug_pending; + vdc->get_vhost = virtio_net_get_vhost; } static const TypeInfo virtio_net_info = { diff --git a/hw/net/vmxnet3.c b/hw/net/vmxnet3.c index f65af4e9ef27a85850968c811e52b1fff64ec8c2..674b3a69467a61eb9ffd45fce779c70dff36678c 100644 --- a/hw/net/vmxnet3.c +++ b/hw/net/vmxnet3.c @@ -1441,7 +1441,10 @@ static void vmxnet3_activate_device(VMXNET3State *s) vmxnet3_setup_rx_filtering(s); /* Cache fields from shared memory */ s->mtu = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, devRead.misc.mtu); - assert(VMXNET3_MIN_MTU <= s->mtu && s->mtu < VMXNET3_MAX_MTU); + if (s->mtu < VMXNET3_MIN_MTU || s->mtu > VMXNET3_MAX_MTU) { + qemu_log_mask(LOG_GUEST_ERROR, "vmxnet3: Bad MTU size: %u\n", s->mtu); + return; + } VMW_CFPRN("MTU is %u", s->mtu); s->max_rx_frags = @@ -1816,7 +1819,9 @@ vmxnet3_io_bar1_write(void *opaque, case VMXNET3_REG_ICR: VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d", val, size); - g_assert_not_reached(); + qemu_log_mask(LOG_GUEST_ERROR, + "%s: write to read-only register VMXNET3_REG_ICR\n", + TYPE_VMXNET3); break; /* Event Cause Register */ @@ -2078,7 +2083,7 @@ static void vmxnet3_net_init(VMXNET3State *s) s->nic = qemu_new_nic(&net_vmxnet3_info, &s->conf, object_get_typename(OBJECT(s)), - d->id, s); + d->id, &d->mem_reentrancy_guard, s); s->peer_has_vhdr = vmxnet3_peer_has_vnet_hdr(s); s->tx_sop = true; diff --git a/hw/net/xen_nic.c b/hw/net/xen_nic.c index 5c815b4f0c5228e4e8439edcccd72f84b45f674d..712fe6706dfa00172b57eef15d776e3186f0cfc4 100644 --- a/hw/net/xen_nic.c +++ b/hw/net/xen_nic.c @@ -294,7 +294,7 @@ static int net_init(struct XenLegacyDevice *xendev) } netdev->nic = qemu_new_nic(&net_xen_info, &netdev->conf, - "xen", NULL, netdev); + "xen", NULL, &xendev->qdev.mem_reentrancy_guard, netdev); snprintf(qemu_get_queue(netdev->nic)->info_str, sizeof(qemu_get_queue(netdev->nic)->info_str), diff --git a/hw/net/xgmac.c b/hw/net/xgmac.c index 0ab6ae91aa11b00c5675d263fc911782484e3632..1f4f277d840f826fb7b6c345e2cecc6aace6c82f 100644 --- a/hw/net/xgmac.c +++ b/hw/net/xgmac.c @@ -402,7 +402,8 @@ static void xgmac_enet_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_xgmac_enet_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); s->regs[XGMAC_ADDR_HIGH(0)] = (s->conf.macaddr.a[5] << 8) | diff --git a/hw/net/xilinx_axienet.c b/hw/net/xilinx_axienet.c index 990ff3a1c25dfe32fa3decf10028cd8f4caeb1a6..8a34243803d22cf4ec9035ef5d390be670856fbd 100644 --- a/hw/net/xilinx_axienet.c +++ b/hw/net/xilinx_axienet.c @@ -968,7 +968,8 @@ static void xilinx_enet_realize(DeviceState *dev, Error **errp) 
qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_xilinx_enet_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); tdk_init(&s->TEMAC.phy); diff --git a/hw/net/xilinx_ethlite.c b/hw/net/xilinx_ethlite.c index 6e09f7e422e6072c8d38e799de7fcf398bf18e51..80cb869e22b3f4e4730966ab1ea92f5a71cf2d94 100644 --- a/hw/net/xilinx_ethlite.c +++ b/hw/net/xilinx_ethlite.c @@ -235,7 +235,8 @@ static void xilinx_ethlite_realize(DeviceState *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_xilinx_ethlite_info, &s->conf, - object_get_typename(OBJECT(dev)), dev->id, s); + object_get_typename(OBJECT(dev)), dev->id, + &dev->mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); } diff --git a/hw/nvme/ctrl.c b/hw/nvme/ctrl.c index 5f573c417b3d66c30814a74b192a6a96d143e901..f1c7641158d92148ef5cdef6fc14e03a14366de5 100644 --- a/hw/nvme/ctrl.c +++ b/hw/nvme/ctrl.c @@ -71,7 +71,7 @@ * the SUBNQN field in the controller will report the NQN of the subsystem * device. This also enables multi controller capability represented in * Identify Controller data structure in CMIC (Controller Multi-path I/O and - * Namesapce Sharing Capabilities). + * Namespace Sharing Capabilities). * * - `aerl` * The Asynchronous Event Request Limit (AERL). Indicates the maximum number @@ -357,6 +357,24 @@ static inline void *nvme_addr_to_pmr(NvmeCtrl *n, hwaddr addr) return memory_region_get_ram_ptr(&n->pmr.dev->mr) + (addr - n->pmr.cba); } +static inline bool nvme_addr_is_iomem(NvmeCtrl *n, hwaddr addr) +{ + hwaddr hi, lo; + + /* + * The purpose of this check is to guard against invalid "local" access to + * the iomem (i.e. controller registers). Thus, we check against the range + * covered by the 'bar0' MemoryRegion since that is currently composed of + * two subregions (the NVMe "MBAR" and the MSI-X table/pba). Note, however, + * that if the device model is ever changed to allow the CMB to be located + * in BAR0 as well, then this must be changed. 
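nvme_addr_is_iomem() is a half-open interval test against the BAR0 window; the int128_get64() call is only there because MemoryRegion sizes are Int128. Reduced to plain integers the check looks like this (hypothetical helper, assuming base + size does not wrap):

#include <stdbool.h>
#include <stdint.h>

static bool addr_in_window(uint64_t addr, uint64_t base, uint64_t size)
{
    /* Half-open interval [base, base + size). */
    return addr >= base && addr < base + size;
}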
+ */ + lo = n->bar0.addr; + hi = lo + int128_get64(n->bar0.size); + + return addr >= lo && addr < hi; +} + static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) { hwaddr hi = addr + size - 1; @@ -614,6 +632,10 @@ static uint16_t nvme_map_addr(NvmeCtrl *n, NvmeSg *sg, hwaddr addr, size_t len) trace_pci_nvme_map_addr(addr, len); + if (nvme_addr_is_iomem(n, addr)) { + return NVME_DATA_TRAS_ERROR; + } + if (nvme_addr_is_cmb(n, addr)) { cmb = true; } else if (nvme_addr_is_pmr(n, addr)) { @@ -680,7 +702,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1, len -= trans_len; if (len) { if (len > n->page_size) { - uint64_t prp_list[n->max_prp_ents]; + g_autofree uint64_t *prp_list = g_new(uint64_t, n->max_prp_ents); uint32_t nents, prp_trans; int i = 0; @@ -1146,12 +1168,13 @@ static uint16_t nvme_tx(NvmeCtrl *n, NvmeSg *sg, uint8_t *ptr, uint32_t len, assert(sg->flags & NVME_SG_ALLOC); if (sg->flags & NVME_SG_DMA) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; uint64_t residual; if (dir == NVME_TX_DIRECTION_TO_DEVICE) { - residual = dma_buf_write(ptr, len, &sg->qsg); + residual = dma_buf_write(ptr, len, &sg->qsg, attrs); } else { - residual = dma_buf_read(ptr, len, &sg->qsg); + residual = dma_buf_read(ptr, len, &sg->qsg, attrs); } if (unlikely(residual)) { @@ -1240,26 +1263,26 @@ uint16_t nvme_bounce_mdata(NvmeCtrl *n, uint8_t *ptr, uint32_t len, } static inline void nvme_blk_read(BlockBackend *blk, int64_t offset, - BlockCompletionFunc *cb, NvmeRequest *req) + uint32_t align, BlockCompletionFunc *cb, + NvmeRequest *req) { assert(req->sg.flags & NVME_SG_ALLOC); if (req->sg.flags & NVME_SG_DMA) { - req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, - cb, req); + req->aiocb = dma_blk_read(blk, &req->sg.qsg, offset, align, cb, req); } else { req->aiocb = blk_aio_preadv(blk, offset, &req->sg.iov, 0, cb, req); } } static inline void nvme_blk_write(BlockBackend *blk, int64_t offset, - BlockCompletionFunc *cb, NvmeRequest *req) + uint32_t align, BlockCompletionFunc *cb, + NvmeRequest *req) { assert(req->sg.flags & NVME_SG_ALLOC); if (req->sg.flags & NVME_SG_DMA) { - req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, BDRV_SECTOR_SIZE, - cb, req); + req->aiocb = dma_blk_write(blk, &req->sg.qsg, offset, align, cb, req); } else { req->aiocb = blk_aio_pwritev(blk, offset, &req->sg.iov, 0, cb, req); } @@ -1935,10 +1958,10 @@ static void nvme_rw_cb(void *opaque, int ret) } if (req->cmd.opcode == NVME_CMD_READ) { - return nvme_blk_read(blk, offset, nvme_rw_complete_cb, req); + return nvme_blk_read(blk, offset, 1, nvme_rw_complete_cb, req); } - return nvme_blk_write(blk, offset, nvme_rw_complete_cb, req); + return nvme_blk_write(blk, offset, 1, nvme_rw_complete_cb, req); } } @@ -2100,7 +2123,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) for (bufp = buf; mbufp < end; bufp += ns->lbaf.ms, mbufp += ns->lbaf.ms) { if (memcmp(bufp + pil, mbufp + pil, ns->lbaf.ms - pil)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } } @@ -2109,7 +2132,7 @@ static void nvme_compare_mdata_cb(void *opaque, int ret) } if (memcmp(buf, ctx->mdata.bounce, ctx->mdata.iov.size)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } @@ -2158,7 +2181,7 @@ static void nvme_compare_data_cb(void *opaque, int ret) } if (memcmp(buf, ctx->data.bounce, ctx->data.iov.size)) { - req->status = NVME_CMP_FAILURE; + req->status = NVME_CMP_FAILURE | NVME_DNR; goto out; } @@ -2358,6 +2381,9 @@ 
static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req) status = nvme_h2c(n, (uint8_t *)iocb->range, sizeof(NvmeDsmRange) * nr, req); if (status) { + g_free(iocb->range); + qemu_aio_unref(iocb); + return status; } @@ -3122,7 +3148,7 @@ static uint16_t nvme_read(NvmeCtrl *n, NvmeRequest *req) block_acct_start(blk_get_stats(blk), &req->acct, data_size, BLOCK_ACCT_READ); - nvme_blk_read(blk, data_offset, nvme_rw_cb, req); + nvme_blk_read(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req); return NVME_NO_COMPLETE; invalid: @@ -3249,7 +3275,7 @@ static uint16_t nvme_do_write(NvmeCtrl *n, NvmeRequest *req, bool append, block_acct_start(blk_get_stats(blk), &req->acct, data_size, BLOCK_ACCT_WRITE); - nvme_blk_write(blk, data_offset, nvme_rw_cb, req); + nvme_blk_write(blk, data_offset, BDRV_SECTOR_SIZE, nvme_rw_cb, req); } else { req->aiocb = blk_aio_pwrite_zeroes(blk, data_offset, data_size, BDRV_REQ_MAY_UNMAP, nvme_rw_cb, diff --git a/hw/nvram/fw_cfg.c b/hw/nvram/fw_cfg.c index c06b30de11216f0c64dfd85cbb06c816c6884369..e5f3c981841d04778849bb5e6df1cfb055e907f4 100644 --- a/hw/nvram/fw_cfg.c +++ b/hw/nvram/fw_cfg.c @@ -357,9 +357,10 @@ static void fw_cfg_dma_transfer(FWCfgState *s) dma_addr = s->dma_addr; s->dma_addr = 0; - if (dma_memory_read(s->dma_as, dma_addr, &dma, sizeof(dma))) { + if (dma_memory_read(s->dma_as, dma_addr, + &dma, sizeof(dma), MEMTXATTRS_UNSPECIFIED)) { stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control), - FW_CFG_DMA_CTL_ERROR); + FW_CFG_DMA_CTL_ERROR, MEMTXATTRS_UNSPECIFIED); return; } @@ -399,7 +400,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s) * tested before. */ if (read) { - if (dma_memory_set(s->dma_as, dma.address, 0, len)) { + if (dma_memory_set(s->dma_as, dma.address, 0, len, + MEMTXATTRS_UNSPECIFIED)) { dma.control |= FW_CFG_DMA_CTL_ERROR; } } @@ -418,7 +420,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s) */ if (read) { if (dma_memory_write(s->dma_as, dma.address, - &e->data[s->cur_offset], len)) { + &e->data[s->cur_offset], len, + MEMTXATTRS_UNSPECIFIED)) { dma.control |= FW_CFG_DMA_CTL_ERROR; } } @@ -426,7 +429,8 @@ static void fw_cfg_dma_transfer(FWCfgState *s) if (!e->allow_write || len != dma.length || dma_memory_read(s->dma_as, dma.address, - &e->data[s->cur_offset], len)) { + &e->data[s->cur_offset], len, + MEMTXATTRS_UNSPECIFIED)) { dma.control |= FW_CFG_DMA_CTL_ERROR; } else if (e->write_cb) { e->write_cb(e->callback_opaque, s->cur_offset, len); @@ -442,7 +446,7 @@ static void fw_cfg_dma_transfer(FWCfgState *s) } stl_be_dma(s->dma_as, dma_addr + offsetof(FWCfgDmaAccess, control), - dma.control); + dma.control, MEMTXATTRS_UNSPECIFIED); trace_fw_cfg_read(s, 0); } diff --git a/hw/nvram/xlnx-efuse.c b/hw/nvram/xlnx-efuse.c index a0fd77b586dc123581af22e941b955dbc5da02db..5b131e89b108ede89dfacde787dbdc3a29314394 100644 --- a/hw/nvram/xlnx-efuse.c +++ b/hw/nvram/xlnx-efuse.c @@ -217,6 +217,13 @@ static void efuse_realize(DeviceState *dev, Error **errp) } } +static void efuse_finalize(Object *obj) +{ + XlnxEFuse *s = XLNX_EFUSE(obj); + + g_free(s->ro_bits); +} + static void efuse_prop_set_drive(Object *obj, Visitor *v, const char *name, void *opaque, Error **errp) { @@ -273,6 +280,7 @@ static const TypeInfo efuse_info = { .name = TYPE_XLNX_EFUSE, .parent = TYPE_DEVICE, .instance_size = sizeof(XlnxEFuse), + .instance_finalize = efuse_finalize, .class_init = efuse_class_init, }; diff --git a/hw/nvram/xlnx-versal-efuse-ctrl.c b/hw/nvram/xlnx-versal-efuse-ctrl.c index 
b35ba65ab57bd107a1d387f297879be581f79212..2d2dc09526b8d0ec126a660d03ea2a3112dbb6a7 100644 --- a/hw/nvram/xlnx-versal-efuse-ctrl.c +++ b/hw/nvram/xlnx-versal-efuse-ctrl.c @@ -725,6 +725,13 @@ static void efuse_ctrl_init(Object *obj) sysbus_init_irq(sbd, &s->irq_efuse_imr); } +static void efuse_ctrl_finalize(Object *obj) +{ + XlnxVersalEFuseCtrl *s = XLNX_VERSAL_EFUSE_CTRL(obj); + + g_free(s->extra_pg0_lock_spec); +} + static const VMStateDescription vmstate_efuse_ctrl = { .name = TYPE_XLNX_VERSAL_EFUSE_CTRL, .version_id = 1, @@ -762,6 +769,7 @@ static const TypeInfo efuse_ctrl_info = { .instance_size = sizeof(XlnxVersalEFuseCtrl), .class_init = efuse_ctrl_class_init, .instance_init = efuse_ctrl_init, + .instance_finalize = efuse_ctrl_finalize, }; static void efuse_ctrl_register_types(void) diff --git a/hw/pci-bridge/Kconfig b/hw/pci-bridge/Kconfig index f8df4315baae394798c03860bd7fc80077b6ee48..d87b96e8bc1b196f4ab9a6e6a8b9fcacf7549d73 100644 --- a/hw/pci-bridge/Kconfig +++ b/hw/pci-bridge/Kconfig @@ -1,3 +1,8 @@ +config PCI_BRIDGE + bool + default y if PCI_DEVICES + depends on PCI + config PCIE_PORT bool default y if PCI_DEVICES diff --git a/hw/pci-bridge/gen_pcie_root_port.c b/hw/pci-bridge/gen_pcie_root_port.c index 20099a8ae31afa3ecf7a4740dfe9d094a448e232..0bf9df9c58af9ef7d13837f7e13330becbb696ed 100644 --- a/hw/pci-bridge/gen_pcie_root_port.c +++ b/hw/pci-bridge/gen_pcie_root_port.c @@ -140,6 +140,8 @@ static Property gen_rp_props[] = { speed, PCIE_LINK_SPEED_16), DEFINE_PROP_PCIE_LINK_WIDTH("x-width", PCIESlot, width, PCIE_LINK_WIDTH_32), + DEFINE_PROP_UINT8("fast-plug", PCIESlot, fast_plug, 0), + DEFINE_PROP_UINT8("fast-unplug", PCIESlot, fast_unplug, 0), DEFINE_PROP_END_OF_LIST() }; diff --git a/hw/pci-bridge/meson.build b/hw/pci-bridge/meson.build index daab8acf2aae19b7e42263f265fc9b0880126fea..a48a6b9653a0609c123fab14a3a088a299895bd7 100644 --- a/hw/pci-bridge/meson.build +++ b/hw/pci-bridge/meson.build @@ -1,5 +1,5 @@ pci_ss = ss.source_set() -pci_ss.add(files('pci_bridge_dev.c')) +pci_ss.add(when: 'CONFIG_PCI_BRIDGE', if_true: files('pci_bridge_dev.c')) pci_ss.add(when: 'CONFIG_I82801B11', if_true: files('i82801b11.c')) pci_ss.add(when: 'CONFIG_IOH3420', if_true: files('ioh3420.c')) pci_ss.add(when: 'CONFIG_PCIE_PORT', if_true: files('pcie_root_port.c', 'gen_pcie_root_port.c', 'pcie_pci_bridge.c')) diff --git a/hw/pci-bridge/pci_expander_bridge.c b/hw/pci-bridge/pci_expander_bridge.c index 10e6e7c2ab0bd4f4c04a4f4cc38ca7470c034f25..de932286b54192a6596535ad53a49e545ba62c00 100644 --- a/hw/pci-bridge/pci_expander_bridge.c +++ b/hw/pci-bridge/pci_expander_bridge.c @@ -192,6 +192,12 @@ static int pxb_map_irq_fn(PCIDevice *pci_dev, int pin) { PCIDevice *pxb = pci_get_bus(pci_dev)->parent_dev; + /* + * First carry out normal swizzle to handle + * multiple root ports on a pxb instance.
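pci_swizzle_map_irq_fn() applies the conventional PCI bridge swizzle before the pxb-specific IRQ computation, so devices behind different root ports no longer collapse onto the same pin. The underlying formula is just (sketch):

/* Conventional PCI IRQ swizzle: pin is 0-3 for INTA-INTD. */
static int pci_swizzle(int slot, int pin)
{
    return (slot + pin) % 4;   /* 4 == PCI_NUM_PINS */
}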
+ */ + pin = pci_swizzle_map_irq_fn(pci_dev, pin); + /* * The bios does not index the pxb slot number when * it computes the IRQ because it resides on bus 0 diff --git a/hw/pci-bridge/xio3130_downstream.c b/hw/pci-bridge/xio3130_downstream.c index 04aae72cd614d4d8eb9c8a3f023ec71323f09086..b17cafd359b9d27d8fba32f667e6a7491edf9bb1 100644 --- a/hw/pci-bridge/xio3130_downstream.c +++ b/hw/pci-bridge/xio3130_downstream.c @@ -28,6 +28,7 @@ #include "migration/vmstate.h" #include "qapi/error.h" #include "qemu/module.h" +#include "hw/pci-bridge/xio3130_downstream.h" #define PCI_DEVICE_ID_TI_XIO3130D 0x8233 /* downstream port */ #define XIO3130_REVISION 0x1 @@ -173,7 +174,7 @@ static void xio3130_downstream_class_init(ObjectClass *klass, void *data) } static const TypeInfo xio3130_downstream_info = { - .name = "xio3130-downstream", + .name = TYPE_XIO3130_DOWNSTREAM, .parent = TYPE_PCIE_SLOT, .class_init = xio3130_downstream_class_init, .interfaces = (InterfaceInfo[]) { diff --git a/hw/pci-host/designware.c b/hw/pci-host/designware.c index bde3a343a2f50bfc5a3a8159097b100dfe337ae4..c235b9daa34d410a3f2c3e1444a2755809e879c5 100644 --- a/hw/pci-host/designware.c +++ b/hw/pci-host/designware.c @@ -340,6 +340,8 @@ static void designware_pcie_root_config_write(PCIDevice *d, uint32_t address, break; case DESIGNWARE_PCIE_ATU_VIEWPORT: + val &= DESIGNWARE_PCIE_ATU_REGION_INBOUND | + (DESIGNWARE_PCIE_NUM_VIEWPORTS - 1); root->atu_viewport = val; break; diff --git a/hw/pci-host/pnv_phb3.c b/hw/pci-host/pnv_phb3.c index a7f96850055a7ff24120d2bb4e153a4a4e80b250..bdc128013e67d87de2add1dfae0d6ec1bb678a92 100644 --- a/hw/pci-host/pnv_phb3.c +++ b/hw/pci-host/pnv_phb3.c @@ -715,7 +715,8 @@ static bool pnv_phb3_resolve_pe(PnvPhb3DMASpace *ds) bus_num = pci_bus_num(ds->bus); addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; addr += 2 * ((bus_num << 8) | ds->devfn); - if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { + if (dma_memory_read(&address_space_memory, addr, &rte, + sizeof(rte), MEMTXATTRS_UNSPECIFIED)) { phb3_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); /* Set error bits ? fence ? ... 
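The swizzle added to pxb_map_irq_fn() above is the conventional PCI-to-PCI bridge rotation; a standalone sketch with illustrative names (QEMU's pci_swizzle_map_irq_fn() performs the equivalent computation):

    /* Each slot rotates INTA..INTD by its slot number, per the
     * PCI-to-PCI Bridge Architecture Specification, so two root ports
     * in different slots of one pxb no longer share a pin. */
    static int swizzle_pin(int slot, int pin)
    {
        return (pin + slot) % PCI_NUM_PINS;    /* PCI_NUM_PINS == 4 */
    }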
*/ return false; @@ -794,7 +795,7 @@ static void pnv_phb3_translate_tve(PnvPhb3DMASpace *ds, hwaddr addr, /* Grab the TCE address */ taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3); if (dma_memory_read(&address_space_memory, taddr, &tce, - sizeof(tce))) { + sizeof(tce), MEMTXATTRS_UNSPECIFIED)) { phb3_error(phb, "Failed to read TCE at 0x%"PRIx64, taddr); return; } @@ -1129,6 +1130,7 @@ static void pnv_phb3_root_bus_class_init(ObjectClass *klass, void *data) static const TypeInfo pnv_phb3_root_bus_info = { .name = TYPE_PNV_PHB3_ROOT_BUS, .parent = TYPE_PCIE_BUS, + .instance_size = sizeof(PnvPHB3RootBus), .class_init = pnv_phb3_root_bus_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, diff --git a/hw/pci-host/pnv_phb3_msi.c b/hw/pci-host/pnv_phb3_msi.c index 099d2092a2c2fcaf9863f26a142062368d15ca1b..8bcbc2cc4f37fb3585af3c45beefaa70b3f3624c 100644 --- a/hw/pci-host/pnv_phb3_msi.c +++ b/hw/pci-host/pnv_phb3_msi.c @@ -53,7 +53,8 @@ static bool phb3_msi_read_ive(PnvPHB3 *phb, int srcno, uint64_t *out_ive) return false; } - if (dma_memory_read(&address_space_memory, ive_addr, &ive, sizeof(ive))) { + if (dma_memory_read(&address_space_memory, ive_addr, + &ive, sizeof(ive), MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "Failed to read IVE at 0x%" PRIx64, ive_addr); return false; @@ -73,7 +74,8 @@ static void phb3_msi_set_p(Phb3MsiState *msi, int srcno, uint8_t gen) return; } - if (dma_memory_write(&address_space_memory, ive_addr + 4, &p, 1)) { + if (dma_memory_write(&address_space_memory, ive_addr + 4, + &p, 1, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "Failed to write IVE (set P) at 0x%" PRIx64, ive_addr); } @@ -89,7 +91,8 @@ static void phb3_msi_set_q(Phb3MsiState *msi, int srcno) return; } - if (dma_memory_write(&address_space_memory, ive_addr + 5, &q, 1)) { + if (dma_memory_write(&address_space_memory, ive_addr + 5, + &q, 1, MEMTXATTRS_UNSPECIFIED)) { qemu_log_mask(LOG_GUEST_ERROR, "Failed to write IVE (set Q) at 0x%" PRIx64, ive_addr); } diff --git a/hw/pci-host/pnv_phb4.c b/hw/pci-host/pnv_phb4.c index 5c375a9f285dc74978aae8760451b94bb075ade3..9f115da7ac47d05258d655bda9ec29d3bf99dde9 100644 --- a/hw/pci-host/pnv_phb4.c +++ b/hw/pci-host/pnv_phb4.c @@ -891,7 +891,8 @@ static bool pnv_phb4_resolve_pe(PnvPhb4DMASpace *ds) bus_num = pci_bus_num(ds->bus); addr = rtt & PHB_RTT_BASE_ADDRESS_MASK; addr += 2 * PCI_BUILD_BDF(bus_num, ds->devfn); - if (dma_memory_read(&address_space_memory, addr, &rte, sizeof(rte))) { + if (dma_memory_read(&address_space_memory, addr, &rte, + sizeof(rte), MEMTXATTRS_UNSPECIFIED)) { phb_error(ds->phb, "Failed to read RTT entry at 0x%"PRIx64, addr); /* Set error bits ? fence ? ... 
*/ return false; @@ -961,7 +962,7 @@ static void pnv_phb4_translate_tve(PnvPhb4DMASpace *ds, hwaddr addr, /* Grab the TCE address */ taddr = base | (((addr >> sh) & ((1ul << tbl_shift) - 1)) << 3); if (dma_memory_read(&address_space_memory, taddr, &tce, - sizeof(tce))) { + sizeof(tce), MEMTXATTRS_UNSPECIFIED)) { phb_error(ds->phb, "Failed to read TCE at 0x%"PRIx64, taddr); return; } @@ -1320,6 +1321,7 @@ static void pnv_phb4_root_bus_class_init(ObjectClass *klass, void *data) static const TypeInfo pnv_phb4_root_bus_info = { .name = TYPE_PNV_PHB4_ROOT_BUS, .parent = TYPE_PCIE_BUS, + .instance_size = sizeof(PnvPHB4RootBus), .class_init = pnv_phb4_root_bus_class_init, .interfaces = (InterfaceInfo[]) { { INTERFACE_PCIE_DEVICE }, diff --git a/hw/pci/pci.c b/hw/pci/pci.c index e5993c1ef52b7c9e39faa7de4020ab5898aef0a3..df58028b4c9938b21dc4f76e1280b34856016507 100644 --- a/hw/pci/pci.c +++ b/hw/pci/pci.c @@ -79,6 +79,8 @@ static Property pci_props[] = { DEFINE_PROP_STRING("failover_pair_id", PCIDevice, failover_pair_id), DEFINE_PROP_UINT32("acpi-index", PCIDevice, acpi_index, 0), + DEFINE_PROP_SIZE32("x-max-bounce-buffer-size", PCIDevice, + max_bounce_buffer_size, DEFAULT_MAX_BOUNCE_BUFFER_SIZE), DEFINE_PROP_END_OF_LIST() }; @@ -269,8 +271,15 @@ static void pci_change_irq_level(PCIDevice *pci_dev, int irq_num, int change) { PCIBus *bus; for (;;) { + int dev_irq = irq_num; bus = pci_get_bus(pci_dev); + if (!bus) { + return; + } irq_num = bus->map_irq(pci_dev, irq_num); + trace_pci_route_irq(dev_irq, DEVICE(pci_dev)->canonical_path, irq_num, + pci_bus_is_root(bus) ? "root-complex" + : DEVICE(bus->parent_dev)->canonical_path); if (bus->set_irq) break; pci_dev = bus->parent_dev; @@ -1101,6 +1110,8 @@ static PCIDevice *do_pci_register_device(PCIDevice *pci_dev, "bus master container", UINT64_MAX); address_space_init(&pci_dev->bus_master_as, &pci_dev->bus_master_container_region, pci_dev->name); + pci_dev->bus_master_as.max_bounce_buffer_size = + pci_dev->max_bounce_buffer_size; if (phase_check(PHASE_MACHINE_READY)) { pci_init_bus_master(pci_dev); @@ -1464,7 +1475,7 @@ void pci_default_write_config(PCIDevice *d, uint32_t addr, uint32_t val_in, int range_covers_byte(addr, l, PCI_COMMAND)) pci_update_mappings(d); - if (range_covers_byte(addr, l, PCI_COMMAND)) { + if (ranges_overlap(addr, l, PCI_COMMAND, 2)) { pci_update_irq_disabled(d, was_irq_disabled); memory_region_set_enabled(&d->bus_master_enable_region, (pci_get_word(d->config + PCI_COMMAND) @@ -1497,11 +1508,6 @@ static void pci_irq_handler(void *opaque, int irq_num, int level) pci_change_irq_level(pci_dev, irq_num, change); } -static inline int pci_intx(PCIDevice *pci_dev) -{ - return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1; -} - qemu_irq pci_allocate_irq(PCIDevice *pci_dev) { int intx = pci_intx(pci_dev); @@ -1528,8 +1534,12 @@ PCIINTxRoute pci_device_route_intx_to_irq(PCIDevice *dev, int pin) PCIBus *bus; do { + int dev_irq = pin; bus = pci_get_bus(dev); pin = bus->map_irq(dev, pin); + trace_pci_route_irq(dev_irq, DEVICE(dev)->canonical_path, pin, + pci_bus_is_root(bus) ? "root-complex" + : DEVICE(bus->parent_dev)->canonical_path); dev = bus->parent_dev; } while (dev); @@ -1576,7 +1586,7 @@ void pci_device_set_intx_routing_notifier(PCIDevice *dev, * 9.1: Interrupt routing. 
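The range_covers_byte() to ranges_overlap() switch above matters because PCI_COMMAND is a 16-bit register: a one-byte write to PCI_COMMAND + 1 (for instance toggling INTX_DISABLE, which lives in bit 10) never touches the register's first byte, so the old test missed it. The fixed check in isolation:

    /* True if the write touches either of the two bytes of the command
     * register, not just its first byte. */
    if (ranges_overlap(addr, l, PCI_COMMAND, 2)) {
        /* re-evaluate INTx disable and bus-master enable */
    }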
Table 9-1 * * the PCI Express Base Specification, Revision 2.1 - * 2.2.8.1: INTx interrutp signaling - Rules + * 2.2.8.1: INTx interrupt signaling - Rules * the Implementation Note * Table 2-20 */ @@ -2408,6 +2418,7 @@ static void pci_add_option_rom(PCIDevice *pdev, bool is_default_rom, } else { snprintf(name, sizeof(name), "%s.rom", object_get_typename(OBJECT(pdev))); } + qemu_log("add rom file: %s\n", name); pdev->has_rom = true; memory_region_init_rom(&pdev->rom, OBJECT(pdev), name, pdev->romsize, &error_fatal); ptr = memory_region_get_ram_ptr(&pdev->rom); @@ -2576,15 +2587,15 @@ static char *pci_dev_fw_name(DeviceState *dev, char *buf, int len) static char *pcibus_get_fw_dev_path(DeviceState *dev) { PCIDevice *d = (PCIDevice *)dev; - char path[50], name[33]; - int off; + char name[33]; + int has_func = !!PCI_FUNC(d->devfn); - off = snprintf(path, sizeof(path), "%s@%x", - pci_dev_fw_name(dev, name, sizeof name), - PCI_SLOT(d->devfn)); - if (PCI_FUNC(d->devfn)) - snprintf(path + off, sizeof(path) + off, ",%x", PCI_FUNC(d->devfn)); - return g_strdup(path); + return g_strdup_printf("%s@%x%s%.*x", + pci_dev_fw_name(dev, name, sizeof(name)), + PCI_SLOT(d->devfn), + has_func ? "," : "", + has_func, + PCI_FUNC(d->devfn)); } static char *pcibus_get_dev_path(DeviceState *dev) @@ -2689,6 +2700,10 @@ static void pci_device_class_init(ObjectClass *klass, void *data) k->unrealize = pci_qdev_unrealize; k->bus_type = TYPE_PCI_BUS; device_class_set_props(k, pci_props); + object_class_property_set_description( + klass, "x-max-bounce-buffer-size", + "Maximum buffer size allocated for bounce buffers used for mapped " + "access to indirect DMA memory"); } static void pci_device_class_base_init(ObjectClass *klass, void *data) diff --git a/hw/pci/pcie.c b/hw/pci/pcie.c index d7d73a31e4cc40a24a65e40046691b731c572c40..a2d1ae60218fb942cf0039f0dc828453fcb6e19a 100644 --- a/hw/pci/pcie.c +++ b/hw/pci/pcie.c @@ -92,13 +92,6 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev) return; } - /* Clear and fill LNKCAP from what was configured above */ - pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP, - PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); - pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, - QEMU_PCI_EXP_LNKCAP_MLW(s->width) | - QEMU_PCI_EXP_LNKCAP_MLS(s->speed)); - /* * Link bandwidth notification is required for all root ports and * downstream ports supporting links wider than x1 or multiple link @@ -106,6 +99,12 @@ static void pcie_cap_fill_slot_lnk(PCIDevice *dev) */ if (s->width > QEMU_PCI_EXP_LNK_X1 || s->speed > QEMU_PCI_EXP_LNK_2_5GT) { + /* Clear and fill LNKCAP from what was configured above */ + pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP, + PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS); + pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, + QEMU_PCI_EXP_LNKCAP_MLW(s->width) | + QEMU_PCI_EXP_LNKCAP_MLS(s->speed)); pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP, PCI_EXP_LNKCAP_LBNC); } @@ -366,6 +365,17 @@ static void hotplug_event_clear(PCIDevice *dev) } } +void pcie_cap_slot_enable_power(PCIDevice *dev) +{ + uint8_t *exp_cap = dev->config + dev->exp.exp_cap; + uint32_t sltcap = pci_get_long(exp_cap + PCI_EXP_SLTCAP); + + if (sltcap & PCI_EXP_SLTCAP_PCP) { + pci_set_word_by_mask(exp_cap + PCI_EXP_SLTCTL, + PCI_EXP_SLTCTL_PCC, PCI_EXP_SLTCTL_PWR_ON); + } +} + static void pcie_set_power_device(PCIBus *bus, PCIDevice *dev, void *opaque) { bool *power = opaque; @@ -526,6 +536,7 @@ void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, uint8_t *exp_cap = hotplug_pdev->config 
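The pcibus_get_fw_dev_path() rewrite above also quietly fixes the bound of the second snprintf (the removed code passed sizeof(path) + off where sizeof(path) - off was meant). The "%.*x" idiom it relies on is worth spelling out; a standalone sketch with illustrative variables:

    /* With a printf precision of 0, the hex field is omitted entirely,
     * so function 0 renders as "dev@3" and function 2 as "dev@3,2". */
    int has_func = !!func;
    char *path = g_strdup_printf("%s@%x%s%.*x", name, slot,
                                 has_func ? "," : "",
                                 has_func, func);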
+ hotplug_pdev->exp.exp_cap; uint32_t sltcap = pci_get_word(exp_cap + PCI_EXP_SLTCAP); uint16_t sltctl = pci_get_word(exp_cap + PCI_EXP_SLTCTL); + PCIESlot *s = PCIE_SLOT(hotplug_pdev); /* Check if hot-unplug is disabled on the slot */ if ((sltcap & PCI_EXP_SLTCAP_HPC) == 0) { @@ -572,7 +583,17 @@ void pcie_cap_slot_unplug_request_cb(HotplugHandler *hotplug_dev, return; } - pcie_cap_slot_push_attention_button(hotplug_pdev); + if ((pci_dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA) && s->fast_plug) { + pci_word_test_and_clear_mask(pci_dev->config + pci_dev->exp.exp_cap + PCI_EXP_LNKSTA, + PCI_EXP_LNKSTA_DLLLA); + } + + if (s->fast_unplug) { + pcie_cap_slot_event(hotplug_pdev, + PCI_EXP_HP_EV_PDC | PCI_EXP_HP_EV_ABP); + } else { + pcie_cap_slot_push_attention_button(hotplug_pdev); + } } /* pci express slot for pci express root/downstream port diff --git a/hw/pci/pcie_aer.c b/hw/pci/pcie_aer.c index 27f9cc56af695dfbd0668ba3e2b9f625f5fc9cfd..e1a8a88c8c08787b16417f16dc117f1fb709c244 100644 --- a/hw/pci/pcie_aer.c +++ b/hw/pci/pcie_aer.c @@ -774,7 +774,9 @@ void pcie_aer_root_write_config(PCIDevice *dev, uint32_t root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND); /* 6.2.4.1.2 Interrupt Generation */ if (!msix_enabled(dev) && !msi_enabled(dev)) { - pci_set_irq(dev, !!(root_cmd & enabled_cmd)); + if (pci_intx(dev) != -1) { + pci_set_irq(dev, !!(root_cmd & enabled_cmd)); + } return; } diff --git a/hw/pci/trace-events b/hw/pci/trace-events index fc777d0b5e6e5cc9a715a90f410b6b208104ef9c..7e294b7e8a237580e57ab2d70c30aff7b8444f38 100644 --- a/hw/pci/trace-events +++ b/hw/pci/trace-events @@ -3,6 +3,7 @@ # pci.c pci_update_mappings_del(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 pci_update_mappings_add(void *d, uint32_t bus, uint32_t slot, uint32_t func, int bar, uint64_t addr, uint64_t size) "d=%p %02x:%02x.%x %d,0x%"PRIx64"+0x%"PRIx64 +pci_route_irq(int dev_irq, const char *dev_path, int parent_irq, const char *parent_path) "IRQ %d @%s -> IRQ %d @%s" # pci_host.c pci_cfg_read(const char *dev, unsigned devid, unsigned fnid, unsigned offs, unsigned val) "%s %02u:%u @0x%x -> 0x%x" diff --git a/hw/ppc/Kconfig b/hw/ppc/Kconfig index 400511c6b70351a45a2253fdd99514b32b34f05b..9e0b7184e384609843073057b55704231e3e4955 100644 --- a/hw/ppc/Kconfig +++ b/hw/ppc/Kconfig @@ -119,6 +119,7 @@ config MAC_NEWWORLD select MAC_PMU select UNIN_PCI select FW_CFG_PPC + select USB_OHCI_PCI config E500 bool diff --git a/hw/ppc/e500.c b/hw/ppc/e500.c index 960e7efcd31fe56232f47cefc7aceabac4e6039a..564c15d09cdb6c3cbd79a730a3c4b70882337441 100644 --- a/hw/ppc/e500.c +++ b/hw/ppc/e500.c @@ -197,6 +197,8 @@ static void dt_i2c_create(void *fdt, const char *soc, const char *mpic, qemu_fdt_setprop_cells(fdt, i2c, "cell-index", 0); qemu_fdt_setprop_cells(fdt, i2c, "interrupts", irq0, 0x2); qemu_fdt_setprop_phandle(fdt, i2c, "interrupt-parent", mpic); + qemu_fdt_setprop_cell(fdt, i2c, "#size-cells", 0); + qemu_fdt_setprop_cell(fdt, i2c, "#address-cells", 1); qemu_fdt_setprop_string(fdt, "/aliases", alias, i2c); g_free(i2c); @@ -762,7 +764,7 @@ static DeviceState *ppce500_init_mpic_qemu(PPCE500MachineState *pms, } static DeviceState *ppce500_init_mpic_kvm(const PPCE500MachineClass *pmc, - IrqLines *irqs, Error **errp) + Error **errp) { DeviceState *dev; CPUState *cs; @@ -798,7 +800,7 @@ static DeviceState *ppce500_init_mpic(PPCE500MachineState *pms, Error *err = NULL; if (kvm_kernel_irqchip_allowed()) { - dev = ppce500_init_mpic_kvm(pmc, irqs, 
&err); + dev = ppce500_init_mpic_kvm(pmc, &err); } if (kvm_kernel_irqchip_required() && !dev) { error_reportf_err(err, @@ -949,7 +951,7 @@ void ppce500_init(MachineState *machine) sysbus_connect_irq(s, 0, qdev_get_gpio_in(mpicdev, MPC8544_I2C_IRQ)); memory_region_add_subregion(ccsr_addr_space, MPC8544_I2C_REGS_OFFSET, sysbus_mmio_get_region(s, 0)); - i2c = (I2CBus *)qdev_get_child_bus(dev, "i2c"); + i2c = I2C_BUS(qdev_get_child_bus(dev, "i2c")); i2c_slave_create_simple(i2c, "ds1338", RTC_REGS_OFFSET); @@ -974,7 +976,7 @@ void ppce500_init(MachineState *machine) memory_region_add_subregion(ccsr_addr_space, MPC8544_PCI_REGS_OFFSET, sysbus_mmio_get_region(s, 0)); - pci_bus = (PCIBus *)qdev_get_child_bus(dev, "pci.0"); + pci_bus = PCI_BUS(qdev_get_child_bus(dev, "pci.0")); if (!pci_bus) printf("couldn't create PCI controller!\n"); diff --git a/hw/ppc/mac.h b/hw/ppc/mac.h index 22c8408078d2c0689aa56e37f083bf0483401aa3..a1fa8f8e41a995fb45df379ff041eba5b4250a3b 100644 --- a/hw/ppc/mac.h +++ b/hw/ppc/mac.h @@ -36,9 +36,6 @@ #include "hw/pci-host/uninorth.h" #include "qom/object.h" -/* SMP is not enabled, for now */ -#define MAX_CPUS 1 - #define NVRAM_SIZE 0x2000 #define PROM_FILENAME "openbios-ppc" diff --git a/hw/ppc/mac_newworld.c b/hw/ppc/mac_newworld.c index 7bb7ac39975f1ca1467249da9fe988905b901ef3..4bddb529c2a6e09aca332e30f7cc64d58827bcfc 100644 --- a/hw/ppc/mac_newworld.c +++ b/hw/ppc/mac_newworld.c @@ -581,7 +581,8 @@ static void core99_machine_class_init(ObjectClass *oc, void *data) mc->desc = "Mac99 based PowerMAC"; mc->init = ppc_core99_init; mc->block_default_type = IF_IDE; - mc->max_cpus = MAX_CPUS; + /* SMP is not supported currently */ + mc->max_cpus = 1; mc->default_boot_order = "cd"; mc->default_display = "std"; mc->kvm_type = core99_kvm_type; diff --git a/hw/ppc/mac_oldworld.c b/hw/ppc/mac_oldworld.c index de2be960e6c41fa4721c3e0e9778003d99d06c5d..7016979a7cd022b800b888c4de59cec962977d04 100644 --- a/hw/ppc/mac_oldworld.c +++ b/hw/ppc/mac_oldworld.c @@ -423,7 +423,8 @@ static void heathrow_class_init(ObjectClass *oc, void *data) mc->desc = "Heathrow based PowerMAC"; mc->init = ppc_heathrow_init; mc->block_default_type = IF_IDE; - mc->max_cpus = MAX_CPUS; + /* SMP is not supported currently */ + mc->max_cpus = 1; #ifndef TARGET_PPC64 mc->is_default = true; #endif diff --git a/hw/ppc/pegasos2.c b/hw/ppc/pegasos2.c index 298e6b93e2dd61069003bea5070c16d4d9d935f2..7b7eb38152cdd5b27f3e936a3355e0ee03b85283 100644 --- a/hw/ppc/pegasos2.c +++ b/hw/ppc/pegasos2.c @@ -457,7 +457,7 @@ static void pegasos2_hypercall(PPCVirtualHypervisor *vhyp, PowerPCCPU *cpu) /* The TCG path should also be holding the BQL at this point */ g_assert(qemu_mutex_iothread_locked()); - if (msr_pr) { + if (FIELD_EX64(env->msr, MSR, PR)) { qemu_log_mask(LOG_GUEST_ERROR, "Hypercall made with MSR[PR]=1\n"); env->gpr[3] = H_PRIVILEGE; } else if (env->gpr[3] == KVMPPC_H_RTAS) { diff --git a/hw/ppc/ppc.c b/hw/ppc/ppc.c index e8127599c9073e77decebbf3a828ac6b1b39d056..dedd56263d3fcad4c601f2883aa684c304666422 100644 --- a/hw/ppc/ppc.c +++ b/hw/ppc/ppc.c @@ -66,7 +66,9 @@ void ppc_set_irq(PowerPCCPU *cpu, int n_IRQ, int level) } if (old_pending != env->pending_interrupts) { - kvmppc_set_interrupt(cpu, n_IRQ, level); + if (kvm_enabled()) { + kvmppc_set_interrupt(cpu, n_IRQ, level); + } } @@ -743,7 +745,7 @@ target_ulong cpu_ppc_load_decr(CPUPPCState *env) decr = _cpu_ppc_load_decr(env, tb_env->decr_next); /* - * If large decrementer is enabled then the decrementer is signed extened + * If large decrementer is enabled then the 
decrementer is signed extended * to 64 bits, otherwise it is a 32 bit value. */ if (env->spr[SPR_LPCR] & LPCR_LD) { @@ -1461,5 +1463,7 @@ void ppc_irq_reset(PowerPCCPU *cpu) CPUPPCState *env = &cpu->env; env->irq_input_state = 0; - kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0); + if (kvm_enabled()) { + kvmppc_set_interrupt(cpu, PPC_INTERRUPT_EXT, 0); + } } diff --git a/hw/ppc/prep_systemio.c b/hw/ppc/prep_systemio.c index b2bd78324878c19ef9e3dca7655adf3f3c59a463..e51da91de5bef5d7f2f9021e33fd23bbe96c0f64 100644 --- a/hw/ppc/prep_systemio.c +++ b/hw/ppc/prep_systemio.c @@ -39,7 +39,7 @@ #define TYPE_PREP_SYSTEMIO "prep-systemio" OBJECT_DECLARE_SIMPLE_TYPE(PrepSystemIoState, PREP_SYSTEMIO) -/* Bit as defined in PowerPC Reference Plaform v1.1, sect. 6.1.5, p. 132 */ +/* Bit as defined in PowerPC Reference Platform v1.1, sect. 6.1.5, p. 132 */ #define PREP_BIT(n) (1 << (7 - (n))) struct PrepSystemIoState { diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c index 3b5fd749be8961c682bee48b2239bcb07be3fc6d..6727ea7118cf8198033d0fddf1477545ce3c03aa 100644 --- a/hw/ppc/spapr.c +++ b/hw/ppc/spapr.c @@ -1268,7 +1268,7 @@ static void emulate_spapr_hypercall(PPCVirtualHypervisor *vhyp, /* The TCG path should also be holding the BQL at this point */ g_assert(qemu_mutex_iothread_locked()); - if (msr_pr) { + if (FIELD_EX64(env->msr, MSR, PR)) { hcall_dprintf("Hypercall made with MSR[PR]=1\n"); env->gpr[3] = H_PRIVILEGE; } else { @@ -1482,7 +1482,7 @@ int spapr_hpt_shift_for_ramsize(uint64_t ramsize) void spapr_free_hpt(SpaprMachineState *spapr) { - g_free(spapr->htab); + qemu_vfree(spapr->htab); spapr->htab = NULL; spapr->htab_shift = 0; close_htab_fd(spapr); @@ -2488,7 +2488,7 @@ static void spapr_set_vsmt_mode(SpaprMachineState *spapr, Error **errp) return; } - /* Detemine the VSMT mode to use: */ + /* Determine the VSMT mode to use: */ if (vsmt_user) { if (spapr->vsmt < smp_threads) { error_setg(errp, "Cannot support VSMT mode %d" @@ -3016,7 +3016,7 @@ static int spapr_kvm_type(MachineState *machine, const char *vm_type) { /* * The use of g_ascii_strcasecmp() for 'hv' and 'pr' is to - * accomodate the 'HV' and 'PV' formats that exists in the + * accommodate the 'HV' and 'PV' formats that exists in the * wild. The 'auto' mode is being introduced already as * lower-case, thus we don't need to bother checking for * "AUTO". @@ -4250,7 +4250,7 @@ spapr_cpu_index_to_props(MachineState *machine, unsigned cpu_index) CPUArchId *core_slot; MachineClass *mc = MACHINE_GET_CLASS(machine); - /* make sure possible_cpu are intialized */ + /* make sure possible_cpu are initialized */ mc->possible_cpu_arch_ids(machine); /* get CPU core slot containing thread that matches cpu_index */ core_slot = spapr_find_cpu_slot(machine, cpu_index, NULL); @@ -4870,7 +4870,7 @@ static void spapr_machine_2_12_class_options(MachineClass *mc) /* We depend on kvm_enabled() to choose a default value for the * hpt-max-page-size capability. Of course we can't do it here - * because this is too early and the HW accelerator isn't initialzed + * because this is too early and the HW accelerator isn't initialized * yet. Postpone this to machine init (see default_caps_with_cpu()). 
*/ smc->default_caps.caps[SPAPR_CAP_HPT_MAXPAGESIZE] = 0; diff --git a/hw/ppc/spapr_hcall.c b/hw/ppc/spapr_hcall.c index 222c1b6bbdb6a05b226080186282a5c633ca68c2..5364bbcffac92b1490990448b897e0e75c49b155 100644 --- a/hw/ppc/spapr_hcall.c +++ b/hw/ppc/spapr_hcall.c @@ -1532,7 +1532,7 @@ static void hypercall_register_types(void) spapr_register_hypercall(H_GET_CPU_CHARACTERISTICS, h_get_cpu_characteristics); - /* "debugger" hcalls (also used by SLOF). Note: We do -not- differenciate + /* "debugger" hcalls (also used by SLOF). Note: We do -not- differentiate * here between the "CI" and the "CACHE" variants, they will use whatever * mapping attributes qemu is using. When using KVM, the kernel will * enforce the attributes more strongly diff --git a/hw/ppc/spapr_nvdimm.c b/hw/ppc/spapr_nvdimm.c index 91de1052f23382fbfefeb981da72edfb62b58479..b111380a45d4ad25699c4af18acca7dddaea3efc 100644 --- a/hw/ppc/spapr_nvdimm.c +++ b/hw/ppc/spapr_nvdimm.c @@ -336,7 +336,7 @@ static target_ulong h_scm_bind_mem(PowerPCCPU *cpu, SpaprMachineState *spapr, /* * Currently continue token should be zero qemu has already bound - * everything and this hcall doesnt return H_BUSY. + * everything and this hcall doesn't return H_BUSY. */ if (continue_token > 0) { return H_P5; diff --git a/hw/ppc/spapr_pci.c b/hw/ppc/spapr_pci.c index 5bfd4aa9e5aa3251fb029c09da07917f8f298dcd..567bc8481c5de03ee6507154f81dfc86d2d298c7 100644 --- a/hw/ppc/spapr_pci.c +++ b/hw/ppc/spapr_pci.c @@ -800,6 +800,7 @@ static char *spapr_phb_vfio_get_loc_code(SpaprPhbState *sphb, PCIDevice *pdev) } /* Construct and read from host device tree the loc-code */ + g_free(path); path = g_strdup_printf("/proc/device-tree%s/ibm,loc-code", devspec); if (!g_file_get_contents(path, &buf, NULL, NULL)) { return NULL; @@ -1554,7 +1555,7 @@ static void spapr_pci_pre_plug(HotplugHandler *plug_handler, */ if (plugged_dev->hotplugged) { error_setg(errp, QERR_BUS_NO_HOTPLUG, - object_get_typename(OBJECT(phb))); + phb->parent_obj.bus->qbus.name); return; } } @@ -1677,7 +1678,7 @@ static void spapr_pci_unplug_request(HotplugHandler *plug_handler, if (!phb->dr_enabled) { error_setg(errp, QERR_BUS_NO_HOTPLUG, - object_get_typename(OBJECT(phb))); + phb->parent_obj.bus->qbus.name); return; } @@ -2044,7 +2045,7 @@ static int spapr_phb_children_reset(Object *child, void *opaque) DeviceState *dev = (DeviceState *) object_dynamic_cast(child, TYPE_DEVICE); if (dev) { - device_legacy_reset(dev); + device_cold_reset(dev); } return 0; diff --git a/hw/ppc/spapr_pci_vfio.c b/hw/ppc/spapr_pci_vfio.c index 2a76b4e0b518afc6dec4b1d09a40a21d76e827a7..6326948143321205c55aae6186401750312fb59c 100644 --- a/hw/ppc/spapr_pci_vfio.c +++ b/hw/ppc/spapr_pci_vfio.c @@ -77,7 +77,7 @@ int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb, * call. Now we just need to check the validity of the PCI * pass-through devices (vfio-pci) under this sphb bus. * We have already validated that all the devices under this sphb - * are from same iommu group (within same PE) before comming here. + * are from same iommu group (within same PE) before coming here. 
* * Prior to linux commit 98ba956f6a389 ("powerpc/pseries/eeh: * Rework device EEH PE determination") kernel would call diff --git a/hw/ppc/spapr_vof.c b/hw/ppc/spapr_vof.c index 40ce8fe0037ce545e3ff58c55fd23d7de5299104..e437cab64264ea5cd7eb5fc5d4ed9b219391465e 100644 --- a/hw/ppc/spapr_vof.c +++ b/hw/ppc/spapr_vof.c @@ -29,7 +29,7 @@ target_ulong spapr_h_vof_client(PowerPCCPU *cpu, SpaprMachineState *spapr, void spapr_vof_client_dt_finalize(SpaprMachineState *spapr, void *fdt) { - char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); + g_autofree char *stdout_path = spapr_vio_stdout_path(spapr->vio_bus); vof_build_dt(fdt, spapr->vof); diff --git a/hw/ppc/vof.c b/hw/ppc/vof.c index 73adc44ec212693dbd4b21c6af1f1338ed6856e6..cb1ae6fabf58c0b1088b9392c1db33cda2f5b8be 100644 --- a/hw/ppc/vof.c +++ b/hw/ppc/vof.c @@ -648,7 +648,7 @@ static void vof_dt_memory_available(void *fdt, GArray *claimed, uint64_t base) mem0_reg = fdt_getprop(fdt, offset, "reg", &proplen); g_assert(mem0_reg && proplen == sizeof(uint32_t) * (ac + sc)); if (sc == 2) { - mem0_end = be64_to_cpu(*(uint64_t *)(mem0_reg + sizeof(uint32_t) * ac)); + mem0_end = ldq_be_p(mem0_reg + sizeof(uint32_t) * ac); } else { mem0_end = be32_to_cpu(*(uint32_t *)(mem0_reg + sizeof(uint32_t) * ac)); } @@ -1026,6 +1026,8 @@ void vof_cleanup(Vof *vof) } vof->claimed = NULL; vof->of_instances = NULL; + vof->of_instance_last = 0; + vof->claimed_base = 0; } void vof_build_dt(void *fdt, Vof *vof) diff --git a/hw/rdma/vmw/pvrdma_cmd.c b/hw/rdma/vmw/pvrdma_cmd.c index da7ddfa548ffb349dd3d695a6766b464e13c8980..89db963c4683909242f3a7c68c034acc24d63214 100644 --- a/hw/rdma/vmw/pvrdma_cmd.c +++ b/hw/rdma/vmw/pvrdma_cmd.c @@ -796,6 +796,12 @@ int pvrdma_exec_cmd(PVRDMADev *dev) dsr_info = &dev->dsr_info; + if (!dsr_info->dsr) { + /* Buggy or malicious guest driver */ + rdma_error_report("Exec command without dsr, req or rsp buffers"); + goto out; + } + if (dsr_info->req->hdr.cmd >= sizeof(cmd_handlers) / sizeof(struct cmd_handler)) { rdma_error_report("Unsupported command"); diff --git a/hw/rdma/vmw/pvrdma_main.c b/hw/rdma/vmw/pvrdma_main.c index 91206dbb8eb0b1952168ecc38daf59aba4696df0..f99b12a59297aff77cf2c917068b8a9969082552 100644 --- a/hw/rdma/vmw/pvrdma_main.c +++ b/hw/rdma/vmw/pvrdma_main.c @@ -91,19 +91,33 @@ static int init_dev_ring(PvrdmaRing *ring, PvrdmaRingState **ring_state, dma_addr_t dir_addr, uint32_t num_pages) { uint64_t *dir, *tbl; - int rc = 0; + int max_pages, rc = 0; if (!num_pages) { rdma_error_report("Ring pages count must be strictly positive"); return -EINVAL; } + /* + * Make sure we can satisfy the requested number of pages in a single + * TARGET_PAGE_SIZE sized page table (taking into account that first entry + * is reserved for ring-state) + */ + max_pages = TARGET_PAGE_SIZE / sizeof(dma_addr_t) - 1; + if (num_pages > max_pages) { + rdma_error_report("Maximum pages on a single directory must not exceed %d\n", + max_pages); + return -EINVAL; + } + dir = rdma_pci_dma_map(pci_dev, dir_addr, TARGET_PAGE_SIZE); if (!dir) { rdma_error_report("Failed to map to page directory (ring %s)", name); rc = -ENOMEM; goto out; } + + /* We support only one page table for a ring */ tbl = rdma_pci_dma_map(pci_dev, dir[0], TARGET_PAGE_SIZE); if (!tbl) { rdma_error_report("Failed to map to page table (ring %s)", name); diff --git a/hw/remote/message.c b/hw/remote/message.c index 11d729845c5a10ebdf6e82c1f2e6676f7f944308..83b91c87623f150fdad1ed3404a1c19a1d7a8232 100644 --- a/hw/remote/message.c +++ b/hw/remote/message.c @@ -216,13 +216,10 @@ fail: 
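The page-count bound added to init_dev_ring() above follows from the single page table the device maps; as a worked example, assuming a 4 KiB TARGET_PAGE_SIZE and an 8-byte dma_addr_t:

    max_pages = TARGET_PAGE_SIZE / sizeof(dma_addr_t) - 1;
    /* = 4096 / 8 - 1 = 511 data pages; the first directory entry is
     * reserved for the ring state, hence the "- 1" */

Requests for more pages than that cannot be satisfied by one table, so rejecting them up front prevents reads past the mapped directory.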
static void process_device_reset_msg(QIOChannel *ioc, PCIDevice *dev, Error **errp) { - DeviceClass *dc = DEVICE_GET_CLASS(dev); DeviceState *s = DEVICE(dev); MPQemuMsg ret = { 0 }; - if (dc->reset) { - dc->reset(s); - } + device_cold_reset(s); ret.cmd = MPQEMU_CMD_RET; diff --git a/hw/riscv/boot.c b/hw/riscv/boot.c index 519fa455a1548cabd85b4b986e19a1583d388a40..c035aa68f542d3a3681e948d84198d563cfb5905 100644 --- a/hw/riscv/boot.c +++ b/hw/riscv/boot.c @@ -217,11 +217,11 @@ uint32_t riscv_load_fdt(hwaddr dram_base, uint64_t mem_size, void *fdt) /* * We should put fdt as far as possible to avoid kernel/initrd overwriting * its content. But it should be addressable by 32 bit system as well. - * Thus, put it at an 16MB aligned address that less than fdt size from the + * Thus, put it at a 2MB aligned address that is less than fdt size from the * end of dram or 3GB whichever is lesser. */ temp = MIN(dram_end, 3072 * MiB); - fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 16 * MiB); + fdt_addr = QEMU_ALIGN_DOWN(temp - fdtsize, 2 * MiB); ret = fdt_pack(fdt); /* Should only fail if we've built a corrupted tree */ diff --git a/hw/riscv/virt.c b/hw/riscv/virt.c index 3af074148ef4d49f61a9c1309e2247a4dc84dabd..cd03ba1d766f2d1a7e59782c6fcb0d592807bbbb 100644 --- a/hw/riscv/virt.c +++ b/hw/riscv/virt.c @@ -984,16 +984,14 @@ static void virt_machine_instance_init(Object *obj) static bool virt_get_aclint(Object *obj, Error **errp) { - MachineState *ms = MACHINE(obj); - RISCVVirtState *s = RISCV_VIRT_MACHINE(ms); + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); return s->have_aclint; } static void virt_set_aclint(Object *obj, bool value, Error **errp) { - MachineState *ms = MACHINE(obj); - RISCVVirtState *s = RISCV_VIRT_MACHINE(ms); + RISCVVirtState *s = RISCV_VIRT_MACHINE(obj); s->have_aclint = value; } diff --git a/hw/rtc/mc146818rtc.c b/hw/rtc/mc146818rtc.c index 4fbafddb226d4e65fa2e4504ab225f0a7696590d..af1df9aaebaaef382b4ac217785d4fab447f71d8 100644 --- a/hw/rtc/mc146818rtc.c +++ b/hw/rtc/mc146818rtc.c @@ -616,7 +616,8 @@ static void rtc_set_time(RTCState *s) s->base_rtc = mktimegm(&tm); s->last_update = qemu_clock_get_ns(rtc_clock); - qapi_event_send_rtc_change(qemu_timedate_diff(&tm)); + set_rtc_date_diff(qemu_timedate_diff(&tm)); + qapi_event_send_rtc_change(get_rtc_date_diff()); } static void rtc_set_cmos(RTCState *s, const struct tm *tm) diff --git a/hw/rtc/pl031.c b/hw/rtc/pl031.c index e7ced90b0258a662b24b5942caea1238212413fe..61a2948f77f50b62cdb416c5f3f1c0f2d8bff09e 100644 --- a/hw/rtc/pl031.c +++ b/hw/rtc/pl031.c @@ -63,6 +63,15 @@ static uint32_t pl031_get_count(PL031State *s) return s->tick_offset + now / NANOSECONDS_PER_SECOND; } +static void pl031_get_date(Object *obj, struct tm *current_tm, Error **errp) +{ + PL031State *s = PL031(obj); + time_t ti = pl031_get_count(s); + + /* Convert to UTC time */ + gmtime_r(&ti, current_tm); +} + static void pl031_set_alarm(PL031State *s) { uint32_t ticks; @@ -143,7 +152,8 @@ static void pl031_write(void * opaque, hwaddr offset, s->tick_offset += value - pl031_get_count(s); qemu_get_timedate(&tm, s->tick_offset); - qapi_event_send_rtc_change(qemu_timedate_diff(&tm)); + set_rtc_date_diff(qemu_timedate_diff(&tm)); + qapi_event_send_rtc_change(get_rtc_date_diff()); pl031_set_alarm(s); break; @@ -200,6 +210,20 @@ static void pl031_init(Object *obj) qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND; s->timer = timer_new_ns(rtc_clock, pl031_interrupt, s); + object_property_add_tm(OBJECT(s), "date", pl031_get_date); +} + +static void pl031_realize(DeviceState
*d, Error **errp) +{ + object_property_add_alias(qdev_get_machine(), "rtc-time", + OBJECT(d), "date"); +} + +static void pl031_unrealize(DeviceState *d) +{ + if (object_property_find(qdev_get_machine(), "rtc-time")) { + object_property_del(qdev_get_machine(), "rtc-time"); + } } static void pl031_finalize(Object *obj) @@ -336,6 +360,8 @@ static void pl031_class_init(ObjectClass *klass, void *data) DeviceClass *dc = DEVICE_CLASS(klass); dc->vmsd = &vmstate_pl031; + dc->realize = pl031_realize; + dc->unrealize = pl031_unrealize; device_class_set_props(dc, pl031_properties); } diff --git a/hw/rx/rx-gdbsim.c b/hw/rx/rx-gdbsim.c index 75d1fec6ca468b8784f4871917b94b2e786ab16c..887083737bfa05c3a7025164126074edb0bb7abd 100644 --- a/hw/rx/rx-gdbsim.c +++ b/hw/rx/rx-gdbsim.c @@ -142,7 +142,7 @@ static void rx_gdbsim_init(MachineState *machine) exit(1); } /* DTB is located at the end of SDRAM space. */ - dtb_offset = machine->ram_size - dtb_size; + dtb_offset = ROUND_DOWN(machine->ram_size - dtb_size, 16); rom_add_blob_fixed("dtb", dtb, dtb_size, SDRAM_BASE + dtb_offset); /* Set dtb address to R1 */ diff --git a/hw/s390x/ipl.h b/hw/s390x/ipl.h index dfc6dfd89c886cf821dfdd1e119930791c6871ba..7fc86e790547f2d40060030bb79aa8673996c7c7 100644 --- a/hw/s390x/ipl.h +++ b/hw/s390x/ipl.h @@ -140,7 +140,7 @@ void s390_ipl_clear_reset_request(void); * have an offset of 4 + n * 8 bytes within the struct in order * to keep it double-word aligned. * The total size of the struct must never exceed 28 bytes. - * This definition must be kept in sync with the defininition + * This definition must be kept in sync with the definition * in pc-bios/s390-ccw/iplb.h. */ struct QemuIplParameters { diff --git a/hw/s390x/s390-virtio-ccw.c b/hw/s390x/s390-virtio-ccw.c index 653587ea62f4c1bf59a51c2a1459b6311829efaf..0a57399b758bf53a921e424937363e9ff664e0a9 100644 --- a/hw/s390x/s390-virtio-ccw.c +++ b/hw/s390x/s390-virtio-ccw.c @@ -99,6 +99,7 @@ static const char *const reset_dev_types[] = { "s390-flic", "diag288", TYPE_S390_PCI_HOST_BRIDGE, + TYPE_AP_BRIDGE, }; static void subsystem_reset(void) @@ -345,7 +346,7 @@ static int s390_machine_protect(S390CcwMachineState *ms) } error_setg(&pv_mig_blocker, - "protected VMs are currently not migrateable."); + "protected VMs are currently not migratable."); rc = migrate_add_blocker(pv_mig_blocker, &local_err); if (rc) { ram_block_discard_disable(false); @@ -434,7 +435,7 @@ static void s390_machine_reset(MachineState *machine) break; case S390_RESET_MODIFIED_CLEAR: /* - * Susbsystem reset needs to be done before we unshare memory + * Subsystem reset needs to be done before we unshare memory * and lose access to VIRTIO structures in guest memory. */ subsystem_reset(); @@ -447,7 +448,7 @@ static void s390_machine_reset(MachineState *machine) break; case S390_RESET_LOAD_NORMAL: /* - * Susbsystem reset needs to be done before we unshare memory + * Subsystem reset needs to be done before we unshare memory * and lose access to VIRTIO structures in guest memory. 
*/ subsystem_reset(); diff --git a/hw/s390x/sclp.c b/hw/s390x/sclp.c index 89c30a8a91a7b6235c3bc0da655f0840a75c0053..24e29e8cda725cd44d1214e2538f239312aa17ba 100644 --- a/hw/s390x/sclp.c +++ b/hw/s390x/sclp.c @@ -20,13 +20,14 @@ #include "hw/s390x/event-facility.h" #include "hw/s390x/s390-pci-bus.h" #include "hw/s390x/ipl.h" +#include "hw/s390x/s390-virtio-ccw.h" -static inline SCLPDevice *get_sclp_device(void) +static SCLPDevice *get_sclp_device(void) { static SCLPDevice *sclp; if (!sclp) { - sclp = SCLP(object_resolve_path_type("", TYPE_SCLP, NULL)); + sclp = S390_CCW_MACHINE(qdev_get_machine())->sclp; } return sclp; } diff --git a/hw/scsi/esp-pci.c b/hw/scsi/esp-pci.c index dac054aeed462a46c670f58440f73faf189edc85..1792f84cea6c3554160c3ce1cf2dd1e778253ead 100644 --- a/hw/scsi/esp-pci.c +++ b/hw/scsi/esp-pci.c @@ -280,7 +280,7 @@ static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len, len = pci->dma_regs[DMA_WBC]; } - pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir); + pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir, MEMTXATTRS_UNSPECIFIED); /* update status registers */ pci->dma_regs[DMA_WBC] -= len; diff --git a/hw/scsi/esp.c b/hw/scsi/esp.c index 58d0edbd56d7f9b989d221637b05ce9f0b2c65f4..9f071e72188af8bf5dcd8b86f7c9aafeafa1f78b 100644 --- a/hw/scsi/esp.c +++ b/hw/scsi/esp.c @@ -510,7 +510,7 @@ static void do_dma_pdma_cb(ESPState *s) } else { /* * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to commmand phase + * and then switch to command phase */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; @@ -622,7 +622,7 @@ static void esp_do_dma(ESPState *s) } else { /* * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to commmand phase + * and then switch to command phase */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; @@ -733,7 +733,7 @@ static void esp_do_nodma(ESPState *s) } else { /* * Extra message out bytes received: update cmdfifo_cdb_offset - * and then switch to commmand phase + * and then switch to command phase */ s->cmdfifo_cdb_offset = fifo8_num_used(&s->cmdfifo); s->rregs[ESP_RSTAT] = STAT_TC | STAT_CD; @@ -754,7 +754,8 @@ static void esp_do_nodma(ESPState *s) } if (to_device) { - len = MIN(fifo8_num_used(&s->fifo), ESP_FIFO_SZ); + len = MIN(s->async_len, ESP_FIFO_SZ); + len = MIN(len, fifo8_num_used(&s->fifo)); esp_fifo_pop_buf(&s->fifo, s->async_buf, len); s->async_buf += len; s->async_len -= len; @@ -911,6 +912,11 @@ static void esp_soft_reset(ESPState *s) esp_hard_reset(s); } +static void esp_bus_reset(ESPState *s) +{ + qbus_reset_all(BUS(&s->bus)); +} + static void parent_esp_reset(ESPState *s, int irq, int level) { if (level) { @@ -1039,6 +1045,7 @@ void esp_reg_write(ESPState *s, uint32_t saddr, uint64_t val) break; case CMD_BUSRESET: trace_esp_mem_writeb_cmd_bus_reset(val); + esp_bus_reset(s); if (!(s->wregs[ESP_CFG1] & CFG1_RESREPT)) { s->rregs[ESP_RINTR] |= INTR_RST; esp_raise_irq(s); diff --git a/hw/scsi/lsi53c895a.c b/hw/scsi/lsi53c895a.c index 85e907a7854a8aeaa812978675cd8bdb457381bb..71f15052279cc160236833b7ecbee9e5be374b18 100644 --- a/hw/scsi/lsi53c895a.c +++ b/hw/scsi/lsi53c895a.c @@ -621,8 +621,7 @@ static void lsi_do_dma(LSIState *s, int out) dma_addr_t addr; SCSIDevice *dev; - assert(s->current); - if (!s->current->dma_len) { + if (!s->current || !s->current->dma_len) { /* Wait until data is available. 
*/ trace_lsi_do_dma_unavailable(); return; @@ -1029,8 +1028,9 @@ static void lsi_do_msgout(LSIState *s) case 0x0d: /* The ABORT TAG message clears the current I/O process only. */ trace_lsi_do_msgout_abort(current_tag); - if (current_req) { + if (current_req && current_req->req) { scsi_req_cancel(current_req->req); + current_req = NULL; } lsi_disconnect(s); break; @@ -1056,6 +1056,7 @@ static void lsi_do_msgout(LSIState *s) /* clear the current I/O process */ if (s->current) { scsi_req_cancel(s->current->req); + current_req = NULL; } /* As the current implemented devices scsi_disk and scsi_generic @@ -1133,15 +1134,24 @@ static void lsi_execute_script(LSIState *s) uint32_t addr, addr_high; int opcode; int insn_processed = 0; + static int reentrancy_level; + + reentrancy_level++; s->istat1 |= LSI_ISTAT1_SRUN; again: - if (++insn_processed > LSI_MAX_INSN) { - /* Some windows drivers make the device spin waiting for a memory - location to change. If we have been executed a lot of code then - assume this is the case and force an unexpected device disconnect. - This is apparently sufficient to beat the drivers into submission. - */ + /* + * Some windows drivers make the device spin waiting for a memory location + * to change. If we have executed more than LSI_MAX_INSN instructions then + * assume this is the case and force an unexpected device disconnect. This + * is apparently sufficient to beat the drivers into submission. + * + * Another issue (CVE-2023-0330) can occur if the script is programmed to + * trigger itself again and again. Avoid this problem by stopping after + * being called multiple times in a reentrant way (8 is an arbitrary value + * which should be enough for all valid use cases). + */ + if (++insn_processed > LSI_MAX_INSN || reentrancy_level > 8) { if (!(s->sien0 & LSI_SIST0_UDC)) { qemu_log_mask(LOG_GUEST_ERROR, "lsi_scsi: inf. 
loop with UDC masked"); @@ -1149,6 +1159,7 @@ again: lsi_script_scsi_interrupt(s, LSI_SIST0_UDC, 0); lsi_disconnect(s); trace_lsi_execute_script_stop(); + reentrancy_level--; return; } insn = read_dword(s, s->dsp); @@ -1595,6 +1606,8 @@ again: } } trace_lsi_execute_script_stop(); + + reentrancy_level--; } static uint8_t lsi_reg_readb(LSIState *s, int offset) diff --git a/hw/scsi/megasas.c b/hw/scsi/megasas.c index 4ff51221d4cd0952d9394b8f66b1388b98d43348..83c321ec208a6b3ce51db8b1455b4041fbaae74a 100644 --- a/hw/scsi/megasas.c +++ b/hw/scsi/megasas.c @@ -109,8 +109,8 @@ struct MegasasState { uint64_t reply_queue_pa; void *reply_queue; uint16_t reply_queue_len; - uint16_t reply_queue_head; - uint16_t reply_queue_tail; + uint32_t reply_queue_head; + uint32_t reply_queue_tail; uint64_t consumer_pa; uint64_t producer_pa; @@ -168,14 +168,16 @@ static void megasas_frame_set_cmd_status(MegasasState *s, unsigned long frame, uint8_t v) { PCIDevice *pci = &s->parent_obj; - stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, cmd_status), v); + stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, cmd_status), + v, MEMTXATTRS_UNSPECIFIED); } static void megasas_frame_set_scsi_status(MegasasState *s, unsigned long frame, uint8_t v) { PCIDevice *pci = &s->parent_obj; - stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, scsi_status), v); + stb_pci_dma(pci, frame + offsetof(struct mfi_frame_header, scsi_status), + v, MEMTXATTRS_UNSPECIFIED); } static inline const char *mfi_frame_desc(unsigned int cmd) @@ -200,7 +202,12 @@ static uint64_t megasas_frame_get_context(MegasasState *s, unsigned long frame) { PCIDevice *pci = &s->parent_obj; - return ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context)); + uint64_t val; + + ldq_le_pci_dma(pci, frame + offsetof(struct mfi_frame_header, context), + &val, MEMTXATTRS_UNSPECIFIED); + + return val; } static bool megasas_frame_is_ieee_sgl(MegasasCmd *cmd) @@ -303,6 +310,7 @@ static int megasas_map_sgl(MegasasState *s, MegasasCmd *cmd, union mfi_sgl *sgl) } if (cmd->iov_size > iov_size) { trace_megasas_iovec_overflow(cmd->index, iov_size, cmd->iov_size); + goto unmap; } else if (cmd->iov_size < iov_size) { trace_megasas_iovec_underflow(cmd->index, iov_size, cmd->iov_size); } @@ -375,8 +383,7 @@ static int megasas_setup_inquiry(uint8_t *cdb, int pg, int len) cdb[1] = 0x1; cdb[2] = pg; } - cdb[3] = (len >> 8) & 0xff; - cdb[4] = (len & 0xff); + stw_be_p(&cdb[3], len); return len; } @@ -392,18 +399,8 @@ static void megasas_encode_lba(uint8_t *cdb, uint64_t lba, } else { cdb[0] = READ_16; } - cdb[2] = (lba >> 56) & 0xff; - cdb[3] = (lba >> 48) & 0xff; - cdb[4] = (lba >> 40) & 0xff; - cdb[5] = (lba >> 32) & 0xff; - cdb[6] = (lba >> 24) & 0xff; - cdb[7] = (lba >> 16) & 0xff; - cdb[8] = (lba >> 8) & 0xff; - cdb[9] = (lba) & 0xff; - cdb[10] = (len >> 24) & 0xff; - cdb[11] = (len >> 16) & 0xff; - cdb[12] = (len >> 8) & 0xff; - cdb[13] = (len) & 0xff; + stq_be_p(&cdb[2], lba); + stl_be_p(&cdb[2 + 8], len); } /* @@ -531,7 +528,8 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s, s->busy++; if (s->consumer_pa) { - s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa); + ldl_le_pci_dma(pcid, s->consumer_pa, &s->reply_queue_tail, + MEMTXATTRS_UNSPECIFIED); } trace_megasas_qf_enqueue(cmd->index, cmd->count, cmd->context, s->reply_queue_head, s->reply_queue_tail, s->busy); @@ -541,6 +539,7 @@ static MegasasCmd *megasas_enqueue_frame(MegasasState *s, static void megasas_complete_frame(MegasasState *s, uint64_t context) { + const 
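The reentrancy_level counter added to lsi_execute_script() above is a plain recursion guard; the pattern in isolation (illustrative sketch, limit of 8 taken from the comment in the hunk):

    static int level;

    static void run_script(void)
    {
        level++;
        if (level > 8) {
            /* the script has re-triggered itself; raise UDC and
             * disconnect, as the device code does */
            level--;
            return;
        }
        /* ... execute instructions; completions may re-enter ... */
        level--;
    }

Every exit path has to drop the counter, which is why the hunks above decrement it both on the early return and at the normal end of the function.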
MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; PCIDevice *pci_dev = PCI_DEVICE(s); int tail, queue_offset; @@ -554,24 +553,26 @@ static void megasas_complete_frame(MegasasState *s, uint64_t context) */ if (megasas_use_queue64(s)) { queue_offset = s->reply_queue_head * sizeof(uint64_t); - stq_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context); + stq_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, + context, attrs); } else { queue_offset = s->reply_queue_head * sizeof(uint32_t); - stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, context); + stl_le_pci_dma(pci_dev, s->reply_queue_pa + queue_offset, + context, attrs); } - s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa); + ldl_le_pci_dma(pci_dev, s->consumer_pa, &s->reply_queue_tail, attrs); trace_megasas_qf_complete(context, s->reply_queue_head, s->reply_queue_tail, s->busy); } if (megasas_intr_enabled(s)) { /* Update reply queue pointer */ - s->reply_queue_tail = ldl_le_pci_dma(pci_dev, s->consumer_pa); + ldl_le_pci_dma(pci_dev, s->consumer_pa, &s->reply_queue_tail, attrs); tail = s->reply_queue_head; s->reply_queue_head = megasas_next_index(s, tail, s->fw_cmds); trace_megasas_qf_update(s->reply_queue_head, s->reply_queue_tail, s->busy); - stl_le_pci_dma(pci_dev, s->producer_pa, s->reply_queue_head); + stl_le_pci_dma(pci_dev, s->producer_pa, s->reply_queue_head, attrs); /* Notify HBA */ if (msix_enabled(pci_dev)) { trace_megasas_msix_raise(0); @@ -631,6 +632,7 @@ static void megasas_abort_command(MegasasCmd *cmd) static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; PCIDevice *pcid = PCI_DEVICE(s); uint32_t pa_hi, pa_lo; hwaddr iq_pa, initq_size = sizeof(struct mfi_init_qinfo); @@ -669,9 +671,9 @@ static int megasas_init_firmware(MegasasState *s, MegasasCmd *cmd) pa_lo = le32_to_cpu(initq->pi_addr_lo); pa_hi = le32_to_cpu(initq->pi_addr_hi); s->producer_pa = ((uint64_t) pa_hi << 32) | pa_lo; - s->reply_queue_head = ldl_le_pci_dma(pcid, s->producer_pa); + ldl_le_pci_dma(pcid, s->producer_pa, &s->reply_queue_head, attrs); s->reply_queue_head %= MEGASAS_MAX_FRAMES; - s->reply_queue_tail = ldl_le_pci_dma(pcid, s->consumer_pa); + ldl_le_pci_dma(pcid, s->consumer_pa, &s->reply_queue_tail, attrs); s->reply_queue_tail %= MEGASAS_MAX_FRAMES; flags = le32_to_cpu(initq->flags); if (flags & MFI_QUEUE_FLAG_CONTEXT64) { @@ -847,7 +849,7 @@ static int megasas_ctrl_get_info(MegasasState *s, MegasasCmd *cmd) MFI_INFO_PDMIX_SATA | MFI_INFO_PDMIX_LD); - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); return MFI_STAT_OK; } @@ -877,7 +879,7 @@ static int megasas_mfc_get_defaults(MegasasState *s, MegasasCmd *cmd) info.disable_preboot_cli = 1; info.cluster_disable = 1; - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); return MFI_STAT_OK; } @@ -898,7 +900,7 @@ static int megasas_dcmd_get_bios_info(MegasasState *s, MegasasCmd *cmd) info.expose_all_drives = 1; } - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); return MFI_STAT_OK; } @@ -909,7 +911,7 @@ static int megasas_dcmd_get_fw_time(MegasasState *s, MegasasCmd *cmd) fw_time = cpu_to_le64(megasas_fw_time()); - cmd->iov_size -= dma_buf_read((uint8_t *)&fw_time, dcmd_size, &cmd->qsg); + 
cmd->iov_size -= dma_buf_read(&fw_time, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); return MFI_STAT_OK; } @@ -936,7 +938,7 @@ static int megasas_event_info(MegasasState *s, MegasasCmd *cmd) info.shutdown_seq_num = cpu_to_le32(s->shutdown_event); info.boot_seq_num = cpu_to_le32(s->boot_event); - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); return MFI_STAT_OK; } @@ -1005,7 +1007,7 @@ static int megasas_dcmd_pd_get_list(MegasasState *s, MegasasCmd *cmd) info.size = cpu_to_le32(offset); info.count = cpu_to_le32(num_pd_disks); - cmd->iov_size -= dma_buf_read((uint8_t *)&info, offset, &cmd->qsg); + cmd->iov_size -= dma_buf_read(&info, offset, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); return MFI_STAT_OK; } @@ -1099,7 +1101,7 @@ static int megasas_pd_get_info_submit(SCSIDevice *sdev, int lun, info->connected_port_bitmap = 0x1; info->device_speed = 1; info->link_speed = 1; - resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); + resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); g_free(cmd->iov_buf); cmd->iov_size = dcmd_size - resid; cmd->iov_buf = NULL; @@ -1171,7 +1173,7 @@ static int megasas_dcmd_ld_get_list(MegasasState *s, MegasasCmd *cmd) info.ld_count = cpu_to_le32(num_ld_disks); trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks); - resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + resid = dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); cmd->iov_size = dcmd_size - resid; return MFI_STAT_OK; } @@ -1220,7 +1222,7 @@ static int megasas_dcmd_ld_list_query(MegasasState *s, MegasasCmd *cmd) info.size = dcmd_size; trace_megasas_dcmd_ld_get_list(cmd->index, num_ld_disks, max_ld_disks); - resid = dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + resid = dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); cmd->iov_size = dcmd_size - resid; return MFI_STAT_OK; } @@ -1270,7 +1272,7 @@ static int megasas_ld_get_info_submit(SCSIDevice *sdev, int lun, info->ld_config.span[0].num_blocks = info->size; info->ld_config.span[0].array_ref = cpu_to_le16(sdev_id); - resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg); + resid = dma_buf_read(cmd->iov_buf, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); g_free(cmd->iov_buf); cmd->iov_size = dcmd_size - resid; cmd->iov_buf = NULL; @@ -1389,7 +1391,7 @@ static int megasas_dcmd_cfg_read(MegasasState *s, MegasasCmd *cmd) ld_offset += sizeof(struct mfi_ld_config); } - cmd->iov_size -= dma_buf_read((uint8_t *)data, info->size, &cmd->qsg); + cmd->iov_size -= dma_buf_read(data, info->size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); return MFI_STAT_OK; } @@ -1419,7 +1421,7 @@ static int megasas_dcmd_get_properties(MegasasState *s, MegasasCmd *cmd) info.ecc_bucket_leak_rate = cpu_to_le16(1440); info.expose_encl_devices = 1; - cmd->iov_size -= dma_buf_read((uint8_t *)&info, dcmd_size, &cmd->qsg); + cmd->iov_size -= dma_buf_read(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); return MFI_STAT_OK; } @@ -1464,7 +1466,7 @@ static int megasas_dcmd_set_properties(MegasasState *s, MegasasCmd *cmd) dcmd_size); return MFI_STAT_INVALID_PARAMETER; } - dma_buf_write((uint8_t *)&info, dcmd_size, &cmd->qsg); + dma_buf_write(&info, dcmd_size, &cmd->qsg, MEMTXATTRS_UNSPECIFIED); trace_megasas_dcmd_unsupported(cmd->index, cmd->iov_size); return MFI_STAT_OK; } diff --git a/hw/scsi/mptsas.c b/hw/scsi/mptsas.c index 
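The megasas hunks above apply one mechanical conversion throughout: dma_buf_read() and dma_buf_write() now take a void * buffer, so the (uint8_t *) casts disappear, and carry explicit MemTxAttrs. The assumed call shape:

    /* the return value is the residual: bytes that were not copied */
    resid = dma_buf_read(&info, dcmd_size, &cmd->qsg,
                         MEMTXATTRS_UNSPECIFIED);
    cmd->iov_size = dcmd_size - resid;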
f6c77655443b9c45ec5e7cb2bd8afd4c0efbfeee..8487138cb65354ad2e8985ec6c8485cb7ec6874f 100644 --- a/hw/scsi/mptsas.c +++ b/hw/scsi/mptsas.c @@ -172,14 +172,21 @@ static const int mpi_request_sizes[] = { static dma_addr_t mptsas_ld_sg_base(MPTSASState *s, uint32_t flags_and_length, dma_addr_t *sgaddr) { + const MemTxAttrs attrs = MEMTXATTRS_UNSPECIFIED; PCIDevice *pci = (PCIDevice *) s; dma_addr_t addr; if (flags_and_length & MPI_SGE_FLAGS_64_BIT_ADDRESSING) { - addr = ldq_le_pci_dma(pci, *sgaddr + 4); + uint64_t addr64; + + ldq_le_pci_dma(pci, *sgaddr + 4, &addr64, attrs); + addr = addr64; *sgaddr += 12; } else { - addr = ldl_le_pci_dma(pci, *sgaddr + 4); + uint32_t addr32; + + ldl_le_pci_dma(pci, *sgaddr + 4, &addr32, attrs); + addr = addr32; *sgaddr += 8; } return addr; @@ -203,7 +210,7 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr) dma_addr_t addr, len; uint32_t flags_and_length; - flags_and_length = ldl_le_pci_dma(pci, sgaddr); + ldl_le_pci_dma(pci, sgaddr, &flags_and_length, MEMTXATTRS_UNSPECIFIED); len = flags_and_length & MPI_SGE_LENGTH_MASK; if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK) != MPI_SGE_FLAGS_SIMPLE_ELEMENT || @@ -234,7 +241,8 @@ static int mptsas_build_sgl(MPTSASState *s, MPTSASRequest *req, hwaddr addr) break; } - flags_and_length = ldl_le_pci_dma(pci, next_chain_addr); + ldl_le_pci_dma(pci, next_chain_addr, &flags_and_length, + MEMTXATTRS_UNSPECIFIED); if ((flags_and_length & MPI_SGE_FLAGS_ELEMENT_TYPE_MASK) != MPI_SGE_FLAGS_CHAIN_ELEMENT) { return MPI_IOCSTATUS_INVALID_SGL; @@ -1313,7 +1321,8 @@ static void mptsas_scsi_realize(PCIDevice *dev, Error **errp) } s->max_devices = MPTSAS_NUM_PORTS; - s->request_bh = qemu_bh_new(mptsas_fetch_requests, s); + s->request_bh = qemu_bh_new_guarded(mptsas_fetch_requests, s, + &DEVICE(dev)->mem_reentrancy_guard); scsi_bus_init(&s->bus, sizeof(s->bus), &dev->qdev, &mptsas_scsi_info); } diff --git a/hw/scsi/scsi-bus.c b/hw/scsi/scsi-bus.c index 77325d8cc7aa9cb7ade10e4d0dfa86eacb3fb5fe..613ad41de927dc1ce572d11b10a8e531f39ecdde 100644 --- a/hw/scsi/scsi-bus.c +++ b/hw/scsi/scsi-bus.c @@ -143,14 +143,10 @@ void scsi_bus_init_named(SCSIBus *bus, size_t bus_size, DeviceState *host, qbus_set_bus_hotplug_handler(BUS(bus)); } -static void scsi_dma_restart_bh(void *opaque) +void scsi_retry_requests(SCSIDevice *s) { - SCSIDevice *s = opaque; SCSIRequest *req, *next; - qemu_bh_delete(s->bh); - s->bh = NULL; - aio_context_acquire(blk_get_aio_context(s->conf.blk)); QTAILQ_FOREACH_SAFE(req, &s->requests, next, next) { scsi_req_ref(req); @@ -170,6 +166,17 @@ static void scsi_dma_restart_bh(void *opaque) scsi_req_unref(req); } aio_context_release(blk_get_aio_context(s->conf.blk)); +} + +static void scsi_dma_restart_bh(void *opaque) +{ + SCSIDevice *s = opaque; + + qemu_bh_delete(s->bh); + s->bh = NULL; + + scsi_retry_requests(s); + /* Drop the reference that was acquired in scsi_dma_restart_cb */ object_unref(OBJECT(s)); } @@ -192,7 +199,8 @@ static void scsi_dma_restart_cb(void *opaque, bool running, RunState state) AioContext *ctx = blk_get_aio_context(s->conf.blk); /* The reference is dropped in scsi_dma_restart_bh.*/ object_ref(OBJECT(s)); - s->bh = aio_bh_new(ctx, scsi_dma_restart_bh, s); + s->bh = aio_bh_new_guarded(ctx, scsi_dma_restart_bh, s, + &DEVICE(s)->mem_reentrancy_guard); qemu_bh_schedule(s->bh); } } @@ -411,19 +419,35 @@ static const struct SCSIReqOps reqops_invalid_opcode = { /* SCSIReqOps implementation for unit attention conditions. 
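mptsas_scsi_realize() above moves to the guarded bottom-half constructors, which tie the BH to the device's MemReentrancyGuard so a bottom half scheduled from an MMIO handler cannot re-enter device code that is still on the stack. The two variants used in these hunks, in sketch form:

    /* main-loop bottom half, guarded by the device */
    s->request_bh = qemu_bh_new_guarded(fetch_requests, s,
                                        &DEVICE(dev)->mem_reentrancy_guard);

    /* the same idea for a BH bound to a specific AioContext */
    s->bh = aio_bh_new_guarded(ctx, restart_bh, s,
                               &DEVICE(s)->mem_reentrancy_guard);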
*/ -static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf) +static void scsi_fetch_unit_attention_sense(SCSIRequest *req) { + SCSISense *ua = NULL; + if (req->dev->unit_attention.key == UNIT_ATTENTION) { - scsi_req_build_sense(req, req->dev->unit_attention); + ua = &req->dev->unit_attention; } else if (req->bus->unit_attention.key == UNIT_ATTENTION) { - scsi_req_build_sense(req, req->bus->unit_attention); + ua = &req->bus->unit_attention; + } + + /* + * Fetch the unit attention sense immediately so that another + * scsi_req_new does not use reqops_unit_attention. + */ + if (ua) { + scsi_req_build_sense(req, *ua); + *ua = SENSE_CODE(NO_SENSE); } +} + +static int32_t scsi_unit_attention(SCSIRequest *req, uint8_t *buf) +{ scsi_req_complete(req, CHECK_CONDITION); return 0; } static const struct SCSIReqOps reqops_unit_attention = { .size = sizeof(SCSIRequest), + .init_req = scsi_fetch_unit_attention_sense, .send_command = scsi_unit_attention }; @@ -697,6 +721,11 @@ SCSIRequest *scsi_req_alloc(const SCSIReqOps *reqops, SCSIDevice *d, object_ref(OBJECT(d)); object_ref(OBJECT(qbus->parent)); notifier_list_init(&req->cancel_notifiers); + + if (reqops->init_req) { + reqops->init_req(req); + } + trace_scsi_req_alloc(req->dev->id, req->lun, req->tag); return req; } @@ -790,6 +819,15 @@ uint8_t *scsi_req_get_buf(SCSIRequest *req) static void scsi_clear_unit_attention(SCSIRequest *req) { SCSISense *ua; + + /* + * scsi_fetch_unit_attention_sense() already cleaned the unit attention + * in this case. + */ + if (req->ops == &reqops_unit_attention) { + return; + } + if (req->dev->unit_attention.key != UNIT_ATTENTION && req->bus->unit_attention.key != UNIT_ATTENTION) { return; @@ -1421,9 +1459,9 @@ void scsi_req_data(SCSIRequest *req, int len) buf = scsi_req_get_buf(req); if (req->cmd.mode == SCSI_XFER_FROM_DEV) { - req->resid = dma_buf_read(buf, len, req->sg); + req->resid = dma_buf_read(buf, len, req->sg, MEMTXATTRS_UNSPECIFIED); } else { - req->resid = dma_buf_write(buf, len, req->sg); + req->resid = dma_buf_write(buf, len, req->sg, MEMTXATTRS_UNSPECIFIED); } scsi_req_continue(req); } diff --git a/hw/scsi/scsi-disk.c b/hw/scsi/scsi-disk.c index d4914178ea01e3b3de2e8e778caf6960e57df1dd..edd2f895e7028d78e5dbbf0027abedc03e340db9 100644 --- a/hw/scsi/scsi-disk.c +++ b/hw/scsi/scsi-disk.c @@ -246,12 +246,16 @@ static bool scsi_handle_rw_error(SCSIDiskReq *r, int ret, bool acct_failed) scsi_req_retry(&r->req); return true; + case BLOCK_ERROR_ACTION_RETRY: + scsi_req_retry(&r->req); + return true; + default: g_assert_not_reached(); } } -static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) +static bool scsi_disk_req_handle_error(SCSIDiskReq *r, int ret, bool acct_failed) { if (r->req.io_canceled) { scsi_req_cancel_complete(&r->req); @@ -265,6 +269,18 @@ static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) return false; } +static bool scsi_disk_req_check_error(SCSIDiskReq *r, int ret, bool acct_failed) +{ + SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, r->req.dev); + + if (r->req.io_canceled || ret < 0) { + return scsi_disk_req_handle_error(r, ret, acct_failed); + } + + blk_error_retry_reset_timeout(s->qdev.conf.blk); + return false; +} + static void scsi_aio_complete(void *opaque, int ret) { SCSIDiskReq *r = (SCSIDiskReq *)opaque; @@ -411,7 +427,7 @@ static void scsi_do_read(SCSIDiskReq *r, int ret) SCSIDiskClass *sdc = (SCSIDiskClass *) object_get_class(OBJECT(s)); assert (r->req.aiocb == NULL); - if (scsi_disk_req_check_error(r, ret, 
false)) { + if (scsi_disk_req_handle_error(r, ret, false)) { goto done; } @@ -451,6 +467,9 @@ static void scsi_do_read_cb(void *opaque, int ret) block_acct_failed(blk_get_stats(s->qdev.conf.blk), &r->acct); } else { block_acct_done(blk_get_stats(s->qdev.conf.blk), &r->acct); + if (!r->req.io_canceled) { + blk_error_retry_reset_timeout(s->qdev.conf.blk); + } } scsi_do_read(opaque, ret); aio_context_release(blk_get_aio_context(s->qdev.conf.blk)); @@ -1781,7 +1800,7 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) uint32_t nb_sectors = scsi_data_cdb_xfer(r->req.cmd.buf); WriteSameCBData *data; uint8_t *buf; - int i; + int i, l; /* Fail if PBDATA=1 or LBDATA=1 or ANCHOR=1. */ if (nb_sectors == 0 || (req->cmd.buf[1] & 0x16)) { @@ -1823,8 +1842,9 @@ static void scsi_disk_emulate_write_same(SCSIDiskReq *r, uint8_t *inbuf) data->iov.iov_len); qemu_iovec_init_external(&data->qiov, &data->iov, 1); - for (i = 0; i < data->iov.iov_len; i += s->qdev.blocksize) { - memcpy(&buf[i], inbuf, s->qdev.blocksize); + for (i = 0; i < data->iov.iov_len; i += l) { + l = MIN(s->qdev.blocksize, data->iov.iov_len - i); + memcpy(&buf[i], inbuf, l); } scsi_req_ref(&r->req); @@ -1930,7 +1950,10 @@ static int32_t scsi_disk_emulate_command(SCSIRequest *req, uint8_t *buf) memset(outbuf, 0, r->buflen); switch (req->cmd.buf[0]) { case TEST_UNIT_READY: - assert(blk_is_available(s->qdev.conf.blk)); + if (!blk_is_available(s->qdev.conf.blk)) { + scsi_check_condition(r, SENSE_CODE(NO_MEDIUM)); + return 0; + } break; case INQUIRY: buflen = scsi_disk_emulate_inquiry(req, outbuf); @@ -2278,6 +2301,13 @@ static void scsi_disk_resize_cb(void *opaque) } } +static void scsi_disk_retry_request(void *opaque) +{ + SCSIDiskState *s = opaque; + + scsi_retry_requests(&s->qdev); +} + static void scsi_cd_change_media_cb(void *opaque, bool load, Error **errp) { SCSIDiskState *s = opaque; @@ -2326,10 +2356,12 @@ static const BlockDevOps scsi_disk_removable_block_ops = { .is_medium_locked = scsi_cd_is_medium_locked, .resize_cb = scsi_disk_resize_cb, + .retry_request_cb = scsi_disk_retry_request, }; static const BlockDevOps scsi_disk_block_ops = { .resize_cb = scsi_disk_resize_cb, + .retry_request_cb = scsi_disk_retry_request, }; static void scsi_disk_unit_attention_reported(SCSIDevice *dev) @@ -3107,9 +3139,7 @@ static const TypeInfo scsi_cd_info = { #ifdef __linux__ static Property scsi_block_properties[] = { - DEFINE_BLOCK_ERROR_PROPERTIES(SCSIDiskState, qdev.conf), - DEFINE_PROP_DRIVE("drive", SCSIDiskState, qdev.conf.blk), - DEFINE_PROP_BOOL("share-rw", SCSIDiskState, qdev.conf.share_rw, false), + DEFINE_SCSI_DISK_PROPERTIES(), DEFINE_PROP_UINT16("rotation_rate", SCSIDiskState, rotation_rate, 0), DEFINE_PROP_UINT64("max_unmap_size", SCSIDiskState, max_unmap_size, DEFAULT_MAX_UNMAP_SIZE), diff --git a/hw/scsi/scsi-generic.c b/hw/scsi/scsi-generic.c index 0306ccc7b1e4827a67aaed926f9333ff4658ad86..1f515860480224ac721b3593acbba3b2c6c8a846 100644 --- a/hw/scsi/scsi-generic.c +++ b/hw/scsi/scsi-generic.c @@ -179,6 +179,10 @@ static int scsi_handle_inquiry_reply(SCSIGenericReq *r, SCSIDevice *s, int len) (r->req.cmd.buf[1] & 0x01)) { page = r->req.cmd.buf[2]; if (page == 0xb0) { + if (s->blocksize == 0) { + qemu_log("device blocksize is 0!\n"); + abort(); + } uint64_t max_transfer = blk_get_max_hw_transfer(s->conf.blk); uint32_t max_iov = blk_get_max_hw_iov(s->conf.blk); @@ -314,11 +318,23 @@ static void scsi_read_complete(void * opaque, int ret) /* Snoop READ CAPACITY output to set the blocksize. 
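Backing up to the WRITE SAME change above: the loop now caps each copy at the space left in the buffer, since data->iov.iov_len need not be a multiple of the block size, and the final memcpy could previously run past the end of buf. As a self-contained sketch of the pattern (MIN() is QEMU's macro from qemu/osdep.h):

    /* Fill dst[0..len) with a repeating blocksize-byte pattern,
     * allowing a short final chunk. */
    static void fill_pattern(uint8_t *dst, size_t len,
                             const uint8_t *pattern, size_t blocksize)
    {
        size_t off, chunk;

        for (off = 0; off < len; off += chunk) {
            chunk = MIN(blocksize, len - off);   /* last chunk may be short */
            memcpy(&dst[off], pattern, chunk);
        }
    }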
*/ if (r->req.cmd.buf[0] == READ_CAPACITY_10 && (ldl_be_p(&r->buf[0]) != 0xffffffffU || s->max_lba == 0)) { - s->blocksize = ldl_be_p(&r->buf[4]); + int new_blocksize = ldl_be_p(&r->buf[4]); + if (s->blocksize != new_blocksize) { + qemu_log("device id=%s type=%d: blocksize %d change to %d\n", + s->qdev.id ? s->qdev.id : "null", s->type, + s->blocksize, new_blocksize); + } + s->blocksize = new_blocksize; s->max_lba = ldl_be_p(&r->buf[0]) & 0xffffffffULL; } else if (r->req.cmd.buf[0] == SERVICE_ACTION_IN_16 && (r->req.cmd.buf[1] & 31) == SAI_READ_CAPACITY_16) { - s->blocksize = ldl_be_p(&r->buf[8]); + int new_blocksize = ldl_be_p(&r->buf[8]); + if (s->blocksize != new_blocksize) { + qemu_log("device id=%s type=%d: blocksize %d change to %d\n", + s->qdev.id ? s->qdev.id : "null", s->type, + s->blocksize, new_blocksize); + } + s->blocksize = new_blocksize; s->max_lba = ldq_be_p(&r->buf[0]); } blk_set_guest_block_size(s->conf.blk, s->blocksize); diff --git a/hw/scsi/trace-events b/hw/scsi/trace-events index 92d5b40f892ac0ced15fefed2b342da90ffdfb84..ae8551f2797e56f0934771e49280d47cbd80d722 100644 --- a/hw/scsi/trace-events +++ b/hw/scsi/trace-events @@ -42,18 +42,18 @@ mptsas_config_sas_phy(void *dev, int address, int port, int phy_handle, int dev_ # megasas.c megasas_init_firmware(uint64_t pa) "pa 0x%" PRIx64 " " -megasas_init_queue(uint64_t queue_pa, int queue_len, uint64_t head, uint64_t tail, uint32_t flags) "queue at 0x%" PRIx64 " len %d head 0x%" PRIx64 " tail 0x%" PRIx64 " flags 0x%x" +megasas_init_queue(uint64_t queue_pa, int queue_len, uint32_t head, uint32_t tail, uint32_t flags) "queue at 0x%" PRIx64 " len %d head 0x%" PRIx32 " tail 0x%" PRIx32 " flags 0x%x" megasas_initq_map_failed(int frame) "scmd %d: failed to map queue" megasas_initq_mapped(uint64_t pa) "queue already mapped at 0x%" PRIx64 megasas_initq_mismatch(int queue_len, int fw_cmds) "queue size %d max fw cmds %d" megasas_qf_mapped(unsigned int index) "skip mapped frame 0x%x" megasas_qf_new(unsigned int index, uint64_t frame) "frame 0x%x addr 0x%" PRIx64 megasas_qf_busy(unsigned long pa) "all frames busy for frame 0x%lx" -megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, unsigned int head, unsigned int tail, int busy) "frame 0x%x count %d context 0x%" PRIx64 " head 0x%x tail 0x%x busy %d" -megasas_qf_update(unsigned int head, unsigned int tail, unsigned int busy) "head 0x%x tail 0x%x busy %d" +megasas_qf_enqueue(unsigned int index, unsigned int count, uint64_t context, uint32_t head, uint32_t tail, unsigned int busy) "frame 0x%x count %d context 0x%" PRIx64 " head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u" +megasas_qf_update(uint32_t head, uint32_t tail, unsigned int busy) "head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u" megasas_qf_map_failed(int cmd, unsigned long frame) "scmd %d: frame %lu" megasas_qf_complete_noirq(uint64_t context) "context 0x%" PRIx64 " " -megasas_qf_complete(uint64_t context, unsigned int head, unsigned int tail, int busy) "context 0x%" PRIx64 " head 0x%x tail 0x%x busy %d" +megasas_qf_complete(uint64_t context, uint32_t head, uint32_t tail, int busy) "context 0x%" PRIx64 " head 0x%" PRIx32 " tail 0x%" PRIx32 " busy %u" megasas_frame_busy(uint64_t addr) "frame 0x%" PRIx64 " busy" megasas_unhandled_frame_cmd(int cmd, uint8_t frame_cmd) "scmd %d: MFI cmd 0x%x" megasas_handle_scsi(const char *frame, int bus, int dev, int lun, void *sdev, unsigned long size) "%s dev %x/%x/%x sdev %p xfer %lu" diff --git a/hw/scsi/vhost-scsi-common.c b/hw/scsi/vhost-scsi-common.c index 
767f827e55beb923e96b903900aaa1c9e4e9c266..18ea5dcfa181ca72f6418cd3aefd4b05b0d4fe93 100644 --- a/hw/scsi/vhost-scsi-common.c +++ b/hw/scsi/vhost-scsi-common.c @@ -68,7 +68,7 @@ int vhost_scsi_common_start(VHostSCSICommon *vsc) goto err_guest_notifiers; } - ret = vhost_dev_start(&vsc->dev, vdev); + ret = vhost_dev_start(&vsc->dev, vdev, true); if (ret < 0) { error_report("Error start vhost dev"); goto err_guest_notifiers; @@ -101,7 +101,7 @@ void vhost_scsi_common_stop(VHostSCSICommon *vsc) VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); int ret = 0; - vhost_dev_stop(&vsc->dev, vdev); + vhost_dev_stop(&vsc->dev, vdev, true); if (k->set_guest_notifiers) { ret = k->set_guest_notifiers(qbus->parent, vsc->dev.nvqs, false); diff --git a/hw/scsi/vhost-scsi.c b/hw/scsi/vhost-scsi.c index 039caf2614eb16db1df0ffb9636d5625eaf6bbb2..305906817578bba5180354c05151199292131f53 100644 --- a/hw/scsi/vhost-scsi.c +++ b/hw/scsi/vhost-scsi.c @@ -170,6 +170,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) Error *err = NULL; int vhostfd = -1; int ret; + struct vhost_virtqueue *vqs = NULL; if (!vs->conf.wwpn) { error_setg(errp, "vhost-scsi: missing wwpn"); @@ -213,13 +214,19 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) } vsc->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues; - vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs); + vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs); + vsc->dev.vqs = vqs; vsc->dev.vq_index = 0; vsc->dev.backend_features = 0; ret = vhost_dev_init(&vsc->dev, (void *)(uintptr_t)vhostfd, VHOST_BACKEND_TYPE_KERNEL, 0, errp); if (ret < 0) { + /* + * vhost_dev_init calls vhost_dev_cleanup on error, which closes + * vhostfd, don't double close it. + */ + vhostfd = -1; goto free_vqs; } @@ -232,7 +239,7 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) return; free_vqs: - g_free(vsc->dev.vqs); + g_free(vqs); if (!vsc->migratable) { migrate_del_blocker(vsc->migration_blocker); } @@ -240,7 +247,9 @@ static void vhost_scsi_realize(DeviceState *dev, Error **errp) error_free(vsc->migration_blocker); virtio_scsi_common_unrealize(dev); close_fd: - close(vhostfd); + if (vhostfd >= 0) { + close(vhostfd); + } return; } @@ -264,6 +273,13 @@ static void vhost_scsi_unrealize(DeviceState *dev) virtio_scsi_common_unrealize(dev); } +static struct vhost_dev *vhost_scsi_get_vhost(VirtIODevice *vdev) +{ + VHostSCSI *s = VHOST_SCSI(vdev); + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + return &vsc->dev; +} + static Property vhost_scsi_properties[] = { DEFINE_PROP_STRING("vhostfd", VirtIOSCSICommon, conf.vhostfd), DEFINE_PROP_STRING("wwpn", VirtIOSCSICommon, conf.wwpn), @@ -298,6 +314,7 @@ static void vhost_scsi_class_init(ObjectClass *klass, void *data) vdc->get_features = vhost_scsi_common_get_features; vdc->set_config = vhost_scsi_common_set_config; vdc->set_status = vhost_scsi_set_status; + vdc->get_vhost = vhost_scsi_get_vhost; fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path; } diff --git a/hw/scsi/vhost-user-scsi.c b/hw/scsi/vhost-user-scsi.c index 1b2f7eed98895b8851bf13661881209c10e0676a..052740a76ec9fad97bfe2976db62152cc60a8a97 100644 --- a/hw/scsi/vhost-user-scsi.c +++ b/hw/scsi/vhost-user-scsi.c @@ -29,6 +29,9 @@ #include "hw/virtio/virtio-access.h" #include "chardev/char-fe.h" #include "sysemu/sysemu.h" +#include "qemu/log.h" + +#define VHOST_USER_SCSI_RECONNECT_TIME 3 /* Features supported by the host application */ static const int user_feature_bits[] = { @@ -59,7 +62,7 @@ static void 
vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status) ret = vhost_scsi_common_start(vsc); if (ret < 0) { error_report("unable to start vhost-user-scsi: %s", strerror(-ret)); - exit(1); + return; } } else { vhost_scsi_common_stop(vsc); @@ -89,11 +92,43 @@ static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq) { } +static void vhost_user_scsi_event(void *opaque, QEMUChrEvent event) +{ + int ret; + VHostUserSCSI *s = (VHostUserSCSI *)opaque; + VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + VirtIODevice *vdev = VIRTIO_DEVICE(s); + + qemu_log("event:%d, vdev status:%d\n", event, vdev->status); + + /* if CHR_EVENT_CLOSED, do nothing */ + if (event != CHR_EVENT_OPENED) { + return; + } + + /* If the vdev status is not DRIVER_OK, just wait; * vsc should start once the status changes to DRIVER_OK */ + if (!(vdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) { + return; + } + + /* vsc may not have fully started because the vhost app stopped */ + if (vsc->dev.started) { + vhost_scsi_common_stop(vsc); + } + + ret = vhost_scsi_common_start(vsc); + if (ret < 0) { + qemu_log("unable to start vhost-user-scsi: %s\n", strerror(-ret)); + } +} + static void vhost_user_scsi_realize(DeviceState *dev, Error **errp) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); VHostUserSCSI *s = VHOST_USER_SCSI(dev); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); + Chardev *chr; struct vhost_virtqueue *vqs = NULL; Error *err = NULL; int ret; @@ -132,6 +167,11 @@ static void vhost_user_scsi_realize(DeviceState *dev, Error **errp) vsc->lun = 0; vsc->target = vs->conf.boot_tpgt; + chr = qemu_chr_fe_get_driver(&vs->conf.chardev); + qemu_chr_set_reconnect_time(chr, VHOST_USER_SCSI_RECONNECT_TIME); + qemu_chr_fe_set_handlers(&vs->conf.chardev, NULL, NULL, + vhost_user_scsi_event, NULL, s, NULL, true); + return; free_vhost: diff --git a/hw/scsi/virtio-scsi.c b/hw/scsi/virtio-scsi.c index 51fd09522ac687cbb8a32bf1c6ecf4d9c2ba25ab..781a37fe89e0a51de889a9aa1c46e33053858351 100644 --- a/hw/scsi/virtio-scsi.c +++ b/hw/scsi/virtio-scsi.c @@ -638,7 +638,7 @@ static int virtio_scsi_handle_cmd_req_prepare(VirtIOSCSI *s, VirtIOSCSIReq *req) req->req.cmd.tag, req->req.cmd.cdb[0]); d = virtio_scsi_device_get(s, req->req.cmd.lun); - if (!d) { + if (!d || !d->qdev.realized) { req->resp.cmd.response = VIRTIO_SCSI_S_BAD_TARGET; virtio_scsi_complete_cmd_req(req); return -ENOENT; diff --git a/hw/scsi/vmw_pvscsi.c b/hw/scsi/vmw_pvscsi.c index cd76bd67ab77159868b6b247cb96fc631a7a4c6b..d5c6293a2147adcbccee3bb9ce9b531d8b5ad3a2 100644 --- a/hw/scsi/vmw_pvscsi.c +++ b/hw/scsi/vmw_pvscsi.c @@ -50,12 +50,14 @@ #define PVSCSI_MAX_CMD_DATA_WORDS \ (sizeof(PVSCSICmdDescSetupRings)/sizeof(uint32_t)) -#define RS_GET_FIELD(m, field) \ - (ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ - (m)->rs_pa + offsetof(struct PVSCSIRingsState, field))) +#define RS_GET_FIELD(pval, m, field) \ + ldl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ + (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), \ + pval, MEMTXATTRS_UNSPECIFIED) #define RS_SET_FIELD(m, field, val) \ (stl_le_pci_dma(&container_of(m, PVSCSIState, rings)->parent_obj, \ - (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val)) + (m)->rs_pa + offsetof(struct PVSCSIRingsState, field), val, \ + MEMTXATTRS_UNSPECIFIED)) struct PVSCSIClass { PCIDeviceClass parent_class; @@ -247,10 +249,11 @@ pvscsi_ring_cleanup(PVSCSIRingInfo *mgr) static hwaddr pvscsi_ring_pop_req_descr(PVSCSIRingInfo *mgr) { - uint32_t ready_ptr = RS_GET_FIELD(mgr, reqProdIdx); + uint32_t
ready_ptr; uint32_t ring_size = PVSCSI_MAX_NUM_PAGES_REQ_RING * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; + RS_GET_FIELD(&ready_ptr, mgr, reqProdIdx); if (ready_ptr != mgr->consumed_ptr && ready_ptr - mgr->consumed_ptr < ring_size) { uint32_t next_ready_ptr = @@ -321,8 +324,11 @@ pvscsi_ring_flush_cmp(PVSCSIRingInfo *mgr) static bool pvscsi_ring_msg_has_room(PVSCSIRingInfo *mgr) { - uint32_t prodIdx = RS_GET_FIELD(mgr, msgProdIdx); - uint32_t consIdx = RS_GET_FIELD(mgr, msgConsIdx); + uint32_t prodIdx; + uint32_t consIdx; + + RS_GET_FIELD(&prodIdx, mgr, msgProdIdx); + RS_GET_FIELD(&consIdx, mgr, msgConsIdx); return (prodIdx - consIdx) < (mgr->msg_len_mask + 1); } @@ -1178,7 +1184,8 @@ pvscsi_realizefn(PCIDevice *pci_dev, Error **errp) pcie_endpoint_cap_init(pci_dev, PVSCSI_EXP_EP_OFFSET); } - s->completion_worker = qemu_bh_new(pvscsi_process_completion_queue, s); + s->completion_worker = qemu_bh_new_guarded(pvscsi_process_completion_queue, s, + &DEVICE(pci_dev)->mem_reentrancy_guard); scsi_bus_init(&s->bus, sizeof(s->bus), DEVICE(pci_dev), &pvscsi_scsi_info); /* override default SCSI bus hotplug-handler, with pvscsi's one */ diff --git a/hw/sd/allwinner-sdhost.c b/hw/sd/allwinner-sdhost.c index 9166d6638de98d6795e84dba90f722a1b15cfd8f..de5bc49e68f7f1de3129dd7a44cf891364e198a6 100644 --- a/hw/sd/allwinner-sdhost.c +++ b/hw/sd/allwinner-sdhost.c @@ -311,7 +311,8 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, uint8_t buf[1024]; /* Read descriptor */ - dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc)); + dma_memory_read(&s->dma_as, desc_addr, desc, sizeof(*desc), + MEMTXATTRS_UNSPECIFIED); if (desc->size == 0) { desc->size = klass->max_desc_size; } else if (desc->size > klass->max_desc_size) { @@ -337,23 +338,24 @@ static uint32_t allwinner_sdhost_process_desc(AwSdHostState *s, /* Write to SD bus */ if (is_write) { dma_memory_read(&s->dma_as, - (desc->addr & DESC_SIZE_MASK) + num_done, - buf, buf_bytes); + (desc->addr & DESC_SIZE_MASK) + num_done, buf, + buf_bytes, MEMTXATTRS_UNSPECIFIED); sdbus_write_data(&s->sdbus, buf, buf_bytes); /* Read from SD bus */ } else { sdbus_read_data(&s->sdbus, buf, buf_bytes); dma_memory_write(&s->dma_as, - (desc->addr & DESC_SIZE_MASK) + num_done, - buf, buf_bytes); + (desc->addr & DESC_SIZE_MASK) + num_done, buf, + buf_bytes, MEMTXATTRS_UNSPECIFIED); } num_done += buf_bytes; } /* Clear hold flag and flush descriptor */ desc->status &= ~DESC_STATUS_HOLD; - dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc)); + dma_memory_write(&s->dma_as, desc_addr, desc, sizeof(*desc), + MEMTXATTRS_UNSPECIFIED); return num_done; } diff --git a/hw/sd/sdhci.c b/hw/sd/sdhci.c index c9dc065cc52d401fdec417a8b6f067b5d4f111f8..211daa4bb0b9c743bdde4cedc1b73dd7101c161f 100644 --- a/hw/sd/sdhci.c +++ b/hw/sd/sdhci.c @@ -471,6 +471,7 @@ static uint32_t sdhci_read_dataport(SDHCIState *s, unsigned size) } for (i = 0; i < size; i++) { + assert(s->data_count < s->buf_maxsz); value |= s->fifo_buffer[s->data_count] << i * 8; s->data_count++; /* check if we've read all valid data (blksize bytes) from buffer */ @@ -559,6 +560,7 @@ static void sdhci_write_dataport(SDHCIState *s, uint32_t value, unsigned size) } for (i = 0; i < size; i++) { + assert(s->data_count < s->buf_maxsz); s->fifo_buffer[s->data_count] = value & 0xFF; s->data_count++; value >>= 8; @@ -616,8 +618,8 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) s->blkcnt--; } } - dma_memory_write(s->dma_as, s->sdmasysad, - &s->fifo_buffer[begin], s->data_count - begin); + 
dma_memory_write(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], + s->data_count - begin, MEMTXATTRS_UNSPECIFIED); s->sdmasysad += s->data_count - begin; if (s->data_count == block_size) { s->data_count = 0; @@ -637,8 +639,8 @@ static void sdhci_sdma_transfer_multi_blocks(SDHCIState *s) s->data_count = block_size; boundary_count -= block_size - begin; } - dma_memory_read(s->dma_as, s->sdmasysad, - &s->fifo_buffer[begin], s->data_count - begin); + dma_memory_read(s->dma_as, s->sdmasysad, &s->fifo_buffer[begin], + s->data_count - begin, MEMTXATTRS_UNSPECIFIED); s->sdmasysad += s->data_count - begin; if (s->data_count == block_size) { sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); @@ -670,9 +672,11 @@ static void sdhci_sdma_transfer_single_block(SDHCIState *s) if (s->trnmod & SDHC_TRNS_READ) { sdbus_read_data(&s->sdbus, s->fifo_buffer, datacnt); - dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt); + dma_memory_write(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt, + MEMTXATTRS_UNSPECIFIED); } else { - dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt); + dma_memory_read(s->dma_as, s->sdmasysad, s->fifo_buffer, datacnt, + MEMTXATTRS_UNSPECIFIED); sdbus_write_data(&s->sdbus, s->fifo_buffer, datacnt); } s->blkcnt--; @@ -694,7 +698,8 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr) hwaddr entry_addr = (hwaddr)s->admasysaddr; switch (SDHC_DMA_TYPE(s->hostctl1)) { case SDHC_CTRL_ADMA2_32: - dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2)); + dma_memory_read(s->dma_as, entry_addr, &adma2, sizeof(adma2), + MEMTXATTRS_UNSPECIFIED); adma2 = le64_to_cpu(adma2); /* The spec does not specify endianness of descriptor table. * We currently assume that it is LE. @@ -705,7 +710,8 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr) dscr->incr = 8; break; case SDHC_CTRL_ADMA1_32: - dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1)); + dma_memory_read(s->dma_as, entry_addr, &adma1, sizeof(adma1), + MEMTXATTRS_UNSPECIFIED); adma1 = le32_to_cpu(adma1); dscr->addr = (hwaddr)(adma1 & 0xFFFFF000); dscr->attr = (uint8_t)extract32(adma1, 0, 7); @@ -717,10 +723,13 @@ static void get_adma_description(SDHCIState *s, ADMADescr *dscr) } break; case SDHC_CTRL_ADMA2_64: - dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1); - dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2); + dma_memory_read(s->dma_as, entry_addr, &dscr->attr, 1, + MEMTXATTRS_UNSPECIFIED); + dma_memory_read(s->dma_as, entry_addr + 2, &dscr->length, 2, + MEMTXATTRS_UNSPECIFIED); dscr->length = le16_to_cpu(dscr->length); - dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8); + dma_memory_read(s->dma_as, entry_addr + 4, &dscr->addr, 8, + MEMTXATTRS_UNSPECIFIED); dscr->addr = le64_to_cpu(dscr->addr); dscr->attr &= (uint8_t) ~0xC0; dscr->incr = 12; @@ -785,7 +794,8 @@ static void sdhci_do_adma(SDHCIState *s) } dma_memory_write(s->dma_as, dscr.addr, &s->fifo_buffer[begin], - s->data_count - begin); + s->data_count - begin, + MEMTXATTRS_UNSPECIFIED); dscr.addr += s->data_count - begin; if (s->data_count == block_size) { s->data_count = 0; @@ -810,7 +820,8 @@ static void sdhci_do_adma(SDHCIState *s) } dma_memory_read(s->dma_as, dscr.addr, &s->fifo_buffer[begin], - s->data_count - begin); + s->data_count - begin, + MEMTXATTRS_UNSPECIFIED); dscr.addr += s->data_count - begin; if (s->data_count == block_size) { sdbus_write_data(&s->sdbus, s->fifo_buffer, block_size); @@ -1175,6 +1186,12 @@ sdhci_write(void *opaque, hwaddr offset, uint64_t val, 
unsigned size) if (!(s->capareg & R_SDHC_CAPAB_SDMA_MASK)) { value &= ~SDHC_TRNS_DMA; } + + /* TRNMOD writes are inhibited while Command Inhibit (DAT) is true */ + if (s->prnsts & SDHC_DATA_INHIBIT) { + mask |= 0xffff; + } + MASKED_WRITE(s->trnmod, mask, value & SDHC_TRNMOD_MASK); MASKED_WRITE(s->cmdreg, mask >> 16, value >> 16); diff --git a/hw/smbios/smbios.c b/hw/smbios/smbios.c index 7397e567373b2dbfabae8fe10a5f0979d46bd160..d506fd4e7e7fd2867f8a223df77296a9fd476061 100644 --- a/hw/smbios/smbios.c +++ b/hw/smbios/smbios.c @@ -332,6 +332,11 @@ static const QemuOptDesc qemu_smbios_type4_opts[] = { }; static const QemuOptDesc qemu_smbios_type11_opts[] = { + { + .name = "type", + .type = QEMU_OPT_NUMBER, + .help = "SMBIOS element type", + }, { .name = "value", .type = QEMU_OPT_STRING, @@ -342,6 +347,7 @@ static const QemuOptDesc qemu_smbios_type11_opts[] = { .type = QEMU_OPT_STRING, .help = "OEM string data from file", }, + { /* end of list */ } }; static const QemuOptDesc qemu_smbios_type17_opts[] = { @@ -688,7 +694,9 @@ static void smbios_build_type_4_table(MachineState *ms, unsigned instance) t->thread_count = ms->smp.threads; t->processor_characteristics = cpu_to_le16(0x02); /* Unknown */ t->processor_family2 = cpu_to_le16(0x01); /* Other */ - + t->corecount2 = 0; + t->enabledcorecount2 = 0; + t->threadcount2 = 0; SMBIOS_BUILD_TABLE_POST; smbios_type4_count++; } @@ -1163,13 +1171,15 @@ void smbios_entry_add(QemuOpts *opts, Error **errp) return; } - if (test_bit(header->type, have_fields_bitmap)) { - error_setg(errp, - "can't load type %d struct, fields already specified!", - header->type); - return; + if (header->type <= SMBIOS_MAX_TYPE) { + if (test_bit(header->type, have_fields_bitmap)) { + error_setg(errp, + "can't load type %d struct, fields already specified!", + header->type); + return; + } + set_bit(header->type, have_binfile_bitmap); } - set_bit(header->type, have_binfile_bitmap); if (header->type == 4) { smbios_type4_count++; diff --git a/hw/ssi/xilinx_spi.c b/hw/ssi/xilinx_spi.c index b2819a7ff092c7e6d2263be94f19ffa4a23bd583..92e7cabf42c28cdb7a06bd5aa35de60f4b06bd60 100644 --- a/hw/ssi/xilinx_spi.c +++ b/hw/ssi/xilinx_spi.c @@ -156,6 +156,7 @@ static void xlx_spi_do_reset(XilinxSPI *s) txfifo_reset(s); s->regs[R_SPISSR] = ~0; + s->regs[R_SPICR] = R_SPICR_MTI; xlx_spi_update_irq(s); xlx_spi_update_cs(s); } diff --git a/hw/sw64/Kconfig b/hw/sw64/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..0dc49576a55b28e8f68d9aca2a96f86aa881b242 --- /dev/null +++ b/hw/sw64/Kconfig @@ -0,0 +1,14 @@ +config CORE3 + bool + imply PCI_DEVICES + imply TEST_DEVICES + imply E1000_PCI + select PCI_EXPRESS + select SUN4V_RTC + select VIRTIO_MMIO + select SERIAL + select VIRTIO_VGA + select IDE_CMD646 + select ISA_BUS + select PCKBD + select MSI_NONBROKEN diff --git a/hw/sw64/Makefile.objs b/hw/sw64/Makefile.objs new file mode 100644 index 0000000000000000000000000000000000000000..73add9a91d697bebe22dd577e158aca67543e452 --- /dev/null +++ b/hw/sw64/Makefile.objs @@ -0,0 +1 @@ +obj-y += core3.o core3_board.o diff --git a/hw/sw64/core.h b/hw/sw64/core.h new file mode 100644 index 0000000000000000000000000000000000000000..49233822299d5b310f9dcc2183f3eae4f692ec69 --- /dev/null +++ b/hw/sw64/core.h @@ -0,0 +1,25 @@ +#ifndef HW_SW64_SYS_H +#define HW_SW64_SYS_H + +typedef struct boot_params { + unsigned long initrd_size; /* size of initrd */ + unsigned long initrd_start; /* logical address of initrd */ + unsigned long dtb_start; /* logical address of dtb */ + unsigned long 
efi_systab; /* logical address of EFI system table */ + unsigned long efi_memmap; /* logical address of EFI memory map */ + unsigned long efi_memmap_size; /* size of EFI memory map */ + unsigned long efi_memdesc_size; /* size of an EFI memory map descriptor */ + unsigned long efi_memdesc_version; /* memory descriptor version */ + unsigned long cmdline; /* logical address of cmdline */ +} BOOT_PARAMS; + +void core3_board_init(SW64CPU *cpus[4], MemoryRegion *ram); +#endif + +#define MAX_CPUS 64 + +#ifdef CONFIG_KVM +#define MAX_CPUS_CORE3 64 +#else +#define MAX_CPUS_CORE3 32 +#endif diff --git a/hw/sw64/core3.c b/hw/sw64/core3.c new file mode 100644 index 0000000000000000000000000000000000000000..eceeb3bec3ba22740fdfaf76a966fc1bd42373db --- /dev/null +++ b/hw/sw64/core3.c @@ -0,0 +1,203 @@ +/* + * QEMU CORE3 hardware system emulator. + * + * Copyright (c) 2021 Lu Feifei + * + * This work is licensed under the GNU GPL license version 2 or later. + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu/datadir.h" +#include "cpu.h" +#include "hw/hw.h" +#include "elf.h" +#include "hw/loader.h" +#include "hw/boards.h" +#include "qemu/error-report.h" +#include "sysemu/sysemu.h" +#include "sysemu/kvm.h" +#include "sysemu/reset.h" +#include "hw/ide.h" +#include "hw/char/serial.h" +#include "qemu/cutils.h" +#include "ui/console.h" +#include "core.h" +#include "hw/boards.h" +#include "sysemu/numa.h" +#include "qemu/uuid.h" +#include "qemu/bswap.h" + +#define VMUUID 0xFF40 + +static uint64_t cpu_sw64_virt_to_phys(void *opaque, uint64_t addr) +{ + return addr &= ~0xffffffff80000000 ; +} + +static CpuInstanceProperties +sw64_cpu_index_to_props(MachineState *ms, unsigned cpu_index) +{ + MachineClass *mc = MACHINE_GET_CLASS(ms); + const CPUArchIdList *possible_cpus = mc->possible_cpu_arch_ids(ms); + + assert(cpu_index < possible_cpus->len); + return possible_cpus->cpus[cpu_index].props; +} + +static int64_t sw64_get_default_cpu_node_id(const MachineState *ms, int idx) +{ + int nb_numa_nodes = ms->numa_state->num_nodes; + return idx % nb_numa_nodes; +} + +static const CPUArchIdList *sw64_possible_cpu_arch_ids(MachineState *ms) +{ + int i; + unsigned int max_cpus = ms->smp.max_cpus; + + if (ms->possible_cpus) { + /* + * make sure that max_cpus hasn't changed since the first use, i.e. 
+ * -smp hasn't been parsed after it + */ + assert(ms->possible_cpus->len == max_cpus); + return ms->possible_cpus; + } + + ms->possible_cpus = g_malloc0(sizeof(CPUArchIdList) + + sizeof(CPUArchId) * max_cpus); + ms->possible_cpus->len = max_cpus; + for (i = 0; i < ms->possible_cpus->len; i++) { + ms->possible_cpus->cpus[i].type = ms->cpu_type; + ms->possible_cpus->cpus[i].vcpus_count = 1; + ms->possible_cpus->cpus[i].arch_id = i; + ms->possible_cpus->cpus[i].props.has_thread_id = true; + ms->possible_cpus->cpus[i].props.has_core_id = true; + ms->possible_cpus->cpus[i].props.core_id = i; + } + + return ms->possible_cpus; +} + +static void core3_cpu_reset(void *opaque) +{ + SW64CPU *cpu = opaque; + + cpu_reset(CPU(cpu)); +} + +static void core3_init(MachineState *machine) +{ + ram_addr_t ram_size = machine->ram_size; + ram_addr_t buf; + SW64CPU *cpus[machine->smp.max_cpus]; + long i, size; + const char *kernel_filename = machine->kernel_filename; + const char *kernel_cmdline = machine->kernel_cmdline; + char *hmcode_filename; + char *uefi_filename; + uint64_t hmcode_entry, hmcode_low, hmcode_high; + uint64_t kernel_entry, kernel_low, kernel_high; + BOOT_PARAMS *core3_boot_params = g_new0(BOOT_PARAMS, 1); + uint64_t param_offset; + QemuUUID uuid_out_put; + + memset(cpus, 0, sizeof(cpus)); + + for (i = 0; i < machine->smp.cpus; ++i) { + cpus[i] = SW64_CPU(cpu_create(machine->cpu_type)); + cpus[i]->env.csr[CID] = i; + qemu_register_reset(core3_cpu_reset, cpus[i]); + } + core3_board_init(cpus, machine->ram); + if (kvm_enabled()) + buf = ram_size; + else + buf = ram_size | (1UL << 63); + + rom_add_blob_fixed("ram_size", (char *)&buf, 0x8, 0x2040); + + uuid_out_put = qemu_uuid; + uuid_out_put = qemu_uuid_bswap(uuid_out_put); + pstrcpy_targphys("vm-uuid", VMUUID, 0x12, (char *)&(uuid_out_put)); + param_offset = 0x90B000UL; + core3_boot_params->cmdline = param_offset | 0xfff0000000000000UL; + rom_add_blob_fixed("core3_boot_params", (core3_boot_params), 0x48, 0x90A100); + + hmcode_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, kvm_enabled() ? "core3-reset":"core3-hmcode"); + if (hmcode_filename == NULL) { + if (kvm_enabled()) + error_report("no core3-reset provided"); + else + error_report("no core3-hmcode provided"); + exit(1); + } + size = load_elf(hmcode_filename, NULL, cpu_sw64_virt_to_phys, NULL, + &hmcode_entry, &hmcode_low, &hmcode_high, NULL, 0, EM_SW64, 0, 0); + if (size < 0) { + if (kvm_enabled()) + error_report("could not load core3-reset: '%s'", hmcode_filename); + else + error_report("could not load core3-hmcode: '%s'", hmcode_filename); + exit(1); + } + g_free(hmcode_filename); + + /* Start all cpus at the hmcode RESET entry point. */ + for (i = 0; i < machine->smp.cpus; ++i) { + if (kvm_enabled()) + cpus[i]->env.pc = init_pc; + else + cpus[i]->env.pc = hmcode_entry; + cpus[i]->env.hm_entry = hmcode_entry; + } + + if (!kernel_filename) { + uefi_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "uefi-bios-sw"); + if (uefi_filename == NULL) { + error_report("no virtual bios provided"); + exit(1); + } + size = load_image_targphys(uefi_filename, 0x2f00000UL, -1); + if (size < 0) { + error_report("could not load virtual bios: '%s'", uefi_filename); + exit(1); + } + g_free(uefi_filename); + } else { + /* Load a kernel. 
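One note on the boot-parameter blob above: the magic length 0x48 equals sizeof(BOOT_PARAMS), nine 8-byte fields. An equivalent call that keeps the two in sync would be (sketch, same names as the patch):

    QEMU_BUILD_BUG_ON(sizeof(BOOT_PARAMS) != 0x48);   /* nine 8-byte fields */
    rom_add_blob_fixed("core3_boot_params", core3_boot_params,
                       sizeof(*core3_boot_params), 0x90A100);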
*/ + size = load_elf(kernel_filename, NULL, cpu_sw64_virt_to_phys, NULL, + &kernel_entry, &kernel_low, &kernel_high, NULL, 0, EM_SW64, 0, 0); + if (size < 0) { + error_report("could not load kernel '%s'", kernel_filename); + exit(1); + } + cpus[0]->env.trap_arg1 = kernel_entry; + if (kernel_cmdline) + pstrcpy_targphys("cmdline", param_offset, 0x400, kernel_cmdline); + } +} + +static void board_reset(MachineState *state) +{ + qemu_devices_reset(); +} + +static void core3_machine_init(MachineClass *mc) +{ + mc->desc = "core3 BOARD"; + mc->init = core3_init; + mc->block_default_type = IF_IDE; + mc->max_cpus = MAX_CPUS_CORE3; + mc->pci_allow_0_address = true; + mc->is_default = 0; + mc->reset = board_reset; + mc->possible_cpu_arch_ids = sw64_possible_cpu_arch_ids; + mc->cpu_index_to_instance_props = sw64_cpu_index_to_props; + mc->default_cpu_type = SW64_CPU_TYPE_NAME("core3"); + mc->default_ram_id = "ram"; + mc->get_default_cpu_node_id = sw64_get_default_cpu_node_id; +} + +DEFINE_MACHINE("core3", core3_machine_init) diff --git a/hw/sw64/core3_board.c b/hw/sw64/core3_board.c new file mode 100644 index 0000000000000000000000000000000000000000..7f623cf773bc39064a9587ee2cb5300d201eb8f3 --- /dev/null +++ b/hw/sw64/core3_board.c @@ -0,0 +1,556 @@ +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "cpu.h" +#include "core.h" +#include "hw/hw.h" +#include "hw/boards.h" +#include "sysemu/sysemu.h" +#include "exec/address-spaces.h" +#include "hw/pci/pci_host.h" +#include "hw/pci/pci.h" +#include "hw/char/serial.h" +#include "hw/irq.h" +#include "net/net.h" +#include "hw/usb.h" +#include "hw/ide/pci.h" +#include "hw/ide/ahci.h" +#include "sysemu/numa.h" +#include "sysemu/kvm.h" +#include "sysemu/cpus.h" +#include "hw/pci/msi.h" +#include "hw/sw64/sw64_iommu.h" +#include "hw/loader.h" +#include "hw/nvram/fw_cfg.h" + +#define TYPE_SWBOARD_PCI_HOST_BRIDGE "core_board-pcihost" +#define SWBOARD_PCI_HOST_BRIDGE(obj) \ + OBJECT_CHECK(BoardState, (obj), TYPE_SWBOARD_PCI_HOST_BRIDGE) + +#define CORE3_MAX_CPUS_MASK 0x3ff +#define CORE3_CORES_SHIFT 10 +#define CORE3_CORES_MASK 0x3ff +#define CORE3_THREADS_SHIFT 20 +#define CORE3_THREADS_MASK 0xfff + +#define MAX_IDE_BUS 2 +#define SW_PIN_TO_IRQ 16 + +#define SW_FW_CFG_P_BASE (0x804920000000ULL) + +typedef struct SWBoard { + SW64CPU *cpu[MAX_CPUS_CORE3]; +} SWBoard; + +typedef struct BoardState { + PCIHostState parent_obj; + + SWBoard sboard; + uint64_t expire_time; +} BoardState; + +typedef struct TimerState { + void *opaque; + int order; +} TimerState; + +static void sw_create_fw_cfg(hwaddr addr) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + uint16_t smp_cpus = ms->smp.cpus; + FWCfgState *fw_cfg; + fw_cfg = fw_cfg_init_mem_wide(addr + 8, addr, 8, addr + 16, &address_space_memory); + fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, smp_cpus); + rom_set_fw(fw_cfg); +} + +#ifndef CONFIG_KVM +static void swboard_alarm_timer(void *opaque) +{ + TimerState *ts = (TimerState *)((uintptr_t)opaque); + BoardState *bs = (BoardState *)((uintptr_t)ts->opaque); + + int cpu = ts->order; + cpu_interrupt(CPU(bs->sboard.cpu[cpu]), CPU_INTERRUPT_TIMER); +} +#endif + +static PCIINTxRoute sw_route_intx_pin_to_irq(void *opaque, int pin) +{ + PCIINTxRoute route; + + route.mode = PCI_INTX_ENABLED; + route.irq = SW_PIN_TO_IRQ; + return route; +} + +static uint64_t convert_bit(int n) +{ + uint64_t ret; + + if (n == 64) + ret = 0xffffffffffffffffUL; + else + ret = (1UL << n) - 1; + + return ret; +} + +static uint64_t mcu_read(void *opaque, hwaddr addr, unsigned size) +{ + MachineState 
*ms = MACHINE(qdev_get_machine()); + unsigned int smp_cpus = ms->smp.cpus; + unsigned int smp_threads = ms->smp.threads; + unsigned int smp_cores = ms->smp.cores; + unsigned int max_cpus = ms->smp.max_cpus; + uint64_t ret = 0; + switch (addr) { + case 0x0000: + /* CG_ONLINE */ + { + int i; + for (i = 0; i < smp_cpus; i = i + 4) + ret |= (1UL << i); + } + break; + case 0x0080: + /* SMP_INFO */ + ret = (smp_threads & CORE3_THREADS_MASK) << CORE3_THREADS_SHIFT; + ret += (smp_cores & CORE3_CORES_MASK) << CORE3_CORES_SHIFT; + ret += max_cpus & CORE3_MAX_CPUS_MASK; + break; + /*IO_START*/ + case 0x1300: + ret = 0x1; + break; + case 0x3780: + /* MC_ONLINE */ + ret = convert_bit(smp_cpus); + break; + case 0x0900: + /* CPUID */ + ret = 0; + break; + case 0x1180: + /* LONGTIME */ + ret = qemu_clock_get_ns(QEMU_CLOCK_HOST) / 80; + break; + case 0x4900: + /* MC_CONFIG */ + break; + case 0x0780: + /* CORE_ONLINE */ + ret = convert_bit(smp_cpus); + break; + case 0x0680: + /* INIT_CTL */ + ret = 0x000003AE00000D28; + break; + default: + fprintf(stderr, "Unsupported MCU addr: 0x%04lx\n", addr); + return -1; + } + return ret; +} + +static void mcu_write(void *opaque, hwaddr addr, uint64_t val, unsigned size) +{ +#ifndef CONFIG_KVM +#ifdef CONFIG_DUMP_PRINTK + uint64_t print_addr; + uint32_t len; + int i; + + if (addr == 0x40000) { + print_addr = val & 0x7fffffff; + len = (uint32_t)(val >> 32); + uint8_t *buf; + buf = malloc(len + 10); + memset(buf, 0, len + 10); + cpu_physical_memory_rw(print_addr, buf, len, 0); + for (i = 0; i < len; i++) + printf("%c", buf[i]); + + free(buf); + return; + } +#endif +#endif +} + +static const MemoryRegionOps mcu_ops = { + .read = mcu_read, + .write = mcu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = + { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = + { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static uint64_t intpu_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t ret = 0; +#ifndef CONFIG_KVM + switch (addr) { + case 0x180: + /* LONGTIME */ + ret = qemu_clock_get_ns(QEMU_CLOCK_HOST) / 32; + break; + } +#endif + return ret; +} + +static void intpu_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ +#ifndef CONFIG_KVM + BoardState *bs = (BoardState *)opaque; + SW64CPU *cpu; + switch (addr) { + case 0x00: + val &= 0x1f; + cpu = bs->sboard.cpu[val]; + cpu->env.csr[II_REQ] = 0x100000; + cpu_interrupt(CPU(cpu),CPU_INTERRUPT_II0); + break; + default: + fprintf(stderr, "Unsupported IPU addr: 0x%04lx\n", addr); + break; + } +#endif +} + +static const MemoryRegionOps intpu_ops = { + .read = intpu_read, + .write = intpu_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = + { + .min_access_size = 8, + .max_access_size = 8, + }, + .impl = + { + .min_access_size = 8, + .max_access_size = 8, + }, +}; + +static MemTxResult msi_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, + MemTxAttrs attrs) +{ + return MEMTX_OK; +} + +MemTxResult msi_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ +#ifdef CONFIG_KVM + int ret = 0; + MSIMessage msg = {}; + + msg.address = (uint64_t) addr + 0x8000fee00000; + msg.data = (uint32_t) value; + + ret = kvm_irqchip_send_msi(kvm_state, msg); + if (ret < 0) { + fprintf(stderr, "KVM: injection failed, MSI lost (%s)\n", + strerror(-ret)); + } +#endif + return MEMTX_OK; +} + +static const MemoryRegionOps msi_ops = { + .read_with_attrs = msi_read, + .write_with_attrs = msi_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = + { + 
.min_access_size = 1, + .max_access_size = 8, + }, + .impl = + { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static uint64_t rtc_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t val = get_clock_realtime() / NANOSECONDS_PER_SECOND; + return val; +} + +static void rtc_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ +} + +static const MemoryRegionOps rtc_ops = { + .read = rtc_read, + .write = rtc_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = + { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = + { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size) +{ + return 1; +} + +static void ignore_write(void *opaque, hwaddr addr, uint64_t v, unsigned size) +{ +} + +const MemoryRegionOps core3_pci_ignore_ops = { + .read = ignore_read, + .write = ignore_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = + { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = + { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static uint64_t config_read(void *opaque, hwaddr addr, unsigned size) +{ + PCIBus *b = opaque; + uint32_t trans_addr = 0; + trans_addr |= ((addr >> 16) & 0xffff) << 8; + trans_addr |= (addr & 0xff); + return pci_data_read(b, trans_addr, size); +} + +static void config_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + PCIBus *b = opaque; + uint32_t trans_addr = 0; + trans_addr |= ((addr >> 16) & 0xffff) << 8; + trans_addr |= (addr & 0xff); + pci_data_write(b, trans_addr, val, size); +} + +const MemoryRegionOps core3_pci_config_ops = { + .read = config_read, + .write = config_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = + { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = + { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +static void cpu_irq_change(SW64CPU *cpu, uint64_t req) +{ + if (cpu != NULL) { + CPUState *cs = CPU(cpu); + if (req) + cpu_interrupt(cs, CPU_INTERRUPT_HARD); + else + cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD); + } +} + +static void swboard_set_irq(void *opaque, int irq, int level) +{ + BoardState *bs = opaque; + SW64CPU *cpu; + int i; + + if (kvm_enabled()) { + if (level == 0) + return; + kvm_set_irq(kvm_state, irq, level); + return; + } + + for (i = 0; i < 1; i++) { + cpu = bs->sboard.cpu[i]; + if (cpu != NULL) { + CPUState *cs = CPU(cpu); + if (level) + cpu_interrupt(cs, CPU_INTERRUPT_PCIE); + else + cpu_reset_interrupt(cs, CPU_INTERRUPT_PCIE); + } + } +} + +static int swboard_map_irq(PCIDevice *d, int irq_num) +{ + /* In fact, the return value is the interrupt type passed to the kernel, + * so it must stay the same as the type used by do_entInt in the kernel.
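 * All INTx pins from every slot collapse onto this single vector; it
 * matches the PCIINTxRoute (SW_PIN_TO_IRQ, also 16) that
 * sw_route_intx_pin_to_irq() reports back to the PCI core.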
+ */ + return 16; +} + +static void serial_set_irq(void *opaque, int irq, int level) +{ + BoardState *bs = (BoardState *)opaque; + MachineState *ms = MACHINE(qdev_get_machine()); + unsigned int smp_cpus = ms->smp.cpus; + int i; + if (level == 0) + return; + if (kvm_enabled()) { + kvm_set_irq(kvm_state, irq, level); + return; + } + for (i = 0; i < smp_cpus; i++) { + if (bs->sboard.cpu[i]) + cpu_irq_change(bs->sboard.cpu[i], 1); + } +} + +void core3_board_init(SW64CPU *cpus[MAX_CPUS], MemoryRegion *ram) +{ + DeviceState *dev; + BoardState *bs; +#ifndef CONFIG_KVM + TimerState *ts; +#endif + MemoryRegion *io_mcu = g_new(MemoryRegion, 1); + MemoryRegion *io_intpu = g_new(MemoryRegion, 1); + MemoryRegion *msi_ep = g_new(MemoryRegion, 1); + qemu_irq serial_irq; + uint64_t MB = 1024 * 1024; + MemoryRegion *mem_ep = g_new(MemoryRegion, 1); + MemoryRegion *mem_ep64 = g_new(MemoryRegion, 1); + MemoryRegion *conf_piu0 = g_new(MemoryRegion, 1); + MemoryRegion *io_ep = g_new(MemoryRegion, 1); + MemoryRegion *io_rtc = g_new(MemoryRegion, 1); + MachineState *ms = MACHINE(qdev_get_machine()); + unsigned int smp_cpus = ms->smp.cpus; + + PCIBus *b; + PCIHostState *phb; + uint64_t GB = 1024 * MB; + + int i; + dev = qdev_new(TYPE_SWBOARD_PCI_HOST_BRIDGE); + phb = PCI_HOST_BRIDGE(dev); + bs = SWBOARD_PCI_HOST_BRIDGE(dev); + +#ifdef CONFIG_KVM + if (kvm_has_gsi_routing()) + msi_nonbroken = true; +#endif + + for (i = 0; i < smp_cpus; ++i) { + if (cpus[i] == NULL) + continue; + bs->sboard.cpu[i] = cpus[i]; +#ifndef CONFIG_KVM + ts = g_new(TimerState, 1); + ts->opaque = (void *) ((uintptr_t)bs); + ts->order = i; + cpus[i]->alarm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, &swboard_alarm_timer, ts); +#endif + } + memory_region_add_subregion(get_system_memory(), 0, ram); + + memory_region_init_io(io_mcu, NULL, &mcu_ops, bs, "io_mcu", 16 * MB); + memory_region_add_subregion(get_system_memory(), 0x803000000000ULL, io_mcu); + + memory_region_init_io(io_intpu, NULL, &intpu_ops, bs, "io_intpu", 1 * MB); + memory_region_add_subregion(get_system_memory(), 0x802a00000000ULL, + io_intpu); + + memory_region_init_io(msi_ep, NULL, &msi_ops, bs, "msi_ep", 1 * MB); + memory_region_add_subregion(get_system_memory(), 0x8000fee00000ULL, msi_ep); + + memory_region_init(mem_ep, OBJECT(bs), "pci0-mem", 0x890000000000ULL); + memory_region_add_subregion(get_system_memory(), 0x880000000000ULL, mem_ep); + + memory_region_init_alias(mem_ep64, NULL, "mem_ep64", mem_ep, 0x888000000000ULL, 1ULL << 39); + memory_region_add_subregion(get_system_memory(), 0x888000000000ULL, mem_ep64); + + memory_region_init_io(io_ep, OBJECT(bs), &core3_pci_ignore_ops, NULL, + "pci0-io-ep", 4 * GB); + + memory_region_add_subregion(get_system_memory(), 0x880100000000ULL, io_ep); + b = pci_register_root_bus(dev, "pcie.0", swboard_set_irq, swboard_map_irq, bs, + mem_ep, io_ep, 0, 537, TYPE_PCIE_BUS); + phb->bus = b; + sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal); + pci_bus_set_route_irq_fn(b, sw_route_intx_pin_to_irq); + memory_region_init_io(conf_piu0, OBJECT(bs), &core3_pci_config_ops, b, + "pci0-ep-conf-io", 4 * GB); + memory_region_add_subregion(get_system_memory(), 0x880600000000ULL, + conf_piu0); + memory_region_init_io(io_rtc, OBJECT(bs), &rtc_ops, b, + "sw64-rtc", 0x08ULL); + memory_region_add_subregion(get_system_memory(), 0x804910000000ULL, + io_rtc); +#ifdef SW64_VT_IOMMU + sw64_vt_iommu_init(b); +#endif + for (i = 0; i < nb_nics; i++) { + pci_nic_init_nofail(&nd_table[i], b, "e1000", NULL); + } + + pci_vga_init(b); +#define MAX_SATA_PORTS 6 + 
PCIDevice *ahci; + DriveInfo *hd[MAX_SATA_PORTS]; + ahci = pci_create_simple_multifunction(b, PCI_DEVFN(0x1f, 0), true, + TYPE_ICH9_AHCI); + g_assert(MAX_SATA_PORTS == ahci_get_num_ports(ahci)); + ide_drive_get(hd, ahci_get_num_ports(ahci)); + ahci_ide_create_devs(ahci, hd); + + serial_irq = qemu_allocate_irq(serial_set_irq, bs, 12); + if (serial_hd(0)) { + serial_mm_init(get_system_memory(), 0x3F8 + 0x880100000000ULL, 0, + serial_irq, (1843200 >> 4), serial_hd(0), + DEVICE_LITTLE_ENDIAN); + } + pci_create_simple(phb->bus, -1, "nec-usb-xhci"); + sw_create_fw_cfg(SW_FW_CFG_P_BASE); +} + +static const TypeInfo swboard_pcihost_info = { + .name = TYPE_SWBOARD_PCI_HOST_BRIDGE, + .parent = TYPE_PCI_HOST_BRIDGE, + .instance_size = sizeof(BoardState), +}; + +static void swboard_register_types(void) +{ + type_register_static(&swboard_pcihost_info); +} + +type_init(swboard_register_types) diff --git a/hw/sw64/meson.build b/hw/sw64/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..8abb18222a5ae67e0d32c81a9af002589f65e1a5 --- /dev/null +++ b/hw/sw64/meson.build @@ -0,0 +1,10 @@ +sw64_ss = ss.source_set() + +sw64_ss.add(files('sw64_iommu.c')) + +sw64_ss.add(when: 'CONFIG_CORE3', if_true: files( + 'core3.c', + 'core3_board.c', +)) + +hw_arch += {'sw64': sw64_ss} diff --git a/hw/sw64/sw64_iommu.c b/hw/sw64/sw64_iommu.c new file mode 100644 index 0000000000000000000000000000000000000000..1ede2a2ce43d45f37bb2d023c193bcc47d0b71e7 --- /dev/null +++ b/hw/sw64/sw64_iommu.c @@ -0,0 +1,570 @@ +/* + * QEMU sw64 IOMMU emulation + * + * Copyright (c) 2021 Lu Feifei + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ + +#include "qemu/osdep.h" +#include "hw/sysbus.h" +#include "exec/address-spaces.h" +#include "qemu/log.h" +#include "qapi/error.h" +#include "hw/sw64/sw64_iommu.h" +#include "sysemu/kvm.h" + +#define IOMMU_PAGE_SHIFT 13 +#define IOMMU_PAGE_SIZE_8K (1ULL << IOMMU_PAGE_SHIFT) +#define IOMMU_PAGE_MASK_8K (~(IOMMU_PAGE_SIZE_8K - 1)) +#define IOMMU_IOVA_SHIFT 16 +#define SW64IOMMU_PTIOTLB_MAX_SIZE 256 + +static MemTxResult swvt_msi_read(void *opaque, hwaddr addr, + uint64_t *data, unsigned size, MemTxAttrs attrs) +{ + return MEMTX_OK; +} + +static MemTxResult swvt_msi_write(void *opaque, hwaddr addr, + uint64_t value, unsigned size, + MemTxAttrs attrs) +{ + MemTxResult ret; + + ret = msi_write(opaque, addr, value, size, attrs); + + return ret; +} + +static const MemoryRegionOps swvt_msi_ops = { + .read_with_attrs = swvt_msi_read, + .write_with_attrs = swvt_msi_write, + .endianness = DEVICE_LITTLE_ENDIAN, + .valid = { + .min_access_size = 1, + .max_access_size = 8, + }, + .impl = { + .min_access_size = 1, + .max_access_size = 8, + }, +}; + +SWVTAddressSpace *iommu_find_add_as(SW64IOMMUState *s, PCIBus *bus, int devfn) +{ + uintptr_t key = (uintptr_t)bus; + SWVTBus *swvt_bus = g_hash_table_lookup(s->swvtbus_as_by_busptr, &key); + SWVTAddressSpace *swvt_dev_as; + char name[128]; + + if (!swvt_bus) { + uintptr_t *new_key = g_malloc(sizeof(*new_key)); + *new_key = (uintptr_t)bus; + /* No corresponding free() */ + swvt_bus = g_malloc0(sizeof(SWVTBus) + sizeof(SWVTAddressSpace *) * \ + PCI_DEVFN_MAX); + swvt_bus->bus = bus; + g_hash_table_insert(s->swvtbus_as_by_busptr, new_key, swvt_bus); + } + swvt_dev_as = swvt_bus->dev_as[devfn]; + if (!swvt_dev_as) { + snprintf(name, sizeof(name), "sw64_iommu_devfn_%d", devfn); + swvt_bus->dev_as[devfn] = swvt_dev_as = g_malloc0(sizeof(SWVTAddressSpace)); + + swvt_dev_as->bus = bus; + swvt_dev_as->devfn = (uint8_t)devfn; + swvt_dev_as->iommu_state = s; + + memory_region_init_iommu(&swvt_dev_as->iommu, sizeof(swvt_dev_as->iommu), + TYPE_SW64_IOMMU_MEMORY_REGION, OBJECT(s), + "sw64_iommu_dmar", + 1UL << 32); + memory_region_init_io(&swvt_dev_as->msi, OBJECT(s), + &swvt_msi_ops, s, "sw_msi", 1 * 1024 * 1024); + memory_region_init(&swvt_dev_as->root, OBJECT(s), + "swvt_root", UINT64_MAX); + memory_region_add_subregion_overlap(&swvt_dev_as->root, + 0x8000fee00000ULL, + &swvt_dev_as->msi, 64); + address_space_init(&swvt_dev_as->as, &swvt_dev_as->root, name); + memory_region_add_subregion_overlap(&swvt_dev_as->root, 0, + MEMORY_REGION(&swvt_dev_as->iommu), + 1); + } + + memory_region_set_enabled(MEMORY_REGION(&swvt_dev_as->iommu), true); + + return swvt_dev_as; +} + +/** + * get_pte - Get the content of a page table entry located at + * @base_addr[@index] + */ +static int get_pte(dma_addr_t baseaddr, uint64_t *pte) +{ + int ret; + + /* TODO: guarantee 64-bit single-copy atomicity */ + ret = dma_memory_read(&address_space_memory, baseaddr, + (uint8_t *)pte, sizeof(*pte), MEMTXATTRS_UNSPECIFIED); + + if (ret != MEMTX_OK) + return -EINVAL; + + return 0; +} + +static bool swvt_do_iommu_translate(SWVTAddressSpace *swvt_as, PCIBus *bus, + uint8_t devfn, hwaddr addr, IOMMUTLBEntry *entry) +{ + SW64IOMMUState *s = swvt_as->iommu_state; + uint8_t bus_num = pci_bus_num(bus); + unsigned long dtbbaseaddr, dtbbasecond; + unsigned long pdebaseaddr, ptebaseaddr; + unsigned long pte; + uint16_t source_id; + SW64DTIOTLBEntry *dtcached_entry = NULL; + SW64DTIOTLBKey dtkey, *new_key; + + dtcached_entry = g_hash_table_lookup(s->dtiotlb, &dtkey); + + if (unlikely(!dtcached_entry)) { 
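    /*
     * Sketch of the walk performed below (layout inferred from the code):
     *   s->dtbr  -> level-1 device table, indexed by bus number (bus_num << 3)
     *     entry  -> level-2 table, indexed by devfn (devfn << 3)
     *       entry -> page-table base, cached in dtiotlb under source_id
     * Each entry carries SW_IOMMU_ENTRY_VALID in its low bits, which is
     * masked off before the next dereference.
     */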
+ dtbbaseaddr = s->dtbr + (bus_num << 3); + + if (get_pte(dtbbaseaddr, &pte)) + goto error; + + dtbbasecond = (pte & (~(SW_IOMMU_ENTRY_VALID))) + (devfn << 3); + if (get_pte(dtbbasecond, &pte)) + goto error; + + source_id = ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL); + dtcached_entry = g_new0(SW64DTIOTLBEntry, 1); + dtcached_entry->ptbase_addr = pte & (~(SW_IOMMU_ENTRY_VALID)); + dtcached_entry->source_id = source_id; + + new_key = g_new0(SW64DTIOTLBKey, 1); + new_key->source_id = source_id; + + g_hash_table_insert(s->dtiotlb, new_key, dtcached_entry); + } + + pdebaseaddr = dtcached_entry->ptbase_addr; + pdebaseaddr += ((addr >> 23) & SW_IOMMU_LEVEL1_OFFSET) << 3; + + if (get_pte(pdebaseaddr, &pte)) + goto error; + + ptebaseaddr = pte & (~(SW_IOMMU_ENTRY_VALID)); + ptebaseaddr += ((addr >> IOMMU_PAGE_SHIFT) & SW_IOMMU_LEVEL2_OFFSET) << 3; + + if (get_pte(ptebaseaddr, &pte)) + goto error; + + pte &= ~(SW_IOMMU_ENTRY_VALID | SW_IOMMU_GRN | SW_IOMMU_ENABLE); + entry->translated_addr = pte; + entry->addr_mask = IOMMU_PAGE_SIZE_8K - 1; + + return 0; + +error: + entry->perm = IOMMU_NONE; + return -EINVAL; +} + +static void swvt_ptiotlb_inv_all(SW64IOMMUState *s) +{ + g_hash_table_remove_all(s->ptiotlb); +} + +static IOMMUTLBEntry *swvt_lookup_ptiotlb(SW64IOMMUState *s, uint16_t source_id, + hwaddr addr) +{ + SW64PTIOTLBKey ptkey; + IOMMUTLBEntry *entry = NULL; + + ptkey.source_id = source_id; + ptkey.iova = addr; + + entry = g_hash_table_lookup(s->ptiotlb, &ptkey); + + return entry; +} + +static IOMMUTLBEntry sw64_translate_iommu(IOMMUMemoryRegion *iommu, hwaddr addr, + IOMMUAccessFlags flag, int iommu_idx) +{ + SWVTAddressSpace *swvt_as = container_of(iommu, SWVTAddressSpace, iommu); + SW64IOMMUState *s = swvt_as->iommu_state; + IOMMUTLBEntry *cached_entry = NULL; + IOMMUTLBEntry entry = { + .target_as = &address_space_memory, + .iova = addr, + .translated_addr = addr, + .addr_mask = ~(hwaddr)0, + .perm = IOMMU_NONE, + }; + uint8_t bus_num = pci_bus_num(swvt_as->bus); + uint16_t source_id; + SW64PTIOTLBKey *new_ptkey; + hwaddr aligned_addr; + + source_id = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL); + + qemu_mutex_lock(&s->iommu_lock); + + aligned_addr = addr & IOMMU_PAGE_MASK_8K; + + cached_entry = swvt_lookup_ptiotlb(s, source_id, aligned_addr); + + if (cached_entry) + goto out; + + if (g_hash_table_size(s->ptiotlb) >= SW64IOMMU_PTIOTLB_MAX_SIZE) { + swvt_ptiotlb_inv_all(s); + } + + cached_entry = g_new0(IOMMUTLBEntry, 1); + + if (swvt_do_iommu_translate(swvt_as, swvt_as->bus, swvt_as->devfn, + addr, cached_entry)) { + g_free(cached_entry); + qemu_mutex_unlock(&s->iommu_lock); + printf("%s: detected translation failure " + "(busnum=%d, devfn=%#x, iova=%#lx.\n", + __func__, pci_bus_num(swvt_as->bus), swvt_as->devfn, + entry.iova); + entry.iova = 0; + entry.translated_addr = 0; + entry.addr_mask = 0; + entry.perm = IOMMU_NONE; + + return entry; + } else { + new_ptkey = g_new0(SW64PTIOTLBKey, 1); + new_ptkey->source_id = source_id; + new_ptkey->iova = aligned_addr; + g_hash_table_insert(s->ptiotlb, new_ptkey, cached_entry); + } + +out: + qemu_mutex_unlock(&s->iommu_lock); + entry.perm = flag; + entry.translated_addr = cached_entry->translated_addr + + (addr & (IOMMU_PAGE_SIZE_8K - 1)); + entry.addr_mask = cached_entry->addr_mask; + + return entry; +} + +static void swvt_ptiotlb_inv_iova(SW64IOMMUState *s, uint16_t source_id, dma_addr_t iova) +{ + SW64PTIOTLBKey key = {.source_id = source_id, .iova = iova}; + + qemu_mutex_lock(&s->iommu_lock); + g_hash_table_remove(s->ptiotlb, &key); 
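    /*
     * Note: both IOTLB hash tables are created with swvt_uint64_hash /
     * swvt_uint64_equal (see iommu_realize below), which examine only the
     * first 64 bits of the key. For SW64PTIOTLBKey, which carries both
     * source_id and iova, a composite pair would match on both fields,
     * e.g. (sketch; the exact field layout of SW64PTIOTLBKey is assumed):
     *   hash : (guint)(k->iova >> IOMMU_PAGE_SHIFT) ^ k->source_id
     *   equal: k1->source_id == k2->source_id && k1->iova == k2->iova
     */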
+ qemu_mutex_unlock(&s->iommu_lock); +} + +void swvt_address_space_unmap_iova(SW64IOMMUState *s, unsigned long val) +{ + SWVTAddressSpace *swvt_as; + IOMMUNotifier *n; + uint16_t source_id; + dma_addr_t iova; + IOMMUTLBEvent event; + + source_id = val & 0xffff; + iova = (val >> IOMMU_IOVA_SHIFT) << IOMMU_PAGE_SHIFT; + + swvt_ptiotlb_inv_iova(s, source_id, iova); + + QLIST_FOREACH(swvt_as, &s->swvt_as_with_notifiers, next) { + uint8_t bus_num = pci_bus_num(swvt_as->bus); + uint16_t as_sourceid = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL); + + if (as_sourceid == source_id) { + IOMMU_NOTIFIER_FOREACH(n, &swvt_as->iommu) { + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.target_as = &address_space_memory; + event.entry.iova = iova & IOMMU_PAGE_MASK_8K; + event.entry.translated_addr = 0; + event.entry.perm = IOMMU_NONE; + event.entry.addr_mask = IOMMU_PAGE_SIZE_8K - 1; + + memory_region_notify_iommu(&swvt_as->iommu, 0, event); + } + } + } +} + +/* Unmap the whole range in the notifier's scope. */ +static void swvt_address_space_unmap(SWVTAddressSpace *as, IOMMUNotifier *n) +{ + IOMMUTLBEvent event; + hwaddr size; + hwaddr start = n->start; + hwaddr end = n->end; + + assert(start <= end); + size = end - start; + + event.entry.target_as = &address_space_memory; + /* Adjust iova for the size */ + event.entry.iova = n->start & ~(size - 1); + /* This field is meaningless for unmap */ + event.entry.translated_addr = 0; + event.entry.perm = IOMMU_NONE; + event.entry.addr_mask = size - 1; + + memory_region_notify_iommu_one(n, &event); +} + +void swvt_address_space_map_iova(SW64IOMMUState *s, unsigned long val) +{ + SWVTAddressSpace *swvt_as; + IOMMUNotifier *n; + uint16_t source_id; + dma_addr_t iova; + IOMMUTLBEvent event; + int ret; + + source_id = val & 0xffff; + iova = (val >> IOMMU_IOVA_SHIFT) << IOMMU_PAGE_SHIFT; + + swvt_ptiotlb_inv_iova(s, source_id, iova); + + QLIST_FOREACH(swvt_as, &s->swvt_as_with_notifiers, next) { + uint8_t bus_num = pci_bus_num(swvt_as->bus); + uint16_t as_sourceid = ((bus_num & 0xffUL) << 8) | (swvt_as->devfn & 0xffUL); + + if (as_sourceid == source_id) { + IOMMU_NOTIFIER_FOREACH(n, &swvt_as->iommu) { + event.type = IOMMU_NOTIFIER_UNMAP; + event.entry.target_as = &address_space_memory; + event.entry.iova = iova & IOMMU_PAGE_MASK_8K; + event.entry.perm = IOMMU_RW; + + ret = swvt_do_iommu_translate(swvt_as, swvt_as->bus, + swvt_as->devfn, iova, &event.entry); + if (ret) + goto out; + + memory_region_notify_iommu(&swvt_as->iommu, 0, event); + } + } + } +out: + return; +} + +void swvt_address_space_invalidate_iova(SW64IOMMUState *s, unsigned long val) +{ + int map_flag; + + map_flag = val >> 36; + + if (map_flag) + swvt_address_space_map_iova(s, val & 0xfffffffff); + else + swvt_address_space_unmap_iova(s, val); + + return; +} + +static AddressSpace *sw64_dma_iommu(PCIBus *bus, void *opaque, int devfn) +{ + SW64IOMMUState *s = opaque; + SWVTAddressSpace *swvt_as; + + assert(0 <= devfn && devfn < PCI_DEVFN_MAX); + + swvt_as = iommu_find_add_as(s, bus, devfn); + return &swvt_as->as; +} + +static uint64_t piu0_read(void *opaque, hwaddr addr, unsigned size) +{ + uint64_t ret = 0; + switch (addr) { + default: + break; + } + return ret; +} + +static void piu0_write(void *opaque, hwaddr addr, uint64_t val, + unsigned size) +{ + SW64IOMMUState *s = (SW64IOMMUState *)opaque; + + switch (addr) { + case 0xb000: + /* DTBaseAddr */ + s->dtbr = val; + break; + case 0xb280: + /* PTLB_FlushVAddr */ + swvt_address_space_invalidate_iova(s, val); + break; + default: + break; + } 
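    /*
     * Doorbell layout decoded by swvt_address_space_invalidate_iova(),
     * as implied by the shifts above: bit 36 selects map vs. unmap,
     * bits 35..16 hold iova >> IOMMU_PAGE_SHIFT, and bits 15..0 hold the
     * source id ((bus << 8) | devfn). For example, an unmap flush for
     * bus 1, devfn 0x10, iova 0x4000 is ((0x4000 >> 13) << 16) | 0x0110
     * = 0x20110.
     */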
+}
+
+const MemoryRegionOps core3_pci_piu0_ops = {
+    .read = piu0_read,
+    .write = piu0_write,
+    .endianness = DEVICE_LITTLE_ENDIAN,
+    .valid = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+    },
+    .impl = {
+        .min_access_size = 1,
+        .max_access_size = 8,
+    },
+};
+
+void sw64_vt_iommu_init(PCIBus *b)
+{
+    DeviceState *dev_iommu;
+    SW64IOMMUState *s;
+    MemoryRegion *io_piu0 = g_new(MemoryRegion, 1);
+
+    dev_iommu = qdev_new(TYPE_SW64_IOMMU);
+    s = SW64_IOMMU(dev_iommu);
+
+    s->pci_bus = b;
+    sysbus_realize_and_unref(SYS_BUS_DEVICE(dev_iommu), &error_fatal);
+
+    pci_setup_iommu(b, sw64_dma_iommu, dev_iommu);
+
+    memory_region_init_io(io_piu0, OBJECT(s), &core3_pci_piu0_ops, s,
+                          "pci0-piu0-io", 4 * 1024 * 1024);
+    memory_region_add_subregion(get_system_memory(), 0x880200000000ULL,
+                                io_piu0);
+}
+
+static int swvt_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
+                                          IOMMUNotifierFlag old,
+                                          IOMMUNotifierFlag new,
+                                          Error **errp)
+{
+    SWVTAddressSpace *swvt_as = container_of(iommu, SWVTAddressSpace, iommu);
+    SW64IOMMUState *s = swvt_as->iommu_state;
+
+    /* Update per-address-space notifier flags */
+    swvt_as->notifier_flags = new;
+
+    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
+        error_setg(errp, "swvt does not support dev-iotlb yet");
+        return -EINVAL;
+    }
+
+    if (old == IOMMU_NOTIFIER_NONE) {
+        QLIST_INSERT_HEAD(&s->swvt_as_with_notifiers, swvt_as, next);
+    } else if (new == IOMMU_NOTIFIER_NONE) {
+        QLIST_REMOVE(swvt_as, next);
+    }
+    return 0;
+}
+
+static void swvt_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
+{
+    SWVTAddressSpace *swvt_as = container_of(iommu_mr, SWVTAddressSpace, iommu);
+
+    /*
+     * The replay can be triggered by either an invalidation or a newly
+     * created entry. No matter what, we release existing mappings
+     * (it means flushing caches for UNMAP-only notifiers).
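+     * Since this model keeps no shadow page table per notifier, a full
+     * unmap of the notifier's range is the conservative way to bring
+     * it back in sync.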
+ */ + swvt_address_space_unmap(swvt_as, n); +} + +/* GHashTable functions */ +static gboolean swvt_uint64_equal(gconstpointer v1, gconstpointer v2) +{ + return *((const uint64_t *)v1) == *((const uint64_t *)v2); +} + +static guint swvt_uint64_hash(gconstpointer v) +{ + return (guint)*(const uint64_t *)v; +} + +static void iommu_realize(DeviceState *d, Error **errp) +{ + SW64IOMMUState *s = SW64_IOMMU(d); + + QLIST_INIT(&s->swvt_as_with_notifiers); + qemu_mutex_init(&s->iommu_lock); + + s->dtiotlb = g_hash_table_new_full(swvt_uint64_hash, swvt_uint64_equal, + g_free, g_free); + s->ptiotlb = g_hash_table_new_full(swvt_uint64_hash, swvt_uint64_equal, + g_free, g_free); + + s->swvtbus_as_by_busptr = g_hash_table_new(NULL, NULL); +} + +static void iommu_reset(DeviceState *d) +{ +} + +static void sw64_iommu_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + + dc->reset = iommu_reset; + dc->realize = iommu_realize; +} + +static void sw64_iommu_memory_region_class_init(ObjectClass *klass, void *data) +{ + IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); + + imrc->translate = sw64_translate_iommu; + imrc->notify_flag_changed = swvt_iommu_notify_flag_changed; + imrc->replay = swvt_iommu_replay; +} + +static const TypeInfo sw64_iommu_info = { + .name = TYPE_SW64_IOMMU, + .parent = TYPE_SYS_BUS_DEVICE, + .instance_size = sizeof(SW64IOMMUState), + .class_init = sw64_iommu_class_init, + .class_size = sizeof(SW64IOMMUClass), +}; + +static const TypeInfo sw64_iommu_memory_region_info = { + .parent = TYPE_IOMMU_MEMORY_REGION, + .name = TYPE_SW64_IOMMU_MEMORY_REGION, + .class_init = sw64_iommu_memory_region_class_init, +}; + +static void sw64_iommu_register_types(void) +{ + type_register_static(&sw64_iommu_info); + type_register_static(&sw64_iommu_memory_region_info); +} + +type_init(sw64_iommu_register_types) diff --git a/hw/sw64/trace-events b/hw/sw64/trace-events new file mode 100644 index 0000000000000000000000000000000000000000..1aa744c984979af6d8231bcff799dc6309951030 --- /dev/null +++ b/hw/sw64/trace-events @@ -0,0 +1,3 @@ +# See docs/devel/tracing.rst for syntax documentation. + +# pci.c diff --git a/hw/timer/Kconfig b/hw/timer/Kconfig index 010be7ed1f56d72069825a236ae164f04079f353..b395c72d7d31185685353a81a2701a2c97e7b847 100644 --- a/hw/timer/Kconfig +++ b/hw/timer/Kconfig @@ -60,3 +60,5 @@ config STELLARIS_GPTM config AVR_TIMER16 bool +config LS7A_RTC + bool diff --git a/hw/timer/imx_epit.c b/hw/timer/imx_epit.c index ebd58254d15fd9d8ca37bbc50592717c2a8b4a1f..a78b625d15ce2ee31e27f3221438fb7459fd9fba 100644 --- a/hw/timer/imx_epit.c +++ b/hw/timer/imx_epit.c @@ -275,10 +275,15 @@ static void imx_epit_write(void *opaque, hwaddr offset, uint64_t value, /* If IOVW bit is set then set the timer value */ ptimer_set_count(s->timer_reload, s->lr); } - + /* + * Commit the change to s->timer_reload, so it can propagate. Otherwise + * the timer interrupt may not fire properly. The commit must happen + * before calling imx_epit_reload_compare_timer(), which reads + * s->timer_reload internally again. 
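+         * Committing a transaction that made no changes is harmless,
+         * and s->timer_cmp stays inside its own transaction until the
+         * commit below, so only the reload timer is affected here.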
+         */
+        ptimer_transaction_commit(s->timer_reload);
         imx_epit_reload_compare_timer(s);
         ptimer_transaction_commit(s->timer_cmp);
-        ptimer_transaction_commit(s->timer_reload);
         break;
 
     case 3: /* CMP */
diff --git a/hw/timer/ls7a_rtc.c b/hw/timer/ls7a_rtc.c
new file mode 100644
index 0000000000000000000000000000000000000000..56c2695654b6e0d99333e22eb0b53886f7adbd25
--- /dev/null
+++ b/hw/timer/ls7a_rtc.c
@@ -0,0 +1,343 @@
+/*
+ * Loongarch rtc emulation
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "hw/sysbus.h"
+#include "hw/irq.h"
+#include "include/hw/register.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "qemu/cutils.h"
+#include "qemu/log.h"
+#include "qemu-common.h"
+#include "migration/vmstate.h"
+
+#ifdef DEBUG_LS7A_RTC
+#define DPRINTF(fmt, ...) \
+    do { \
+        printf("ls7a_rtc: " fmt, ##__VA_ARGS__); \
+    } while (0)
+#else
+#define DPRINTF(fmt, ...) \
+    do { \
+    } while (0)
+#endif
+
+#define SYS_TOYTRIM 0x20
+#define SYS_TOYWRITE0 0x24
+#define SYS_TOYWRITE1 0x28
+#define SYS_TOYREAD0 0x2C
+#define SYS_TOYREAD1 0x30
+#define SYS_TOYMATCH0 0x34
+#define SYS_TOYMATCH1 0x38
+#define SYS_TOYMATCH2 0x3C
+#define SYS_RTCCTRL 0x40
+#define SYS_RTCTRIM 0x60
+#define SYS_RTCWRITE0 0x64
+#define SYS_RTCREAD0 0x68
+#define SYS_RTCMATCH0 0x6C
+#define SYS_RTCMATCH1 0x70
+#define SYS_RTCMATCH2 0x74
+
+/*
+ * shift bits and field masks
+ */
+#define TOY_MON_MASK 0x3f
+#define TOY_DAY_MASK 0x1f
+#define TOY_HOUR_MASK 0x1f
+#define TOY_MIN_MASK 0x3f
+#define TOY_SEC_MASK 0x3f
+#define TOY_MSEC_MASK 0xf
+
+#define TOY_MON_SHIFT 26
+#define TOY_DAY_SHIFT 21
+#define TOY_HOUR_SHIFT 16
+#define TOY_MIN_SHIFT 10
+#define TOY_SEC_SHIFT 4
+#define TOY_MSEC_SHIFT 0
+
+#define TOY_MATCH_YEAR_MASK 0x3f
+#define TOY_MATCH_MON_MASK 0xf
+#define TOY_MATCH_DAY_MASK 0x1f
+#define TOY_MATCH_HOUR_MASK 0x1f
+#define TOY_MATCH_MIN_MASK 0x3f
+#define TOY_MATCH_SEC_MASK 0x3f
+
+#define TOY_MATCH_YEAR_SHIFT 26
+#define TOY_MATCH_MON_SHIFT 22
+#define TOY_MATCH_DAY_SHIFT 17
+#define TOY_MATCH_HOUR_SHIFT 12
+#define TOY_MATCH_MIN_SHIFT 6
+#define TOY_MATCH_SEC_SHIFT 0
+
+#define TOY_ENABLE_BIT (1U << 11)
+
+#define TYPE_LS7A_RTC "ls7a_rtc"
+#define LS7A_RTC(obj) OBJECT_CHECK(LS7A_RTCState, (obj), TYPE_LS7A_RTC)
+
+typedef struct LS7A_RTCState {
+    SysBusDevice parent_obj;
+
+    MemoryRegion iomem;
+    QEMUTimer *timer;
+    /*
+     * Needed to preserve the wall-clock offset across migration, even if
+     * the absolute value of the rtc_clock is different on the source and
+     * destination.
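+     * The alarm is migrated the same way, as a relative offset
+     * (save_alarm_offset, computed in ls7a_rtc_pre_save() and re-armed
+     * in ls7a_rtc_post_load() below) rather than as an absolute expiry.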
+     */
+    int64_t offset;
+    int64_t data;
+    int64_t save_alarm_offset;
+    int tidx;
+    uint32_t toymatch[3];
+    uint32_t toytrim;
+    uint32_t cntrctl;
+    uint32_t rtctrim;
+    uint32_t rtccount;
+    uint32_t rtcmatch[3];
+    qemu_irq toy_irq;
+} LS7A_RTCState;
+
+enum {
+    TOYEN = 1UL << 11,
+    RTCEN = 1UL << 13,
+};
+
+static uint64_t ls7a_rtc_read(void *opaque, hwaddr addr, unsigned size)
+{
+    LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+    struct tm tm;
+    unsigned int val = 0;
+
+    switch (addr) {
+    case SYS_TOYREAD0:
+        qemu_get_timedate(&tm, s->offset);
+        val = (((tm.tm_mon + 1) & TOY_MON_MASK) << TOY_MON_SHIFT) |
+              (((tm.tm_mday) & TOY_DAY_MASK) << TOY_DAY_SHIFT) |
+              (((tm.tm_hour) & TOY_HOUR_MASK) << TOY_HOUR_SHIFT) |
+              (((tm.tm_min) & TOY_MIN_MASK) << TOY_MIN_SHIFT) |
+              (((tm.tm_sec) & TOY_SEC_MASK) << TOY_SEC_SHIFT) | 0x0;
+        break;
+    case SYS_TOYREAD1:
+        qemu_get_timedate(&tm, s->offset);
+        val = tm.tm_year;
+        break;
+    case SYS_TOYMATCH0:
+        val = s->toymatch[0];
+        break;
+    case SYS_TOYMATCH1:
+        val = s->toymatch[1];
+        break;
+    case SYS_TOYMATCH2:
+        val = s->toymatch[2];
+        break;
+    case SYS_RTCCTRL:
+        val = s->cntrctl;
+        break;
+    case SYS_RTCREAD0:
+        val = s->rtccount;
+        break;
+    case SYS_RTCMATCH0:
+        val = s->rtcmatch[0];
+        break;
+    case SYS_RTCMATCH1:
+        val = s->rtcmatch[1];
+        break;
+    case SYS_RTCMATCH2:
+        val = s->rtcmatch[2];
+        break;
+    default:
+        break;
+    }
+    return val;
+}
+
+static void ls7a_rtc_write(void *opaque, hwaddr addr, uint64_t val,
+                           unsigned size)
+{
+    LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+    struct tm tm;
+    int64_t alarm_offset, year_diff, expire_time;
+
+    switch (addr) {
+    case SYS_TOYWRITE0:
+        qemu_get_timedate(&tm, s->offset);
+        tm.tm_sec = (val >> TOY_SEC_SHIFT) & TOY_SEC_MASK;
+        tm.tm_min = (val >> TOY_MIN_SHIFT) & TOY_MIN_MASK;
+        tm.tm_hour = (val >> TOY_HOUR_SHIFT) & TOY_HOUR_MASK;
+        tm.tm_mday = ((val >> TOY_DAY_SHIFT) & TOY_DAY_MASK);
+        tm.tm_mon = ((val >> TOY_MON_SHIFT) & TOY_MON_MASK) - 1;
+        s->offset = qemu_timedate_diff(&tm);
+        break;
+    case SYS_TOYWRITE1:
+        qemu_get_timedate(&tm, s->offset);
+        tm.tm_year = val;
+        s->offset = qemu_timedate_diff(&tm);
+        break;
+    case SYS_TOYMATCH0:
+        s->toymatch[0] = val;
+        qemu_get_timedate(&tm, s->offset);
+        tm.tm_sec = (val >> TOY_MATCH_SEC_SHIFT) & TOY_MATCH_SEC_MASK;
+        tm.tm_min = (val >> TOY_MATCH_MIN_SHIFT) & TOY_MATCH_MIN_MASK;
+        tm.tm_hour = ((val >> TOY_MATCH_HOUR_SHIFT) & TOY_MATCH_HOUR_MASK);
+        tm.tm_mday = ((val >> TOY_MATCH_DAY_SHIFT) & TOY_MATCH_DAY_MASK);
+        tm.tm_mon = ((val >> TOY_MATCH_MON_SHIFT) & TOY_MATCH_MON_MASK) - 1;
+        year_diff = ((val >> TOY_MATCH_YEAR_SHIFT) & TOY_MATCH_YEAR_MASK);
+        year_diff = year_diff - (tm.tm_year & TOY_MATCH_YEAR_MASK);
+        tm.tm_year = tm.tm_year + year_diff;
+        alarm_offset = qemu_timedate_diff(&tm) - s->offset;
+        if ((alarm_offset < 0) && (alarm_offset > -5)) {
+            alarm_offset = 0;
+        }
+        expire_time = qemu_clock_get_ms(rtc_clock);
+        expire_time += ((alarm_offset * 1000) + 100);
+        timer_mod(s->timer, expire_time);
+        break;
+    case SYS_TOYMATCH1:
+        s->toymatch[1] = val;
+        break;
+    case SYS_TOYMATCH2:
+        s->toymatch[2] = val;
+        break;
+    case SYS_RTCCTRL:
+        s->cntrctl = val;
+        break;
+    case SYS_RTCWRITE0:
+        s->rtccount = val;
+        break;
+    case SYS_RTCMATCH0:
+        s->rtcmatch[0] = val;
+        break;
+    case SYS_RTCMATCH1:
+        s->rtcmatch[1] = val;
+        break;
+    case SYS_RTCMATCH2:
+        s->rtcmatch[2] = val;
+        break;
+    default:
+        break;
+    }
+}
+
+static const MemoryRegionOps ls7a_rtc_ops = {
+    .read = ls7a_rtc_read,
+    .write = ls7a_rtc_write,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+    .valid = {
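+        /* only 32-bit guest accesses are valid for these registers */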
+        .min_access_size = 4,
+        .max_access_size = 4,
+    },
+};
+
+static void toy_timer(void *opaque)
+{
+    LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+
+    if (s->cntrctl & TOY_ENABLE_BIT) {
+        qemu_irq_pulse(s->toy_irq);
+    }
+}
+
+static void ls7a_rtc_realize(DeviceState *dev, Error **errp)
+{
+    SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
+    LS7A_RTCState *d = LS7A_RTC(sbd);
+
+    memory_region_init_io(&d->iomem, NULL, &ls7a_rtc_ops, (void *)d,
+                          "ls7a_rtc", 0x100);
+
+    sysbus_init_irq(sbd, &d->toy_irq);
+
+    sysbus_init_mmio(sbd, &d->iomem);
+    d->timer = timer_new_ms(rtc_clock, toy_timer, d);
+    timer_mod(d->timer, qemu_clock_get_ms(rtc_clock) + 100);
+    d->offset = 0;
+}
+
+static int ls7a_rtc_pre_save(void *opaque)
+{
+    LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+    struct tm tm;
+    int64_t year_diff, value;
+
+    value = s->toymatch[0];
+    qemu_get_timedate(&tm, s->offset);
+    tm.tm_sec = (value >> TOY_MATCH_SEC_SHIFT) & TOY_MATCH_SEC_MASK;
+    tm.tm_min = (value >> TOY_MATCH_MIN_SHIFT) & TOY_MATCH_MIN_MASK;
+    tm.tm_hour = ((value >> TOY_MATCH_HOUR_SHIFT) & TOY_MATCH_HOUR_MASK);
+    tm.tm_mday = ((value >> TOY_MATCH_DAY_SHIFT) & TOY_MATCH_DAY_MASK);
+    tm.tm_mon = ((value >> TOY_MATCH_MON_SHIFT) & TOY_MATCH_MON_MASK) - 1;
+    year_diff = ((value >> TOY_MATCH_YEAR_SHIFT) & TOY_MATCH_YEAR_MASK);
+    year_diff = year_diff - (tm.tm_year & TOY_MATCH_YEAR_MASK);
+    tm.tm_year = tm.tm_year + year_diff;
+    s->save_alarm_offset = qemu_timedate_diff(&tm) - s->offset;
+
+    return 0;
+}
+
+static int ls7a_rtc_post_load(void *opaque, int version_id)
+{
+    LS7A_RTCState *s = (LS7A_RTCState *)opaque;
+    int64_t expire_time;
+
+    expire_time = qemu_clock_get_ms(rtc_clock) + (s->save_alarm_offset * 1000);
+    timer_mod(s->timer, expire_time);
+
+    return 0;
+}
+
+static const VMStateDescription vmstate_ls7a_rtc = {
+    .name = "ls7a_rtc",
+    .version_id = 1,
+    .minimum_version_id = 1,
+    .pre_save = ls7a_rtc_pre_save,
+    .post_load = ls7a_rtc_post_load,
+    .fields = (VMStateField[]) {
+        VMSTATE_INT64(offset, LS7A_RTCState),
+        VMSTATE_INT64(save_alarm_offset, LS7A_RTCState),
+        VMSTATE_UINT32(toymatch[0], LS7A_RTCState),
+        VMSTATE_UINT32(cntrctl, LS7A_RTCState),
+        VMSTATE_END_OF_LIST()
+    }
+};
+
+static void ls7a_rtc_class_init(ObjectClass *klass, void *data)
+{
+    DeviceClass *dc = DEVICE_CLASS(klass);
+
+    dc->vmsd = &vmstate_ls7a_rtc;
+    dc->realize = ls7a_rtc_realize;
+    dc->desc = "ls7a rtc";
+}
+
+static const TypeInfo ls7a_rtc_info = {
+    .name = TYPE_LS7A_RTC,
+    .parent = TYPE_SYS_BUS_DEVICE,
+    .instance_size = sizeof(LS7A_RTCState),
+    .class_init = ls7a_rtc_class_init,
+};
+
+static void ls7a_rtc_register_types(void)
+{
+    type_register_static(&ls7a_rtc_info);
+}
+
+type_init(ls7a_rtc_register_types)
diff --git a/hw/timer/meson.build b/hw/timer/meson.build
index 03092e2cebf4e8811084b643af6acd3bf52df7e7..e841a2f6ee883d8c0b9919a90c671817c8f2ce20 100644
--- a/hw/timer/meson.build
+++ b/hw/timer/meson.build
@@ -16,6 +16,7 @@ softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_mct.c'))
 softmmu_ss.add(when: 'CONFIG_EXYNOS4', if_true: files('exynos4210_pwm.c'))
 softmmu_ss.add(when: 'CONFIG_GRLIB', if_true: files('grlib_gptimer.c'))
 softmmu_ss.add(when: 'CONFIG_HPET', if_true: files('hpet.c'))
+softmmu_ss.add(when: 'CONFIG_LS7A_RTC', if_true: files('ls7a_rtc.c'))
 softmmu_ss.add(when: 'CONFIG_I8254', if_true: files('i8254_common.c', 'i8254.c'))
 softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_epit.c'))
 softmmu_ss.add(when: 'CONFIG_IMX', if_true: files('imx_gpt.c'))
diff --git a/hw/timer/npcm7xx_timer.c b/hw/timer/npcm7xx_timer.c
index
32f5e021f85709122d32587929b76daeabb21bfc..a8bd93aeb2cfd761c200b16c21a3428291a8d81f 100644 --- a/hw/timer/npcm7xx_timer.c +++ b/hw/timer/npcm7xx_timer.c @@ -138,6 +138,9 @@ static int64_t npcm7xx_timer_count_to_ns(NPCM7xxTimer *t, uint32_t count) /* Convert a time interval in nanoseconds to a timer cycle count. */ static uint32_t npcm7xx_timer_ns_to_count(NPCM7xxTimer *t, int64_t ns) { + if (ns < 0) { + return 0; + } return clock_ns_to_ticks(t->ctrl->clock, ns) / npcm7xx_tcsr_prescaler(t->tcsr); } diff --git a/hw/timer/trace-events b/hw/timer/trace-events index 3eccef83858f7160d5c2b96c6628c8e83724b53b..8145e18e3da8f0d85458db66b43139f386f81d3b 100644 --- a/hw/timer/trace-events +++ b/hw/timer/trace-events @@ -35,7 +35,7 @@ aspeed_timer_read(uint64_t offset, unsigned size, uint64_t value) "From 0x%" PRI # armv7m_systick.c systick_reload(void) "systick reload" -systick_timer_tick(void) "systick reload" +systick_timer_tick(void) "systick tick" systick_read(uint64_t addr, uint32_t value, unsigned size) "systick read addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" systick_write(uint64_t addr, uint32_t value, unsigned size) "systick write addr 0x%" PRIx64 " data 0x%" PRIx32 " size %u" diff --git a/hw/tpm/tpm_crb.c b/hw/tpm/tpm_crb.c index 58ebd1469c35064430ab5edf34c84885b2c2970f..ed89ab4afea62c67f2a8e319113d3e360de718f7 100644 --- a/hw/tpm/tpm_crb.c +++ b/hw/tpm/tpm_crb.c @@ -25,6 +25,7 @@ #include "sysemu/tpm_backend.h" #include "sysemu/tpm_util.h" #include "sysemu/reset.h" +#include "sysemu/xen.h" #include "tpm_prop.h" #include "tpm_ppi.h" #include "trace.h" @@ -196,6 +197,7 @@ static void tpm_crb_request_completed(TPMIf *ti, int ret) ARRAY_FIELD_DP32(s->regs, CRB_CTRL_STS, tpmSts, 1); /* fatal error */ } + memory_region_set_dirty(&s->cmdmem, 0, CRB_CTRL_CMD_SIZE); } static enum TPMVersion tpm_crb_get_version(TPMIf *ti) @@ -306,7 +308,11 @@ static void tpm_crb_realize(DeviceState *dev, Error **errp) TPM_PPI_ADDR_BASE, OBJECT(s)); } - qemu_register_reset(tpm_crb_reset, dev); + if (xen_enabled()) { + tpm_crb_reset(dev); + } else { + qemu_register_reset(tpm_crb_reset, dev); + } } static void tpm_crb_class_init(ObjectClass *klass, void *data) diff --git a/hw/usb/bus.c b/hw/usb/bus.c index 92d6ed5626141f8622c55bedb9dc23326b3338c8..20cd9b6e6fd9e70689bfe756ff44a2ff8a013fca 100644 --- a/hw/usb/bus.c +++ b/hw/usb/bus.c @@ -536,6 +536,10 @@ void usb_check_attach(USBDevice *dev, Error **errp) bus->qbus.name, port->path, portspeed); return; } + + qemu_log("attach usb device \"%s\" (%s speed) to VM bus \"%s\", " + "port \"%s\" (%s speed)\n", dev->product_desc, devspeed, + bus->qbus.name, port->path, portspeed); } void usb_device_attach(USBDevice *dev, Error **errp) @@ -564,6 +568,8 @@ int usb_device_detach(USBDevice *dev) usb_detach(port); dev->attached = false; + qemu_log("detach usb device \"%s\" from VM bus \"%s\", port \"%s\"\n", + dev->product_desc, bus->qbus.name, port->path); return 0; } diff --git a/hw/usb/core.c b/hw/usb/core.c index 975f76250a1a34b79252975b13724368577240f4..a62826e051bc86f6872b1db86fb02d9791b38eb2 100644 --- a/hw/usb/core.c +++ b/hw/usb/core.c @@ -87,7 +87,7 @@ void usb_device_reset(USBDevice *dev) return; } usb_device_handle_reset(dev); - dev->remote_wakeup = 0; + dev->remote_wakeup &= ~USB_DEVICE_REMOTE_WAKEUP; dev->addr = 0; dev->state = USB_STATE_DEFAULT; } @@ -105,7 +105,8 @@ void usb_wakeup(USBEndpoint *ep, unsigned int stream) */ return; } - if (dev->remote_wakeup && dev->port && dev->port->ops->wakeup) { + if ((dev->remote_wakeup & USB_DEVICE_REMOTE_WAKEUP) + && dev->port && 
dev->port->ops->wakeup) {
         dev->port->ops->wakeup(dev->port);
     }
     if (bus->ops->wakeup_endpoint) {
@@ -205,7 +206,15 @@ static void do_token_in(USBDevice *s, USBPacket *p)
 
     case SETUP_STATE_DATA:
         if (s->setup_buf[0] & USB_DIR_IN) {
-            int len = s->setup_len - s->setup_index;
+            int len;
+            if (s->setup_len > sizeof(s->data_buf)) {
+                fprintf(stderr,
+                        "usb_generic_handle_packet: ctrl buffer too small do_token_in(%d > %zu)\n",
+                        s->setup_len, sizeof(s->data_buf));
+                p->status = USB_RET_STALL;
+                return;
+            }
+            len = s->setup_len - s->setup_index;
             if (len > p->iov.size) {
                 len = p->iov.size;
             }
@@ -243,7 +252,15 @@ static void do_token_out(USBDevice *s, USBPacket *p)
 
     case SETUP_STATE_DATA:
         if (!(s->setup_buf[0] & USB_DIR_IN)) {
-            int len = s->setup_len - s->setup_index;
+            int len;
+            if (s->setup_len > sizeof(s->data_buf)) {
+                fprintf(stderr,
+                        "usb_generic_handle_packet: ctrl buffer too small do_token_out(%d > %zu)\n",
+                        s->setup_len, sizeof(s->data_buf));
+                p->status = USB_RET_STALL;
+                return;
+            }
+            len = s->setup_len - s->setup_index;
             if (len > p->iov.size) {
                 len = p->iov.size;
             }
diff --git a/hw/usb/desc.c b/hw/usb/desc.c
index 8b6eaea4079e24a7488fb4fcc7b8f36a65585381..78bbe74c713258d4080b4d8127eb0b07d0d29fd3 100644
--- a/hw/usb/desc.c
+++ b/hw/usb/desc.c
@@ -751,7 +751,7 @@ int usb_desc_handle_control(USBDevice *dev, USBPacket *p,
         if (config->bmAttributes & USB_CFG_ATT_SELFPOWER) {
             data[0] |= 1 << USB_DEVICE_SELF_POWERED;
         }
-        if (dev->remote_wakeup) {
+        if (dev->remote_wakeup & USB_DEVICE_REMOTE_WAKEUP) {
             data[0] |= 1 << USB_DEVICE_REMOTE_WAKEUP;
         }
         data[1] = 0x00;
@@ -761,14 +761,15 @@ int usb_desc_handle_control(USBDevice *dev, USBPacket *p,
         }
     case DeviceOutRequest | USB_REQ_CLEAR_FEATURE:
         if (value == USB_DEVICE_REMOTE_WAKEUP) {
-            dev->remote_wakeup = 0;
+            dev->remote_wakeup &= ~USB_DEVICE_REMOTE_WAKEUP;
             ret = 0;
         }
         trace_usb_clear_device_feature(dev->addr, value, ret);
         break;
     case DeviceOutRequest | USB_REQ_SET_FEATURE:
+        dev->remote_wakeup |= USB_DEVICE_REMOTE_WAKEUP_IS_SUPPORTED;
         if (value == USB_DEVICE_REMOTE_WAKEUP) {
-            dev->remote_wakeup = 1;
+            dev->remote_wakeup |= USB_DEVICE_REMOTE_WAKEUP;
             ret = 0;
         }
         trace_usb_set_device_feature(dev->addr, value, ret);
diff --git a/hw/usb/dev-hid.c b/hw/usb/dev-hid.c
index 1c7ae97c3033442dba820db492bdd04cba7c6299..9fb89f6955c395067b6549c9b028b63372d17794 100644
--- a/hw/usb/dev-hid.c
+++ b/hw/usb/dev-hid.c
@@ -745,7 +745,7 @@ static int usb_ptr_post_load(void *opaque, int version_id)
 {
     USBHIDState *s = opaque;
 
-    if (s->dev.remote_wakeup) {
+    if (s->dev.remote_wakeup & USB_DEVICE_REMOTE_WAKEUP) {
         hid_pointer_activate(&s->hid);
     }
     return 0;
diff --git a/hw/usb/dev-hub.c b/hw/usb/dev-hub.c
index e35813d772222666419489233a9a507627d5c060..605fee4fa928d03329d06aa95f9757a2d098afdb 100644
--- a/hw/usb/dev-hub.c
+++ b/hw/usb/dev-hub.c
@@ -479,6 +479,7 @@ static void usb_hub_handle_control(USBDevice *dev, USBPacket *p,
             usb_hub_port_clear(port, PORT_STAT_SUSPEND);
             port->wPortChange = 0;
         }
+        break;
     default:
         goto fail;
     }
diff --git a/hw/usb/dev-mtp.c b/hw/usb/dev-mtp.c
index c1d1694fd0b45e22b62594d592076860d1352f5b..882f6bc72f789b824a9d98cde0a3184f343d8f10 100644
--- a/hw/usb/dev-mtp.c
+++ b/hw/usb/dev-mtp.c
@@ -15,7 +15,7 @@
 #include "qemu/error-report.h"
 #include <wchar.h>
 #include <dirent.h>
-
+#include <glib/gstdio.h>
 #include <sys/statvfs.h>
@@ -1623,7 +1623,7 @@ static void usb_mtp_write_data(MTPState *s, uint32_t handle)
     if (s->dataset.filename) {
         path = g_strdup_printf("%s/%s", parent->path, s->dataset.filename);
         if (s->dataset.format == FMT_ASSOCIATION) {
-            ret = mkdir(path, mask);
+            ret = g_mkdir(path, mask);
             if
(!ret) { usb_mtp_queue_result(s, RES_OK, d->trans, 3, QEMU_STORAGE_ID, diff --git a/hw/usb/dev-network.c b/hw/usb/dev-network.c index 6c49c16015e03dc44ec631688cb0b786023f5e22..ae447a8bc3299d815afddfb75cb26d15e52b4c9d 100644 --- a/hw/usb/dev-network.c +++ b/hw/usb/dev-network.c @@ -1362,7 +1362,8 @@ static void usb_net_realize(USBDevice *dev, Error **errp) qemu_macaddr_default_if_unset(&s->conf.macaddr); s->nic = qemu_new_nic(&net_usbnet_info, &s->conf, - object_get_typename(OBJECT(s)), s->dev.qdev.id, s); + object_get_typename(OBJECT(s)), s->dev.qdev.id, + &s->dev.qdev.mem_reentrancy_guard, s); qemu_format_nic_info_str(qemu_get_queue(s->nic), s->conf.macaddr.a); snprintf(s->usbstring_mac, sizeof(s->usbstring_mac), "%02x%02x%02x%02x%02x%02x", diff --git a/hw/usb/dev-uas.c b/hw/usb/dev-uas.c index 599d6b52a0122c0bba89448322b1dc96eeabe008..67bcfac62676bcb1098ad916e381e5cecb9e2e78 100644 --- a/hw/usb/dev-uas.c +++ b/hw/usb/dev-uas.c @@ -908,6 +908,7 @@ static void usb_uas_handle_data(USBDevice *dev, USBPacket *p) p->status = USB_RET_STALL; break; } + return; err_stream: error_report("%s: invalid stream %d", __func__, p->stream); @@ -935,7 +936,8 @@ static void usb_uas_realize(USBDevice *dev, Error **errp) QTAILQ_INIT(&uas->results); QTAILQ_INIT(&uas->requests); - uas->status_bh = qemu_bh_new(usb_uas_send_status_bh, uas); + uas->status_bh = qemu_bh_new_guarded(usb_uas_send_status_bh, uas, + &d->mem_reentrancy_guard); dev->flags |= (1 << USB_DEV_FLAG_IS_SCSI_STORAGE); scsi_bus_init(&uas->bus, sizeof(uas->bus), DEVICE(dev), &usb_uas_scsi_info); diff --git a/hw/usb/hcd-dwc2.c b/hw/usb/hcd-dwc2.c index e1d96acf7ecf80319d63e36652e4456ea1c6ba93..a0c4e782b2a2e361317ddd49ecdf968903d67ef4 100644 --- a/hw/usb/hcd-dwc2.c +++ b/hw/usb/hcd-dwc2.c @@ -272,8 +272,8 @@ static void dwc2_handle_packet(DWC2State *s, uint32_t devadr, USBDevice *dev, if (pid != USB_TOKEN_IN) { trace_usb_dwc2_memory_read(hcdma, tlen); - if (dma_memory_read(&s->dma_as, hcdma, - s->usb_buf[chan], tlen) != MEMTX_OK) { + if (dma_memory_read(&s->dma_as, hcdma, s->usb_buf[chan], tlen, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: dma_memory_read failed\n", __func__); } @@ -328,8 +328,8 @@ babble: if (pid == USB_TOKEN_IN) { trace_usb_dwc2_memory_write(hcdma, actual); - if (dma_memory_write(&s->dma_as, hcdma, s->usb_buf[chan], - actual) != MEMTX_OK) { + if (dma_memory_write(&s->dma_as, hcdma, s->usb_buf[chan], actual, + MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) { qemu_log_mask(LOG_GUEST_ERROR, "%s: dma_memory_write failed\n", __func__); } @@ -1364,7 +1364,8 @@ static void dwc2_realize(DeviceState *dev, Error **errp) s->fi = USB_FRMINTVL - 1; s->eof_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_frame_boundary, s); s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, dwc2_work_timer, s); - s->async_bh = qemu_bh_new(dwc2_work_bh, s); + s->async_bh = qemu_bh_new_guarded(dwc2_work_bh, s, + &dev->mem_reentrancy_guard); sysbus_init_irq(sbd, &s->irq); } diff --git a/hw/usb/hcd-ehci.c b/hw/usb/hcd-ehci.c index 6caa7ac6c28f56416f652b6657878ef18fcb3388..3fbb06e248e06c788cd03d2f28715f6dda3ce4a0 100644 --- a/hw/usb/hcd-ehci.c +++ b/hw/usb/hcd-ehci.c @@ -383,7 +383,8 @@ static inline int get_dwords(EHCIState *ehci, uint32_t addr, } for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { - dma_memory_read(ehci->as, addr, buf, sizeof(*buf)); + dma_memory_read(ehci->as, addr, buf, sizeof(*buf), + MEMTXATTRS_UNSPECIFIED); *buf = le32_to_cpu(*buf); } @@ -405,7 +406,8 @@ static inline int put_dwords(EHCIState *ehci, uint32_t addr, for 
(i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint32_t tmp = cpu_to_le32(*buf); - dma_memory_write(ehci->as, addr, &tmp, sizeof(tmp)); + dma_memory_write(ehci->as, addr, &tmp, sizeof(tmp), + MEMTXATTRS_UNSPECIFIED); } return num; @@ -612,6 +614,8 @@ static void ehci_free_queue(EHCIQueue *q, const char *warn) ehci_trace_guest_bug(q->ehci, warn); } QTAILQ_REMOVE(head, q, next); + memset(q, 0, sizeof(*q)); + *(volatile char *)q = *(volatile char *)q; g_free(q); } @@ -2009,7 +2013,10 @@ static int ehci_state_writeback(EHCIQueue *q) ehci_trace_qtd(q, NLPTR_GET(p->qtdaddr), (EHCIqtd *) &q->qh.next_qtd); qtd = (uint32_t *) &q->qh.next_qtd; addr = NLPTR_GET(p->qtdaddr); - put_dwords(q->ehci, addr + 2 * sizeof(uint32_t), qtd + 2, 2); + /* First write back the offset */ + put_dwords(q->ehci, addr + 3 * sizeof(uint32_t), qtd + 3, 1); + /* Then write back the token, clearing the 'active' bit */ + put_dwords(q->ehci, addr + 2 * sizeof(uint32_t), qtd + 2, 1); ehci_free_packet(p); /* @@ -2528,7 +2535,8 @@ void usb_ehci_realize(EHCIState *s, DeviceState *dev, Error **errp) } s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, ehci_work_timer, s); - s->async_bh = qemu_bh_new(ehci_work_bh, s); + s->async_bh = qemu_bh_new_guarded(ehci_work_bh, s, + &dev->mem_reentrancy_guard); s->device = dev; s->vmstate = qemu_add_vm_change_state_handler(usb_ehci_vm_state_change, s); diff --git a/hw/usb/hcd-ohci.c b/hw/usb/hcd-ohci.c index 1cf2816772c5377f44525d162d4c272155463acf..8f65c02c3db60b68801c69dcbe483bc6f23225fb 100644 --- a/hw/usb/hcd-ohci.c +++ b/hw/usb/hcd-ohci.c @@ -452,7 +452,8 @@ static inline int get_dwords(OHCIState *ohci, addr += ohci->localmem_base; for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { - if (dma_memory_read(ohci->as, addr, buf, sizeof(*buf))) { + if (dma_memory_read(ohci->as, addr, + buf, sizeof(*buf), MEMTXATTRS_UNSPECIFIED)) { return -1; } *buf = le32_to_cpu(*buf); @@ -471,7 +472,8 @@ static inline int put_dwords(OHCIState *ohci, for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint32_t tmp = cpu_to_le32(*buf); - if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) { + if (dma_memory_write(ohci->as, addr, + &tmp, sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) { return -1; } } @@ -488,7 +490,8 @@ static inline int get_words(OHCIState *ohci, addr += ohci->localmem_base; for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { - if (dma_memory_read(ohci->as, addr, buf, sizeof(*buf))) { + if (dma_memory_read(ohci->as, addr, + buf, sizeof(*buf), MEMTXATTRS_UNSPECIFIED)) { return -1; } *buf = le16_to_cpu(*buf); @@ -507,7 +510,8 @@ static inline int put_words(OHCIState *ohci, for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) { uint16_t tmp = cpu_to_le16(*buf); - if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) { + if (dma_memory_write(ohci->as, addr, + &tmp, sizeof(tmp), MEMTXATTRS_UNSPECIFIED)) { return -1; } } @@ -537,8 +541,8 @@ static inline int ohci_read_iso_td(OHCIState *ohci, static inline int ohci_read_hcca(OHCIState *ohci, dma_addr_t addr, struct ohci_hcca *hcca) { - return dma_memory_read(ohci->as, addr + ohci->localmem_base, - hcca, sizeof(*hcca)); + return dma_memory_read(ohci->as, addr + ohci->localmem_base, hcca, + sizeof(*hcca), MEMTXATTRS_UNSPECIFIED); } static inline int ohci_put_ed(OHCIState *ohci, @@ -572,7 +576,7 @@ static inline int ohci_put_hcca(OHCIState *ohci, return dma_memory_write(ohci->as, addr + ohci->localmem_base + HCCA_WRITEBACK_OFFSET, (char *)hcca + HCCA_WRITEBACK_OFFSET, - HCCA_WRITEBACK_SIZE); + HCCA_WRITEBACK_SIZE, 
MEMTXATTRS_UNSPECIFIED); } /* Read/Write the contents of a TD from/to main memory. */ @@ -586,7 +590,8 @@ static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td, if (n > len) n = len; - if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir)) { + if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, + n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; } if (n == len) { @@ -595,7 +600,7 @@ static int ohci_copy_td(OHCIState *ohci, struct ohci_td *td, ptr = td->be & ~0xfffu; buf += n; if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, - len - n, dir)) { + len - n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; } return 0; @@ -613,7 +618,8 @@ static int ohci_copy_iso_td(OHCIState *ohci, if (n > len) n = len; - if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, n, dir)) { + if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, + n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; } if (n == len) { @@ -622,7 +628,7 @@ static int ohci_copy_iso_td(OHCIState *ohci, ptr = end_addr & ~0xfffu; buf += n; if (dma_memory_rw(ohci->as, ptr + ohci->localmem_base, buf, - len - n, dir)) { + len - n, dir, MEMTXATTRS_UNSPECIFIED)) { return -1; } return 0; @@ -662,6 +668,11 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, addr = ed->head & OHCI_DPTR_MASK; + if (addr == 0) { + ohci_die(ohci); + return 1; + } + if (ohci_read_iso_td(ohci, addr, &iso_td)) { trace_usb_ohci_iso_td_read_failed(addr); ohci_die(ohci); @@ -894,13 +905,14 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed, return 1; } +#define HEX_CHAR_PER_LINE 16 + static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len) { bool print16; bool printall; - const int width = 16; int i; - char tmp[3 * width + 1]; + char tmp[3 * HEX_CHAR_PER_LINE + 1]; char *p = tmp; print16 = !!trace_event_get_state_backends(TRACE_USB_OHCI_TD_PKT_SHORT); @@ -911,7 +923,7 @@ static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len) } for (i = 0; ; i++) { - if (i && (!(i % width) || (i == len))) { + if (i && (!(i % HEX_CHAR_PER_LINE) || (i == len))) { if (!printall) { trace_usb_ohci_td_pkt_short(msg, tmp); break; @@ -947,6 +959,11 @@ static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed) int completion; addr = ed->head & OHCI_DPTR_MASK; + if (addr == 0) { + ohci_die(ohci); + return 1; + } + /* See if this TD has already been submitted to the device. 
 */
     completion = (addr == ohci->async_td);
     if (completion && !ohci->async_complete) {
diff --git a/hw/usb/hcd-uhci.c b/hw/usb/hcd-uhci.c
index d1b5657d722a3a9463ab78ffd60d677c6bd2f57e..00a8de2fba386ae8a6d17774fd535616a2c4dd27 100644
--- a/hw/usb/hcd-uhci.c
+++ b/hw/usb/hcd-uhci.c
@@ -44,6 +44,8 @@
 #include "hcd-uhci.h"
 
 #define FRAME_TIMER_FREQ 1000
+#define FRAME_TIMER_FREQ_LAZY 10
+#define USB_DEVICE_NEED_NORMAL_FREQ "QEMU USB Tablet"
 
 #define FRAME_MAX_LOOPS    256
 
@@ -111,6 +113,22 @@ static void uhci_async_cancel(UHCIAsync *async);
 static void uhci_queue_fill(UHCIQueue *q, UHCI_TD *td);
 static void uhci_resume(void *opaque);
 
+static int64_t uhci_frame_timer_freq = FRAME_TIMER_FREQ_LAZY;
+
+static void uhci_set_frame_freq(int freq)
+{
+    if (freq <= 0) {
+        return;
+    }
+
+    uhci_frame_timer_freq = freq;
+}
+
+static qemu_usb_controller qemu_uhci = {
+    .name = "uhci",
+    .qemu_set_freq = uhci_set_frame_freq,
+};
+
 static inline int32_t uhci_queue_token(UHCI_TD *td)
 {
     if ((td->token & (0xf << 15)) == 0) {
@@ -353,7 +371,7 @@ static int uhci_post_load(void *opaque, int version_id)
 
     if (version_id < 2) {
         s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-            (NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ);
+            (NANOSECONDS_PER_SECOND / uhci_frame_timer_freq);
     }
     return 0;
 }
@@ -394,8 +412,29 @@ static void uhci_port_write(void *opaque, hwaddr addr,
         if ((val & UHCI_CMD_RS) && !(s->cmd & UHCI_CMD_RS)) {
             /* start frame processing */
             trace_usb_uhci_schedule_start();
-            s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-                (NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ);
+
+            /*
+             * If the frame_timer frequency is too low, guests such as
+             * Windows Server 2012 can blue-screen after vCPU hotplug.
+             * If the USB device supports remote wakeup, the UHCI
+             * controller enters global suspend once there is no input
+             * for several seconds and QEMU deletes the frame_timer; a
+             * deleted timer costs the VM nothing, so it is safe to
+             * restore the full 1000 Hz rate here before re-arming it.
+             * Apart from this path, the frequency is also switched on:
+             * 1) VNC connect/disconnect; 2) USB device attach/detach.
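+             * At FRAME_TIMER_FREQ_LAZY (10 Hz) a "frame" lasts 100 ms
+             * instead of the 1 ms the UHCI spec mandates, which is why
+             * the full rate is restored before frame processing starts.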
+             */
+            if ((uhci_frame_timer_freq != FRAME_TIMER_FREQ)
+                && (s->ports[0].port.dev)
+                && (!memcmp(s->ports[0].port.dev->product_desc,
+                            USB_DEVICE_NEED_NORMAL_FREQ,
+                            strlen(USB_DEVICE_NEED_NORMAL_FREQ)))
+                && (s->ports[0].port.dev->remote_wakeup &
+                    USB_DEVICE_REMOTE_WAKEUP_IS_SUPPORTED)) {
+                qemu_log("turn up the frequency of UHCI controller to %d\n",
+                         FRAME_TIMER_FREQ);
+                uhci_frame_timer_freq = FRAME_TIMER_FREQ;
+            }
+
+            s->frame_time = NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ;
+            s->expire_time = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
+                s->frame_time;
             timer_mod(s->frame_timer, s->expire_time);
             s->status &= ~UHCI_STS_HCHALTED;
         } else if (!(val & UHCI_CMD_RS)) {
@@ -1083,7 +1122,6 @@ static void uhci_frame_timer(void *opaque)
     UHCIState *s = opaque;
     uint64_t t_now, t_last_run;
     int i, frames;
-    const uint64_t frame_t = NANOSECONDS_PER_SECOND / FRAME_TIMER_FREQ;
 
     s->completions_only = false;
     qemu_bh_cancel(s->bh);
@@ -1099,14 +1137,14 @@ static void uhci_frame_timer(void *opaque)
     }
 
     /* We still store expire_time in our state, for migration */
-    t_last_run = s->expire_time - frame_t;
+    t_last_run = s->expire_time - s->frame_time;
     t_now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
 
     /* Process up to MAX_FRAMES_PER_TICK frames */
-    frames = (t_now - t_last_run) / frame_t;
+    frames = (t_now - t_last_run) / s->frame_time;
     if (frames > s->maxframes) {
         int skipped = frames - s->maxframes;
-        s->expire_time += skipped * frame_t;
+        s->expire_time += skipped * s->frame_time;
         s->frnum = (s->frnum + skipped) & 0x7ff;
         frames -= skipped;
     }
@@ -1123,7 +1161,7 @@ static void uhci_frame_timer(void *opaque)
         /* The spec says frnum is the frame currently being processed, and
          * the guest must look at frnum - 1 on interrupt, so inc frnum now */
         s->frnum = (s->frnum + 1) & 0x7ff;
-        s->expire_time += frame_t;
+        s->expire_time += s->frame_time;
     }
 
     /* Complete the previous frame(s) */
@@ -1134,7 +1172,12 @@ static void uhci_frame_timer(void *opaque)
     }
     s->pending_int_mask = 0;
 
-    timer_mod(s->frame_timer, t_now + frame_t);
+    /*
+     * expire_time was computed with the previous frame_time; rebase it
+     * onto the new frame_time, which equals
+     * NANOSECONDS_PER_SECOND / uhci_frame_timer_freq.
+     */
+    s->expire_time -= s->frame_time -
+                      NANOSECONDS_PER_SECOND / uhci_frame_timer_freq;
+    s->frame_time = NANOSECONDS_PER_SECOND / uhci_frame_timer_freq;
+    timer_mod(s->frame_timer, t_now + s->frame_time);
 }
 
 static const MemoryRegionOps uhci_ioport_ops = {
@@ -1193,11 +1236,13 @@ void usb_uhci_common_realize(PCIDevice *dev, Error **errp)
                                    USB_SPEED_MASK_LOW | USB_SPEED_MASK_FULL);
         }
     }
-    s->bh = qemu_bh_new(uhci_bh, s);
+    s->bh = qemu_bh_new_guarded(uhci_bh, s, &DEVICE(dev)->mem_reentrancy_guard);
     s->frame_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, uhci_frame_timer, s);
     s->num_ports_vmstate = NB_PORTS;
+    s->frame_time = NANOSECONDS_PER_SECOND / uhci_frame_timer_freq;
     QTAILQ_INIT(&s->queues);
+    qemu_register_usb_controller(&qemu_uhci, QEMU_USB_CONTROLLER_UHCI);
 
     memory_region_init_io(&s->io_bar, OBJECT(s), &uhci_ioport_ops, s,
                           "uhci", 0x20);
diff --git a/hw/usb/hcd-uhci.h b/hw/usb/hcd-uhci.h
index c85ab7868eee496323a92fd51942cb4e1429c5fc..5194d22ab41fa166db60f00d2ceacc11c198e4da 100644
--- a/hw/usb/hcd-uhci.h
+++ b/hw/usb/hcd-uhci.h
@@ -50,6 +50,7 @@ typedef struct UHCIState {
     uint16_t status;
     uint16_t intr; /* interrupt enable register */
     uint16_t frnum; /* frame number */
+    uint64_t frame_time; /* frame time in ns */
    uint32_t fl_base_addr; /* frame list base address */
     uint8_t sof_timing;
     uint8_t status2; /* bit 0 and 1 are used to generate
UHCI_STS_USBINT */
diff --git a/hw/usb/hcd-xhci-pci.c b/hw/usb/hcd-xhci-pci.c
index e934b1a5b1fbbf38a588e4299a77e780fb3b9055..643d4643e4d6cdfe660c6cc169874bd37799bf5a 100644
--- a/hw/usb/hcd-xhci-pci.c
+++ b/hw/usb/hcd-xhci-pci.c
@@ -85,7 +85,7 @@ static void xhci_pci_reset(DeviceState *dev)
 {
     XHCIPciState *s = XHCI_PCI(dev);
 
-    device_legacy_reset(DEVICE(&s->xhci));
+    device_cold_reset(DEVICE(&s->xhci));
 }
 
 static int xhci_pci_vmstate_post_load(void *opaque, int version_id)
diff --git a/hw/usb/hcd-xhci-sysbus.c b/hw/usb/hcd-xhci-sysbus.c
index a14e4381960ef5d34d0567ed1b58eb55375a3e61..faf57b47975dc53dc43bd8fb1c6bd06451eb085b 100644
--- a/hw/usb/hcd-xhci-sysbus.c
+++ b/hw/usb/hcd-xhci-sysbus.c
@@ -29,7 +29,7 @@ void xhci_sysbus_reset(DeviceState *dev)
 {
     XHCISysbusState *s = XHCI_SYSBUS(dev);
 
-    device_legacy_reset(DEVICE(&s->xhci));
+    device_cold_reset(DEVICE(&s->xhci));
 }
 
 static void xhci_sysbus_realize(DeviceState *dev, Error **errp)
diff --git a/hw/usb/hcd-xhci.c b/hw/usb/hcd-xhci.c
index e01700039b13d1404d3dc66eb3d3f99233e1277f..29636795bef01713c1e5ed93cd2c2a64b3c0d1d0 100644
--- a/hw/usb/hcd-xhci.c
+++ b/hw/usb/hcd-xhci.c
@@ -21,12 +21,13 @@
 #include "qemu/osdep.h"
 #include "qemu/timer.h"
+#include "qemu/log.h"
 #include "qemu/module.h"
 #include "qemu/queue.h"
 #include "migration/vmstate.h"
 #include "hw/qdev-properties.h"
 #include "trace.h"
 #include "qapi/error.h"
 
 #include "hcd-xhci.h"
 
@@ -487,7 +489,7 @@ static inline void xhci_dma_read_u32s(XHCIState *xhci, dma_addr_t addr,
 
     assert((len % sizeof(uint32_t)) == 0);
 
-    dma_memory_read(xhci->as, addr, buf, len);
+    dma_memory_read(xhci->as, addr, buf, len, MEMTXATTRS_UNSPECIFIED);
 
     for (i = 0; i < (len / sizeof(uint32_t)); i++) {
         buf[i] = le32_to_cpu(buf[i]);
@@ -507,7 +509,7 @@ static inline void xhci_dma_write_u32s(XHCIState *xhci, dma_addr_t addr,
     for (i = 0; i < n; i++) {
         tmp[i] = cpu_to_le32(buf[i]);
     }
-    dma_memory_write(xhci->as, addr, tmp, len);
+    dma_memory_write(xhci->as, addr, tmp, len, MEMTXATTRS_UNSPECIFIED);
 }
 
 static XHCIPort *xhci_lookup_port(XHCIState *xhci, struct USBPort *uport)
@@ -618,7 +620,7 @@ static void xhci_write_event(XHCIState *xhci, XHCIEvent *event, int v)
               ev_trb.status, ev_trb.control);
 
     addr = intr->er_start + TRB_SIZE*intr->er_ep_idx;
-    dma_memory_write(xhci->as, addr, &ev_trb, TRB_SIZE);
+    dma_memory_write(xhci->as, addr, &ev_trb, TRB_SIZE, MEMTXATTRS_UNSPECIFIED);
 
     intr->er_ep_idx++;
     if (intr->er_ep_idx >= intr->er_size) {
@@ -679,7 +681,8 @@ static TRBType xhci_ring_fetch(XHCIState *xhci, XHCIRing *ring, XHCITRB *trb,
     while (1) {
         TRBType type;
-        dma_memory_read(xhci->as, ring->dequeue, trb, TRB_SIZE);
+        dma_memory_read(xhci->as, ring->dequeue, trb, TRB_SIZE,
+                        MEMTXATTRS_UNSPECIFIED);
         trb->addr = ring->dequeue;
         trb->ccs = ring->ccs;
         le64_to_cpus(&trb->parameter);
@@ -724,9 +727,14 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring)
     bool control_td_set = 0;
     uint32_t link_cnt = 0;
 
-    while (1) {
+    do {
         TRBType type;
-        dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE);
+        if (dma_memory_read(xhci->as, dequeue, &trb, TRB_SIZE,
+                            MEMTXATTRS_UNSPECIFIED) != MEMTX_OK) {
+            qemu_log_mask(LOG_GUEST_ERROR, "%s: DMA memory access failed!\n",
+                          __func__);
+            return -1;
+        }
         le64_to_cpus(&trb.parameter);
         le32_to_cpus(&trb.status);
         le32_to_cpus(&trb.control);
@@ -760,7 +768,17 @@ static int xhci_ring_chain_length(XHCIState *xhci, const XHCIRing *ring)
         if (!control_td_set && !(trb.control & TRB_TR_CH)) {
             return length;
         }
-    }
+
+        /*
+         * According to the xHCI spec, Transfer Ring
segments should have + * a maximum size of 64 kB (see chapter "6 Data Structures") + */ + } while (length < TRB_LINK_LIMIT * 65536 / TRB_SIZE); + + qemu_log_mask(LOG_GUEST_ERROR, "%s: exceeded maximum transfer ring size!\n", + __func__); + + return -1; } static void xhci_er_reset(XHCIState *xhci, int v) @@ -781,7 +799,8 @@ static void xhci_er_reset(XHCIState *xhci, int v) xhci_die(xhci); return; } - dma_memory_read(xhci->as, erstba, &seg, sizeof(seg)); + dma_memory_read(xhci->as, erstba, &seg, sizeof(seg), + MEMTXATTRS_UNSPECIFIED); le32_to_cpus(&seg.addr_low); le32_to_cpus(&seg.addr_high); le32_to_cpus(&seg.size); @@ -2059,7 +2078,7 @@ static TRBCCode xhci_address_slot(XHCIState *xhci, unsigned int slotid, assert(slotid >= 1 && slotid <= xhci->numslots); dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high); - poctx = ldq_le_dma(xhci->as, dcbaap + 8 * slotid); + ldq_le_dma(xhci->as, dcbaap + 8 * slotid, &poctx, MEMTXATTRS_UNSPECIFIED); ictx = xhci_mask64(pictx); octx = xhci_mask64(poctx); @@ -2397,7 +2416,8 @@ static TRBCCode xhci_get_port_bandwidth(XHCIState *xhci, uint64_t pctx) /* TODO: actually implement real values here */ bw_ctx[0] = 0; memset(&bw_ctx[1], 80, xhci->numports); /* 80% */ - dma_memory_write(xhci->as, ctx, bw_ctx, sizeof(bw_ctx)); + dma_memory_write(xhci->as, ctx, bw_ctx, sizeof(bw_ctx), + MEMTXATTRS_UNSPECIFIED); return CC_SUCCESS; } @@ -3017,14 +3037,17 @@ static void xhci_runtime_write(void *ptr, hwaddr reg, XHCIInterrupter *intr; int v; - trace_usb_xhci_runtime_write(reg, val); - if (reg < 0x20) { trace_usb_xhci_unimplemented("runtime write", reg); return; } v = (reg - 0x20) / 0x20; + if (v >= xhci->numintrs) { + qemu_log("intr nr out of range (%d >= %d)\n", v, xhci->numintrs); + return; + } intr = &xhci->intr[v]; + trace_usb_xhci_runtime_write(reg, val); switch (reg & 0x1f) { case 0x00: /* IMAN */ @@ -3265,7 +3288,8 @@ static void xhci_wakeup_endpoint(USBBus *bus, USBEndpoint *ep, DPRINTF("%s\n", __func__); slotid = ep->dev->addr; - if (slotid == 0 || !xhci->slots[slotid-1].enabled) { + if (slotid == 0 || slotid > xhci->numslots || + !xhci->slots[slotid - 1].enabled) { DPRINTF("%s: oops, no slot for dev %d\n", __func__, ep->dev->addr); return; } @@ -3425,6 +3449,7 @@ static int usb_xhci_post_load(void *opaque, int version_id) uint32_t slot_ctx[4]; uint32_t ep_ctx[5]; int slotid, epid, state; + uint64_t addr; dcbaap = xhci_addr64(xhci->dcbaap_low, xhci->dcbaap_high); @@ -3433,8 +3458,9 @@ static int usb_xhci_post_load(void *opaque, int version_id) if (!slot->addressed) { continue; } - slot->ctx = - xhci_mask64(ldq_le_dma(xhci->as, dcbaap + 8 * slotid)); + ldq_le_dma(xhci->as, dcbaap + 8 * slotid, &addr, MEMTXATTRS_UNSPECIFIED); + slot->ctx = xhci_mask64(addr); + xhci_dma_read_u32s(xhci, slot->ctx, slot_ctx, sizeof(slot_ctx)); slot->uport = xhci_lookup_uport(xhci, slot_ctx); if (!slot->uport) { diff --git a/hw/usb/host-libusb.c b/hw/usb/host-libusb.c index d0d46dd0a4a37ad00f3ad5d3c34ee47ae6ecb71f..13e83a30504896bbda6d3ea7960f569f817bdf98 100644 --- a/hw/usb/host-libusb.c +++ b/hw/usb/host-libusb.c @@ -945,6 +945,30 @@ static void usb_host_ep_update(USBHostDevice *s) libusb_free_config_descriptor(conf); } +static unsigned int usb_get_controller_type(int speed) +{ + unsigned int type = MAX_USB_CONTROLLER_TYPES; + + switch (speed) { + case USB_SPEED_SUPER: + type = QEMU_USB_CONTROLLER_XHCI; + break; + case USB_SPEED_HIGH: + type = QEMU_USB_CONTROLLER_EHCI; + break; + case USB_SPEED_FULL: + type = QEMU_USB_CONTROLLER_UHCI; + break; + case USB_SPEED_LOW: + type = 
QEMU_USB_CONTROLLER_OHCI; + break; + default: + break; + } + + return type; +} + static int usb_host_open(USBHostDevice *s, libusb_device *dev, int hostfd) { USBDevice *udev = USB_DEVICE(s); @@ -968,6 +992,8 @@ static int usb_host_open(USBHostDevice *s, libusb_device *dev, int hostfd) rc = libusb_open(dev, &s->dh); if (rc != 0) { + qemu_log("libusb open usb device bus %d, device %d failed\n", + bus_num, addr); goto fail; } } else { @@ -995,6 +1021,7 @@ static int usb_host_open(USBHostDevice *s, libusb_device *dev, int hostfd) libusb_get_device_descriptor(dev, &s->ddesc); usb_host_get_port(s->dev, s->port, sizeof(s->port)); + qemu_log("open a host usb device on bus %d, device %d\n", bus_num, addr); usb_ep_init(udev); usb_host_ep_update(s); @@ -1054,6 +1081,12 @@ static int usb_host_open(USBHostDevice *s, libusb_device *dev, int hostfd) } trace_usb_host_open_success(bus_num, addr); + + /* change ehci frame time freq when USB passthrough */ + qemu_log("usb host speed is %d\n", udev->speed); + qemu_timer_set_mode(QEMU_TIMER_USB_NORMAL_MODE, + usb_get_controller_type(udev->speed)); + return 0; fail: @@ -1116,6 +1149,8 @@ static int usb_host_close(USBHostDevice *s) usb_device_detach(udev); } + qemu_log("begin to reset the usb device, bus : %d, device : %d\n", + s->bus_num, s->addr); usb_host_release_interfaces(s); libusb_reset_device(s->dh); usb_host_attach_kernel(s); @@ -1129,6 +1164,8 @@ static int usb_host_close(USBHostDevice *s) } usb_host_auto_check(NULL); + qemu_timer_set_mode(QEMU_TIMER_USB_LAZY_MODE, + usb_get_controller_type(udev->speed)); return 0; } @@ -1141,7 +1178,8 @@ static void usb_host_nodev_bh(void *opaque) static void usb_host_nodev(USBHostDevice *s) { if (!s->bh_nodev) { - s->bh_nodev = qemu_bh_new(usb_host_nodev_bh, s); + s->bh_nodev = qemu_bh_new_guarded(usb_host_nodev_bh, s, + &DEVICE(s)->mem_reentrancy_guard); } qemu_bh_schedule(s->bh_nodev); } @@ -1739,7 +1777,8 @@ static int usb_host_post_load(void *opaque, int version_id) USBHostDevice *dev = opaque; if (!dev->bh_postld) { - dev->bh_postld = qemu_bh_new(usb_host_post_load_bh, dev); + dev->bh_postld = qemu_bh_new_guarded(usb_host_post_load_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); } qemu_bh_schedule(dev->bh_postld); dev->bh_postld_pending = true; diff --git a/hw/usb/imx-usb-phy.c b/hw/usb/imx-usb-phy.c index 5d7a549e34d15f6d5fc9646a0fc75229a8926ad9..1a97b36a1194f6c4ad84ec703ef037c995db94fc 100644 --- a/hw/usb/imx-usb-phy.c +++ b/hw/usb/imx-usb-phy.c @@ -13,6 +13,7 @@ #include "qemu/osdep.h" #include "hw/usb/imx-usb-phy.h" #include "migration/vmstate.h" +#include "qemu/log.h" #include "qemu/module.h" static const VMStateDescription vmstate_imx_usbphy = { @@ -90,7 +91,15 @@ static uint64_t imx_usbphy_read(void *opaque, hwaddr offset, unsigned size) value = s->usbphy[index - 3]; break; default: - value = s->usbphy[index]; + if (index < USBPHY_MAX) { + value = s->usbphy[index]; + } else { + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Read from non-existing USB PHY register 0x%" + HWADDR_PRIx "\n", + __func__, offset); + value = 0; + } break; } return (uint64_t)value; @@ -168,7 +177,13 @@ static void imx_usbphy_write(void *opaque, hwaddr offset, uint64_t value, s->usbphy[index - 3] ^= value; break; default: - /* Other registers are read-only */ + /* Other registers are read-only or do not exist */ + qemu_log_mask(LOG_GUEST_ERROR, + "%s: Write to %s USB PHY register 0x%" + HWADDR_PRIx "\n", + __func__, + index >= USBPHY_MAX ? 
"non-existing" : "read-only", + offset); break; } } diff --git a/hw/usb/libhw.c b/hw/usb/libhw.c index 9c33a1640f7ff21bef07ae5f97b52f8c5df6748e..f350eae443d6a179ba6e87371a28e89b35eed586 100644 --- a/hw/usb/libhw.c +++ b/hw/usb/libhw.c @@ -36,7 +36,8 @@ int usb_packet_map(USBPacket *p, QEMUSGList *sgl) while (len) { dma_addr_t xlen = len; - mem = dma_memory_map(sgl->as, base, &xlen, dir); + mem = dma_memory_map(sgl->as, base, &xlen, dir, + MEMTXATTRS_UNSPECIFIED); if (!mem) { goto err; } diff --git a/hw/usb/redirect.c b/hw/usb/redirect.c index 5f0ef9cb3b0fc9e5f6706413d6c79b6b11423dca..59cd3cd7c4a05fbf3efd0e0f82c2af421c5d1754 100644 --- a/hw/usb/redirect.c +++ b/hw/usb/redirect.c @@ -1437,8 +1437,10 @@ static void usbredir_realize(USBDevice *udev, Error **errp) } } - dev->chardev_close_bh = qemu_bh_new(usbredir_chardev_close_bh, dev); - dev->device_reject_bh = qemu_bh_new(usbredir_device_reject_bh, dev); + dev->chardev_close_bh = qemu_bh_new_guarded(usbredir_chardev_close_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); + dev->device_reject_bh = qemu_bh_new_guarded(usbredir_device_reject_bh, dev, + &DEVICE(dev)->mem_reentrancy_guard); dev->attach_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, usbredir_do_attach, dev); packet_id_queue_init(&dev->cancelled, dev, "cancelled"); diff --git a/hw/usb/xen-usb.c b/hw/usb/xen-usb.c index 0f7369e7ed64683d0dd2b7e98fe817f2ba6792aa..dec91294ad0b32f9371e0416311ca884be945a59 100644 --- a/hw/usb/xen-usb.c +++ b/hw/usb/xen-usb.c @@ -1021,7 +1021,8 @@ static void usbback_alloc(struct XenLegacyDevice *xendev) QTAILQ_INIT(&usbif->req_free_q); QSIMPLEQ_INIT(&usbif->hotplug_q); - usbif->bh = qemu_bh_new(usbback_bh, usbif); + usbif->bh = qemu_bh_new_guarded(usbback_bh, usbif, + &DEVICE(xendev)->mem_reentrancy_guard); } static int usbback_free(struct XenLegacyDevice *xendev) diff --git a/hw/vfio/common.c b/hw/vfio/common.c index 080046e3f511dda693025797df5391f925db476d..6cb91e7ffd264434e7b696ce35558e9d27efcf8c 100644 --- a/hw/vfio/common.c +++ b/hw/vfio/common.c @@ -445,6 +445,29 @@ unmap_exit: return ret; } +static VFIODMARange *vfio_lookup_match_range(VFIOContainer *container, + hwaddr start_addr, hwaddr size) +{ + VFIODMARange *qrange; + + QLIST_FOREACH(qrange, &container->dma_list, next) { + if (qrange->iova == start_addr && qrange->size == size) { + return qrange; + } + } + return NULL; +} + +static void vfio_dma_range_init_dirty_bitmap(VFIODMARange *qrange) +{ + uint64_t pages, size; + + pages = REAL_HOST_PAGE_ALIGN(qrange->size) / qemu_real_host_page_size; + size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / BITS_PER_BYTE; + + qrange->bitmap = g_malloc0(size); +} + /* * DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86 */ @@ -458,12 +481,29 @@ static int vfio_dma_unmap(VFIOContainer *container, .iova = iova, .size = size, }; + VFIODMARange *qrange; if (iotlb && container->dirty_pages_supported && vfio_devices_all_running_and_saving(container)) { return vfio_dma_unmap_bitmap(container, iova, size, iotlb); } + /* + * unregister the DMA range + * + * It seems that the memory layer will give us the same section as the one + * used in region_add(). Otherwise it'll be complicated to manipulate the + * bitmap across region_{add,del}. Is there any guarantee? + * + * But there is really not such a restriction on the kernel interface + * (VFIO_IOMMU_DIRTY_PAGES_FLAG_{UN}MAP_DMA, etc). 
+ */ + qrange = vfio_lookup_match_range(container, iova, size); + assert(qrange); + g_free(qrange->bitmap); + QLIST_REMOVE(qrange, next); + g_free(qrange); + while (ioctl(container->fd, VFIO_IOMMU_UNMAP_DMA, &unmap)) { /* * The type1 backend has an off-by-one bug in the kernel (71a7d3d78e3c @@ -500,6 +540,14 @@ static int vfio_dma_map(VFIOContainer *container, hwaddr iova, .iova = iova, .size = size, }; + VFIODMARange *qrange; + + qrange = g_malloc0(sizeof(*qrange)); + qrange->iova = iova; + qrange->size = size; + QLIST_INSERT_HEAD(&container->dma_list, qrange, next); + /* XXX allocate the dirty bitmap on demand */ + vfio_dma_range_init_dirty_bitmap(qrange); if (!readonly) { map.flags |= VFIO_DMA_MAP_FLAG_WRITE; @@ -1256,13 +1304,20 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, { struct vfio_iommu_type1_dirty_bitmap *dbitmap; struct vfio_iommu_type1_dirty_bitmap_get *range; + VFIODMARange *qrange; uint64_t pages; int ret; + qrange = vfio_lookup_match_range(container, iova, size); + /* the same as vfio_dma_unmap() */ + assert(qrange); + dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); - dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; + dbitmap->flags = container->dirty_log_manual_clear ? + VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP_NOCLEAR : + VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP; range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; range->iova = iova; range->size = size; @@ -1277,11 +1332,8 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, pages = REAL_HOST_PAGE_ALIGN(range->size) / qemu_real_host_page_size; range->bitmap.size = ROUND_UP(pages, sizeof(__u64) * BITS_PER_BYTE) / BITS_PER_BYTE; - range->bitmap.data = g_try_malloc0(range->bitmap.size); - if (!range->bitmap.data) { - ret = -ENOMEM; - goto err_out; - } + + range->bitmap.data = (__u64 *)qrange->bitmap; ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); if (ret) { @@ -1297,7 +1349,6 @@ static int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova, trace_vfio_get_dirty_bitmap(container->fd, range->iova, range->size, range->bitmap.size, ram_addr); err_out: - g_free(range->bitmap.data); g_free(dbitmap); return ret; @@ -1442,6 +1493,141 @@ static void vfio_listener_log_sync(MemoryListener *listener, } } +/* + * I'm not sure if there's any alignment requirement for the CLEAR_BITMAP + * ioctl. But copy from kvm side and align {start, size} with 64 pages. + * + * I think the code can be simplified a lot if no alignment requirement. + */ +#define VFIO_CLEAR_LOG_SHIFT 6 +#define VFIO_CLEAR_LOG_ALIGN (qemu_real_host_page_size << VFIO_CLEAR_LOG_SHIFT) +#define VFIO_CLEAR_LOG_MASK (-VFIO_CLEAR_LOG_ALIGN) + +static int vfio_log_clear_one_range(VFIOContainer *container, + VFIODMARange *qrange, uint64_t start, uint64_t size) +{ + struct vfio_iommu_type1_dirty_bitmap *dbitmap; + struct vfio_iommu_type1_dirty_bitmap_get *range; + + dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range)); + + dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range); + dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_CLEAR_BITMAP; + range = (struct vfio_iommu_type1_dirty_bitmap_get *)&dbitmap->data; + + /* + * Now let's deal with the actual bitmap, which is almost the same + * as the kvm side. 
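+     * (kvm_log_clear_one_slot() in accel/kvm/kvm-all.c is the model
+     * for the start/size alignment below.)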
+ */ + uint64_t end, bmap_start, start_delta, bmap_npages; + unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size; + int ret; + + bmap_start = start & VFIO_CLEAR_LOG_MASK; + start_delta = start - bmap_start; + bmap_start /= psize; + + bmap_npages = DIV_ROUND_UP(size + start_delta, VFIO_CLEAR_LOG_ALIGN) + << VFIO_CLEAR_LOG_SHIFT; + end = qrange->size / psize; + if (bmap_npages > end - bmap_start) { + bmap_npages = end - bmap_start; + } + start_delta /= psize; + + if (start_delta) { + bmap_clear = bitmap_new(bmap_npages); + bitmap_copy_with_src_offset(bmap_clear, qrange->bitmap, + bmap_start, start_delta + size / psize); + bitmap_clear(bmap_clear, 0, start_delta); + range->bitmap.data = (__u64 *)bmap_clear; + } else { + range->bitmap.data = (__u64 *)(qrange->bitmap + BIT_WORD(bmap_start)); + } + + range->iova = qrange->iova + bmap_start * psize; + range->size = bmap_npages * psize; + range->bitmap.size = ROUND_UP(bmap_npages, sizeof(__u64) * BITS_PER_BYTE) / + BITS_PER_BYTE; + range->bitmap.pgsize = qemu_real_host_page_size; + + ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap); + if (ret) { + error_report("Failed to clear dirty log for iova: 0x%"PRIx64 + " size: 0x%"PRIx64" err: %d", (uint64_t)range->iova, + (uint64_t)range->size, errno); + goto err_out; + } + + bitmap_clear(qrange->bitmap, bmap_start + start_delta, size / psize); +err_out: + g_free(bmap_clear); + g_free(dbitmap); + return 0; +} + +static int vfio_physical_log_clear(VFIOContainer *container, + MemoryRegionSection *section) +{ + uint64_t start, size, offset, count; + VFIODMARange *qrange; + int ret = 0; + + if (!container->dirty_log_manual_clear) { + /* No need to do explicit clear */ + return ret; + } + + start = section->offset_within_address_space; + size = int128_get64(section->size); + + if (!size) { + return ret; + } + + QLIST_FOREACH(qrange, &container->dma_list, next) { + /* + * Discard ranges that do not overlap the section (e.g., the + * Memory BAR regions of the device) + */ + if (qrange->iova > start + size - 1 || + start > qrange->iova + qrange->size - 1) { + continue; + } + + if (start >= qrange->iova) { + /* The range starts before section or is aligned to it. */ + offset = start - qrange->iova; + count = MIN(qrange->size - offset, size); + } else { + /* The range starts after section. 
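+             * Clear from the beginning of the range, limited to the
+             * part that overlaps the section.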
*/ + offset = 0; + count = MIN(qrange->size, size - (qrange->iova - start)); + } + ret = vfio_log_clear_one_range(container, qrange, offset, count); + if (ret < 0) { + break; + } + } + + return ret; +} + +static void vfio_listener_log_clear(MemoryListener *listener, + MemoryRegionSection *section) +{ + VFIOContainer *container = container_of(listener, VFIOContainer, listener); + + if (vfio_listener_skipped_section(section) || + !container->dirty_pages_supported) { + return; + } + + if (vfio_devices_all_dirty_tracking(container)) { + vfio_physical_log_clear(container, section); + } +} + static const MemoryListener vfio_memory_listener = { .name = "vfio", .region_add = vfio_listener_region_add, @@ -1449,6 +1635,7 @@ static const MemoryListener vfio_memory_listener = { .log_global_start = vfio_listener_log_global_start, .log_global_stop = vfio_listener_log_global_stop, .log_sync = vfio_listener_log_sync, + .log_clear = vfio_listener_log_clear, }; static void vfio_listener_release(VFIOContainer *container) @@ -1876,7 +2063,7 @@ static int vfio_get_iommu_type(VFIOContainer *container, static int vfio_init_container(VFIOContainer *container, int group_fd, Error **errp) { - int iommu_type, ret; + int iommu_type, dirty_log_manual_clear, ret; iommu_type = vfio_get_iommu_type(container, errp); if (iommu_type < 0) { @@ -1905,6 +2092,13 @@ static int vfio_init_container(VFIOContainer *container, int group_fd, } container->iommu_type = iommu_type; + + dirty_log_manual_clear = ioctl(container->fd, VFIO_CHECK_EXTENSION, + VFIO_DIRTY_LOG_MANUAL_CLEAR); + if (dirty_log_manual_clear) { + container->dirty_log_manual_clear = dirty_log_manual_clear; + } + return 0; } @@ -2061,6 +2255,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as, QLIST_INIT(&container->giommu_list); QLIST_INIT(&container->hostwin_list); QLIST_INIT(&container->vrdl_list); + QLIST_INIT(&container->dma_list); ret = vfio_init_container(container, group->fd, errp); if (ret) { diff --git a/hw/vfio/migration.c b/hw/vfio/migration.c index ff6b45de6b557eeab37fe11bfcef8eefba9a90db..e69b5f2e42681b6086e915e03d2e6019db91b77c 100644 --- a/hw/vfio/migration.c +++ b/hw/vfio/migration.c @@ -805,6 +805,8 @@ static int vfio_migration_init(VFIODevice *vbasedev, } vbasedev->migration = g_new0(VFIOMigration, 1); + vbasedev->migration->device_state = VFIO_DEVICE_STATE_RUNNING; + vbasedev->migration->vm_running = runstate_is_running(); ret = vfio_region_setup(obj, vbasedev, &vbasedev->migration->region, info->index, "migration"); diff --git a/hw/vfio/pci-quirks.c b/hw/vfio/pci-quirks.c index 0cf69a8c6d6f4d46b0c59efd675c25e4584dcef4..d83ed21387d79053dcc2d14bda9d6f4d559013df 100644 --- a/hw/vfio/pci-quirks.c +++ b/hw/vfio/pci-quirks.c @@ -1209,6 +1209,260 @@ int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev, return 0; } +#define PCI_VENDOR_ID_HUAWEI 0x19e5 +#define PCI_DEVICE_ID_ASCEND910 0xd801 +#define PCI_DEVICE_ID_ASCEND910B 0xd802 +#define PCI_DEVICE_ID_ASCEND710 0xd500 +#define PCI_DEVICE_ID_ASCEND310B 0xd105 +#define PCI_DEVICE_ID_ASCEND310 0xd100 +#define PCI_SUB_DEVICE_ID_ASCEND710_1P_MIN 0x100 +#define PCI_SUB_DEVICE_ID_ASCEND710_1P_MAX 0x10f +#define PCI_SUB_DEVICE_ID_ASCEND710_2P_MIN 0x110 +#define PCI_SUB_DEVICE_ID_ASCEND710_2P_MAX 0x11f +#define ASCEND910_XLOADER_SIZE 4 +#define ASCEND910_XLOADER_OFFSET 0x80400 +#define ASCEND910B_XLOADER_SIZE 4 +#define ASCEND910B_XLOADER_OFFSET 0x18208430 +#define ASCEND710_2P_BASE (128 * 1024 * 1024) +#define ASCEND710_1P_DEVNUM 1 +#define ASCEND710_2P_DEVNUM 2 +#define 
ASCEND710_XLOADER_SIZE 4 +#define ASCEND710_XLOADER_OFFSET 0x100430 +#define ASCEND310B_XLOADER_SIZE 4 +#define ASCEND310B_XLOADER_OFFSET 0x4430 +#define ASCEND310_XLOADER_SIZE 4 +#define ASCEND310_XLOADER_OFFSET 0x400 + +typedef struct VFIOAscendBarQuirk { + struct VFIOPCIDevice *vdev; + pcibus_t offset; + uint8_t bar; + MemoryRegion *mem; +} VFIOAscendBarQuirk; + +static uint64_t vfio_ascend_quirk_read(void *opaque, + hwaddr addr, unsigned size) +{ + VFIOAscendBarQuirk *quirk = opaque; + VFIOPCIDevice *vdev = quirk->vdev; + + qemu_log("read RO region! addr=0x%" HWADDR_PRIx ", size=%d\n", + addr + quirk->offset, size); + + return vfio_region_read(&vdev->bars[quirk->bar].region, + addr + quirk->offset, size); +} + +static void vfio_ascend_quirk_write(void *opaque, hwaddr addr, + uint64_t data, unsigned size) +{ + VFIOAscendBarQuirk *quirk = opaque; + + qemu_log("modifying RO region is not allowed! addr=0x%" + HWADDR_PRIx ", data=0x%" PRIx64 ", size=%d\n", + addr + quirk->offset, data, size); +} + +static const MemoryRegionOps vfio_ascend_intercept_regs_quirk = { + .read = vfio_ascend_quirk_read, + .write = vfio_ascend_quirk_write, + .endianness = DEVICE_LITTLE_ENDIAN, +}; + +static void vfio_probe_ascend910b_bar2_quirk(VFIOPCIDevice *vdev, int nr) +{ + VFIOQuirk *quirk; + VFIOAscendBarQuirk *bar2_quirk; + + if (vdev->vendor_id != PCI_VENDOR_ID_HUAWEI || nr != 2 + || vdev->device_id != PCI_DEVICE_ID_ASCEND910B) { + return; + } + + quirk = g_malloc0(sizeof(*quirk)); + quirk->nr_mem = 1; + quirk->mem = g_new0(MemoryRegion, quirk->nr_mem); + bar2_quirk = quirk->data = g_new0(typeof(*bar2_quirk), quirk->nr_mem); + bar2_quirk[0].vdev = vdev; + bar2_quirk[0].offset = ASCEND910B_XLOADER_OFFSET; + bar2_quirk[0].bar = nr; + + /* intercept w/r to the xloader-updating register, + * so the vm can't enable xloader-updating + */ + memory_region_init_io(&quirk->mem[0], OBJECT(vdev), + &vfio_ascend_intercept_regs_quirk, + &bar2_quirk[0], + "vfio-ascend910b-bar2-intercept-regs-quirk", + ASCEND910B_XLOADER_SIZE); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + bar2_quirk[0].offset, + &quirk->mem[0], 1); + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); +} + +static void vfio_probe_ascend910_bar0_quirk(VFIOPCIDevice *vdev, int nr) +{ + VFIOQuirk *quirk; + VFIOAscendBarQuirk *bar0_quirk; + + if (vdev->vendor_id != PCI_VENDOR_ID_HUAWEI || nr != 0 || + vdev->device_id != PCI_DEVICE_ID_ASCEND910) { + return; + } + + quirk = g_malloc0(sizeof(*quirk)); + quirk->nr_mem = 1; + quirk->mem = g_new0(MemoryRegion, quirk->nr_mem); + bar0_quirk = quirk->data = g_new0(typeof(*bar0_quirk), quirk->nr_mem); + bar0_quirk[0].vdev = vdev; + bar0_quirk[0].offset = ASCEND910_XLOADER_OFFSET; + bar0_quirk[0].bar = nr; + + /* + * intercept w/r to the xloader-updating register, + * so the vm can't enable xloader-updating + */ + memory_region_init_io(&quirk->mem[0], OBJECT(vdev), + &vfio_ascend_intercept_regs_quirk, + &bar0_quirk[0], + "vfio-ascend910-bar0-intercept-regs-quirk", + ASCEND910_XLOADER_SIZE); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + bar0_quirk[0].offset, + &quirk->mem[0], 1); + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); +} + +static void vfio_probe_ascend710_bar2_quirk(VFIOPCIDevice *vdev, int nr) +{ + VFIOQuirk *quirk; + VFIOAscendBarQuirk *bar2_quirk; + int sub_device_id; + int devnum = 0; + + if (vdev->vendor_id != PCI_VENDOR_ID_HUAWEI || nr != 2 || + vdev->device_id != PCI_DEVICE_ID_ASCEND710) { + return; + } + + sub_device_id = 
pci_get_word(vdev->pdev.config + PCI_SUBSYSTEM_ID); + if (sub_device_id >= PCI_SUB_DEVICE_ID_ASCEND710_1P_MIN && + sub_device_id <= PCI_SUB_DEVICE_ID_ASCEND710_1P_MAX) { + devnum = ASCEND710_1P_DEVNUM; + } else if (sub_device_id >= PCI_SUB_DEVICE_ID_ASCEND710_2P_MIN && + sub_device_id <= PCI_SUB_DEVICE_ID_ASCEND710_2P_MAX) { + devnum = ASCEND710_2P_DEVNUM; + } + + if (devnum != ASCEND710_1P_DEVNUM && devnum != ASCEND710_2P_DEVNUM) { + return; + } + + quirk = g_malloc0(sizeof(*quirk)); + quirk->nr_mem = devnum; + quirk->mem = g_new0(MemoryRegion, quirk->nr_mem); + bar2_quirk = quirk->data = g_new0(typeof(*bar2_quirk), quirk->nr_mem); + bar2_quirk[0].vdev = vdev; + bar2_quirk[0].offset = ASCEND710_XLOADER_OFFSET; + bar2_quirk[0].bar = nr; + + /* + * intercept w/r to the xloader-updating register, + * so the vm can't enable xloader-updating + */ + memory_region_init_io(&quirk->mem[0], OBJECT(vdev), + &vfio_ascend_intercept_regs_quirk, + &bar2_quirk[0], + "vfio-ascend710-bar2-1p-intercept-regs-quirk", + ASCEND710_XLOADER_SIZE); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + bar2_quirk[0].offset, + &quirk->mem[0], 1); + + if (devnum == ASCEND710_2P_DEVNUM) { + bar2_quirk[1].vdev = vdev; + bar2_quirk[1].offset = (ASCEND710_2P_BASE + ASCEND710_XLOADER_OFFSET); + bar2_quirk[1].bar = nr; + + memory_region_init_io(&quirk->mem[1], OBJECT(vdev), + &vfio_ascend_intercept_regs_quirk, + &bar2_quirk[1], + "vfio-ascend710-bar2-2p-intercept-regs-quirk", + ASCEND710_XLOADER_SIZE); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + bar2_quirk[1].offset, + &quirk->mem[1], 1); + } + + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); +} + +static void vfio_probe_ascend310b_bar2_quirk(VFIOPCIDevice *vdev, int nr) +{ + VFIOQuirk *quirk; + VFIOAscendBarQuirk *bar2_quirk; + + if (vdev->vendor_id != PCI_VENDOR_ID_HUAWEI || nr != 2 + || vdev->device_id != PCI_DEVICE_ID_ASCEND310B) { + return; + } + + quirk = g_malloc0(sizeof(*quirk)); + quirk->nr_mem = 1; + quirk->mem = g_new0(MemoryRegion, quirk->nr_mem); + bar2_quirk = quirk->data = g_new0(typeof(*bar2_quirk), quirk->nr_mem); + bar2_quirk[0].vdev = vdev; + bar2_quirk[0].offset = ASCEND310B_XLOADER_OFFSET; + bar2_quirk[0].bar = nr; + + /* intercept w/r to the xloader-updating register, + * so the vm can't enable xloader-updating + */ + memory_region_init_io(&quirk->mem[0], OBJECT(vdev), + &vfio_ascend_intercept_regs_quirk, + &bar2_quirk[0], + "vfio-ascend310b-bar2-intercept-regs-quirk", + ASCEND310B_XLOADER_SIZE); + memory_region_add_subregion_overlap(vdev->bars[nr].region.mem, + bar2_quirk[0].offset, + &quirk->mem[0], 1); + QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next); +} + +static void vfio_probe_ascend310_bar4_quirk(VFIOPCIDevice *vdev, int nr) +{ + VFIOQuirk *quirk; + VFIOAscendBarQuirk *bar4_quirk; + + if (vdev->vendor_id != PCI_VENDOR_ID_HUAWEI || nr != 4 || + vdev->device_id != PCI_DEVICE_ID_ASCEND310) { + return; + } + + quirk = g_malloc0(sizeof(*quirk)); + quirk->nr_mem = 1; + quirk->mem = g_new0(MemoryRegion, quirk->nr_mem); + bar4_quirk = quirk->data = g_new0(typeof(*bar4_quirk), quirk->nr_mem); + bar4_quirk[0].vdev = vdev; + bar4_quirk[0].offset = ASCEND310_XLOADER_OFFSET; + bar4_quirk[0].bar = nr; + + /* + * intercept w/r to the xloader-updating register, + * so the vm can't enable xloader-updating + */ + memory_region_init_io(&quirk->mem[0], OBJECT(vdev), + &vfio_ascend_intercept_regs_quirk, + &bar4_quirk[0], + "vfio-ascend310-bar4-intercept-regs-quirk", + ASCEND310_XLOADER_SIZE); + 
memory_region_add_subregion_overlap(vdev->bars[nr].region.mem,
+                                        bar4_quirk[0].offset,
+                                        &quirk->mem[0], 1);
+    QLIST_INSERT_HEAD(&vdev->bars[nr].quirks, quirk, next);
+}
+
 /*
  * Common quirk probe entry points.
  */
@@ -1261,6 +1515,11 @@ void vfio_bar_quirk_setup(VFIOPCIDevice *vdev, int nr)
 #ifdef CONFIG_VFIO_IGD
     vfio_probe_igd_bar4_quirk(vdev, nr);
 #endif
+    vfio_probe_ascend910b_bar2_quirk(vdev, nr);
+    vfio_probe_ascend910_bar0_quirk(vdev, nr);
+    vfio_probe_ascend710_bar2_quirk(vdev, nr);
+    vfio_probe_ascend310b_bar2_quirk(vdev, nr);
+    vfio_probe_ascend310_bar4_quirk(vdev, nr);
 }

 void vfio_bar_quirk_exit(VFIOPCIDevice *vdev, int nr)
@@ -1490,6 +1749,9 @@ void vfio_setup_resetfn_quirk(VFIOPCIDevice *vdev)
  * +---------------------------------+---------------------------------+
  *
  * https://lists.gnu.org/archive/html/qemu-devel/2017-08/pdfUda5iEpgOS.pdf
+ *
+ * Specification for Turing and later GPU architectures:
+ * https://lists.gnu.org/archive/html/qemu-devel/2023-06/pdf142OR4O4c2.pdf
  */
 static void get_nv_gpudirect_clique_id(Object *obj, Visitor *v,
                                        const char *name, void *opaque,
@@ -1527,10 +1789,18 @@ const PropertyInfo qdev_prop_nv_gpudirect_clique = {
     .set = set_nv_gpudirect_clique_id,
 };

+static bool is_valid_std_cap_offset(uint8_t pos)
+{
+    return (pos >= PCI_STD_HEADER_SIZEOF &&
+            pos <= (PCI_CFG_SPACE_SIZE - PCI_CAP_SIZEOF));
+}
+
 static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
 {
     PCIDevice *pdev = &vdev->pdev;
-    int ret, pos = 0xC8;
+    int ret, pos;
+    bool c8_conflict = false, d4_conflict = false;
+    uint8_t tmp;

     if (vdev->nv_gpudirect_clique == 0xFF) {
         return 0;
@@ -1547,6 +1817,40 @@ static int vfio_add_nv_gpudirect_cap(VFIOPCIDevice *vdev, Error **errp)
         return -EINVAL;
     }

+    /*
+     * Per the updated specification above, it is recommended to use offset
+     * D4h for Turing and later GPU architectures due to a conflict with the
+     * MSI-X capability at C8h. We have no way to determine the GPU
+     * architecture; instead we walk the capability chain to mark conflicts
+     * and choose an offset, or report an error, based on the result.
+     *
+     * NB. Cap list head in pdev->config is already cleared, read from device.
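+     *
+     * For example, a chain 0x60 -> 0xC8 -> 0x00 marks C8h as conflicting
+     * and falls back to D4h, while a chain that already contains both C8h
+     * and D4h leaves no known-safe offset and the function errors out.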
+ */ + ret = pread(vdev->vbasedev.fd, &tmp, 1, + vdev->config_offset + PCI_CAPABILITY_LIST); + if (ret != 1 || !is_valid_std_cap_offset(tmp)) { + error_setg(errp, "NVIDIA GPUDirect Clique ID: error getting cap list"); + return -EINVAL; + } + + do { + if (tmp == 0xC8) { + c8_conflict = true; + } else if (tmp == 0xD4) { + d4_conflict = true; + } + tmp = pdev->config[tmp + PCI_CAP_LIST_NEXT]; + } while (is_valid_std_cap_offset(tmp)); + + if (!c8_conflict) { + pos = 0xC8; + } else if (!d4_conflict) { + pos = 0xD4; + } else { + error_setg(errp, "NVIDIA GPUDirect Clique ID: invalid config space"); + return -EINVAL; + } + ret = pci_add_capability(pdev, PCI_CAP_ID_VNDR, pos, 8, errp); if (ret < 0) { error_prepend(errp, "Failed to add NVIDIA GPUDirect cap: "); diff --git a/hw/vfio/pci.c b/hw/vfio/pci.c index 7b45353ce27ffb17d2ef4bcd7bdea18535f06d19..b085389ff85f923faae7726304c51d9c90bdb44d 100644 --- a/hw/vfio/pci.c +++ b/hw/vfio/pci.c @@ -3112,7 +3112,9 @@ static void vfio_realize(PCIDevice *pdev, Error **errp) out_deregister: pci_device_set_intx_routing_notifier(&vdev->pdev, NULL); - kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); + if (vdev->irqchip_change_notifier.notify) { + kvm_irqchip_remove_change_notifier(&vdev->irqchip_change_notifier); + } out_teardown: vfio_teardown_msi(vdev); vfio_bars_exit(vdev); diff --git a/hw/vfio/trace-events b/hw/vfio/trace-events index 0ef1b5f4a65ff38171380c91877002d13035bc34..f4b74a3e81260972b329aafdf4ad30b7ff856752 100644 --- a/hw/vfio/trace-events +++ b/hw/vfio/trace-events @@ -116,7 +116,7 @@ vfio_region_mmaps_set_enabled(const char *name, bool enabled) "Region %s mmaps e vfio_region_unmap(const char *name, unsigned long offset, unsigned long end) "Region %s unmap [0x%lx - 0x%lx]" vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries" vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]" -vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%0x8" +vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x" vfio_dma_unmap_overflow_workaround(void) "" # platform.c diff --git a/hw/virtio/Kconfig b/hw/virtio/Kconfig index c144d42f9bd06918db694fdb0bf1c85ea0464b35..724eb58a32378d6f7da561d2641a5bc3ffccad96 100644 --- a/hw/virtio/Kconfig +++ b/hw/virtio/Kconfig @@ -68,3 +68,8 @@ config VHOST_USER_RNG bool default y depends on VIRTIO && VHOST_USER + +config VHOST_VDPA_DEV + bool + default y + depends on VIRTIO && VHOST_VDPA && LINUX diff --git a/hw/virtio/meson.build b/hw/virtio/meson.build index 521f7d64a86aea2bd8e0325298f7968e2790ca5e..94a030f3291777ea6f2952461aff27cfb23c9881 100644 --- a/hw/virtio/meson.build +++ b/hw/virtio/meson.build @@ -11,9 +11,9 @@ softmmu_ss.add(when: 'CONFIG_ALL', if_true: files('vhost-stub.c')) virtio_ss = ss.source_set() virtio_ss.add(files('virtio.c')) -virtio_ss.add(when: 'CONFIG_VHOST', if_true: files('vhost.c', 'vhost-backend.c')) +virtio_ss.add(when: 'CONFIG_VHOST', if_true: files('vhost.c', 'vhost-backend.c', 'vhost-iova-tree.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER', if_true: files('vhost-user.c')) -virtio_ss.add(when: 'CONFIG_VHOST_VDPA', if_true: files('vhost-vdpa.c')) +virtio_ss.add(when: 'CONFIG_VHOST_VDPA', if_true: files('vhost-shadow-virtqueue.c', 'vhost-vdpa.c')) virtio_ss.add(when: 'CONFIG_VIRTIO_BALLOON', if_true: files('virtio-balloon.c')) virtio_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', 
if_true: files('virtio-crypto.c')) virtio_ss.add(when: ['CONFIG_VIRTIO_CRYPTO', 'CONFIG_VIRTIO_PCI'], if_true: files('virtio-crypto-pci.c')) @@ -29,6 +29,7 @@ virtio_ss.add(when: 'CONFIG_VHOST_USER_I2C', if_true: files('vhost-user-i2c.c')) virtio_ss.add(when: ['CONFIG_VIRTIO_PCI', 'CONFIG_VHOST_USER_I2C'], if_true: files('vhost-user-i2c-pci.c')) virtio_ss.add(when: 'CONFIG_VHOST_USER_RNG', if_true: files('vhost-user-rng.c')) virtio_ss.add(when: ['CONFIG_VHOST_USER_RNG', 'CONFIG_VIRTIO_PCI'], if_true: files('vhost-user-rng-pci.c')) +virtio_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev.c', 'vdpa-dev-mig.c')) virtio_pci_ss = ss.source_set() virtio_pci_ss.add(when: 'CONFIG_VHOST_VSOCK', if_true: files('vhost-vsock-pci.c')) @@ -49,6 +50,7 @@ virtio_pci_ss.add(when: 'CONFIG_VIRTIO_SERIAL', if_true: files('virtio-serial-pc virtio_pci_ss.add(when: 'CONFIG_VIRTIO_PMEM', if_true: files('virtio-pmem-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_IOMMU', if_true: files('virtio-iommu-pci.c')) virtio_pci_ss.add(when: 'CONFIG_VIRTIO_MEM', if_true: files('virtio-mem-pci.c')) +virtio_pci_ss.add(when: 'CONFIG_VHOST_VDPA_DEV', if_true: files('vdpa-dev-pci.c')) virtio_ss.add_all(when: 'CONFIG_VIRTIO_PCI', if_true: virtio_pci_ss) diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events index 650e521e3518e3ea492e2aef3db6e9ae6286b5bc..0396518241b2c2d410b52d9ca4b48a547b384e9d 100644 --- a/hw/virtio/trace-events +++ b/hw/virtio/trace-events @@ -8,6 +8,10 @@ vhost_region_add_section_aligned(const char *name, uint64_t gpa, uint64_t size, vhost_section(const char *name) "%s" vhost_reject_section(const char *name, int d) "%s:%d" vhost_iotlb_miss(void *dev, int step) "%p step %d" +vhost_dev_cleanup(void *dev) "%p" +vhost_dev_start(void *dev, const char *name, bool vrings) "%p:%s vrings:%d" +vhost_dev_stop(void *dev, const char *name, bool vrings) "%p:%s vrings:%d" + # vhost-user.c vhost_user_postcopy_end_entry(void) "" @@ -21,10 +25,14 @@ vhost_user_set_mem_table_withfd(int index, const char *name, uint64_t memory_siz vhost_user_postcopy_waker(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64 vhost_user_postcopy_waker_found(uint64_t client_addr) "0x%"PRIx64 vhost_user_postcopy_waker_nomatch(const char *rb, uint64_t rb_offset) "%s + 0x%"PRIx64 +vhost_user_read(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32"" +vhost_user_write(uint32_t req, uint32_t flags) "req:%d flags:0x%"PRIx32"" # vhost-vdpa.c -vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 -vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 +vhost_vdpa_dma_map(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint64_t uaddr, uint8_t perm, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" uaddr: 0x%"PRIx64" perm: 0x%"PRIx8" type: %"PRIu8 +vhost_vdpa_dma_unmap(void *vdpa, int fd, uint32_t msg_type, uint32_t asid, uint64_t iova, uint64_t size, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" asid: %"PRIu32" iova: 0x%"PRIx64" size: 0x%"PRIx64" type: %"PRIu8 +vhost_vdpa_listener_begin_batch(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 
+vhost_vdpa_listener_commit(void *v, int fd, uint32_t msg_type, uint8_t type) "vdpa:%p fd: %d msg_type: %"PRIu32" type: %"PRIu8 vhost_vdpa_listener_region_add(void *vdpa, uint64_t iova, uint64_t llend, void *vaddr, bool readonly) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64" vaddr: %p read-only: %d" vhost_vdpa_listener_region_del(void *vdpa, uint64_t iova, uint64_t llend) "vdpa: %p iova 0x%"PRIx64" llend 0x%"PRIx64 vhost_vdpa_add_status(void *dev, uint8_t status) "dev: %p status: 0x%"PRIx8 @@ -53,6 +61,7 @@ vhost_vdpa_get_features(void *dev, uint64_t features) "dev: %p features: 0x%"PRI vhost_vdpa_set_owner(void *dev) "dev: %p" vhost_vdpa_vq_get_addr(void *dev, void *vq, uint64_t desc_user_addr, uint64_t avail_user_addr, uint64_t used_user_addr) "dev: %p vq: %p desc_user_addr: 0x%"PRIx64" avail_user_addr: 0x%"PRIx64" used_user_addr: 0x%"PRIx64 vhost_vdpa_get_iova_range(void *dev, uint64_t first, uint64_t last) "dev: %p first: 0x%"PRIx64" last: 0x%"PRIx64 +vhost_vdpa_set_config_call(void *dev, int fd)"dev: %p fd: %d" # virtio.c virtqueue_alloc_element(void *elem, size_t sz, unsigned in_num, unsigned out_num) "elem %p size %zd in_num %u out_num %u" diff --git a/hw/virtio/vdpa-dev-mig.c b/hw/virtio/vdpa-dev-mig.c new file mode 100644 index 0000000000000000000000000000000000000000..c080f9d89b5bafe67745b8aecac1bd2d6e751658 --- /dev/null +++ b/hw/virtio/vdpa-dev-mig.c @@ -0,0 +1,355 @@ +/* + * Copyright (c) Huawei Technologies Co., Ltd. 2023. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#include +#include +#include "qemu/osdep.h" +#include "migration/misc.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/vdpa-dev.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "migration/register.h" +#include "migration/migration.h" +#include "qemu-common.h" +#include "sysemu/runstate.h" +#include "qemu/error-report.h" +#include "hw/virtio/vdpa-dev-mig.h" +#include "migration/qemu-file-types.h" +#include "qemu/main-loop.h" + +/* + * Flags used as delimiter: + * 0xffffffff => MSB 32-bit all 1s + * 0xef10 => emulated (virtual) function IO + * 0x0000 => 16-bits reserved for flags + */ +#define VDPA_MIG_FLAG_END_OF_STATE (0xffffffffef100001ULL) +#define VDPA_MIG_FLAG_DEV_CONFIG_STATE (0xffffffffef100002ULL) +#define VDPA_MIG_FLAG_DEV_SETUP_STATE (0xffffffffef100003ULL) + +static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request, + void *arg) +{ + struct vhost_vdpa *v = dev->opaque; + int fd = v->device_fd; + + if (dev->vhost_ops->backend_type != VHOST_BACKEND_TYPE_VDPA) { + error_report("backend type isn't VDPA. 
Operation not permitted!");
+        return -EPERM;
+    }
+
+    return ioctl(fd, request, arg);
+}
+
+static int vhost_vdpa_set_mig_state(struct vhost_dev *dev, uint8_t state)
+{
+    return vhost_vdpa_call(dev, VHOST_VDPA_SET_MIG_STATE, &state);
+}
+
+static int vhost_vdpa_dev_buffer_size(struct vhost_dev *dev, uint32_t *size)
+{
+    return vhost_vdpa_call(dev, VHOST_GET_DEV_BUFFER_SIZE, size);
+}
+
+static int vhost_vdpa_dev_buffer_save(struct vhost_dev *dev, QEMUFile *f)
+{
+    struct vhost_vdpa_config *config;
+    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
+    uint32_t buffer_size = 0;
+    int ret;
+
+    ret = vhost_vdpa_dev_buffer_size(dev, &buffer_size);
+    if (ret) {
+        error_report("get dev buffer size failed: %d", ret);
+        return ret;
+    }
+
+    qemu_put_be32(f, buffer_size);
+
+    config = g_malloc(buffer_size + config_size);
+    config->off = 0;
+    config->len = buffer_size;
+
+    ret = vhost_vdpa_call(dev, VHOST_GET_DEV_BUFFER, config);
+    if (ret) {
+        error_report("get dev buffer failed: %d", ret);
+        goto free;
+    }
+
+    qemu_put_buffer(f, config->buf, buffer_size);
+free:
+    g_free(config);
+
+    return ret;
+}
+
+static int vhost_vdpa_dev_buffer_load(struct vhost_dev *dev, QEMUFile *f)
+{
+    struct vhost_vdpa_config *config;
+    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
+    uint32_t buffer_size, recv_size;
+    int ret;
+
+    buffer_size = qemu_get_be32(f);
+
+    config = g_malloc(buffer_size + config_size);
+    config->off = 0;
+    config->len = buffer_size;
+
+    recv_size = qemu_get_buffer(f, config->buf, buffer_size);
+    if (recv_size != buffer_size) {
+        error_report("read dev mig buffer failed, buffer_size: %u, "
+                     "recv_size: %u", buffer_size, recv_size);
+        ret = -EINVAL;
+        goto free;
+    }
+
+    ret = vhost_vdpa_call(dev, VHOST_SET_DEV_BUFFER, config);
+    if (ret) {
+        error_report("set dev buffer failed: %d", ret);
+    }
+
+free:
+    g_free(config);
+
+    return ret;
+}
+
+static int vhost_vdpa_device_suspend(VhostVdpaDevice *vdpa)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(vdpa);
+
+    if (!vdpa->started || vdpa->suspended) {
+        return 0;
+    }
+
+    vdpa->suspended = true;
+
+    return vhost_dev_suspend(&vdpa->dev, vdev, false);
+}
+
+static int vhost_vdpa_device_resume(VhostVdpaDevice *vdpa)
+{
+    VirtIODevice *vdev = VIRTIO_DEVICE(vdpa);
+    MigrationIncomingState *mis = migration_incoming_get_current();
+    int ret;
+
+    if (!vdpa->started ||
+        (!vdpa->suspended && mis->state != RUN_STATE_RESTORE_VM)) {
+        return 0;
+    }
+
+    ret = vhost_dev_resume(&vdpa->dev, vdev, false);
+    if (ret < 0) {
+        return ret;
+    }
+
+    vdpa->suspended = false;
+    return ret;
+}
+
+static void vdpa_dev_migration_handle_incoming_bh(void *opaque)
+{
+    struct vhost_dev *hdev = opaque;
+    int ret;
+
+    /* Post-start the device; rollback is not supported if this fails!
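+     *
+     * On the destination this is presumably the last step of a three-step
+     * handshake: VDPA_DEVICE_PRE_START in vdpa_load_setup(),
+     * VHOST_VDPA_RESUME once the VM transitions to running, and finally
+     * VDPA_DEVICE_POST_START from this bottom half.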
*/ + ret = vhost_vdpa_set_mig_state(hdev, VDPA_DEVICE_POST_START); + if (ret) { + error_report("Failed to set state: POST_START\n"); + } +} + +static void vdpa_dev_vmstate_change(void *opaque, bool running, RunState state) +{ + VhostVdpaDevice *vdpa = VHOST_VDPA_DEVICE(opaque); + struct vhost_dev *hdev = &vdpa->dev; + int ret; + MigrationState *ms = migrate_get_current(); + MigrationIncomingState *mis = migration_incoming_get_current(); + + if (!running) { + if (state == RUN_STATE_FINISH_MIGRATE || state == RUN_STATE_PAUSED) { + ret = vhost_vdpa_device_suspend(vdpa); + if (ret) { + error_report("suspend vdpa device failed: %d\n", ret); + if (ms->migration_thread_running) { + migrate_fd_cancel(ms); + } + } + } + } else { + if (vdpa->suspended) { + ret = vhost_vdpa_device_resume(vdpa); + if (ret) { + error_report("vhost vdpa device resume failed: %d\n", ret); + } + } + + if (mis->state == RUN_STATE_RESTORE_VM) { + ret = vhost_vdpa_call(hdev, VHOST_VDPA_RESUME, NULL); + if (ret) { + error_report("migration dest resume device failed: %d\n", ret); + exit(EXIT_FAILURE); + } + /* post resume */ + mis->bh = qemu_bh_new(vdpa_dev_migration_handle_incoming_bh, + hdev); + qemu_bh_schedule(mis->bh); + } + } +} + +static int vdpa_save_setup(QEMUFile *f, void *opaque) +{ + qemu_put_be64(f, VDPA_MIG_FLAG_DEV_SETUP_STATE); + qemu_put_be64(f, VDPA_MIG_FLAG_END_OF_STATE); + + return qemu_file_get_error(f); +} + +static int vdpa_save_complete_precopy(QEMUFile *f, void *opaque) +{ + VhostVdpaDevice *vdev = VHOST_VDPA_DEVICE(opaque); + struct vhost_dev *hdev = &vdev->dev; + int ret; + + qemu_put_be64(f, VDPA_MIG_FLAG_DEV_CONFIG_STATE); + qemu_put_be16(f, (uint16_t)vdev->suspended); + if (vdev->suspended) { + ret = vhost_vdpa_dev_buffer_save(hdev, f); + if (ret) { + error_report("Save vdpa device buffer failed: %d\n", ret); + return ret; + } + } + qemu_put_be64(f, VDPA_MIG_FLAG_END_OF_STATE); + + return qemu_file_get_error(f); +} + +static int vdpa_load_state(QEMUFile *f, void *opaque, int version_id) +{ + VhostVdpaDevice *vdev = VHOST_VDPA_DEVICE(opaque); + struct vhost_dev *hdev = &vdev->dev; + + int ret; + uint64_t data; + uint16_t suspended; + + data = qemu_get_be64(f); + while (data != VDPA_MIG_FLAG_END_OF_STATE) { + if (data == VDPA_MIG_FLAG_DEV_SETUP_STATE) { + data = qemu_get_be64(f); + if (data == VDPA_MIG_FLAG_END_OF_STATE) { + return 0; + } else { + error_report("SETUP STATE: EOS not found 0x%lx\n", data); + return -EINVAL; + } + } else if (data == VDPA_MIG_FLAG_DEV_CONFIG_STATE) { + suspended = qemu_get_be16(f); + if (suspended) { + ret = vhost_vdpa_dev_buffer_load(hdev, f); + if (ret) { + error_report("fail to restore device buffer.\n"); + return ret; + } + } + } + + ret = qemu_file_get_error(f); + if (ret) { + error_report("qemu file error: %d\n", ret); + return ret; + } + data = qemu_get_be64(f); + } + + return 0; +} + +static int vdpa_load_setup(QEMUFile *f, void *opaque) +{ + VhostVdpaDevice *v = VHOST_VDPA_DEVICE(opaque); + struct vhost_dev *hdev = &v->dev; + int ret = 0; + + ret = vhost_vdpa_set_mig_state(hdev, VDPA_DEVICE_PRE_START); + if (ret) { + error_report("pre start device failed: %d\n", ret); + goto out; + } + + return qemu_file_get_error(f); +out: + return ret; +} + +static SaveVMHandlers savevm_vdpa_handlers = { + .save_setup = vdpa_save_setup, + .save_live_complete_precopy = vdpa_save_complete_precopy, + .load_state = vdpa_load_state, + .load_setup = vdpa_load_setup, +}; + +static void vdpa_migration_state_notifier(Notifier *notifier, void *data) +{ + MigrationState *s = data; + 
VhostVdpaDevice *vdev = container_of(notifier, + VhostVdpaDevice, + migration_state); + struct vhost_dev *hdev = &vdev->dev; + int ret; + + switch (s->state) { + case MIGRATION_STATUS_CANCELLING: + case MIGRATION_STATUS_CANCELLED: + case MIGRATION_STATUS_FAILED: + ret = vhost_vdpa_set_mig_state(hdev, VDPA_DEVICE_CANCEL); + if (ret) { + error_report("Failed to set state CANCEL\n"); + } + + break; + case MIGRATION_STATUS_COMPLETED: + default: + break; + } +} + +void vdpa_migration_register(VhostVdpaDevice *vdev) +{ + vdev->vmstate = qdev_add_vm_change_state_handler(DEVICE(vdev), + vdpa_dev_vmstate_change, + DEVICE(vdev)); + register_savevm_live("vdpa", -1, 1, + &savevm_vdpa_handlers, DEVICE(vdev)); + vdev->migration_state.notify = vdpa_migration_state_notifier; + add_migration_state_change_notifier(&vdev->migration_state); +} + +void vdpa_migration_unregister(VhostVdpaDevice *vdev) +{ + remove_migration_state_change_notifier(&vdev->migration_state); + unregister_savevm(NULL, "vdpa", DEVICE(vdev)); + qemu_del_vm_change_state_handler(vdev->vmstate); +} diff --git a/hw/virtio/vdpa-dev-pci.c b/hw/virtio/vdpa-dev-pci.c new file mode 100644 index 0000000000000000000000000000000000000000..5446e6b393ef3427d19a2f4cd9014c41fd21c11b --- /dev/null +++ b/hw/virtio/vdpa-dev-pci.c @@ -0,0 +1,102 @@ +/* + * Vhost Vdpa Device PCI Bindings + * + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved. + * + * Authors: + * Longpeng + * + * Largely based on the "vhost-user-blk-pci.c" and "vhost-user-blk.c" + * implemented by: + * Changpeng Liu + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + */ +#include "qemu/osdep.h" +#include +#include +#include "hw/virtio/virtio.h" +#include "hw/virtio/vdpa-dev.h" +#include "hw/pci/pci.h" +#include "hw/qdev-properties.h" +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/module.h" +#include "hw/virtio/virtio-pci.h" +#include "qom/object.h" + + +typedef struct VhostVdpaDevicePCI VhostVdpaDevicePCI; + +#define TYPE_VHOST_VDPA_DEVICE_PCI "vhost-vdpa-device-pci-base" +DECLARE_INSTANCE_CHECKER(VhostVdpaDevicePCI, VHOST_VDPA_DEVICE_PCI, + TYPE_VHOST_VDPA_DEVICE_PCI) + +struct VhostVdpaDevicePCI { + VirtIOPCIProxy parent_obj; + VhostVdpaDevice vdev; +}; + +static void vhost_vdpa_device_pci_instance_init(Object *obj) +{ + VhostVdpaDevicePCI *dev = VHOST_VDPA_DEVICE_PCI(obj); + + virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), + TYPE_VHOST_VDPA_DEVICE); + object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), + "bootindex"); +} + +static Property vhost_vdpa_device_pci_properties[] = { + DEFINE_PROP_END_OF_LIST(), +}; + +static int vhost_vdpa_device_pci_post_init(VhostVdpaDevice *v, Error **errp) +{ + VhostVdpaDevicePCI *dev = container_of(v, VhostVdpaDevicePCI, vdev); + VirtIOPCIProxy *vpci_dev = &dev->parent_obj; + + vpci_dev->class_code = virtio_pci_get_class_id(v->vdev_id); + vpci_dev->trans_devid = virtio_pci_get_trans_devid(v->vdev_id); + /* one for config vector */ + vpci_dev->nvectors = v->num_queues + 1; + + return 0; +} + +static void +vhost_vdpa_device_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) +{ + VhostVdpaDevicePCI *dev = VHOST_VDPA_DEVICE_PCI(vpci_dev); + + dev->vdev.post_init = vhost_vdpa_device_pci_post_init; + qdev_realize(DEVICE(&dev->vdev), BUS(&vpci_dev->bus), errp); +} + +static void vhost_vdpa_device_pci_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + 
VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); + + set_bit(DEVICE_CATEGORY_MISC, dc->categories); + device_class_set_props(dc, vhost_vdpa_device_pci_properties); + k->realize = vhost_vdpa_device_pci_realize; +} + +static const VirtioPCIDeviceTypeInfo vhost_vdpa_device_pci_info = { + .base_name = TYPE_VHOST_VDPA_DEVICE_PCI, + .generic_name = "vhost-vdpa-device-pci", + .transitional_name = "vhost-vdpa-device-pci-transitional", + .non_transitional_name = "vhost-vdpa-device-pci-non-transitional", + .instance_size = sizeof(VhostVdpaDevicePCI), + .instance_init = vhost_vdpa_device_pci_instance_init, + .class_init = vhost_vdpa_device_pci_class_init, +}; + +static void vhost_vdpa_device_pci_register(void) +{ + virtio_pci_types_register(&vhost_vdpa_device_pci_info); +} + +type_init(vhost_vdpa_device_pci_register); diff --git a/hw/virtio/vdpa-dev.c b/hw/virtio/vdpa-dev.c new file mode 100644 index 0000000000000000000000000000000000000000..fa3a4dc8bcdf4c859b43ff53bea16968e01a1505 --- /dev/null +++ b/hw/virtio/vdpa-dev.c @@ -0,0 +1,412 @@ +/* + * Vhost Vdpa Device + * + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved. + * + * Authors: + * Longpeng + * + * Largely based on the "vhost-user-blk-pci.c" and "vhost-user-blk.c" + * implemented by: + * Changpeng Liu + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. + */ +#include "qemu/osdep.h" +#include +#include +#include "qapi/error.h" +#include "qemu/error-report.h" +#include "qemu/cutils.h" +#include "hw/qdev-core.h" +#include "hw/qdev-properties.h" +#include "hw/qdev-properties-system.h" +#include "hw/virtio/vhost.h" +#include "hw/virtio/virtio.h" +#include "hw/virtio/virtio-bus.h" +#include "hw/virtio/virtio-access.h" +#include "hw/virtio/vdpa-dev.h" +#include "sysemu/sysemu.h" +#include "sysemu/runstate.h" +#include "hw/virtio/vdpa-dev-mig.h" +#include "migration/migration.h" +#include "exec/address-spaces.h" +#include "standard-headers/linux/virtio_ids.h" + +static void +vhost_vdpa_device_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq) +{ + /* Nothing to do */ +} + +static uint32_t +vhost_vdpa_device_get_u32(int fd, unsigned long int cmd, Error **errp) +{ + uint32_t val = (uint32_t)-1; + + if (ioctl(fd, cmd, &val) < 0) { + error_setg(errp, "vhost-vdpa-device: cmd 0x%lx failed: %s", + cmd, strerror(errno)); + } + + return val; +} + +static void vhost_vdpa_device_realize(DeviceState *dev, Error **errp) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VhostVdpaDevice *v = VHOST_VDPA_DEVICE(vdev); + struct vhost_vdpa_iova_range iova_range; + uint16_t max_queue_size; + struct vhost_virtqueue *vqs; + int i, ret; + + if (!v->vhostdev) { + error_setg(errp, "vhost-vdpa-device: vhostdev are missing"); + return; + } + + v->vhostfd = qemu_open(v->vhostdev, O_RDWR, errp); + if (*errp) { + return; + } + v->vdpa.device_fd = v->vhostfd; + + v->vdev_id = vhost_vdpa_device_get_u32(v->vhostfd, + VHOST_VDPA_GET_DEVICE_ID, errp); + if (*errp) { + goto out; + } + + max_queue_size = vhost_vdpa_device_get_u32(v->vhostfd, + VHOST_VDPA_GET_VRING_NUM, errp); + if (*errp) { + goto out; + } + + if (v->queue_size > max_queue_size) { + error_setg(errp, "vhost-vdpa-device: invalid queue_size: %u (max:%u)", + v->queue_size, max_queue_size); + goto out; + } else if (!v->queue_size) { + v->queue_size = max_queue_size; + } + + v->num_queues = vhost_vdpa_device_get_u32(v->vhostfd, + VHOST_VDPA_GET_VQS_COUNT, errp); + if (*errp) { + goto out; + } + + if (!v->num_queues || 
v->num_queues > VIRTIO_QUEUE_MAX) { + error_setg(errp, "invalid number of virtqueues: %u (max:%u)", + v->num_queues, VIRTIO_QUEUE_MAX); + goto out; + } + + v->dev.nvqs = v->num_queues; + vqs = g_new0(struct vhost_virtqueue, v->dev.nvqs); + v->dev.vqs = vqs; + v->dev.vq_index = 0; + v->dev.vq_index_end = v->dev.nvqs; + v->dev.backend_features = 0; + v->started = false; + + ret = vhost_vdpa_get_iova_range(v->vhostfd, &iova_range); + if (ret < 0) { + error_setg(errp, "vhost-vdpa-device: get iova range failed: %s", + strerror(-ret)); + goto free_vqs; + } + v->vdpa.iova_range = iova_range; + + ret = vhost_dev_init(&v->dev, &v->vdpa, VHOST_BACKEND_TYPE_VDPA, 0, NULL); + if (ret < 0) { + error_setg(errp, "vhost-vdpa-device: vhost initialization failed: %s", + strerror(-ret)); + goto free_vqs; + } + + memory_listener_register(&v->vdpa.listener, &address_space_memory); + v->config_size = vhost_vdpa_device_get_u32(v->vhostfd, + VHOST_VDPA_GET_CONFIG_SIZE, + errp); + if (*errp) { + goto vhost_cleanup; + } + + /* + * Invoke .post_init() to initialize the transport-specific fields + * before calling virtio_init(). + */ + if (v->post_init && v->post_init(v, errp) < 0) { + goto vhost_cleanup; + } + + v->config = g_malloc0(v->config_size); + + ret = vhost_dev_get_config(&v->dev, v->config, v->config_size, NULL); + if (ret < 0) { + error_setg(errp, "vhost-vdpa-device: get config failed"); + goto free_config; + } + + virtio_init(vdev, "vhost-vdpa", v->vdev_id, v->config_size); + + v->virtqs = g_new0(VirtQueue *, v->dev.nvqs); + for (i = 0; i < v->dev.nvqs; i++) { + v->virtqs[i] = virtio_add_queue(vdev, v->queue_size, + vhost_vdpa_device_dummy_handle_output); + } + + vdpa_migration_register(v); + + return; + +free_config: + g_free(v->config); +vhost_cleanup: + memory_listener_unregister(&v->vdpa.listener); + vhost_dev_cleanup(&v->dev); +free_vqs: + g_free(vqs); +out: + qemu_close(v->vhostfd); + v->vhostfd = -1; +} + +static void vhost_vdpa_device_unrealize(DeviceState *dev) +{ + VirtIODevice *vdev = VIRTIO_DEVICE(dev); + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + int i; + + vdpa_migration_unregister(s); + virtio_set_status(vdev, 0); + + for (i = 0; i < s->num_queues; i++) { + virtio_delete_queue(s->virtqs[i]); + } + g_free(s->virtqs); + virtio_cleanup(vdev); + + g_free(s->config); + g_free(s->dev.vqs); + memory_listener_unregister(&s->vdpa.listener); + vhost_dev_cleanup(&s->dev); + qemu_close(s->vhostfd); + s->vhostfd = -1; +} + +static void +vhost_vdpa_device_get_config(VirtIODevice *vdev, uint8_t *config) +{ + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + uint8_t *new_config; + int ret; + + if (s->vdev_id != VIRTIO_ID_BLOCK) { + goto out; + } + + new_config = g_malloc0(s->config_size); + ret = vhost_dev_get_config(&s->dev, new_config, s->config_size, NULL); + if (ret < 0) { + error_report("vhost-vdpa-device: get config failed(%d)\n", ret); + goto free; + } + memcpy(s->config, new_config, s->config_size); +free: + g_free(new_config); +out: + memcpy(config, s->config, s->config_size); +} + +static void +vhost_vdpa_device_set_config(VirtIODevice *vdev, const uint8_t *config) +{ + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + int ret; + + ret = vhost_dev_set_config(&s->dev, s->config, 0, s->config_size, + VHOST_SET_CONFIG_TYPE_MASTER); + if (ret) { + error_report("set device config space failed"); + return; + } +} + +static uint64_t vhost_vdpa_device_get_features(VirtIODevice *vdev, + uint64_t features, + Error **errp) +{ + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + uint64_t backend_features = 
s->dev.features; + + if (!virtio_has_feature(features, VIRTIO_F_IOMMU_PLATFORM)) { + virtio_clear_feature(&backend_features, VIRTIO_F_IOMMU_PLATFORM); + } + + return backend_features; +} + +static int vhost_vdpa_device_start(VirtIODevice *vdev, Error **errp) +{ + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int i, ret; + + if (!k->set_guest_notifiers) { + error_setg(errp, "binding does not support guest notifiers"); + return -ENOSYS; + } + + ret = vhost_dev_enable_notifiers(&s->dev, vdev); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error enabling host notifiers"); + return ret; + } + + ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, true); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error binding guest notifier"); + goto err_host_notifiers; + } + + s->dev.acked_features = vdev->guest_features; + + ret = vhost_dev_start(&s->dev, vdev, false); + if (ret < 0) { + error_setg_errno(errp, -ret, "Error starting vhost"); + goto err_guest_notifiers; + } + s->started = true; + + /* + * guest_notifier_mask/pending not used yet, so just unmask + * everything here. virtio-pci will do the right thing by + * enabling/disabling irqfd. + */ + for (i = 0; i < s->dev.nvqs; i++) { + vhost_virtqueue_mask(&s->dev, vdev, i, false); + } + + return ret; + +err_guest_notifiers: + k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); +err_host_notifiers: + vhost_dev_disable_notifiers(&s->dev, vdev); + return ret; +} + +static void vhost_vdpa_device_stop(VirtIODevice *vdev) +{ + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + BusState *qbus = BUS(qdev_get_parent_bus(DEVICE(vdev))); + VirtioBusClass *k = VIRTIO_BUS_GET_CLASS(qbus); + int ret; + + if (!s->started) { + return; + } + s->started = false; + + if (!k->set_guest_notifiers) { + return; + } + + vhost_dev_stop(&s->dev, vdev, false); + + ret = k->set_guest_notifiers(qbus->parent, s->dev.nvqs, false); + if (ret < 0) { + error_report("vhost guest notifier cleanup failed: %d", ret); + return; + } + + vhost_dev_disable_notifiers(&s->dev, vdev); +} + +static void vhost_vdpa_device_set_status(VirtIODevice *vdev, uint8_t status) +{ + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(vdev); + bool should_start = virtio_device_started(vdev, status); + Error *local_err = NULL; + int ret; + + if (!vdev->vm_running) { + should_start = false; + } + + if (s->started == should_start || s->suspended) { + return; + } + + if (should_start) { + ret = vhost_vdpa_device_start(vdev, &local_err); + if (ret < 0) { + error_reportf_err(local_err, "vhost-vdpa-device: start failed: "); + } + } else { + vhost_vdpa_device_stop(vdev); + } +} + +static Property vhost_vdpa_device_properties[] = { + DEFINE_PROP_STRING("vhostdev", VhostVdpaDevice, vhostdev), + DEFINE_PROP_UINT16("queue-size", VhostVdpaDevice, queue_size, 0), + DEFINE_PROP_END_OF_LIST(), +}; + +static const VMStateDescription vmstate_vhost_vdpa_device = { + .name = "vhost-vdpa-device", + .unmigratable = 0, + .minimum_version_id = 1, + .version_id = 1, + .fields = (VMStateField[]) { + VMSTATE_VIRTIO_DEVICE, + VMSTATE_END_OF_LIST() + }, +}; + +static void vhost_vdpa_device_class_init(ObjectClass *klass, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(klass); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); + + device_class_set_props(dc, vhost_vdpa_device_properties); + dc->desc = "VDPA-based generic device assignment"; + dc->vmsd = &vmstate_vhost_vdpa_device; + set_bit(DEVICE_CATEGORY_MISC, 
dc->categories); + vdc->realize = vhost_vdpa_device_realize; + vdc->unrealize = vhost_vdpa_device_unrealize; + vdc->get_config = vhost_vdpa_device_get_config; + vdc->set_config = vhost_vdpa_device_set_config; + vdc->get_features = vhost_vdpa_device_get_features; + vdc->set_status = vhost_vdpa_device_set_status; +} + +static void vhost_vdpa_device_instance_init(Object *obj) +{ + VhostVdpaDevice *s = VHOST_VDPA_DEVICE(obj); + + device_add_bootindex_property(obj, &s->bootindex, "bootindex", + NULL, DEVICE(obj)); +} + +static const TypeInfo vhost_vdpa_device_info = { + .name = TYPE_VHOST_VDPA_DEVICE, + .parent = TYPE_VIRTIO_DEVICE, + .instance_size = sizeof(VhostVdpaDevice), + .class_init = vhost_vdpa_device_class_init, + .instance_init = vhost_vdpa_device_instance_init, +}; + +static void register_vhost_vdpa_device_type(void) +{ + type_register_static(&vhost_vdpa_device_info); +} + +type_init(register_vhost_vdpa_device_type); diff --git a/hw/virtio/vhost-backend.c b/hw/virtio/vhost-backend.c index b65f8f7e97bfc3e78128115b01836a1620342fa3..5787775c9131ad358b5332cef5fd4a07cd624784 100644 --- a/hw/virtio/vhost-backend.c +++ b/hw/virtio/vhost-backend.c @@ -20,6 +20,8 @@ #include #include +static unsigned int vhost_kernel_used_memslots; + static int vhost_kernel_call(struct vhost_dev *dev, unsigned long int request, void *arg) { @@ -58,7 +60,7 @@ static int vhost_kernel_memslots_limit(struct vhost_dev *dev) if (g_file_get_contents("/sys/module/vhost/parameters/max_mem_regions", &s, NULL, NULL)) { uint64_t val = g_ascii_strtoull(s, NULL, 10); - if (!((val == G_MAXUINT64 || !val) && errno)) { + if (val < INT_MAX && val > 0) { g_free(s); return val; } @@ -293,6 +295,16 @@ static void vhost_kernel_set_iotlb_callback(struct vhost_dev *dev, qemu_set_fd_handler((uintptr_t)dev->opaque, NULL, NULL, NULL); } +static void vhost_kernel_set_used_memslots(struct vhost_dev *dev) +{ + vhost_kernel_used_memslots = dev->mem->nregions; +} + +static unsigned int vhost_kernel_get_used_memslots(void) +{ + return vhost_kernel_used_memslots; +} + const VhostOps kernel_ops = { .backend_type = VHOST_BACKEND_TYPE_KERNEL, .vhost_backend_init = vhost_kernel_init, @@ -325,6 +337,8 @@ const VhostOps kernel_ops = { #endif /* CONFIG_VHOST_VSOCK */ .vhost_set_iotlb_callback = vhost_kernel_set_iotlb_callback, .vhost_send_device_iotlb_msg = vhost_kernel_send_device_iotlb_msg, + .vhost_set_used_memslots = vhost_kernel_set_used_memslots, + .vhost_get_used_memslots = vhost_kernel_get_used_memslots, }; #endif diff --git a/hw/virtio/vhost-iova-tree.c b/hw/virtio/vhost-iova-tree.c new file mode 100644 index 0000000000000000000000000000000000000000..1339a4de8b4b567f5621203fdcd17cf55d9a55c0 --- /dev/null +++ b/hw/virtio/vhost-iova-tree.c @@ -0,0 +1,110 @@ +/* + * vhost software live migration iova tree + * + * SPDX-FileCopyrightText: Red Hat, Inc. 2021 + * SPDX-FileContributor: Author: Eugenio Pérez + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "qemu/iova-tree.h" +#include "vhost-iova-tree.h" + +#define iova_min_addr qemu_real_host_page_size + +/** + * VhostIOVATree, able to: + * - Translate iova address + * - Reverse translate iova address (from translated to iova) + * - Allocate IOVA regions for translated range (linear operation) + */ +struct VhostIOVATree { + /* First addressable iova address in the device */ + uint64_t iova_first; + + /* Last addressable iova address in the device */ + uint64_t iova_last; + + /* IOVA address to qemu memory maps. 
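+     *
+     * For example, a guest buffer at qemu VA 0x7f1234560000 could be
+     * stored here with an allocated iova of 0x3000; the device is then
+     * only ever given 0x3000, and SVQ translates between the two.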
*/ + IOVATree *iova_taddr_map; +}; + +/** + * Create a new IOVA tree + * + * Returns the new IOVA tree + */ +VhostIOVATree *vhost_iova_tree_new(hwaddr iova_first, hwaddr iova_last) +{ + VhostIOVATree *tree = g_new(VhostIOVATree, 1); + + /* Some devices do not like 0 addresses */ + tree->iova_first = MAX(iova_first, iova_min_addr); + tree->iova_last = iova_last; + + tree->iova_taddr_map = iova_tree_new(); + return tree; +} + +/** + * Delete an iova tree + */ +void vhost_iova_tree_delete(VhostIOVATree *iova_tree) +{ + iova_tree_destroy(iova_tree->iova_taddr_map); + g_free(iova_tree); +} + +/** + * Find the IOVA address stored from a memory address + * + * @tree: The iova tree + * @map: The map with the memory address + * + * Return the stored mapping, or NULL if not found. + */ +const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *tree, + const DMAMap *map) +{ + return iova_tree_find_iova(tree->iova_taddr_map, map); +} + +/** + * Allocate a new mapping + * + * @tree: The iova tree + * @map: The iova map + * + * Returns: + * - IOVA_OK if the map fits in the container + * - IOVA_ERR_INVALID if the map does not make sense (like size overflow) + * - IOVA_ERR_NOMEM if tree cannot allocate more space. + * + * It returns assignated iova in map->iova if return value is VHOST_DMA_MAP_OK. + */ +int vhost_iova_tree_map_alloc(VhostIOVATree *tree, DMAMap *map) +{ + /* Some vhost devices do not like addr 0. Skip first page */ + hwaddr iova_first = tree->iova_first ?: qemu_real_host_page_size; + + if (map->translated_addr + map->size < map->translated_addr || + map->perm == IOMMU_NONE) { + return IOVA_ERR_INVALID; + } + + /* Allocate a node in IOVA address */ + return iova_tree_alloc_map(tree->iova_taddr_map, map, iova_first, + tree->iova_last); +} + +/** + * Remove existing mappings from iova tree + * + * @iova_tree: The vhost iova tree + * @map: The map to remove + */ +void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map) +{ + iova_tree_remove(iova_tree->iova_taddr_map, map); +} diff --git a/hw/virtio/vhost-iova-tree.h b/hw/virtio/vhost-iova-tree.h new file mode 100644 index 0000000000000000000000000000000000000000..4adfd79ff039081f8dd604ceccb4d858f8e89cea --- /dev/null +++ b/hw/virtio/vhost-iova-tree.h @@ -0,0 +1,27 @@ +/* + * vhost software live migration iova tree + * + * SPDX-FileCopyrightText: Red Hat, Inc. 2021 + * SPDX-FileContributor: Author: Eugenio Pérez + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_VIRTIO_VHOST_IOVA_TREE_H +#define HW_VIRTIO_VHOST_IOVA_TREE_H + +#include "qemu/iova-tree.h" +#include "exec/memory.h" + +typedef struct VhostIOVATree VhostIOVATree; + +VhostIOVATree *vhost_iova_tree_new(uint64_t iova_first, uint64_t iova_last); +void vhost_iova_tree_delete(VhostIOVATree *iova_tree); +G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostIOVATree, vhost_iova_tree_delete); + +const DMAMap *vhost_iova_tree_find_iova(const VhostIOVATree *iova_tree, + const DMAMap *map); +int vhost_iova_tree_map_alloc(VhostIOVATree *iova_tree, DMAMap *map); +void vhost_iova_tree_remove(VhostIOVATree *iova_tree, DMAMap map); + +#endif diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c new file mode 100644 index 0000000000000000000000000000000000000000..8b5902a8a52a86cb60ea81fc352ed81f7615fdf5 --- /dev/null +++ b/hw/virtio/vhost-shadow-virtqueue.c @@ -0,0 +1,721 @@ +/* + * vhost shadow virtqueue + * + * SPDX-FileCopyrightText: Red Hat, Inc. 
2021 + * SPDX-FileContributor: Author: Eugenio Pérez + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#include "qemu/osdep.h" +#include "hw/virtio/vhost-shadow-virtqueue.h" + +#include "qemu/error-report.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "qemu/log.h" +#include "linux-headers/linux/vhost.h" + +/** + * Validate the transport device features that both guests can use with the SVQ + * and SVQs can use with the device. + * + * @dev_features: The features + * @errp: Error pointer + */ +bool vhost_svq_valid_features(uint64_t features, Error **errp) +{ + bool ok = true; + uint64_t svq_features = features; + + for (uint64_t b = VIRTIO_TRANSPORT_F_START; b <= VIRTIO_TRANSPORT_F_END; + ++b) { + switch (b) { + case VIRTIO_F_ANY_LAYOUT: + continue; + + case VIRTIO_F_ACCESS_PLATFORM: + /* SVQ trust in the host's IOMMU to translate addresses */ + case VIRTIO_F_VERSION_1: + /* SVQ trust that the guest vring is little endian */ + if (!(svq_features & BIT_ULL(b))) { + svq_features |= BIT_ULL(b); + ok = false; + } + continue; + + default: + if (svq_features & BIT_ULL(b)) { + svq_features &= ~BIT_ULL(b); + ok = false; + } + } + } + + if (!ok) { + error_setg(errp, "SVQ Invalid device feature flags, offer: 0x%"PRIx64 + ", ok: 0x%"PRIx64, features, svq_features); + } + return ok; +} + +/** + * Number of descriptors that the SVQ can make available from the guest. + * + * @svq: The svq + */ +static uint16_t vhost_svq_available_slots(const VhostShadowVirtqueue *svq) +{ + return svq->num_free; +} + +/** + * Translate addresses between the qemu's virtual address and the SVQ IOVA + * + * @svq: Shadow VirtQueue + * @vaddr: Translated IOVA addresses + * @iovec: Source qemu's VA addresses + * @num: Length of iovec and minimum length of vaddr + */ +static bool vhost_svq_translate_addr(const VhostShadowVirtqueue *svq, + hwaddr *addrs, const struct iovec *iovec, + size_t num) +{ + if (num == 0) { + return true; + } + + for (size_t i = 0; i < num; ++i) { + DMAMap needle = { + .translated_addr = (hwaddr)(uintptr_t)iovec[i].iov_base, + .size = iovec[i].iov_len, + }; + Int128 needle_last, map_last; + size_t off; + + const DMAMap *map = vhost_iova_tree_find_iova(svq->iova_tree, &needle); + /* + * Map cannot be NULL since iova map contains all guest space and + * qemu already has a physical address mapped + */ + if (unlikely(!map)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Invalid address 0x%"HWADDR_PRIx" given by guest", + needle.translated_addr); + return false; + } + + off = needle.translated_addr - map->translated_addr; + addrs[i] = map->iova + off; + + needle_last = int128_add(int128_make64(needle.translated_addr), + int128_makes64(iovec[i].iov_len - 1)); + map_last = int128_make64(map->translated_addr + map->size); + if (unlikely(int128_gt(needle_last, map_last))) { + qemu_log_mask(LOG_GUEST_ERROR, + "Guest buffer expands over iova range"); + return false; + } + } + + return true; +} + +/** + * Write descriptors to SVQ vring + * + * @svq: The shadow virtqueue + * @sg: Cache for hwaddr + * @iovec: The iovec from the guest + * @num: iovec length + * @more_descs: True if more descriptors come in the chain + * @write: True if they are writeable descriptors + * + * Return true if success, false otherwise and print error. + */ +static bool vhost_svq_vring_write_descs(VhostShadowVirtqueue *svq, hwaddr *sg, + const struct iovec *iovec, size_t num, + bool more_descs, bool write) +{ + uint16_t i = svq->free_head, last = svq->free_head; + unsigned n; + uint16_t flags = write ? 
cpu_to_le16(VRING_DESC_F_WRITE) : 0; + vring_desc_t *descs = svq->vring.desc; + bool ok; + + if (num == 0) { + return true; + } + + ok = vhost_svq_translate_addr(svq, sg, iovec, num); + if (unlikely(!ok)) { + return false; + } + + for (n = 0; n < num; n++) { + if (more_descs || (n + 1 < num)) { + descs[i].flags = flags | cpu_to_le16(VRING_DESC_F_NEXT); + descs[i].next = cpu_to_le16(svq->desc_next[i]); + } else { + descs[i].flags = flags; + } + descs[i].addr = cpu_to_le64(sg[n]); + descs[i].len = cpu_to_le32(iovec[n].iov_len); + + last = i; + i = cpu_to_le16(svq->desc_next[i]); + } + + svq->free_head = le16_to_cpu(svq->desc_next[last]); + return true; +} + +static bool vhost_svq_add_split(VhostShadowVirtqueue *svq, + const struct iovec *out_sg, size_t out_num, + const struct iovec *in_sg, size_t in_num, + unsigned *head) +{ + unsigned avail_idx; + vring_avail_t *avail = svq->vring.avail; + bool ok; + g_autofree hwaddr *sgs = g_new(hwaddr, MAX(out_num, in_num)); + + *head = svq->free_head; + + /* We need some descriptors here */ + if (unlikely(!out_num && !in_num)) { + qemu_log_mask(LOG_GUEST_ERROR, + "Guest provided element with no descriptors"); + return false; + } + + ok = vhost_svq_vring_write_descs(svq, sgs, out_sg, out_num, in_num > 0, + false); + if (unlikely(!ok)) { + return false; + } + + ok = vhost_svq_vring_write_descs(svq, sgs, in_sg, in_num, false, true); + if (unlikely(!ok)) { + return false; + } + + /* + * Put the entry in the available array (but don't update avail->idx until + * they do sync). + */ + avail_idx = svq->shadow_avail_idx & (svq->vring.num - 1); + avail->ring[avail_idx] = cpu_to_le16(*head); + svq->shadow_avail_idx++; + + /* Update the avail index after write the descriptor */ + smp_wmb(); + avail->idx = cpu_to_le16(svq->shadow_avail_idx); + + return true; +} + +static void vhost_svq_kick(VhostShadowVirtqueue *svq) +{ + /* + * We need to expose the available array entries before checking the used + * flags + */ + smp_mb(); + if (svq->vring.used->flags & VRING_USED_F_NO_NOTIFY) { + return; + } + + event_notifier_set(&svq->hdev_kick); +} + +/** + * Add an element to a SVQ. + * + * The caller must check that there is enough slots for the new element. It + * takes ownership of the element: In case of failure not ENOSPC, it is free. + * + * Return -EINVAL if element is invalid, -ENOSPC if dev queue is full + */ +int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg, + size_t out_num, const struct iovec *in_sg, size_t in_num, + VirtQueueElement *elem) +{ + unsigned qemu_head; + unsigned ndescs = in_num + out_num; + bool ok; + + if (unlikely(ndescs > vhost_svq_available_slots(svq))) { + return -ENOSPC; + } + + ok = vhost_svq_add_split(svq, out_sg, out_num, in_sg, in_num, &qemu_head); + if (unlikely(!ok)) { + g_free(elem); + return -EINVAL; + } + + svq->num_free -= ndescs; + svq->desc_state[qemu_head].elem = elem; + svq->desc_state[qemu_head].ndescs = ndescs; + vhost_svq_kick(svq); + return 0; +} + +/* Convenience wrapper to add a guest's element to SVQ */ +static int vhost_svq_add_element(VhostShadowVirtqueue *svq, + VirtQueueElement *elem) +{ + return vhost_svq_add(svq, elem->out_sg, elem->out_num, elem->in_sg, + elem->in_num, elem); +} + +/** + * Forward available buffers. + * + * @svq: Shadow VirtQueue + * + * Note that this function does not guarantee that all guest's available + * buffers are available to the device in SVQ avail ring. The guest may have + * exposed a GPA / GIOVA contiguous buffer, but it may not be contiguous in + * qemu vaddr. 
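+ * For example, a buffer that is GPA-contiguous but straddles two
+ * RAMBlocks arrives as two iovec entries and thus presumably needs a
+ * two-descriptor chain in the SVQ vring instead of a single descriptor.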
+ *
+ * If that happens, guest's kick notifications will be disabled until the
+ * device uses some buffers.
+ */
+static void vhost_handle_guest_kick(VhostShadowVirtqueue *svq)
+{
+    /* Clear event notifier */
+    event_notifier_test_and_clear(&svq->svq_kick);
+
+    /* Forward to the device as many available buffers as possible */
+    do {
+        virtio_queue_set_notification(svq->vq, false);
+
+        while (true) {
+            VirtQueueElement *elem;
+            int r;
+
+            if (svq->next_guest_avail_elem) {
+                elem = g_steal_pointer(&svq->next_guest_avail_elem);
+            } else {
+                elem = virtqueue_pop(svq->vq, sizeof(*elem));
+            }
+
+            if (!elem) {
+                break;
+            }
+
+            if (svq->ops) {
+                r = svq->ops->avail_handler(svq, elem, svq->ops_opaque);
+            } else {
+                r = vhost_svq_add_element(svq, elem);
+            }
+            if (unlikely(r != 0)) {
+                if (r == -ENOSPC) {
+                    /*
+                     * This condition is possible since a contiguous buffer in
+                     * GPA does not imply a contiguous buffer in QEMU's VA
+                     * scatter-gather segments. If that happens, the buffer
+                     * exposed to the device needs to be a chain of
+                     * descriptors at this moment.
+                     *
+                     * SVQ cannot hold more available buffers if we are here:
+                     * queue the current guest descriptor and ignore kicks
+                     * until some elements are used.
+                     */
+                    svq->next_guest_avail_elem = elem;
+                }
+
+                /* VQ is full or broken, just return and ignore kicks */
+                return;
+            }
+        }
+
+        virtio_queue_set_notification(svq->vq, true);
+    } while (!virtio_queue_empty(svq->vq));
+}
+
+/**
+ * Handle the guest's kick.
+ *
+ * @n: guest kick event notifier, the one that the guest set to notify the
+ *     SVQ.
+ */
+static void vhost_handle_guest_kick_notifier(EventNotifier *n)
+{
+    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue, svq_kick);
+    event_notifier_test_and_clear(n);
+    vhost_handle_guest_kick(svq);
+}
+
+static bool vhost_svq_more_used(VhostShadowVirtqueue *svq)
+{
+    uint16_t *used_idx = &svq->vring.used->idx;
+    if (svq->last_used_idx != svq->shadow_used_idx) {
+        return true;
+    }
+
+    svq->shadow_used_idx = cpu_to_le16(*(volatile uint16_t *)used_idx);
+
+    return svq->last_used_idx != svq->shadow_used_idx;
+}
+
+/**
+ * Enable vhost device calls after disabling them.
+ *
+ * @svq: The svq
+ *
+ * Returns false if there are pending used buffers from the vhost device,
+ * avoiding possible races between SVQ checking for more work and enabling
+ * callbacks; true if the SVQ used vring has no more pending buffers.
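+ *
+ * A minimal caller sketch (this is exactly the shape of the drain loop in
+ * vhost_svq_flush() below):
+ *
+ *     do {
+ *         vhost_svq_disable_notification(svq);
+ *         while ((elem = vhost_svq_get_buf(svq, &len))) {
+ *             ... return elem to the guest ...
+ *         }
+ *     } while (!vhost_svq_enable_notification(svq));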
+ */
+static bool vhost_svq_enable_notification(VhostShadowVirtqueue *svq)
+{
+    svq->vring.avail->flags &= ~cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+    /* Make sure the flag is written before the read of used_idx */
+    smp_mb();
+    return !vhost_svq_more_used(svq);
+}
+
+static void vhost_svq_disable_notification(VhostShadowVirtqueue *svq)
+{
+    svq->vring.avail->flags |= cpu_to_le16(VRING_AVAIL_F_NO_INTERRUPT);
+}
+
+static uint16_t vhost_svq_last_desc_of_chain(const VhostShadowVirtqueue *svq,
+                                             uint16_t num, uint16_t i)
+{
+    for (uint16_t j = 0; j < (num - 1); ++j) {
+        i = le16_to_cpu(svq->desc_next[i]);
+    }
+
+    return i;
+}
+
+static VirtQueueElement *vhost_svq_get_buf(VhostShadowVirtqueue *svq,
+                                           uint32_t *len)
+{
+    const vring_used_t *used = svq->vring.used;
+    vring_used_elem_t used_elem;
+    uint16_t last_used, last_used_chain, num;
+
+    if (!vhost_svq_more_used(svq)) {
+        return NULL;
+    }
+
+    /* Only get used array entries after they have been exposed by the device */
+    smp_rmb();
+    last_used = svq->last_used_idx & (svq->vring.num - 1);
+    used_elem.id = le32_to_cpu(used->ring[last_used].id);
+    used_elem.len = le32_to_cpu(used->ring[last_used].len);
+
+    svq->last_used_idx++;
+    if (unlikely(used_elem.id >= svq->vring.num)) {
+        qemu_log_mask(LOG_GUEST_ERROR, "Device %s says index %u is used",
+                      svq->vdev->name, used_elem.id);
+        return NULL;
+    }
+
+    if (unlikely(!svq->desc_state[used_elem.id].elem)) {
+        qemu_log_mask(LOG_GUEST_ERROR,
+            "Device %s says index %u is used, but it was not available",
+            svq->vdev->name, used_elem.id);
+        return NULL;
+    }
+
+    num = svq->desc_state[used_elem.id].ndescs;
+    last_used_chain = vhost_svq_last_desc_of_chain(svq, num, used_elem.id);
+    svq->desc_next[last_used_chain] = svq->free_head;
+    svq->free_head = used_elem.id;
+    svq->num_free += num;
+
+    *len = used_elem.len;
+    return g_steal_pointer(&svq->desc_state[used_elem.id].elem);
+}
+
+/**
+ * Push an element to the SVQ, returning it to the guest.
+ */
+void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
+                         const VirtQueueElement *elem, uint32_t len)
+{
+    virtqueue_push(svq->vq, elem, len);
+    if (svq->next_guest_avail_elem) {
+        /*
+         * Avail ring was full when vhost_svq_flush was called, so it's a
+         * good moment to make more descriptors available if possible.
+         */
+        vhost_handle_guest_kick(svq);
+    }
+}
+
+static void vhost_svq_flush(VhostShadowVirtqueue *svq,
+                            bool check_for_avail_queue)
+{
+    VirtQueue *vq = svq->vq;
+
+    /* Forward as many used buffers as possible. */
+    do {
+        unsigned i = 0;
+
+        vhost_svq_disable_notification(svq);
+        while (true) {
+            uint32_t len;
+            g_autofree VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
+            if (!elem) {
+                break;
+            }
+
+            if (unlikely(i >= svq->vring.num)) {
+                qemu_log_mask(LOG_GUEST_ERROR,
+                    "More than %u used buffers obtained in a %u size SVQ",
+                    i, svq->vring.num);
+                virtqueue_fill(vq, elem, len, i);
+                virtqueue_flush(vq, i);
+                return;
+            }
+            virtqueue_fill(vq, elem, len, i++);
+        }
+
+        virtqueue_flush(vq, i);
+        event_notifier_set(&svq->svq_call);
+
+        if (check_for_avail_queue && svq->next_guest_avail_elem) {
+            /*
+             * Avail ring was full when vhost_svq_flush was called, so it's a
+             * good moment to make more descriptors available if possible.
+             */
+            vhost_handle_guest_kick(svq);
+        }
+    } while (!vhost_svq_enable_notification(svq));
+}
+
+/**
+ * Poll the SVQ for one device-used buffer.
+ *
+ * This function races with the main event loop's SVQ polling, so extra
+ * synchronization is needed.
+ *
+ * Returns the length written by the device.
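+ * Returns 0 if the (roughly 10 second) timeout in the implementation
+ * expires before a used buffer arrives, so a caller sketch looks like:
+ *
+ *     uint32_t len = vhost_svq_poll(svq);
+ *     if (len == 0) {
+ *         ... treat as a device timeout ...
+ *     }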
+ */
+size_t vhost_svq_poll(VhostShadowVirtqueue *svq)
+{
+    int64_t start_us = g_get_monotonic_time();
+    do {
+        uint32_t len;
+        VirtQueueElement *elem = vhost_svq_get_buf(svq, &len);
+        if (elem) {
+            return len;
+        }
+
+        if (unlikely(g_get_monotonic_time() - start_us > 10e6)) {
+            return 0;
+        }
+
+        /* Make sure we read new used_idx */
+        smp_rmb();
+    } while (true);
+}
+
+/**
+ * Forward used buffers.
+ *
+ * @n: hdev call event notifier, the one that the device set to notify the
+ *     SVQ.
+ *
+ * Note that we are not making any buffers available in the loop, so there
+ * is no way it runs more than virtqueue-size times.
+ */
+static void vhost_svq_handle_call(EventNotifier *n)
+{
+    VhostShadowVirtqueue *svq = container_of(n, VhostShadowVirtqueue,
+                                             hdev_call);
+    event_notifier_test_and_clear(n);
+    vhost_svq_flush(svq, true);
+}
+
+/**
+ * Set the call notifier for the SVQ to call the guest
+ *
+ * @svq: Shadow virtqueue
+ * @call_fd: call notifier
+ *
+ * Called on BQL context.
+ */
+void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd)
+{
+    if (call_fd == VHOST_FILE_UNBIND) {
+        /*
+         * Make event_notifier_set() fail if it is called while handling a
+         * device call.
+         *
+         * SVQ still needs device notifications, since it needs to keep
+         * forwarding used buffers even with the unbind.
+         */
+        memset(&svq->svq_call, 0, sizeof(svq->svq_call));
+    } else {
+        event_notifier_init_fd(&svq->svq_call, call_fd);
+    }
+}
+
+/**
+ * Get the shadow vq vring address.
+ * @svq: Shadow virtqueue
+ * @addr: Destination to store address
+ */
+void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
+                              struct vhost_vring_addr *addr)
+{
+    addr->desc_user_addr = (uint64_t)(uintptr_t)svq->vring.desc;
+    addr->avail_user_addr = (uint64_t)(uintptr_t)svq->vring.avail;
+    addr->used_user_addr = (uint64_t)(uintptr_t)svq->vring.used;
+}
+
+size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq)
+{
+    size_t desc_size = sizeof(vring_desc_t) * svq->vring.num;
+    size_t avail_size = offsetof(vring_avail_t, ring) +
+                                             sizeof(uint16_t) * svq->vring.num;
+
+    return ROUND_UP(desc_size + avail_size, qemu_real_host_page_size);
+}
+
+size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq)
+{
+    size_t used_size = offsetof(vring_used_t, ring) +
+                                    sizeof(vring_used_elem_t) * svq->vring.num;
+    return ROUND_UP(used_size, qemu_real_host_page_size);
+}
+
+/**
+ * Set a new file descriptor for the guest to kick the SVQ and notify about
+ * available buffers
+ *
+ * @svq: The svq
+ * @svq_kick_fd: The svq kick fd
+ *
+ * Note that the SVQ will never close the old file descriptor.
+ */
+void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
+{
+    EventNotifier *svq_kick = &svq->svq_kick;
+    bool poll_stop = VHOST_FILE_UNBIND != event_notifier_get_fd(svq_kick);
+    bool poll_start = svq_kick_fd != VHOST_FILE_UNBIND;
+
+    if (poll_stop) {
+        event_notifier_set_handler(svq_kick, NULL);
+    }
+
+    event_notifier_init_fd(svq_kick, svq_kick_fd);
+    /*
+     * event_notifier_set_handler() already checks for guest notifications
+     * that arrived at the new file descriptor during the switch, so there
+     * is no need to check for them explicitly.
+     */
+    if (poll_start) {
+        event_notifier_set(svq_kick);
+        event_notifier_set_handler(svq_kick, vhost_handle_guest_kick_notifier);
+    }
+}
+
+/**
+ * Start the shadow virtqueue operation.
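+ *
+ * The vring is carved out of two host-page-aligned allocations, matching
+ * the area-size helpers above: the driver area holds the descriptor table
+ * followed by the avail ring, and the device area holds the used ring.
+ * Roughly:
+ *
+ *     driver area = ROUND_UP(num * sizeof(vring_desc_t) +
+ *                            offsetof(vring_avail_t, ring) +
+ *                            num * sizeof(uint16_t), page_size)
+ *     device area = ROUND_UP(offsetof(vring_used_t, ring) +
+ *                            num * sizeof(vring_used_elem_t), page_size)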
+ * + * @svq: Shadow Virtqueue + * @vdev: VirtIO device + * @vq: Virtqueue to shadow + * @iova_tree: Tree to perform descriptors translations + */ +void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev, + VirtQueue *vq, VhostIOVATree *iova_tree) +{ + size_t desc_size, driver_size, device_size; + + event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call); + svq->next_guest_avail_elem = NULL; + svq->shadow_avail_idx = 0; + svq->shadow_used_idx = 0; + svq->last_used_idx = 0; + svq->vdev = vdev; + svq->vq = vq; + svq->iova_tree = iova_tree; + + svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq)); + svq->num_free = svq->vring.num; + driver_size = vhost_svq_driver_area_size(svq); + device_size = vhost_svq_device_area_size(svq); + svq->vring.desc = qemu_memalign(qemu_real_host_page_size, driver_size); + desc_size = sizeof(vring_desc_t) * svq->vring.num; + svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size); + memset(svq->vring.desc, 0, driver_size); + svq->vring.used = qemu_memalign(qemu_real_host_page_size, device_size); + memset(svq->vring.used, 0, device_size); + svq->desc_state = g_new0(SVQDescState, svq->vring.num); + svq->desc_next = g_new0(uint16_t, svq->vring.num); + for (unsigned i = 0; i < svq->vring.num - 1; i++) { + svq->desc_next[i] = cpu_to_le16(i + 1); + } +} + +/** + * Stop the shadow virtqueue operation. + * @svq: Shadow Virtqueue + */ +void vhost_svq_stop(VhostShadowVirtqueue *svq) +{ + vhost_svq_set_svq_kick_fd(svq, VHOST_FILE_UNBIND); + g_autofree VirtQueueElement *next_avail_elem = NULL; + + if (!svq->vq) { + return; + } + + /* Send all pending used descriptors to guest */ + vhost_svq_flush(svq, false); + + for (unsigned i = 0; i < svq->vring.num; ++i) { + g_autofree VirtQueueElement *elem = NULL; + elem = g_steal_pointer(&svq->desc_state[i].elem); + if (elem) { + virtqueue_detach_element(svq->vq, elem, 0); + } + } + + next_avail_elem = g_steal_pointer(&svq->next_guest_avail_elem); + if (next_avail_elem) { + virtqueue_detach_element(svq->vq, next_avail_elem, 0); + } + svq->vq = NULL; + g_free(svq->desc_next); + g_free(svq->desc_state); + qemu_vfree(svq->vring.desc); + qemu_vfree(svq->vring.used); + event_notifier_set_handler(&svq->hdev_call, NULL); +} + +/** + * Creates vhost shadow virtqueue, and instructs the vhost device to use the + * shadow methods and file descriptors. + * + * @ops: SVQ owner callbacks + * @ops_opaque: ops opaque pointer + */ +VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops, + void *ops_opaque) +{ + VhostShadowVirtqueue *svq = g_new0(VhostShadowVirtqueue, 1); + + event_notifier_init_fd(&svq->svq_kick, VHOST_FILE_UNBIND); + svq->ops = ops; + svq->ops_opaque = ops_opaque; + return svq; +} + +/** + * Free the resources of the shadow virtqueue. + * + * @pvq: gpointer to SVQ so it can be used by autofree functions. + */ +void vhost_svq_free(gpointer pvq) +{ + VhostShadowVirtqueue *vq = pvq; + vhost_svq_stop(vq); + g_free(vq); +} diff --git a/hw/virtio/vhost-shadow-virtqueue.h b/hw/virtio/vhost-shadow-virtqueue.h new file mode 100644 index 0000000000000000000000000000000000000000..6efe051a703e4a7ee4430d408984895d04c7f7f4 --- /dev/null +++ b/hw/virtio/vhost-shadow-virtqueue.h @@ -0,0 +1,141 @@ +/* + * vhost shadow virtqueue + * + * SPDX-FileCopyrightText: Red Hat, Inc. 
2021
+ * SPDX-FileContributor: Author: Eugenio Pérez
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef VHOST_SHADOW_VIRTQUEUE_H
+#define VHOST_SHADOW_VIRTQUEUE_H
+
+#include "qemu/event_notifier.h"
+#include "hw/virtio/virtio.h"
+#include "standard-headers/linux/vhost_types.h"
+#include "hw/virtio/vhost-iova-tree.h"
+
+typedef struct SVQDescState {
+    VirtQueueElement *elem;
+
+    /*
+     * Number of descriptors exposed to the device. May or may not match
+     * the guest's
+     */
+    unsigned int ndescs;
+} SVQDescState;
+
+typedef struct VhostShadowVirtqueue VhostShadowVirtqueue;
+
+/**
+ * Callback to handle an avail buffer.
+ *
+ * @svq: Shadow virtqueue
+ * @elem: Element placed in the queue by the guest
+ * @vq_callback_opaque: Opaque
+ *
+ * Returns 0 if the vq is running as expected.
+ *
+ * Note that ownership of elem is transferred to the callback.
+ */
+typedef int (*VirtQueueAvailCallback)(VhostShadowVirtqueue *svq,
+                                      VirtQueueElement *elem,
+                                      void *vq_callback_opaque);
+
+typedef struct VhostShadowVirtqueueOps {
+    VirtQueueAvailCallback avail_handler;
+} VhostShadowVirtqueueOps;
+
+/* Shadow virtqueue to relay notifications */
+typedef struct VhostShadowVirtqueue {
+    /* Shadow vring */
+    struct vring vring;
+
+    /* Shadow kick notifier, sent to vhost */
+    EventNotifier hdev_kick;
+    /* Shadow call notifier, sent to vhost */
+    EventNotifier hdev_call;
+
+    /*
+     * Borrowed virtqueue's guest-to-host notifier. Borrowing it into this
+     * event notifier makes it easy to recover the VhostShadowVirtqueue from
+     * the event loop; if we used the VirtQueue's own notifier, we would
+     * have no easy way to retrieve the VhostShadowVirtqueue.
+     *
+     * The shadow virtqueue therefore must not clean it up, or we would lose
+     * the VirtQueue's one.
+     */
+    EventNotifier svq_kick;
+
+    /* Guest's call notifier, where the SVQ calls guest. */
+    EventNotifier svq_call;
+
+    /* Virtio queue shadowing */
+    VirtQueue *vq;
+
+    /* Virtio device */
+    VirtIODevice *vdev;
+
+    /* IOVA mapping */
+    VhostIOVATree *iova_tree;
+
+    /* SVQ vring descriptors state */
+    SVQDescState *desc_state;
+
+    /* Next VirtQueue element that guest made available */
+    VirtQueueElement *next_guest_avail_elem;
+
+    /*
+     * Backup of the next field for each descriptor, so we can recover
+     * securely without needing to trust device accesses.
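+     *
+     * For example, when a used chain comes back from the device, the SVQ
+     * walks desc_next (never the device-visible vring.desc[].next) to find
+     * the tail and relink the chain onto free_head, so a misbehaving device
+     * cannot corrupt the free list.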
+     */
+    uint16_t *desc_next;
+
+    /* Caller callbacks */
+    const VhostShadowVirtqueueOps *ops;
+
+    /* Caller callbacks opaque */
+    void *ops_opaque;
+
+    /* Next head to expose to the device */
+    uint16_t shadow_avail_idx;
+
+    /* Next free descriptor */
+    uint16_t free_head;
+
+    /* Last seen used idx */
+    uint16_t shadow_used_idx;
+
+    /* Next head to consume from the device */
+    uint16_t last_used_idx;
+
+    /* Number of free descriptors in the SVQ vring */
+    uint16_t num_free;
+} VhostShadowVirtqueue;
+
+bool vhost_svq_valid_features(uint64_t features, Error **errp);
+
+void vhost_svq_push_elem(VhostShadowVirtqueue *svq,
+                         const VirtQueueElement *elem, uint32_t len);
+int vhost_svq_add(VhostShadowVirtqueue *svq, const struct iovec *out_sg,
+                  size_t out_num, const struct iovec *in_sg, size_t in_num,
+                  VirtQueueElement *elem);
+size_t vhost_svq_poll(VhostShadowVirtqueue *svq);
+
+void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd);
+void vhost_svq_set_svq_call_fd(VhostShadowVirtqueue *svq, int call_fd);
+void vhost_svq_get_vring_addr(const VhostShadowVirtqueue *svq,
+                              struct vhost_vring_addr *addr);
+size_t vhost_svq_driver_area_size(const VhostShadowVirtqueue *svq);
+size_t vhost_svq_device_area_size(const VhostShadowVirtqueue *svq);
+
+void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
+                     VirtQueue *vq, VhostIOVATree *iova_tree);
+void vhost_svq_stop(VhostShadowVirtqueue *svq);
+
+VhostShadowVirtqueue *vhost_svq_new(const VhostShadowVirtqueueOps *ops,
+                                    void *ops_opaque);
+
+void vhost_svq_free(gpointer vq);
+G_DEFINE_AUTOPTR_CLEANUP_FUNC(VhostShadowVirtqueue, vhost_svq_free);
+
+#endif
diff --git a/hw/virtio/vhost-user-fs.c b/hw/virtio/vhost-user-fs.c
index c59595798397ee447e804b83044f4e93ceb9683c..5ac5dcce49750613c47247d210c7e723b2daca2b 100644
--- a/hw/virtio/vhost-user-fs.c
+++ b/hw/virtio/vhost-user-fs.c
@@ -74,7 +74,7 @@ static void vuf_start(VirtIODevice *vdev)
     }
 
     fs->vhost_dev.acked_features = vdev->guest_features;
-    ret = vhost_dev_start(&fs->vhost_dev, vdev);
+    ret = vhost_dev_start(&fs->vhost_dev, vdev, true);
     if (ret < 0) {
         error_report("Error starting vhost: %d", -ret);
         goto err_guest_notifiers;
@@ -108,7 +108,7 @@ static void vuf_stop(VirtIODevice *vdev)
         return;
     }
 
-    vhost_dev_stop(&fs->vhost_dev, vdev);
+    vhost_dev_stop(&fs->vhost_dev, vdev, true);
 
     ret = k->set_guest_notifiers(qbus->parent, fs->vhost_dev.nvqs, false);
     if (ret < 0) {
@@ -161,6 +161,15 @@ static void vuf_guest_notifier_mask(VirtIODevice *vdev, int idx,
 {
     VHostUserFS *fs = VHOST_USER_FS(vdev);
 
+    /*
+     * Check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1) is the
+     * index reserved for it; if this driver does not support the configure
+     * interrupt, return early.
+     */
+
+    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+        return;
+    }
     vhost_virtqueue_mask(&fs->vhost_dev, vdev, idx, mask);
 }
 
@@ -168,6 +177,15 @@ static bool vuf_guest_notifier_pending(VirtIODevice *vdev, int idx)
 {
     VHostUserFS *fs = VHOST_USER_FS(vdev);
 
+    /*
+     * Check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1) is the
+     * index reserved for it; if this driver does not support the configure
+     * interrupt, return early.
+     */
+
+    if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+        return false;
+    }
     return vhost_virtqueue_pending(&fs->vhost_dev, idx);
 }
 
@@ -258,6 +276,7 @@ static void vuf_device_unrealize(DeviceState *dev)
 {
     VirtIODevice *vdev = VIRTIO_DEVICE(dev);
     VHostUserFS *fs = VHOST_USER_FS(dev);
+    struct vhost_virtqueue *vhost_vqs = fs->vhost_dev.vqs;
     int i;
 
     /* This will stop vhost backend if appropriate.
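     * Note the vqs array is saved into the local vhost_vqs above before
     * teardown: vhost_dev_cleanup() (run on the stop path) memsets the
     * vhost_dev, clearing fs->vhost_dev.vqs, so the saved pointer is the one
     * that must be freed below to avoid leaking the array.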
*/ @@ -273,8 +292,13 @@ static void vuf_device_unrealize(DeviceState *dev) } g_free(fs->req_vqs); virtio_cleanup(vdev); - g_free(fs->vhost_dev.vqs); - fs->vhost_dev.vqs = NULL; + g_free(vhost_vqs); +} + +static struct vhost_dev *vuf_get_vhost(VirtIODevice *vdev) +{ + VHostUserFS *fs = VHOST_USER_FS(vdev); + return &fs->vhost_dev; } static const VMStateDescription vuf_vmstate = { @@ -314,6 +338,7 @@ static void vuf_class_init(ObjectClass *klass, void *data) vdc->set_status = vuf_set_status; vdc->guest_notifier_mask = vuf_guest_notifier_mask; vdc->guest_notifier_pending = vuf_guest_notifier_pending; + vdc->get_vhost = vuf_get_vhost; } static const TypeInfo vuf_info = { diff --git a/hw/virtio/vhost-user-i2c.c b/hw/virtio/vhost-user-i2c.c index d172632bb0cd382bd50b27ed73e0547166319450..19add4a7070ee79c0870563063af5e56920b5329 100644 --- a/hw/virtio/vhost-user-i2c.c +++ b/hw/virtio/vhost-user-i2c.c @@ -45,7 +45,7 @@ static void vu_i2c_start(VirtIODevice *vdev) i2c->vhost_dev.acked_features = vdev->guest_features; - ret = vhost_dev_start(&i2c->vhost_dev, vdev); + ret = vhost_dev_start(&i2c->vhost_dev, vdev, true); if (ret < 0) { error_report("Error starting vhost-user-i2c: %d", -ret); goto err_guest_notifiers; @@ -79,7 +79,7 @@ static void vu_i2c_stop(VirtIODevice *vdev) return; } - vhost_dev_stop(&i2c->vhost_dev, vdev); + vhost_dev_stop(&i2c->vhost_dev, vdev, true); ret = k->set_guest_notifiers(qbus->parent, i2c->vhost_dev.nvqs, false); if (ret < 0) { @@ -129,6 +129,14 @@ static void vu_i2c_guest_notifier_mask(VirtIODevice *vdev, int idx, bool mask) { VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + /* + * We don't support interrupts, return early if index is set to + * VIRTIO_CONFIG_IRQ_IDX. + */ + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return; + } + vhost_virtqueue_mask(&i2c->vhost_dev, vdev, idx, mask); } @@ -136,6 +144,14 @@ static bool vu_i2c_guest_notifier_pending(VirtIODevice *vdev, int idx) { VHostUserI2C *i2c = VHOST_USER_I2C(vdev); + /* + * We don't support interrupts, return early if index is set to + * VIRTIO_CONFIG_IRQ_IDX. 
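+ *
+ * The same guard appears in every guest_notifier_mask /
+ * guest_notifier_pending callback touched by this series, since generic
+ * virtio code may now pass VIRTIO_CONFIG_IRQ_IDX (-1) for the configure
+ * interrupt:
+ *
+ *     if (idx == VIRTIO_CONFIG_IRQ_IDX) {
+ *         return false;    (plain "return;" in the mask callback)
+ *     }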
+ */ + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return false; + } + return vhost_virtqueue_pending(&i2c->vhost_dev, idx); } diff --git a/hw/virtio/vhost-user-rng.c b/hw/virtio/vhost-user-rng.c index 209ee5bf9acd2bb2d2f191de932d3ce97d35d02a..f25b7cf6249468d001921e6b0ca551f9d1b28de9 100644 --- a/hw/virtio/vhost-user-rng.c +++ b/hw/virtio/vhost-user-rng.c @@ -42,7 +42,7 @@ static void vu_rng_start(VirtIODevice *vdev) } rng->vhost_dev.acked_features = vdev->guest_features; - ret = vhost_dev_start(&rng->vhost_dev, vdev); + ret = vhost_dev_start(&rng->vhost_dev, vdev, true); if (ret < 0) { error_report("Error starting vhost-user-rng: %d", -ret); goto err_guest_notifiers; @@ -76,7 +76,7 @@ static void vu_rng_stop(VirtIODevice *vdev) return; } - vhost_dev_stop(&rng->vhost_dev, vdev); + vhost_dev_stop(&rng->vhost_dev, vdev, true); ret = k->set_guest_notifiers(qbus->parent, rng->vhost_dev.nvqs, false); if (ret < 0) { @@ -247,6 +247,12 @@ static void vu_rng_device_unrealize(DeviceState *dev) vhost_user_cleanup(&rng->vhost_user); } +static struct vhost_dev *vu_rng_get_vhost(VirtIODevice *vdev) +{ + VHostUserRNG *rng = VHOST_USER_RNG(vdev); + return &rng->vhost_dev; +} + static const VMStateDescription vu_rng_vmstate = { .name = "vhost-user-rng", .unmigratable = 1, @@ -272,6 +278,7 @@ static void vu_rng_class_init(ObjectClass *klass, void *data) vdc->set_status = vu_rng_set_status; vdc->guest_notifier_mask = vu_rng_guest_notifier_mask; vdc->guest_notifier_pending = vu_rng_guest_notifier_pending; + vdc->get_vhost = vu_rng_get_vhost; } static const TypeInfo vu_rng_info = { diff --git a/hw/virtio/vhost-user.c b/hw/virtio/vhost-user.c index bf6e50223cb49d27fa428e442f3aa9c4948e79dd..f10620494bb60b4864b95739364c7847552a1ab3 100644 --- a/hw/virtio/vhost-user.c +++ b/hw/virtio/vhost-user.c @@ -24,7 +24,9 @@ #include "sysemu/cryptodev.h" #include "migration/migration.h" #include "migration/postcopy-ram.h" +#include "migration/register.h" #include "trace.h" +#include "exec/ramblock.h" #include #include @@ -50,7 +52,7 @@ #include "hw/acpi/acpi.h" #define VHOST_USER_MAX_RAM_SLOTS ACPI_MAX_RAM_SLOTS -#elif defined(TARGET_PPC) || defined(TARGET_PPC_64) +#elif defined(TARGET_PPC) || defined(TARGET_PPC64) #include "hw/ppc/spapr.h" #define VHOST_USER_MAX_RAM_SLOTS SPAPR_MAX_RAM_SLOTS @@ -233,6 +235,7 @@ static VhostUserMsg m __attribute__ ((unused)); /* The version of the protocol we support */ #define VHOST_USER_VERSION (0x1) +static unsigned int vhost_user_used_memslots; struct vhost_user { struct vhost_dev *dev; @@ -280,9 +283,10 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) r = qemu_chr_fe_read_all(chr, p, size); if (r != size) { + int saved_errno = errno; error_report("Failed to read msg header. Read %d instead of %d." " Original request %d.", r, size, msg->hdr.request); - return -1; + return r < 0 ? -saved_errno : -EIO; } /* validate received flags */ @@ -290,9 +294,11 @@ static int vhost_user_read_header(struct vhost_dev *dev, VhostUserMsg *msg) error_report("Failed to read msg header." 
" Flags 0x%x instead of 0x%x.", msg->hdr.flags, VHOST_USER_REPLY_MASK | VHOST_USER_VERSION); - return -1; + return -EPROTO; } + trace_vhost_user_read(msg->hdr.request, msg->hdr.flags); + return 0; } @@ -314,8 +320,9 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, uint8_t *p = (uint8_t *) msg; int r, size; - if (vhost_user_read_header(dev, msg) < 0) { - data->ret = -1; + r = vhost_user_read_header(dev, msg); + if (r < 0) { + data->ret = r; goto end; } @@ -324,7 +331,7 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, error_report("Failed to read msg header." " Size %d exceeds the maximum %zu.", msg->hdr.size, VHOST_USER_PAYLOAD_SIZE); - data->ret = -1; + data->ret = -EPROTO; goto end; } @@ -333,9 +340,10 @@ static gboolean vhost_user_read_cb(void *do_not_use, GIOCondition condition, size = msg->hdr.size; r = qemu_chr_fe_read_all(chr, p, size); if (r != size) { + int saved_errno = errno; error_report("Failed to read msg payload." " Read %d instead of %d.", r, msg->hdr.size); - data->ret = -1; + data->ret = r < 0 ? -saved_errno : -EIO; goto end; } } @@ -418,24 +426,26 @@ static int vhost_user_read(struct vhost_dev *dev, VhostUserMsg *msg) static int process_message_reply(struct vhost_dev *dev, const VhostUserMsg *msg) { + int ret; VhostUserMsg msg_reply; if ((msg->hdr.flags & VHOST_USER_NEED_REPLY_MASK) == 0) { return 0; } - if (vhost_user_read(dev, &msg_reply) < 0) { - return -1; + ret = vhost_user_read(dev, &msg_reply); + if (ret < 0) { + return ret; } if (msg_reply.hdr.request != msg->hdr.request) { error_report("Received unexpected msg type. " "Expected %d received %d", msg->hdr.request, msg_reply.hdr.request); - return -1; + return -EPROTO; } - return msg_reply.payload.u64 ? -1 : 0; + return msg_reply.payload.u64 ? -EIO : 0; } static bool vhost_user_one_time_request(VhostUserRequest request) @@ -446,6 +456,7 @@ static bool vhost_user_one_time_request(VhostUserRequest request) case VHOST_USER_SET_MEM_TABLE: case VHOST_USER_GET_QUEUE_NUM: case VHOST_USER_NET_SET_MTU: + case VHOST_USER_SET_LOG_BASE: return true; default: return false; @@ -472,16 +483,19 @@ static int vhost_user_write(struct vhost_dev *dev, VhostUserMsg *msg, if (qemu_chr_fe_set_msgfds(chr, fds, fd_num) < 0) { error_report("Failed to set msg fds."); - return -1; + return -EINVAL; } ret = qemu_chr_fe_write_all(chr, (const uint8_t *) msg, size); if (ret != size) { + int saved_errno = errno; error_report("Failed to write msg." " Wrote %d instead of %d.", ret, size); - return -1; + return ret < 0 ? -saved_errno : -EIO; } + trace_vhost_user_write(msg->hdr.request, msg->hdr.flags); + return 0; } @@ -502,6 +516,7 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, size_t fd_num = 0; bool shmfd = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_LOG_SHMFD); + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_SET_LOG_BASE, .hdr.flags = VHOST_USER_VERSION, @@ -514,21 +529,23 @@ static int vhost_user_set_log_base(struct vhost_dev *dev, uint64_t base, fds[fd_num++] = log->fd; } - if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, fds, fd_num); + if (ret < 0) { + return ret; } - if (shmfd) { + if (shmfd && (dev->vq_index == 0)) { msg.hdr.size = 0; - if (vhost_user_read(dev, &msg) < 0) { - return -1; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + return ret; } if (msg.hdr.request != VHOST_USER_SET_LOG_BASE) { error_report("Received unexpected msg type. 
" "Expected %d received %d", VHOST_USER_SET_LOG_BASE, msg.hdr.request); - return -1; + return -EPROTO; } } @@ -588,7 +605,7 @@ static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u, u->region_rb[i] = mr->ram_block; } else if (*fd_num == VHOST_MEMORY_BASELINE_NREGIONS) { error_report("Failed preparing vhost-user memory table msg"); - return -1; + return -ENOBUFS; } vhost_user_fill_msg_region(®ion_buffer, reg, offset); msg->payload.memory.regions[*fd_num] = region_buffer; @@ -604,14 +621,14 @@ static int vhost_user_fill_set_mem_table_msg(struct vhost_user *u, if (!*fd_num) { error_report("Failed initializing vhost-user memory map, " "consider using -object memory-backend-file share=on"); - return -1; + return -EINVAL; } msg->hdr.size = sizeof(msg->payload.memory.nregions); msg->hdr.size += sizeof(msg->payload.memory.padding); msg->hdr.size += *fd_num * sizeof(VhostUserMemoryRegion); - return 1; + return 0; } static inline bool reg_equal(struct vhost_memory_region *shadow_reg, @@ -741,8 +758,9 @@ static int send_remove_regions(struct vhost_dev *dev, vhost_user_fill_msg_region(®ion_buffer, shadow_reg, 0); msg->payload.mem_reg.region = region_buffer; - if (vhost_user_write(dev, msg, &fd, 1) < 0) { - return -1; + ret = vhost_user_write(dev, msg, &fd, 1); + if (ret < 0) { + return ret; } if (reply_supported) { @@ -801,15 +819,17 @@ static int send_add_regions(struct vhost_dev *dev, vhost_user_fill_msg_region(®ion_buffer, reg, offset); msg->payload.mem_reg.region = region_buffer; - if (vhost_user_write(dev, msg, &fd, 1) < 0) { - return -1; + ret = vhost_user_write(dev, msg, &fd, 1); + if (ret < 0) { + return ret; } if (track_ramblocks) { uint64_t reply_gpa; - if (vhost_user_read(dev, &msg_reply) < 0) { - return -1; + ret = vhost_user_read(dev, &msg_reply); + if (ret < 0) { + return ret; } reply_gpa = msg_reply.payload.mem_reg.region.guest_phys_addr; @@ -819,7 +839,7 @@ static int send_add_regions(struct vhost_dev *dev, "Expected %d received %d", __func__, VHOST_USER_ADD_MEM_REG, msg_reply.hdr.request); - return -1; + return -EPROTO; } /* @@ -830,7 +850,7 @@ static int send_add_regions(struct vhost_dev *dev, error_report("%s: Unexpected size for postcopy reply " "%d vs %d", __func__, msg_reply.hdr.size, msg->hdr.size); - return -1; + return -EPROTO; } /* Get the postcopy client base from the backend's reply. 
*/ @@ -846,7 +866,7 @@ static int send_add_regions(struct vhost_dev *dev, "Got guest physical address %" PRIX64 ", expected " "%" PRIX64, __func__, reply_gpa, dev->mem->regions[reg_idx].guest_phys_addr); - return -1; + return -EPROTO; } } else if (reply_supported) { ret = process_message_reply(dev, msg); @@ -887,6 +907,7 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev, struct scrub_regions rem_reg[VHOST_USER_MAX_RAM_SLOTS]; uint64_t shadow_pcb[VHOST_USER_MAX_RAM_SLOTS] = {}; int nr_add_reg, nr_rem_reg; + int ret; msg->hdr.size = sizeof(msg->payload.mem_reg); @@ -894,16 +915,20 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev, scrub_shadow_regions(dev, add_reg, &nr_add_reg, rem_reg, &nr_rem_reg, shadow_pcb, track_ramblocks); - if (nr_rem_reg && send_remove_regions(dev, rem_reg, nr_rem_reg, msg, - reply_supported) < 0) - { - goto err; + if (nr_rem_reg) { + ret = send_remove_regions(dev, rem_reg, nr_rem_reg, msg, + reply_supported); + if (ret < 0) { + goto err; + } } - if (nr_add_reg && send_add_regions(dev, add_reg, nr_add_reg, msg, - shadow_pcb, reply_supported, track_ramblocks) < 0) - { - goto err; + if (nr_add_reg) { + ret = send_add_regions(dev, add_reg, nr_add_reg, msg, shadow_pcb, + reply_supported, track_ramblocks); + if (ret < 0) { + goto err; + } } if (track_ramblocks) { @@ -918,8 +943,9 @@ static int vhost_user_add_remove_regions(struct vhost_dev *dev, msg->hdr.size = sizeof(msg->payload.u64); msg->payload.u64 = 0; /* OK */ - if (vhost_user_write(dev, msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, msg, NULL, 0); + if (ret < 0) { + return ret; } } @@ -931,7 +957,7 @@ err: sizeof(uint64_t) * VHOST_USER_MAX_RAM_SLOTS); } - return -1; + return ret; } static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, @@ -944,6 +970,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, size_t fd_num = 0; VhostUserMsg msg_reply; int region_i, msg_i; + int ret; VhostUserMsg msg = { .hdr.flags = VHOST_USER_VERSION, @@ -961,29 +988,32 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, } if (config_mem_slots) { - if (vhost_user_add_remove_regions(dev, &msg, reply_supported, - true) < 0) { - return -1; + ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, true); + if (ret < 0) { + return ret; } } else { - if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, - true) < 0) { - return -1; + ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, + true); + if (ret < 0) { + return ret; } - if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, fds, fd_num); + if (ret < 0) { + return ret; } - if (vhost_user_read(dev, &msg_reply) < 0) { - return -1; + ret = vhost_user_read(dev, &msg_reply); + if (ret < 0) { + return ret; } if (msg_reply.hdr.request != VHOST_USER_SET_MEM_TABLE) { error_report("%s: Received unexpected msg type." 
"Expected %d received %d", __func__, VHOST_USER_SET_MEM_TABLE, msg_reply.hdr.request); - return -1; + return -EPROTO; } /* @@ -994,7 +1024,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, error_report("%s: Unexpected size for postcopy reply " "%d vs %d", __func__, msg_reply.hdr.size, msg.hdr.size); - return -1; + return -EPROTO; } memset(u->postcopy_client_bases, 0, @@ -1024,7 +1054,7 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, error_report("%s: postcopy reply not fully consumed " "%d vs %zd", __func__, msg_i, fd_num); - return -1; + return -EIO; } /* @@ -1035,8 +1065,9 @@ static int vhost_user_set_mem_table_postcopy(struct vhost_dev *dev, /* TODO: Use this for failure cases as well with a bad value. */ msg.hdr.size = sizeof(msg.payload.u64); msg.payload.u64 = 0; /* OK */ - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } } @@ -1055,6 +1086,7 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev, bool config_mem_slots = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS); + int ret; if (do_postcopy) { /* @@ -1074,17 +1106,20 @@ static int vhost_user_set_mem_table(struct vhost_dev *dev, } if (config_mem_slots) { - if (vhost_user_add_remove_regions(dev, &msg, reply_supported, - false) < 0) { - return -1; + ret = vhost_user_add_remove_regions(dev, &msg, reply_supported, false); + if (ret < 0) { + return ret; } } else { - if (vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, - false) < 0) { - return -1; + ret = vhost_user_fill_set_mem_table_msg(u, dev, &msg, fds, &fd_num, + false); + if (ret < 0) { + return ret; } - if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { - return -1; + + ret = vhost_user_write(dev, &msg, fds, fd_num); + if (ret < 0) { + return ret; } if (reply_supported) { @@ -1109,14 +1144,10 @@ static int vhost_user_set_vring_endian(struct vhost_dev *dev, if (!cross_endian) { error_report("vhost-user trying to send unhandled ioctl"); - return -1; + return -ENOTSUP; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, NULL, 0); } static int vhost_set_vring(struct vhost_dev *dev, @@ -1130,11 +1161,7 @@ static int vhost_set_vring(struct vhost_dev *dev, .hdr.size = sizeof(msg.payload.state), }; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, NULL, 0); } static int vhost_user_set_vring_num(struct vhost_dev *dev, @@ -1143,37 +1170,32 @@ static int vhost_user_set_vring_num(struct vhost_dev *dev, return vhost_set_vring(dev, VHOST_USER_SET_VRING_NUM, ring); } -static void vhost_user_host_notifier_restore(struct vhost_dev *dev, - int queue_idx) +static void vhost_user_host_notifier_free(VhostUserHostNotifier *n) { - struct vhost_user *u = dev->opaque; - VhostUserHostNotifier *n = &u->user->notifier[queue_idx]; - VirtIODevice *vdev = dev->vdev; - - if (n->addr && !n->set) { - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true); - n->set = true; - } + assert(n && n->unmap_addr); + munmap(n->unmap_addr, qemu_real_host_page_size); + n->unmap_addr = NULL; } -static void vhost_user_host_notifier_remove(struct vhost_dev *dev, - int queue_idx) +static void vhost_user_host_notifier_remove(VhostUserState *user, + VirtIODevice *vdev, int queue_idx) { - struct vhost_user *u = dev->opaque; - VhostUserHostNotifier *n = &u->user->notifier[queue_idx]; - VirtIODevice 
*vdev = dev->vdev; + VhostUserHostNotifier *n = &user->notifier[queue_idx]; - if (n->addr && n->set) { - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); - n->set = false; + if (n->addr) { + if (vdev) { + virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); + } + assert(!n->unmap_addr); + n->unmap_addr = n->addr; + n->addr = NULL; + call_rcu(n, vhost_user_host_notifier_free, rcu); } } static int vhost_user_set_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { - vhost_user_host_notifier_restore(dev, ring->index); - return vhost_set_vring(dev, VHOST_USER_SET_VRING_BASE, ring); } @@ -1182,16 +1204,25 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable) int i; if (!virtio_has_feature(dev->features, VHOST_USER_F_PROTOCOL_FEATURES)) { - return -1; + return -EINVAL; } for (i = 0; i < dev->nvqs; ++i) { + int ret; struct vhost_vring_state state = { .index = dev->vq_index + i, .num = enable, }; - vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state); + ret = vhost_set_vring(dev, VHOST_USER_SET_VRING_ENABLE, &state); + if (ret < 0) { + /* + * Restoring the previous state is likely infeasible, as well as + * proceeding regardless the error, so just bail out and hope for + * the device-level recovery. + */ + return ret; + } } return 0; @@ -1200,32 +1231,36 @@ static int vhost_user_set_vring_enable(struct vhost_dev *dev, int enable) static int vhost_user_get_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_GET_VRING_BASE, .hdr.flags = VHOST_USER_VERSION, .payload.state = *ring, .hdr.size = sizeof(msg.payload.state), }; + struct vhost_user *u = dev->opaque; - vhost_user_host_notifier_remove(dev, ring->index); + vhost_user_host_notifier_remove(u->user, dev->vdev, ring->index); - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - return -1; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + return ret; } if (msg.hdr.request != VHOST_USER_GET_VRING_BASE) { error_report("Received unexpected msg type. Expected %d received %d", VHOST_USER_GET_VRING_BASE, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size != sizeof(msg.payload.state)) { error_report("Received bad msg size."); - return -1; + return -EPROTO; } *ring = msg.payload.state; @@ -1252,11 +1287,7 @@ static int vhost_set_vring_file(struct vhost_dev *dev, msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; } - if (vhost_user_write(dev, &msg, fds, fd_num) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, fds, fd_num); } static int vhost_user_set_vring_kick(struct vhost_dev *dev, @@ -1274,6 +1305,7 @@ static int vhost_user_set_vring_call(struct vhost_dev *dev, static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) { + int ret; VhostUserMsg msg = { .hdr.request = request, .hdr.flags = VHOST_USER_VERSION, @@ -1283,23 +1315,25 @@ static int vhost_user_get_u64(struct vhost_dev *dev, int request, uint64_t *u64) return 0; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - return -1; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + return ret; } if (msg.hdr.request != request) { error_report("Received unexpected msg type. 
Expected %d received %d", request, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size != sizeof(msg.payload.u64)) { error_report("Received bad msg size."); - return -1; + return -EPROTO; } *u64 = msg.payload.u64; @@ -1337,6 +1371,7 @@ static int enforce_reply(struct vhost_dev *dev, static int vhost_user_set_vring_addr(struct vhost_dev *dev, struct vhost_vring_addr *addr) { + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_SET_VRING_ADDR, .hdr.flags = VHOST_USER_VERSION, @@ -1357,8 +1392,9 @@ static int vhost_user_set_vring_addr(struct vhost_dev *dev, msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } if (wait_for_reply) { @@ -1377,6 +1413,7 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64, .payload.u64 = u64, .hdr.size = sizeof(msg.payload.u64), }; + int ret; if (wait_for_reply) { bool reply_supported = virtio_has_feature(dev->protocol_features, @@ -1386,8 +1423,9 @@ static int vhost_user_set_u64(struct vhost_dev *dev, int request, uint64_t u64, } } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } if (wait_for_reply) { @@ -1424,11 +1462,7 @@ static int vhost_user_set_owner(struct vhost_dev *dev) .hdr.flags = VHOST_USER_VERSION, }; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -EPROTO; - } - - return 0; + return vhost_user_write(dev, &msg, NULL, 0); } static int vhost_user_get_max_memslots(struct vhost_dev *dev, @@ -1459,26 +1493,16 @@ static int vhost_user_reset_device(struct vhost_dev *dev) ? VHOST_USER_RESET_DEVICE : VHOST_USER_RESET_OWNER; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, NULL, 0); } static int vhost_user_slave_handle_config_change(struct vhost_dev *dev) { - int ret = -1; - - if (!dev->config_ops) { - return -1; + if (!dev->config_ops || !dev->config_ops->vhost_dev_config_notifier) { + return -ENOSYS; } - if (dev->config_ops->vhost_dev_config_notifier) { - ret = dev->config_ops->vhost_dev_config_notifier(dev); - } - - return ret; + return dev->config_ops->vhost_dev_config_notifier(dev); } static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, @@ -1497,17 +1521,12 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, if (!virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_HOST_NOTIFIER) || vdev == NULL || queue_idx >= virtio_get_num_queues(vdev)) { - return -1; + return -EINVAL; } n = &user->notifier[queue_idx]; - if (n->addr) { - virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, false); - object_unparent(OBJECT(&n->mr)); - munmap(n->addr, page_size); - n->addr = NULL; - } + vhost_user_host_notifier_remove(user, vdev, queue_idx); if (area->u64 & VHOST_USER_VRING_NOFD_MASK) { return 0; @@ -1515,30 +1534,32 @@ static int vhost_user_slave_handle_vring_host_notifier(struct vhost_dev *dev, /* Sanity check. */ if (area->size != page_size) { - return -1; + return -EINVAL; } addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, area->offset); if (addr == MAP_FAILED) { - return -1; + return -EFAULT; } name = g_strdup_printf("vhost-user/host-notifier@%p mmaps[%d]", user, queue_idx); - if (!n->mr.ram) /* Don't init again after suspend. */ + if (!n->mr.ram) { /* Don't init again after suspend. 
*/ memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name, page_size, addr); + } else { + n->mr.ram_block->host = addr; + } g_free(name); if (virtio_queue_set_host_notifier_mr(vdev, queue_idx, &n->mr, true)) { object_unparent(OBJECT(&n->mr)); munmap(addr, page_size); - return -1; + return -ENXIO; } n->addr = addr; - n->set = true; return 0; } @@ -1664,14 +1685,15 @@ static int vhost_setup_slave_channel(struct vhost_dev *dev) } if (socketpair(PF_UNIX, SOCK_STREAM, 0, sv) == -1) { + int saved_errno = errno; error_report("socketpair() failed"); - return -1; + return -saved_errno; } ioc = QIO_CHANNEL(qio_channel_socket_new_fd(sv[0], &local_err)); if (!ioc) { error_report_err(local_err); - return -1; + return -ECONNREFUSED; } u->slave_ioc = ioc; slave_update_read_handler(dev, NULL); @@ -1778,35 +1800,38 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp) struct vhost_user *u = dev->opaque; CharBackend *chr = u->user->chr; int ufd; + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_POSTCOPY_ADVISE, .hdr.flags = VHOST_USER_VERSION, }; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { error_setg(errp, "Failed to send postcopy_advise to vhost"); - return -1; + return ret; } - if (vhost_user_read(dev, &msg) < 0) { + ret = vhost_user_read(dev, &msg); + if (ret < 0) { error_setg(errp, "Failed to get postcopy_advise reply from vhost"); - return -1; + return ret; } if (msg.hdr.request != VHOST_USER_POSTCOPY_ADVISE) { error_setg(errp, "Unexpected msg type. Expected %d received %d", VHOST_USER_POSTCOPY_ADVISE, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size) { error_setg(errp, "Received bad msg size."); - return -1; + return -EPROTO; } ufd = qemu_chr_fe_get_msgfd(chr); if (ufd < 0) { error_setg(errp, "%s: Failed to get ufd", __func__); - return -1; + return -EIO; } qemu_set_nonblock(ufd); @@ -1820,7 +1845,7 @@ static int vhost_user_postcopy_advise(struct vhost_dev *dev, Error **errp) return 0; #else error_setg(errp, "Postcopy not supported on non-Linux systems"); - return -1; + return -ENOSYS; #endif } @@ -1836,10 +1861,13 @@ static int vhost_user_postcopy_listen(struct vhost_dev *dev, Error **errp) .hdr.flags = VHOST_USER_VERSION | VHOST_USER_NEED_REPLY_MASK, }; u->postcopy_listen = true; + trace_vhost_user_postcopy_listen(); - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { error_setg(errp, "Failed to send postcopy_listen to vhost"); - return -1; + return ret; } ret = process_message_reply(dev, &msg); @@ -1864,9 +1892,11 @@ static int vhost_user_postcopy_end(struct vhost_dev *dev, Error **errp) struct vhost_user *u = dev->opaque; trace_vhost_user_postcopy_end_entry(); - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { + + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { error_setg(errp, "Failed to send postcopy_end to vhost"); - return -1; + return ret; } ret = process_message_reply(dev, &msg); @@ -1919,15 +1949,43 @@ static int vhost_user_postcopy_notifier(NotifierWithReturn *notifier, return 0; } +static int vhost_user_load_setup(QEMUFile *f, void *opaque) +{ + struct vhost_dev *hdev = opaque; + int r; + + if (hdev->vhost_ops && hdev->vhost_ops->vhost_set_mem_table) { + r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); + if (r < 0) { + qemu_log("error: vhost_set_mem_table failed: %s(%d)\n", + strerror(errno), errno); + return r; + } else { + qemu_log("info: vhost_set_mem_table OK\n"); + } + } + 
return 0; +} + +SaveVMHandlers savevm_vhost_user_handlers = { + .load_setup = vhost_user_load_setup, +}; + static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, Error **errp) { uint64_t features, protocol_features, ram_slots; struct vhost_user *u; int err; + Chardev *chr; assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER); + chr = qemu_chr_fe_get_driver(((VhostUserState *)opaque)->chr); + if (chr) { + chr->chr_for_flag = CHR_FOR_VHOST_USER; + } + u = g_new0(struct vhost_user, 1); u->user = opaque; u->dev = dev; @@ -2037,6 +2095,7 @@ static int vhost_user_backend_init(struct vhost_dev *dev, void *opaque, u->postcopy_notifier.notify = vhost_user_postcopy_notifier; postcopy_add_notifier(&u->postcopy_notifier); + register_savevm_live("vhost-user", -1, 1, &savevm_vhost_user_handlers, dev); return 0; } @@ -2068,6 +2127,7 @@ static int vhost_user_backend_cleanup(struct vhost_dev *dev) u->region_rb_len = 0; g_free(u); dev->opaque = 0; + unregister_savevm(NULL, "vhost-user", dev); return 0; } @@ -2115,7 +2175,7 @@ static int vhost_user_migration_done(struct vhost_dev *dev, char* mac_addr) return vhost_user_write(dev, &msg, NULL, 0); } - return -1; + return -ENOTSUP; } static bool vhost_user_can_merge(struct vhost_dev *dev, @@ -2136,6 +2196,7 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) VhostUserMsg msg; bool reply_supported = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_REPLY_ACK); + int ret; if (!(dev->protocol_features & (1ULL << VHOST_USER_PROTOCOL_F_NET_MTU))) { return 0; @@ -2149,8 +2210,9 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) msg.hdr.flags |= VHOST_USER_NEED_REPLY_MASK; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } /* If reply_ack supported, slave has to ack specified MTU is valid */ @@ -2164,6 +2226,7 @@ static int vhost_user_net_set_mtu(struct vhost_dev *dev, uint16_t mtu) static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev, struct vhost_iotlb_msg *imsg) { + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_IOTLB_MSG, .hdr.size = sizeof(msg.payload.iotlb), @@ -2171,8 +2234,9 @@ static int vhost_user_send_device_iotlb_msg(struct vhost_dev *dev, .payload.iotlb = *imsg, }; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -EFAULT; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } return process_message_reply(dev, &msg); @@ -2187,6 +2251,7 @@ static void vhost_user_set_iotlb_callback(struct vhost_dev *dev, int enabled) static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config, uint32_t config_len, Error **errp) { + int ret; VhostUserMsg msg = { .hdr.request = VHOST_USER_GET_CONFIG, .hdr.flags = VHOST_USER_VERSION, @@ -2203,26 +2268,28 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config, msg.payload.config.offset = 0; msg.payload.config.size = config_len; - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - error_setg_errno(errp, EPROTO, "vhost_get_config failed"); - return -EPROTO; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + error_setg_errno(errp, -ret, "vhost_get_config failed"); + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - error_setg_errno(errp, EPROTO, "vhost_get_config failed"); - return -EPROTO; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + error_setg_errno(errp, -ret, "vhost_get_config failed"); + return ret; } if (msg.hdr.request != 
VHOST_USER_GET_CONFIG) { error_setg(errp, "Received unexpected msg type. Expected %d received %d", VHOST_USER_GET_CONFIG, msg.hdr.request); - return -EINVAL; + return -EPROTO; } if (msg.hdr.size != VHOST_USER_CONFIG_HDR_SIZE + config_len) { error_setg(errp, "Received bad msg size."); - return -EINVAL; + return -EPROTO; } memcpy(config, msg.payload.config.region, config_len); @@ -2233,6 +2300,7 @@ static int vhost_user_get_config(struct vhost_dev *dev, uint8_t *config, static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, uint32_t offset, uint32_t size, uint32_t flags) { + int ret; uint8_t *p; bool reply_supported = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_REPLY_ACK); @@ -2245,7 +2313,7 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, if (!virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_CONFIG)) { - return -1; + return -ENOTSUP; } if (reply_supported) { @@ -2253,7 +2321,7 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, } if (size > VHOST_USER_MAX_CONFIG_SIZE) { - return -1; + return -EINVAL; } msg.payload.config.offset = offset, @@ -2262,8 +2330,9 @@ static int vhost_user_set_config(struct vhost_dev *dev, const uint8_t *data, p = msg.payload.config.region; memcpy(p, data, size); - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } if (reply_supported) { @@ -2277,6 +2346,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev, void *session_info, uint64_t *session_id) { + int ret; bool crypto_session = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION); CryptoDevBackendSymSessionInfo *sess_info = session_info; @@ -2290,7 +2360,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev, if (!crypto_session) { error_report("vhost-user trying to send unhandled ioctl"); - return -1; + return -ENOTSUP; } memcpy(&msg.payload.session.session_setup_data, sess_info, @@ -2303,31 +2373,35 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev, memcpy(&msg.payload.session.auth_key, sess_info->auth_key, sess_info->auth_key_len); } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - error_report("vhost_user_write() return -1, create session failed"); - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + error_report("vhost_user_write() return %d, create session failed", + ret); + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - error_report("vhost_user_read() return -1, create session failed"); - return -1; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + error_report("vhost_user_read() return %d, create session failed", + ret); + return ret; } if (msg.hdr.request != VHOST_USER_CREATE_CRYPTO_SESSION) { error_report("Received unexpected msg type. 
Expected %d received %d", VHOST_USER_CREATE_CRYPTO_SESSION, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size != sizeof(msg.payload.session)) { error_report("Received bad msg size."); - return -1; + return -EPROTO; } if (msg.payload.session.session_id < 0) { error_report("Bad session id: %" PRId64 "", msg.payload.session.session_id); - return -1; + return -EINVAL; } *session_id = msg.payload.session.session_id; @@ -2337,6 +2411,7 @@ static int vhost_user_crypto_create_session(struct vhost_dev *dev, static int vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id) { + int ret; bool crypto_session = virtio_has_feature(dev->protocol_features, VHOST_USER_PROTOCOL_F_CRYPTO_SESSION); VhostUserMsg msg = { @@ -2348,12 +2423,14 @@ vhost_user_crypto_close_session(struct vhost_dev *dev, uint64_t session_id) if (!crypto_session) { error_report("vhost-user trying to send unhandled ioctl"); - return -1; + return -ENOTSUP; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - error_report("vhost_user_write() return -1, close session failed"); - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + error_report("vhost_user_write() return %d, close session failed", + ret); + return ret; } return 0; @@ -2375,6 +2452,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev, { void *addr; int fd; + int ret; struct vhost_user *u = dev->opaque; CharBackend *chr = u->user->chr; VhostUserMsg msg = { @@ -2390,24 +2468,26 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev, return 0; } - if (vhost_user_write(dev, &msg, NULL, 0) < 0) { - return -1; + ret = vhost_user_write(dev, &msg, NULL, 0); + if (ret < 0) { + return ret; } - if (vhost_user_read(dev, &msg) < 0) { - return -1; + ret = vhost_user_read(dev, &msg); + if (ret < 0) { + return ret; } if (msg.hdr.request != VHOST_USER_GET_INFLIGHT_FD) { error_report("Received unexpected msg type. 
" "Expected %d received %d", VHOST_USER_GET_INFLIGHT_FD, msg.hdr.request); - return -1; + return -EPROTO; } if (msg.hdr.size != sizeof(msg.payload.inflight)) { error_report("Received bad msg size."); - return -1; + return -EPROTO; } if (!msg.payload.inflight.mmap_size) { @@ -2417,7 +2497,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev, fd = qemu_chr_fe_get_msgfd(chr); if (fd < 0) { error_report("Failed to get mem fd"); - return -1; + return -EIO; } addr = mmap(0, msg.payload.inflight.mmap_size, PROT_READ | PROT_WRITE, @@ -2426,7 +2506,7 @@ static int vhost_user_get_inflight_fd(struct vhost_dev *dev, if (addr == MAP_FAILED) { error_report("Failed to mmap mem fd"); close(fd); - return -1; + return -EFAULT; } inflight->addr = addr; @@ -2456,11 +2536,7 @@ static int vhost_user_set_inflight_fd(struct vhost_dev *dev, return 0; } - if (vhost_user_write(dev, &msg, &inflight->fd, 1) < 0) { - return -1; - } - - return 0; + return vhost_user_write(dev, &msg, &inflight->fd, 1); } bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) @@ -2477,22 +2553,45 @@ bool vhost_user_init(VhostUserState *user, CharBackend *chr, Error **errp) void vhost_user_cleanup(VhostUserState *user) { int i; + VhostUserHostNotifier *n; if (!user->chr) { return; } memory_region_transaction_begin(); for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { - if (user->notifier[i].addr) { - object_unparent(OBJECT(&user->notifier[i].mr)); - munmap(user->notifier[i].addr, qemu_real_host_page_size); - user->notifier[i].addr = NULL; - } + n = &user->notifier[i]; + vhost_user_host_notifier_remove(user, NULL, i); + object_unparent(OBJECT(&n->mr)); } memory_region_transaction_commit(); user->chr = NULL; } +static void vhost_user_set_used_memslots(struct vhost_dev *dev) +{ + unsigned int counter = 0; + int i; + + for (i = 0; i < dev->mem->nregions; ++i) { + struct vhost_memory_region *reg = dev->mem->regions + i; + ram_addr_t offset; + MemoryRegion *mr; + + mr = memory_region_from_host((void *)(uintptr_t)reg->userspace_addr, + &offset); + if (mr && memory_region_get_fd(mr) > 0) { + counter++; + } + } + vhost_user_used_memslots = counter; +} + +static unsigned int vhost_user_get_used_memslots(void) +{ + return vhost_user_used_memslots; +} + const VhostOps user_ops = { .backend_type = VHOST_BACKEND_TYPE_USER, .vhost_backend_init = vhost_user_backend_init, @@ -2526,4 +2625,6 @@ const VhostOps user_ops = { .vhost_backend_mem_section_filter = vhost_user_mem_section_filter, .vhost_get_inflight_fd = vhost_user_get_inflight_fd, .vhost_set_inflight_fd = vhost_user_set_inflight_fd, + .vhost_set_used_memslots = vhost_user_set_used_memslots, + .vhost_get_used_memslots = vhost_user_get_used_memslots, }; diff --git a/hw/virtio/vhost-vdpa.c b/hw/virtio/vhost-vdpa.c index bcaf00e09f33c7559125e2e89b73f4ea711e1391..f5d4e2d939aacb52510d22ce3002d0292baea63d 100644 --- a/hw/virtio/vhost-vdpa.c +++ b/hw/virtio/vhost-vdpa.c @@ -17,12 +17,17 @@ #include "hw/virtio/vhost.h" #include "hw/virtio/vhost-backend.h" #include "hw/virtio/virtio-net.h" +#include "hw/virtio/vhost-shadow-virtqueue.h" #include "hw/virtio/vhost-vdpa.h" #include "exec/address-spaces.h" +#include "migration/blocker.h" #include "qemu/main-loop.h" #include "cpu.h" #include "trace.h" #include "qemu-common.h" +#include "qapi/error.h" + +static unsigned int vhost_vdpa_used_memslots; /* * Return one past the end of the end of section. 
Be careful with uint64_t @@ -69,22 +74,28 @@ static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section, return false; } -static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, - void *vaddr, bool readonly) +/* + * The caller must set asid = 0 if the device does not support asid. + * This is not an ABI break since it is set to 0 by the initializer anyway. + */ +int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, + hwaddr size, void *vaddr, bool readonly) { struct vhost_msg_v2 msg = {}; int fd = v->device_fd; int ret = 0; msg.type = v->msg_type; + msg.asid = asid; msg.iotlb.iova = iova; msg.iotlb.size = size; msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr; msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW; msg.iotlb.type = VHOST_IOTLB_UPDATE; - trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size, - msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type); + trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova, + msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm, + msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", @@ -95,19 +106,24 @@ static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size, return ret; } -static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, - hwaddr size) +/* + * The caller must set asid = 0 if the device does not support asid. + * This is not an ABI break since it is set to 0 by the initializer anyway. + */ +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, + hwaddr size) { struct vhost_msg_v2 msg = {}; int fd = v->device_fd; int ret = 0; msg.type = v->msg_type; + msg.asid = asid; msg.iotlb.iova = iova; msg.iotlb.size = size; msg.iotlb.type = VHOST_IOTLB_INVALIDATE; - trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova, + trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova, msg.iotlb.size, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { @@ -127,6 +143,7 @@ static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v) .iotlb.type = VHOST_IOTLB_BATCH_BEGIN, }; + trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno)); @@ -161,6 +178,7 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener) msg.type = v->msg_type; msg.iotlb.type = VHOST_IOTLB_BATCH_END; + trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type); if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) { error_report("failed to write, fd=%d, errno=%d (%s)", fd, errno, strerror(errno)); @@ -172,6 +190,7 @@ static void vhost_vdpa_listener_commit(MemoryListener *listener) static void vhost_vdpa_listener_region_add(MemoryListener *listener, MemoryRegionSection *section) { + DMAMap mem_region = {}; struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener); hwaddr iova; Int128 llend, llsize; @@ -207,17 +226,37 @@ static void vhost_vdpa_listener_region_add(MemoryListener *listener, vaddr, section->readonly); llsize = int128_sub(llend, int128_make64(iova)); + if (v->shadow_data) { + int r; + + mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr, + mem_region.size = int128_get64(llsize) - 1, + mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly), + + r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region); + if (unlikely(r != IOVA_OK)) { + error_report("Can't 
allocate a mapping (%d)", r); + goto fail; + } + + iova = mem_region.iova; + } vhost_vdpa_iotlb_batch_begin_once(v); - ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize), - vaddr, section->readonly); + ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova, + int128_get64(llsize), vaddr, section->readonly); if (ret) { error_report("vhost vdpa map fail!"); - goto fail; + goto fail_map; } return; +fail_map: + if (v->shadow_data) { + vhost_iova_tree_remove(v->iova_tree, mem_region); + } + fail: /* * On the initfn path, store the first error in the container so we @@ -251,7 +290,8 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, iova = TARGET_PAGE_ALIGN(section->offset_within_address_space); llend = vhost_vdpa_section_end(section); - trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend)); + trace_vhost_vdpa_listener_region_del(v, iova, + int128_get64(int128_sub(llend, int128_one()))); if (int128_ge(int128_make64(iova), llend)) { return; @@ -259,8 +299,27 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, llsize = int128_sub(llend, int128_make64(iova)); + if (v->shadow_data) { + const DMAMap *result; + const void *vaddr = memory_region_get_ram_ptr(section->mr) + + section->offset_within_region + + (iova - section->offset_within_address_space); + DMAMap mem_region = { + .translated_addr = (hwaddr)(uintptr_t)vaddr, + .size = int128_get64(llsize) - 1, + }; + + result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region); + if (!result) { + /* The memory listener map wasn't mapped */ + return; + } + iova = result->iova; + vhost_iova_tree_remove(v->iova_tree, *result); + } vhost_vdpa_iotlb_batch_begin_once(v); - ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize)); + ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova, + int128_get64(llsize)); if (ret) { error_report("vhost_vdpa dma unmap error!"); } @@ -268,7 +327,7 @@ static void vhost_vdpa_listener_region_del(MemoryListener *listener, memory_region_unref(section->mr); } /* - * IOTLB API is used by vhost-vpda which requires incremental updating + * IOTLB API is used by vhost-vdpa which requires incremental updating * of the mapping. So we can not use generic vhost memory listener which * depends on the addnop(). */ @@ -292,38 +351,80 @@ static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request, return ret < 0 ? 
-static void vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) +static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status) { uint8_t s; + int ret; trace_vhost_vdpa_add_status(dev, status); - if (vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s)) { - return; + ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s); + if (ret < 0) { + return ret; } s |= status; - vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s); + ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s); + if (ret < 0) { + return ret; + } + + ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s); + if (ret < 0) { + return ret; + } + + if (!(s & status)) { + return -EIO; + } + + return 0; } -static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v) +int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range) { - int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE, - &v->iova_range); - if (ret != 0) { - v->iova_range.first = 0; - v->iova_range.last = UINT64_MAX; - } + int ret = ioctl(fd, VHOST_VDPA_GET_IOVA_RANGE, iova_range); - trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first, - v->iova_range.last); + return ret < 0 ? -errno : 0; } -static bool vhost_vdpa_one_time_request(struct vhost_dev *dev) +/* + * The use of this function is for requests that only need to be + * applied once. Typically such a request occurs at the beginning + * of operation, before the queues are set up. It should not be + * used for requests that must wait until all queues are set, + * which would need to check dev->vq_index_end instead. + */ +static bool vhost_vdpa_first_dev(struct vhost_dev *dev) { struct vhost_vdpa *v = dev->opaque; - return v->index != 0; + return v->index == 0; +} + +static int vhost_vdpa_get_dev_features(struct vhost_dev *dev, + uint64_t *features) +{ + int ret; + + ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features); + trace_vhost_vdpa_get_features(dev, *features); + return ret; +} + +static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v) +{ + g_autoptr(GPtrArray) shadow_vqs = NULL; + + shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free); + for (unsigned n = 0; n < hdev->nvqs; ++n) { + VhostShadowVirtqueue *svq; + + svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque); + g_ptr_array_add(shadow_vqs, svq); + } + + v->shadow_vqs = g_steal_pointer(&shadow_vqs); }
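vhost_vdpa_init_svq() above leans on GLib's scoped-ownership idiom: the array is declared with g_autoptr() so it is freed, elements included, on any early return, and g_steal_pointer() hands ownership to v->shadow_vqs only once everything has succeeded. Below is a minimal self-contained sketch of the same pattern; make_element() and make_strings() are hypothetical stand-ins, and the failure branch is invented purely to show the auto-free path (the patch's own loop has no failing call).

```c
#include <glib.h>

static void *make_element(unsigned i)
{
    if (i >= 8) {
        return NULL;               /* models a constructor failure */
    }
    return g_strdup_printf("element %u", i);
}

static GPtrArray *make_strings(unsigned n)
{
    g_autoptr(GPtrArray) arr = g_ptr_array_new_full(n, g_free);

    for (unsigned i = 0; i < n; i++) {
        void *e = make_element(i);
        if (!e) {
            return NULL;           /* early exit: arr and contents auto-freed */
        }
        g_ptr_array_add(arr, e);
    }
    return g_steal_pointer(&arr);  /* success: ownership moves to the caller */
}

int main(void)
{
    GPtrArray *a = make_strings(3);
    if (a) {
        g_print("built %u elements\n", a->len);
        g_ptr_array_free(a, TRUE); /* TRUE frees the elements too */
    }
    return 0;
}
```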
static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) @@ -348,10 +449,10 @@ static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp) dev->opaque = opaque ; v->listener = vhost_vdpa_memory_listener; v->msg_type = VHOST_IOTLB_MSG_V2; + vhost_vdpa_init_svq(dev, v); - vhost_vdpa_get_iova_range(v); - - if (vhost_vdpa_one_time_request(dev)) { + error_propagate(&dev->migration_blocker, v->migration_blocker); + if (!vhost_vdpa_first_dev(dev)) { return 0; } @@ -379,15 +480,6 @@ static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev, } } -static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n) -{ - int i; - - for (i = 0; i < n; i++) { - vhost_vdpa_host_notifier_uninit(dev, i); - } -} - static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index) { size_t page_size = qemu_real_host_page_size; @@ -415,6 +507,7 @@ static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index) g_free(name); if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) { + object_unparent(OBJECT(&n->mr)); munmap(addr, page_size); goto err; } @@ -426,21 +519,60 
@@ err: return -1; } +static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n) +{ + int i; + + /* + * Pack all the changes to the memory regions in a single + * transaction to avoid multiple updates of the address space + * topology. + */ + memory_region_transaction_begin(); + + for (i = dev->vq_index; i < dev->vq_index + n; i++) { + vhost_vdpa_host_notifier_uninit(dev, i); + } + + memory_region_transaction_commit(); +} + static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev) { + struct vhost_vdpa *v = dev->opaque; int i; + if (v->shadow_vqs_enabled) { + /* FIXME SVQ is not compatible with host notifiers mr */ + return; + } + + /* + * Pack all the changes to the memory regions in a single + * transaction to avoid multiple updates of the address space + * topology. + */ + memory_region_transaction_begin(); + for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) { if (vhost_vdpa_host_notifier_init(dev, i)) { - goto err; + vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index); + break; } } - return; + memory_region_transaction_commit(); +} -err: - vhost_vdpa_host_notifiers_uninit(dev, i); - return; +static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + size_t idx; + + for (idx = 0; idx < v->shadow_vqs->len; ++idx) { + vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx)); + } + g_ptr_array_free(v->shadow_vqs, true); } static int vhost_vdpa_cleanup(struct vhost_dev *dev) @@ -451,6 +583,7 @@ static int vhost_vdpa_cleanup(struct vhost_dev *dev) trace_vhost_vdpa_cleanup(dev, v); vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); memory_listener_unregister(&v->listener); + vhost_vdpa_svq_cleanup(dev); dev->opaque = NULL; ram_block_discard_disable(false); @@ -467,7 +600,7 @@ static int vhost_vdpa_memslots_limit(struct vhost_dev *dev) static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, struct vhost_memory *mem) { - if (vhost_vdpa_one_time_request(dev)) { + if (!vhost_vdpa_first_dev(dev)) { return 0; } @@ -484,7 +617,7 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, } } if (mem->padding) { - return -1; + return -EINVAL; } return 0; @@ -493,29 +626,45 @@ static int vhost_vdpa_set_mem_table(struct vhost_dev *dev, static int vhost_vdpa_set_features(struct vhost_dev *dev, uint64_t features) { + struct vhost_vdpa *v = dev->opaque; int ret; - if (vhost_vdpa_one_time_request(dev)) { + if (!vhost_vdpa_first_dev(dev)) { return 0; } + if (v->shadow_vqs_enabled) { + if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) { + /* + * QEMU is just trying to enable or disable logging. SVQ handles + * this separately, so no need to forward this. 
+ */ + v->acked_features = features; + return 0; + } + + v->acked_features = features; + + /* We must not ack _F_LOG if SVQ is enabled */ + features &= ~BIT_ULL(VHOST_F_LOG_ALL); + } + trace_vhost_vdpa_set_features(dev, features); ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features); - uint8_t status = 0; if (ret) { return ret; } - vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); - vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status); - return !(status & VIRTIO_CONFIG_S_FEATURES_OK); + return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); } static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) { uint64_t features; - uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 | - 0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH; + uint64_t f = BIT_ULL(VHOST_BACKEND_F_IOTLB_MSG_V2) | + BIT_ULL(VHOST_BACKEND_F_IOTLB_BATCH) | + BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID) | + BIT_ULL(VHOST_BACKEND_F_BYTEMAPLOG); int r; if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) { @@ -524,7 +673,7 @@ static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev) features &= f; - if (vhost_vdpa_one_time_request(dev)) { + if (vhost_vdpa_first_dev(dev)) { r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features); if (r) { return -EFAULT; @@ -566,8 +715,17 @@ static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx) static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev) { int i; + int idx; + hwaddr addr; + trace_vhost_vdpa_set_vring_ready(dev); for (i = 0; i < dev->nvqs; ++i) { + idx = vhost_vdpa_get_vq_index(dev, dev->vq_index + i); + addr = virtio_queue_get_desc_addr(dev->vdev, idx); + if (addr == 0) { + continue; + } + struct vhost_vring_state state = { .index = dev->vq_index + i, .num = 1, @@ -577,6 +735,13 @@ static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev) return 0; } +static int vhost_vdpa_set_config_call(struct vhost_dev *dev, + int fd) +{ + trace_vhost_vdpa_set_config_call(dev, fd); + return vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG_CALL, &fd); +} + static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config, uint32_t config_len) { @@ -633,15 +798,330 @@ static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config, return ret; } -static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) +static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev, + struct vhost_vring_state *ring) +{ + trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num); + return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring); +} + +static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd); + return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file); +} + +static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev, + struct vhost_vring_file *file) +{ + trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd); + return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file); +} + +static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev, + struct vhost_vring_addr *addr) +{ + trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags, + addr->desc_user_addr, addr->used_user_addr, + addr->avail_user_addr, + addr->log_guest_addr); + + return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr); + +} + +/** + * Set the shadow virtqueue descriptors to the device + * + * @dev: The vhost device model + * @svq: The shadow virtqueue + * @idx: The index of the virtqueue in the vhost device + * @errp: Error + * + * Note that this 
function does not rewind the kick file descriptor if it cannot set + * the call one. + */ +static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev, + VhostShadowVirtqueue *svq, unsigned idx, + Error **errp) +{ + struct vhost_vring_file file = { + .index = dev->vq_index + idx, + }; + const EventNotifier *event_notifier = &svq->hdev_kick; + int r; + + r = event_notifier_init(&svq->hdev_kick, 0); + if (r != 0) { + error_setg_errno(errp, -r, "Couldn't create kick event notifier"); + goto err_init_hdev_kick; + } + + r = event_notifier_init(&svq->hdev_call, 0); + if (r != 0) { + error_setg_errno(errp, -r, "Couldn't create call event notifier"); + goto err_init_hdev_call; + } + + file.fd = event_notifier_get_fd(event_notifier); + r = vhost_vdpa_set_vring_dev_kick(dev, &file); + if (unlikely(r != 0)) { + error_setg_errno(errp, -r, "Can't set device kick fd"); + goto err_init_set_dev_fd; + } + + event_notifier = &svq->hdev_call; + file.fd = event_notifier_get_fd(event_notifier); + r = vhost_vdpa_set_vring_dev_call(dev, &file); + if (unlikely(r != 0)) { + error_setg_errno(errp, -r, "Can't set device call fd"); + goto err_init_set_dev_fd; + } + + return 0; + +err_init_set_dev_fd: + event_notifier_set_handler(&svq->hdev_call, NULL); + +err_init_hdev_call: + event_notifier_cleanup(&svq->hdev_kick); + +err_init_hdev_kick: + return r; +}
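The hdev_kick and hdev_call notifiers that vhost_vdpa_svq_set_fds() hands to the device are, underneath QEMU's EventNotifier wrapper, plain Linux eventfds: a kernel counter that one side writes and the other side reads or polls. A minimal standalone illustration of that mechanism follows (Linux-specific, and of course not the SVQ code itself):

```c
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
    int kick = eventfd(0, EFD_CLOEXEC);    /* the doorbell the device polls */
    uint64_t one = 1, val = 0;

    if (kick < 0) {
        perror("eventfd");
        return 1;
    }
    /* producer side signals the event; consumer side collects it */
    if (write(kick, &one, sizeof(one)) != sizeof(one) ||
        read(kick, &val, sizeof(val)) != sizeof(val)) {
        perror("eventfd I/O");
        close(kick);
        return 1;
    }
    printf("eventfd %d fired %" PRIu64 " time(s)\n", kick, val);
    close(kick);
    return 0;
}
```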
+ +/** + * Unmap a SVQ area in the device + */ +static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr) +{ + const DMAMap needle = { + .translated_addr = addr, + }; + const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle); + hwaddr size; + int r; + + if (unlikely(!result)) { + error_report("Unable to find SVQ address to unmap"); + return; + } + + size = ROUND_UP(result->size, qemu_real_host_page_size); + r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size); + if (unlikely(r < 0)) { + error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r); + return; + } + + vhost_iova_tree_remove(v->iova_tree, *result); +} + +static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev, + const VhostShadowVirtqueue *svq) { struct vhost_vdpa *v = dev->opaque; + struct vhost_vring_addr svq_addr; + + vhost_svq_get_vring_addr(svq, &svq_addr); + + vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr); + + vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr); +} + +/** + * Map the SVQ area in the device + * + * @v: Vhost-vdpa device + * @needle: The area to search an iova for + * @errp: Error pointer + */ +static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle, + Error **errp) +{ + int r; + + r = vhost_iova_tree_map_alloc(v->iova_tree, needle); + if (unlikely(r != IOVA_OK)) { + error_setg(errp, "Cannot allocate iova (%d)", r); + return false; + } + + r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova, + needle->size + 1, + (void *)(uintptr_t)needle->translated_addr, + needle->perm == IOMMU_RO); + if (unlikely(r != 0)) { + error_setg_errno(errp, -r, "Cannot map region to device"); + vhost_iova_tree_remove(v->iova_tree, *needle); + } + + return r == 0; +} + +/** + * Map the shadow virtqueue rings in the device + * + * @dev: The vhost device + * @svq: The shadow virtqueue + * @addr: Assigned IOVA addresses + * @errp: Error pointer + */ +static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev, + const VhostShadowVirtqueue *svq, + struct vhost_vring_addr *addr, + Error **errp) +{ + DMAMap device_region, driver_region; + struct vhost_vring_addr svq_addr; + struct vhost_vdpa *v = dev->opaque; + size_t device_size = vhost_svq_device_area_size(svq); + size_t driver_size = vhost_svq_driver_area_size(svq); + size_t avail_offset; + bool ok; + + ERRP_GUARD(); + vhost_svq_get_vring_addr(svq, &svq_addr); + + driver_region = (DMAMap) { + .translated_addr = svq_addr.desc_user_addr, + .size = driver_size - 1, + .perm = IOMMU_RO, + }; + ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp); + if (unlikely(!ok)) { + error_prepend(errp, "Cannot create vq driver region: "); + return false; + } + addr->desc_user_addr = driver_region.iova; + avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr; + addr->avail_user_addr = driver_region.iova + avail_offset; + + device_region = (DMAMap) { + .translated_addr = svq_addr.used_user_addr, + .size = device_size - 1, + .perm = IOMMU_RW, + }; + ok = vhost_vdpa_svq_map_ring(v, &device_region, errp); + if (unlikely(!ok)) { + error_prepend(errp, "Cannot create vq device region: "); + vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr); + } + addr->used_user_addr = device_region.iova; + + return ok; +} + +static bool vhost_vdpa_svq_setup(struct vhost_dev *dev, + VhostShadowVirtqueue *svq, unsigned idx, + Error **errp) +{ + uint16_t vq_index = dev->vq_index + idx; + struct vhost_vring_state s = { + .index = vq_index, + }; + int r; + + r = vhost_vdpa_set_dev_vring_base(dev, &s); + if (unlikely(r)) { + error_setg_errno(errp, -r, "Cannot set vring base"); + return false; + } + + r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp); + return r == 0; +} + +static bool vhost_vdpa_svqs_start(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + Error *err = NULL; + unsigned i; + + if (!v->shadow_vqs_enabled) { + return true; + } + + for (i = 0; i < v->shadow_vqs->len; ++i) { + VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i); + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); + struct vhost_vring_addr addr = { + .index = dev->vq_index + i, + }; + int r; + bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err); + if (unlikely(!ok)) { + goto err; + } + + vhost_svq_start(svq, dev->vdev, vq, v->iova_tree); + ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err); + if (unlikely(!ok)) { + goto err_map; + } + + /* Override vring GPA set by vhost subsystem */ + r = vhost_vdpa_set_vring_dev_addr(dev, &addr); + if (unlikely(r != 0)) { + error_setg_errno(&err, -r, "Cannot set device address"); + goto err_set_addr; + } + } + + return true; + +err_set_addr: + vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i)); + +err_map: + vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i)); + +err: + error_reportf_err(err, "Cannot set up SVQ %u: ", i); + for (unsigned j = 0; j < i; ++j) { + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j); + vhost_vdpa_svq_unmap_rings(dev, svq); + vhost_svq_stop(svq); + } + + return false; +} + +static void vhost_vdpa_svqs_stop(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + + if (!v->shadow_vqs_enabled) { + return; + } + + for (unsigned i = 0; i < v->shadow_vqs->len; ++i) { + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i); + + vhost_svq_stop(svq); + vhost_vdpa_svq_unmap_rings(dev, svq); + + event_notifier_cleanup(&svq->hdev_kick); + event_notifier_cleanup(&svq->hdev_call); + } +}
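A worked sketch of the address fix-up in vhost_vdpa_svq_map_rings() above: the descriptor table and the avail ring live in one contiguous driver area, so once that area is assigned an IOVA, the avail ring's device-visible address is reconstructed by preserving its offset from the descriptor table. All values below are invented for illustration:

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    /* Host VAs of the shadow vring, as the SVQ allocator might lay it out:
     * the avail ring sits at some offset inside the driver area. */
    uint64_t desc_hva  = 0x7f1200000000ULL;
    uint64_t avail_hva = desc_hva + 0x1000;

    /* IOVA handed back for the whole driver area by the allocator. */
    uint64_t driver_iova = 0x200000;

    uint64_t avail_offset = avail_hva - desc_hva;
    uint64_t desc_iova    = driver_iova;
    uint64_t avail_iova   = driver_iova + avail_offset;

    printf("desc  at iova 0x%" PRIx64 "\n", desc_iova);
    printf("avail at iova 0x%" PRIx64 "\n", avail_iova);
    return 0;
}
```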
+ +static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) +{ + bool ok; trace_vhost_vdpa_dev_start(dev, started); if (started) { vhost_vdpa_host_notifiers_init(dev); + ok = vhost_vdpa_svqs_start(dev); + if (unlikely(!ok)) { + return -1; + } vhost_vdpa_set_vring_ready(dev); } else { + vhost_vdpa_svqs_stop(dev); vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); } @@ -650,18 +1130,11 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) } if (started) { - uint8_t status = 0; - memory_listener_register(&v->listener, &address_space_memory); - vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); - vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &status); - - return !(status & VIRTIO_CONFIG_S_DRIVER_OK); + return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK); } else { vhost_vdpa_reset_device(dev); vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER); - memory_listener_unregister(&v->listener); - return 0; } } @@ -669,7 +1142,8 @@ static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started) static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, struct vhost_log *log) { - if (vhost_vdpa_one_time_request(dev)) { + struct vhost_vdpa *v = dev->opaque; + if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) { return 0; } @@ -678,14 +1152,44 @@ static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base, return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base); } +static int vhost_vdpa_set_log_fd(struct vhost_dev *dev, int fd, + struct vhost_log *log) +{ + struct vhost_vdpa *v = dev->opaque; + if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) { + return 0; + } + + return vhost_vdpa_call(dev, VHOST_SET_LOG_FD, &fd); +} + +static int vhost_vdpa_set_log_size(struct vhost_dev *dev, uint64_t size, + struct vhost_log *log) +{ + struct vhost_vdpa *v = dev->opaque; + uint64_t logsize = size * sizeof(*(log->log)); + + if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) { + return 0; + } + + return vhost_vdpa_call(dev, VHOST_SET_LOG_SIZE, &logsize); +} + static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev, struct vhost_vring_addr *addr) { - trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags, - addr->desc_user_addr, addr->used_user_addr, - addr->avail_user_addr, - addr->log_guest_addr); - return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr); + struct vhost_vdpa *v = dev->opaque; + + if (v->shadow_vqs_enabled) { + /* + * Device vring addr was set at device start. SVQ base is handled by + * VirtQueue code. + */ + return 0; + } + + return vhost_vdpa_set_vring_dev_addr(dev, addr); } static int vhost_vdpa_set_vring_num(struct vhost_dev *dev, @@ -698,15 +1202,41 @@ static int vhost_vdpa_set_vring_num(struct vhost_dev *dev, static int vhost_vdpa_set_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { - trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num); - return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring); + struct vhost_vdpa *v = dev->opaque; + VirtQueue *vq = virtio_get_queue(dev->vdev, ring->index); + + /* + * vhost-vdpa devices do not support in-flight requests. Set all of them + * as available. + * + * TODO: This is ok for networking, but other kinds of devices might + * have problems with these retransmissions. + */ + while (virtqueue_rewind(vq, 1)) { + continue; + } + if (v->shadow_vqs_enabled) { + /* + * Device vring base was set at device start. SVQ base is handled by + * VirtQueue code. 
+ */ + return 0; + } + + return vhost_vdpa_set_dev_vring_base(dev, ring); } static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, struct vhost_vring_state *ring) { + struct vhost_vdpa *v = dev->opaque; int ret; + if (v->shadow_vqs_enabled) { + ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index); + return 0; + } + ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring); trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num); return ret; @@ -715,30 +1245,51 @@ static int vhost_vdpa_get_vring_base(struct vhost_dev *dev, static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev, struct vhost_vring_file *file) { - trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd); - return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file); + struct vhost_vdpa *v = dev->opaque; + int vdpa_idx = file->index - dev->vq_index; + + if (v->shadow_vqs_enabled) { + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); + vhost_svq_set_svq_kick_fd(svq, file->fd); + return 0; + } else { + return vhost_vdpa_set_vring_dev_kick(dev, file); + } } static int vhost_vdpa_set_vring_call(struct vhost_dev *dev, struct vhost_vring_file *file) { - trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd); - return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file); + struct vhost_vdpa *v = dev->opaque; + + if (v->shadow_vqs_enabled) { + int vdpa_idx = file->index - dev->vq_index; + VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx); + + vhost_svq_set_svq_call_fd(svq, file->fd); + return 0; + } else { + return vhost_vdpa_set_vring_dev_call(dev, file); + } } static int vhost_vdpa_get_features(struct vhost_dev *dev, uint64_t *features) { - int ret; + struct vhost_vdpa *v = dev->opaque; + int ret = vhost_vdpa_get_dev_features(dev, features); + + if (ret == 0 && v->shadow_vqs_enabled) { + /* Add SVQ logging capabilities */ + *features |= BIT_ULL(VHOST_F_LOG_ALL); + } - ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features); - trace_vhost_vdpa_get_features(dev, *features); return ret; } static int vhost_vdpa_set_owner(struct vhost_dev *dev) { - if (vhost_vdpa_one_time_request(dev)) { + if (!vhost_vdpa_first_dev(dev)) { return 0; } @@ -763,11 +1314,66 @@ static bool vhost_vdpa_force_iommu(struct vhost_dev *dev) return true; } +static void vhost_vdpa_set_used_memslots(struct vhost_dev *dev) +{ + vhost_vdpa_used_memslots = dev->mem->nregions; +} + +static unsigned int vhost_vdpa_get_used_memslots(void) +{ + return vhost_vdpa_used_memslots; +} + +static int vhost_vdpa_suspend_device(struct vhost_dev *dev) +{ + int ret; + + vhost_vdpa_svqs_stop(dev); + vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs); + + if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + return 0; + } + + ret = vhost_vdpa_call(dev, VHOST_VDPA_SUSPEND, NULL); + return ret; +} + +static int vhost_vdpa_resume_device(struct vhost_dev *dev) +{ + bool ok; + + vhost_vdpa_host_notifiers_init(dev); + ok = vhost_vdpa_svqs_start(dev); + if (unlikely(!ok)) { + return -1; + } + vhost_vdpa_set_vring_ready(dev); + + if (dev->vq_index + dev->nvqs != dev->vq_index_end) { + return 0; + } + + return vhost_vdpa_call(dev, VHOST_VDPA_RESUME, NULL); +} + +static int vhost_vdpa_log_sync(struct vhost_dev *dev) +{ + struct vhost_vdpa *v = dev->opaque; + if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) { + return 0; + } + + return vhost_vdpa_call(dev, VHOST_LOG_SYNC, NULL); +} + const VhostOps vdpa_ops = { .backend_type = VHOST_BACKEND_TYPE_VDPA, .vhost_backend_init = vhost_vdpa_init, .vhost_backend_cleanup = 
vhost_vdpa_cleanup, .vhost_set_log_base = vhost_vdpa_set_log_base, + .vhost_set_log_size = vhost_vdpa_set_log_size, + .vhost_set_log_fd = vhost_vdpa_set_log_fd, .vhost_set_vring_addr = vhost_vdpa_set_vring_addr, .vhost_set_vring_num = vhost_vdpa_set_vring_num, .vhost_set_vring_base = vhost_vdpa_set_vring_base, @@ -795,4 +1401,11 @@ const VhostOps vdpa_ops = { .vhost_get_device_id = vhost_vdpa_get_device_id, .vhost_vq_get_addr = vhost_vdpa_vq_get_addr, .vhost_force_iommu = vhost_vdpa_force_iommu, + .vhost_log_sync = vhost_vdpa_log_sync, + .vhost_set_config_call = vhost_vdpa_set_config_call, + .vhost_set_used_memslots = vhost_vdpa_set_used_memslots, + .vhost_get_used_memslots = vhost_vdpa_get_used_memslots, + .vhost_dev_suspend = vhost_vdpa_suspend_device, + .vhost_dev_resume = vhost_vdpa_resume_device, + }; diff --git a/hw/virtio/vhost-vsock-common.c b/hw/virtio/vhost-vsock-common.c index 3f3771274e772ef6f086c87184ebb630a8d590d7..b9cf5f3f297b7267ae400061e891adcd82ae337f 100644 --- a/hw/virtio/vhost-vsock-common.c +++ b/hw/virtio/vhost-vsock-common.c @@ -68,7 +68,7 @@ int vhost_vsock_common_start(VirtIODevice *vdev) } vvc->vhost_dev.acked_features = vdev->guest_features; - ret = vhost_dev_start(&vvc->vhost_dev, vdev); + ret = vhost_dev_start(&vvc->vhost_dev, vdev, true); if (ret < 0) { error_report("Error starting vhost: %d", -ret); goto err_guest_notifiers; @@ -103,7 +103,7 @@ void vhost_vsock_common_stop(VirtIODevice *vdev) return; } - vhost_dev_stop(&vvc->vhost_dev, vdev); + vhost_dev_stop(&vvc->vhost_dev, vdev, true); ret = k->set_guest_notifiers(qbus->parent, vvc->vhost_dev.nvqs, false); if (ret < 0) { @@ -125,6 +125,15 @@ static void vhost_vsock_common_guest_notifier_mask(VirtIODevice *vdev, int idx, { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + /* + * Check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1) is + * used as the index of the configure interrupt; if this driver does + * not support it, return early. + */ + + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return; + } vhost_virtqueue_mask(&vvc->vhost_dev, vdev, idx, mask); } @@ -133,6 +142,15 @@ static bool vhost_vsock_common_guest_notifier_pending(VirtIODevice *vdev, { VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + /* + * Check for the configure interrupt. VIRTIO_CONFIG_IRQ_IDX (-1) is + * used as the index of the configure interrupt; if this driver does + * not support it, return early. + */ + + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return false; + } return vhost_virtqueue_pending(&vvc->vhost_dev, idx); } @@ -153,19 +171,23 @@ static void vhost_vsock_common_send_transport_reset(VHostVSockCommon *vvc) if (elem->out_num) { error_report("invalid vhost-vsock event virtqueue element with " "out buffers"); - goto out; + goto err; } if (iov_from_buf(elem->in_sg, elem->in_num, 0, &event, sizeof(event)) != sizeof(event)) { error_report("vhost-vsock event virtqueue element is too short"); - goto out; + goto err; } virtqueue_push(vq, elem, sizeof(event)); virtio_notify(VIRTIO_DEVICE(vvc), vq); -out: + g_free(elem); + return; + +err: + virtqueue_detach_element(vq, elem, 0); g_free(elem); } @@ -255,6 +277,12 @@ void vhost_vsock_common_unrealize(VirtIODevice *vdev) virtio_cleanup(vdev); } +static struct vhost_dev *vhost_vsock_common_get_vhost(VirtIODevice *vdev) +{ + VHostVSockCommon *vvc = VHOST_VSOCK_COMMON(vdev); + return &vvc->vhost_dev; +} + static Property vhost_vsock_common_properties[] = { DEFINE_PROP_ON_OFF_AUTO("seqpacket", VHostVSockCommon, seqpacket, ON_OFF_AUTO_AUTO), @@ -270,6 +298,7 @@ static 
void vhost_vsock_common_class_init(ObjectClass *klass, void *data) set_bit(DEVICE_CATEGORY_MISC, dc->categories); vdc->guest_notifier_mask = vhost_vsock_common_guest_notifier_mask; vdc->guest_notifier_pending = vhost_vsock_common_guest_notifier_pending; + vdc->get_vhost = vhost_vsock_common_get_vhost; } static const TypeInfo vhost_vsock_common_info = { diff --git a/hw/virtio/vhost-vsock.c b/hw/virtio/vhost-vsock.c index 478c0c9a8789c58cfee48149a6f15c686d30f9cb..433d42d897dfb566f50e756cfb6b0642fe9c16db 100644 --- a/hw/virtio/vhost-vsock.c +++ b/hw/virtio/vhost-vsock.c @@ -171,6 +171,10 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) ret = vhost_dev_init(&vvc->vhost_dev, (void *)(uintptr_t)vhostfd, VHOST_BACKEND_TYPE_KERNEL, 0, errp); if (ret < 0) { + /* + * vhostfd is closed by vhost_dev_cleanup, which is called + * by vhost_dev_init on initialization error. + */ goto err_virtio; } @@ -183,15 +187,10 @@ static void vhost_vsock_device_realize(DeviceState *dev, Error **errp) return; err_vhost_dev: - vhost_dev_cleanup(&vvc->vhost_dev); /* vhost_dev_cleanup() closes the vhostfd passed to vhost_dev_init() */ - vhostfd = -1; + vhost_dev_cleanup(&vvc->vhost_dev); err_virtio: vhost_vsock_common_unrealize(vdev); - if (vhostfd >= 0) { - close(vhostfd); - } - return; } static void vhost_vsock_device_unrealize(DeviceState *dev) diff --git a/hw/virtio/vhost.c b/hw/virtio/vhost.c index 437347ad01c99ec2f97f989a66a625043ffc8274..91b00194690b5712c58e172e7e19102a0a250a70 100644 --- a/hw/virtio/vhost.c +++ b/hw/virtio/vhost.c @@ -19,45 +19,63 @@ #include "qemu/atomic.h" #include "qemu/range.h" #include "qemu/error-report.h" +#include "cpu.h" #include "qemu/memfd.h" +#include "qemu/log.h" #include "standard-headers/linux/vhost_types.h" +#include "exec/ram_addr.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" #include "migration/blocker.h" #include "migration/qemu-file-types.h" +#include "migration/migration.h" #include "sysemu/dma.h" #include "sysemu/tcg.h" #include "trace.h" +#include "qapi/qapi-commands-migration.h" /* enabled until disconnected backend stabilizes */ #define _VHOST_DEBUG 1 #ifdef _VHOST_DEBUG -#define VHOST_OPS_DEBUG(fmt, ...) \ - do { error_report(fmt ": %s (%d)", ## __VA_ARGS__, \ - strerror(errno), errno); } while (0) +#define VHOST_OPS_DEBUG(retval, fmt, ...) \ + do { \ + error_report(fmt ": %s (%d)", ## __VA_ARGS__, \ + strerror(-retval), -retval); \ + } while (0) #else -#define VHOST_OPS_DEBUG(fmt, ...) \ +#define VHOST_OPS_DEBUG(retval, fmt, ...) 
\ do { } while (0) #endif +static inline bool vhost_bytemap_log_support(struct vhost_dev *dev) +{ + return (dev->backend_cap & BIT_ULL(VHOST_BACKEND_F_BYTEMAPLOG)); +} + static struct vhost_log *vhost_log; static struct vhost_log *vhost_log_shm; -static unsigned int used_memslots; static QLIST_HEAD(, vhost_dev) vhost_devices = QLIST_HEAD_INITIALIZER(vhost_devices); +bool used_memslots_exceeded; + bool vhost_has_free_slot(void) { - unsigned int slots_limit = ~0U; struct vhost_dev *hdev; QLIST_FOREACH(hdev, &vhost_devices, entry) { - unsigned int r = hdev->vhost_ops->vhost_backend_memslots_limit(hdev); - slots_limit = MIN(slots_limit, r); + if (!hdev->vhost_ops->vhost_get_used_memslots || + !hdev->vhost_ops->vhost_backend_memslots_limit) { + continue; + } + if (hdev->vhost_ops->vhost_get_used_memslots() >= + hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) { + return false; + } } - return slots_limit > used_memslots; + return true; } static void vhost_dev_sync_region(struct vhost_dev *dev, @@ -105,6 +123,24 @@ static void vhost_dev_sync_region(struct vhost_dev *dev, } } +static bool vhost_dev_has_iommu(struct vhost_dev *dev) +{ + VirtIODevice *vdev = dev->vdev; + + /* + * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend supports + * the incremental memory mapping API via the IOTLB API. For platforms + * that do not have an IOMMU, there's no need to enable this feature, + * which may cause unnecessary IOTLB miss/update transactions. + */ + if (vdev) { + return virtio_bus_device_iommu_enabled(vdev) && + virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); + } else { + return false; + } +} + static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, MemoryRegionSection *section, hwaddr first, @@ -136,18 +172,142 @@ static int vhost_sync_dirty_bitmap(struct vhost_dev *dev, continue; } - vhost_dev_sync_region(dev, section, start_addr, end_addr, vq->used_phys, - range_get_last(vq->used_phys, vq->used_size)); + if (vhost_dev_has_iommu(dev)) { + IOMMUTLBEntry iotlb; + hwaddr used_phys = vq->used_phys, used_size = vq->used_size; + hwaddr phys, s, offset; + + while (used_size) { + rcu_read_lock(); + iotlb = address_space_get_iotlb_entry(dev->vdev->dma_as, + used_phys, + true, + MEMTXATTRS_UNSPECIFIED); + rcu_read_unlock(); + + if (!iotlb.target_as) { + qemu_log_mask(LOG_GUEST_ERROR, "translation " + "failure for used_iova %"PRIx64"\n", + used_phys); + return -EINVAL; + } + + offset = used_phys & iotlb.addr_mask; + phys = iotlb.translated_addr + offset; + + /* + * Distance from start of used ring until last byte of + * IOMMU page. + */ + s = iotlb.addr_mask - offset; + /* + * Size of used ring, or of the part of it until end + * of IOMMU page. To avoid zero result, do the adding + * outside of MIN(). + */ + s = MIN(s, used_size - 1) + 1; + + vhost_dev_sync_region(dev, section, start_addr, end_addr, phys, + range_get_last(phys, s)); + used_size -= s; + used_phys += s; + } + } else { + vhost_dev_sync_region(dev, section, start_addr, + end_addr, vq->used_phys, + range_get_last(vq->used_phys, vq->used_size)); + } } return 0; }
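The chunking arithmetic in the IOMMU branch just above is subtle enough to be worth checking in isolation: the used ring is synced one IOMMU page at a time, and MIN() is applied to size - 1 exactly so that a chunk which fills the rest of the ring can never come out as zero. This standalone program replays the loop with made-up values (a 4 KiB IOMMU page and a 512-byte ring crossing a page boundary):

```c
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
    uint64_t addr_mask = 0xfff;      /* 4 KiB IOMMU page */
    uint64_t used_phys = 0x10f80;    /* used ring IOVA, near a page end */
    uint64_t used_size = 0x200;      /* 512 bytes, crosses the boundary */

    while (used_size) {
        uint64_t offset = used_phys & addr_mask;
        uint64_t s = addr_mask - offset;          /* room in page, minus 1 */
        s = (s < used_size - 1 ? s : used_size - 1) + 1;
        printf("sync 0x%" PRIx64 "..0x%" PRIx64 " (%" PRIu64 " bytes)\n",
               used_phys, used_phys + s - 1, s);
        used_size -= s;
        used_phys += s;
    }
    return 0;
}
```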
+#define BYTES_PER_LONG (sizeof(unsigned long)) +#define BYTE_WORD(nr) ((nr) / BYTES_PER_LONG) +#define BYTES_TO_LONGS(nr) DIV_ROUND_UP(nr, BYTES_PER_LONG) + +static inline int64_t _set_dirty_bytemap_atomic(unsigned long *bytemap, unsigned long cur_pfn) +{ + char *byte_of_long = (char *)bytemap; + int i; + int64_t dirty_num = 0; + + for (i = 0; i < BYTES_PER_LONG; i++) { + if (byte_of_long[i]) { + cpu_physical_memory_set_dirty_range((cur_pfn + i) << TARGET_PAGE_BITS, + TARGET_PAGE_SIZE, + 1 << DIRTY_MEMORY_MIGRATION); + /* Per byte ops, no need to atomic_xchg */ + byte_of_long[i] = 0; + dirty_num++; + } + } + + return dirty_num; +} + +static inline int64_t cpu_physical_memory_set_dirty_bytemap(unsigned long *bytemap, + ram_addr_t start, + ram_addr_t pages) +{ + unsigned long i; + unsigned long len = BYTES_TO_LONGS(pages); + unsigned long pfn = (start >> TARGET_PAGE_BITS) / + BYTES_PER_LONG * BYTES_PER_LONG; + int64_t dirty_mig_bits = 0; + + for (i = 0; i < len; i++) { + if (bytemap[i]) { + dirty_mig_bits += _set_dirty_bytemap_atomic(&bytemap[i], + pfn + BYTES_PER_LONG * i); + } + } + + return dirty_mig_bits; +} + +static int vhost_sync_dirty_bytemap(struct vhost_dev *dev, + MemoryRegionSection *section) +{ + struct vhost_log *log = dev->log; + + ram_addr_t start = section->offset_within_region + + memory_region_get_ram_addr(section->mr); + ram_addr_t pages = int128_get64(section->size) >> TARGET_PAGE_BITS; + + hwaddr idx = BYTE_WORD( + section->offset_within_address_space >> TARGET_PAGE_BITS); + + return cpu_physical_memory_set_dirty_bytemap((unsigned long *)log->log + idx, + start, pages); +}
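Below is a toy, self-contained rendition of the byte-map scan that _set_dirty_bytemap_atomic() and cpu_physical_memory_set_dirty_bytemap() implement above: one byte per guest page, a whole unsigned long tested at a time so clean stretches are skipped cheaply, and plain per-byte clearing in place of an atomic exchange. The "dirty" sink here is a simple counter rather than QEMU's dirty bitmap:

```c
#include <inttypes.h>
#include <stdio.h>

#define TOY_BYTES_PER_LONG sizeof(unsigned long)

static unsigned long bytemap[4];     /* one byte per page: 32 pages on LP64 */

int main(void)
{
    unsigned char *bytes = (unsigned char *)bytemap;
    int64_t dirty = 0;

    bytes[3] = 1;                    /* guest dirtied pages 3 and 17 */
    bytes[17] = 1;

    for (size_t i = 0; i < sizeof(bytemap) / TOY_BYTES_PER_LONG; i++) {
        if (!bytemap[i]) {
            continue;                /* whole long clean: skip 8 pages at once */
        }
        unsigned char *b = (unsigned char *)&bytemap[i];
        for (size_t j = 0; j < TOY_BYTES_PER_LONG; j++) {
            if (b[j]) {
                dirty++;             /* page i * TOY_BYTES_PER_LONG + j */
                b[j] = 0;            /* per-byte store, no atomic_xchg needed */
            }
        }
    }
    printf("pages dirtied: %" PRId64 "\n", dirty);
    return 0;
}
```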
+ static void vhost_log_sync(MemoryListener *listener, MemoryRegionSection *section) { struct vhost_dev *dev = container_of(listener, struct vhost_dev, memory_listener); - vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL); + MigrationState *ms = migrate_get_current(); + + if (!dev->log_enabled || !dev->log) { + return; + } + + if (dev->vhost_ops->vhost_log_sync) { + int r = dev->vhost_ops->vhost_log_sync(dev); + if (r < 0) { + error_report("Failed to sync dirty log: %d", r); + if (migration_is_running(ms->state)) { + qmp_migrate_cancel(NULL); + } + return; + } + } + + if (vhost_bytemap_log_support(dev)) { + vhost_sync_dirty_bytemap(dev, section); + } else { + vhost_sync_dirty_bitmap(dev, section, 0x0, ~0x0ULL); + } } static void vhost_log_sync_range(struct vhost_dev *dev, @@ -157,7 +317,11 @@ static void vhost_log_sync_range(struct vhost_dev *dev, /* FIXME: this is N^2 in number of sections */ for (i = 0; i < dev->n_mem_sections; ++i) { MemoryRegionSection *section = &dev->mem_sections[i]; - vhost_sync_dirty_bitmap(dev, section, first, last); + if (vhost_bytemap_log_support(dev)) { + vhost_sync_dirty_bytemap(dev, section); + } else { + vhost_sync_dirty_bitmap(dev, section, first, last); + } } } @@ -165,11 +329,19 @@ static uint64_t vhost_get_log_size(struct vhost_dev *dev) { uint64_t log_size = 0; int i; + uint64_t vhost_log_chunk_size; + + if (vhost_bytemap_log_support(dev)) { + vhost_log_chunk_size = VHOST_LOG_CHUNK_BYTES; + } else { + vhost_log_chunk_size = VHOST_LOG_CHUNK; + } + for (i = 0; i < dev->mem->nregions; ++i) { struct vhost_memory_region *reg = dev->mem->regions + i; uint64_t last = range_get_last(reg->guest_phys_addr, reg->memory_size); - log_size = MAX(log_size, last / VHOST_LOG_CHUNK + 1); + log_size = MAX(log_size, last / vhost_log_chunk_size + 1); } return log_size; } @@ -287,36 +459,41 @@ static bool vhost_dev_log_is_shared(struct vhost_dev *dev) dev->vhost_ops->vhost_requires_shm_log(dev); } -static inline void vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size) +static inline int vhost_dev_log_resize(struct vhost_dev *dev, uint64_t size) { struct vhost_log *log = vhost_log_get(size, vhost_dev_log_is_shared(dev)); - uint64_t log_base = (uintptr_t)log->log; + uint64_t log_base; int r; + if (!log) { + r = -ENOMEM; + goto out; + } + + log_base = (uint64_t)(uintptr_t)log->log; + /* inform backend of log switching, this must be done before releasing the current log, to ensure no logging is lost */ r = dev->vhost_ops->vhost_set_log_base(dev, log_base, log); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_log_base failed"); + VHOST_OPS_DEBUG(r, "vhost_set_log_base failed"); + } + + if (dev->vhost_ops->vhost_set_log_size) { + r = dev->vhost_ops->vhost_set_log_size(dev, size, log); + if (r < 0) { + VHOST_OPS_DEBUG(r, "vhost_set_log_size failed"); + } } vhost_log_put(dev, true); dev->log = log; dev->log_size = size; -} - -static int vhost_dev_has_iommu(struct vhost_dev *dev) -{ - VirtIODevice *vdev = dev->vdev; - /* - * For vhost, VIRTIO_F_IOMMU_PLATFORM means the backend support - * incremental memory mapping API via IOTLB API. For platform that - * does not have IOMMU, there's no need to enable this feature - * which may cause unnecessary IOTLB miss/update trnasactions. - */ - return virtio_bus_device_iommu_enabled(vdev) && - virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM); +out: + return r; } static void *vhost_memory_map(struct vhost_dev *dev, hwaddr addr, @@ -520,7 +697,6 @@ static void vhost_commit(MemoryListener *listener) dev->n_mem_sections * sizeof dev->mem->regions[0]; dev->mem = g_realloc(dev->mem, regions_size); dev->mem->nregions = dev->n_mem_sections; - used_memslots = dev->mem->nregions; for (i = 0; i < dev->n_mem_sections; i++) { struct vhost_memory_region *cur_vmr = dev->mem->regions + i; struct MemoryRegionSection *mrs = dev->mem_sections + i; @@ -550,7 +726,7 @@ static void vhost_commit(MemoryListener *listener) if (!dev->log_enabled) { r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_mem_table failed"); + VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); } goto out; } @@ -564,7 +740,7 @@ static void vhost_commit(MemoryListener *listener) } r = dev->vhost_ops->vhost_set_mem_table(dev, dev->mem); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_mem_table failed"); + VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); } /* To log less, can only decrease log size after table update. 
*/ if (dev->log_size > log_size + VHOST_LOG_BUFFER) { @@ -696,6 +872,9 @@ static void vhost_region_add_section(struct vhost_dev *dev, dev->tmp_sections[dev->n_tmp_sections - 1].fv = NULL; memory_region_ref(section->mr); } + if (dev->vhost_ops->vhost_set_used_memslots) { + dev->vhost_ops->vhost_set_used_memslots(dev); + } } /* Used for both add and nop callbacks */ @@ -711,6 +890,19 @@ static void vhost_region_addnop(MemoryListener *listener, vhost_region_add_section(dev, section); } +static void vhost_region_del(MemoryListener *listener, + MemoryRegionSection *section) +{ + struct vhost_dev *dev = container_of(listener, struct vhost_dev, + memory_listener); + if (!vhost_section(dev, section)) { + return; + } + if (dev->vhost_ops->vhost_set_used_memslots) { + dev->vhost_ops->vhost_set_used_memslots(dev); + } +} + static void vhost_iommu_unmap_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb) { struct vhost_iommu *iommu = container_of(n, struct vhost_iommu, n); @@ -803,8 +995,8 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev, if (dev->vhost_ops->vhost_vq_get_addr) { r = dev->vhost_ops->vhost_vq_get_addr(dev, &addr, vq); if (r < 0) { - VHOST_OPS_DEBUG("vhost_vq_get_addr failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_vq_get_addr failed"); + return r; } } else { addr.desc_user_addr = (uint64_t)(unsigned long)vq->desc; @@ -816,10 +1008,9 @@ static int vhost_virtqueue_set_addr(struct vhost_dev *dev, addr.flags = enable_log ? (1 << VHOST_VRING_F_LOG) : 0; r = dev->vhost_ops->vhost_set_vring_addr(dev, &addr); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_vring_addr failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_addr failed"); } - return 0; + return r; } static int vhost_dev_set_features(struct vhost_dev *dev, @@ -840,19 +1031,19 @@ static int vhost_dev_set_features(struct vhost_dev *dev, } r = dev->vhost_ops->vhost_set_features(dev, features); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_features failed"); + VHOST_OPS_DEBUG(r, "vhost_set_features failed"); goto out; } if (dev->vhost_ops->vhost_set_backend_cap) { r = dev->vhost_ops->vhost_set_backend_cap(dev); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_backend_cap failed"); + VHOST_OPS_DEBUG(r, "vhost_set_backend_cap failed"); goto out; } } out: - return r < 0 ? 
-errno : 0; } static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log) @@ -886,6 +1077,10 @@ static int vhost_dev_set_log(struct vhost_dev *dev, bool enable_log) err_vq: for (; i >= 0; --i) { idx = dev->vhost_ops->vhost_get_vq_index(dev, dev->vq_index + i); + addr = virtio_queue_get_desc_addr(dev->vdev, idx); + if (!addr) { + continue; + } vhost_virtqueue_set_addr(dev, dev->vqs + i, idx, dev->log_enabled); } @@ -915,7 +1110,11 @@ static int vhost_migration_log(MemoryListener *listener, bool enable) } vhost_log_put(dev, false); } else { - vhost_dev_log_resize(dev, vhost_get_log_size(dev)); + r = vhost_dev_log_resize(dev, vhost_get_log_size(dev)); + if (r < 0) { + return r; + } + r = vhost_dev_set_log(dev, true); if (r < 0) { goto check_dev_state; @@ -947,20 +1146,24 @@ check_dev_state: static void vhost_log_global_start(MemoryListener *listener) { int r; + Error *errp = NULL; r = vhost_migration_log(listener, true); if (r < 0) { - abort(); + error_setg(&errp, "Failed to start vhost migration log"); + migrate_fd_error(migrate_get_current(), errp); } } static void vhost_log_global_stop(MemoryListener *listener) { int r; + Error *errp = NULL; r = vhost_migration_log(listener, false); if (r < 0) { - abort(); + error_setg(&errp, "Failed to stop vhost migration log"); + migrate_fd_error(migrate_get_current(), errp); } } @@ -999,22 +1202,17 @@ static int vhost_virtqueue_set_vring_endian_legacy(struct vhost_dev *dev, bool is_big_endian, int vhost_vq_index) { + int r; struct vhost_vring_state s = { .index = vhost_vq_index, .num = is_big_endian }; - if (!dev->vhost_ops->vhost_set_vring_endian(dev, &s)) { - return 0; - } - - VHOST_OPS_DEBUG("vhost_set_vring_endian failed"); - if (errno == ENOTTY) { - error_report("vhost does not support cross-endian"); - return -ENOSYS; + r = dev->vhost_ops->vhost_set_vring_endian(dev, &s); + if (r < 0) { + VHOST_OPS_DEBUG(r, "vhost_set_vring_endian failed"); } - - return -errno; + return r; } static int vhost_memory_region_lookup(struct vhost_dev *hdev, @@ -1106,15 +1304,15 @@ static int vhost_virtqueue_start(struct vhost_dev *dev, vq->num = state.num = virtio_queue_get_num(vdev, idx); r = dev->vhost_ops->vhost_set_vring_num(dev, &state); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_num failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_num failed"); + return r; } state.num = virtio_queue_get_last_avail_idx(vdev, idx); r = dev->vhost_ops->vhost_set_vring_base(dev, &state); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_base failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_base failed"); + return r; } if (vhost_needs_vring_endian(vdev)) { @@ -1122,7 +1320,7 @@ static int vhost_virtqueue_start(struct vhost_dev *dev, virtio_is_big_endian(vdev), vhost_vq_index); if (r) { - return -errno; + return r; } } @@ -1150,15 +1348,13 @@ static int vhost_virtqueue_start(struct vhost_dev *dev, r = vhost_virtqueue_set_addr(dev, vq, vhost_vq_index, dev->log_enabled); if (r < 0) { - r = -errno; goto fail_alloc; } file.fd = event_notifier_get_fd(virtio_queue_get_host_notifier(vvq)); r = dev->vhost_ops->vhost_set_vring_kick(dev, &file); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_kick failed"); - r = -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_kick failed"); goto fail_kick; } @@ -1218,7 +1414,7 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev, r = dev->vhost_ops->vhost_get_vring_base(dev, &state); if (r < 0) { - VHOST_OPS_DEBUG("vhost VQ %u ring restore failed: %d", idx, r); + VHOST_OPS_DEBUG(r, "vhost VQ %u ring restore 
failed: %d", idx, r); /* Connection to the backend is broken, so let's sync internal * last avail idx to the device used idx. */ @@ -1246,18 +1442,6 @@ static void vhost_virtqueue_stop(struct vhost_dev *dev, 0, virtio_queue_get_desc_size(vdev, idx)); } -static void vhost_eventfd_add(MemoryListener *listener, - MemoryRegionSection *section, - bool match_data, uint64_t data, EventNotifier *e) -{ -} - -static void vhost_eventfd_del(MemoryListener *listener, - MemoryRegionSection *section, - bool match_data, uint64_t data, EventNotifier *e) -{ -} - static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, int n, uint32_t timeout) { @@ -1274,7 +1458,7 @@ static int vhost_virtqueue_set_busyloop_timeout(struct vhost_dev *dev, r = dev->vhost_ops->vhost_set_vring_busyloop_timeout(dev, &state); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_busyloop_timeout failed"); + VHOST_OPS_DEBUG(r, "vhost_set_vring_busyloop_timeout failed"); return r; } @@ -1296,8 +1480,7 @@ static int vhost_virtqueue_init(struct vhost_dev *dev, file.fd = event_notifier_get_fd(&vq->masked_notifier); r = dev->vhost_ops->vhost_set_vring_call(dev, &file); if (r) { - VHOST_OPS_DEBUG("vhost_set_vring_call failed"); - r = -errno; + VHOST_OPS_DEBUG(r, "vhost_set_vring_call failed"); goto fail_call; } @@ -1314,6 +1497,26 @@ static void vhost_virtqueue_cleanup(struct vhost_virtqueue *vq) event_notifier_cleanup(&vq->masked_notifier); } +static bool vhost_dev_used_memslots_is_exceeded(struct vhost_dev *hdev) +{ + if (!hdev->vhost_ops->vhost_get_used_memslots || + !hdev->vhost_ops->vhost_backend_memslots_limit) { + goto out; + } + + if (hdev->vhost_ops->vhost_get_used_memslots() > + hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) { + error_report("vhost backend memory slots limit is less" + " than current number of present memory slots"); + used_memslots_exceeded = true; + return true; + } + +out: + used_memslots_exceeded = false; + return false; +} + int vhost_dev_init(struct vhost_dev *hdev, void *opaque, VhostBackendType backend_type, uint32_t busyloop_timeout, Error **errp) @@ -1369,6 +1572,7 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, .name = "vhost", .begin = vhost_begin, .commit = vhost_commit, + .region_del = vhost_region_del, .region_add = vhost_region_addnop, .region_nop = vhost_region_addnop, .log_start = vhost_log_start, @@ -1376,8 +1580,6 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, .log_sync = vhost_log_sync, .log_global_start = vhost_log_global_start, .log_global_stop = vhost_log_global_stop, - .eventfd_add = vhost_eventfd_add, - .eventfd_del = vhost_eventfd_del, .priority = 10 }; @@ -1415,9 +1617,13 @@ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, memory_listener_register(&hdev->memory_listener, &address_space_memory); QLIST_INSERT_HEAD(&vhost_devices, hdev, entry); - if (used_memslots > hdev->vhost_ops->vhost_backend_memslots_limit(hdev)) { - error_setg(errp, "vhost backend memory slots limit is less" - " than current number of present memory slots"); + /* + * If we started a VM without any vhost device, + * vhost_dev_used_memslots_is_exceeded will always return false for the + * first time vhost device hot-plug(vhost_get_used_memslots is always 0), + * so it needs to double check here + */ + if (vhost_dev_used_memslots_is_exceeded(hdev)) { r = -EINVAL; goto fail_busyloop; } @@ -1440,6 +1646,8 @@ void vhost_dev_cleanup(struct vhost_dev *hdev) { int i; + trace_vhost_dev_cleanup(hdev); + for (i = 0; i < hdev->nvqs; ++i) { vhost_virtqueue_cleanup(hdev->vqs + i); } @@ 
-1557,7 +1765,68 @@ void vhost_virtqueue_mask(struct vhost_dev *hdev, VirtIODevice *vdev, int n, file.index = hdev->vhost_ops->vhost_get_vq_index(hdev, n); r = hdev->vhost_ops->vhost_set_vring_call(hdev, &file); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_vring_call failed"); + error_report("vhost_set_vring_call failed %d", -r); + } +} + +bool vhost_config_pending(struct vhost_dev *hdev) +{ + assert(hdev->vhost_ops); + if ((hdev->started == false) || + (hdev->vhost_ops->vhost_set_config_call == NULL)) { + return false; + } + + EventNotifier *notifier = + &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; + return event_notifier_test_and_clear(notifier); +} + +void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask) +{ + int fd; + int r; + EventNotifier *notifier = + &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; + EventNotifier *config_notifier = &vdev->config_notifier; + assert(hdev->vhost_ops); + + if ((hdev->started == false) || + (hdev->vhost_ops->vhost_set_config_call == NULL)) { + return; + } + if (mask) { + assert(vdev->use_guest_notifier_mask); + fd = event_notifier_get_fd(notifier); + } else { + fd = event_notifier_get_fd(config_notifier); + } + r = hdev->vhost_ops->vhost_set_config_call(hdev, fd); + if (r < 0) { + error_report("vhost_set_config_call failed %d", -r); + } +} + +static void vhost_stop_config_intr(struct vhost_dev *dev) +{ + int fd = -1; + assert(dev->vhost_ops); + if (dev->vhost_ops->vhost_set_config_call) { + dev->vhost_ops->vhost_set_config_call(dev, fd); + } +} + +static void vhost_start_config_intr(struct vhost_dev *dev) +{ + int r; + + assert(dev->vhost_ops); + int fd = event_notifier_get_fd(&dev->vdev->config_notifier); + if (dev->vhost_ops->vhost_set_config_call) { + r = dev->vhost_ops->vhost_set_config_call(dev, fd); + if (!r) { + event_notifier_set(&dev->vdev->config_notifier); + } } } @@ -1599,7 +1868,7 @@ int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, } error_setg(errp, "vhost_get_config not implemented"); - return -ENOTSUP; + return -ENOSYS; } int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, @@ -1612,7 +1881,7 @@ int vhost_dev_set_config(struct vhost_dev *hdev, const uint8_t *data, size, flags); } - return -1; + return -ENOSYS; } void vhost_dev_set_config_notifier(struct vhost_dev *hdev, @@ -1641,7 +1910,7 @@ static int vhost_dev_resize_inflight(struct vhost_inflight *inflight, if (err) { error_report_err(err); - return -1; + return -ENOMEM; } vhost_dev_free_inflight(inflight); @@ -1674,8 +1943,9 @@ int vhost_dev_load_inflight(struct vhost_inflight *inflight, QEMUFile *f) } if (inflight->size != size) { - if (vhost_dev_resize_inflight(inflight, size)) { - return -1; + int ret = vhost_dev_resize_inflight(inflight, size); + if (ret < 0) { + return ret; } } inflight->queue_size = qemu_get_be16(f); @@ -1698,7 +1968,7 @@ int vhost_dev_prepare_inflight(struct vhost_dev *hdev, VirtIODevice *vdev) r = vhost_dev_set_features(hdev, hdev->log_enabled); if (r < 0) { - VHOST_OPS_DEBUG("vhost_dev_prepare_inflight failed"); + VHOST_OPS_DEBUG(r, "vhost_dev_prepare_inflight failed"); return r; } @@ -1713,8 +1983,8 @@ int vhost_dev_set_inflight(struct vhost_dev *dev, if (dev->vhost_ops->vhost_set_inflight_fd && inflight->addr) { r = dev->vhost_ops->vhost_set_inflight_fd(dev, inflight); if (r) { - VHOST_OPS_DEBUG("vhost_set_inflight_fd failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_set_inflight_fd failed"); + return r; } } @@ -1729,22 +1999,46 @@ int vhost_dev_get_inflight(struct 
vhost_dev *dev, uint16_t queue_size, if (dev->vhost_ops->vhost_get_inflight_fd) { r = dev->vhost_ops->vhost_get_inflight_fd(dev, queue_size, inflight); if (r) { - VHOST_OPS_DEBUG("vhost_get_inflight_fd failed"); - return -errno; + VHOST_OPS_DEBUG(r, "vhost_get_inflight_fd failed"); + return r; } } return 0; } +static int vhost_dev_set_vring_enable(struct vhost_dev *hdev, int enable) +{ + if (!hdev->vhost_ops->vhost_set_vring_enable) { + return 0; + } + + /* + * For vhost-user devices, if VHOST_USER_F_PROTOCOL_FEATURES has not + * been negotiated, the rings start directly in the enabled state, and + * .vhost_set_vring_enable callback will fail since + * VHOST_USER_SET_VRING_ENABLE is not supported. + */ + if (hdev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_USER && + !virtio_has_feature(hdev->backend_features, + VHOST_USER_F_PROTOCOL_FEATURES)) { + return 0; + } + + return hdev->vhost_ops->vhost_set_vring_enable(hdev, enable); +} + /* Host notifiers must be enabled at this point. */ -int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) +int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) { int i, r; /* should only be called after backend is connected */ assert(hdev->vhost_ops); + trace_vhost_dev_start(hdev, vdev->name, vrings); + + vdev->vhost_started = true; hdev->started = true; hdev->vdev = vdev; @@ -1759,8 +2053,7 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_mem_table failed"); - r = -errno; + VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); goto fail_mem; } for (i = 0; i < hdev->nvqs; ++i) { @@ -1773,6 +2066,17 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) } } + r = event_notifier_init( + &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier, 0); + if (r < 0) { + VHOST_OPS_DEBUG(r, "event_notifier_init failed"); + goto fail_vq; + } + event_notifier_test_and_clear( + &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); + if (!vdev->use_guest_notifier_mask) { + vhost_config_mask(hdev, vdev, true); + } if (hdev->log_enabled) { uint64_t log_base; @@ -1784,15 +2088,28 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) hdev->log_size ? log_base : 0, hdev->log); if (r < 0) { - VHOST_OPS_DEBUG("vhost_set_log_base failed"); - r = -errno; + VHOST_OPS_DEBUG(r, "vhost_set_log_base failed"); + goto fail_log; + } + + if (hdev->vhost_ops->vhost_set_log_size) { + r = hdev->vhost_ops->vhost_set_log_size(hdev, hdev->log_size, hdev->log); + if (r < 0) { + VHOST_OPS_DEBUG(r, "vhost_set_log_size failed"); + goto fail_log; + } + } + } + if (vrings) { + r = vhost_dev_set_vring_enable(hdev, true); + if (r) { goto fail_log; } } if (hdev->vhost_ops->vhost_dev_start) { r = hdev->vhost_ops->vhost_dev_start(hdev, true); if (r) { - goto fail_log; + goto fail_start; } } if (vhost_dev_has_iommu(hdev) && @@ -1806,7 +2123,12 @@ int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev) vhost_device_iotlb_miss(hdev, vq->used_phys, true); } } + vhost_start_config_intr(hdev); return 0; +fail_start: + if (vrings) { + vhost_dev_set_vring_enable(hdev, false); + } fail_log: vhost_log_put(hdev, false); fail_vq: @@ -1819,22 +2141,32 @@ fail_vq: fail_mem: fail_features: - + vdev->vhost_started = false; hdev->started = false; return r; } /* Host notifiers must be enabled at this point. 
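
The vhost config-interrupt plumbing above follows the same mask/unmask protocol vhost already uses for per-queue interrupts: while the guest masks the interrupt, the backend is pointed at a parking notifier (masked_config_notifier), and on unmask the real config notifier fd is handed back and vhost_config_pending() reports whether anything fired in the meantime. A minimal, self-contained sketch of that pattern, using raw eventfds in place of QEMU's EventNotifier (all names below are illustrative):

    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    static int test_and_clear(int efd)
    {
        uint64_t cnt;
        /* non-blocking read: 1 if the fd had been signalled, else 0 */
        return read(efd, &cnt, sizeof(cnt)) == sizeof(cnt);
    }

    int main(void)
    {
        int masked = eventfd(0, EFD_NONBLOCK);  /* parking notifier */
        int config = eventfd(0, EFD_NONBLOCK);  /* real config notifier */
        uint64_t one = 1;

        /* the backend fires the config interrupt while it is masked */
        write(masked, &one, sizeof(one));

        /* unmask: replay anything that arrived while masked */
        if (test_and_clear(masked)) {
            write(config, &one, sizeof(one));
        }
        printf("config interrupt pending after unmask: %d\n",
               test_and_clear(config));
        return 0;
    }

In the series itself the replay happens through the MSI-X poll path (see the config-interrupt additions to virtio_pci_vector_poll() later in this patch), not through a direct write as sketched here.
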
*/ -void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) +void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) { int i; /* should only be called after backend is connected */ assert(hdev->vhost_ops); + event_notifier_test_and_clear( + &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); + event_notifier_test_and_clear(&vdev->config_notifier); + event_notifier_cleanup( + &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier); + + trace_vhost_dev_stop(hdev, vdev->name, vrings); if (hdev->vhost_ops->vhost_dev_start) { hdev->vhost_ops->vhost_dev_start(hdev, false); } + if (vrings) { + vhost_dev_set_vring_enable(hdev, false); + } for (i = 0; i < hdev->nvqs; ++i) { vhost_virtqueue_stop(hdev, vdev, @@ -1848,8 +2180,10 @@ void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev) } memory_listener_unregister(&hdev->iommu_listener); } + vhost_stop_config_intr(hdev); vhost_log_put(hdev, true); hdev->started = false; + vdev->vhost_started = false; hdev->vdev = NULL; } @@ -1860,5 +2194,147 @@ int vhost_net_set_backend(struct vhost_dev *hdev, return hdev->vhost_ops->vhost_net_set_backend(hdev, file); } - return -1; + return -ENOSYS; +} + +bool used_memslots_is_exceeded(void) +{ + return used_memslots_exceeded; +} + +int vhost_dev_resume(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) +{ + int i, r; + EventNotifier *e = &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; + + /* should only be called after backend is connected */ + if (!hdev->vhost_ops) { + error_report("Missing vhost_ops! Operation not permitted!\n"); + return -EPERM; + } + + vdev->vhost_started = true; + hdev->started = true; + hdev->vdev = vdev; + + if (vhost_dev_has_iommu(hdev)) { + memory_listener_register(&hdev->iommu_listener, vdev->dma_as); + } + + r = hdev->vhost_ops->vhost_set_mem_table(hdev, hdev->mem); + if (r < 0) { + VHOST_OPS_DEBUG(r, "vhost_set_mem_table failed"); + goto fail_mem; + } + for (i = 0; i < hdev->nvqs; ++i) { + r = vhost_virtqueue_start(hdev, + vdev, + hdev->vqs + i, + hdev->vq_index + i); + if (r < 0) { + goto fail_vq; + } + } + + r = event_notifier_init(e, 0); + if (r < 0) { + return r; + } + event_notifier_test_and_clear(e); + if (!vdev->use_guest_notifier_mask) { + vhost_config_mask(hdev, vdev, true); + } + if (vrings) { + r = vhost_dev_set_vring_enable(hdev, true); + if (r) { + goto fail_vq; + } + } + if (hdev->vhost_ops->vhost_dev_resume) { + r = hdev->vhost_ops->vhost_dev_resume(hdev); + if (r) { + goto fail_start; + } + } + if (vhost_dev_has_iommu(hdev)) { + hdev->vhost_ops->vhost_set_iotlb_callback(hdev, true); + + /* + * Update used ring information for IOTLB to work correctly, + * vhost-kernel code requires for this. + */ + for (i = 0; i < hdev->nvqs; ++i) { + struct vhost_virtqueue *vq = hdev->vqs + i; + vhost_device_iotlb_miss(hdev, vq->used_phys, true); + } + } + vhost_start_config_intr(hdev); + return 0; +fail_start: + if (vrings) { + vhost_dev_set_vring_enable(hdev, false); + } +fail_vq: + while (--i >= 0) { + vhost_virtqueue_stop(hdev, + vdev, + hdev->vqs + i, + hdev->vq_index + i); + } + +fail_mem: + vdev->vhost_started = false; + hdev->started = false; + return r; +} + +int vhost_dev_suspend(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings) +{ + int i; + int ret = 0; + EventNotifier *e = &hdev->vqs[VHOST_QUEUE_NUM_CONFIG_INR].masked_config_notifier; + + /* should only be called after backend is connected */ + if (!hdev->vhost_ops) { + error_report("Missing vhost_ops! 
Operation not permitted!\n"); + return -EPERM; + } + + event_notifier_test_and_clear(e); + event_notifier_test_and_clear(&vdev->config_notifier); + + if (hdev->vhost_ops->vhost_dev_suspend) { + ret = hdev->vhost_ops->vhost_dev_suspend(hdev); + if (ret) { + goto fail_suspend; + } + } + if (vrings) { + ret = vhost_dev_set_vring_enable(hdev, false); + if (ret) { + goto fail_suspend; + } + } + for (i = 0; i < hdev->nvqs; ++i) { + vhost_virtqueue_stop(hdev, + vdev, + hdev->vqs + i, + hdev->vq_index + i); + } + + if (vhost_dev_has_iommu(hdev)) { + hdev->vhost_ops->vhost_set_iotlb_callback(hdev, false); + memory_listener_unregister(&hdev->iommu_listener); + } + vhost_stop_config_intr(hdev); + hdev->started = false; + vdev->vhost_started = false; + hdev->vdev = NULL; + + return ret; + +fail_suspend: + event_notifier_test_and_clear(e); + + return ret; } diff --git a/hw/virtio/virtio-balloon.c b/hw/virtio/virtio-balloon.c index 9a4f491b54df51a72ec92f012e53b868d3020294..f503572e279bb9a3714645aa0fe9756442d60795 100644 --- a/hw/virtio/virtio-balloon.c +++ b/hw/virtio/virtio-balloon.c @@ -917,8 +917,9 @@ static void virtio_balloon_device_realize(DeviceState *dev, Error **errp) precopy_add_notifier(&s->free_page_hint_notify); object_ref(OBJECT(s->iothread)); - s->free_page_bh = aio_bh_new(iothread_get_aio_context(s->iothread), - virtio_ballloon_get_free_page_hints, s); + s->free_page_bh = aio_bh_new_guarded(iothread_get_aio_context(s->iothread), + virtio_ballloon_get_free_page_hints, s, + &dev->mem_reentrancy_guard); } if (virtio_has_feature(s->host_features, VIRTIO_BALLOON_F_REPORTING)) { diff --git a/hw/virtio/virtio-crypto.c b/hw/virtio/virtio-crypto.c index 54f9bbb789ca304afef59257297aacc4cb9caff9..07566f0d462d4a23c411570a47e749d062600707 100644 --- a/hw/virtio/virtio-crypto.c +++ b/hw/virtio/virtio-crypto.c @@ -349,15 +349,17 @@ static void virtio_crypto_free_request(VirtIOCryptoReq *req) size_t max_len; CryptoDevBackendSymOpInfo *op_info = req->u.sym_op_info; - max_len = op_info->iv_len + - op_info->aad_len + - op_info->src_len + - op_info->dst_len + - op_info->digest_result_len; - - /* Zeroize and free request data structure */ - memset(op_info, 0, sizeof(*op_info) + max_len); - g_free(op_info); + if (op_info) { + max_len = op_info->iv_len + + op_info->aad_len + + op_info->src_len + + op_info->dst_len + + op_info->digest_result_len; + + /* Zeroize and free request data structure */ + memset(op_info, 0, sizeof(*op_info) + max_len); + g_free(op_info); + } } g_free(req); } @@ -461,6 +463,11 @@ virtio_crypto_sym_op_helper(VirtIODevice *vdev, return NULL; } + if (unlikely(src_len != dst_len)) { + virtio_error(vdev, "sym request src len is different from dst len"); + return NULL; + } + max_len = (uint64_t)iv_len + aad_len + src_len + dst_len + hash_result_len; if (unlikely(max_len > vcrypto->conf.max_size)) { virtio_error(vdev, "virtio-crypto too big length"); @@ -817,7 +829,8 @@ static void virtio_crypto_device_realize(DeviceState *dev, Error **errp) vcrypto->vqs[i].dataq = virtio_add_queue(vdev, 1024, virtio_crypto_handle_dataq_bh); vcrypto->vqs[i].dataq_bh = - qemu_bh_new(virtio_crypto_dataq_bh, &vcrypto->vqs[i]); + virtio_bh_new_guarded(dev, virtio_crypto_dataq_bh, + &vcrypto->vqs[i]); vcrypto->vqs[i].vcrypto = vcrypto; } @@ -948,6 +961,15 @@ static void virtio_crypto_guest_notifier_mask(VirtIODevice *vdev, int idx, assert(vcrypto->vhost_started); + /* + * Add the check 
for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1 + * as the macro of configure interrupt's IDX, If this driver does not + * support, the function will return + */ + + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return; + } cryptodev_vhost_virtqueue_mask(vdev, queue, idx, mask); } @@ -958,9 +980,27 @@ static bool virtio_crypto_guest_notifier_pending(VirtIODevice *vdev, int idx) assert(vcrypto->vhost_started); + /* + * Add the check for configure interrupt, Use VIRTIO_CONFIG_IRQ_IDX -1 + * as the macro of configure interrupt's IDX, If this driver does not + * support, the function will return + */ + + if (idx == VIRTIO_CONFIG_IRQ_IDX) { + return false; + } return cryptodev_vhost_virtqueue_pending(vdev, queue, idx); } +static struct vhost_dev *virtio_crypto_get_vhost(VirtIODevice *vdev) +{ + VirtIOCrypto *vcrypto = VIRTIO_CRYPTO(vdev); + CryptoDevBackend *b = vcrypto->cryptodev; + CryptoDevBackendClient *cc = b->conf.peers.ccs[0]; + CryptoDevBackendVhost *vhost_crypto = cryptodev_get_vhost(cc, b, 0); + return &vhost_crypto->dev; +} + static void virtio_crypto_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); @@ -977,6 +1017,7 @@ static void virtio_crypto_class_init(ObjectClass *klass, void *data) vdc->set_status = virtio_crypto_set_status; vdc->guest_notifier_mask = virtio_crypto_guest_notifier_mask; vdc->guest_notifier_pending = virtio_crypto_guest_notifier_pending; + vdc->get_vhost = virtio_crypto_get_vhost; } static void virtio_crypto_instance_init(Object *obj) diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c index 1b23e8e18c780dd176ad3a54e4873b45889d9cc5..190ec2579ab9194bee6232ca4dff7d2698222575 100644 --- a/hw/virtio/virtio-iommu.c +++ b/hw/virtio/virtio-iommu.c @@ -125,6 +125,32 @@ static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data) } } +static void virtio_iommu_notify_map_unmap(IOMMUMemoryRegion *mr, + IOMMUTLBEvent *event, + hwaddr virt_start, hwaddr virt_end) +{ + uint64_t delta = virt_end - virt_start; + + event->entry.iova = virt_start; + event->entry.addr_mask = delta; + + if (delta == UINT64_MAX) { + memory_region_notify_iommu(mr, 0, *event); + } + + while (virt_start != virt_end + 1) { + uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64); + + event->entry.addr_mask = mask; + event->entry.iova = virt_start; + memory_region_notify_iommu(mr, 0, *event); + virt_start += mask + 1; + if (event->entry.perm != IOMMU_NONE) { + event->entry.translated_addr += mask + 1; + } + } +} + static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start, hwaddr virt_end, hwaddr paddr, uint32_t flags) @@ -143,19 +169,16 @@ static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start, event.type = IOMMU_NOTIFIER_MAP; event.entry.target_as = &address_space_memory; - event.entry.addr_mask = virt_end - virt_start; - event.entry.iova = virt_start; event.entry.perm = perm; event.entry.translated_addr = paddr; - memory_region_notify_iommu(mr, 0, event); + virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end); } static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start, hwaddr virt_end) { IOMMUTLBEvent event; - uint64_t delta = virt_end - virt_start; if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) { return; @@ -167,22 +190,8 @@ static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start, event.entry.target_as = &address_space_memory; event.entry.perm = IOMMU_NONE; event.entry.translated_addr = 0; - event.entry.addr_mask = delta; - 
event.entry.iova = virt_start; - - if (delta == UINT64_MAX) { - memory_region_notify_iommu(mr, 0, event); - } - - while (virt_start != virt_end + 1) { - uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64); - - event.entry.addr_mask = mask; - event.entry.iova = virt_start; - memory_region_notify_iommu(mr, 0, event); - virt_start += mask + 1; - } + virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end); } static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value, @@ -547,11 +556,10 @@ static int virtio_iommu_probe(VirtIOIOMMU *s, static int virtio_iommu_iov_to_req(struct iovec *iov, unsigned int iov_cnt, - void *req, size_t req_sz) + void *req, size_t payload_sz) { - size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail); + size_t sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz); - sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz); if (unlikely(sz != payload_sz)) { return VIRTIO_IOMMU_S_INVAL; } @@ -564,7 +572,8 @@ static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s, \ unsigned int iov_cnt) \ { \ struct virtio_iommu_req_ ## __req req; \ - int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \ + int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, \ + sizeof(req) - sizeof(struct virtio_iommu_req_tail));\ \ return ret ? ret : virtio_iommu_ ## __req(s, &req); \ } @@ -657,6 +666,7 @@ out: virtio_notify(vdev, vq); g_free(elem); g_free(buf); + buf = NULL; } } diff --git a/hw/virtio/virtio-mem.c b/hw/virtio/virtio-mem.c index d5a578142b7bade87e1493701e250c50197550d3..452f60266ad5564eb74862f6954df562001ad503 100644 --- a/hw/virtio/virtio-mem.c +++ b/hw/virtio/virtio-mem.c @@ -61,6 +61,7 @@ static uint32_t thp_size; #define HPAGE_PMD_SIZE_PATH "/sys/kernel/mm/transparent_hugepage/hpage_pmd_size" +#define HPAGE_PATH "/sys/kernel/mm/transparent_hugepage/" static uint32_t virtio_mem_thp_size(void) { gchar *content = NULL; @@ -71,6 +72,12 @@ static uint32_t virtio_mem_thp_size(void) return thp_size; } + /* No THP -> no restrictions. */ + if (!g_file_test(HPAGE_PATH, G_FILE_TEST_EXISTS)) { + thp_size = VIRTIO_MEM_MIN_BLOCK_SIZE; + return thp_size; + } + /* * Try to probe the actual THP size, fallback to (sane but eventually * incorrect) default sizes. @@ -205,7 +212,7 @@ static int virtio_mem_for_each_plugged_section(const VirtIOMEM *vmem, uint64_t offset, size; int ret = 0; - first_bit = s->offset_within_region / vmem->bitmap_size; + first_bit = s->offset_within_region / vmem->block_size; first_bit = find_next_bit(vmem->bitmap, vmem->bitmap_size, first_bit); while (first_bit < vmem->bitmap_size) { MemoryRegionSection tmp = *s; @@ -237,7 +244,7 @@ static int virtio_mem_for_each_unplugged_section(const VirtIOMEM *vmem, uint64_t offset, size; int ret = 0; - first_bit = s->offset_within_region / vmem->bitmap_size; + first_bit = s->offset_within_region / vmem->block_size; first_bit = find_next_zero_bit(vmem->bitmap, vmem->bitmap_size, first_bit); while (first_bit < vmem->bitmap_size) { MemoryRegionSection tmp = *s; @@ -311,7 +318,7 @@ static int virtio_mem_notify_plug(VirtIOMEM *vmem, uint64_t offset, if (ret) { /* Notify all already-notified listeners. 
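
virtio_iommu_notify_map_unmap() above walks an arbitrary [virt_start, virt_end] range in naturally aligned power-of-two chunks, because an IOMMUTLBEvent describes its region as iova plus addr_mask and therefore can only express such chunks. The sketch below reimplements the walk with a simplified stand-in for dma_aligned_pow2_mask() (assumption: start and end are inclusive bounds, as in the code above):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Largest naturally aligned power-of-two chunk starting at "start"
     * that stays within "end", returned as a size-minus-one mask. A
     * simplified stand-in for QEMU's dma_aligned_pow2_mask(). */
    static uint64_t aligned_pow2_mask(uint64_t start, uint64_t end)
    {
        uint64_t size_mask = end - start;   /* chunk length - 1 */
        uint64_t align_mask = start ? (start & -start) - 1 : UINT64_MAX;

        if (align_mask <= size_mask) {
            return align_mask;              /* alignment is the limit */
        }
        if (size_mask == UINT64_MAX) {
            return UINT64_MAX;              /* whole address space */
        }
        /* round the remaining length down to a power of two */
        return (1ULL << (63 - __builtin_clzll(size_mask + 1))) - 1;
    }

    int main(void)
    {
        uint64_t start = 0x1000, end = 0x4fff;

        while (start != end + 1) {          /* same loop shape as above */
            uint64_t mask = aligned_pow2_mask(start, end);

            printf("notify iova=0x%" PRIx64 " mask=0x%" PRIx64 "\n",
                   start, mask);
            start += mask + 1;
        }
        return 0;
    }

On [0x1000, 0x4fff] this emits three notifications: a 4 KiB chunk at 0x1000, an 8 KiB chunk at 0x2000 and a 4 KiB chunk at 0x4000.
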
*/ QLIST_FOREACH(rdl2, &vmem->rdl_list, next) { - MemoryRegionSection tmp = *rdl->section; + MemoryRegionSection tmp = *rdl2->section; if (rdl2 == rdl) { break; @@ -733,7 +740,8 @@ static void virtio_mem_device_realize(DeviceState *dev, Error **errp) warn_report("'%s' property is smaller than the default block size (%" PRIx64 " MiB)", VIRTIO_MEM_BLOCK_SIZE_PROP, virtio_mem_default_block_size(rb) / MiB); - } else if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) { + } + if (!QEMU_IS_ALIGNED(vmem->requested_size, vmem->block_size)) { error_setg(errp, "'%s' property has to be multiples of '%s' (0x%" PRIx64 ")", VIRTIO_MEM_REQUESTED_SIZE_PROP, VIRTIO_MEM_BLOCK_SIZE_PROP, vmem->block_size); @@ -876,7 +884,7 @@ static int virtio_mem_mig_sanity_checks_post_load(void *opaque, int version_id) return -EINVAL; } /* - * Note: Preparation for resizeable memory regions. The maximum size + * Note: Preparation for resizable memory regions. The maximum size * of the memory region must not change during migration. */ if (tmp->region_size != new_region_size) { diff --git a/hw/virtio/virtio-mmio.c b/hw/virtio/virtio-mmio.c index 72da12fea594141ac3df8511feeb1e1e2b5965c6..508dd4cdb7d8b263c58d6cf7c6dbda274a7a6c38 100644 --- a/hw/virtio/virtio-mmio.c +++ b/hw/virtio/virtio-mmio.c @@ -673,7 +673,30 @@ static int virtio_mmio_set_guest_notifier(DeviceState *d, int n, bool assign, return 0; } +static int virtio_mmio_set_config_guest_notifier(DeviceState *d, bool assign, + bool with_irqfd) +{ + VirtIOMMIOProxy *proxy = VIRTIO_MMIO(d); + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); + EventNotifier *notifier = virtio_config_get_guest_notifier(vdev); + int r = 0; + if (assign) { + r = event_notifier_init(notifier, 0); + if (r < 0) { + return r; + } + virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd); + } else { + virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd); + event_notifier_cleanup(notifier); + } + if (vdc->guest_notifier_mask && vdev->use_guest_notifier_mask) { + vdc->guest_notifier_mask(vdev, VIRTIO_CONFIG_IRQ_IDX, !assign); + } + return r; +} static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) { @@ -695,6 +718,10 @@ static int virtio_mmio_set_guest_notifiers(DeviceState *d, int nvqs, goto assign_error; } } + r = virtio_mmio_set_config_guest_notifier(d, assign, with_irqfd); + if (r < 0) { + goto assign_error; + } return 0; diff --git a/hw/virtio/virtio-pci.c b/hw/virtio/virtio-pci.c index 750aa47ec148267f36ee0d6672cb68b5c5e7fdf2..1e4661bebd36fa1c43b11e97b4bca85b4dfbe346 100644 --- a/hw/virtio/virtio-pci.c +++ b/hw/virtio/virtio-pci.c @@ -19,6 +19,7 @@ #include "exec/memop.h" #include "standard-headers/linux/virtio_pci.h" +#include "standard-headers/linux/virtio_ids.h" #include "hw/boards.h" #include "hw/virtio/virtio.h" #include "migration/qemu-file-types.h" @@ -213,6 +214,90 @@ static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) return 0; } +typedef struct VirtIOPCIIDInfo { + /* virtio id */ + uint16_t vdev_id; + /* pci device id for the transitional device */ + uint16_t trans_devid; + uint16_t class_id; +} VirtIOPCIIDInfo; + +static const VirtIOPCIIDInfo virtio_pci_id_info[] = { + { + .vdev_id = VIRTIO_ID_CRYPTO, + .class_id = PCI_CLASS_OTHERS, + }, { + .vdev_id = VIRTIO_ID_FS, + .class_id = PCI_CLASS_STORAGE_OTHER, + }, { + .vdev_id = VIRTIO_ID_NET, + .trans_devid = PCI_DEVICE_ID_VIRTIO_NET, + .class_id = PCI_CLASS_NETWORK_ETHERNET, + }, { + 
.vdev_id = VIRTIO_ID_BLOCK, + .trans_devid = PCI_DEVICE_ID_VIRTIO_BLOCK, + .class_id = PCI_CLASS_STORAGE_SCSI, + }, { + .vdev_id = VIRTIO_ID_CONSOLE, + .trans_devid = PCI_DEVICE_ID_VIRTIO_CONSOLE, + .class_id = PCI_CLASS_COMMUNICATION_OTHER, + }, { + .vdev_id = VIRTIO_ID_SCSI, + .trans_devid = PCI_DEVICE_ID_VIRTIO_SCSI, + .class_id = PCI_CLASS_STORAGE_SCSI + }, { + .vdev_id = VIRTIO_ID_9P, + .trans_devid = PCI_DEVICE_ID_VIRTIO_9P, + .class_id = PCI_BASE_CLASS_NETWORK, + }, { + .vdev_id = VIRTIO_ID_BALLOON, + .trans_devid = PCI_DEVICE_ID_VIRTIO_BALLOON, + .class_id = PCI_CLASS_OTHERS, + }, { + .vdev_id = VIRTIO_ID_RNG, + .trans_devid = PCI_DEVICE_ID_VIRTIO_RNG, + .class_id = PCI_CLASS_OTHERS, + }, +}; + +static const VirtIOPCIIDInfo *virtio_pci_get_id_info(uint16_t vdev_id) +{ + const VirtIOPCIIDInfo *info = NULL; + int i; + + for (i = 0; i < ARRAY_SIZE(virtio_pci_id_info); i++) { + if (virtio_pci_id_info[i].vdev_id == vdev_id) { + info = &virtio_pci_id_info[i]; + break; + } + } + + if (!info) { + /* The device id is invalid or not added to the id_info yet. */ + error_report("Invalid virtio device(id %u)", vdev_id); + abort(); + } + + return info; +} + +/* + * Get the Transitional Device ID for the specific device, return + * zero if the device is non-transitional. + */ +uint16_t virtio_pci_get_trans_devid(uint16_t device_id) +{ + return virtio_pci_get_id_info(device_id)->trans_devid; +} + +/* + * Get the Class ID for the specific device. + */ +uint16_t virtio_pci_get_class_id(uint16_t device_id) +{ + return virtio_pci_get_id_info(device_id)->class_id; +} + static bool virtio_pci_ioeventfd_enabled(DeviceState *d) { VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); @@ -677,7 +762,6 @@ static uint32_t virtio_read_config(PCIDevice *pci_dev, } static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, - unsigned int queue_no, unsigned int vector) { VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; @@ -704,112 +788,204 @@ static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, } static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, - unsigned int queue_no, + EventNotifier *n, unsigned int vector) { VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - VirtQueue *vq = virtio_get_queue(vdev, queue_no); - EventNotifier *n = virtio_queue_get_guest_notifier(vq); return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); } static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, - unsigned int queue_no, + EventNotifier *n , unsigned int vector) { - VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); - VirtQueue *vq = virtio_get_queue(vdev, queue_no); - EventNotifier *n = virtio_queue_get_guest_notifier(vq); VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; int ret; ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); assert(ret == 0); } +static int virtio_pci_get_notifier(VirtIOPCIProxy *proxy, int queue_no, + EventNotifier **n, unsigned int *vector) +{ + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + VirtQueue *vq; -static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) + if (!proxy->vector_irqfd && vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) + return -1; + + if (queue_no == VIRTIO_CONFIG_IRQ_IDX) { + *n = virtio_config_get_guest_notifier(vdev); + *vector = vdev->config_vector; + } else { + if (!virtio_queue_get_num(vdev, queue_no)) { + return -1; + } + *vector = virtio_queue_vector(vdev, queue_no); + vq = virtio_get_queue(vdev, queue_no); + *n = 
virtio_queue_get_guest_notifier(vq); + } + return 0; +} + +static int kvm_virtio_pci_vector_use_one(VirtIOPCIProxy *proxy, int queue_no) { + unsigned int vector; + int ret; + EventNotifier *n; PCIDevice *dev = &proxy->pci_dev; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); - unsigned int vector; - int ret, queue_no; - for (queue_no = 0; queue_no < nvqs; queue_no++) { - if (!virtio_queue_get_num(vdev, queue_no)) { - break; - } - vector = virtio_queue_vector(vdev, queue_no); - if (vector >= msix_nr_vectors_allocated(dev)) { - continue; - } - ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector); + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); + if (ret < 0) { + return ret; + } + if (vector >= msix_nr_vectors_allocated(dev)) { + return 0; + } + ret = kvm_virtio_pci_vq_vector_use(proxy, vector); + if (ret < 0) { + goto undo; + } + /* + * If guest supports masking, set up irqfd now. + * Otherwise, delay until unmasked in the frontend. + */ + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); if (ret < 0) { + kvm_virtio_pci_vq_vector_release(proxy, vector); goto undo; } - /* If guest supports masking, set up irqfd now. - * Otherwise, delay until unmasked in the frontend. - */ - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { - ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); - if (ret < 0) { - kvm_virtio_pci_vq_vector_release(proxy, vector); - goto undo; - } - } } - return 0; + return 0; undo: - while (--queue_no >= 0) { - vector = virtio_queue_vector(vdev, queue_no); - if (vector >= msix_nr_vectors_allocated(dev)) { - continue; + + vector = virtio_queue_vector(vdev, queue_no); + if (vector >= msix_nr_vectors_allocated(dev)) { + return ret; + } + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); + if (ret < 0) { + return ret; } - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { - kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); + kvm_virtio_pci_irqfd_release(proxy, n, vector); + } + return ret; +} + +#ifdef __aarch64__ +int __attribute__((weak)) kvm_create_shadow_device(PCIDevice *dev) +{ + return 0; +} + +int __attribute__((weak)) kvm_delete_shadow_device(PCIDevice *dev) +{ + return 0; +} +#endif + +#ifdef __aarch64__ +static bool shadow_device_supported(VirtIODevice *vdev) +{ + return !strcmp(vdev->name, "virtio-net") || + !strcmp(vdev->name, "virtio-blk") || + !strcmp(vdev->name, "virtio-scsi"); +} +#endif + +static int kvm_virtio_pci_vector_vq_use(VirtIOPCIProxy *proxy, int nvqs) +{ + int queue_no; + int ret = 0; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + +#ifdef __aarch64__ + if (shadow_device_supported(vdev)) { + kvm_create_shadow_device(&proxy->pci_dev); + } +#endif + + for (queue_no = 0; queue_no < nvqs; queue_no++) { + if (!virtio_queue_get_num(vdev, queue_no)) { + return -1; } - kvm_virtio_pci_vq_vector_release(proxy, vector); + ret = kvm_virtio_pci_vector_use_one(proxy, queue_no); + } + +#ifdef __aarch64__ + if (shadow_device_supported(vdev) && ret != 0) { + kvm_delete_shadow_device(&proxy->pci_dev); } +#endif + return ret; } -static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) +static int kvm_virtio_pci_vector_config_use(VirtIOPCIProxy *proxy) +{ + return kvm_virtio_pci_vector_use_one(proxy, VIRTIO_CONFIG_IRQ_IDX); +} + +static void kvm_virtio_pci_vector_release_one(VirtIOPCIProxy 
*proxy, + int queue_no) { - PCIDevice *dev = &proxy->pci_dev; VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); unsigned int vector; - int queue_no; + EventNotifier *n; + int ret; VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); + PCIDevice *dev = &proxy->pci_dev; + + ret = virtio_pci_get_notifier(proxy, queue_no, &n, &vector); + if (ret < 0) { + return; + } + if (vector >= msix_nr_vectors_allocated(dev)) { + return; + } + if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { + kvm_virtio_pci_irqfd_release(proxy, n, vector); + } + kvm_virtio_pci_vq_vector_release(proxy, vector); +} + +static void kvm_virtio_pci_vector_vq_release(VirtIOPCIProxy *proxy, int nvqs) +{ + int queue_no; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); for (queue_no = 0; queue_no < nvqs; queue_no++) { if (!virtio_queue_get_num(vdev, queue_no)) { break; } - vector = virtio_queue_vector(vdev, queue_no); - if (vector >= msix_nr_vectors_allocated(dev)) { - continue; - } - /* If guest supports masking, clean up irqfd now. - * Otherwise, it was cleaned when masked in the frontend. - */ - if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { - kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); - } - kvm_virtio_pci_vq_vector_release(proxy, vector); + kvm_virtio_pci_vector_release_one(proxy, queue_no); + } + +#ifdef __aarch64__ + if (shadow_device_supported(vdev)) { + kvm_delete_shadow_device(&proxy->pci_dev); } +#endif } -static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy, +static void kvm_virtio_pci_vector_config_release(VirtIOPCIProxy *proxy) +{ + kvm_virtio_pci_vector_release_one(proxy, VIRTIO_CONFIG_IRQ_IDX); +} + +static int virtio_pci_one_vector_unmask(VirtIOPCIProxy *proxy, unsigned int queue_no, unsigned int vector, - MSIMessage msg) + MSIMessage msg, + EventNotifier *n) { VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); - VirtQueue *vq = virtio_get_queue(vdev, queue_no); - EventNotifier *n = virtio_queue_get_guest_notifier(vq); VirtIOIRQFD *irqfd; int ret = 0; @@ -836,14 +1012,15 @@ static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy, event_notifier_set(n); } } else { - ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); + ret = kvm_virtio_pci_irqfd_use(proxy, n, vector); } return ret; } -static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy, +static void virtio_pci_one_vector_mask(VirtIOPCIProxy *proxy, unsigned int queue_no, - unsigned int vector) + unsigned int vector, + EventNotifier *n) { VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); @@ -854,7 +1031,7 @@ static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy, if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { k->guest_notifier_mask(vdev, queue_no, true); } else { - kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); + kvm_virtio_pci_irqfd_release(proxy, n, vector); } } @@ -864,6 +1041,7 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector, VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtQueue *vq = virtio_vector_first_queue(vdev, vector); + EventNotifier *n; int ret, index, unmasked = 0; while (vq) { @@ -872,7 +1050,8 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector, break; } if (index < proxy->nvqs_with_notifiers) { - ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg); + n = virtio_queue_get_guest_notifier(vq); 
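
Most of the churn in this file comes from letting the config interrupt share the per-queue helpers, with VIRTIO_CONFIG_IRQ_IDX (-1) as a sentinel queue number that virtio_pci_get_notifier() resolves to the config notifier and config_vector rather than to a queue's pair. A reduced sketch of that dispatch shape (hypothetical demo types, not QEMU's):

    #include <stdio.h>

    #define CONFIG_IRQ_IDX_DEMO (-1)  /* stand-in for VIRTIO_CONFIG_IRQ_IDX */

    typedef struct {
        int queue_vector[4];
        int config_vector;
    } DevDemo;

    /* one lookup for both kinds of interrupt, keyed by queue number */
    static int get_vector(const DevDemo *d, int queue_no)
    {
        if (queue_no == CONFIG_IRQ_IDX_DEMO) {
            return d->config_vector;
        }
        return d->queue_vector[queue_no];
    }

    int main(void)
    {
        DevDemo d = { .queue_vector = { 10, 11, 12, 13 }, .config_vector = 9 };

        printf("vq1 -> vector %d, config -> vector %d\n",
               get_vector(&d, 1), get_vector(&d, CONFIG_IRQ_IDX_DEMO));
        return 0;
    }

Because the use/release/mask/unmask helpers all go through one lookup, the config interrupt inherits the irqfd and masking behaviour of queue interrupts without a parallel code path.
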
+ ret = virtio_pci_one_vector_unmask(proxy, index, vector, msg, n); if (ret < 0) { goto undo; } @@ -880,15 +1059,26 @@ static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector, } vq = virtio_vector_next_queue(vq); } - + /* unmask config intr */ + if (vector == vdev->config_vector) { + n = virtio_config_get_guest_notifier(vdev); + ret = virtio_pci_one_vector_unmask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, + msg, n); + if (ret < 0) { + goto undo_config; + } + } return 0; - +undo_config: + n = virtio_config_get_guest_notifier(vdev); + virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n); undo: vq = virtio_vector_first_queue(vdev, vector); while (vq && unmasked >= 0) { index = virtio_get_queue_index(vq); if (index < proxy->nvqs_with_notifiers) { - virtio_pci_vq_vector_mask(proxy, index, vector); + n = virtio_queue_get_guest_notifier(vq); + virtio_pci_one_vector_mask(proxy, index, vector, n); --unmasked; } vq = virtio_vector_next_queue(vq); @@ -901,18 +1091,25 @@ static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtQueue *vq = virtio_vector_first_queue(vdev, vector); + EventNotifier *n; int index; while (vq) { index = virtio_get_queue_index(vq); + n = virtio_queue_get_guest_notifier(vq); if (!virtio_queue_get_num(vdev, index)) { break; } if (index < proxy->nvqs_with_notifiers) { - virtio_pci_vq_vector_mask(proxy, index, vector); + virtio_pci_one_vector_mask(proxy, index, vector, n); } vq = virtio_vector_next_queue(vq); } + + if (vector == vdev->config_vector) { + n = virtio_config_get_guest_notifier(vdev); + virtio_pci_one_vector_mask(proxy, VIRTIO_CONFIG_IRQ_IDX, vector, n); + } } static void virtio_pci_vector_poll(PCIDevice *dev, @@ -925,19 +1122,17 @@ static void virtio_pci_vector_poll(PCIDevice *dev, int queue_no; unsigned int vector; EventNotifier *notifier; - VirtQueue *vq; + int ret; for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) { - if (!virtio_queue_get_num(vdev, queue_no)) { + ret = virtio_pci_get_notifier(proxy, queue_no, ¬ifier, &vector); + if (ret < 0) { break; } - vector = virtio_queue_vector(vdev, queue_no); if (vector < vector_start || vector >= vector_end || !msix_is_masked(dev, vector)) { continue; } - vq = virtio_get_queue(vdev, queue_no); - notifier = virtio_queue_get_guest_notifier(vq); if (k->guest_notifier_pending) { if (k->guest_notifier_pending(vdev, queue_no)) { msix_set_pending(dev, vector); @@ -946,6 +1141,34 @@ static void virtio_pci_vector_poll(PCIDevice *dev, msix_set_pending(dev, vector); } } + /* poll the config intr */ + ret = virtio_pci_get_notifier(proxy, VIRTIO_CONFIG_IRQ_IDX, ¬ifier, + &vector); + if (ret < 0) { + return; + } + if (vector < vector_start || vector >= vector_end || + !msix_is_masked(dev, vector)) { + return; + } + if (k->guest_notifier_pending) { + if (k->guest_notifier_pending(vdev, VIRTIO_CONFIG_IRQ_IDX)) { + msix_set_pending(dev, vector); + } + } else if (event_notifier_test_and_clear(notifier)) { + msix_set_pending(dev, vector); + } +} + +void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq, + int n, bool assign, + bool with_irqfd) +{ + if (n == VIRTIO_CONFIG_IRQ_IDX) { + virtio_config_set_guest_notifier_fd_handler(vdev, assign, with_irqfd); + } else { + virtio_queue_set_guest_notifier_fd_handler(vq, assign, with_irqfd); + } } static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign, @@ -954,17 +1177,25 @@ 
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign, VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); - VirtQueue *vq = virtio_get_queue(vdev, n); - EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); + VirtQueue *vq = NULL; + EventNotifier *notifier = NULL; + + if (n == VIRTIO_CONFIG_IRQ_IDX) { + notifier = virtio_config_get_guest_notifier(vdev); + } else { + vq = virtio_get_queue(vdev, n); + notifier = virtio_queue_get_guest_notifier(vq); + } if (assign) { int r = event_notifier_init(notifier, 0); if (r < 0) { return r; } - virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd); + virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, true, with_irqfd); } else { - virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd); + virtio_pci_set_guest_notifier_fd_handler(vdev, vq, n, false, + with_irqfd); event_notifier_cleanup(notifier); } @@ -994,18 +1225,26 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); - /* When deassigning, pass a consistent nvqs value - * to avoid leaking notifiers. + /* + * When deassigning, pass a consistent nvqs value to avoid leaking + * notifiers. But first check we've actually been configured, exit + * early if we haven't. */ + if (!assign && !proxy->nvqs_with_notifiers) { + return 0; + } assert(assign || nvqs == proxy->nvqs_with_notifiers); proxy->nvqs_with_notifiers = nvqs; /* Must unset vector notifier while guest notifier is still assigned */ - if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) { + if ((proxy->vector_irqfd || + (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) && + !assign) { msix_unset_vector_notifiers(&proxy->pci_dev); if (proxy->vector_irqfd) { - kvm_virtio_pci_vector_release(proxy, nvqs); + kvm_virtio_pci_vector_vq_release(proxy, nvqs); + kvm_virtio_pci_vector_config_release(proxy); g_free(proxy->vector_irqfd); proxy->vector_irqfd = NULL; } @@ -1021,20 +1260,30 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) goto assign_error; } } - + r = virtio_pci_set_guest_notifier(d, VIRTIO_CONFIG_IRQ_IDX, assign, + with_irqfd); + if (r < 0) { + goto config_assign_error; + } /* Must set vector notifier after guest notifier has been assigned */ - if ((with_irqfd || k->guest_notifier_mask) && assign) { + if ((with_irqfd || + (vdev->use_guest_notifier_mask && k->guest_notifier_mask)) && + assign) { if (with_irqfd) { proxy->vector_irqfd = g_malloc0(sizeof(*proxy->vector_irqfd) * msix_nr_vectors_allocated(&proxy->pci_dev)); - r = kvm_virtio_pci_vector_use(proxy, nvqs); + r = kvm_virtio_pci_vector_vq_use(proxy, nvqs); if (r < 0) { - goto assign_error; + goto config_assign_error; + } + r = kvm_virtio_pci_vector_config_use(proxy); + if (r < 0) { + goto config_error; } } - r = msix_set_vector_notifiers(&proxy->pci_dev, - virtio_pci_vector_unmask, + + r = msix_set_vector_notifiers(&proxy->pci_dev, virtio_pci_vector_unmask, virtio_pci_vector_mask, virtio_pci_vector_poll); if (r < 0) { @@ -1047,9 +1296,15 @@ static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) notifiers_error: if (with_irqfd) { assert(assign); - kvm_virtio_pci_vector_release(proxy, nvqs); + kvm_virtio_pci_vector_vq_release(proxy, nvqs); } - +config_error: + if (with_irqfd) { + kvm_virtio_pci_vector_config_release(proxy); + } +config_assign_error: + virtio_pci_set_guest_notifier(d, 
VIRTIO_CONFIG_IRQ_IDX, !assign, + with_irqfd); assign_error: /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */ assert(assign); @@ -1674,6 +1929,9 @@ static void virtio_pci_device_plugged(DeviceState *d, Error **errp) * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default. */ pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus)); + if (proxy->trans_devid) { + pci_config_set_device_id(config, proxy->trans_devid); + } } else { /* pure virtio-1.0 */ pci_set_word(config + PCI_VENDOR_ID, @@ -1772,7 +2030,9 @@ static void virtio_pci_device_unplugged(DeviceState *d) VirtIOPCIProxy *proxy = VIRTIO_PCI(d); bool modern = virtio_pci_modern(proxy); bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; + VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); + qemu_log("unplug device name: %s\n", !vdev ? "NULL" : vdev->name); virtio_pci_stop_ioeventfd(proxy); if (modern) { diff --git a/hw/virtio/virtio-pci.h b/hw/virtio/virtio-pci.h index 2446dcd9aef197964b59f83b61831af85deb300a..6d8e071d8d4567974a122d3f30601c1b5fda6fc2 100644 --- a/hw/virtio/virtio-pci.h +++ b/hw/virtio/virtio-pci.h @@ -146,6 +146,8 @@ struct VirtIOPCIProxy { bool disable_modern; bool ignore_backend_features; OnOffAuto disable_legacy; + /* Transitional device id */ + uint16_t trans_devid; uint32_t class_code; uint32_t nvectors; uint32_t dfselect; @@ -179,6 +181,9 @@ static inline void virtio_pci_disable_modern(VirtIOPCIProxy *proxy) proxy->disable_modern = true; } +uint16_t virtio_pci_get_trans_devid(uint16_t device_id); +uint16_t virtio_pci_get_class_id(uint16_t device_id); + /* * virtio-input-pci: This extends VirtioPCIProxy. */ @@ -251,5 +256,7 @@ void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t); * @fixed_queues. */ unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues); - +void virtio_pci_set_guest_notifier_fd_handler(VirtIODevice *vdev, VirtQueue *vq, + int n, bool assign, + bool with_irqfd); #endif diff --git a/hw/virtio/virtio-pmem.c b/hw/virtio/virtio-pmem.c index d1aeb90a31a6dddb67637c6803860e459ec78cbd..39f3949a3babfae7a6fe9d2bca5d84401746238e 100644 --- a/hw/virtio/virtio-pmem.c +++ b/hw/virtio/virtio-pmem.c @@ -149,10 +149,7 @@ static void virtio_pmem_fill_device_info(const VirtIOPMEM *pmem, static MemoryRegion *virtio_pmem_get_memory_region(VirtIOPMEM *pmem, Error **errp) { - if (!pmem->memdev) { - error_setg(errp, "'%s' property must be set", VIRTIO_PMEM_MEMDEV_PROP); - return NULL; - } + assert(pmem->memdev); return &pmem->memdev->mr; } diff --git a/hw/virtio/virtio-rng.c b/hw/virtio/virtio-rng.c index cc8e9f775d87865a255c80cf977b9719371a8661..e0f71ddd0f60f7586af37d69d0c80ce9fe69620d 100644 --- a/hw/virtio/virtio-rng.c +++ b/hw/virtio/virtio-rng.c @@ -184,8 +184,9 @@ static void virtio_rng_device_realize(DeviceState *dev, Error **errp) /* Workaround: Property parsing does not enforce unsigned integers, * So this is a hack to reject such numbers. 
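
The virtio-rng hunk completed just below tightens the 'max-bytes' validation: the property is parsed into an unsigned 64-bit field, so a negative command-line value wraps around and surfaces as a huge number above INT64_MAX, and the new == 0 test additionally rejects a useless zero rate. A small demonstration of why the comparison catches negatives:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* "-1" parsed into an unsigned 64-bit property wraps to
         * UINT64_MAX, so rejecting values above INT64_MAX (and zero)
         * filters out both negatives and a zero rate. */
        uint64_t max_bytes = (uint64_t)-1;

        if (max_bytes == 0 || max_bytes > INT64_MAX) {
            printf("rejected max-bytes: %" PRIu64 "\n", max_bytes);
            return 1;
        }
        return 0;
    }
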
*/ - if (vrng->conf.max_bytes > INT64_MAX) { - error_setg(errp, "'max-bytes' parameter must be non-negative, " + if (vrng->conf.max_bytes == 0 || + vrng->conf.max_bytes > INT64_MAX) { + error_setg(errp, "'max-bytes' parameter must be positive, " "and less than 2^63"); return; } diff --git a/hw/virtio/virtio-scsi-pci.c b/hw/virtio/virtio-scsi-pci.c index 97fab742368ac4c4332e4f3f6f1da1153468f488..498f9e2c984ccb996694c1390f766cdfc9e514d0 100644 --- a/hw/virtio/virtio-scsi-pci.c +++ b/hw/virtio/virtio-scsi-pci.c @@ -18,6 +18,7 @@ #include "hw/qdev-properties.h" #include "hw/virtio/virtio-scsi.h" #include "qemu/module.h" +#include "qemu/log.h" #include "virtio-pci.h" #include "qom/object.h" @@ -51,6 +52,8 @@ static void virtio_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) VirtIOSCSIConf *conf = &dev->vdev.parent_obj.conf; char *bus_name; + qemu_log("virtio scsi HBA %s begin to initialize.\n", + !proxy->id ? "NULL" : proxy->id); if (conf->num_queues == VIRTIO_SCSI_AUTO_NUM_QUEUES) { conf->num_queues = virtio_pci_optimal_num_queues(VIRTIO_SCSI_VQ_NUM_FIXED); diff --git a/hw/virtio/virtio.c b/hw/virtio/virtio.c index ea7c079fb046b0a339e86e68c66ff4f787a541fd..9c40d565bb452cbdb172ae25e316e3f8c09228e4 100644 --- a/hw/virtio/virtio.c +++ b/hw/virtio/virtio.c @@ -251,7 +251,6 @@ static void vring_packed_event_read(VirtIODevice *vdev, /* Make sure flags is seen before off_wrap */ smp_rmb(); e->off_wrap = virtio_lduw_phys_cached(vdev, cache, off_off); - virtio_tswap16s(vdev, &e->flags); } static void vring_packed_off_wrap_write(VirtIODevice *vdev, @@ -885,6 +884,7 @@ static void virtqueue_packed_flush(VirtQueue *vq, unsigned int count) if (vq->used_idx >= vq->vring.num) { vq->used_idx -= vq->vring.num; vq->used_wrap_counter ^= 1; + vq->signalled_used_valid = false; } } @@ -983,7 +983,7 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq, VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; - unsigned int max, idx; + unsigned int idx; unsigned int total_bufs, in_total, out_total; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; int64_t len = 0; @@ -992,13 +992,12 @@ static void virtqueue_split_get_avail_bytes(VirtQueue *vq, idx = vq->last_avail_idx; total_bufs = in_total = out_total = 0; - max = vq->vring.num; - while ((rc = virtqueue_num_heads(vq, idx)) > 0) { MemoryRegionCache *desc_cache = &caches->desc; unsigned int num_bufs; VRingDesc desc; unsigned int i; + unsigned int max = vq->vring.num; num_bufs = total_bufs; @@ -1120,7 +1119,7 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, VRingMemoryRegionCaches *caches) { VirtIODevice *vdev = vq->vdev; - unsigned int max, idx; + unsigned int idx; unsigned int total_bufs, in_total, out_total; MemoryRegionCache *desc_cache; MemoryRegionCache indirect_desc_cache = MEMORY_REGION_CACHE_INVALID; @@ -1132,14 +1131,14 @@ static void virtqueue_packed_get_avail_bytes(VirtQueue *vq, wrap_counter = vq->last_avail_wrap_counter; total_bufs = in_total = out_total = 0; - max = vq->vring.num; - for (;;) { unsigned int num_bufs = total_bufs; unsigned int i = idx; int rc; + unsigned int max = vq->vring.num; desc_cache = &caches->desc; + vring_packed_desc_read(vdev, &desc, desc_cache, idx, true); if (!is_desc_avail(desc.flags, wrap_counter)) { break; @@ -1306,7 +1305,8 @@ static bool virtqueue_map_desc(VirtIODevice *vdev, unsigned int *p_num_sg, iov[num_sg].iov_base = dma_memory_map(vdev->dma_as, pa, &len, is_write ? 
DMA_DIRECTION_FROM_DEVICE : - DMA_DIRECTION_TO_DEVICE); + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (!iov[num_sg].iov_base) { virtio_error(vdev, "virtio: bogus descriptor or out of resources"); goto out; @@ -1355,7 +1355,8 @@ static void virtqueue_map_iovec(VirtIODevice *vdev, struct iovec *sg, sg[i].iov_base = dma_memory_map(vdev->dma_as, addr[i], &len, is_write ? DMA_DIRECTION_FROM_DEVICE : - DMA_DIRECTION_TO_DEVICE); + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); if (!sg[i].iov_base) { error_report("virtio: error trying to map MMIO memory"); exit(1); @@ -1633,6 +1634,11 @@ static void *virtqueue_packed_pop(VirtQueue *vq, size_t sz) &indirect_desc_cache); } while (rc == VIRTQUEUE_READ_DESC_MORE); + if (desc_cache != &indirect_desc_cache) { + /* Buffer ID is included in the last descriptor in the list. */ + id = desc.id; + } + /* Now copy what we have collected and mapped */ elem = virtqueue_alloc_element(sz, out_num, in_num); for (i = 0; i < out_num; i++) { @@ -1945,7 +1951,14 @@ int virtio_set_status(VirtIODevice *vdev, uint8_t val) k->set_status(vdev, val); } vdev->status = val; - + if (val) { + qemu_log("%s device status is %d that means %s\n", + vdev->name, val, + (val & VIRTIO_CONFIG_S_DRIVER_OK) ? "DRIVER OK" : + (val & VIRTIO_CONFIG_S_DRIVER) ? "DRIVER" : + (val & VIRTIO_CONFIG_S_ACKNOWLEDGE) ? "ACKNOWLEDGE" : + (val & VIRTIO_CONFIG_S_FAILED) ? "FAILED" : "UNKNOWN"); + } return 0; } @@ -2234,12 +2247,17 @@ void virtio_queue_set_rings(VirtIODevice *vdev, int n, hwaddr desc, void virtio_queue_set_num(VirtIODevice *vdev, int n, int num) { + int vq_max_size = VIRTQUEUE_MAX_SIZE; + + if (!strcmp(vdev->name, "virtio-net")) { + vq_max_size = VIRTIO_NET_VQ_MAX_SIZE; + } + /* Don't allow guest to flip queue between existent and * nonexistent states, or to set it to an invalid size. */ if (!!num != !!vdev->vq[n].vring.num || - num > VIRTQUEUE_MAX_SIZE || - num < 0) { + num > vq_max_size || num < 0) { return; } vdev->vq[n].vring.num = num; @@ -2389,8 +2407,11 @@ VirtQueue *virtio_add_queue(VirtIODevice *vdev, int queue_size, break; } - if (i == VIRTIO_QUEUE_MAX || queue_size > VIRTQUEUE_MAX_SIZE) + if (i == VIRTIO_QUEUE_MAX) { + qemu_log("unacceptable queue_size (%d) or num (%d)\n", + queue_size, i); abort(); + } vdev->vq[i].vring.num = queue_size; vdev->vq[i].vring.num_default = queue_size; @@ -2407,6 +2428,7 @@ void virtio_delete_queue(VirtQueue *vq) { vq->vring.num = 0; vq->vring.num_default = 0; + vq->vring.align = 0; vq->handle_output = NULL; vq->handle_aio_output = NULL; g_free(vq->used_elems); @@ -2844,6 +2866,35 @@ static const VMStateDescription vmstate_virtio = { } }; +static void check_vring_avail_num(VirtIODevice *vdev, int index) +{ + uint16_t nheads; + VRingMemoryRegionCaches *caches; + + rcu_read_lock(); + caches = qatomic_rcu_read(&vdev->vq[index].vring.caches); + if (caches == NULL) { + /* + * caches may be NULL if virtio_reset is called at the same time, + * such as when the virtual machine starts. + */ + rcu_read_unlock(); + return; + } + + /* Check it isn't doing strange things with descriptor numbers. 
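
check_vring_avail_num(), whose body continues just below, leans on free-running 16-bit ring indices: the guest's avail index and the device's last_avail_idx both wrap modulo 65536, so their uint16_t difference counts outstanding heads correctly even across rollover, and a difference exceeding the ring size can only mean corrupted state. A small demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* free-running ring indices: the 16-bit difference is correct
         * even across the 0xffff -> 0x0000 rollover */
        uint16_t avail_idx = 3, last_avail_idx = 0xfffe;
        uint16_t nheads = avail_idx - last_avail_idx;   /* == 5 */
        unsigned int vring_num = 256;

        printf("outstanding heads: %u\n", nheads);
        if (nheads > vring_num) {
            printf("guest index inconsistent with host index\n");
        }
        return 0;
    }
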
*/ + nheads = vring_avail_idx(&vdev->vq[index]) - vdev->vq[index].last_avail_idx; + if (nheads > vdev->vq[index].vring.num) { + qemu_log("VQ %d size 0x%x Guest index 0x%x " + "inconsistent with Host index 0x%x: " + "delta 0x%x\n", + index, vdev->vq[index].vring.num, + vring_avail_idx(&vdev->vq[index]), + vdev->vq[index].last_avail_idx, nheads); + } + rcu_read_unlock(); +} + int virtio_save(VirtIODevice *vdev, QEMUFile *f) { BusState *qbus = qdev_get_parent_bus(DEVICE(vdev)); @@ -2874,6 +2925,8 @@ int virtio_save(VirtIODevice *vdev, QEMUFile *f) if (vdev->vq[i].vring.num == 0) break; + check_vring_avail_num(vdev, i); + qemu_put_be32(f, vdev->vq[i].vring.num); if (k->has_variable_vring_alignment) { qemu_put_be32(f, vdev->vq[i].vring.align); @@ -2931,6 +2984,13 @@ static int virtio_set_features_nocheck(VirtIODevice *vdev, uint64_t val) { VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); bool bad = (val & ~(vdev->host_features)) != 0; + uint64_t feat = val & ~(vdev->host_features); + + if (bad && k->print_features) { + qemu_log("error: Please check host config, "\ + "because host does not support required feature bits 0x%" PRIx64 "\n", feat); + k->print_features(feat); + } val &= vdev->host_features; if (k->set_features) { @@ -3242,6 +3302,7 @@ void virtio_init(VirtIODevice *vdev, const char *name, vdev->start_on_kick = false; vdev->started = false; + vdev->vhost_started = false; vdev->device_id = device_id; vdev->status = 0; qatomic_set(&vdev->isr, 0); @@ -3491,7 +3552,14 @@ static void virtio_queue_guest_notifier_read(EventNotifier *n) virtio_irq(vq); } } +static void virtio_config_guest_notifier_read(EventNotifier *n) +{ + VirtIODevice *vdev = container_of(n, VirtIODevice, config_notifier); + if (event_notifier_test_and_clear(n)) { + virtio_notify_config(vdev); + } +} void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, bool with_irqfd) { @@ -3508,6 +3576,23 @@ void virtio_queue_set_guest_notifier_fd_handler(VirtQueue *vq, bool assign, } } +void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev, + bool assign, bool with_irqfd) +{ + EventNotifier *n; + n = &vdev->config_notifier; + if (assign && !with_irqfd) { + event_notifier_set_handler(n, virtio_config_guest_notifier_read); + } else { + event_notifier_set_handler(n, NULL); + } + if (!assign) { + /* Test and clear notifier before closing it,*/ + /* in case poll callback didn't have time to run. 
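
virtio_config_guest_notifier_read() above recovers its VirtIODevice from a bare EventNotifier pointer with container_of(), the usual C idiom when a callback only receives a pointer to an embedded member. A self-contained illustration with demo types (not QEMU's definitions):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of_demo(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    typedef struct { int fd; } NotifierDemo;

    typedef struct {
        const char *name;
        NotifierDemo config_notifier;
    } DeviceDemo;

    /* the callback only sees the embedded notifier, yet can recover
     * the device that contains it */
    static void config_read(NotifierDemo *n)
    {
        DeviceDemo *dev = container_of_demo(n, DeviceDemo, config_notifier);
        printf("config event for %s\n", dev->name);
    }

    int main(void)
    {
        DeviceDemo dev = { .name = "virtio-demo" };
        config_read(&dev.config_notifier);
        return 0;
    }
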
*/ + virtio_config_guest_notifier_read(n); + } +} + EventNotifier *virtio_queue_get_guest_notifier(VirtQueue *vq) { return &vq->guest_notifier; @@ -3581,6 +3666,11 @@ EventNotifier *virtio_queue_get_host_notifier(VirtQueue *vq) return &vq->host_notifier; } +EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev) +{ + return &vdev->config_notifier; +} + void virtio_queue_set_host_notifier_enabled(VirtQueue *vq, bool enabled) { vq->host_notifier_enabled = enabled; @@ -3874,3 +3964,13 @@ static void virtio_register_types(void) } type_init(virtio_register_types) + +QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev, + QEMUBHFunc *cb, void *opaque, + const char *name) +{ + DeviceState *transport = qdev_get_parent_bus(dev)->parent; + + return qemu_bh_new_full(cb, opaque, name, + &transport->mem_reentrancy_guard); +} diff --git a/hw/xen/xen_pt_config_init.c b/hw/xen/xen_pt_config_init.c index c5c4e943a8453083e806156222f8c6bce35062e2..5f28800879a8256b6672fee2a0df72429b2c196d 100644 --- a/hw/xen/xen_pt_config_init.c +++ b/hw/xen/xen_pt_config_init.c @@ -1924,7 +1924,7 @@ static void xen_pt_config_reg_init(XenPCIPassthroughState *s, if (reg->init) { uint32_t host_mask, size_mask; unsigned int offset; - uint32_t val; + uint32_t val = 0; /* initialize emulate register */ rc = reg->init(s, reg_entry->reg, @@ -1965,11 +1965,12 @@ static void xen_pt_config_reg_init(XenPCIPassthroughState *s, if ((data & host_mask) != (val & host_mask)) { uint32_t new_val; - - /* Mask out host (including past size). */ - new_val = val & host_mask; - /* Merge emulated ones (excluding the non-emulated ones). */ - new_val |= data & host_mask; + /* + * Merge the emulated bits (data) with the host bits (val) + * and mask out the bits past size to enable restoration + * of the proper value for logging below. + */ + new_val = XEN_PT_MERGE_VALUE(val, data, host_mask) & size_mask; /* Leave intact host and emulated values past the size - even though * we do not care as we write per reg->size granularity, but for the * logging below lets have the proper value. */ @@ -2031,12 +2032,16 @@ void xen_pt_config_init(XenPCIPassthroughState *s, Error **errp) } } - /* - * By default we will trap up to 0x40 in the cfg space. - * If an intel device is pass through we need to trap 0xfc, - * therefore the size should be 0xff. - */ if (xen_pt_emu_reg_grps[i].grp_id == XEN_PCI_INTEL_OPREGION) { + if (!is_igd_vga_passthrough(&s->real_device) || + s->real_device.vendor_id != PCI_VENDOR_ID_INTEL) { + continue; + } + /* + * By default we will trap up to 0x40 in the cfg space. + * If an intel device is passed through we need to trap 0xfc, + * therefore the size should be 0xff. + */ reg_grp_offset = XEN_PCI_INTEL_OPREGION; } diff --git a/include/block/aio-wait.h b/include/block/aio-wait.h index b39eefb38d1607e881b4020e384b644d8849ff1d..e4b811433d3f2bfd24d3eeb645ce027ed571a55f 100644 --- a/include/block/aio-wait.h +++ b/include/block/aio-wait.h @@ -59,10 +59,13 @@ typedef struct { extern AioWait global_aio_wait; /** - * AIO_WAIT_WHILE: + * AIO_WAIT_WHILE_INTERNAL: * @ctx: the aio context, or NULL if multiple aio contexts (for which the * caller does not hold a lock) are involved in the polling condition. * @cond: wait while this conditional expression is true + * @unlock: whether to unlock and then lock again @ctx. This applies + * only when waiting for another AioContext from the main loop. + * Otherwise it's ignored. * * Wait while a condition is true. Use this to implement synchronous * operations that require event loop activity. 
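
The @unlock parameter documented above, and threaded through the macro body in the next hunk, only controls whether the caller's AioContext lock is dropped around each poll iteration. A simplified pthread rendering of that loop shape, with a plain mutex standing in for the AioContext lock (names are illustrative):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending = 3;

    static bool cond_busy(void) { return pending > 0; }

    static void poll_main_loop(void)
    {
        /* stand-in for aio_poll(qemu_get_aio_context(), true) */
        pending--;
        printf("polled, %d requests left\n", pending);
    }

    /* shape of AIO_WAIT_WHILE_INTERNAL: poll while the condition holds,
     * optionally releasing the held context lock around each poll so
     * the owning thread can make progress */
    static bool wait_while(bool unlock)
    {
        bool waited = false;

        while (cond_busy()) {
            if (unlock) {
                pthread_mutex_unlock(&ctx_lock);
            }
            poll_main_loop();
            if (unlock) {
                pthread_mutex_lock(&ctx_lock);
            }
            waited = true;
        }
        return waited;
    }

    int main(void)
    {
        pthread_mutex_lock(&ctx_lock);
        printf("waited: %d\n", wait_while(true));
        pthread_mutex_unlock(&ctx_lock);
        return 0;
    }

AIO_WAIT_WHILE then maps to unlock == true and AIO_WAIT_WHILE_UNLOCKED to unlock == false, as the two wrapper macros below show.
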
@@ -75,7 +78,7 @@ extern AioWait global_aio_wait; * wait on conditions between two IOThreads since that could lead to deadlock, * go via the main loop instead. */ -#define AIO_WAIT_WHILE(ctx, cond) ({ \ +#define AIO_WAIT_WHILE_INTERNAL(ctx, cond, unlock) ({ \ bool waited_ = false; \ AioWait *wait_ = &global_aio_wait; \ AioContext *ctx_ = (ctx); \ @@ -90,11 +93,11 @@ extern AioWait global_aio_wait; assert(qemu_get_current_aio_context() == \ qemu_get_aio_context()); \ while ((cond)) { \ - if (ctx_) { \ + if (unlock && ctx_) { \ aio_context_release(ctx_); \ } \ aio_poll(qemu_get_aio_context(), true); \ - if (ctx_) { \ + if (unlock && ctx_) { \ aio_context_acquire(ctx_); \ } \ waited_ = true; \ @@ -103,6 +106,12 @@ extern AioWait global_aio_wait; qatomic_dec(&wait_->num_waiters); \ waited_; }) +#define AIO_WAIT_WHILE(ctx, cond) \ + AIO_WAIT_WHILE_INTERNAL(ctx, cond, true) + +#define AIO_WAIT_WHILE_UNLOCKED(ctx, cond) \ + AIO_WAIT_WHILE_INTERNAL(ctx, cond, false) + /** * aio_wait_kick: * Wake up the main thread if it is waiting on AIO_WAIT_WHILE(). During diff --git a/include/block/aio.h b/include/block/aio.h index 47fbe9d81f2ee6ebcc7d2921f1ba6ab1693c9160..c7da152985e100b2a15db190b2dbbde06b4cb665 100644 --- a/include/block/aio.h +++ b/include/block/aio.h @@ -22,6 +22,8 @@ #include "qemu/event_notifier.h" #include "qemu/thread.h" #include "qemu/timer.h" +#include "hw/qdev-core.h" + typedef struct BlockAIOCB BlockAIOCB; typedef void BlockCompletionFunc(void *opaque, int ret); @@ -321,9 +323,11 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, * is opaque and must be allocated prior to its use. * * @name: A human-readable identifier for debugging purposes. + * @reentrancy_guard: A guard set when entering a cb to prevent + * device-reentrancy issues */ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, - const char *name); + const char *name, MemReentrancyGuard *reentrancy_guard); /** * aio_bh_new: Allocate a new bottom half structure @@ -332,7 +336,17 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, * string. */ #define aio_bh_new(ctx, cb, opaque) \ - aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb))) + aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL) + +/** + * aio_bh_new_guarded: Allocate a new bottom half structure with a + * reentrancy_guard + * + * A convenience wrapper for aio_bh_new_full() that uses the cb as the name + * string. + */ +#define aio_bh_new_guarded(ctx, cb, opaque, guard) \ + aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard) /** * aio_notify: Force processing of pending events. 
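
The reentrancy_guard argument added to aio_bh_new_full() above is what the virtio-balloon and virtio-crypto hunks earlier in this series pass down: a per-device flag held while a bottom half runs, so a nested trigger is refused instead of re-entering device code. A minimal sketch of the guard idea (hypothetical names, not QEMU's MemReentrancyGuard handling):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct { bool engaged; } GuardDemo;

    static GuardDemo guard;

    /* run a bottom-half callback only if the guard is free; a callback
     * that re-triggers itself is blocked instead of re-entered */
    static void run_bh(void (*cb)(void *), void *opaque)
    {
        if (guard.engaged) {
            printf("nested trigger blocked\n");
            return;
        }
        guard.engaged = true;
        cb(opaque);
        guard.engaged = false;
    }

    static void cb(void *opaque)
    {
        int *depth = opaque;

        if ((*depth)++ == 0) {
            run_bh(cb, opaque);   /* attempt to re-enter device code */
        }
    }

    int main(void)
    {
        int depth = 0;
        run_bh(cb, &depth);
        return 0;
    }
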
diff --git a/include/block/block.h b/include/block/block.h index e5dd22b03435b6d7972fb8cf991c76681a2681cd..f885f113efb951779b60c438280debf6ecdeec36 100644 --- a/include/block/block.h +++ b/include/block/block.h @@ -376,6 +376,9 @@ BdrvChild *bdrv_open_child(const char *filename, const BdrvChildClass *child_class, BdrvChildRole child_role, bool allow_none, Error **errp); +int bdrv_open_file_child(const char *filename, + QDict *options, const char *bdref_key, + BlockDriverState *parent, Error **errp); BlockDriverState *bdrv_open_blockdev_ref(BlockdevRef *ref, Error **errp); int bdrv_set_backing_hd(BlockDriverState *bs, BlockDriverState *backing_hd, Error **errp); diff --git a/include/block/block_int.h b/include/block/block_int.h index f4c75e8ba956b4486e9155bb9be1efbd2aeaca49..701f031102a7437d5c93dd9d25f431f53d089271 100644 --- a/include/block/block_int.h +++ b/include/block/block_int.h @@ -61,6 +61,8 @@ #define BLOCK_OPT_DATA_FILE_RAW "data_file_raw" #define BLOCK_OPT_COMPRESSION_TYPE "compression_type" #define BLOCK_OPT_EXTL2 "extended_l2" +#define BLOCK_OPT_CACHE "cache" +#define BLOCK_OPT_BUFFER_SIZE "buffer_size" #define BLOCK_PROBE_BUF_SIZE 512 diff --git a/include/block/nbd.h b/include/block/nbd.h index 78d101b77488ab2b32d27d3ccb0e6f92354e3433..a31c34a8a6b3a48a023b4554a6dc7ef6acb44d64 100644 --- a/include/block/nbd.h +++ b/include/block/nbd.h @@ -27,6 +27,19 @@ extern const BlockExportDriver blk_exp_nbd; +/* + * NBD_DEFAULT_HANDSHAKE_MAX_SECS: Number of seconds in which client must + * succeed at NBD_OPT_GO before being forcefully dropped as too slow. + */ +#define NBD_DEFAULT_HANDSHAKE_MAX_SECS 10 + +/* + * NBD_DEFAULT_MAX_CONNECTIONS: Number of client sockets to allow at + * once; must be large enough to allow a MULTI_CONN-aware client like + * nbdcopy to create its typical number of 8-16 sockets. + */ +#define NBD_DEFAULT_MAX_CONNECTIONS 100 + /* Handshake phase structs - this struct is passed on the wire */ struct NBDOption { @@ -338,9 +351,12 @@ AioContext *nbd_export_aio_context(NBDExport *exp); NBDExport *nbd_export_find(const char *name); void nbd_client_new(QIOChannelSocket *sioc, + uint32_t handshake_max_secs, QCryptoTLSCreds *tlscreds, const char *tlsauthz, - void (*close_fn)(NBDClient *, bool)); + void (*close_fn)(NBDClient *, bool), + void *owner); +void *nbd_client_owner(NBDClient *client); void nbd_client_get(NBDClient *client); void nbd_client_put(NBDClient *client); diff --git a/include/chardev/char.h b/include/chardev/char.h index a319b5fdff7f583ba9493618eab1ab49dde08896..f388d4b109b4641d235f64979ba8bef8b795bc6a 100644 --- a/include/chardev/char.h +++ b/include/chardev/char.h @@ -14,6 +14,8 @@ #define IAC_SB 250 #define IAC 255 +#define CHR_FOR_VHOST_USER 0x32a1 + /* character device */ typedef struct CharBackend CharBackend; @@ -70,6 +72,7 @@ struct Chardev { GSource *gsource; GMainContext *gcontext; DECLARE_BITMAP(features, QEMU_CHAR_FEATURE_LAST); + int chr_for_flag; }; /** @@ -227,6 +230,16 @@ int qemu_chr_write(Chardev *s, const uint8_t *buf, int len, bool write_all); #define qemu_chr_write_all(s, buf, len) qemu_chr_write(s, buf, len, true) int qemu_chr_wait_connected(Chardev *chr, Error **errp); +/** + * @qemu_chr_set_reconnect_time: + * + * Set reconnect time for char disconnect. + * Currently, only vhost user will call it. 
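
NBD_DEFAULT_HANDSHAKE_MAX_SECS, introduced in the nbd.h hunk above, caps how long a connected client may sit in option negotiation before the server drops it as too slow. A reduced sketch of such a deadline check; the real server arms a timer rather than polling, and all names here are illustrative:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define HANDSHAKE_MAX_SECS_DEMO 10

    typedef struct {
        time_t connected_at;
        bool negotiated;        /* set once NBD_OPT_GO succeeds */
    } ClientDemo;

    static bool handshake_expired(const ClientDemo *c, time_t now)
    {
        return !c->negotiated &&
               now - c->connected_at > HANDSHAKE_MAX_SECS_DEMO;
    }

    int main(void)
    {
        /* a client that connected 11 seconds ago and never finished */
        ClientDemo c = { .connected_at = time(NULL) - 11 };

        if (handshake_expired(&c, time(NULL))) {
            printf("dropping client: handshake deadline exceeded\n");
        }
        return 0;
    }
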
+ * + * @reconnect_time the reconnect_time to be set + */ +void qemu_chr_set_reconnect_time(Chardev *chr, int64_t reconnect_time); + #define TYPE_CHARDEV "chardev" OBJECT_DECLARE_TYPE(Chardev, ChardevClass, CHARDEV) @@ -306,6 +319,9 @@ struct ChardevClass { /* handle various events */ void (*chr_be_event)(Chardev *s, QEMUChrEvent event); + + /* set reconnect time */ + void (*chr_set_reconnect_time)(Chardev *chr, int64_t reconnect_time); }; Chardev *qemu_chardev_new(const char *id, const char *typename, diff --git a/include/disas/dis-asm.h b/include/disas/dis-asm.h index 08e1beec854f1a7260f883d902dff379e8f1bbaa..b165453fa188c8ca72977b6593d8595a9fd77093 100644 --- a/include/disas/dis-asm.h +++ b/include/disas/dis-asm.h @@ -191,6 +191,9 @@ enum bfd_architecture #define bfd_mach_alpha_ev4 0x10 #define bfd_mach_alpha_ev5 0x20 #define bfd_mach_alpha_ev6 0x30 + bfd_arch_sw_64, /* Dec Sw_64 */ +#define bfd_mach_sw_64 1 +#define bfd_mach_sw_64_core3 1621 bfd_arch_arm, /* Advanced Risc Machines ARM */ #define bfd_mach_arm_unknown 0 #define bfd_mach_arm_2 1 @@ -333,7 +336,7 @@ typedef struct disassemble_info { Returns an errno value or 0 for success. */ int (*read_memory_func) (bfd_vma memaddr, bfd_byte *myaddr, int length, - struct disassemble_info *info); + struct disassemble_info *info); /* Function which should be called if we get an error that we can't recover from. STATUS is the errno value from read_memory_func and @@ -429,6 +432,7 @@ int print_insn_h8500 (bfd_vma, disassemble_info*); int print_insn_arm_a64 (bfd_vma, disassemble_info*); int print_insn_alpha (bfd_vma, disassemble_info*); disassembler_ftype arc_get_disassembler (int, int); +int print_insn_sw_64 (bfd_vma, disassemble_info*); int print_insn_arm (bfd_vma, disassemble_info*); int print_insn_sparc (bfd_vma, disassemble_info*); int print_insn_big_a29k (bfd_vma, disassemble_info*); @@ -461,6 +465,7 @@ int print_insn_riscv32 (bfd_vma, disassemble_info*); int print_insn_riscv64 (bfd_vma, disassemble_info*); int print_insn_rx(bfd_vma, disassemble_info *); int print_insn_hexagon(bfd_vma, disassemble_info *); +int print_insn_loongarch(bfd_vma, disassemble_info*); #ifdef CONFIG_CAPSTONE bool cap_disas_target(disassemble_info *info, uint64_t pc, size_t size); diff --git a/include/elf.h b/include/elf.h index 811bf4a1cb5cc9e271ac09c65b1554350a929c26..cd7808f37a64a6345e08c24e753dc17615f40060 100644 --- a/include/elf.h +++ b/include/elf.h @@ -182,6 +182,8 @@ typedef struct mips_elf_abiflags_v0 { #define EM_NANOMIPS 249 /* Wave Computing nanoMIPS */ +#define EM_LOONGARCH 258 /* Loongarch */ + /* * This is an interim value that we will use until the committee comes * up with a final number. 
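The new e_machine constant is what an ELF loader keys on when probing a binary; a minimal check using it (a sketch, not code from this patch):

    #include "elf.h"

    /* Sketch: recognize a LoongArch object by its ELF machine number. */
    static bool elf_is_loongarch(const Elf64_Ehdr *ehdr)
    {
        return ehdr->e_machine == EM_LOONGARCH;
    }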
@@ -207,6 +209,8 @@ typedef struct mips_elf_abiflags_v0 { #define EF_AVR_MACH 0x7F /* Mask for AVR e_flags to get core type */ +#define EM_SW64 0x9916 /* SW64 */ + /* This is the info that is needed to parse the dynamic section of the file */ #define DT_NULL 0 #define DT_NEEDED 1 @@ -1417,6 +1421,48 @@ typedef struct { #define EF_RISCV_RVE 0x0008 #define EF_RISCV_TSO 0x0010 +/* + * SW_64 ELF relocation types + */ +#define EM_SW_64 0x9916 +#define R_SW_64_NONE 0 /* No reloc */ +#define R_SW_64_REFLONG 1 /* Direct 32 bit */ +#define R_SW_64_REFQUAD 2 /* Direct 64 bit */ +#define R_SW_64_GPREL32 3 /* GP relative 32 bit */ +#define R_SW_64_LITERAL 4 /* GP relative 16 bit w/optimization */ +#define R_SW_64_LITUSE 5 /* Optimization hint for LITERAL */ +#define R_SW_64_GPDISP 6 /* Add displacement to GP */ +#define R_SW_64_BRADDR 7 /* PC+4 relative 23 bit shifted */ +#define R_SW_64_HINT 8 /* PC+4 relative 16 bit shifted */ +#define R_SW_64_SREL16 9 /* PC relative 16 bit */ +#define R_SW_64_SREL32 10 /* PC relative 32 bit */ +#define R_SW_64_SREL64 11 /* PC relative 64 bit */ +#define R_SW_64_GPRELHIGH 17 /* GP relative 32 bit, high 16 bits */ +#define R_SW_64_GPRELLOW 18 /* GP relative 32 bit, low 16 bits */ +#define R_SW_64_GPREL16 19 /* GP relative 16 bit */ +#define R_SW_64_COPY 24 /* Copy symbol at runtime */ +#define R_SW_64_GLOB_DAT 25 /* Create GOT entry */ +#define R_SW_64_JMP_SLOT 26 /* Create PLT entry */ +#define R_SW_64_RELATIVE 27 /* Adjust by program base */ +#define R_SW_64_TLS_GD_HI 28 +#define R_SW_64_TLSGD 29 +#define R_SW_64_TLS_LDM 30 +#define R_SW_64_DTPMOD64 31 +#define R_SW_64_GOTDTPREL 32 +#define R_SW_64_DTPREL64 33 +#define R_SW_64_DTPRELHI 34 +#define R_SW_64_DTPRELLO 35 +#define R_SW_64_DTPREL16 36 +#define R_SW_64_GOTTPREL 37 +#define R_SW_64_TPREL64 38 +#define R_SW_64_TPRELHI 39 +#define R_SW_64_TPRELLO 40 +#define R_SW_64_TPREL16 41 +/* Keep this the last entry. */ +#define R_SW_64_NUM 46 +/* Legal values for sh_flags field of Elf64_Shdr. */ +#define SHF_SW_64_GPREL 0x10000000 + typedef struct elf32_rel { Elf32_Addr r_offset; Elf32_Word r_info; diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h index 039d422bf4cb40d4bfd5f37296c0b4e3f699f248..2a3050f553a18b671c6524502128681c2696621e 100644 --- a/include/exec/cpu-common.h +++ b/include/exec/cpu-common.h @@ -11,6 +11,7 @@ void qemu_init_cpu_list(void); void cpu_list_lock(void); void cpu_list_unlock(void); +unsigned int cpu_list_generation_id_get(void); void tcg_flush_softmmu_tlb(CPUState *cs); @@ -86,8 +87,6 @@ void *cpu_physical_memory_map(hwaddr addr, bool is_write); void cpu_physical_memory_unmap(void *buffer, hwaddr len, bool is_write, hwaddr access_len); -void cpu_register_map_client(QEMUBH *bh); -void cpu_unregister_map_client(QEMUBH *bh); bool cpu_physical_memory_is_io(hwaddr phys_addr); diff --git a/include/exec/memattrs.h b/include/exec/memattrs.h index 95f2d20d55b8266903eb430084a2f8b7c39759e7..9fb98bc1efd3d10c1346e10fcc121f589b55af53 100644 --- a/include/exec/memattrs.h +++ b/include/exec/memattrs.h @@ -35,6 +35,14 @@ typedef struct MemTxAttrs { unsigned int secure:1; /* Memory access is usermode (unprivileged) */ unsigned int user:1; + /* + * Bus interconnect and peripherals can access anything (memories, + * devices) by default. By setting the 'memory' bit, bus transactions + * are restricted to "normal" memories (per the AMBA documentation) + * versus devices. Access to devices will be logged and rejected + * (see MEMTX_ACCESS_ERROR).
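+ *
+ * For example (illustrative, not from this patch), an interconnect
+ * master that must never reach device MMIO can issue its DMA with
+ * MemTxAttrs attrs = { .memory = 1 };
+ * and treat a MEMTX_ACCESS_ERROR result as a bus error.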
+ */ + unsigned int memory:1; /* Requester ID (for MSI for example) */ unsigned int requester_id:16; /* Invert endianness for this page */ @@ -66,6 +74,7 @@ typedef struct MemTxAttrs { #define MEMTX_OK 0 #define MEMTX_ERROR (1U << 0) /* device returned an error */ #define MEMTX_DECODE_ERROR (1U << 1) /* nothing at that address */ +#define MEMTX_ACCESS_ERROR (1U << 2) /* access denied */ typedef uint32_t MemTxResult; #endif diff --git a/include/exec/memory.h b/include/exec/memory.h index 20f1b27377eabf12b217d67c848257399dfd1177..2444e0f39d44b2ff4856af385d9e8bee329c304f 100644 --- a/include/exec/memory.h +++ b/include/exec/memory.h @@ -69,7 +69,10 @@ static inline void fuzz_dma_read_cb(size_t addr, /* Dirty tracking enabled because measuring dirty rate */ #define GLOBAL_DIRTY_DIRTY_RATE (1U << 1) -#define GLOBAL_DIRTY_MASK (0x3) +/* Dirty tracking enabled because dirty limit */ +#define GLOBAL_DIRTY_LIMIT (1U << 2) + +#define GLOBAL_DIRTY_MASK (0x7) extern unsigned int global_dirty_tracking; @@ -558,7 +561,7 @@ typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque); * A #RamDiscardManager coordinates which parts of specific RAM #MemoryRegion * regions are currently populated to be used/accessed by the VM, notifying * after parts were discarded (freeing up memory) and before parts will be - * populated (consuming memory), to be used/acessed by the VM. + * populated (consuming memory), to be used/accessed by the VM. * * A #RamDiscardManager can only be set for a RAM #MemoryRegion while the * #MemoryRegion isn't mapped yet; it cannot change while the #MemoryRegion is @@ -582,7 +585,7 @@ typedef void (*ReplayRamDiscard)(MemoryRegionSection *section, void *opaque); * Listeners are called in multiples of the minimum granularity (unless it * would exceed the registered range) and changes are aligned to the minimum * granularity within the #MemoryRegion. Listeners have to prepare for memory - * becomming discarded in a different granularity than it was populated and the + * becoming discarded in a different granularity than it was populated and the * other way around. */ struct RamDiscardManagerClass { @@ -734,6 +737,8 @@ struct MemoryRegion { bool is_iommu; RAMBlock *ram_block; Object *owner; + /* owner as TYPE_DEVICE. 
Used for re-entrancy checks in MR access hotpath */ + DeviceState *dev; const MemoryRegionOps *ops; void *opaque; @@ -757,6 +762,9 @@ struct MemoryRegion { unsigned ioeventfd_nb; MemoryRegionIoeventfd *ioeventfds; RamDiscardManager *rdm; /* Only for RAM */ + + /* For devices designed to perform re-entrant IO into their own IO MRs */ + bool disable_reentrancy_guard; }; struct IOMMUMemoryRegion { @@ -1027,6 +1035,13 @@ struct MemoryListener { QTAILQ_ENTRY(MemoryListener) link_as; }; +typedef struct AddressSpaceMapClient { + QEMUBH *bh; + QLIST_ENTRY(AddressSpaceMapClient) link; +} AddressSpaceMapClient; + +#define DEFAULT_MAX_BOUNCE_BUFFER_SIZE (4096) + /** * struct AddressSpace: describes a mapping of addresses to #MemoryRegion objects */ @@ -1043,6 +1058,14 @@ struct AddressSpace { struct MemoryRegionIoeventfd *ioeventfds; QTAILQ_HEAD(, MemoryListener) listeners; QTAILQ_ENTRY(AddressSpace) address_spaces_link; + + /* Maximum DMA bounce buffer size used for indirect memory map requests */ + size_t max_bounce_buffer_size; + /* Total size of bounce buffers currently allocated, atomically accessed */ + size_t bounce_buffer_size; + /* List of callbacks to invoke when buffers free up */ + QemuMutex map_client_list_lock; + QLIST_HEAD(, AddressSpaceMapClient) map_client_list; }; typedef struct AddressSpaceDispatch AddressSpaceDispatch; @@ -1239,7 +1262,7 @@ void memory_region_init_ram_flags_nomigrate(MemoryRegion *mr, Error **errp); /** - * memory_region_init_resizeable_ram: Initialize memory region with resizeable + * memory_region_init_resizeable_ram: Initialize memory region with resizable * RAM. Accesses into the region will * modify memory directly. Only an initial * portion of this RAM is actually used. @@ -2764,8 +2787,8 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, hwaddr len, * May return %NULL and set *@plen to zero(0), if resources needed to perform * the mapping are exhausted. * Use only for reads OR writes - not for read-modify-write operations. - * Use cpu_register_map_client() to know when retrying the map operation is - * likely to succeed. + * Use address_space_register_map_client() to know when retrying the map + * operation is likely to succeed. * * @as: #AddressSpace to be accessed * @addr: address within that address space @@ -2790,6 +2813,28 @@ void *address_space_map(AddressSpace *as, hwaddr addr, void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, bool is_write, hwaddr access_len); +/* + * address_space_register_map_client: Register a callback to invoke when + * resources for address_space_map() are available again. + * + * address_space_map may fail when there are not enough resources available, + * such as when bounce buffer memory would exceed the limit. The callback can + * be used to retry the address_space_map operation. Note that the callback + * gets automatically removed after firing. + * + * @as: #AddressSpace to be accessed + * @bh: callback to invoke when address_space_map() retry is appropriate + */ +void address_space_register_map_client(AddressSpace *as, QEMUBH *bh); + +/* + * address_space_unregister_map_client: Unregister a callback that has + * previously been registered and not fired yet. + * + * @as: #AddressSpace to be accessed + * @bh: callback to unregister + */ +void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh); /* Internal functions, part of the implementation of address_space_read. 
*/ MemTxResult address_space_read_full(AddressSpace *as, hwaddr addr, @@ -2906,6 +2951,22 @@ address_space_write_cached(MemoryRegionCache *cache, hwaddr addr, } } +/** + * address_space_set: Fill address space with a constant byte. + * + * Return a MemTxResult indicating whether the operation succeeded + * or failed (eg unassigned memory, device rejected the transaction, + * IOMMU fault). + * + * @as: #AddressSpace to be accessed + * @addr: address within that address space + * @c: constant byte to fill the memory + * @len: the number of bytes to fill with the constant byte + * @attrs: memory transaction attributes + */ +MemTxResult address_space_set(AddressSpace *as, hwaddr addr, + uint8_t c, hwaddr len, MemTxAttrs attrs); + #ifdef NEED_CPU_H /* enum device_endian to MemOp. */ static inline MemOp devend_memop(enum device_endian end) diff --git a/include/hw/acpi/acpi-defs.h b/include/hw/acpi/acpi-defs.h index c97e8633ad8f902992d4be88f108b393c436be92..ab8658322823b0f7550f6f65e1c457bb0d205a58 100644 --- a/include/hw/acpi/acpi-defs.h +++ b/include/hw/acpi/acpi-defs.h @@ -92,4 +92,42 @@ typedef struct AcpiFadtData { #define ACPI_FADT_ARM_PSCI_COMPLIANT (1 << 0) #define ACPI_FADT_ARM_PSCI_USE_HVC (1 << 1) +/* + * CPPC register definition from kernel header + * include/acpi/cppc_acpi.h + * The last element is newly added for easy use + */ +enum cppc_regs { + HIGHEST_PERF, + NOMINAL_PERF, + LOW_NON_LINEAR_PERF, + LOWEST_PERF, + GUARANTEED_PERF, + DESIRED_PERF, + MIN_PERF, + MAX_PERF, + PERF_REDUC_TOLERANCE, + TIME_WINDOW, + CTR_WRAP_TIME, + REFERENCE_CTR, + DELIVERED_CTR, + PERF_LIMITED, + ENABLE, + AUTO_SEL_ENABLE, + AUTO_ACT_WINDOW, + ENERGY_PERF, + REFERENCE_PERF, + LOWEST_FREQ, + NOMINAL_FREQ, + CPPC_REG_COUNT, +}; + +#define CPPC_REG_PER_CPU_STRIDE 0x40 + +/* + * Offset for each CPPC register; -1 for unavailable + * The whole register space is unavailable if desired perf offset is -1. + */ +extern int cppc_regs_offset[CPPC_REG_COUNT]; + #endif diff --git a/include/hw/acpi/acpi_dev_interface.h b/include/hw/acpi/acpi_dev_interface.h index ea6056ab926ffaf30c327032f2361d804729029e..601931433aaaf5e4540c304bec2d6a9cc956f72b 100644 --- a/include/hw/acpi/acpi_dev_interface.h +++ b/include/hw/acpi/acpi_dev_interface.h @@ -5,6 +5,7 @@ #include "qom/object.h" #include "hw/boards.h" #include "hw/qdev-core.h" +#include "hw/acpi/aml-build.h" /* These values are part of guest ABI, and can not be changed */ typedef enum { @@ -55,5 +56,6 @@ struct AcpiDeviceIfClass { void (*madt_cpu)(AcpiDeviceIf *adev, int uid, const CPUArchIdList *apic_ids, GArray *entry, bool force_enabled); + void (*cpu_cppc)(AcpiDeviceIf *adev, int uid, int num_cpu, Aml *dev); }; #endif diff --git a/include/hw/acpi/aml-build.h b/include/hw/acpi/aml-build.h index 8346003a224b027e01996024abaae0582f116ce9..84a72267f9d5f267c5f4550cc934e2025ecaa5aa 100644 --- a/include/hw/acpi/aml-build.h +++ b/include/hw/acpi/aml-build.h @@ -382,6 +382,9 @@ Aml *aml_dma(AmlDmaType typ, AmlDmaBusMaster bm, AmlTransferSize sz, uint8_t channel); Aml *aml_sleep(uint64_t msec); Aml *aml_i2c_serial_bus_device(uint16_t address, const char *resource_source); +Aml *aml_generic_register(AmlRegionSpace rs, uint8_t reg_width, + uint8_t reg_offset, AmlAccessType type, + uint64_t addr); /* Block AML object primitives */ Aml *aml_scope(const char *name_format, ...) 
GCC_FMT_ATTR(1, 2); @@ -412,6 +415,7 @@ Aml *aml_sizeof(Aml *arg); Aml *aml_concatenate(Aml *source1, Aml *source2, Aml *target); Aml *aml_object_type(Aml *object); +void build_append_byte(GArray *array, uint8_t val); void build_append_int_noprefix(GArray *table, uint64_t value, int size); typedef struct AcpiTable { @@ -489,6 +493,11 @@ void build_srat_memory(GArray *table_data, uint64_t base, void build_slit(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id); +void build_processor_hierarchy_node(GArray *tbl, uint32_t flags, + uint32_t parent, uint32_t id, + uint32_t *priv_rsrc, + uint32_t priv_num); + void build_pptt(GArray *table_data, BIOSLinker *linker, MachineState *ms, const char *oem_id, const char *oem_table_id); diff --git a/include/hw/acpi/cpu.h b/include/hw/acpi/cpu.h index 999caaf510600dda467b91dfa77c8ab9c0450068..d521025830904b1e48433ad627f43b484f4b6760 100644 --- a/include/hw/acpi/cpu.h +++ b/include/hw/acpi/cpu.h @@ -17,6 +17,8 @@ #include "hw/acpi/aml-build.h" #include "hw/hotplug.h" +#define ACPI_CPU_HOTPLUG_REG_LEN 12 + typedef struct AcpiCpuStatus { struct CPUState *cpu; uint64_t arch_id; @@ -58,7 +60,8 @@ typedef struct CPUHotplugFeatures { void build_cpus_aml(Aml *table, MachineState *machine, CPUHotplugFeatures opts, hwaddr io_base, const char *res_root, - const char *event_handler_method); + const char *event_handler_method, + AmlRegionSpace rs); void acpi_cpu_ospm_status(CPUHotplugState *cpu_st, ACPIOSTInfoList ***list); diff --git a/include/hw/acpi/generic_event_device.h b/include/hw/acpi/generic_event_device.h index d49217c445fbcc04664db41a87439386f71e190f..95ade521e9c722d6dfc8cb50b990512f0fbfd2bf 100644 --- a/include/hw/acpi/generic_event_device.h +++ b/include/hw/acpi/generic_event_device.h @@ -63,6 +63,7 @@ #include "hw/acpi/memory_hotplug.h" #include "hw/acpi/ghes.h" #include "qom/object.h" +#include "hw/acpi/cpu.h" #define ACPI_POWER_BUTTON_DEVICE "PWRB" @@ -82,8 +83,11 @@ OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED) /* ACPI_GED_REG_RESET value for reset*/ #define ACPI_GED_RESET_VALUE 0x42 -/* ACPI_GED_REG_SLEEP_CTL.SLP_TYP value for S5 (aka poweroff) */ -#define ACPI_GED_SLP_TYP_S5 0x05 +/* [ACPI 5.0 Chapter 4.8.3.7] Sleep Control and Status Register */ +#define ACPI_GED_SLP_TYP_POS 0x2 /* SLP_TYPx Bit Offset */ +#define ACPI_GED_SLP_TYP_MASK 0x07 /* SLP_TYPx 3-bit mask */ +#define ACPI_GED_SLP_TYP_S5 0x05 /* System _S5 State (Soft Off) */ +#define ACPI_GED_SLP_EN 0x20 /* SLP_EN write-only bit */ #define GED_DEVICE "GED" #define AML_GED_EVT_REG "EREG" @@ -97,6 +101,7 @@ OBJECT_DECLARE_SIMPLE_TYPE(AcpiGedState, ACPI_GED) #define ACPI_GED_MEM_HOTPLUG_EVT 0x1 #define ACPI_GED_PWR_DOWN_EVT 0x2 #define ACPI_GED_NVDIMM_HOTPLUG_EVT 0x4 +#define ACPI_GED_CPU_HOTPLUG_EVT 0x8 typedef struct GEDState { MemoryRegion evt; @@ -108,6 +113,8 @@ struct AcpiGedState { SysBusDevice parent_obj; MemHotplugState memhp_state; MemoryRegion container_memhp; + CPUHotplugState cpuhp_state; + MemoryRegion container_cpuhp; GEDState ged_state; uint32_t ged_event_bitmap; qemu_irq irq; diff --git a/include/hw/acpi/ls7a.h b/include/hw/acpi/ls7a.h new file mode 100644 index 0000000000000000000000000000000000000000..295baa4b5a7b4f57375b639c473b35b994d5d27d --- /dev/null +++ b/include/hw/acpi/ls7a.h @@ -0,0 +1,79 @@ +/* + * QEMU GMCH/LS7A PCI PM Emulation + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General 
Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef HW_ACPI_LS7A_H +#define HW_ACPI_LS7A_H + +#include "hw/acpi/acpi.h" +#include "hw/acpi/cpu_hotplug.h" +#include "hw/acpi/cpu.h" +#include "hw/acpi/memory_hotplug.h" +#include "hw/acpi/acpi_dev_interface.h" +#include "hw/acpi/tco.h" + +#define CPU_HOTPLUG_BASE 0x1e000000 +#define MEMORY_HOTPLUG_BASE 0x1e00000c + +typedef struct LS7APCIPMRegs { + /* + * In ls7a spec says that pm1_cnt register is 32bit width and + * that the upper 16bits are reserved and unused. + * PM1a_CNT_BLK = 2 in FADT so it is defined as uint16_t. + */ + ACPIREGS acpi_regs; + + MemoryRegion iomem; + MemoryRegion iomem_gpe; + MemoryRegion iomem_smi; + MemoryRegion iomem_reset; + + qemu_irq irq; /* SCI */ + + uint32_t pm_io_base; + Notifier powerdown_notifier; + + bool cpu_hotplug_legacy; + AcpiCpuHotplug gpe_cpu; + CPUHotplugState cpuhp_state; + + MemHotplugState acpi_memory_hotplug; + + uint8_t disable_s3; + uint8_t disable_s4; + uint8_t s4_val; +} LS7APCIPMRegs; + +void ls7a_pm_init(LS7APCIPMRegs *ls7a, qemu_irq *sci_irq); + +void ls7a_pm_iospace_update(LS7APCIPMRegs *pm, uint32_t pm_io_base); +extern const VMStateDescription vmstate_ls7a_pm; + +void ls7a_pm_add_properties(Object *obj, LS7APCIPMRegs *pm, Error **errp); + +void ls7a_pm_device_plug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp); +void ls7a_pm_device_unplug_request_cb(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp); +void ls7a_pm_device_unplug_cb(HotplugHandler *hotplug_dev, DeviceState *dev, + Error **errp); + +void ls7a_pm_ospm_status(AcpiDeviceIf *adev, ACPIOSTInfoList ***list); + +void ls7a_send_gpe(AcpiDeviceIf *adev, AcpiEventStatusBits ev); +#endif /* HW_ACPI_LS7A_H */ diff --git a/include/hw/arm/aspeed_soc.h b/include/hw/arm/aspeed_soc.h index 8139358549df70afdbdfb36dcc90d1917773d7da..18fb7eed4612eb2b3082fb6a6310dc0db67fa5b0 100644 --- a/include/hw/arm/aspeed_soc.h +++ b/include/hw/arm/aspeed_soc.h @@ -139,6 +139,8 @@ enum { ASPEED_DEV_EMMC, ASPEED_DEV_KCS, ASPEED_DEV_HACE, + ASPEED_DEV_DPMCU, + ASPEED_DEV_DP, }; #endif /* ASPEED_SOC_H */ diff --git a/include/hw/arm/boot.h b/include/hw/arm/boot.h index ce2b48b88bca591e3747c25bba7330aa17304485..36aa5dd5c67645d7386ba3cc0dfd35a2a36eb6be 100644 --- a/include/hw/arm/boot.h +++ b/include/hw/arm/boot.h @@ -36,6 +36,7 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size); /* arm_boot.c */ struct arm_boot_info { uint64_t ram_size; + void *numa_info; const char *kernel_filename; const char *kernel_cmdline; const char *initrd_filename; @@ -119,6 +120,9 @@ struct arm_boot_info { arm_endianness endianness; }; +void cpu_hotplug_register_reset(int ncpu); +void cpu_hotplug_reset_manually(int ncpu); + /** * arm_load_kernel - Loads memory with everything needed to boot * diff --git a/include/hw/arm/topology.h b/include/hw/arm/topology.h new file mode 100644 index 0000000000000000000000000000000000000000..33e4a0d55265cbaa26a7bb6bbb636e59e28b389f --- /dev/null +++ b/include/hw/arm/topology.h @@ -0,0 +1,68 @@ +/* + * ARM CPU topology data structures and functions + * + * Copyright (c) 2020 
HUAWEI TECHNOLOGIES CO.,LTD. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see . + */ + +#ifndef HW_ARM_TOPOLOGY_H +#define HW_ARM_TOPOLOGY_H + +typedef struct ARMCPUTopoInfo { + unsigned pkg_id; + unsigned cluster_id; + unsigned core_id; + unsigned smt_id; +} ARMCPUTopoInfo; + +/* Calculate (contiguous) CPU index based on topology */ +static inline unsigned idx_from_topo_ids(unsigned nr_clusters, + unsigned nr_cores, + unsigned nr_threads, + const ARMCPUTopoInfo *topo) +{ + assert(nr_clusters > 0); + assert(nr_cores > 0); + assert(nr_threads > 0); + assert(topo != NULL); + + return topo->pkg_id * nr_clusters * nr_cores * nr_threads + + topo->cluster_id * nr_cores * nr_threads + + topo->core_id * nr_threads + + topo->smt_id; +} + +/* Calculate thread/core/cluster/package topology + * based on (contiguous) CPU index + */ +static inline void topo_ids_from_idx(unsigned cpu_index, + unsigned nr_clusters, + unsigned nr_cores, + unsigned nr_threads, + ARMCPUTopoInfo *topo) +{ + assert(nr_clusters > 0); + assert(nr_cores > 0); + assert(nr_threads > 0); + assert(topo != NULL); + + topo->smt_id = cpu_index % nr_threads; + topo->core_id = cpu_index / nr_threads % nr_cores; + topo->cluster_id = cpu_index / nr_threads / nr_cores % nr_clusters; + topo->pkg_id = cpu_index / nr_threads / nr_cores / nr_clusters; +} + +#endif /* HW_ARM_TOPOLOGY_H */ + diff --git a/include/hw/arm/virt.h b/include/hw/arm/virt.h index dc6b66ffc8f8ceffcb1d83dc9909583e0a500828..7b99e84377401bfc7162309d52f91b34d623274b 100644 --- a/include/hw/arm/virt.h +++ b/include/hw/arm/virt.h @@ -38,6 +38,7 @@ #include "sysemu/kvm.h" #include "hw/intc/arm_gicv3_common.h" #include "qom/object.h" +#include "hw/acpi/acpi_dev_interface.h" #define NUM_GICV2M_SPIS 64 #define NUM_VIRTIO_TRANSPORTS 32 @@ -57,6 +58,65 @@ /* See Linux kernel arch/arm64/include/asm/pvclock-abi.h */ #define PVTIME_SIZE_PER_CPU 64 +/* ARM CLIDR_EL1 related definitions */ +/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */ +#define CTYPE_NONE 0b000 +#define CTYPE_INS 0b001 +#define CTYPE_DATA 0b010 +#define CTYPE_INS_DATA 0b011 +#define CTYPE_UNIFIED 0b100 + +#define ARM64_REG_CLIDR_EL1 ARM64_SYS_REG(3, 1, 0, 0, 1) + +#define CLIDR_CTYPE_SHIFT(level) (3 * (level - 1)) +#define CLIDR_CTYPE_MASK(level) (7 << CLIDR_CTYPE_SHIFT(level)) +#define CLIDR_CTYPE(clidr, level) \ + (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level)) + +/* L1 data cache */ +#define ARM_L1DCACHE_SIZE 65536 +#define ARM_L1DCACHE_SETS 256 +#define ARM_L1DCACHE_ASSOCIATIVITY 4 +#define ARM_L1DCACHE_ATTRIBUTES 2 +#define ARM_L1DCACHE_LINE_SIZE 64 + +/* L1 instruction cache */ +#define ARM_L1ICACHE_SIZE 65536 +#define ARM_L1ICACHE_SETS 256 +#define ARM_L1ICACHE_ASSOCIATIVITY 4 +#define ARM_L1ICACHE_ATTRIBUTES 4 +#define ARM_L1ICACHE_LINE_SIZE 64 + +/* L1 unified cache */ +#define ARM_L1CACHE_SIZE 131072 +#define ARM_L1CACHE_SETS 256 +#define ARM_L1CACHE_ASSOCIATIVITY 4 +#define ARM_L1CACHE_ATTRIBUTES 10 +#define 
ARM_L1CACHE_LINE_SIZE 128 + +/* L2 unified cache */ +#define ARM_L2CACHE_SIZE 524288 +#define ARM_L2CACHE_SETS 1024 +#define ARM_L2CACHE_ASSOCIATIVITY 8 +#define ARM_L2CACHE_ATTRIBUTES 10 +#define ARM_L2CACHE_LINE_SIZE 64 + +/* L3 unified cache */ +#define ARM_L3CACHE_SIZE 33554432 +#define ARM_L3CACHE_SETS 2048 +#define ARM_L3CACHE_ASSOCIATIVITY 15 +#define ARM_L3CACHE_ATTRIBUTES 10 +#define ARM_L3CACHE_LINE_SIZE 128 + +/* Definitions of the hardcoded cache info */ +typedef enum { + ARM_L1D_CACHE, + ARM_L1I_CACHE, + ARM_L1_CACHE, + ARM_L2_CACHE, + ARM_L3_CACHE +} ArmCacheType; + enum { VIRT_FLASH, VIRT_MEM, @@ -70,6 +130,7 @@ enum { VIRT_GIC_REDIST, VIRT_SMMU, VIRT_UART, + VIRT_CPUFREQ, VIRT_MMIO, VIRT_RTC, VIRT_FW_CFG, @@ -86,6 +147,7 @@ enum { VIRT_ACPI_GED, VIRT_NVDIMM_ACPI, VIRT_PVTIME, + VIRT_CPU_ACPI, VIRT_LOWMEMMAP_LAST, }; @@ -146,6 +208,7 @@ struct VirtMachineState { bool its; bool tcg_its; bool virt; + bool cpu_hotplug_enabled; bool ras; bool mte; OnOffAuto acpi; @@ -164,6 +227,7 @@ struct VirtMachineState { uint32_t msi_phandle; uint32_t iommu_phandle; int psci_conduit; + uint32_t boot_cpus; hwaddr highest_gpa; DeviceState *gic; DeviceState *acpi_dev; @@ -171,6 +235,7 @@ struct VirtMachineState { PCIBus *bus; char *oem_id; char *oem_table_id; + char *kvm_type; }; #define VIRT_ECAM_ID(high) (high ? VIRT_HIGH_PCIE_ECAM : VIRT_PCIE_ECAM) @@ -180,6 +245,12 @@ OBJECT_DECLARE_TYPE(VirtMachineState, VirtMachineClass, VIRT_MACHINE) void virt_acpi_setup(VirtMachineState *vms); bool virt_is_acpi_enabled(VirtMachineState *vms); +void virt_madt_cpu_entry(AcpiDeviceIf *adev, int uid, + const CPUArchIdList *cpu_list, GArray *entry, + bool force_enabled); +void virt_acpi_dsdt_cpu_cppc(AcpiDeviceIf *adev, int uid, + int num_cpu, Aml *dev); +bool cpu_l1_cache_unified(int cpu); /* Return the number of used redistributor regions */ static inline int virt_gicv3_redist_region_count(VirtMachineState *vms) @@ -188,8 +259,9 @@ static inline int virt_gicv3_redist_region_count(VirtMachineState *vms) vms->memmap[VIRT_GIC_REDIST].size / GICV3_REDIST_SIZE; assert(vms->gic_version == VIRT_GIC_VERSION_3); + GICv3State *s = ARM_GICV3_COMMON(vms->gic); - return MACHINE(vms)->smp.cpus > redist0_capacity ? 2 : 1; + return s->num_cpu > redist0_capacity ? 
2 : 1; } #endif /* QEMU_ARM_VIRT_H */ diff --git a/include/hw/block/block.h b/include/hw/block/block.h index 5902c0440a5f26f78d6f09453ef929986eca0330..282929e8f015e1bc36b3ab43c4513aae934ed04c 100644 --- a/include/hw/block/block.h +++ b/include/hw/block/block.h @@ -33,6 +33,8 @@ typedef struct BlockConf { bool share_rw; BlockdevOnError rerror; BlockdevOnError werror; + int64_t retry_interval; + int64_t retry_timeout; } BlockConf; static inline unsigned int get_physical_block_exp(BlockConf *conf) @@ -79,7 +81,11 @@ static inline unsigned int get_physical_block_exp(BlockConf *conf) DEFINE_PROP_BLOCKDEV_ON_ERROR("rerror", _state, _conf.rerror, \ BLOCKDEV_ON_ERROR_AUTO), \ DEFINE_PROP_BLOCKDEV_ON_ERROR("werror", _state, _conf.werror, \ - BLOCKDEV_ON_ERROR_AUTO) + BLOCKDEV_ON_ERROR_AUTO), \ + DEFINE_PROP_BLOCKDEV_RETRY_INTERVAL("retry_interval", _state, \ + _conf.retry_interval, 1000), \ + DEFINE_PROP_BLOCKDEV_RETRY_TIMEOUT("retry_timeout", _state, \ + _conf.retry_timeout, 0) /* Backend access helpers */ diff --git a/include/hw/boards.h b/include/hw/boards.h index 9c1c1901046c2ee7c8480551430872966907227d..f7ba05c56adfacf8f6c01c1b6bc4d94494ace4be 100644 --- a/include/hw/boards.h +++ b/include/hw/boards.h @@ -34,7 +34,15 @@ HotpluggableCPUList *machine_query_hotpluggable_cpus(MachineState *machine); void machine_set_cpu_numa_node(MachineState *machine, const CpuInstanceProperties *props, Error **errp); -void smp_parse(MachineState *ms, SMPConfiguration *config, Error **errp); +void machine_parse_smp_config(MachineState *ms, + const SMPConfiguration *config, Error **errp); +bool machine_parse_smp_cache(MachineState *ms, + const SmpCachePropertiesList *caches, + Error **errp); +uint64_t machine_get_cache_size(const MachineState *ms, + CacheLevelAndType cache); +void machine_set_cache_size(MachineState *ms, CacheLevelAndType cache, + uint64_t size); /** * machine_class_allow_dynamic_sysbus_dev: Add type to list of valid devices @@ -128,10 +136,12 @@ typedef struct { * SMPCompatProps: * @prefer_sockets - whether sockets are preferred over cores in smp parsing * @dies_supported - whether dies are supported by the machine + * @clusters_supported - whether clusters are supported by the machine */ typedef struct { bool prefer_sockets; bool dies_supported; + bool clusters_supported; } SMPCompatProps; /** @@ -298,7 +308,8 @@ typedef struct DeviceMemoryState { * @cpus: the number of present logical processors on the machine * @sockets: the number of sockets on the machine * @dies: the number of dies in one socket - * @cores: the number of cores in one die + * @clusters: the number of clusters in one die + * @cores: the number of cores in one cluster * @threads: the number of threads in one core * @max_cpus: the maximum number of logical processors on the machine */ @@ -306,11 +317,16 @@ typedef struct CpuTopology { unsigned int cpus; unsigned int sockets; unsigned int dies; + unsigned int clusters; unsigned int cores; unsigned int threads; unsigned int max_cpus; } CpuTopology; +typedef struct SmpCache { + SmpCacheProperties props[CACHE_LEVEL_AND_TYPE__MAX]; +} SmpCache; + /** * MachineState: */ @@ -354,6 +370,7 @@ struct MachineState { AccelState *accelerator; CPUArchIdList *possible_cpus; CpuTopology smp; + SmpCache smp_cache; struct NVDIMMState *nvdimms_state; struct NumaState *numa_state; }; diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h index e948e81f1a972898790f52ce274398fe591e1d55..9631c1e2f6da55ce44eddb2165c9c55ac7504632 100644 --- a/include/hw/core/cpu.h +++ b/include/hw/core/cpu.h @@ 
-411,6 +411,12 @@ struct CPUState { */ bool throttle_thread_scheduled; + /* + * Sleep throttle_us_per_full microseconds once dirty ring is full + * if dirty page rate limit is enabled. + */ + int64_t throttle_us_per_full; + bool ignore_memory_transaction_failures; struct hax_vcpu_state *hax_vcpu; diff --git a/include/hw/elf_ops.h b/include/hw/elf_ops.h index 995de8495c2777de60cec421756a1d4c545130a9..ea17fe9fb5d50b3dd84dd325f8a0c1432b732639 100644 --- a/include/hw/elf_ops.h +++ b/include/hw/elf_ops.h @@ -499,7 +499,7 @@ static ssize_t glue(load_elf, SZ)(const char *name, int fd, } if (data_swab) { - int j; + elf_word j; for (j = 0; j < file_size; j += (1 << data_swab)) { uint8_t *dp = data + j; switch (data_swab) { @@ -555,6 +555,19 @@ static ssize_t glue(load_elf, SZ)(const char *name, int fd, if (res != MEMTX_OK) { goto fail; } + /* + * We need to zero'ify the space that is not copied + * from file + */ + if (file_size < mem_size) { + res = address_space_set(as ? as : &address_space_memory, + addr + file_size, 0, + mem_size - file_size, + MEMTXATTRS_UNSPECIFIED); + if (res != MEMTX_OK) { + goto fail; + } + } } } diff --git a/include/hw/firmware/smbios.h b/include/hw/firmware/smbios.h index 5a0dd0c8cffff8524bdf5b9d0ba67cc955d36400..5a696cf75a5a837b3a81b4527f59196ca54439f4 100644 --- a/include/hw/firmware/smbios.h +++ b/include/hw/firmware/smbios.h @@ -193,6 +193,9 @@ struct smbios_type_4 { uint8_t thread_count; uint16_t processor_characteristics; uint16_t processor_family2; + uint16_t corecount2; + uint16_t enabledcorecount2; + uint16_t threadcount2; } QEMU_PACKED; /* SMBIOS type 11 - OEM strings */ diff --git a/include/hw/i386/sgx-epc.h b/include/hw/i386/sgx-epc.h index a6a65be854f88db0e0c7b5d565a0d07f722f3d11..581fac389a630d66bc173d389e51bdf713490cae 100644 --- a/include/hw/i386/sgx-epc.h +++ b/include/hw/i386/sgx-epc.h @@ -25,6 +25,7 @@ #define SGX_EPC_ADDR_PROP "addr" #define SGX_EPC_SIZE_PROP "size" #define SGX_EPC_MEMDEV_PROP "memdev" +#define SGX_EPC_NUMA_NODE_PROP "node" /** * SGXEPCDevice: @@ -38,6 +39,7 @@ typedef struct SGXEPCDevice { /* public */ uint64_t addr; + uint32_t node; HostMemoryBackendEpc *hostmem; } SGXEPCDevice; @@ -56,6 +58,7 @@ typedef struct SGXEPCState { } SGXEPCState; bool sgx_epc_get_section(int section_nr, uint64_t *addr, uint64_t *size); +void sgx_epc_build_srat(GArray *table_data); static inline uint64_t sgx_epc_above_4g_end(SGXEPCState *sgx_epc) { diff --git a/include/hw/i386/topology.h b/include/hw/i386/topology.h index 81573f6cfde011a477aaab67030ede3a7b8cf5a1..5a19679f618b0b875ebcd0670b59a32b1d0a84bf 100644 --- a/include/hw/i386/topology.h +++ b/include/hw/i386/topology.h @@ -24,7 +24,8 @@ #ifndef HW_I386_TOPOLOGY_H #define HW_I386_TOPOLOGY_H -/* This file implements the APIC-ID-based CPU topology enumeration logic, +/* + * This file implements the APIC-ID-based CPU topology enumeration logic, * documented at the following document: * Intel® 64 Architecture Processor Topology Enumeration * http://software.intel.com/en-us/articles/intel-64-architecture-processor-topology-enumeration/ @@ -41,7 +42,8 @@ #include "qemu/bitops.h" -/* APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support +/* + * APIC IDs can be 32-bit, but beware: APIC IDs > 255 require x2APIC support */ typedef uint32_t apic_id_t; @@ -58,8 +60,7 @@ typedef struct X86CPUTopoInfo { unsigned threads_per_core; } X86CPUTopoInfo; -/* Return the bit width needed for 'count' IDs - */ +/* Return the bit width needed for 'count' IDs */ static unsigned 
apicid_bitwidth_for_count(unsigned count) { g_assert(count >= 1); @@ -67,15 +68,13 @@ static unsigned apicid_bitwidth_for_count(unsigned count) return count ? 32 - clz32(count) : 0; } -/* Bit width of the SMT_ID (thread ID) field on the APIC ID - */ +/* Bit width of the SMT_ID (thread ID) field on the APIC ID */ static inline unsigned apicid_smt_width(X86CPUTopoInfo *topo_info) { return apicid_bitwidth_for_count(topo_info->threads_per_core); } -/* Bit width of the Core_ID field - */ +/* Bit width of the Core_ID field */ static inline unsigned apicid_core_width(X86CPUTopoInfo *topo_info) { return apicid_bitwidth_for_count(topo_info->cores_per_die); @@ -87,8 +86,7 @@ static inline unsigned apicid_die_width(X86CPUTopoInfo *topo_info) return apicid_bitwidth_for_count(topo_info->dies_per_pkg); } -/* Bit offset of the Core_ID field - */ +/* Bit offset of the Core_ID field */ static inline unsigned apicid_core_offset(X86CPUTopoInfo *topo_info) { return apicid_smt_width(topo_info); @@ -100,14 +98,14 @@ static inline unsigned apicid_die_offset(X86CPUTopoInfo *topo_info) return apicid_core_offset(topo_info) + apicid_core_width(topo_info); } -/* Bit offset of the Pkg_ID (socket ID) field - */ +/* Bit offset of the Pkg_ID (socket ID) field */ static inline unsigned apicid_pkg_offset(X86CPUTopoInfo *topo_info) { return apicid_die_offset(topo_info) + apicid_die_width(topo_info); } -/* Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID +/* + * Make APIC ID for the CPU based on Pkg_ID, Core_ID, SMT_ID * * The caller must make sure core_id < nr_cores and smt_id < nr_threads. */ @@ -120,7 +118,8 @@ static inline apic_id_t x86_apicid_from_topo_ids(X86CPUTopoInfo *topo_info, topo_ids->smt_id; } -/* Calculate thread/core/package IDs for a specific topology, +/* + * Calculate thread/core/package IDs for a specific topology, * based on (contiguous) CPU index */ static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info, @@ -137,7 +136,8 @@ static inline void x86_topo_ids_from_idx(X86CPUTopoInfo *topo_info, topo_ids->smt_id = cpu_index % nr_threads; } -/* Calculate thread/core/package IDs for a specific topology, +/* + * Calculate thread/core/package IDs for a specific topology, * based on APIC ID */ static inline void x86_topo_ids_from_apicid(apic_id_t apicid, @@ -155,7 +155,8 @@ static inline void x86_topo_ids_from_apicid(apic_id_t apicid, topo_ids->pkg_id = apicid >> apicid_pkg_offset(topo_info); } -/* Make APIC ID for the CPU 'cpu_index' +/* + * Make APIC ID for the CPU 'cpu_index' * * 'cpu_index' is a sequential, contiguous ID for the CPU. 
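 *
 * A worked example (numbers illustrative): with 2 dies per package,
 * 4 cores per die and 2 threads per core, the SMT, core and die fields
 * are 1, 2 and 1 bits wide, so cpu_index 11 maps to pkg_id 0, die_id 1,
 * core_id 1, smt_id 1, i.e. APIC ID 0xb (equal to cpu_index here only
 * because every count is a power of two).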
*/ diff --git a/include/hw/intc/arm_gicv3.h b/include/hw/intc/arm_gicv3.h index a81a6ae7ecad6d1251738842e0ea67e94495eb27..e360556bd5031ee77ab7b1ff23e9041d4ce579a8 100644 --- a/include/hw/intc/arm_gicv3.h +++ b/include/hw/intc/arm_gicv3.h @@ -26,6 +26,8 @@ struct ARMGICv3Class { ARMGICv3CommonClass parent_class; /*< public >*/ + CPUHotplugRealize parent_cpu_hotplug_realize; + DeviceRealize parent_realize; }; diff --git a/include/hw/intc/arm_gicv3_common.h b/include/hw/intc/arm_gicv3_common.h index fc38e4b7dca4ac7f4a5e0377a5cc300e9a13feaf..c208a191ff26a94e312cf633007b5275ed5a3e14 100644 --- a/include/hw/intc/arm_gicv3_common.h +++ b/include/hw/intc/arm_gicv3_common.h @@ -306,11 +306,15 @@ typedef struct ARMGICv3CommonClass ARMGICv3CommonClass; DECLARE_OBJ_CHECKERS(GICv3State, ARMGICv3CommonClass, ARM_GICV3_COMMON, TYPE_ARM_GICV3_COMMON) +typedef void (*CPUHotplugRealize)(GICv3State *s, int ncpu); + struct ARMGICv3CommonClass { /*< private >*/ SysBusDeviceClass parent_class; /*< public >*/ + CPUHotplugRealize cpu_hotplug_realize; + void (*pre_save)(GICv3State *s); void (*post_load)(GICv3State *s); }; diff --git a/include/hw/loongarch/bios.h b/include/hw/loongarch/bios.h new file mode 100644 index 0000000000000000000000000000000000000000..8e0f6c7d6455d099a0440dbd4aaaa6ff8f0d3c80 --- /dev/null +++ b/include/hw/loongarch/bios.h @@ -0,0 +1,24 @@ +/* + * bios on Loongarch system. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/units.h" +#include "cpu.h" + +#define BIOS_SIZE (4 * MiB) +#define BIOS_FILENAME "loongarch_bios.bin" diff --git a/include/hw/loongarch/cpudevs.h b/include/hw/loongarch/cpudevs.h new file mode 100644 index 0000000000000000000000000000000000000000..ea4007f8fab2343ebc288888ade6d38d9d5ba2f2 --- /dev/null +++ b/include/hw/loongarch/cpudevs.h @@ -0,0 +1,71 @@ +/* + * cpu device emulation on Loongarch system. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef HW_LOONGARCH_CPUDEVS_H +#define HW_LOONGARCH_CPUDEVS_H + +#include "target/loongarch64/cpu-qom.h" + +/* Definitions for LOONGARCH CPU internal devices. 
*/ +#define MAX_GIPI_CORE_NUM 256 +#define MAX_GIPI_MBX_NUM 4 + +#define LS3A_INTC_IP 8 +#define MAX_CORES 256 +#define EXTIOI_IRQS (256) +#define EXTIOI_IRQS_BITMAP_SIZE (256 / 8) +/* map to ipnum per 32 irqs */ +#define EXTIOI_IRQS_IPMAP_SIZE (256 / 32) + +typedef struct gipi_core { + uint32_t status; + uint32_t en; + uint32_t set; + uint32_t clear; + uint64_t buf[MAX_GIPI_MBX_NUM]; + qemu_irq irq; +} gipi_core; + +typedef struct gipiState { + gipi_core core[MAX_GIPI_CORE_NUM]; +} gipiState; + +typedef struct apicState { + /* hardware state */ + uint8_t ext_en[EXTIOI_IRQS_BITMAP_SIZE]; + uint8_t ext_bounce[EXTIOI_IRQS_BITMAP_SIZE]; + uint8_t ext_isr[EXTIOI_IRQS_BITMAP_SIZE]; + uint8_t ext_coreisr[MAX_CORES][EXTIOI_IRQS_BITMAP_SIZE]; + uint8_t ext_ipmap[EXTIOI_IRQS_IPMAP_SIZE]; + uint8_t ext_coremap[EXTIOI_IRQS]; + uint16_t ext_nodetype[16]; + uint64_t ext_control; + + /* software state */ + uint8_t ext_sw_ipmap[EXTIOI_IRQS]; + uint8_t ext_sw_coremap[EXTIOI_IRQS]; + uint8_t ext_ipisr[MAX_CORES * LS3A_INTC_IP][EXTIOI_IRQS_BITMAP_SIZE]; + + qemu_irq parent_irq[MAX_CORES][LS3A_INTC_IP]; + qemu_irq *irq; +} apicState; + +void cpu_init_irq(LOONGARCHCPU *cpu); +void cpu_loongarch_clock_init(LOONGARCHCPU *cpu); +#endif diff --git a/include/hw/loongarch/larch.h b/include/hw/loongarch/larch.h new file mode 100644 index 0000000000000000000000000000000000000000..a401892844a4f20a8e8f79f45733f34c8318d797 --- /dev/null +++ b/include/hw/loongarch/larch.h @@ -0,0 +1,170 @@ +/* + * Hotplug emulation on Loongarch system. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#ifndef HW_LOONGARCH_H +#define HW_LOONGARCH_H + +#include "target/loongarch64/cpu.h" +#include "qemu-common.h" +#include "exec/memory.h" +#include "hw/mem/pc-dimm.h" +#include "hw/hotplug.h" +#include "hw/boards.h" +#include "hw/acpi/acpi.h" +#include "qemu/notify.h" +#include "qemu/error-report.h" +#include "qemu/queue.h" +#include "hw/acpi/memory_hotplug.h" +#include "hw/loongarch/cpudevs.h" +#include "hw/block/flash.h" + +#define LOONGARCH_MAX_VCPUS 256 +#define LOONGARCH_MAX_PFLASH 2 +/* 256MB alignment for hotplug memory region */ +#define LOONGARCH_HOTPLUG_MEM_ALIGN (1ULL << 28) +#define LOONGARCH_MAX_RAM_SLOTS 10 + +#ifdef CONFIG_KVM +#define LS_ISA_IO_SIZE 0x02000000 +#else +#define LS_ISA_IO_SIZE 0x00010000 +#endif + +/* Memory types: */ +#define SYSTEM_RAM 1 +#define SYSTEM_RAM_RESERVED 2 +#define ACPI_TABLE 3 +#define ACPI_NVS 4 +#define SYSTEM_PMEM 5 + +#define MAX_MEM_MAP 128 + +typedef struct LoongarchMachineClass { + /*< private >*/ + MachineClass parent_class; + + /* Methods: */ + HotplugHandler *(*get_hotplug_handler)(MachineState *machine, + DeviceState *dev); + + bool has_acpi_build; + + /* save different cpu address*/ + uint64_t isa_io_base; + uint64_t ht_control_regs_base; + uint64_t hpet_mmio_addr; + uint64_t smbus_cfg_base; + uint64_t pciecfg_base; + uint64_t ls7a_ioapic_reg_base; + uint32_t node_shift; + char cpu_name[40]; + char bridge_name[16]; + +} LoongarchMachineClass; + +typedef struct ResetData { + LOONGARCHCPU *cpu; + uint64_t vector; +} ResetData; + +typedef struct LoongarchMachineState { + /*< private >*/ + MachineState parent_obj; + + /* */ + ram_addr_t hotplug_memory_size; + + /* State for other subsystems/APIs: */ + Notifier machine_done; + /* Pointers to devices and objects: */ + HotplugHandler *acpi_dev; + int ram_slots; + ResetData *reset_info[LOONGARCH_MAX_VCPUS]; + DeviceState *rtc; + gipiState *gipi; + apicState *apic; + + FWCfgState *fw_cfg; + bool acpi_build_enabled; + bool apic_xrupt_override; + CPUArchIdList *possible_cpus; + PFlashCFI01 *flash[LOONGARCH_MAX_PFLASH]; + void *fdt; + int fdt_size; + unsigned int hotpluged_cpu_num; + DeviceState *platform_bus_dev; + OnOffAuto acpi; + char *oem_id; + char *oem_table_id; +} LoongarchMachineState; + +#define LOONGARCH_MACHINE_ACPI_DEVICE_PROP "loongarch-acpi-device" +#define TYPE_LOONGARCH_MACHINE "loongarch-machine" + +#define LoongarchMACHINE(obj) \ + OBJECT_CHECK(LoongarchMachineState, (obj), TYPE_LOONGARCH_MACHINE) +#define LoongarchMACHINE_GET_CLASS(obj) \ + OBJECT_GET_CLASS(LoongarchMachineClass, (obj), TYPE_LOONGARCH_MACHINE) +#define LoongarchMACHINE_CLASS(klass) \ + OBJECT_CLASS_CHECK(LoongarchMachineClass, (klass), TYPE_LOONGARCH_MACHINE) + +#define DEFINE_LOONGARCH_MACHINE(suffix, namestr, initfn, optsfn) \ + static void loongarch_machine_##suffix##_class_init(ObjectClass *oc, \ + void *data) \ + { \ + MachineClass *mc = MACHINE_CLASS(oc); \ + optsfn(mc); \ + mc->init = initfn; \ + } \ + static const TypeInfo loongarch_machine_type_##suffix = { \ + .name = namestr TYPE_MACHINE_SUFFIX, \ + .parent = TYPE_LOONGARCH_MACHINE, \ + .class_init = loongarch_machine_##suffix##_class_init, \ + }; \ + static void loongarch_machine_init_##suffix(void) \ + { \ + type_register(&loongarch_machine_type_##suffix); \ + } \ + type_init(loongarch_machine_init_##suffix) + +void loongarch_machine_device_unplug_request(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp); +void longson_machine_device_unplug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp); +HotplugHandler 
*loongarch_get_hotpug_handler(MachineState *machine, + DeviceState *dev); +void loongarch_machine_device_pre_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp); +void loongarch_machine_device_plug(HotplugHandler *hotplug_dev, + DeviceState *dev, Error **errp); + +LOONGARCHCPU *loongarch_cpu_create(MachineState *machine, LOONGARCHCPU *cpu, + Error **errp); +void loongarch_cpu_destroy(MachineState *machine, LOONGARCHCPU *cpu); +int cpu_init_ipi(LoongarchMachineState *ms, qemu_irq parent, int cpu); +int cpu_init_apic(LoongarchMachineState *ms, CPULOONGARCHState *env, int cpu); +int la_memmap_add_entry(uint64_t address, uint64_t length, uint32_t type); +bool loongarch_is_acpi_enabled(LoongarchMachineState *vms); + +/* acpi-build.c */ +void ls7a_madt_cpu_entry(AcpiDeviceIf *adev, int uid, + const CPUArchIdList *apic_ids, GArray *entry, + bool force_enabled); +void slave_cpu_reset(void *opaque); +#endif diff --git a/include/hw/loongarch/ls7a.h b/include/hw/loongarch/ls7a.h new file mode 100644 index 0000000000000000000000000000000000000000..31165cb8779b7dfdf5c4981e603681dd44e83324 --- /dev/null +++ b/include/hw/loongarch/ls7a.h @@ -0,0 +1,169 @@ +/* + * Acpi emulation on Loongarch system. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#ifndef HW_LS7A_H +#define HW_LS7A_H + +#include "hw/hw.h" +#include "hw/isa/isa.h" +#include "hw/sysbus.h" +#include "hw/isa/apm.h" +#include "hw/pci/pci.h" +#include "hw/pci/pcie_host.h" +#include "hw/pci/pci_bridge.h" +#include "hw/acpi/acpi.h" +#include "hw/acpi/ls7a.h" +#include "hw/pci/pci_bus.h" + +/* LS7A PCH Registers (Misc, Confreg) */ +#define LS7A_PCH_REG_BASE 0x10000000UL +#define LS3A5K_LS7A_IOAPIC_REG_BASE (LS7A_PCH_REG_BASE) +#define LS7A_MISC_REG_BASE (LS7A_PCH_REG_BASE + 0x00080000) +#define LS7A_ACPI_REG_BASE (LS7A_MISC_REG_BASE + 0x00050000) + +#define LOONGARCH_PCH_IRQ_BASE 64 +#define LS7A_UART_IRQ (LOONGARCH_PCH_IRQ_BASE + 2) +#define LS7A_RTC_IRQ (LOONGARCH_PCH_IRQ_BASE + 3) +#define LS7A_SCI_IRQ (LOONGARCH_PCH_IRQ_BASE + 4) +#define LS7A_ACPI_IO_BASE 0x800 +#define LS7A_ACPI_IO_SIZE 0x100 +#define LS7A_PM_EVT_BLK (0x0C) /* 4 bytes */ +#define LS7A_PM_CNT_BLK (0x14) /* 2 bytes */ +#define LS7A_GPE0_STS_REG (0x28) /* 4 bytes */ +#define LS7A_GPE0_ENA_REG (0x2C) /* 4 bytes */ +#define LS7A_GPE0_RESET_REG (0x30) /* 4 bytes */ +#define LS7A_PM_TMR_BLK (0x18) /* 4 bytes */ +#define LS7A_GPE0_LEN (8) +#define LS7A_RTC_REG_BASE (LS7A_MISC_REG_BASE + 0x00050100) +#define LS7A_RTC_LEN (0x100) + +#define ACPI_IO_BASE (LS7A_ACPI_REG_BASE) +#define ACPI_GPE0_LEN (LS7A_GPE0_LEN) +#define ACPI_IO_SIZE (LS7A_ACPI_IO_SIZE) +#define ACPI_SCI_IRQ (LS7A_SCI_IRQ) + +#define VIRT_PLATFORM_BUS_BASEADDRESS 0x16000000 +#define VIRT_PLATFORM_BUS_SIZE 0x02000000 +#define VIRT_PLATFORM_BUS_NUM_IRQS 2 +#define VIRT_PLATFORM_BUS_IRQ (LOONGARCH_PCH_IRQ_BASE + 5) + +#define LS3A5K_ISA_IO_BASE 0x18000000UL +#define LS_BIOS_BASE 0x1c000000 +#define LS_BIOS_VAR_BASE 0x1c3a0000 +#define LS_BIOS_SIZE (4 * 1024 * 1024) +#define LS_FDT_BASE 0x1c400000 +#define LS_FDT_SIZE 0x00100000 + +#define FW_CFG_ADDR 0x1e020000 +#define LS7A_REG_BASE 0x1FE00000 +#define LS7A_UART_BASE 0x1fe001e0 +#define LS7A_UART_LEN 0x8 +#define SMP_GIPI_MAILBOX 0x1f000000ULL +#define CORE0_STATUS_OFF 0x000 +#define CORE0_EN_OFF 0x004 +#define CORE0_SET_OFF 0x008 +#define CORE0_CLEAR_OFF 0x00c +#define CORE0_BUF_20 0x020 +#define CORE0_BUF_28 0x028 +#define CORE0_BUF_30 0x030 +#define CORE0_BUF_38 0x038 +#define CORE0_IPI_SEND 0x040 +#define CORE0_MAIL_SEND 0x048 +#define INT_ROUTER_REGS_BASE 0x1fe01400UL +#define INT_ROUTER_REGS_SIZE 0x100 +#define INT_ROUTER_REGS_SYS_INT0 0x00 +#define INT_ROUTER_REGS_SYS_INT1 0x01 +#define INT_ROUTER_REGS_SYS_INT2 0x02 +#define INT_ROUTER_REGS_SYS_INT3 0x03 +#define INT_ROUTER_REGS_PCI_INT0 0x04 +#define INT_ROUTER_REGS_PCI_INT1 0x05 +#define INT_ROUTER_REGS_PCI_INT2 0x06 +#define INT_ROUTER_REGS_PCI_INT3 0x07 +#define INT_ROUTER_REGS_MATRIX_INT0 0x08 +#define INT_ROUTER_REGS_MATRIX_INT1 0x09 +#define INT_ROUTER_REGS_LPC_INT 0x0a +#define INT_ROUTER_REGS_MC0 0x0b +#define INT_ROUTER_REGS_MC1 0x0c +#define INT_ROUTER_REGS_BARRIER 0x0d +#define INT_ROUTER_REGS_THSENS_INT 0x0e +#define INT_ROUTER_REGS_PCI_PERR 0x0f +#define INT_ROUTER_REGS_HT0_INT0 0x10 +#define INT_ROUTER_REGS_HT0_INT1 0x11 +#define INT_ROUTER_REGS_HT0_INT2 0x12 +#define INT_ROUTER_REGS_HT0_INT3 0x13 +#define INT_ROUTER_REGS_HT0_INT4 0x14 +#define INT_ROUTER_REGS_HT0_INT5 0x15 +#define INT_ROUTER_REGS_HT0_INT6 0x16 +#define INT_ROUTER_REGS_HT0_INT7 0x17 +#define INT_ROUTER_REGS_HT1_INT0 0x18 +#define INT_ROUTER_REGS_HT1_INT1 0x19 +#define INT_ROUTER_REGS_HT1_INT2 0x1a +#define INT_ROUTER_REGS_HT1_INT3 0x1b +#define INT_ROUTER_REGS_HT1_INT4 0x1c +#define INT_ROUTER_REGS_HT1_INT5 0x1d +#define INT_ROUTER_REGS_HT1_INT6 
0x1e +#define INT_ROUTER_REGS_HT1_INT7 0x1f +#define INT_ROUTER_REGS_ISR 0x20 +#define INT_ROUTER_REGS_EN 0x24 +#define INT_ROUTER_REGS_EN_SET 0x28 +#define INT_ROUTER_REGS_EN_CLR 0x2c +#define INT_ROUTER_REGS_EDGE 0x38 +#define INT_ROUTER_REGS_CORE0_INTISR 0x40 +#define INT_ROUTER_REGS_CORE1_INTISR 0x48 +#define INT_ROUTER_REGS_CORE2_INTISR 0x50 +#define INT_ROUTER_REGS_CORE3_INTISR 0x58 + +#define LS_PCIECFG_BASE 0x20000000 +#define LS_PCIECFG_SIZE 0x08000000 +#define MSI_ADDR_LOW 0x2FF00000 +#define MSI_ADDR_HI 0x0 + +#define PCIE_MEMORY_BASE 0x40000000 +#define PCIE_MEMORY_SIZE 0x40000000 + +typedef struct LS7APCIState LS7APCIState; +typedef struct LS7APCIEHost { + PCIExpressHost parent_obj; + MemoryRegion io_ioport; + MemoryRegion io_mmio; + LS7APCIState *pci_dev; +} LS7APCIEHost; + +struct LS7APCIState { + PCIDevice dev; + + LS7APCIEHost *pciehost; + + /* LS7A registers */ + MemoryRegion iomem; + LS7APCIPMRegs pm; +}; + +#define TYPE_LS7A_PCIE_HOST_BRIDGE "ls7a1000-pciehost" +#define LS7A_PCIE_HOST_BRIDGE(obj) \ + OBJECT_CHECK(LS7APCIEHost, (obj), TYPE_LS7A_PCIE_HOST_BRIDGE) + +#define TYPE_PCIE_LS7A "ls7a1000_pcie" +#define PCIE_LS7A(obj) OBJECT_CHECK(LS7APCIState, (obj), TYPE_PCIE_LS7A) + +PCIBus *ls7a_init(MachineState *machine, qemu_irq *irq, + DeviceState **ls7a_dev); +LS7APCIState *get_ls7a_type(Object *obj); + +#endif /* HW_LS7A_H */ diff --git a/include/hw/loongarch/sysbus-fdt.h b/include/hw/loongarch/sysbus-fdt.h new file mode 100644 index 0000000000000000000000000000000000000000..6bf53097e11e0a338afcc81a5b702b2e4977c101 --- /dev/null +++ b/include/hw/loongarch/sysbus-fdt.h @@ -0,0 +1,33 @@ +/* + * Dynamic sysbus device tree node generation API + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef HW_ARM_SYSBUS_FDT_H +#define HW_ARM_SYSBUS_FDT_H + +#include "exec/hwaddr.h" + +/** + * platform_bus_add_all_fdt_nodes - create all the platform bus nodes + * + * builds the parent platform bus node and all the nodes of dynamic + * sysbus devices attached to it. 
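+ *
+ * A typical call from machine-done code (arguments illustrative):
+ * platform_bus_add_all_fdt_nodes(lsms->fdt, intc_node_path,
+ *                                VIRT_PLATFORM_BUS_BASEADDRESS,
+ *                                VIRT_PLATFORM_BUS_SIZE,
+ *                                VIRT_PLATFORM_BUS_IRQ);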
+ */ +void platform_bus_add_all_fdt_nodes(void *fdt, const char *intc, hwaddr addr, + hwaddr bus_size, int irq_start); +#endif diff --git a/include/hw/pci-bridge/xio3130_downstream.h b/include/hw/pci-bridge/xio3130_downstream.h new file mode 100644 index 0000000000000000000000000000000000000000..1d10139aeab2c955e7a3cd93561d49aa3dc7c1a9 --- /dev/null +++ b/include/hw/pci-bridge/xio3130_downstream.h @@ -0,0 +1,15 @@ +/* + * TI X3130 pci express downstream port switch + * + * Copyright (C) 2022 Igor Mammedov + * + * SPDX-License-Identifier: GPL-2.0-or-later + */ + +#ifndef HW_PCI_BRIDGE_XIO3130_DOWNSTREAM_H +#define HW_PCI_BRIDGE_XIO3130_DOWNSTREAM_H + +#define TYPE_XIO3130_DOWNSTREAM "xio3130-downstream" + +#endif + diff --git a/include/hw/pci/pci.h b/include/hw/pci/pci.h index e7cdf2d5ec5d4f6212a67bc29e68db753537c65d..3811724d313f50c06cb2fd06103af1c65d4ed6ee 100644 --- a/include/hw/pci/pci.h +++ b/include/hw/pci/pci.h @@ -18,7 +18,7 @@ extern bool pci_available; #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f) #define PCI_FUNC(devfn) ((devfn) & 0x07) -#define PCI_BUILD_BDF(bus, devfn) ((bus << 8) | (devfn)) +#define PCI_BUILD_BDF(bus, devfn) (((bus) << 8) | (devfn)) #define PCI_BUS_MAX 256 #define PCI_DEVFN_MAX 256 #define PCI_SLOT_MAX 32 @@ -361,6 +361,9 @@ struct PCIDevice { /* ID of standby device in net_failover pair */ char *failover_pair_id; uint32_t acpi_index; + + /* Maximum DMA bounce buffer size used for indirect memory map requests */ + uint32_t max_bounce_buffer_size; }; void pci_register_bar(PCIDevice *pci_dev, int region_num, @@ -735,6 +738,11 @@ void lsi53c8xx_handle_legacy_cmdline(DeviceState *lsi_dev); qemu_irq pci_allocate_irq(PCIDevice *pci_dev); void pci_set_irq(PCIDevice *pci_dev, int level); +static inline int pci_intx(PCIDevice *pci_dev) +{ + return pci_get_byte(pci_dev->config + PCI_INTERRUPT_PIN) - 1; +} + static inline void pci_irq_assert(PCIDevice *pci_dev) { pci_set_irq(pci_dev, 1); @@ -745,16 +753,6 @@ static inline void pci_irq_deassert(PCIDevice *pci_dev) pci_set_irq(pci_dev, 0); } -/* - * FIXME: PCI does not work this way. - * All the callers to this method should be fixed. 
- */ -static inline void pci_irq_pulse(PCIDevice *pci_dev) -{ - pci_irq_assert(pci_dev); - pci_irq_deassert(pci_dev); -} - static inline int pci_is_express(const PCIDevice *d) { return d->cap_present & QEMU_PCI_CAP_EXPRESS; @@ -806,9 +804,10 @@ static inline AddressSpace *pci_get_address_space(PCIDevice *dev) */ static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr, void *buf, dma_addr_t len, - DMADirection dir) + DMADirection dir, MemTxAttrs attrs) { - return dma_memory_rw(pci_get_address_space(dev), addr, buf, len, dir); + return dma_memory_rw(pci_get_address_space(dev), addr, buf, len, + dir, attrs); } /** @@ -826,7 +825,8 @@ static inline MemTxResult pci_dma_rw(PCIDevice *dev, dma_addr_t addr, static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr, void *buf, dma_addr_t len) { - return pci_dma_rw(dev, addr, buf, len, DMA_DIRECTION_TO_DEVICE); + return pci_dma_rw(dev, addr, buf, len, + DMA_DIRECTION_TO_DEVICE, MEMTXATTRS_UNSPECIFIED); } /** @@ -844,19 +844,24 @@ static inline MemTxResult pci_dma_read(PCIDevice *dev, dma_addr_t addr, static inline MemTxResult pci_dma_write(PCIDevice *dev, dma_addr_t addr, const void *buf, dma_addr_t len) { - return pci_dma_rw(dev, addr, (void *) buf, len, DMA_DIRECTION_FROM_DEVICE); -} - -#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \ - static inline uint##_bits##_t ld##_l##_pci_dma(PCIDevice *dev, \ - dma_addr_t addr) \ - { \ - return ld##_l##_dma(pci_get_address_space(dev), addr); \ - } \ - static inline void st##_s##_pci_dma(PCIDevice *dev, \ - dma_addr_t addr, uint##_bits##_t val) \ - { \ - st##_s##_dma(pci_get_address_space(dev), addr, val); \ + return pci_dma_rw(dev, addr, (void *) buf, len, + DMA_DIRECTION_FROM_DEVICE, MEMTXATTRS_UNSPECIFIED); +} + +#define PCI_DMA_DEFINE_LDST(_l, _s, _bits) \ + static inline MemTxResult ld##_l##_pci_dma(PCIDevice *dev, \ + dma_addr_t addr, \ + uint##_bits##_t *val, \ + MemTxAttrs attrs) \ + { \ + return ld##_l##_dma(pci_get_address_space(dev), addr, val, attrs); \ + } \ + static inline MemTxResult st##_s##_pci_dma(PCIDevice *dev, \ + dma_addr_t addr, \ + uint##_bits##_t val, \ + MemTxAttrs attrs) \ + { \ + return st##_s##_dma(pci_get_address_space(dev), addr, val, attrs); \ } PCI_DMA_DEFINE_LDST(ub, b, 8); @@ -874,7 +879,8 @@ static inline void *pci_dma_map(PCIDevice *dev, dma_addr_t addr, { void *buf; - buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir); + buf = dma_memory_map(pci_get_address_space(dev), addr, plen, dir, + MEMTXATTRS_UNSPECIFIED); return buf; } diff --git a/include/hw/pci/pcie.h b/include/hw/pci/pcie.h index 6063bee0ec632c563f236f520aef8e52dc87b182..c27368d077814bb67278613f3ca826551e419a1c 100644 --- a/include/hw/pci/pcie.h +++ b/include/hw/pci/pcie.h @@ -112,6 +112,7 @@ void pcie_cap_slot_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int len); int pcie_cap_slot_post_load(void *opaque, int version_id); void pcie_cap_slot_push_attention_button(PCIDevice *dev); +void pcie_cap_slot_enable_power(PCIDevice *dev); void pcie_cap_root_init(PCIDevice *dev); void pcie_cap_root_reset(PCIDevice *dev); diff --git a/include/hw/pci/pcie_port.h b/include/hw/pci/pcie_port.h index e25b289ce84c74312b9310422346bf4e107ea23e..5b80a13c4d569893e42807cc47c9238abe848bbe 100644 --- a/include/hw/pci/pcie_port.h +++ b/include/hw/pci/pcie_port.h @@ -51,6 +51,9 @@ struct PCIESlot { uint8_t chassis; uint16_t slot; + uint8_t fast_plug; + uint8_t fast_unplug; + PCIExpLinkSpeed speed; PCIExpLinkWidth width; diff --git a/include/hw/ppc/mac_dbdma.h b/include/hw/ppc/mac_dbdma.h 
index 4a3f644516b3bbbb12d5b8b42d9b1b078d68e051..c774f6bf84f1a4f6b40306ad1f76da8e3bd42041 100644 --- a/include/hw/ppc/mac_dbdma.h +++ b/include/hw/ppc/mac_dbdma.h @@ -44,10 +44,6 @@ struct DBDMA_io { DBDMA_end dma_end; /* DMA is in progress, don't start another one */ bool processing; - /* DMA request */ - void *dma_mem; - dma_addr_t dma_len; - DMADirection dir; }; /* diff --git a/include/hw/ppc/openpic.h b/include/hw/ppc/openpic.h index ebdaf8a4932e75c6b29992df8ad61a45b64e388f..44976e6b073691d169e5667463804fa01a30813a 100644 --- a/include/hw/ppc/openpic.h +++ b/include/hw/ppc/openpic.h @@ -14,7 +14,7 @@ enum { OPENPIC_OUTPUT_INT = 0, /* IRQ */ OPENPIC_OUTPUT_CINT, /* critical IRQ */ OPENPIC_OUTPUT_MCK, /* Machine check event */ - OPENPIC_OUTPUT_DEBUG, /* Inconditional debug event */ + OPENPIC_OUTPUT_DEBUG, /* Unconditional debug event */ OPENPIC_OUTPUT_RESET, /* Core reset event */ OPENPIC_OUTPUT_NB, }; diff --git a/include/hw/ppc/spapr.h b/include/hw/ppc/spapr.h index ee7504b9760b289df4070d1c9149c0933f50f0da..8390dd774339dc81d253dd39c3d0c5d2c8fa9a86 100644 --- a/include/hw/ppc/spapr.h +++ b/include/hw/ppc/spapr.h @@ -179,7 +179,7 @@ struct SpaprMachineState { SpaprResizeHpt resize_hpt; void *htab; uint32_t htab_shift; - uint64_t patb_entry; /* Process tbl registed in H_REGISTER_PROC_TBL */ + uint64_t patb_entry; /* Process tbl registered in H_REGISTER_PROC_TBL */ SpaprPendingHpt *pending_hpt; /* in-progress resize */ hwaddr rma_size; @@ -754,7 +754,8 @@ static inline uint64_t ppc64_phys_to_real(uint64_t addr) static inline uint32_t rtas_ld(target_ulong phys, int n) { - return ldl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n)); + return ldl_be_phys(&address_space_memory, + ppc64_phys_to_real(phys + 4 * n)); } static inline uint64_t rtas_ldq(target_ulong phys, int n) @@ -764,7 +765,7 @@ static inline uint64_t rtas_ldq(target_ulong phys, int n) static inline void rtas_st(target_ulong phys, int n, uint32_t val) { - stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4*n), val); + stl_be_phys(&address_space_memory, ppc64_phys_to_real(phys + 4 * n), val); } typedef void (*spapr_rtas_fn)(PowerPCCPU *cpu, SpaprMachineState *sm, diff --git a/include/hw/ppc/spapr_vio.h b/include/hw/ppc/spapr_vio.h index 4bea87f39cc4ee100da1e6454276becbf300b449..7eae1a484780ba623c60fc5bfe6d053b8838f829 100644 --- a/include/hw/ppc/spapr_vio.h +++ b/include/hw/ppc/spapr_vio.h @@ -91,35 +91,47 @@ static inline void spapr_vio_irq_pulse(SpaprVioDevice *dev) static inline bool spapr_vio_dma_valid(SpaprVioDevice *dev, uint64_t taddr, uint32_t size, DMADirection dir) { - return dma_memory_valid(&dev->as, taddr, size, dir); + return dma_memory_valid(&dev->as, taddr, size, dir, MEMTXATTRS_UNSPECIFIED); } static inline int spapr_vio_dma_read(SpaprVioDevice *dev, uint64_t taddr, void *buf, uint32_t size) { - return (dma_memory_read(&dev->as, taddr, buf, size) != 0) ? + return (dma_memory_read(&dev->as, taddr, + buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ? H_DEST_PARM : H_SUCCESS; } static inline int spapr_vio_dma_write(SpaprVioDevice *dev, uint64_t taddr, const void *buf, uint32_t size) { - return (dma_memory_write(&dev->as, taddr, buf, size) != 0) ? + return (dma_memory_write(&dev->as, taddr, + buf, size, MEMTXATTRS_UNSPECIFIED) != 0) ? H_DEST_PARM : H_SUCCESS; } static inline int spapr_vio_dma_set(SpaprVioDevice *dev, uint64_t taddr, uint8_t c, uint32_t size) { - return (dma_memory_set(&dev->as, taddr, c, size) != 0) ? + return (dma_memory_set(&dev->as, taddr, + c, size, MEMTXATTRS_UNSPECIFIED) != 0) ? 
H_DEST_PARM : H_SUCCESS; } -#define vio_stb(_dev, _addr, _val) (stb_dma(&(_dev)->as, (_addr), (_val))) -#define vio_sth(_dev, _addr, _val) (stw_be_dma(&(_dev)->as, (_addr), (_val))) -#define vio_stl(_dev, _addr, _val) (stl_be_dma(&(_dev)->as, (_addr), (_val))) -#define vio_stq(_dev, _addr, _val) (stq_be_dma(&(_dev)->as, (_addr), (_val))) -#define vio_ldq(_dev, _addr) (ldq_be_dma(&(_dev)->as, (_addr))) +#define vio_stb(_dev, _addr, _val) \ + (stb_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED)) +#define vio_sth(_dev, _addr, _val) \ + (stw_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED)) +#define vio_stl(_dev, _addr, _val) \ + (stl_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED)) +#define vio_stq(_dev, _addr, _val) \ + (stq_be_dma(&(_dev)->as, (_addr), (_val), MEMTXATTRS_UNSPECIFIED)) +#define vio_ldq(_dev, _addr) \ + ({ \ + uint64_t _val; \ + ldq_be_dma(&(_dev)->as, (_addr), &_val, MEMTXATTRS_UNSPECIFIED); \ + _val; \ + }) int spapr_vio_send_crq(SpaprVioDevice *dev, uint8_t *crq); diff --git a/include/hw/qdev-core.h b/include/hw/qdev-core.h index 20d3066595e4436672bf82557798618c90c0e09a..a1169c1c9aa001d3400f686f87cc062f88815b27 100644 --- a/include/hw/qdev-core.h +++ b/include/hw/qdev-core.h @@ -162,6 +162,10 @@ struct NamedClockList { QLIST_ENTRY(NamedClockList) node; }; +typedef struct { + bool engaged_in_io; +} MemReentrancyGuard; + /** * DeviceState: * @realized: Indicates whether the device has been fully constructed. @@ -193,6 +197,9 @@ struct DeviceState { int instance_id_alias; int alias_required_for_version; ResettableState reset; + + /* Is the device currently in mmio/pio/dma? Used to prevent re-entrancy */ + MemReentrancyGuard mem_reentrancy_guard; }; struct DeviceListener { @@ -321,6 +328,7 @@ compat_props_add(GPtrArray *arr, * The returned object has a reference count of 1. */ DeviceState *qdev_new(const char *name); + /** * qdev_try_new: Try to create a device on the heap * @name: device type to create @@ -329,6 +337,7 @@ DeviceState *qdev_new(const char *name); * does not exist, rather than asserting. */ DeviceState *qdev_try_new(const char *name); + /** * qdev_realize: Realize @dev. * @dev: device to realize @@ -347,6 +356,7 @@ DeviceState *qdev_try_new(const char *name); * qdev_realize_and_unref() instead. */ bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp); + /** * qdev_realize_and_unref: Realize @dev and drop a reference * @dev: device to realize @@ -372,6 +382,7 @@ bool qdev_realize(DeviceState *dev, BusState *bus, Error **errp); * would be incorrect. For that use case you want qdev_realize(). */ bool qdev_realize_and_unref(DeviceState *dev, BusState *bus, Error **errp); + /** * qdev_unrealize: Unrealize a device * @dev: device to unrealize @@ -450,6 +461,7 @@ typedef enum { * For named input GPIO lines, use qdev_get_gpio_in_named(). */ qemu_irq qdev_get_gpio_in(DeviceState *dev, int n); + /** * qdev_get_gpio_in_named: Get one of a device's named input GPIO lines * @dev: Device whose GPIO we want @@ -488,7 +500,7 @@ qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n); * qemu_irqs at once, or to connect multiple outbound GPIOs to the * same qemu_irq. (Warning: there is no assertion or other guard to * catch this error: the model will just not do the right thing.) 
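 * * For example (device names are illustrative): * * qdev_connect_gpio_out(src_dev, 0, qdev_get_gpio_in(sink_dev, 3)); * * routes anonymous output line 0 of src_dev into input line 3 of sink_dev.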
- * Instead, for fan-out you can use the TYPE_IRQ_SPLIT device: connect + * Instead, for fan-out you can use the TYPE_SPLIT_IRQ device: connect * a device's outbound GPIO to the splitter's input, and connect each * of the splitter's outputs to a different device. For fan-in you * can use the TYPE_OR_IRQ device, which is a model of a logical OR @@ -497,8 +509,10 @@ qemu_irq qdev_get_gpio_in_named(DeviceState *dev, const char *name, int n); * For named output GPIO lines, use qdev_connect_gpio_out_named(). */ void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin); + /** - * qdev_connect_gpio_out: Connect one of a device's anonymous output GPIO lines + * qdev_connect_gpio_out_named: Connect one of a device's named output + GPIO lines * @dev: Device whose GPIO to connect * @name: Name of the output GPIO array * @n: Number of the anonymous output GPIO line (which must be in range) @@ -520,10 +534,11 @@ void qdev_connect_gpio_out(DeviceState *dev, int n, qemu_irq pin); * qemu_irqs at once, or to connect multiple outbound GPIOs to the * same qemu_irq; see qdev_connect_gpio_out() for details. * - * For named output GPIO lines, use qdev_connect_gpio_out_named(). + * For anonymous output GPIO lines, use qdev_connect_gpio_out(). */ void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, qemu_irq pin); + /** * qdev_get_gpio_out_connector: Get the qemu_irq connected to an output GPIO * @dev: Device whose output GPIO we are interested in @@ -541,6 +556,7 @@ void qdev_connect_gpio_out_named(DeviceState *dev, const char *name, int n, * by the platform-bus subsystem. */ qemu_irq qdev_get_gpio_out_connector(DeviceState *dev, const char *name, int n); + /** * qdev_intercept_gpio_out: Intercept an existing GPIO connection * @dev: Device to intercept the outbound GPIO line from @@ -582,6 +598,7 @@ BusState *qdev_get_child_bus(DeviceState *dev, const char *name); * hold of an input GPIO line to manipulate it. */ void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n); + /** * qdev_init_gpio_out: create an array of anonymous output GPIO lines * @dev: Device to create output GPIOs for @@ -610,8 +627,9 @@ void qdev_init_gpio_in(DeviceState *dev, qemu_irq_handler handler, int n); * handler. 
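 * * For example, a device with a single output line might do (a sketch, * assuming a state struct s with a qemu_irq field named irq): * * qdev_init_gpio_out(DEVICE(s), &s->irq, 1); * * and later raise the line with qemu_set_irq(s->irq, 1).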
*/ void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n); + /** - * qdev_init_gpio_out: create an array of named output GPIO lines + * qdev_init_gpio_out_named: create an array of named output GPIO lines * @dev: Device to create output GPIOs for * @pins: Pointer to qemu_irq or qemu_irq array for the GPIO lines * @name: Name to give this array of GPIO lines @@ -623,6 +641,7 @@ void qdev_init_gpio_out(DeviceState *dev, qemu_irq *pins, int n); */ void qdev_init_gpio_out_named(DeviceState *dev, qemu_irq *pins, const char *name, int n); + /** * qdev_init_gpio_in_named_with_opaque: create an array of input GPIO lines * for the specified device diff --git a/include/hw/qdev-properties-system.h b/include/hw/qdev-properties-system.h index 0ac327ae609f2879446f824005468549b7cb6401..906a0276761693411d6a259612d7a6dff6814e89 100644 --- a/include/hw/qdev-properties-system.h +++ b/include/hw/qdev-properties-system.h @@ -9,6 +9,8 @@ extern const PropertyInfo qdev_prop_reserved_region; extern const PropertyInfo qdev_prop_multifd_compression; extern const PropertyInfo qdev_prop_losttickpolicy; extern const PropertyInfo qdev_prop_blockdev_on_error; +extern const PropertyInfo qdev_prop_blockdev_retry_interval; +extern const PropertyInfo qdev_prop_blockdev_retry_timeout; extern const PropertyInfo qdev_prop_bios_chs_trans; extern const PropertyInfo qdev_prop_fdc_drive_type; extern const PropertyInfo qdev_prop_drive; @@ -47,6 +49,12 @@ extern const PropertyInfo qdev_prop_pcie_link_width; #define DEFINE_PROP_BLOCKDEV_ON_ERROR(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_blockdev_on_error, \ BlockdevOnError) +#define DEFINE_PROP_BLOCKDEV_RETRY_INTERVAL(_n, _s, _f, _d) \ + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_blockdev_retry_interval, \ + int64_t) +#define DEFINE_PROP_BLOCKDEV_RETRY_TIMEOUT(_n, _s, _f, _d) \ + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_blockdev_retry_timeout, \ + int64_t) #define DEFINE_PROP_BIOS_CHS_TRANS(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_bios_chs_trans, int) #define DEFINE_PROP_BLOCKSIZE(_n, _s, _f) \ diff --git a/include/hw/qdev-properties.h b/include/hw/qdev-properties.h index f7925f67d03b2110923a8524a1620f1ba79a8d44..ea129d65a684f368bd207b7583c6d79acce62ddf 100644 --- a/include/hw/qdev-properties.h +++ b/include/hw/qdev-properties.h @@ -58,6 +58,7 @@ extern const PropertyInfo qdev_prop_int64; extern const PropertyInfo qdev_prop_size; extern const PropertyInfo qdev_prop_string; extern const PropertyInfo qdev_prop_on_off_auto; +extern const PropertyInfo qdev_prop_compress_method; extern const PropertyInfo qdev_prop_size32; extern const PropertyInfo qdev_prop_arraylen; extern const PropertyInfo qdev_prop_link; @@ -161,6 +162,9 @@ extern const PropertyInfo qdev_prop_link; DEFINE_PROP(_n, _s, _f, qdev_prop_string, char*) #define DEFINE_PROP_ON_OFF_AUTO(_n, _s, _f, _d) \ DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_on_off_auto, OnOffAuto) +#define DEFINE_PROP_COMPRESS_METHOD(_n, _s, _f, _d) \ + DEFINE_PROP_SIGNED(_n, _s, _f, _d, qdev_prop_compress_method, \ + CompressMethod) #define DEFINE_PROP_SIZE32(_n, _s, _f, _d) \ DEFINE_PROP_UNSIGNED(_n, _s, _f, _d, qdev_prop_size32, uint32_t) diff --git a/include/hw/s390x/sclp.h b/include/hw/s390x/sclp.h index d3ade40a5a8d1a1b630bf9a96257b00373faf6c3..a170ee5d7175cb836c48024164a9168d106b048f 100644 --- a/include/hw/s390x/sclp.h +++ b/include/hw/s390x/sclp.h @@ -38,10 +38,8 @@ #define MAX_STORAGE_INCREMENTS 1020 /* CPU hotplug SCLP codes */ -#define SCLP_HAS_CPU_INFO 0x0C00000000000000ULL 
+#define SCLP_HAS_CPU_INFO 0x0800000000000000ULL #define SCLP_CMDW_READ_CPU_INFO 0x00010001 -#define SCLP_CMDW_CONFIGURE_CPU 0x00110001 -#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001 /* SCLP PCI codes */ #define SCLP_HAS_IOA_RECONFIG 0x0000000040000000ULL diff --git a/include/hw/scsi/scsi.h b/include/hw/scsi/scsi.h index a567a5ed86b14b9a7f56652c6f7ddaa5e03cc294..80c9eb00e41379e75fdcb4c72919cb1f8ede351f 100644 --- a/include/hw/scsi/scsi.h +++ b/include/hw/scsi/scsi.h @@ -108,6 +108,7 @@ int cdrom_read_toc_raw(int nb_sectors, uint8_t *buf, int msf, int session_num); /* scsi-bus.c */ struct SCSIReqOps { size_t size; + void (*init_req)(SCSIRequest *req); void (*free_req)(SCSIRequest *req); int32_t (*send_command)(SCSIRequest *req, uint8_t *buf); void (*read_data)(SCSIRequest *req); @@ -212,6 +213,7 @@ void scsi_req_cancel_complete(SCSIRequest *req); void scsi_req_cancel(SCSIRequest *req); void scsi_req_cancel_async(SCSIRequest *req, Notifier *notifier); void scsi_req_retry(SCSIRequest *req); +void scsi_retry_requests(SCSIDevice *s); void scsi_device_purge_requests(SCSIDevice *sdev, SCSISense sense); void scsi_device_set_ua(SCSIDevice *sdev, SCSISense sense); void scsi_device_report_change(SCSIDevice *dev, SCSISense sense); diff --git a/include/hw/sw64/sw64_iommu.h b/include/hw/sw64/sw64_iommu.h new file mode 100644 index 0000000000000000000000000000000000000000..71918760836034dba09bba4505e832d5cf0eaece --- /dev/null +++ b/include/hw/sw64/sw64_iommu.h @@ -0,0 +1,105 @@ +/* + * Copyright (C) 2021-2025 Wuxi Institute of Advanced Technology + * Written by Lu Feifei + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#ifndef HW_SW64_IOMMU_H +#define HW_SW64_IOMMU_H + +#include "hw/sysbus.h" +#include "hw/pci/pci.h" + +#define TYPE_SW64_IOMMU_MEMORY_REGION "sw64-iommu-memory-region" +#define SW_IOMMU_ENTRY_VALID ((1UL) << 63) +#define SW_IOMMU_LEVEL1_OFFSET 0x1ff +#define SW_IOMMU_LEVEL2_OFFSET 0x3ff +#define SW_IOMMU_ENABLE 3 +#define SW_IOMMU_GRN ((0UL) << 4) +#define SWVT_PCI_BUS_MAX 256 + +typedef struct SW64IOMMUClass SW64IOMMUClass; +typedef struct SW64IOMMUState SW64IOMMUState; +typedef struct SWVTAddressSpace SWVTAddressSpace; +typedef struct SW64DTIOTLBKey SW64DTIOTLBKey; +typedef struct SW64PTIOTLBKey SW64PTIOTLBKey; +typedef struct SW64DTIOTLBEntry SW64DTIOTLBEntry; +typedef struct SWVTBus SWVTBus; + +struct SW64DTIOTLBEntry { + uint16_t source_id; + unsigned long ptbase_addr; +}; + +struct SW64DTIOTLBKey { + uint16_t source_id; +}; + +struct SW64PTIOTLBKey { + uint16_t source_id; + dma_addr_t iova; +}; + +struct SWVTAddressSpace { + PCIBus *bus; + uint8_t devfn; + AddressSpace as; + IOMMUMemoryRegion iommu; + MemoryRegion root; + MemoryRegion msi; /* Interrupt region: 0xfeeXXXXX */ + SW64IOMMUState *iommu_state; + QLIST_ENTRY(SWVTAddressSpace) next; + /* Superset of notifier flags that this address space has */ + IOMMUNotifierFlag notifier_flags; +}; + +struct SWVTBus { + PCIBus* bus; /* A reference to the bus to provide translation for */ + SWVTAddressSpace *dev_as[0]; /* A table of SWVTAddressSpace objects indexed by devfn */ +}; + +struct SW64IOMMUState { + SysBusDevice busdev; + dma_addr_t dtbr; /* Current root table pointer */ + GHashTable *dtiotlb; /* IOTLB for device table */ + GHashTable *ptiotlb; /* IOTLB for page table */ + + GHashTable *swvtbus_as_by_busptr; + /* list of registered notifiers */ + QLIST_HEAD(, SWVTAddressSpace) swvt_as_with_notifiers; + + PCIBus *pci_bus; + QemuMutex iommu_lock; +}; + +struct SW64IOMMUClass { + SysBusDeviceClass parent; + DeviceRealize realize; +}; + +#define TYPE_SW64_IOMMU "sw64-iommu" +#define SW64_IOMMU(obj) \ + OBJECT_CHECK(SW64IOMMUState, (obj), TYPE_SW64_IOMMU) +#define SW64_IOMMU_CLASS(klass) \ + OBJECT_CLASS_CHECK(SW64IOMMUClass, (klass), TYPE_SW64_IOMMU) +#define SW64_IOMMU_GET_CLASS(obj) \ + OBJECT_GET_CLASS(SW64IOMMUClass, (obj), TYPE_SW64_IOMMU) +extern void sw64_vt_iommu_init(PCIBus *b); +extern void swvt_address_space_invalidate_iova(SW64IOMMUState *s, unsigned long val); +extern void swvt_address_space_unmap_iova(SW64IOMMUState *s, unsigned long val); +extern void swvt_address_space_map_iova(SW64IOMMUState *s, unsigned long val); +extern SWVTAddressSpace *iommu_find_add_as(SW64IOMMUState *s, PCIBus *bus, int devfn); +extern MemTxResult msi_write(void *opaque, hwaddr addr, uint64_t value, unsigned size, + MemTxAttrs attrs); +#endif diff --git a/include/hw/usb.h b/include/hw/usb.h index 33668dd0a99aa61eb235e64326773605f15b04cf..fa3a176159d7a9c5c6723d9396a865fbd2297ca5 100644 --- a/include/hw/usb.h +++ b/include/hw/usb.h @@ -142,6 +142,7 @@ #define USB_DEVICE_SELF_POWERED 0 #define USB_DEVICE_REMOTE_WAKEUP 1 +#define USB_DEVICE_REMOTE_WAKEUP_IS_SUPPORTED 2 #define USB_DT_DEVICE 0x01 #define USB_DT_CONFIG 0x02 diff --git a/include/hw/vfio/vfio-common.h b/include/hw/vfio/vfio-common.h index 8af11b0a76924f3fd9f935bb80fb019e80ddeef0..0234f5e1b1627012021b976fddf2e4f8161b24e7 100644 --- a/include/hw/vfio/vfio-common.h +++ b/include/hw/vfio/vfio-common.h @@ -76,6 +76,14 @@ typedef struct VFIOAddressSpace { struct VFIOGroup; +typedef struct VFIODMARange { + QLIST_ENTRY(VFIODMARange) next; + hwaddr iova; + size_t size; + void *vaddr; /* 
unused */ + unsigned long *bitmap; /* dirty bitmap cache for this range */ +} VFIODMARange; + typedef struct VFIOContainer { VFIOAddressSpace *space; int fd; /* /dev/vfio/vfio, empowered by the attached groups */ @@ -85,6 +93,7 @@ typedef struct VFIOContainer { Error *error; bool initialized; bool dirty_pages_supported; + bool dirty_log_manual_clear; uint64_t dirty_pgsizes; uint64_t max_dirty_bitmap_size; unsigned long pgsizes; @@ -93,6 +102,7 @@ typedef struct VFIOContainer { QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list; QLIST_HEAD(, VFIOGroup) group_list; QLIST_HEAD(, VFIORamDiscardListener) vrdl_list; + QLIST_HEAD(, VFIODMARange) dma_list; QLIST_ENTRY(VFIOContainer) next; } VFIOContainer; diff --git a/include/hw/virtio/vdpa-dev-mig.h b/include/hw/virtio/vdpa-dev-mig.h new file mode 100644 index 0000000000000000000000000000000000000000..adc1d657f70aa500cb9a6578c6a9444c7c0677b9 --- /dev/null +++ b/include/hw/virtio/vdpa-dev-mig.h @@ -0,0 +1,29 @@ +/* + * Vhost Vdpa Device Migration Header + * + * Copyright (c) Huawei Technologies Co., Ltd. 2023. All Rights Reserved. + */ + +#ifndef _VHOST_VDPA_MIGRATION_H +#define _VHOST_VDPA_MIGRATION_H + +#include "hw/virtio/vdpa-dev.h" + +enum { + VDPA_DEVICE_START, + VDPA_DEVICE_STOP, + VDPA_DEVICE_PRE_START, + VDPA_DEVICE_PRE_STOP, + VDPA_DEVICE_CANCEL, + VDPA_DEVICE_POST_START, + VDPA_DEVICE_START_ASYNC, + VDPA_DEVICE_STOP_ASYNC, + VDPA_DEVICE_PRE_START_ASYNC, + VDPA_DEVICE_QUERY_OP_STATE, +}; + +void vdpa_migration_register(VhostVdpaDevice *vdev); + +void vdpa_migration_unregister(VhostVdpaDevice *vdev); + +#endif /* _VHOST_VDPA_MIGRATION_H */ diff --git a/include/hw/virtio/vdpa-dev.h b/include/hw/virtio/vdpa-dev.h new file mode 100644 index 0000000000000000000000000000000000000000..60e9c3f3fe99767be784a993b0a9add621e508f6 --- /dev/null +++ b/include/hw/virtio/vdpa-dev.h @@ -0,0 +1,46 @@ +/* + * Vhost Vdpa Device + * + * Copyright (c) Huawei Technologies Co., Ltd. 2022. All Rights Reserved. + * + * Authors: + * Longpeng + * + * Largely based on the "vhost-user-blk.h" implemented by: + * Changpeng Liu + * + * This work is licensed under the terms of the GNU LGPL, version 2 or later. + * See the COPYING.LIB file in the top-level directory. 
+ */ +#ifndef _VHOST_VDPA_DEVICE_H +#define _VHOST_VDPA_DEVICE_H + +#include "hw/virtio/vhost.h" +#include "hw/virtio/vhost-vdpa.h" +#include "qom/object.h" + + +#define TYPE_VHOST_VDPA_DEVICE "vhost-vdpa-device" +OBJECT_DECLARE_SIMPLE_TYPE(VhostVdpaDevice, VHOST_VDPA_DEVICE) + +struct VhostVdpaDevice { + VirtIODevice parent_obj; + char *vhostdev; + int vhostfd; + int32_t bootindex; + uint32_t vdev_id; + uint32_t num_queues; + struct vhost_dev dev; + struct vhost_vdpa vdpa; + VirtQueue **virtqs; + uint8_t *config; + int config_size; + uint16_t queue_size; + bool started; + bool suspended; + int (*post_init)(VhostVdpaDevice *v, Error **errp); + VMChangeStateEntry *vmstate; + Notifier migration_state; +}; + +#endif diff --git a/include/hw/virtio/vhost-backend.h b/include/hw/virtio/vhost-backend.h index 81bf3109f837e15001e04b165b029a2f5a87adfd..2ca6250567fc05513ef80b3faa935d1a722d658d 100644 --- a/include/hw/virtio/vhost-backend.h +++ b/include/hw/virtio/vhost-backend.h @@ -53,6 +53,11 @@ typedef int (*vhost_scsi_get_abi_version_op)(struct vhost_dev *dev, int *version); typedef int (*vhost_set_log_base_op)(struct vhost_dev *dev, uint64_t base, struct vhost_log *log); +typedef int (*vhost_set_log_size_op)(struct vhost_dev *dev, uint64_t size, + struct vhost_log *log); +typedef int (*vhost_set_log_fd_op)(struct vhost_dev *dev, int fd, + struct vhost_log *log); +typedef int (*vhost_log_sync_op)(struct vhost_dev *dev); typedef int (*vhost_set_mem_table_op)(struct vhost_dev *dev, struct vhost_memory *mem); typedef int (*vhost_set_vring_addr_op)(struct vhost_dev *dev, @@ -125,6 +130,13 @@ typedef int (*vhost_vq_get_addr_op)(struct vhost_dev *dev, typedef int (*vhost_get_device_id_op)(struct vhost_dev *dev, uint32_t *dev_id); typedef bool (*vhost_force_iommu_op)(struct vhost_dev *dev); +typedef int (*vhost_set_config_call_op)(struct vhost_dev *dev, + int fd); +typedef void (*vhost_set_used_memslots_op)(struct vhost_dev *dev); +typedef unsigned int (*vhost_get_used_memslots_op)(void); + +typedef int (*vhost_dev_suspend_op)(struct vhost_dev *dev); +typedef int (*vhost_dev_resume_op)(struct vhost_dev *dev); typedef struct VhostOps { VhostBackendType backend_type; @@ -137,6 +149,9 @@ typedef struct VhostOps { vhost_scsi_clear_endpoint_op vhost_scsi_clear_endpoint; vhost_scsi_get_abi_version_op vhost_scsi_get_abi_version; vhost_set_log_base_op vhost_set_log_base; + vhost_set_log_size_op vhost_set_log_size; + vhost_set_log_fd_op vhost_set_log_fd; + vhost_log_sync_op vhost_log_sync; vhost_set_mem_table_op vhost_set_mem_table; vhost_set_vring_addr_op vhost_set_vring_addr; vhost_set_vring_endian_op vhost_set_vring_endian; @@ -171,6 +186,11 @@ typedef struct VhostOps { vhost_vq_get_addr_op vhost_vq_get_addr; vhost_get_device_id_op vhost_get_device_id; vhost_force_iommu_op vhost_force_iommu; + vhost_set_config_call_op vhost_set_config_call; + vhost_set_used_memslots_op vhost_set_used_memslots; + vhost_get_used_memslots_op vhost_get_used_memslots; + vhost_dev_suspend_op vhost_dev_suspend; + vhost_dev_resume_op vhost_dev_resume; } VhostOps; int vhost_backend_update_device_iotlb(struct vhost_dev *dev, diff --git a/include/hw/virtio/vhost-user.h b/include/hw/virtio/vhost-user.h index a9abca3288db817410246725dc834f15a4b37497..e44a41bb70d66c6c7243f820e7798df60194c474 100644 --- a/include/hw/virtio/vhost-user.h +++ b/include/hw/virtio/vhost-user.h @@ -12,9 +12,10 @@ #include "hw/virtio/virtio.h" typedef struct VhostUserHostNotifier { + struct rcu_head rcu; MemoryRegion mr; void *addr; - bool set; + void *unmap_addr; 
} VhostUserHostNotifier; typedef struct VhostUserState { diff --git a/include/hw/virtio/vhost-vdpa.h b/include/hw/virtio/vhost-vdpa.h index 3ce79a646df36c6f5a057b53afca5a559a7050c8..620a0f70abe4694c2712378e123ccf8d5da6157a 100644 --- a/include/hw/virtio/vhost-vdpa.h +++ b/include/hw/virtio/vhost-vdpa.h @@ -12,9 +12,19 @@ #ifndef HW_VIRTIO_VHOST_VDPA_H #define HW_VIRTIO_VHOST_VDPA_H +#include <gmodule.h> + +#include "hw/virtio/vhost-iova-tree.h" +#include "hw/virtio/vhost-shadow-virtqueue.h" #include "hw/virtio/virtio.h" #include "standard-headers/linux/vhost_types.h" +/* + * ASID dedicated to map guest's addresses. If SVQ is disabled it maps GPA to + * qemu's IOVA. If SVQ is enabled it maps also the SVQ vring here + */ +#define VHOST_VDPA_GUEST_PA_ASID 0 + typedef struct VhostVDPAHostNotifier { MemoryRegion mr; void *addr; @@ -25,10 +35,28 @@ typedef struct vhost_vdpa { int index; uint32_t msg_type; bool iotlb_batch_begin_sent; + uint32_t address_space_id; MemoryListener listener; struct vhost_vdpa_iova_range iova_range; + uint64_t acked_features; + bool shadow_vqs_enabled; + /* Vdpa must send shadow addresses as IOTLB key for data queues, not GPA */ + bool shadow_data; + /* IOVA mapping used by the Shadow Virtqueue */ + VhostIOVATree *iova_tree; + GPtrArray *shadow_vqs; + const VhostShadowVirtqueueOps *shadow_vq_ops; + void *shadow_vq_ops_opaque; struct vhost_dev *dev; + Error *migration_blocker; VhostVDPAHostNotifier notifier[VIRTIO_QUEUE_MAX]; } VhostVDPA; +int vhost_vdpa_get_iova_range(int fd, struct vhost_vdpa_iova_range *iova_range); + +int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, + hwaddr size, void *vaddr, bool readonly); +int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova, + hwaddr size); + #endif diff --git a/include/hw/virtio/vhost.h b/include/hw/virtio/vhost.h index 58a73e7b7a19b2d42ad400ce35625116a3e31dbb..9441b4c50e2e600e3c0d5112dbd27f7ca340f885 100644 --- a/include/hw/virtio/vhost.h +++ b/include/hw/virtio/vhost.h @@ -5,6 +5,9 @@ #include "hw/virtio/virtio.h" #include "exec/memory.h" +#define VHOST_F_DEVICE_IOTLB 63 +#define VHOST_USER_F_PROTOCOL_FEATURES 30 + /* Generic structures common for any vhost based device.
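 * * As an illustrative note, the feature bits defined above are tested with * plain bit arithmetic; e.g. a backend probes for vhost-user protocol * feature negotiation with: * * if (features & (1ULL << VHOST_USER_F_PROTOCOL_FEATURES)) { ... }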
*/ struct vhost_inflight { @@ -29,6 +32,7 @@ struct vhost_virtqueue { unsigned long long used_phys; unsigned used_size; EventNotifier masked_notifier; + EventNotifier masked_config_notifier; struct vhost_dev *dev; }; @@ -36,7 +40,9 @@ typedef unsigned long vhost_log_chunk_t; #define VHOST_LOG_PAGE 0x1000 #define VHOST_LOG_BITS (8 * sizeof(vhost_log_chunk_t)) #define VHOST_LOG_CHUNK (VHOST_LOG_PAGE * VHOST_LOG_BITS) +#define VHOST_LOG_CHUNK_BYTES (VHOST_LOG_PAGE * sizeof(vhost_log_chunk_t)) #define VHOST_INVALID_FEATURE_BIT (0xff) +#define VHOST_QUEUE_NUM_CONFIG_INR 0 struct vhost_log { unsigned long long size; @@ -61,6 +67,12 @@ typedef struct VhostDevConfigOps { } VhostDevConfigOps; struct vhost_memory; + +/** + * struct vhost_dev - common vhost_dev structure + * @vhost_ops: backend specific ops + * @config_ops: ops for config changes (see @vhost_dev_set_config_notifier) + */ struct vhost_dev { VirtIODevice *vdev; MemoryListener memory_listener; @@ -108,14 +120,132 @@ struct vhost_net { NetClientState *nc; }; +/** + * vhost_dev_init() - initialise the vhost interface + * @hdev: the common vhost_dev structure + * @opaque: opaque ptr passed to backend (vhost/vhost-user/vdpa) + * @backend_type: type of backend + * @busyloop_timeout: timeout for polling virtqueue + * @errp: error handle + * + * The initialisation of the vhost device will trigger the + * initialisation of the backend and potentially capability + * negotiation of backend interface. Configuration of the VirtIO + * itself won't happen until the interface is started. + * + * Return: 0 on success, non-zero on error while setting errp. + */ int vhost_dev_init(struct vhost_dev *hdev, void *opaque, VhostBackendType backend_type, uint32_t busyloop_timeout, Error **errp); + +/** + * vhost_dev_cleanup() - tear down and cleanup vhost interface + * @hdev: the common vhost_dev structure + */ void vhost_dev_cleanup(struct vhost_dev *hdev); -int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev); -void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev); + +/** + * vhost_dev_enable_notifiers() - enable event notifiers + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * + * Enable notifications directly to the vhost device rather than being + * triggered by QEMU itself. Notifications should be enabled before + * the vhost device is started via @vhost_dev_start. + * + * Return: 0 on success, < 0 on error. + */ int vhost_dev_enable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev); + +/** + * vhost_dev_disable_notifiers - disable event notifications + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * + * Disable direct notifications to vhost device. + */ void vhost_dev_disable_notifiers(struct vhost_dev *hdev, VirtIODevice *vdev); +bool vhost_config_pending(struct vhost_dev *hdev); +void vhost_config_mask(struct vhost_dev *hdev, VirtIODevice *vdev, bool mask); + +/** + * vhost_dev_start() - start the vhost device + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * @vrings: true to have vrings enabled in this call + * + * Starts the vhost device. From this point VirtIO feature negotiation + * can start and the device can start processing VirtIO transactions. + * + * Return: 0 on success, < 0 on error. 
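+ * + * A typical bring-up/tear-down sequence in a device model is (a sketch, + * error handling omitted): + * + * vhost_dev_enable_notifiers(hdev, vdev); + * vhost_dev_start(hdev, vdev, true); + * ... + * vhost_dev_stop(hdev, vdev, true); + * vhost_dev_disable_notifiers(hdev, vdev);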
+ */ +int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); + +/** + * vhost_dev_stop() - stop the vhost device + * @hdev: common vhost_dev structure + * @vdev: the VirtIODevice structure + * @vrings: true to have vrings disabled in this call + * + * Stop the vhost device. After the device is stopped the notifiers + * can be disabled (@vhost_dev_disable_notifiers) and the device can + * be torn down (@vhost_dev_cleanup). + */ +void vhost_dev_stop(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); + +/** + * DOC: vhost device configuration handling + * + * The VirtIO device configuration space is used for rarely changing + * or initialisation time parameters. The configuration can be updated + * by either the guest driver or the device itself. If the device can + * change the configuration over time the vhost handler should + * register a @VhostDevConfigOps structure with + * @vhost_dev_set_config_notifier so the guest can be notified. Some + * devices register a handler anyway and will signal an error if an + * unexpected config change happens. + */ + +/** + * vhost_dev_get_config() - fetch device configuration + * @hdev: common vhost_dev_structure + * @config: pointer to device appropriate config structure + * @config_len: size of device appropriate config structure + * + * Return: 0 on success, < 0 on error while setting errp + */ +int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, + uint32_t config_len, Error **errp); + +/** + * vhost_dev_set_config() - set device configuration + * @hdev: common vhost_dev_structure + * @data: pointer to data to set + * @offset: offset into configuration space + * @size: length of set + * @flags: @VhostSetConfigType flags + * + * By use of @offset/@size a subset of the configuration space can be + * written to. The @flags are used to indicate if it is a normal + * transaction or related to migration. + * + * Return: 0 on success, non-zero on error + */ +int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data, + uint32_t offset, uint32_t size, uint32_t flags); + +/** + * vhost_dev_set_config_notifier() - register VhostDevConfigOps + * @hdev: common vhost_dev_structure + * @ops: notifier ops + * + * If the device is expected to change configuration a notifier can be + * setup to handle the case. + */ +void vhost_dev_set_config_notifier(struct vhost_dev *dev, + const VhostDevConfigOps *ops); + /* Test and clear masked event pending status. * Should be called after unmask to avoid losing events. 
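 * For example, after unmasking a queue with vhost_virtqueue_mask(hdev, vdev, * n, false), a caller can check vhost_virtqueue_pending(hdev, n) to pick up * an event that fired while the queue was masked. */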
@@ -136,14 +266,6 @@ int vhost_net_set_backend(struct vhost_dev *hdev, struct vhost_vring_file *file); int vhost_device_iotlb_miss(struct vhost_dev *dev, uint64_t iova, int write); -int vhost_dev_get_config(struct vhost_dev *hdev, uint8_t *config, - uint32_t config_len, Error **errp); -int vhost_dev_set_config(struct vhost_dev *dev, const uint8_t *data, - uint32_t offset, uint32_t size, uint32_t flags); -/* notifier callback in case vhost device config space changed - */ -void vhost_dev_set_config_notifier(struct vhost_dev *dev, - const VhostDevConfigOps *ops); void vhost_dev_reset_inflight(struct vhost_inflight *inflight); void vhost_dev_free_inflight(struct vhost_inflight *inflight); @@ -154,4 +276,9 @@ int vhost_dev_set_inflight(struct vhost_dev *dev, struct vhost_inflight *inflight); int vhost_dev_get_inflight(struct vhost_dev *dev, uint16_t queue_size, struct vhost_inflight *inflight); +bool used_memslots_is_exceeded(void); + +int vhost_dev_resume(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); +int vhost_dev_suspend(struct vhost_dev *hdev, VirtIODevice *vdev, bool vrings); + #endif diff --git a/include/hw/virtio/virtio-gpu-bswap.h b/include/hw/virtio/virtio-gpu-bswap.h index e2bee8f59557b8ce7516151567496ca2aa682e42..5faac0d8d5f3dd5c1eb4b967214b4ea3cf9d8147 100644 --- a/include/hw/virtio/virtio-gpu-bswap.h +++ b/include/hw/virtio/virtio-gpu-bswap.h @@ -24,7 +24,6 @@ virtio_gpu_ctrl_hdr_bswap(struct virtio_gpu_ctrl_hdr *hdr) le32_to_cpus(&hdr->flags); le64_to_cpus(&hdr->fence_id); le32_to_cpus(&hdr->ctx_id); - le32_to_cpus(&hdr->padding); } static inline void diff --git a/include/hw/virtio/virtio-gpu.h b/include/hw/virtio/virtio-gpu.h index acfba7c76c10656c4c460d53bc4bbfb1720d6a4a..2179b757037a4591629d22bc1d63262d7095819e 100644 --- a/include/hw/virtio/virtio-gpu.h +++ b/include/hw/virtio/virtio-gpu.h @@ -147,8 +147,8 @@ struct VirtIOGPUBaseClass { DEFINE_PROP_UINT32("max_outputs", _state, _conf.max_outputs, 1), \ DEFINE_PROP_BIT("edid", _state, _conf.flags, \ VIRTIO_GPU_FLAG_EDID_ENABLED, true), \ - DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1024), \ - DEFINE_PROP_UINT32("yres", _state, _conf.yres, 768) + DEFINE_PROP_UINT32("xres", _state, _conf.xres, 1280), \ + DEFINE_PROP_UINT32("yres", _state, _conf.yres, 800) typedef struct VGPUDMABuf { QemuDmaBuf buf; diff --git a/include/hw/virtio/virtio-net.h b/include/hw/virtio/virtio-net.h index eb87032627d221a90002fb5e0f1bc9e2e0cfdd01..ef234ffe7ef61f1a931467b28ed7ba5b025901bd 100644 --- a/include/hw/virtio/virtio-net.h +++ b/include/hw/virtio/virtio-net.h @@ -35,6 +35,9 @@ OBJECT_DECLARE_SIMPLE_TYPE(VirtIONet, VIRTIO_NET) * and latency. */ #define TX_BURST 256 +/* Maximum VIRTIO_NET_CTRL_MAC_TABLE_SET unicast + multicast entries. 
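+ * (Illustrative note, not stated in this header: when a guest submits more + * entries than fit here, virtio-net records the overflow and accepts all + * unicast/multicast frames rather than failing the control command.)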
*/ +#define MAC_TABLE_ENTRIES 64 + typedef struct virtio_net_conf { uint32_t txtimer; @@ -218,6 +221,10 @@ struct VirtIONet { struct EBPFRSSContext ebpf_rss; }; +size_t virtio_net_handle_ctrl_iov(VirtIODevice *vdev, + const struct iovec *in_sg, unsigned in_num, + const struct iovec *out_sg, + unsigned out_num); void virtio_net_set_netclient_name(VirtIONet *n, const char *name, const char *type); diff --git a/include/hw/virtio/virtio.h b/include/hw/virtio/virtio.h index 8bab9cfb7507dd9749866ff99d9a7b3ea6660dcb..4cc278f12c67c733315b7dada6b227753cc34747 100644 --- a/include/hw/virtio/virtio.h +++ b/include/hw/virtio/virtio.h @@ -22,6 +22,8 @@ #include "standard-headers/linux/virtio_config.h" #include "standard-headers/linux/virtio_ring.h" #include "qom/object.h" +#include "hw/virtio/vhost.h" +#include "block/aio.h" /* A guest should never accept this. It implies negotiation is broken. */ #define VIRTIO_F_BAD_FEATURE 30 @@ -49,6 +51,7 @@ size_t virtio_feature_get_config_size(const VirtIOFeature *features, typedef struct VirtQueue VirtQueue; #define VIRTQUEUE_MAX_SIZE 1024 +#define VIRTIO_NET_VQ_MAX_SIZE (4096) typedef struct VirtQueueElement { @@ -67,6 +70,9 @@ typedef struct VirtQueueElement #define VIRTIO_NO_VECTOR 0xffff +/* special index value used internally for config irqs */ +#define VIRTIO_CONFIG_IRQ_IDX -1 + #define TYPE_VIRTIO_DEVICE "virtio-device" OBJECT_DECLARE_TYPE(VirtIODevice, VirtioDeviceClass, VIRTIO_DEVICE) @@ -102,12 +108,14 @@ struct VirtIODevice bool started; bool start_on_kick; /* when virtio 1.0 feature has not been negotiated */ bool disable_legacy_check; + bool vhost_started; VMChangeStateEntry *vmstate; char *bus_name; uint8_t device_endian; bool use_guest_notifier_mask; AddressSpace *dma_as; QLIST_HEAD(, VirtQueue) *vector_queues; + EventNotifier config_notifier; }; struct VirtioDeviceClass { @@ -126,6 +134,7 @@ struct VirtioDeviceClass { int (*validate_features)(VirtIODevice *vdev); void (*get_config)(VirtIODevice *vdev, uint8_t *config); void (*set_config)(VirtIODevice *vdev, const uint8_t *config); + void (*print_features)(uint64_t features); void (*reset)(VirtIODevice *vdev); void (*set_status)(VirtIODevice *vdev, uint8_t val); /* For transitional devices, this is a bitmap of features @@ -160,6 +169,7 @@ struct VirtioDeviceClass { int (*post_load)(VirtIODevice *vdev); const VMStateDescription *vmsd; bool (*primary_unplug_pending)(void *opaque); + struct vhost_dev *(*get_vhost)(VirtIODevice *vdev); }; void virtio_instance_init_common(Object *proxy_obj, void *data, @@ -321,6 +331,9 @@ void virtio_queue_aio_set_host_notifier_handler(VirtQueue *vq, AioContext *ctx, VirtIOHandleAIOOutput handle_output); VirtQueue *virtio_vector_first_queue(VirtIODevice *vdev, uint16_t vector); VirtQueue *virtio_vector_next_queue(VirtQueue *vq); +EventNotifier *virtio_config_get_guest_notifier(VirtIODevice *vdev); +void virtio_config_set_guest_notifier_fd_handler(VirtIODevice *vdev, + bool assign, bool with_irqfd); static inline void virtio_add_feature(uint64_t *features, unsigned int fbit) { @@ -397,4 +410,10 @@ static inline bool virtio_device_disabled(VirtIODevice *vdev) bool virtio_legacy_allowed(VirtIODevice *vdev); bool virtio_legacy_check_disabled(VirtIODevice *vdev); +QEMUBH *virtio_bh_new_guarded_full(DeviceState *dev, + QEMUBHFunc *cb, void *opaque, + const char *name); +#define virtio_bh_new_guarded(dev, cb, opaque) \ + virtio_bh_new_guarded_full((dev), (cb), (opaque), (stringify(cb))) + #endif diff --git a/include/io/channel-tls.h b/include/io/channel-tls.h index 
5672479e9eb60d6117592cc4a66e8329dbdf9e1c..26c67f17e2d364eeed9ce0a156b41aa7c210d1fc 100644 --- a/include/io/channel-tls.h +++ b/include/io/channel-tls.h @@ -48,6 +48,7 @@ struct QIOChannelTLS { QIOChannel *master; QCryptoTLSSession *session; QIOChannelShutdown shutdown; + guint hs_ioc_tag; }; /** diff --git a/include/migration/colo.h b/include/migration/colo.h index 768e1f04c3a60fc9ee98928333be46b6a10f5baa..5fbe1a6d5d30418419b2eef8cab5134c60f11c50 100644 --- a/include/migration/colo.h +++ b/include/migration/colo.h @@ -37,4 +37,5 @@ COLOMode get_colo_mode(void); void colo_do_failover(void); void colo_checkpoint_notify(void *opaque); +void colo_shutdown(void); #endif diff --git a/include/monitor/hmp.h b/include/monitor/hmp.h index 96d014826ada85ab9e9e743c5a7bffbcf50ab750..478820e54fe3023192003636ee651c8b2afa6211 100644 --- a/include/monitor/hmp.h +++ b/include/monitor/hmp.h @@ -131,6 +131,9 @@ void hmp_replay_delete_break(Monitor *mon, const QDict *qdict); void hmp_replay_seek(Monitor *mon, const QDict *qdict); void hmp_info_dirty_rate(Monitor *mon, const QDict *qdict); void hmp_calc_dirty_rate(Monitor *mon, const QDict *qdict); +void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); +void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); +void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict); void hmp_human_readable_text_helper(Monitor *mon, HumanReadableText *(*qmp_handler)(Error **)); diff --git a/include/monitor/monitor.h b/include/monitor/monitor.h index 12d395d62d6b3993e035331d4d7570edbbc7b83a..847445f972ed4d0a870964e77785e80475d6a77b 100644 --- a/include/monitor/monitor.h +++ b/include/monitor/monitor.h @@ -56,4 +56,6 @@ void monitor_register_hmp(const char *name, bool info, void monitor_register_hmp_info_hrt(const char *name, HumanReadableText *(*handler)(Error **errp)); +void monitor_qapi_event_discard_io_error(void); + #endif /* MONITOR_H */ diff --git a/include/net/net.h b/include/net/net.h index 523136c7acba67ce7fbbf4c8bb46d2eeb8b1116e..b55f6cf698cd365c5632c021593d59a4f391ad09 100644 --- a/include/net/net.h +++ b/include/net/net.h @@ -44,6 +44,9 @@ typedef struct NICConf { typedef void (NetPoll)(NetClientState *, bool enable); typedef bool (NetCanReceive)(NetClientState *); +typedef int (NetStart)(NetClientState *); +typedef int (NetLoad)(NetClientState *); +typedef void (NetStop)(NetClientState *); typedef ssize_t (NetReceive)(NetClientState *, const uint8_t *, size_t); typedef ssize_t (NetReceiveIOV)(NetClientState *, const struct iovec *, int); typedef void (NetCleanup) (NetClientState *); @@ -71,6 +74,9 @@ typedef struct NetClientInfo { NetReceive *receive_raw; NetReceiveIOV *receive_iov; NetCanReceive *can_receive; + NetStart *start; + NetLoad *load; + NetStop *stop; NetCleanup *cleanup; LinkStatusChanged *link_status_changed; QueryRxFilter *query_rx_filter; @@ -112,6 +118,7 @@ struct NetClientState { typedef struct NICState { NetClientState *ncs; NICConf *conf; + MemReentrancyGuard *reentrancy_guard; void *opaque; bool peer_deleted; } NICState; @@ -145,6 +152,7 @@ NICState *qemu_new_nic(NetClientInfo *info, NICConf *conf, const char *model, const char *name, + MemReentrancyGuard *reentrancy_guard, void *opaque); void qemu_del_nic(NICState *nic); NetClientState *qemu_get_subqueue(NICState *nic, int queue_index); diff --git a/include/net/vhost-user.h b/include/net/vhost-user.h index 5bcd8a6285986f8922b55173308f37eaa120b014..35bf61970985c38458205ea03142b12e8133391e 100644 --- a/include/net/vhost-user.h +++ b/include/net/vhost-user.h @@ -14,5 
+14,6 @@ struct vhost_net; struct vhost_net *vhost_user_get_vhost_net(NetClientState *nc); uint64_t vhost_user_get_acked_features(NetClientState *nc); +void vhost_user_save_acked_features(NetClientState *nc); #endif /* VHOST_USER_H */ diff --git a/include/net/vhost_net.h b/include/net/vhost_net.h index 387e913e4e608b49f97ba93e2770983f6380ba3f..1844f0ed466014fe2143e564683c3f26c33ed839 100644 --- a/include/net/vhost_net.h +++ b/include/net/vhost_net.h @@ -39,6 +39,8 @@ int vhost_net_set_config(struct vhost_net *net, const uint8_t *data, bool vhost_net_virtqueue_pending(VHostNetState *net, int n); void vhost_net_virtqueue_mask(VHostNetState *net, VirtIODevice *dev, int idx, bool mask); +bool vhost_net_config_pending(VHostNetState *net); +void vhost_net_config_mask(VHostNetState *net, VirtIODevice *dev, bool mask); int vhost_net_notify_migration_done(VHostNetState *net, char* mac_addr); VHostNetState *get_vhost_net(NetClientState *nc); @@ -48,4 +50,5 @@ uint64_t vhost_net_get_acked_features(VHostNetState *net); int vhost_net_set_mtu(struct vhost_net *net, uint16_t mtu); +void vhost_net_save_acked_features(NetClientState *nc); #endif diff --git a/include/qemu-common.h b/include/qemu-common.h index 73bcf763ed8281dc8f369d81beea5081d6fe8c4c..9ed883215225c55d2753090fd9b2c15bd5c7d7c5 100644 --- a/include/qemu-common.h +++ b/include/qemu-common.h @@ -27,7 +27,9 @@ int qemu_main(int argc, char **argv, char **envp); #endif void qemu_get_timedate(struct tm *tm, int offset); -int qemu_timedate_diff(struct tm *tm); +time_t qemu_timedate_diff(struct tm *tm); +time_t get_rtc_date_diff(void); +void set_rtc_date_diff(time_t diff); void *qemu_oom_check(void *ptr); diff --git a/include/qemu/atomic.h b/include/qemu/atomic.h index 112a29910bea1d481ab8f23557141e0fa2406bfc..614112230880ad1f97b89aa8178c5ce30de198b0 100644 --- a/include/qemu/atomic.h +++ b/include/qemu/atomic.h @@ -85,6 +85,8 @@ #define smp_read_barrier_depends() ({ barrier(); __atomic_thread_fence(__ATOMIC_CONSUME); }) #elif defined(__alpha__) #define smp_read_barrier_depends() asm volatile("mb":::"memory") +#elif defined(__sw_64__) +#define smp_read_barrier_depends() asm volatile("memb":::"memory") #else #define smp_read_barrier_depends() barrier() #endif diff --git a/include/qemu/bswap.h b/include/qemu/bswap.h index 2d3bb8bbeddac5a2461a9b8a402582fb62907c0c..d8364f5011b10d52dc312e92037a0625fce87075 100644 --- a/include/qemu/bswap.h +++ b/include/qemu/bswap.h @@ -183,6 +183,8 @@ CPU_CONVERT(le, 16, uint16_t) CPU_CONVERT(le, 32, uint32_t) CPU_CONVERT(le, 64, uint64_t) +#undef CPU_CONVERT + /* * Same as cpu_to_le{16,32}, except that gcc will figure the result is * a compile-time constant if you pass in a constant. 
So this can be used to initialize static variables. */ diff --git a/include/qemu/coroutine_int.h b/include/qemu/coroutine_int.h index 1da148552f74b542959dd48448eb6bbc3aead935..11b550a0fcbf55cf67c2ed0eaf7317e1a6128475 100644 --- a/include/qemu/coroutine_int.h +++ b/include/qemu/coroutine_int.h @@ -73,5 +73,6 @@ Coroutine *qemu_coroutine_new(void); void qemu_coroutine_delete(Coroutine *co); CoroutineAction qemu_coroutine_switch(Coroutine *from, Coroutine *to, CoroutineAction action); - +void qemu_coroutine_info_add(const Coroutine *co_); +void qemu_coroutine_info_delete(const Coroutine *co_); #endif diff --git a/include/qemu/iova-tree.h b/include/qemu/iova-tree.h index 8249edd764e6d40dfc8ab3d7d5b06074d9f74f8d..8528e5c98fbc06541ae0ad6a5bb0681e0babfe4b 100644 --- a/include/qemu/iova-tree.h +++ b/include/qemu/iova-tree.h @@ -29,6 +29,7 @@ #define IOVA_OK (0) #define IOVA_ERR_INVALID (-1) /* Invalid parameters */ #define IOVA_ERR_OVERLAP (-2) /* IOVA range overlapped */ +#define IOVA_ERR_NOMEM (-3) /* Cannot allocate */ typedef struct IOVATree IOVATree; typedef struct DMAMap { @@ -71,10 +72,8 @@ int iova_tree_insert(IOVATree *tree, const DMAMap *map); * provided. The range does not need to be exactly what has inserted, * all the mappings that are included in the provided range will be * removed from the tree. Here map->translated_addr is meaningless. - * - * Return: 0 if succeeded, or <0 if error. */ -int iova_tree_remove(IOVATree *tree, const DMAMap *map); +void iova_tree_remove(IOVATree *tree, DMAMap map); /** * iova_tree_find: @@ -82,7 +81,7 @@ int iova_tree_remove(IOVATree *tree, const DMAMap *map); * @tree: the iova tree to search from * @map: the mapping to search * - * Search for a mapping in the iova tree that overlaps with the + * Search for a mapping in the iova tree that iova overlaps with the * mapping range specified. Only the first found mapping will be * returned. * @@ -94,6 +93,24 @@ int iova_tree_remove(IOVATree *tree, const DMAMap *map); const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map); +/** + * iova_tree_find_iova: + * + * @tree: the iova tree to search from + * @map: the mapping to search + * + * Search for a mapping in the iova tree that translated_addr overlaps with the + * mapping range specified. Only the first found mapping will be + * returned. + * + * Return: DMAMap pointer if found, or NULL if not found. Note that + * the returned DMAMap pointer is maintained internally. User should + * only read the content but never modify or free the content. Also, + * user is responsible to make sure the pointer is valid (say, no + * concurrent deletion in progress). + */ +const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map); + /** * iova_tree_find_address: * @@ -119,6 +136,23 @@ const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova); */ void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator); +/** + * iova_tree_alloc_map: + * + * @tree: the iova tree to allocate from + * @map: the new map (as translated addr & size) to allocate in the iova region + * @iova_begin: the minimum address of the allocation + * @iova_end: the maximum addressable direction of the allocation + * + * Allocates a new region of a given size, between iova_min and iova_max. + * + * Return: Same as iova_tree_insert, but cannot overlap and can return error if + * iova tree is out of free contiguous range. The caller gets the assigned iova + * in map->iova.
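+ * + * Usage sketch (taddr, len and the iova bounds belong to the caller; DMAMap + * sizes are inclusive, hence the len - 1): + * + * DMAMap map = { .translated_addr = taddr, .size = len - 1 }; + * if (iova_tree_alloc_map(tree, &map, iova_first, iova_last) == IOVA_OK) + * the allocated range starts at map.iova.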
+ */ +int iova_tree_alloc_map(IOVATree *tree, DMAMap *map, hwaddr iova_begin, + hwaddr iova_end); + /** * iova_tree_destroy: * diff --git a/include/qemu/main-loop.h b/include/qemu/main-loop.h index 8dbc6fcb8947497d0621bdf2d420213141d64c46..a472ebd28929ee36973414205fd79546c30f056c 100644 --- a/include/qemu/main-loop.h +++ b/include/qemu/main-loop.h @@ -242,9 +242,33 @@ AioContext *iohandler_get_aio_context(void); * must always be taken outside other locks. This function helps * functions take different paths depending on whether the current * thread is running within the main loop mutex. + * + * This function should never be used in the block layer, because + * unit tests, block layer tools and qemu-storage-daemon do not + * have a BQL. + * Please instead refer to qemu_in_main_thread(). */ bool qemu_mutex_iothread_locked(void); +/** + * qemu_in_main_thread: return whether it's possible to safely access + * the global state of the block layer. + * + * Global state of the block layer is not accessible from I/O threads + * or worker threads; only from threads that "own" the default + * AioContext that qemu_get_aio_context() returns. For tests, block + * layer tools and qemu-storage-daemon there is a designated thread that + * runs the event loop for qemu_get_aio_context(), and that is the + * main thread. + * + * For emulators, however, any thread that holds the BQL can act + * as the block layer main thread; this will be any of the actual + * main thread, the vCPU threads or the RCU thread. + * + * For clarity, do not use this function outside the block layer. + */ +bool qemu_in_main_thread(void); + /** * qemu_mutex_lock_iothread: Lock the main loop mutex. * @@ -294,9 +318,12 @@ void qemu_cond_timedwait_iothread(QemuCond *cond, int ms); void qemu_fd_register(int fd); +#define qemu_bh_new_guarded(cb, opaque, guard) \ + qemu_bh_new_full((cb), (opaque), (stringify(cb)), guard) #define qemu_bh_new(cb, opaque) \ - qemu_bh_new_full((cb), (opaque), (stringify(cb))) -QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name); + qemu_bh_new_full((cb), (opaque), (stringify(cb)), NULL) +QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name, + MemReentrancyGuard *reentrancy_guard); void qemu_bh_schedule_idle(QEMUBH *bh); enum { diff --git a/include/qemu/mmap-alloc.h b/include/qemu/mmap-alloc.h index 90d0eee705351853ab7f7435e1e7a74dc305e4a0..707202e5be03294a365ad9574f56521d58e84d3b 100644 --- a/include/qemu/mmap-alloc.h +++ b/include/qemu/mmap-alloc.h @@ -1,6 +1,9 @@ #ifndef QEMU_MMAP_ALLOC_H #define QEMU_MMAP_ALLOC_H +#define HUGETLBFS_MAGIC 0x958458f6 + +size_t qemu_fd_getfiletype(int fd); size_t qemu_fd_getpagesize(int fd); diff --git a/include/qemu/osdep.h b/include/qemu/osdep.h index 60718fc3429f987b48ed411f141559fe283fc5ad..fd9e53f6239ab3cd50d95ccfa9507dc392cff202 100644 --- a/include/qemu/osdep.h +++ b/include/qemu/osdep.h @@ -533,6 +533,10 @@ static inline void qemu_cleanup_generic_vfree(void *p) Valgrind does not support alignments larger than 1 MiB, therefore we need special code which handles running on Valgrind. */ # define QEMU_VMALLOC_ALIGN (512 * 4096) +#elif defined(__linux__) && defined(__loongarch__) + /* Use 32 MiB alignment so transparent hugepages can be used by KVM. */ +#define QEMU_VMALLOC_ALIGN (qemu_real_host_page_size * \ + qemu_real_host_page_size / 8) #elif defined(__linux__) && defined(__s390x__) /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. 
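 * (Arithmetic check: 256 * 4096 bytes = 1 MiB, matching the segment size mentioned above.)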
*/ # define QEMU_VMALLOC_ALIGN (256 * 4096) diff --git a/include/qemu/timer.h b/include/qemu/timer.h index 88ef11468944bb4a6ee924f271c8b8f41363c402..e6d442abeedca8a17690d25287ae965a5a5119b6 100644 --- a/include/qemu/timer.h +++ b/include/qemu/timer.h @@ -91,6 +91,34 @@ struct QEMUTimer { int scale; }; +#define QEMU_USB_NORMAL_FREQ 1000 +#define QEMU_USB_LAZY_FREQ 10 +#define MAX_USB_CONTROLLER_TYPES 4 +#define QEMU_USB_CONTROLLER_OHCI 0 +#define QEMU_USB_CONTROLLER_UHCI 1 +#define QEMU_USB_CONTROLLER_EHCI 2 +#define QEMU_USB_CONTROLLER_XHCI 3 + +typedef void (*QEMUSetFreqHandler) (int freq); + +typedef struct qemu_usb_controller { + const char *name; + QEMUSetFreqHandler qemu_set_freq; +} qemu_usb_controller; + +typedef qemu_usb_controller* qemu_usb_controller_ptr; + +enum qemu_timer_mode { + QEMU_TIMER_USB_NORMAL_MODE = 1 << 0, /* Set when VNC connect or + * with usb dev passthrough + */ + QEMU_TIMER_USB_LAZY_MODE = 1 << 1, /* Set when VNC disconnect */ +}; + +int qemu_register_usb_controller(qemu_usb_controller_ptr controller, + unsigned int type); +int qemu_timer_set_mode(enum qemu_timer_mode mode, unsigned int type); + extern QEMUTimerListGroup main_loop_tlg; /* @@ -979,6 +1007,16 @@ static inline int64_t cpu_get_host_ticks(void) return cur - ofs; } +#elif defined(__sw_64__) + +static inline int64_t cpu_get_host_ticks(void) +{ + uint64_t cc; + + asm volatile("rtc %0" : "=r"(cc)); + return cc; +} + #else /* The host CPU doesn't have an easily accessible cycle counter. Just return a monotonically increasing value. This will be diff --git a/include/qemu/userfaultfd.h b/include/qemu/userfaultfd.h index 6b74f92792da4268c2033254ef710710ac4f46bf..99831c052d679423478cb58271dcd4bf5ebf3495 100644 --- a/include/qemu/userfaultfd.h +++ b/include/qemu/userfaultfd.h @@ -30,6 +30,5 @@ int uffd_copy_page(int uffd_fd, void *dst_addr, void *src_addr, int uffd_zero_page(int uffd_fd, void *addr, uint64_t length, bool dont_wake); int uffd_wakeup(int uffd_fd, void *addr, uint64_t length); int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count); -bool uffd_poll_events(int uffd_fd, int tmo); #endif /* USERFAULTFD_H */ diff --git a/include/qom/object.h b/include/qom/object.h index fae096f51cce1649a4f2083ad40680ae09f16d41..f658e1e0a08944ee9aa07405ecbd992531696b17 100644 --- a/include/qom/object.h +++ b/include/qom/object.h @@ -16,7 +16,6 @@ #include "qapi/qapi-builtin-types.h" #include "qemu/module.h" -#include "qom/object.h" struct TypeImpl; typedef struct TypeImpl *Type; diff --git a/include/standard-headers/linux/vhost_types.h b/include/standard-headers/linux/vhost_types.h index 0bd2684a2ae47993594e5f8fcf5b56f98aa0db84..3801d95182ebd4ae35c77361fd6df1f06694736c 100644 --- a/include/standard-headers/linux/vhost_types.h +++ b/include/standard-headers/linux/vhost_types.h @@ -87,7 +87,7 @@ struct vhost_msg { struct vhost_msg_v2 { uint32_t type; - uint32_t reserved; + uint32_t asid; union { struct vhost_iotlb_msg iotlb; uint8_t padding[64]; @@ -153,4 +153,11 @@ struct vhost_vdpa_iova_range { /* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. 
@@ -153,4 +153,11 @@ struct vhost_vdpa_iova_range {
 /* vhost-net should add virtio_net_hdr for RX, and strip for TX packets. */
 #define VHOST_NET_F_VIRTIO_NET_HDR 27
 
+/* IOTLB can accept an address space identifier through the V2 type of IOTLB
+ * message
+ */
+#define VHOST_BACKEND_F_IOTLB_ASID 0x3
+/* device can use bytemap log */
+#define VHOST_BACKEND_F_BYTEMAPLOG 0x3f
+
 #endif
diff --git a/include/sysemu/arch_init.h b/include/sysemu/arch_init.h
index 70c579560adfd4e2bea6efadf2ea7f7ebc15fa38..0907b92cd1b66efe9d53fdaa700f642cd76c5f79 100644
--- a/include/sysemu/arch_init.h
+++ b/include/sysemu/arch_init.h
@@ -24,6 +24,8 @@ enum {
     QEMU_ARCH_RX = (1 << 20),
     QEMU_ARCH_AVR = (1 << 21),
     QEMU_ARCH_HEXAGON = (1 << 22),
+    QEMU_ARCH_SW64 = (1 << 23),
+    QEMU_ARCH_LOONGARCH64 = (1 << 24),
 };
 
 extern const uint32_t arch_type;
diff --git a/include/sysemu/block-backend.h b/include/sysemu/block-backend.h
index e5e1524f065b22546b41dc2cc2020ef71a012391..887c19ff5dfd10f7cca67c1d6cfeb98526a41dce 100644
--- a/include/sysemu/block-backend.h
+++ b/include/sysemu/block-backend.h
@@ -25,6 +25,9 @@
  */
 #include "block/block.h"
 
+/* block backend default retry interval */
+#define BLOCK_BACKEND_DEFAULT_RETRY_INTERVAL 1000
+
 /* Callbacks for block device models */
 typedef struct BlockDevOps {
     /*
@@ -70,6 +73,10 @@ typedef struct BlockDevOps {
      * Is the device still busy?
      */
     bool (*drained_poll)(void *opaque);
+    /*
+     * Runs when retrying failed requests.
+     */
+    void (*retry_request_cb)(void *opaque);
 } BlockDevOps;
 
 /* This struct is embedded in (the private) BlockBackend struct and contains
@@ -194,6 +201,9 @@ void blk_inc_in_flight(BlockBackend *blk);
 void blk_dec_in_flight(BlockBackend *blk);
 void blk_drain(BlockBackend *blk);
 void blk_drain_all(void);
+void blk_set_on_error_retry_interval(BlockBackend *blk, int64_t interval);
+void blk_set_on_error_retry_timeout(BlockBackend *blk, int64_t timeout);
+void blk_error_retry_reset_timeout(BlockBackend *blk);
 void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error);
 BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read);
diff --git a/include/sysemu/dirtylimit.h b/include/sysemu/dirtylimit.h
new file mode 100644
index 0000000000000000000000000000000000000000..d11ebbbbdb78cfc132f8591df07cfb6ddd4360cb
--- /dev/null
+++ b/include/sysemu/dirtylimit.h
@@ -0,0 +1,39 @@
+/*
+ * Dirty page rate limit common functions
+ *
+ * Copyright (c) 2022 CHINA TELECOM CO.,LTD.
+ *
+ * Authors:
+ *  Hyman Huang(黄勇)
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */ +#ifndef QEMU_DIRTYRLIMIT_H +#define QEMU_DIRTYRLIMIT_H + +#define DIRTYLIMIT_CALC_TIME_MS 1000 /* 1000ms */ + +int64_t vcpu_dirty_rate_get(int cpu_index); +void vcpu_dirty_rate_stat_start(void); +void vcpu_dirty_rate_stat_stop(void); +void vcpu_dirty_rate_stat_initialize(void); +void vcpu_dirty_rate_stat_finalize(void); + +void dirtylimit_state_lock(void); +void dirtylimit_state_unlock(void); +void dirtylimit_state_initialize(void); +void dirtylimit_state_finalize(void); +bool dirtylimit_in_service(void); +bool dirtylimit_vcpu_index_valid(int cpu_index); +void dirtylimit_process(void); +void dirtylimit_change(bool start); +void dirtylimit_set_vcpu(int cpu_index, + uint64_t quota, + bool enable); +void dirtylimit_set_all(uint64_t quota, + bool enable); +void dirtylimit_vcpu_execute(CPUState *cpu); +uint64_t dirtylimit_throttle_time_per_round(void); +uint64_t dirtylimit_ring_full_time(void); +#endif diff --git a/include/sysemu/dirtyrate.h b/include/sysemu/dirtyrate.h new file mode 100644 index 0000000000000000000000000000000000000000..4d3b9a4902211a1f8d475054fc244dddae009bcf --- /dev/null +++ b/include/sysemu/dirtyrate.h @@ -0,0 +1,28 @@ +/* + * dirty page rate helper functions + * + * Copyright (c) 2022 CHINA TELECOM CO.,LTD. + * + * Authors: + * Hyman Huang(黄勇) + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#ifndef QEMU_DIRTYRATE_H +#define QEMU_DIRTYRATE_H + +typedef struct VcpuStat { + int nvcpu; /* number of vcpu */ + DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */ +} VcpuStat; + +int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, + VcpuStat *stat, + unsigned int flag, + bool one_shot); + +void global_dirty_log_change(unsigned int flag, + bool start); +#endif diff --git a/include/sysemu/dma.h b/include/sysemu/dma.h index 3201e7901dbfcbbbb4b0d76aca1a8a5b2bd74c75..b3faef41b2f22eae40772ea4245829ef861e48b3 100644 --- a/include/sysemu/dma.h +++ b/include/sysemu/dma.h @@ -73,19 +73,20 @@ static inline void dma_barrier(AddressSpace *as, DMADirection dir) * dma_memory_{read,write}() and check for errors */ static inline bool dma_memory_valid(AddressSpace *as, dma_addr_t addr, dma_addr_t len, - DMADirection dir) + DMADirection dir, MemTxAttrs attrs) { return address_space_access_valid(as, addr, len, dir == DMA_DIRECTION_FROM_DEVICE, - MEMTXATTRS_UNSPECIFIED); + attrs); } static inline MemTxResult dma_memory_rw_relaxed(AddressSpace *as, dma_addr_t addr, void *buf, dma_addr_t len, - DMADirection dir) + DMADirection dir, + MemTxAttrs attrs) { - return address_space_rw(as, addr, MEMTXATTRS_UNSPECIFIED, + return address_space_rw(as, addr, attrs, buf, len, dir == DMA_DIRECTION_FROM_DEVICE); } @@ -93,7 +94,9 @@ static inline MemTxResult dma_memory_read_relaxed(AddressSpace *as, dma_addr_t addr, void *buf, dma_addr_t len) { - return dma_memory_rw_relaxed(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE); + return dma_memory_rw_relaxed(as, addr, buf, len, + DMA_DIRECTION_TO_DEVICE, + MEMTXATTRS_UNSPECIFIED); } static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as, @@ -102,7 +105,8 @@ static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as, dma_addr_t len) { return dma_memory_rw_relaxed(as, addr, (void *)buf, len, - DMA_DIRECTION_FROM_DEVICE); + DMA_DIRECTION_FROM_DEVICE, + MEMTXATTRS_UNSPECIFIED); } /** @@ -117,14 +121,15 @@ static inline MemTxResult dma_memory_write_relaxed(AddressSpace *as, * @buf: buffer with the data transferred * @len: the number of bytes to 
read or write * @dir: indicates the transfer direction + * @attrs: memory transaction attributes */ static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr, void *buf, dma_addr_t len, - DMADirection dir) + DMADirection dir, MemTxAttrs attrs) { dma_barrier(as, dir); - return dma_memory_rw_relaxed(as, addr, buf, len, dir); + return dma_memory_rw_relaxed(as, addr, buf, len, dir, attrs); } /** @@ -138,11 +143,14 @@ static inline MemTxResult dma_memory_rw(AddressSpace *as, dma_addr_t addr, * @addr: address within that address space * @buf: buffer with the data transferred * @len: length of the data transferred + * @attrs: memory transaction attributes */ static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr, - void *buf, dma_addr_t len) + void *buf, dma_addr_t len, + MemTxAttrs attrs) { - return dma_memory_rw(as, addr, buf, len, DMA_DIRECTION_TO_DEVICE); + return dma_memory_rw(as, addr, buf, len, + DMA_DIRECTION_TO_DEVICE, attrs); } /** @@ -156,12 +164,14 @@ static inline MemTxResult dma_memory_read(AddressSpace *as, dma_addr_t addr, * @addr: address within that address space * @buf: buffer with the data transferred * @len: the number of bytes to write + * @attrs: memory transaction attributes */ static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr, - const void *buf, dma_addr_t len) + const void *buf, dma_addr_t len, + MemTxAttrs attrs) { return dma_memory_rw(as, addr, (void *)buf, len, - DMA_DIRECTION_FROM_DEVICE); + DMA_DIRECTION_FROM_DEVICE, attrs); } /** @@ -175,9 +185,10 @@ static inline MemTxResult dma_memory_write(AddressSpace *as, dma_addr_t addr, * @addr: address within that address space * @c: constant byte to fill the memory * @len: the number of bytes to fill with the constant byte + * @attrs: memory transaction attributes */ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr, - uint8_t c, dma_addr_t len); + uint8_t c, dma_addr_t len, MemTxAttrs attrs); /** * address_space_map: Map a physical memory region into a host virtual address. 
@@ -191,16 +202,17 @@ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr, * @addr: address within that address space * @len: pointer to length of buffer; updated on return * @dir: indicates the transfer direction + * @attrs: memory attributes */ static inline void *dma_memory_map(AddressSpace *as, dma_addr_t addr, dma_addr_t *len, - DMADirection dir) + DMADirection dir, MemTxAttrs attrs) { hwaddr xlen = *len; void *p; p = address_space_map(as, addr, &xlen, dir == DMA_DIRECTION_FROM_DEVICE, - MEMTXATTRS_UNSPECIFIED); + attrs); *len = xlen; return p; } @@ -228,32 +240,34 @@ static inline void dma_memory_unmap(AddressSpace *as, } #define DEFINE_LDST_DMA(_lname, _sname, _bits, _end) \ - static inline uint##_bits##_t ld##_lname##_##_end##_dma(AddressSpace *as, \ - dma_addr_t addr) \ - { \ - uint##_bits##_t val; \ - dma_memory_read(as, addr, &val, (_bits) / 8); \ - return _end##_bits##_to_cpu(val); \ - } \ - static inline void st##_sname##_##_end##_dma(AddressSpace *as, \ - dma_addr_t addr, \ - uint##_bits##_t val) \ - { \ - val = cpu_to_##_end##_bits(val); \ - dma_memory_write(as, addr, &val, (_bits) / 8); \ + static inline MemTxResult ld##_lname##_##_end##_dma(AddressSpace *as, \ + dma_addr_t addr, \ + uint##_bits##_t *pval, \ + MemTxAttrs attrs) \ + { \ + MemTxResult res = dma_memory_read(as, addr, pval, (_bits) / 8, attrs); \ + _end##_bits##_to_cpus(pval); \ + return res; \ + } \ + static inline MemTxResult st##_sname##_##_end##_dma(AddressSpace *as, \ + dma_addr_t addr, \ + uint##_bits##_t val, \ + MemTxAttrs attrs) \ + { \ + val = cpu_to_##_end##_bits(val); \ + return dma_memory_write(as, addr, &val, (_bits) / 8, attrs); \ } -static inline uint8_t ldub_dma(AddressSpace *as, dma_addr_t addr) +static inline MemTxResult ldub_dma(AddressSpace *as, dma_addr_t addr, + uint8_t *val, MemTxAttrs attrs) { - uint8_t val; - - dma_memory_read(as, addr, &val, 1); - return val; + return dma_memory_read(as, addr, val, 1, attrs); } -static inline void stb_dma(AddressSpace *as, dma_addr_t addr, uint8_t val) +static inline MemTxResult stb_dma(AddressSpace *as, dma_addr_t addr, + uint8_t val, MemTxAttrs attrs) { - dma_memory_write(as, addr, &val, 1); + return dma_memory_write(as, addr, &val, 1, attrs); } DEFINE_LDST_DMA(uw, w, 16, le); @@ -290,8 +304,8 @@ BlockAIOCB *dma_blk_read(BlockBackend *blk, BlockAIOCB *dma_blk_write(BlockBackend *blk, QEMUSGList *sg, uint64_t offset, uint32_t align, BlockCompletionFunc *cb, void *opaque); -uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg); -uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg); +uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs); +uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs); void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie, QEMUSGList *sg, enum BlockAcctType type); diff --git a/include/sysemu/kvm.h b/include/sysemu/kvm.h index 7b22aeb6ae1a111c158370933260ead5059c35c9..5c06cd3d9137438c680d6130bb7d043d179d3f08 100644 --- a/include/sysemu/kvm.h +++ b/include/sysemu/kvm.h @@ -19,6 +19,7 @@ #include "exec/memattrs.h" #include "qemu/accel.h" #include "qom/object.h" +#include "linux-headers/linux/kvm.h" #ifdef NEED_CPU_H # ifdef CONFIG_KVM @@ -32,6 +33,7 @@ #ifdef CONFIG_KVM_IS_POSSIBLE extern bool kvm_allowed; +extern bool virtcca_cvm_allowed; extern bool kvm_kernel_irqchip; extern bool kvm_split_irqchip; extern bool kvm_async_interrupts_allowed; @@ -48,6 +50,8 @@ extern bool kvm_ioeventfd_any_length_allowed; extern bool kvm_msi_use_devid; 
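Tying the sysemu/dma.h hunks above together: every helper now takes a MemTxAttrs argument, and the load/store wrappers return a MemTxResult instead of silently ignoring errors. A sketch of the updated call pattern (the function is hypothetical; callers that do not care about attributes pass MEMTXATTRS_UNSPECIFIED, which preserves the old behaviour):

#include "sysemu/dma.h"

/* Sketch: copy one word through the attribute-aware DMA helpers. */
static MemTxResult demo_dma_copy_word(AddressSpace *as,
                                      dma_addr_t src, dma_addr_t dst)
{
    uint32_t word;
    MemTxResult res;

    res = dma_memory_read(as, src, &word, sizeof(word),
                          MEMTXATTRS_UNSPECIFIED);
    if (res != MEMTX_OK) {
        return res;
    }
    return dma_memory_write(as, dst, &word, sizeof(word),
                            MEMTXATTRS_UNSPECIFIED);
}

Devices that carry per-transaction state (for example secure or user-defined attributes) can pass their own MemTxAttrs instead of the unspecified value.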
#define kvm_enabled() (kvm_allowed) +#define virtcca_cvm_enabled() (virtcca_cvm_allowed) +#define VIRTCCA_CVM_TYPE (1UL << 8) /** * kvm_irqchip_in_kernel: * @@ -170,6 +174,8 @@ extern bool kvm_msi_use_devid; #else #define kvm_enabled() (0) +#define virtcca_cvm_enabled() (0) +#define VIRTCCA_CVM_TYPE (0) #define kvm_irqchip_in_kernel() (false) #define kvm_irqchip_is_split() (false) #define kvm_async_interrupts_enabled() (false) @@ -221,6 +227,7 @@ int kvm_has_pit_state2(void); int kvm_has_many_ioeventfds(void); int kvm_has_gsi_routing(void); int kvm_has_intx_set_mask(void); +int kvm_create_parked_vcpu(unsigned long vcpu_id); /** * kvm_arm_supports_user_irq @@ -251,11 +258,11 @@ int kvm_on_sigbus(int code, void *addr); /* internal API */ -int kvm_ioctl(KVMState *s, int type, ...); +int kvm_ioctl(KVMState *s, unsigned long type, ...); -int kvm_vm_ioctl(KVMState *s, int type, ...); +int kvm_vm_ioctl(KVMState *s, unsigned long type, ...); -int kvm_vcpu_ioctl(CPUState *cpu, int type, ...); +int kvm_vcpu_ioctl(CPUState *cpu, unsigned long type, ...); /** * kvm_device_ioctl - call an ioctl on a kvm device @@ -264,7 +271,7 @@ int kvm_vcpu_ioctl(CPUState *cpu, int type, ...); * * Returns: -errno on error, nonnegative on success */ -int kvm_device_ioctl(int fd, int type, ...); +int kvm_device_ioctl(int fd, unsigned long type, ...); /** * kvm_vm_check_attr - check for existence of a specific vm attribute @@ -333,6 +340,8 @@ bool kvm_device_supported(int vmfd, uint64_t type); extern const KVMCapabilityInfo kvm_arch_required_capabilities[]; +void kvm_arch_accel_class_init(ObjectClass *oc); + void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run); MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run); @@ -548,4 +557,14 @@ bool kvm_cpu_check_are_resettable(void); bool kvm_arch_cpu_check_are_resettable(void); bool kvm_dirty_ring_enabled(void); + +uint32_t kvm_dirty_ring_size(void); + +int kvm_load_user_data(hwaddr loader_start, hwaddr image_end, hwaddr initrd_start, hwaddr dtb_end, hwaddr ram_size, + struct kvm_numa_info *numa_info); + +#ifdef __aarch64__ +int kvm_create_shadow_device(PCIDevice *dev); +int kvm_delete_shadow_device(PCIDevice *dev); +#endif #endif diff --git a/include/sysemu/kvm_int.h b/include/sysemu/kvm_int.h index 1f5487d9b74db353820c693134ec5e7fbdb61e96..b332d79ce531af0af20d91347347f68abf965e10 100644 --- a/include/sysemu/kvm_int.h +++ b/include/sysemu/kvm_int.h @@ -10,6 +10,7 @@ #define QEMU_KVM_INT_H #include "exec/memory.h" +#include "qapi/qapi-types-common.h" #include "qemu/accel.h" #include "sysemu/kvm.h" @@ -36,6 +37,90 @@ typedef struct KVMMemoryListener { int as_id; } KVMMemoryListener; +#define KVM_MSI_HASHTAB_SIZE 256 + +enum KVMDirtyRingReaperState { + KVM_DIRTY_RING_REAPER_NONE = 0, + /* The reaper is sleeping */ + KVM_DIRTY_RING_REAPER_WAIT, + /* The reaper is reaping for dirty pages */ + KVM_DIRTY_RING_REAPER_REAPING, +}; + +/* + * KVM reaper instance, responsible for collecting the KVM dirty bits + * via the dirty ring. 
+ */ +struct KVMDirtyRingReaper { + /* The reaper thread */ + QemuThread reaper_thr; + volatile uint64_t reaper_iteration; /* iteration number of reaper thr */ + volatile enum KVMDirtyRingReaperState reaper_state; /* reap thr state */ +}; +struct KVMState +{ + AccelState parent_obj; + + int nr_slots; + int fd; + int vmfd; + int coalesced_mmio; + int coalesced_pio; + struct kvm_coalesced_mmio_ring *coalesced_mmio_ring; + bool coalesced_flush_in_progress; + int vcpu_events; + int robust_singlestep; + int debugregs; +#ifdef KVM_CAP_SET_GUEST_DEBUG + QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints; +#endif + int max_nested_state_len; + int many_ioeventfds; + int intx_set_mask; + int kvm_shadow_mem; + bool kernel_irqchip_allowed; + bool kernel_irqchip_required; + OnOffAuto kernel_irqchip_split; + bool sync_mmu; + uint64_t manual_dirty_log_protect; + /* + * Older POSIX says that ioctl numbers are signed int, but in + * practice they are not. (Newer POSIX doesn't specify ioctl + * at all.) Linux, glibc and *BSD all treat ioctl numbers as + * unsigned, and real-world ioctl values like KVM_GET_XSAVE have + * bit 31 set, which means that passing them via an 'int' will + * result in sign-extension when they get converted back to the + * 'unsigned long' which the ioctl() prototype uses. Luckily Linux + * always treats the argument as an unsigned 32-bit int, so any + * possible sign-extension is deliberately ignored, but for + * consistency we keep to the same type that glibc is using. + */ + unsigned long irq_set_ioctl; + unsigned int sigmask_len; + GHashTable *gsimap; +#ifdef KVM_CAP_IRQ_ROUTING + struct kvm_irq_routing *irq_routes; + int nr_allocated_irq_routes; + unsigned long *used_gsi_bitmap; + unsigned int gsi_count; + QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE]; +#endif + KVMMemoryListener memory_listener; + QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus; + + /* For "info mtree -f" to tell if an MR is registered in KVM */ + int nr_as; + struct KVMAs { + KVMMemoryListener *ml; + AddressSpace *as; + } *as; + uint64_t kvm_dirty_ring_bytes; /* Size of the per-vcpu dirty ring */ + uint32_t kvm_dirty_ring_size; /* Number of dirty GFNs per ring */ + struct KVMDirtyRingReaper reaper; + NotifyVmexitOption notify_vmexit; + uint32_t notify_window; +}; + void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml, AddressSpace *as, int as_id, const char *name); diff --git a/include/sysemu/reset.h b/include/sysemu/reset.h index 0b0d6d7598c9566c11cab829c6d15fe22b57634a..f3ff26c6371a88c011dc32164dc6226f8397ec0f 100644 --- a/include/sysemu/reset.h +++ b/include/sysemu/reset.h @@ -2,7 +2,11 @@ #define QEMU_SYSEMU_RESET_H typedef void QEMUResetHandler(void *opaque); +typedef struct QEMUResetEntry QEMUResetEntry; +QEMUResetEntry *qemu_get_reset_entry(QEMUResetHandler *func, void *opaque); +void qemu_register_reset_after(QEMUResetEntry *entry, + QEMUResetHandler *func, void *opaque); void qemu_register_reset(QEMUResetHandler *func, void *opaque); void qemu_unregister_reset(QEMUResetHandler *func, void *opaque); void qemu_devices_reset(void); diff --git a/include/sysemu/sysemu.h b/include/sysemu/sysemu.h index 8fae667172acf3196bddc9c3141a955c3bb2bcb8..b9421e03ffddf5980b831ab7cf9241e1b0c4a18b 100644 --- a/include/sysemu/sysemu.h +++ b/include/sysemu/sysemu.h @@ -16,7 +16,6 @@ extern bool qemu_uuid_set; void qemu_add_exit_notifier(Notifier *notify); void qemu_remove_exit_notifier(Notifier *notify); -void qemu_run_machine_init_done_notifiers(void); void 
qemu_add_machine_init_done_notifier(Notifier *notify); void qemu_remove_machine_init_done_notifier(Notifier *notify); diff --git a/include/tcg/tcg.h b/include/tcg/tcg.h index 42f5b500ed6a7692f6e2fbe448a241fe9316214d..0ab8e4e7350742bdff9ce0eca85b1d15d03d7476 100644 --- a/include/tcg/tcg.h +++ b/include/tcg/tcg.h @@ -1240,12 +1240,6 @@ uint64_t dup_const(unsigned vece, uint64_t c); : (target_long)dup_const(VECE, C)) #endif -#ifdef CONFIG_DEBUG_TCG -void tcg_assert_listed_vecop(TCGOpcode); -#else -static inline void tcg_assert_listed_vecop(TCGOpcode op) { } -#endif - static inline const TCGOpcode *tcg_swap_vecop_list(const TCGOpcode *n) { #ifdef CONFIG_DEBUG_TCG diff --git a/include/ui/console.h b/include/ui/console.h index 6d678924f6fd2c57cc682abec0e8fe5b761bc8cc..407efaf5e3af5e798064955c51e4ee0915550ba0 100644 --- a/include/ui/console.h +++ b/include/ui/console.h @@ -70,6 +70,7 @@ void hmp_mouse_set(Monitor *mon, const QDict *qdict); /* keysym is a unicode code except for special keys (see QEMU_KEY_xxx constants) */ #define QEMU_KEY_ESC1(c) ((c) | 0xe100) +#define QEMU_KEY_TAB 0x0009 #define QEMU_KEY_BACKSPACE 0x007f #define QEMU_KEY_UP QEMU_KEY_ESC1('A') #define QEMU_KEY_DOWN QEMU_KEY_ESC1('B') diff --git a/io/channel-tls.c b/io/channel-tls.c index 2ae1b92fc0a8005e28c720cbcd3af99ecad9be8e..34476e6b7baaac8650a747c2eef548f68724720e 100644 --- a/io/channel-tls.c +++ b/io/channel-tls.c @@ -195,12 +195,13 @@ static void qio_channel_tls_handshake_task(QIOChannelTLS *ioc, } trace_qio_channel_tls_handshake_pending(ioc, status); - qio_channel_add_watch_full(ioc->master, - condition, - qio_channel_tls_handshake_io, - data, - NULL, - context); + ioc->hs_ioc_tag = + qio_channel_add_watch_full(ioc->master, + condition, + qio_channel_tls_handshake_io, + data, + NULL, + context); } } @@ -215,6 +216,7 @@ static gboolean qio_channel_tls_handshake_io(QIOChannel *ioc, QIOChannelTLS *tioc = QIO_CHANNEL_TLS( qio_task_get_source(task)); + tioc->hs_ioc_tag = 0; g_free(data); qio_channel_tls_handshake_task(tioc, task, context); @@ -373,6 +375,10 @@ static int qio_channel_tls_close(QIOChannel *ioc, { QIOChannelTLS *tioc = QIO_CHANNEL_TLS(ioc); + if (tioc->hs_ioc_tag) { + g_clear_handle_id(&tioc->hs_ioc_tag, g_source_remove); + } + return qio_channel_close(tioc->master, errp); } diff --git a/io/channel-websock.c b/io/channel-websock.c index 70889bb54da5a0502619a978f8124d1dc6127512..d9fc27a003c8d7c3bd27996ef3609a2209e5df77 100644 --- a/io/channel-websock.c +++ b/io/channel-websock.c @@ -32,7 +32,7 @@ #define QIO_CHANNEL_WEBSOCK_CLIENT_KEY_LEN 24 #define QIO_CHANNEL_WEBSOCK_GUID "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" -#define QIO_CHANNEL_WEBSOCK_GUID_LEN strlen(QIO_CHANNEL_WEBSOCK_GUID) +#define QIO_CHANNEL_WEBSOCK_GUID_LEN (sizeof(QIO_CHANNEL_WEBSOCK_GUID) - 1) #define QIO_CHANNEL_WEBSOCK_HEADER_PROTOCOL "sec-websocket-protocol" #define QIO_CHANNEL_WEBSOCK_HEADER_VERSION "sec-websocket-version" diff --git a/job.c b/job.c index dbfa67bb0a3b14dfd49f8f7a4d2b9da39392bfba..54db80df662da1343069baca50980bcf9ba1d7df 100644 --- a/job.c +++ b/job.c @@ -352,6 +352,7 @@ void *job_create(const char *job_id, const JobDriver *driver, JobTxn *txn, notifier_list_init(&job->on_finalize_completed); notifier_list_init(&job->on_pending); notifier_list_init(&job->on_ready); + notifier_list_init(&job->on_idle); job_state_transition(job, JOB_STATUS_CREATED); aio_timer_init(qemu_get_aio_context(), &job->sleep_timer, diff --git a/libdecnumber/dpd/decimal64.c b/libdecnumber/dpd/decimal64.c index 
4816176410f963ff026903dbf8ec66665f33cf24..f6fb2963d9fed88782f80f0730da59d3c8a0fae5 100644 --- a/libdecnumber/dpd/decimal64.c +++ b/libdecnumber/dpd/decimal64.c @@ -617,7 +617,6 @@ static const uInt multies[]={131073, 26215, 5243, 1049, 210}; #endif void decDigitsToDPD(const decNumber *dn, uInt *targ, Int shift) { Int cut; /* work */ - Int n; /* output bunch counter */ Int digits=dn->digits; /* digit countdown */ uInt dpd; /* densely packed decimal value */ uInt bin; /* binary value 0-999 */ @@ -676,7 +675,7 @@ void decDigitsToDPD(const decNumber *dn, uInt *targ, Int shift) { bin=0; /* [keep compiler quiet] */ #endif - for(n=0; digits>0; n++) { /* each output bunch */ + while (digits > 0) { /* each output bunch */ #if DECDPUN==3 /* fast path, 3-at-a-time */ bin=*inu; /* 3 digits ready for convert */ digits-=3; /* [may go negative] */ diff --git a/linux-headers/asm-arm64/kvm.h b/linux-headers/asm-arm64/kvm.h index 3d2ce9912dc64ec441eec9de451c6efbefe9e0d6..4e98a829f7cf50c84aae110d1a91c9b07ff0df5a 100644 --- a/linux-headers/asm-arm64/kvm.h +++ b/linux-headers/asm-arm64/kvm.h @@ -106,6 +106,7 @@ struct kvm_regs { #define KVM_ARM_VCPU_SVE 4 /* enable SVE for this CPU */ #define KVM_ARM_VCPU_PTRAUTH_ADDRESS 5 /* VCPU uses address authentication */ #define KVM_ARM_VCPU_PTRAUTH_GENERIC 6 /* VCPU uses generic authentication */ +#define KVM_ARM_VCPU_TEC 8 /* VCPU TEC state as part of cvm */ struct kvm_vcpu_init { __u32 target; @@ -411,6 +412,67 @@ struct kvm_arm_copy_mte_tags { #define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS #define KVM_PSCI_RET_DENIED PSCI_RET_DENIED +/* KVM_CAP_ARM_TMM on VM fd */ +#define KVM_CAP_ARM_TMM_CONFIG_CVM 0 +#define KVM_CAP_ARM_TMM_CREATE_RD 1 +#define KVM_CAP_ARM_TMM_POPULATE_CVM 2 +#define KVM_CAP_ARM_TMM_ACTIVATE_CVM 3 + +#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256 0 +#define KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512 1 + +#define KVM_CAP_ARM_TMM_RPV_SIZE 64 + +/* List of configuration items accepted for KVM_CAP_ARM_RME_CONFIG_REALM */ +#define KVM_CAP_ARM_TMM_CFG_RPV 0 +#define KVM_CAP_ARM_TMM_CFG_HASH_ALGO 1 +#define KVM_CAP_ARM_TMM_CFG_SVE 2 +#define KVM_CAP_ARM_TMM_CFG_DBG 3 +#define KVM_CAP_ARM_TMM_CFG_PMU 4 + +struct kvm_cap_arm_tmm_config_item { + __u32 cfg; + union { + /* cfg == KVM_CAP_ARM_TMM_CFG_RPV */ + struct { + __u8 rpv[KVM_CAP_ARM_TMM_RPV_SIZE]; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_HASH_ALGO */ + struct { + __u32 hash_algo; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_SVE */ + struct { + __u32 sve_vq; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_DBG */ + struct { + __u32 num_brps; + __u32 num_wrps; + }; + + /* cfg == KVM_CAP_ARM_TMM_CFG_PMU */ + struct { + __u32 num_pmu_cntrs; + }; + /* Fix the size of the union */ + __u8 reserved[256]; + }; +}; + +#define KVM_ARM_TMM_POPULATE_FLAGS_MEASURE (1U << 0) +struct kvm_cap_arm_tmm_populate_region_args { + __u64 populate_ipa_base1; + __u64 populate_ipa_size1; + __u64 populate_ipa_base2; + __u64 populate_ipa_size2; + __u32 flags; + __u32 reserved[3]; +}; + #endif #endif /* __ARM_KVM_H__ */ diff --git a/linux-headers/asm-loongarch64/bitsperlong.h b/linux-headers/asm-loongarch64/bitsperlong.h new file mode 100644 index 0000000000000000000000000000000000000000..a7981540d2853883f9ada33be8a3c02e6fd3e625 --- /dev/null +++ b/linux-headers/asm-loongarch64/bitsperlong.h @@ -0,0 +1,25 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the 
Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __ASM_LOONGARCH_BITSPERLONG_H
+#define __ASM_LOONGARCH_BITSPERLONG_H
+
+#define __BITS_PER_LONG _LOONGARCH_SZLONG
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* __ASM_LOONGARCH_BITSPERLONG_H */
diff --git a/linux-headers/asm-loongarch64/kvm.h b/linux-headers/asm-loongarch64/kvm.h
new file mode 100644
index 0000000000000000000000000000000000000000..a036ea57cd9c1966c7bf4f708057dc024833694e
--- /dev/null
+++ b/linux-headers/asm-loongarch64/kvm.h
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __LINUX_KVM_LOONGARCH_H
+#define __LINUX_KVM_LOONGARCH_H
+
+#include <linux/types.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+#define KVM_GUESTDBG_USE_SW_BP 0x00010000
+#define KVM_GUESTDBG_USE_HW_BP 0x00020000
+#define KVM_DATA_HW_BREAKPOINT_NUM 8
+#define KVM_INST_HW_BREAKPOINT_NUM 8
+
+/*
+ * KVM Loongarch specific structures and definitions.
+ *
+ * Some parts derived from the x86 version of this file.
+ */
+
+#define __KVM_HAVE_READONLY_MEM
+
+#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
+#define KVM_LARCH_VCPU_PVTIME_CTRL 2
+#define KVM_LARCH_VCPU_PVTIME_IPA 0
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ */
+struct kvm_regs {
+    /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
+    __u64 gpr[32];
+    __u64 pc;
+};
+
+/*
+ * for KVM_GET_CPUCFG
+ */
+struct kvm_cpucfg {
+    /* out (KVM_GET_CPUCFG) */
+    __u32 cpucfg[64];
+};
+
+/*
+ * for KVM_GET_FPU and KVM_SET_FPU
+ */
+struct kvm_fpu {
+    __u32 fcsr;
+    __u32 vcsr;
+    __u64 fcc;    /* 8x8 */
+    struct kvm_fpureg {
+        __u64 val64[4]; // support max 256 bits
+    } fpr[32];
+};
+
+/*
+ * For LOONGARCH, we use KVM_SET_ONE_REG and KVM_GET_ONE_REG to access various
+ * registers.  The id field is broken down as follows:
+ *
+ *  bits[63..52] - As per linux/kvm.h
+ *  bits[51..32] - Must be zero.
+ *  bits[31..16] - Register set.
+ *
+ * Register set = 0: GP registers from kvm_regs (see definitions below).
+ *
+ * Register set = 1: CSR registers.
+ *
+ * Register set = 2: KVM specific registers (see definitions below).
+ *
+ * Register set = 3: FPU / MSA registers (see definitions below).
+ * Register set = 4: LBT registers (see definitions below).
+ *
+ * Other register sets may be added in the future.  Each set would
+ * have its own identifier in bits[31..16].
+ */
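Following the id layout just described, fetching a register is a matter of ORing together the set base, the size field and the index, then issuing KVM_GET_ONE_REG. A sketch (the helper is hypothetical; it assumes an open vCPU fd and the KVM_REG_LOONGARCH_* definitions that follow):

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: read the guest PC. The id is the GP set base ORed with the
 * U64 size field and index 34, per the layout documented above. */
static int read_guest_pc(int vcpu_fd, __u64 *pc)
{
    struct kvm_one_reg reg = {
        .id   = KVM_REG_LOONGARCH_PC,
        .addr = (__u64)(unsigned long)pc,
    };

    return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}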
+
+#define KVM_REG_LOONGARCH_GP (KVM_REG_LOONGARCH | 0x0000000000000000ULL)
+#define KVM_REG_LOONGARCH_CSR (KVM_REG_LOONGARCH | 0x0000000000010000ULL)
+#define KVM_REG_LOONGARCH_KVM (KVM_REG_LOONGARCH | 0x0000000000020000ULL)
+#define KVM_REG_LOONGARCH_FPU (KVM_REG_LOONGARCH | 0x0000000000030000ULL)
+#define KVM_REG_LOONGARCH_LBT (KVM_REG_LOONGARCH | 0x0000000000040000ULL)
+
+/*
+ * KVM_REG_LOONGARCH_GP - General purpose registers from kvm_regs.
+ */
+
+#define KVM_REG_LOONGARCH_R0 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 0)
+#define KVM_REG_LOONGARCH_R1 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 1)
+#define KVM_REG_LOONGARCH_R2 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 2)
+#define KVM_REG_LOONGARCH_R3 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 3)
+#define KVM_REG_LOONGARCH_R4 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 4)
+#define KVM_REG_LOONGARCH_R5 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 5)
+#define KVM_REG_LOONGARCH_R6 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 6)
+#define KVM_REG_LOONGARCH_R7 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 7)
+#define KVM_REG_LOONGARCH_R8 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 8)
+#define KVM_REG_LOONGARCH_R9 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 9)
+#define KVM_REG_LOONGARCH_R10 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 10)
+#define KVM_REG_LOONGARCH_R11 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 11)
+#define KVM_REG_LOONGARCH_R12 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 12)
+#define KVM_REG_LOONGARCH_R13 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 13)
+#define KVM_REG_LOONGARCH_R14 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 14)
+#define KVM_REG_LOONGARCH_R15 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 15)
+#define KVM_REG_LOONGARCH_R16 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 16)
+#define KVM_REG_LOONGARCH_R17 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 17)
+#define KVM_REG_LOONGARCH_R18 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 18)
+#define KVM_REG_LOONGARCH_R19 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 19)
+#define KVM_REG_LOONGARCH_R20 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 20)
+#define KVM_REG_LOONGARCH_R21 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 21)
+#define KVM_REG_LOONGARCH_R22 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 22)
+#define KVM_REG_LOONGARCH_R23 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 23)
+#define KVM_REG_LOONGARCH_R24 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 24)
+#define KVM_REG_LOONGARCH_R25 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 25)
+#define KVM_REG_LOONGARCH_R26 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 26)
+#define KVM_REG_LOONGARCH_R27 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 27)
+#define KVM_REG_LOONGARCH_R28 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 28)
+#define KVM_REG_LOONGARCH_R29 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 29)
+#define KVM_REG_LOONGARCH_R30 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 30)
+#define KVM_REG_LOONGARCH_R31 (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 31)
+
+#define KVM_REG_LOONGARCH_HI (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 32)
+#define KVM_REG_LOONGARCH_LO (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 33)
+#define KVM_REG_LOONGARCH_PC (KVM_REG_LOONGARCH_GP | KVM_REG_SIZE_U64 | 34)
+
+/*
+ * KVM_REG_LOONGARCH_KVM - KVM specific control registers.
+ */ + +/* + * CP0_Count control + * DC: Set 0: Master disable CP0_Count and set COUNT_RESUME to now + * Set 1: Master re-enable CP0_Count with unchanged bias, handling timer + * interrupts since COUNT_RESUME + * This can be used to freeze the timer to get a consistent snapshot of + * the CP0_Count and timer interrupt pending state, while also resuming + * safely without losing time or guest timer interrupts. + * Other: Reserved, do not change. + */ +#define KVM_REG_LOONGARCH_COUNT_CTL \ + (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 0) +#define KVM_REG_LOONGARCH_COUNT_CTL_DC 0x00000001 + +/* + * CP0_Count resume monotonic nanoseconds + * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master + * disable). Any reads and writes of Count related registers while + * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is + * cleared again (master enable) any timer interrupts since this time will be + * emulated. + * Modifications to times in the future are rejected. + */ +#define KVM_REG_LOONGARCH_COUNT_RESUME \ + (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 1) +/* + * CP0_Count rate in Hz + * Specifies the rate of the CP0_Count timer in Hz. Modifications occur without + * discontinuities in CP0_Count. + */ +#define KVM_REG_LOONGARCH_COUNT_HZ \ + (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_LOONGARCH_COUNTER \ + (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_LOONGARCH_VCPU_RESET \ + (KVM_REG_LOONGARCH_KVM | KVM_REG_SIZE_U64 | 4) + +#define KVM_REG_LBT_SCR0 \ + (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 1) +#define KVM_REG_LBT_SCR1 \ + (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 2) +#define KVM_REG_LBT_SCR2 \ + (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 3) +#define KVM_REG_LBT_SCR3 \ + (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 4) +#define KVM_REG_LBT_FLAGS \ + (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 5) +#define KVM_REG_LBT_FTOP \ + (KVM_REG_LOONGARCH_LBT | KVM_REG_SIZE_U64 | 6) + +struct kvm_iocsr_entry { + __u32 addr; + __u32 pad; + __u64 data; +}; + +struct kvm_csr_entry { + __u32 index; + __u32 reserved; + __u64 data; +}; + +/* for KVM_GET_MSRS and KVM_SET_MSRS */ +struct kvm_msrs { + __u32 ncsrs; /* number of msrs in entries */ + __u32 pad; + struct kvm_csr_entry entries[0]; +}; + +#define __KVM_HAVE_IRQ_LINE + +struct kvm_debug_exit_arch { + __u64 epc; + __u32 fwps; + __u32 mwps; + __u32 exception; +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct hw_breakpoint { + __u64 addr; + __u64 mask; + __u32 asid; + __u32 ctrl; +}; + +struct kvm_guest_debug_arch { + struct hw_breakpoint data_breakpoint[KVM_DATA_HW_BREAKPOINT_NUM]; + struct hw_breakpoint inst_breakpoint[KVM_INST_HW_BREAKPOINT_NUM]; + int inst_bp_nums, data_bp_nums; +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +struct kvm_loongarch_interrupt { + /* in */ + __u32 cpu; + __u32 irq; +}; + +#define KVM_IRQCHIP_LS7A_IOAPIC 0x0 +#define KVM_IRQCHIP_LS3A_GIPI 0x1 +#define KVM_IRQCHIP_LS3A_HT_IRQ 0x2 +#define KVM_IRQCHIP_LS3A_ROUTE 0x3 +#define KVM_IRQCHIP_LS3A_EXTIRQ 0x4 +#define KVM_IRQCHIP_LS3A_IPMASK 0x5 +#define KVM_NR_IRQCHIPS 1 +#define KVM_IRQCHIP_NUM_PINS 64 + +#define KVM_MAX_CORES 256 +#define KVM_EXTIOI_IRQS (256) +#define KVM_EXTIOI_IRQS_BITMAP_SIZE (KVM_EXTIOI_IRQS / 8) +/* map to ipnum per 32 irqs */ +#define KVM_EXTIOI_IRQS_IPMAP_SIZE (KVM_EXTIOI_IRQS / 32) +#define KVM_EXTIOI_IRQS_PER_GROUP 32 +#define KVM_EXTIOI_IRQS_COREMAP_SIZE (KVM_EXTIOI_IRQS) +#define 
KVM_EXTIOI_IRQS_NODETYPE_SIZE 16
+
+struct ls7a_ioapic_state {
+    __u64 int_id;
+    /* 0x020 interrupt mask register */
+    __u64 int_mask;
+    /* 0x040 1=msi */
+    __u64 htmsi_en;
+    /* 0x060 edge=1 level=0 */
+    __u64 intedge;
+    /* 0x080 to clear an edge interrupt: write 1 to clear; writing 0 is unused */
+    __u64 intclr;
+    /* 0x0c0 */
+    __u64 auto_crtl0;
+    /* 0x0e0 */
+    __u64 auto_crtl1;
+    /* 0x100 - 0x140 */
+    __u8 route_entry[64];
+    /* 0x200 - 0x240 */
+    __u8 htmsi_vector[64];
+    /* 0x300 */
+    __u64 intisr_chip0;
+    /* 0x320 */
+    __u64 intisr_chip1;
+    /* edge detection */
+    __u64 last_intirr;
+    /* 0x380 interrupt request register */
+    __u64 intirr;
+    /* 0x3a0 interrupt service register */
+    __u64 intisr;
+    /* 0x3e0 interrupt level polarity selection register,
+     * 0 for high level trigger
+     */
+    __u64 int_polarity;
+};
+
+struct loongarch_gipi_single {
+    __u32 status;
+    __u32 en;
+    __u32 set;
+    __u32 clear;
+    __u64 buf[4];
+};
+
+struct loongarch_gipiState {
+    struct loongarch_gipi_single core[KVM_MAX_CORES];
+};
+
+struct kvm_loongarch_ls3a_extirq_state {
+    union ext_en_r
+    {
+        uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
+        uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
+        uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
+    } ext_en_r;
+    union bounce_r
+    {
+        uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
+        uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
+        uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
+    } bounce_r;
+    union ext_isr_r
+    {
+        uint64_t reg_u64[KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
+        uint32_t reg_u32[KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
+        uint8_t reg_u8[KVM_EXTIOI_IRQS_BITMAP_SIZE];
+    } ext_isr_r;
+    union ext_core_isr_r
+    {
+        uint64_t reg_u64[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE / 8];
+        uint32_t reg_u32[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE / 4];
+        uint8_t reg_u8[KVM_MAX_CORES][KVM_EXTIOI_IRQS_BITMAP_SIZE];
+    } ext_core_isr_r;
+    union ip_map_r
+    {
+        uint64_t reg_u64;
+        uint32_t reg_u32[KVM_EXTIOI_IRQS_IPMAP_SIZE / 4];
+        uint8_t reg_u8[KVM_EXTIOI_IRQS_IPMAP_SIZE];
+    } ip_map_r;
+    union core_map_r
+    {
+        uint64_t reg_u64[KVM_EXTIOI_IRQS_COREMAP_SIZE / 8];
+        uint32_t reg_u32[KVM_EXTIOI_IRQS_COREMAP_SIZE / 4];
+        uint8_t reg_u8[KVM_EXTIOI_IRQS_COREMAP_SIZE];
+    } core_map_r;
+    union node_type_r
+    {
+        uint64_t reg_u64[KVM_EXTIOI_IRQS_NODETYPE_SIZE / 4];
+        uint32_t reg_u32[KVM_EXTIOI_IRQS_NODETYPE_SIZE / 2];
+        uint16_t reg_u16[KVM_EXTIOI_IRQS_NODETYPE_SIZE];
+        uint8_t reg_u8[KVM_EXTIOI_IRQS_NODETYPE_SIZE * 2];
+    } node_type_r;
+};
+
+struct loongarch_kvm_irqchip {
+    __u16 chip_id;
+    __u16 len;
+    __u16 vcpu_id;
+    __u16 reserved;
+    char data[0];
+};
+
+#endif /* __LINUX_KVM_LOONGARCH_H */
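All of the extioi sizes above derive from KVM_EXTIOI_IRQS (256). A short sketch, not part of the header itself, spelling the arithmetic out as compile-time checks:

/* Sketch: the derived sizes, made explicit (256 irqs). */
_Static_assert(KVM_EXTIOI_IRQS_BITMAP_SIZE == 32,
               "one bit per irq -> 256 / 8 bytes");
_Static_assert(KVM_EXTIOI_IRQS_IPMAP_SIZE == 8,
               "one ipmap byte per group of 32 irqs");
_Static_assert(KVM_EXTIOI_IRQS_COREMAP_SIZE == 256,
               "one coremap byte per irq");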
diff --git a/linux-headers/asm-loongarch64/sgidefs.h b/linux-headers/asm-loongarch64/sgidefs.h
new file mode 100644
index 0000000000000000000000000000000000000000..89e8be582e1c09f75cddd285851c74549aa497d4
--- /dev/null
+++ b/linux-headers/asm-loongarch64/sgidefs.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef __ASM_SGIDEFS_H
+#define __ASM_SGIDEFS_H
+
+#define _LOONGARCH_ISA_LOONGARCH32 6
+#define _LOONGARCH_ISA_LOONGARCH64 7
+
+/*
+ * Subprogram calling convention
+ */
+#define _LOONGARCH_SIM_ABILP32 1
+#define _LOONGARCH_SIM_ABILPX32 2
+#define _LOONGARCH_SIM_ABILP64 3
+
+#endif /* __ASM_SGIDEFS_H */
diff --git a/linux-headers/asm-loongarch64/unistd.h b/linux-headers/asm-loongarch64/unistd.h
new file mode 100644
index 0000000000000000000000000000000000000000..ef710673a3e2f722175c017b1f7214e33895a021
--- /dev/null
+++ b/linux-headers/asm-loongarch64/unistd.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#endif /* __LP64__ */
+
+#include <asm-generic/unistd.h>
diff --git a/linux-headers/asm-sw64/kvm.h b/linux-headers/asm-sw64/kvm.h
new file mode 100644
index 0000000000000000000000000000000000000000..5de7014b5237584d716c802810bb970d482f1046
--- /dev/null
+++ b/linux-headers/asm-sw64/kvm.h
@@ -0,0 +1,136 @@
+#ifndef __LINUX_KVM_SW64_H
+#define __LINUX_KVM_SW64_H
+
+#include <linux/types.h>
+
+#define __KVM_HAVE_GUEST_DEBUG
+
+/*
+ * for KVM_GET_REGS and KVM_SET_REGS
+ */
+struct kvm_regs {
+    unsigned long r0;
+    unsigned long r1;
+    unsigned long r2;
+    unsigned long r3;
+
+    unsigned long r4;
+    unsigned long r5;
+    unsigned long r6;
+    unsigned long r7;
+
+    unsigned long r8;
+    unsigned long r9;
+    unsigned long r10;
+    unsigned long r11;
+
+    unsigned long r12;
+    unsigned long r13;
+    unsigned long r14;
+    unsigned long r15;
+
+    unsigned long r19;
+    unsigned long r20;
+    unsigned long r21;
+    unsigned long r22;
+
+    unsigned long r23;
+    unsigned long r24;
+    unsigned long r25;
+    unsigned long r26;
+
+    unsigned long r27;
+    unsigned long r28;
+    unsigned long __padding0;
+    unsigned long fpcr;
+
+    unsigned long fp[124];
+    /* These are saved by hmcode: */
+    unsigned long ps;
+    unsigned long pc;
+    unsigned long gp;
+    unsigned long r16;
+    unsigned long r17;
+    unsigned long r18;
+};
+
+struct vcpucb {
+    unsigned long go_flag;
+    unsigned long pcbb;
+    unsigned long ksp;
+    unsigned long usp;
+    unsigned long kgp;
+    unsigned long ent_arith;
+    unsigned long ent_if;
+    unsigned long ent_int;
+    unsigned long ent_mm;
+    unsigned long ent_sys;
+    unsigned long ent_una;
+    unsigned long stack_pc;
+    unsigned long new_a0;
+    unsigned long new_a1;
+    unsigned long new_a2;
+    unsigned long whami;
+    unsigned long csr_save;
+    unsigned long wakeup_magic;
+    unsigned long host_vcpucb;
+    unsigned long upcr;
+    unsigned long vpcr;
+    unsigned long dtb_pcr;
+    unsigned long guest_ksp;
+    unsigned long guest_usp;
+    unsigned long vcpu_irq_disabled;
+    unsigned long vcpu_irq;
+    unsigned long ptbr;
+    unsigned long int_stat0;
+    unsigned long int_stat1;
+    unsigned long int_stat2;
+    unsigned long int_stat3;
+    unsigned long reset_entry;
+    unsigned long pvcpu;
+    unsigned long exit_reason;
+    unsigned long ipaddr;
+    unsigned long vcpu_irq_vector;
+    unsigned long pri_base;
+    unsigned long stack_pc_dfault;
+    unsigned long guest_p20;
+ unsigned long guest_dfault_double; + unsigned long guest_irqs_pending; + unsigned long guest_hm_r30; + unsigned long migration_mark; + unsigned long guest_longtime; + unsigned long guest_longtime_offset; + unsigned long reserved[3]; +}; + +/* + * for KVM_GET_FPU and KVM_SET_FPU + */ +struct kvm_fpu { +}; + +/* + * KVM SW_64 specific structures and definitions + */ +struct kvm_debug_exit_arch { + unsigned long epc; +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* dummy definition */ +struct kvm_sregs { +}; + +#define KVM_SW64_VCPU_INIT _IO(KVMIO, 0xba) +#define KVM_SW64_USE_SLAVE _IO(KVMIO, 0xbb) +#define KVM_SW64_GET_VCB _IO(KVMIO, 0xbc) +#define KVM_SW64_SET_VCB _IO(KVMIO, 0xbd) + +#endif /* __LINUX_KVM_SW64_H */ diff --git a/linux-headers/asm-sw64/unistd.h b/linux-headers/asm-sw64/unistd.h new file mode 100644 index 0000000000000000000000000000000000000000..affe297e734c41df6d57eddc1539e104d9d7d73a --- /dev/null +++ b/linux-headers/asm-sw64/unistd.h @@ -0,0 +1,380 @@ +#ifndef _UAPI_ASM_SW64_UNISTD_64_H +#define _UAPI_ASM_SW64_UNISTD_64_H + +#define __NR_exit 1 +#define __NR_fork 2 +#define __NR_read 3 +#define __NR_write 4 +#define __NR_close 6 +#define __NR_osf_wait4 7 +#define __NR_link 9 +#define __NR_unlink 10 +#define __NR_chdir 12 +#define __NR_fchdir 13 +#define __NR_mknod 14 +#define __NR_chmod 15 +#define __NR_chown 16 +#define __NR_brk 17 +#define __NR_lseek 19 +#define __NR_getxpid 20 +#define __NR_osf_mount 21 +#define __NR_umount2 22 +#define __NR_setuid 23 +#define __NR_getxuid 24 +#define __NR_ptrace 26 +#define __NR_access 33 +#define __NR_sync 36 +#define __NR_kill 37 +#define __NR_setpgid 39 +#define __NR_dup 41 +#define __NR_pipe 42 +#define __NR_osf_set_program_attributes 43 +#define __NR_open 45 +#define __NR_getxgid 47 +#define __NR_osf_sigprocmask 48 +#define __NR_acct 51 +#define __NR_sigpending 52 +#define __NR_ioctl 54 +#define __NR_symlink 57 +#define __NR_readlink 58 +#define __NR_execve 59 +#define __NR_umask 60 +#define __NR_chroot 61 +#define __NR_getpgrp 63 +#define __NR_getpagesize 64 +#define __NR_vfork 66 +#define __NR_stat 67 +#define __NR_lstat 68 +#define __NR_mmap 71 +#define __NR_munmap 73 +#define __NR_mprotect 74 +#define __NR_madvise 75 +#define __NR_vhangup 76 +#define __NR_getgroups 79 +#define __NR_setgroups 80 +#define __NR_setpgrp 82 +#define __NR_osf_setitimer 83 +#define __NR_osf_getitimer 86 +#define __NR_gethostname 87 +#define __NR_sethostname 88 +#define __NR_getdtablesize 89 +#define __NR_dup2 90 +#define __NR_fstat 91 +#define __NR_fcntl 92 +#define __NR_osf_select 93 +#define __NR_poll 94 +#define __NR_fsync 95 +#define __NR_setpriority 96 +#define __NR_socket 97 +#define __NR_connect 98 +#define __NR_accept 99 +#define __NR_getpriority 100 +#define __NR_send 101 +#define __NR_recv 102 +#define __NR_sigreturn 103 +#define __NR_bind 104 +#define __NR_setsockopt 105 +#define __NR_listen 106 +#define __NR_sigsuspend 111 +#define __NR_osf_sigstack 112 +#define __NR_recvmsg 113 +#define __NR_sendmsg 114 +#define __NR_osf_gettimeofday 116 +#define __NR_osf_getrusage 117 +#define __NR_getsockopt 118 +#define __NR_socketcall 119 +#define __NR_readv 120 +#define __NR_writev 121 +#define __NR_osf_settimeofday 122 +#define __NR_fchown 123 +#define __NR_fchmod 124 +#define __NR_recvfrom 125 +#define __NR_setreuid 126 +#define __NR_setregid 127 +#define __NR_rename 128 +#define __NR_truncate 129 +#define __NR_ftruncate 130 +#define __NR_flock 
131 +#define __NR_setgid 132 +#define __NR_sendto 133 +#define __NR_shutdown 134 +#define __NR_socketpair 135 +#define __NR_mkdir 136 +#define __NR_rmdir 137 +#define __NR_osf_utimes 138 +#define __NR_getpeername 141 +#define __NR_getrlimit 144 +#define __NR_setrlimit 145 +#define __NR_setsid 147 +#define __NR_quotactl 148 +#define __NR_getsockname 150 +#define __NR_sigaction 156 +#define __NR_osf_getdirentries 159 +#define __NR_osf_statfs 160 +#define __NR_osf_fstatfs 161 +#define __NR_osf_getdomainname 165 +#define __NR_setdomainname 166 +#define __NR_bpf 170 +#define __NR_userfaultfd 171 +#define __NR_membarrier 172 +#define __NR_mlock2 173 +#define __NR_getpid 174 +#define __NR_getppid 175 +#define __NR_getuid 176 +#define __NR_geteuid 177 +#define __NR_getgid 178 +#define __NR_getegid 179 +#define __NR_osf_swapon 199 +#define __NR_msgctl 200 +#define __NR_msgget 201 +#define __NR_msgrcv 202 +#define __NR_msgsnd 203 +#define __NR_semctl 204 +#define __NR_semget 205 +#define __NR_semop 206 +#define __NR_osf_utsname 207 +#define __NR_lchown 208 +#define __NR_shmat 209 +#define __NR_shmctl 210 +#define __NR_shmdt 211 +#define __NR_shmget 212 +#define __NR_msync 217 +#define __NR_osf_stat 224 +#define __NR_osf_lstat 225 +#define __NR_osf_fstat 226 +#define __NR_osf_statfs64 227 +#define __NR_osf_fstatfs64 228 +#define __NR_statfs64 229 +#define __NR_fstatfs64 230 +#define __NR_getpgid 233 +#define __NR_getsid 234 +#define __NR_sigaltstack 235 +#define __NR_osf_sysinfo 241 +#define __NR_osf_proplist_syscall 244 +#define __NR_osf_usleep_thread 251 +#define __NR_sysfs 254 +#define __NR_osf_getsysinfo 256 +#define __NR_osf_setsysinfo 257 +#define __NR_bdflush 300 +#define __NR_sethae 301 +#define __NR_mount 302 +#define __NR_old_adjtimex 303 +#define __NR_swapoff 304 +#define __NR_getdents 305 +#define __NR_create_module 306 +#define __NR_init_module 307 +#define __NR_delete_module 308 +#define __NR_get_kernel_syms 309 +#define __NR_syslog 310 +#define __NR_reboot 311 +#define __NR_clone 312 +#define __NR_uselib 313 +#define __NR_mlock 314 +#define __NR_munlock 315 +#define __NR_mlockall 316 +#define __NR_munlockall 317 +#define __NR_sysinfo 318 +#define __NR__sysctl 319 +#define __NR_oldumount 321 +#define __NR_swapon 322 +#define __NR_times 323 +#define __NR_personality 324 +#define __NR_setfsuid 325 +#define __NR_setfsgid 326 +#define __NR_ustat 327 +#define __NR_statfs 328 +#define __NR_fstatfs 329 +#define __NR_sched_setparam 330 +#define __NR_sched_getparam 331 +#define __NR_sched_setscheduler 332 +#define __NR_sched_getscheduler 333 +#define __NR_sched_yield 334 +#define __NR_sched_get_priority_max 335 +#define __NR_sched_get_priority_min 336 +#define __NR_sched_rr_get_interval 337 +#define __NR_afs_syscall 338 +#define __NR_uname 339 +#define __NR_nanosleep 340 +#define __NR_mremap 341 +#define __NR_nfsservctl 342 +#define __NR_setresuid 343 +#define __NR_getresuid 344 +#define __NR_pciconfig_read 345 +#define __NR_pciconfig_write 346 +#define __NR_query_module 347 +#define __NR_prctl 348 +#define __NR_pread64 349 +#define __NR_pwrite64 350 +#define __NR_rt_sigreturn 351 +#define __NR_rt_sigaction 352 +#define __NR_rt_sigprocmask 353 +#define __NR_rt_sigpending 354 +#define __NR_rt_sigtimedwait 355 +#define __NR_rt_sigqueueinfo 356 +#define __NR_rt_sigsuspend 357 +#define __NR_select 358 +#define __NR_gettimeofday 359 +#define __NR_settimeofday 360 +#define __NR_getitimer 361 +#define __NR_setitimer 362 +#define __NR_utimes 363 +#define __NR_getrusage 364 +#define __NR_wait4 365 
+#define __NR_adjtimex 366 +#define __NR_getcwd 367 +#define __NR_capget 368 +#define __NR_capset 369 +#define __NR_sendfile 370 +#define __NR_setresgid 371 +#define __NR_getresgid 372 +#define __NR_dipc 373 +#define __NR_pivot_root 374 +#define __NR_mincore 375 +#define __NR_pciconfig_iobase 376 +#define __NR_getdents64 377 +#define __NR_gettid 378 +#define __NR_readahead 379 +#define __NR_tkill 381 +#define __NR_setxattr 382 +#define __NR_lsetxattr 383 +#define __NR_fsetxattr 384 +#define __NR_getxattr 385 +#define __NR_lgetxattr 386 +#define __NR_fgetxattr 387 +#define __NR_listxattr 388 +#define __NR_llistxattr 389 +#define __NR_flistxattr 390 +#define __NR_removexattr 391 +#define __NR_lremovexattr 392 +#define __NR_fremovexattr 393 +#define __NR_futex 394 +#define __NR_sched_setaffinity 395 +#define __NR_sched_getaffinity 396 +#define __NR_tuxcall 397 +#define __NR_io_setup 398 +#define __NR_io_destroy 399 +#define __NR_io_getevents 400 +#define __NR_io_submit 401 +#define __NR_io_cancel 402 +#define __NR_io_pgetevents 403 +#define __NR_rseq 404 +#define __NR_exit_group 405 +#define __NR_lookup_dcookie 406 +#define __NR_epoll_create 407 +#define __NR_epoll_ctl 408 +#define __NR_epoll_wait 409 +#define __NR_remap_file_pages 410 +#define __NR_set_tid_address 411 +#define __NR_restart_syscall 412 +#define __NR_fadvise64 413 +#define __NR_timer_create 414 +#define __NR_timer_settime 415 +#define __NR_timer_gettime 416 +#define __NR_timer_getoverrun 417 +#define __NR_timer_delete 418 +#define __NR_clock_settime 419 +#define __NR_clock_gettime 420 +#define __NR_clock_getres 421 +#define __NR_clock_nanosleep 422 +#define __NR_semtimedop 423 +#define __NR_tgkill 424 +#define __NR_stat64 425 +#define __NR_lstat64 426 +#define __NR_fstat64 427 +#define __NR_vserver 428 +#define __NR_mbind 429 +#define __NR_get_mempolicy 430 +#define __NR_set_mempolicy 431 +#define __NR_mq_open 432 +#define __NR_mq_unlink 433 +#define __NR_mq_timedsend 434 +#define __NR_mq_timedreceive 435 +#define __NR_mq_notify 436 +#define __NR_mq_getsetattr 437 +#define __NR_waitid 438 +#define __NR_add_key 439 +#define __NR_request_key 440 +#define __NR_keyctl 441 +#define __NR_ioprio_set 442 +#define __NR_ioprio_get 443 +#define __NR_inotify_init 444 +#define __NR_inotify_add_watch 445 +#define __NR_inotify_rm_watch 446 +#define __NR_fdatasync 447 +#define __NR_kexec_load 448 +#define __NR_migrate_pages 449 +#define __NR_openat 450 +#define __NR_mkdirat 451 +#define __NR_mknodat 452 +#define __NR_fchownat 453 +#define __NR_futimesat 454 +#define __NR_fstatat64 455 +#define __NR_unlinkat 456 +#define __NR_renameat 457 +#define __NR_linkat 458 +#define __NR_symlinkat 459 +#define __NR_readlinkat 460 +#define __NR_fchmodat 461 +#define __NR_faccessat 462 +#define __NR_pselect6 463 +#define __NR_ppoll 464 +#define __NR_unshare 465 +#define __NR_set_robust_list 466 +#define __NR_get_robust_list 467 +#define __NR_splice 468 +#define __NR_sync_file_range 469 +#define __NR_tee 470 +#define __NR_vmsplice 471 +#define __NR_move_pages 472 +#define __NR_getcpu 473 +#define __NR_epoll_pwait 474 +#define __NR_utimensat 475 +#define __NR_signalfd 476 +#define __NR_timerfd 477 +#define __NR_eventfd 478 +#define __NR_recvmmsg 479 +#define __NR_fallocate 480 +#define __NR_timerfd_create 481 +#define __NR_timerfd_settime 482 +#define __NR_timerfd_gettime 483 +#define __NR_signalfd4 484 +#define __NR_eventfd2 485 +#define __NR_epoll_create1 486 +#define __NR_dup3 487 +#define __NR_pipe2 488 +#define __NR_inotify_init1 489 +#define 
__NR_preadv 490 +#define __NR_pwritev 491 +#define __NR_rt_tgsigqueueinfo 492 +#define __NR_perf_event_open 493 +#define __NR_fanotify_init 494 +#define __NR_fanotify_mark 495 +#define __NR_prlimit64 496 +#define __NR_name_to_handle_at 497 +#define __NR_open_by_handle_at 498 +#define __NR_clock_adjtime 499 +#define __NR_syncfs 500 +#define __NR_setns 501 +#define __NR_accept4 502 +#define __NR_sendmmsg 503 +#define __NR_process_vm_readv 504 +#define __NR_process_vm_writev 505 +#define __NR_kcmp 506 +#define __NR_finit_module 507 +#define __NR_sched_setattr 508 +#define __NR_sched_getattr 509 +#define __NR_renameat2 510 +#define __NR_getrandom 511 +#define __NR_memfd_create 512 +#define __NR_execveat 513 +#define __NR_seccomp 514 +#define __NR_copy_file_range 515 +#define __NR_preadv2 516 +#define __NR_pwritev2 517 +#define __NR_statx 518 + +#ifdef __KERNEL__ +#define __NR_syscalls 519 +#endif + +#endif /* _UAPI_ASM_SW64_UNISTD_64_H */ diff --git a/linux-headers/asm-x86/kvm.h b/linux-headers/asm-x86/kvm.h index a6c327f8ad9e5f079dd7039229e3df7e296fc345..46e730b62f506522d909766e982fbc31689b0dc1 100644 --- a/linux-headers/asm-x86/kvm.h +++ b/linux-headers/asm-x86/kvm.h @@ -324,6 +324,7 @@ struct kvm_reinject_control { #define KVM_VCPUEVENT_VALID_SHADOW 0x00000004 #define KVM_VCPUEVENT_VALID_SMM 0x00000008 #define KVM_VCPUEVENT_VALID_PAYLOAD 0x00000010 +#define KVM_VCPUEVENT_VALID_TRIPLE_FAULT 0x00000020 /* Interrupt shadow states */ #define KVM_X86_SHADOW_INT_MOV_SS 0x01 @@ -358,7 +359,10 @@ struct kvm_vcpu_events { __u8 smm_inside_nmi; __u8 latched_init; } smi; - __u8 reserved[27]; + struct { + __u8 pending; + } triple_fault; + __u8 reserved[26]; __u8 exception_has_payload; __u64 exception_payload; }; @@ -437,6 +441,9 @@ struct kvm_sync_regs { #define KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE 0x00000001 +/* attributes for system fd (group 0) */ +#define KVM_X86_XCOMP_GUEST_SUPP 0 + struct kvm_vmx_nested_state_data { __u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; __u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE]; diff --git a/linux-headers/linux/kvm.h b/linux-headers/linux/kvm.h index bcaf66cc4d2a06a861322b0ee82c2c5b38454b98..fa22d85adf7e80387aa0317b139f92583ce71491 100644 --- a/linux-headers/linux/kvm.h +++ b/linux-headers/linux/kvm.h @@ -14,6 +14,8 @@ #include #include +#include "sysemu/numa.h" + #define KVM_API_VERSION 12 /* *** Deprecated interfaces *** */ @@ -269,6 +271,7 @@ struct kvm_xen_exit { #define KVM_EXIT_AP_RESET_HOLD 32 #define KVM_EXIT_X86_BUS_LOCK 33 #define KVM_EXIT_XEN 34 +#define KVM_EXIT_NOTIFY 37 /* For KVM_EXIT_INTERNAL_ERROR */ /* Emulate instruction failed. */ @@ -469,6 +472,11 @@ struct kvm_run { } msr; /* KVM_EXIT_XEN */ struct kvm_xen_exit xen; + /* KVM_EXIT_NOTIFY */ + struct { +#define KVM_NOTIFY_CONTEXT_INVALID (1 << 0) + __u32 flags; + } notify; /* Fix the size of the union. 
*/ char padding[256]; }; @@ -1112,6 +1120,17 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_BINARY_STATS_FD 203 #define KVM_CAP_EXIT_ON_EMULATION_FAILURE 204 #define KVM_CAP_ARM_MTE 205 +#define KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM 206 +#define KVM_CAP_VM_GPA_BITS 207 +#define KVM_CAP_XSAVE2 208 +#define KVM_CAP_SYS_ATTRIBUTES 209 +#define KVM_CAP_X86_TRIPLE_FAULT_EVENT 218 +#define KVM_CAP_X86_NOTIFY_VMEXIT 219 + +#define KVM_CAP_ARM_CPU_FEATURE 555 +#define KVM_CAP_ARM_TMM 300 + +#define KVM_CAP_ARM_VIRT_MSI_BYPASS 799 #ifdef KVM_CAP_IRQ_ROUTING @@ -1354,6 +1373,32 @@ struct kvm_vfio_spapr_tce { __s32 tablefd; }; +#define MAX_NUMA_NODE 8 +#define MAX_CPU_BIT_MAP 4 +#define MAX_NODE_BIT_MAP (MAX_NODES / BITS_PER_LONG) + +struct kvm_numa_node { + __u64 numa_id; + __u64 ipa_start; + __u64 ipa_size; + __u64 host_numa_nodes[MAX_NODE_BIT_MAP]; + __u64 cpu_id[MAX_CPU_BIT_MAP]; +}; + +struct kvm_numa_info { + __u64 numa_cnt; + struct kvm_numa_node numa_nodes[MAX_NUMA_NODE]; +}; + +struct kvm_user_data { + __u64 loader_start; + __u64 image_end; + __u64 initrd_start; + __u64 dtb_end; + __u64 ram_size; + struct kvm_numa_info numa_info; +}; + /* * ioctls for VM fds */ @@ -1372,7 +1417,7 @@ struct kvm_vfio_spapr_tce { struct kvm_userspace_memory_region) #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) #define KVM_SET_IDENTITY_MAP_ADDR _IOW(KVMIO, 0x48, __u64) - +#define KVM_LOAD_USER_DATA _IOW(KVMIO, 0x49, struct kvm_user_data) /* enable ucontrol for s390 */ struct kvm_s390_ucas_mapping { __u64 user_addr; @@ -1417,6 +1462,17 @@ struct kvm_s390_ucas_mapping { #define KVM_XEN_HVM_CONFIG _IOW(KVMIO, 0x7a, struct kvm_xen_hvm_config) #define KVM_SET_CLOCK _IOW(KVMIO, 0x7b, struct kvm_clock_data) #define KVM_GET_CLOCK _IOR(KVMIO, 0x7c, struct kvm_clock_data) + +#ifdef __aarch64__ +struct kvm_master_dev_info +{ + __u32 nvectors; /* number of msi vectors */ + struct kvm_msi msi[0]; +}; +#define KVM_CREATE_SHADOW_DEV _IOW(KVMIO, 0xf0, struct kvm_master_dev_info) +#define KVM_DEL_SHADOW_DEV _IOW(KVMIO, 0xf1, __u32) +#endif + /* Available with KVM_CAP_PIT_STATE2 */ #define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2) #define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) @@ -2002,6 +2058,38 @@ struct kvm_stats_desc { char name[]; }; +#ifdef __loongarch__ +struct kvm_loongarch_vcpu_state { + __u8 online_vcpus; + __u8 is_migrate; + __u32 cpu_freq; + __u32 count_ctl; + __u64 pending_exceptions; + __u64 pending_exceptions_clr; + __u64 core_ext_ioisr[4]; +}; + +#define KVM_CAP_LOONGARCH_FPU 800 +#define KVM_CAP_LOONGARCH_LSX 801 +#define KVM_CAP_LOONGARCH_VZ 802 +#define KVM_REG_LOONGARCH 0x9000000000000000ULL +#define KVM_LARCH_GET_VCPU_STATE \ + _IOR(KVMIO, 0xc0, struct kvm_loongarch_vcpu_state) +#define KVM_LARCH_SET_VCPU_STATE \ + _IOW(KVMIO, 0xc1, struct kvm_loongarch_vcpu_state) +#define KVM_LARCH_GET_CPUCFG _IOR(KVMIO, 0xc2, struct kvm_cpucfg) +#define KVM_LOONGARCH_GET_IOCSR _IOR(KVMIO, 0xc3, struct kvm_iocsr_entry) +#define KVM_LOONGARCH_SET_IOCSR _IOW(KVMIO, 0xc4, struct kvm_iocsr_entry) +#define KVM_LARCH_SET_CPUCFG _IOR(KVMIO, 0xc5, struct kvm_cpucfg) +#endif + #define KVM_GET_STATS_FD _IO(KVMIO, 0xce) +/* Available with KVM_CAP_XSAVE2 */ +#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) + +/* Available with KVM_CAP_X86_NOTIFY_VMEXIT */ +#define KVM_X86_NOTIFY_VMEXIT_ENABLED (1ULL << 0) +#define KVM_X86_NOTIFY_VMEXIT_USER (1ULL << 1) + #endif /* __LINUX_KVM_H */ diff --git a/linux-headers/linux/vfio.h b/linux-headers/linux/vfio.h index 
e680594f27b7c2b5160ad3e711f6a22b58964faf..f4ff038e8c2d9b7ab29a92f6b89a0332cb061b78 100644
--- a/linux-headers/linux/vfio.h
+++ b/linux-headers/linux/vfio.h
@@ -52,6 +52,16 @@
 /* Supports the vaddr flag for DMA map and unmap */
 #define VFIO_UPDATE_VADDR 10
+/*
+ * The vfio_iommu driver may support manual clearing of the dirty log: the
+ * dirty log can be requested not to be cleared automatically after it is
+ * copied to userspace, in which case it is the user's duty to clear it.
+ *
+ * Note: please refer to VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP_NOCLEAR and
+ * VFIO_IOMMU_DIRTY_PAGES_FLAG_CLEAR_BITMAP.
+ */
+#define VFIO_DIRTY_LOG_MANUAL_CLEAR 11
+
 /*
  * The IOCTL interface is designed for extensibility by embedding the
  * structure length (argsz) and flags into structures passed between
@@ -1196,8 +1206,30 @@ struct vfio_iommu_type1_dma_unmap {
  * actual bitmap. If dirty pages logging is not enabled, an error will be
  * returned.
  *
- * Only one of the flags _START, _STOP and _GET may be specified at a time.
+ * The VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP_NOCLEAR flag is almost the same
+ * as VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP, except that the underlying dirty
+ * bitmap is not cleared automatically. The user can clear it manually by
+ * calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_CLEAR_BITMAP flag
+ * set.
 *
+ * Calling the IOCTL with the VFIO_IOMMU_DIRTY_PAGES_FLAG_CLEAR_BITMAP flag set
+ * instructs the IOMMU driver to clear the dirty status of pages in a bitmap
+ * for the IOMMU container for a given IOVA range. The user must specify the
+ * IOVA range, the bitmap and the pgsize through the structure
+ * vfio_iommu_type1_dirty_bitmap_get in the data[] portion. This interface
+ * supports clearing a bitmap of the smallest supported pgsize only and can be
+ * modified in future to clear a bitmap of any specified supported pgsize. The
+ * user must provide a memory area for the bitmap memory and specify its size
+ * in bitmap.size. One bit is used to represent one page consecutively starting
+ * from the iova offset. The user should provide the page size in the
+ * bitmap.pgsize field. A bit set in the bitmap indicates that the dirty status
+ * of the page at that offset from iova is cleared, and dirty tracking is
+ * re-enabled for that page. The caller must set argsz to a value including the
+ * size of structure vfio_iommu_dirty_bitmap_get, but excluding the size of the
+ * actual bitmap. If dirty pages logging is not enabled, an error will be
+ * returned. Note: the user should clear the dirty log before handling the
+ * corresponding dirty pages.
+ *
+ * Only one of the flags _START, _STOP, _GET, _GET_NOCLEAR, and _CLEAR may be
+ * specified at a time.
 */
 struct vfio_iommu_type1_dirty_bitmap {
 	__u32 argsz;
@@ -1205,6 +1237,8 @@ struct vfio_iommu_type1_dirty_bitmap {
 #define VFIO_IOMMU_DIRTY_PAGES_FLAG_START (1 << 0)
 #define VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP (1 << 1)
 #define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP (1 << 2)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP_NOCLEAR (1 << 3)
+#define VFIO_IOMMU_DIRTY_PAGES_FLAG_CLEAR_BITMAP (1 << 4)
 	__u8 data[];
 };
diff --git a/linux-headers/linux/vhost.h b/linux-headers/linux/vhost.h
index c998860d7bbc4351c37c702ea69ea88a814b19cf..457923974c532c345af8e30754736d824b9997b3 100644
--- a/linux-headers/linux/vhost.h
+++ b/linux-headers/linux/vhost.h
@@ -43,6 +43,10 @@
  * The bit is set using an atomic 32 bit operation. */
 /* Set base address for logging.
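 * The log is a userspace-allocated bitmap in which the kernel marks guest
 * pages the device has written, so migration code can find dirty memory. A
 * sketch of arming it (error handling omitted; log_size is a caller-chosen
 * value, not something this header specifies):
 *
 *   void *log = calloc(log_size, 1);
 *   __u64 log_base = (__u64)(uintptr_t)log;
 *   ioctl(vhost_fd, VHOST_SET_LOG_BASE, &log_base);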
*/ #define VHOST_SET_LOG_BASE _IOW(VHOST_VIRTIO, 0x04, __u64) +/* Set buffer size for logging */ +#define VHOST_SET_LOG_SIZE _IOW(VHOST_VIRTIO, 0x05, __u64) +/* Logging sync */ +#define VHOST_LOG_SYNC _IO(VHOST_VIRTIO, 0x06) /* Specify an eventfd file descriptor to signal on log write. */ #define VHOST_SET_LOG_FD _IOW(VHOST_VIRTIO, 0x07, int) @@ -150,4 +154,50 @@ /* Get the valid iova range */ #define VHOST_VDPA_GET_IOVA_RANGE _IOR(VHOST_VIRTIO, 0x78, \ struct vhost_vdpa_iova_range) + +/* Get the config size */ +#define VHOST_VDPA_GET_CONFIG_SIZE _IOR(VHOST_VIRTIO, 0x79, __u32) + +/* Get the count of all virtqueues */ +#define VHOST_VDPA_GET_VQS_COUNT _IOR(VHOST_VIRTIO, 0x80, __u32) + +/* Get the group for a virtqueue: read index, write group in num, + * The virtqueue index is stored in the index field of + * vhost_vring_state. The group for this specific virtqueue is + * returned via num field of vhost_vring_state. + */ +#define VHOST_VDPA_GET_VRING_GROUP _IOWR(VHOST_VIRTIO, 0x7B, \ + struct vhost_vring_state) +/* Set the ASID for a virtqueue group. The group index is stored in + * the index field of vhost_vring_state, the ASID associated with this + * group is stored at num field of vhost_vring_state. + */ +#define VHOST_VDPA_SET_GROUP_ASID _IOW(VHOST_VIRTIO, 0x7C, \ + struct vhost_vring_state) + +/* Suspend a device so it does not process virtqueue requests anymore + * + * After the return of ioctl the device must preserve all the necessary state + * (the virtqueue vring base plus the possible device specific states) that is + * required for restoring in the future. The device must not change its + * configuration after that point. + */ +#define VHOST_VDPA_SUSPEND _IO(VHOST_VIRTIO, 0x7D) + +/* Resume a device so it can resume processing virtqueue requests + * + * After the return of this ioctl the device will have restored all the + * necessary states and it is fully operational to continue processing the + * virtqueue descriptors. 
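+ * Together with VHOST_VDPA_SUSPEND this forms a stop/start pair that a VMM
+ * can wrap around saving or restoring device state (a sketch, error handling
+ * omitted):
+ *
+ *   ioctl(vdpa_fd, VHOST_VDPA_SUSPEND);  // device quiesces, state frozen
+ *   ... save or load virtqueue and config state here ...
+ *   ioctl(vdpa_fd, VHOST_VDPA_RESUME);   // device resumes processing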
+ */
+#define VHOST_VDPA_RESUME _IO(VHOST_VIRTIO, 0x7E)
+
+/* set and get device buffer */
+#define VHOST_GET_DEV_BUFFER _IOR(VHOST_VIRTIO, 0xb0, struct vhost_vdpa_config)
+#define VHOST_SET_DEV_BUFFER _IOW(VHOST_VIRTIO, 0xb1, struct vhost_vdpa_config)
+#define VHOST_GET_DEV_BUFFER_SIZE _IOR(VHOST_VIRTIO, 0xb3, __u32)
+
+/* set device migration state */
+#define VHOST_VDPA_SET_MIG_STATE _IOW(VHOST_VIRTIO, 0xb2, __u8)
+
 #endif
diff --git a/linux-user/cpu_loop-common.h b/linux-user/cpu_loop-common.h
index 8828af28a4d99a137d69e41328a7d469e280188b..0803cc55d8ae0f52c855e31afab41745af488e29 100644
--- a/linux-user/cpu_loop-common.h
+++ b/linux-user/cpu_loop-common.h
@@ -26,9 +26,11 @@
 do { \
     CPUState *cs = env_cpu(env); \
     fprintf(stderr, fmt , ## __VA_ARGS__); \
+    fprintf(stderr, "Failing executable: %s\n", exec_path); \
     cpu_dump_state(cs, stderr, 0); \
     if (qemu_log_separate()) { \
         qemu_log(fmt, ## __VA_ARGS__); \
+        qemu_log("Failing executable: %s\n", exec_path); \
         log_cpu_state(cs, 0); \
     } \
 } while (0)
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 767f54c76dc5844538bc071ac50dfe7c1d5b78a5..e274c0bd336731a622c409b735f1b72b5e8fefe9 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1041,6 +1041,70 @@ static uint32_t get_elf_hwcap(void)
 #endif /* TARGET_MIPS */
+#ifdef TARGET_LOONGARCH64
+
+#define ELF_START_MMAP 0x80000000
+
+#define ELF_CLASS ELFCLASS64
+#define ELF_ARCH EM_LOONGARCH
+
+#define elf_check_arch(x) ((x) == EM_LOONGARCH)
+
+static inline void init_thread(struct target_pt_regs *regs,
+                               struct image_info *infop)
+{
+    regs->csr_crmd = 2 << 3;
+    regs->csr_era = infop->entry;
+    regs->regs[3] = infop->start_stack;
+}
+
+/* See linux kernel: arch/mips/include/asm/elf.h. */
+#define ELF_NREG 45
+typedef target_elf_greg_t target_elf_gregset_t[ELF_NREG];
+
+/* See linux kernel: arch/loongarch/include/uapi/asm/reg.h */
+enum {
+    TARGET_EF_R0 = 0,
+    TARGET_EF_R26 = TARGET_EF_R0 + 26,
+    TARGET_EF_R27 = TARGET_EF_R0 + 27,
+    TARGET_EF_CSR_ERA = TARGET_EF_R0 + 32,
+    TARGET_EF_CSR_BADV = TARGET_EF_R0 + 33,
+    TARGET_EF_CSR_CRMD = TARGET_EF_R0 + 34,
+    TARGET_EF_CSR_ESTAT = TARGET_EF_R0 + 38
+};
+
+/* See linux kernel: arch/loongarch/kernel/process.c:elf_dump_regs.
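+ * The 45 slots are the 32 GPRs followed by the CSR values at the offsets
+ * defined in the enum above; as the function below shows, r0 stays hardwired
+ * to zero and r26/r27 are written as zero rather than copied.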
*/ +static void elf_core_copy_regs(target_elf_gregset_t *regs, + const CPULOONGARCHState *env) +{ + int i; + + (*regs)[TARGET_EF_R0] = 0; + + for (i = 1; i < ARRAY_SIZE(env->active_tc.gpr); i++) { + (*regs)[TARGET_EF_R0 + i] = tswapreg(env->active_tc.gpr[i]); + } + + (*regs)[TARGET_EF_R26] = 0; + (*regs)[TARGET_EF_R27] = 0; + (*regs)[TARGET_EF_CSR_ERA] = tswapreg(env->active_tc.PC); + (*regs)[TARGET_EF_CSR_BADV] = tswapreg(env->CSR_BADV); + (*regs)[TARGET_EF_CSR_CRMD] = tswapreg(env->CSR_CRMD); + (*regs)[TARGET_EF_CSR_ESTAT] = tswapreg(env->CSR_ESTAT); +} + +#define USE_ELF_CORE_DUMP +#define ELF_EXEC_PAGESIZE 4096 + +#define ELF_HWCAP get_elf_hwcap() + +static uint32_t get_elf_hwcap(void) +{ + return 0; +} + +#endif /* TARGET_LOONGARCH64 */ + #ifdef TARGET_MICROBLAZE #define ELF_START_MMAP 0x80000000 @@ -1485,6 +1549,22 @@ static inline void init_thread(struct target_pt_regs *regs, #endif /* TARGET_HPPA */ +#ifdef TARGET_SW64 + +#define ELF_CLASS ELFCLASS64 +#define ELF_ARCH EM_SW64 + +#define ELF_START_MMAP (0x30000000000ULL) + +static inline void init_thread(struct target_pt_regs *regs, + struct image_info *infop) +{ + regs->pc = infop->entry; + regs->usp = infop->start_stack; +} + +#endif /* TARGET_SW64 */ + #ifdef TARGET_XTENSA #define ELF_START_MMAP 0x20000000 diff --git a/linux-user/flatload.c b/linux-user/flatload.c index e4c2f89a226778f4f1178071c69d628edfa12935..e99570ca182b9c34abec16daff4f414bd65d91c6 100644 --- a/linux-user/flatload.c +++ b/linux-user/flatload.c @@ -808,7 +808,7 @@ int load_flt_binary(struct linux_binprm *bprm, struct image_info *info) /* Stash our initial stack pointer into the mm structure */ info->start_code = libinfo[0].start_code; - info->end_code = libinfo[0].start_code = libinfo[0].text_len; + info->end_code = libinfo[0].start_code + libinfo[0].text_len; info->start_data = libinfo[0].start_data; info->end_data = libinfo[0].end_data; info->start_brk = libinfo[0].start_brk; diff --git a/linux-user/host/sw64/host-signal.h b/linux-user/host/sw64/host-signal.h new file mode 100644 index 0000000000000000000000000000000000000000..11d6e976055cb22e87e83afd7778bee97f25e417 --- /dev/null +++ b/linux-user/host/sw64/host-signal.h @@ -0,0 +1,46 @@ +/* + * host-signal.h: signal info dependent on the host architecture + * + * Copyright (c) 2023 wxiat + * + * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. + * See the COPYING file in the top-level directory. 
+ */
+
+#ifndef SW64_HOST_SIGNAL_H
+#define SW64_HOST_SIGNAL_H
+
+static inline uintptr_t host_signal_pc(ucontext_t *uc)
+{
+    return uc->uc_mcontext.sc_pc;
+}
+
+static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
+{
+    uc->uc_mcontext.sc_pc = pc;
+}
+
+static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
+{
+    uint32_t *pc = (uint32_t *)host_signal_pc(uc);
+    uint32_t insn = *pc;
+
+    /* XXX: need kernel patch to get write flag faster */
+    switch (insn >> 26) {
+    case 0x0d: /* stw */
+    case 0x0e: /* stb */
+    case 0x0f: /* stq_u */
+    case 0x24: /* stf */
+    case 0x25: /* stg */
+    case 0x26: /* sts */
+    case 0x27: /* stt */
+    case 0x2c: /* stl */
+    case 0x2d: /* stq */
+    case 0x2e: /* stl_c */
+    case 0x2f: /* stq_c */
+        return true;
+    }
+    return false;
+}
+
+#endif
diff --git a/linux-user/host/sw64/hostdep.h b/linux-user/host/sw64/hostdep.h
new file mode 100755
index 0000000000000000000000000000000000000000..b30ac70100c5ff884423564f36d65cc95cd60511
--- /dev/null
+++ b/linux-user/host/sw64/hostdep.h
@@ -0,0 +1,14 @@
+/*
+ * hostdep.h : things which are dependent on the host architecture
+ *
+ * Written by Wang Yuanheng
+ *
+ * Copyright (C) 2023 wxiat
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#ifndef SW_64_HOSTDEP_H
+#define SW_64_HOSTDEP_H
+#endif
diff --git a/linux-user/i386/target_elf.h b/linux-user/i386/target_elf.h
index 1c6142e7da0d7f5a1bda8fc6536d29a1a33b5062..238a9aba738a0a855055e4616801365796f82080 100644
--- a/linux-user/i386/target_elf.h
+++ b/linux-user/i386/target_elf.h
@@ -9,6 +9,6 @@
 #define I386_TARGET_ELF_H
 static inline const char *cpu_get_model(uint32_t eflags)
 {
-    return "qemu32";
+    return "max";
 }
 #endif
diff --git a/linux-user/loongarch64/cpu_loop.c b/linux-user/loongarch64/cpu_loop.c
new file mode 100644
index 0000000000000000000000000000000000000000..eb455465abfb2a737b80a4ee1c4dbfc33d7d2abb
--- /dev/null
+++ b/linux-user/loongarch64/cpu_loop.c
@@ -0,0 +1,179 @@
+/*
+ * qemu user cpu loop
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu.h"
+#include "cpu_loop-common.h"
+#include "elf.h"
+
+/* Break codes */
+enum { BRK_OVERFLOW = 6, BRK_DIVZERO = 7 };
+
+void force_sig_fault(CPULOONGARCHState *env, target_siginfo_t *info,
+                     unsigned int code)
+{
+    switch (code) {
+    case BRK_OVERFLOW:
+    case BRK_DIVZERO:
+        info->si_signo = TARGET_SIGFPE;
+        info->si_errno = 0;
+        info->si_code = (code == BRK_OVERFLOW) ? FPE_INTOVF : FPE_INTDIV;
+        queue_signal(env, info->si_signo, QEMU_SI_FAULT, info);
+        break;
+    default:
+        info->si_signo = TARGET_SIGTRAP;
+        info->si_errno = 0;
+        queue_signal(env, info->si_signo, QEMU_SI_FAULT, info);
+        break;
+    }
+}
+
+void cpu_loop(CPULOONGARCHState *env)
+{
+    CPUState *cs = CPU(loongarch_env_get_cpu(env));
+    target_siginfo_t info;
+    int trapnr;
+    abi_long ret;
+
+    for (;;) {
+        cpu_exec_start(cs);
+        trapnr = cpu_exec(cs);
+        cpu_exec_end(cs);
+        process_queued_cpu_work(cs);
+
+        switch (trapnr) {
+        case EXCP_SYSCALL:
+            env->active_tc.PC += 4;
+            ret =
+                do_syscall(env, env->active_tc.gpr[11], env->active_tc.gpr[4],
+                           env->active_tc.gpr[5], env->active_tc.gpr[6],
+                           env->active_tc.gpr[7], env->active_tc.gpr[8],
+                           env->active_tc.gpr[9], -1, -1);
+            if (ret == -TARGET_ERESTARTSYS) {
+                env->active_tc.PC -= 4;
+                break;
+            }
+            if (ret == -TARGET_QEMU_ESIGRETURN) {
+                /*
+                 * Returning from a successful sigreturn syscall.
+                 * Avoid clobbering register state.
+                 */
+                break;
+            }
+            env->active_tc.gpr[4] = ret;
+            break;
+        case EXCP_TLBL:
+        case EXCP_TLBS:
+        case EXCP_AdEL:
+        case EXCP_AdES:
+            info.si_signo = TARGET_SIGSEGV;
+            info.si_errno = 0;
+            /* XXX: check env->error_code */
+            info.si_code = TARGET_SEGV_MAPERR;
+            info._sifields._sigfault._addr = env->CSR_BADV;
+            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+            break;
+        case EXCP_FPDIS:
+        case EXCP_LSXDIS:
+        case EXCP_LASXDIS:
+        case EXCP_RI:
+            info.si_signo = TARGET_SIGILL;
+            info.si_errno = 0;
+            info.si_code = 0;
+            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+            break;
+        case EXCP_INTERRUPT:
+            /* just indicate that signals should be handled asap */
+            break;
+        case EXCP_DEBUG:
+            info.si_signo = TARGET_SIGTRAP;
+            info.si_errno = 0;
+            info.si_code = TARGET_TRAP_BRKPT;
+            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+            break;
+        case EXCP_FPE:
+            info.si_signo = TARGET_SIGFPE;
+            info.si_errno = 0;
+            info.si_code = TARGET_FPE_FLTUNK;
+            if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_INVALID) {
+                info.si_code = TARGET_FPE_FLTINV;
+            } else if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_DIV0) {
+                info.si_code = TARGET_FPE_FLTDIV;
+            } else if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_OVERFLOW) {
+                info.si_code = TARGET_FPE_FLTOVF;
+            } else if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_UNDERFLOW) {
+                info.si_code = TARGET_FPE_FLTUND;
+            } else if (GET_FP_CAUSE(env->active_fpu.fcsr0) & FP_INEXACT) {
+                info.si_code = TARGET_FPE_FLTRES;
+            }
+            queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info);
+            break;
+        case EXCP_BREAK: {
+            abi_ulong trap_instr;
+            unsigned int code;
+
+            ret = get_user_u32(trap_instr, env->active_tc.PC);
+            if (ret != 0) {
+                goto error;
+            }
+
+            code = trap_instr & 0x7fff;
+            force_sig_fault(env, &info, code);
+        } break;
+        case EXCP_TRAP: {
+            abi_ulong trap_instr;
+            unsigned int code = 0;
+
+            ret = get_user_u32(trap_instr, env->active_tc.PC);
+
+            if (ret != 0) {
+                goto error;
+            }
+
+            /* The immediate versions don't provide a code.
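+             * For the register forms (top six opcode bits clear) the trap
+             * code sits in bits 15:6 of the instruction word, which is what
+             * the shift and 10-bit mask below extract.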
*/ + if (!(trap_instr & 0xFC000000)) { + code = ((trap_instr >> 6) & ((1 << 10) - 1)); + } + force_sig_fault(env, &info, code); + } break; + case EXCP_ATOMIC: + cpu_exec_step_atomic(cs); + break; + default: + error: + EXCP_DUMP(env, "qemu: unhandled CPU exception 0x%x - aborting\n", + trapnr); + abort(); + } + process_pending_signals(env); + } +} + +void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +{ + int i; + + for (i = 0; i < 32; i++) { + env->active_tc.gpr[i] = regs->regs[i]; + } + env->active_tc.PC = regs->csr_era & ~(target_ulong)1; +} diff --git a/linux-user/loongarch64/meson.build b/linux-user/loongarch64/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..c4c0b4d70191b623f30e5615964483c44e150e65 --- /dev/null +++ b/linux-user/loongarch64/meson.build @@ -0,0 +1,6 @@ +syscall_nr_generators += { + 'loongarch64': generator(sh, + arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@', + '', 'TARGET_SYSCALL_OFFSET' ], + output: '@BASENAME@_nr.h') +} diff --git a/linux-user/loongarch64/signal.c b/linux-user/loongarch64/signal.c new file mode 100644 index 0000000000000000000000000000000000000000..2f336035c913c3a3a763966a3f42b542a448f1bc --- /dev/null +++ b/linux-user/loongarch64/signal.c @@ -0,0 +1,218 @@ +/* + * Emulation of Linux signals + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu.h" +#include "signal-common.h" +#include "linux-user/trace.h" + +#define FPU_REG_WIDTH 256 +union fpureg +{ + uint32_t val32[FPU_REG_WIDTH / 32]; + uint64_t val64[FPU_REG_WIDTH / 64]; +}; + +struct target_sigcontext { + uint64_t sc_pc; + uint64_t sc_regs[32]; + uint32_t sc_flags; + + uint32_t sc_fcsr; + uint32_t sc_vcsr; + uint64_t sc_fcc; + union fpureg sc_fpregs[32] __attribute__((aligned(32))); + + uint32_t sc_reserved; +}; + +struct sigframe { + uint32_t sf_ass[4]; /* argument save space for o32 */ + uint32_t sf_code[2]; /* signal trampoline */ + struct target_sigcontext sf_sc; + target_sigset_t sf_mask; +}; + +struct target_ucontext { + target_ulong tuc_flags; + target_ulong tuc_link; + target_stack_t tuc_stack; + target_ulong pad0; + struct target_sigcontext tuc_mcontext; + target_sigset_t tuc_sigmask; +}; + +struct target_rt_sigframe { + uint32_t rs_ass[4]; /* argument save space for o32 */ + uint32_t rs_code[2]; /* signal trampoline */ + struct target_siginfo rs_info; + struct target_ucontext rs_uc; +}; + +static inline void setup_sigcontext(CPULOONGARCHState *regs, + struct target_sigcontext *sc) +{ + int i; + + __put_user(exception_resume_pc(regs), &sc->sc_pc); + regs->hflags &= ~LARCH_HFLAG_BMASK; + + __put_user(0, &sc->sc_regs[0]); + for (i = 1; i < 32; ++i) { + __put_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); + } + + for (i = 0; i < 32; ++i) { + __put_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i].val64[0]); + } +} + +static inline void restore_sigcontext(CPULOONGARCHState *regs, + struct target_sigcontext *sc) +{ + int i; + + __get_user(regs->CSR_ERA, &sc->sc_pc); + + for (i = 1; i < 32; ++i) { + __get_user(regs->active_tc.gpr[i], &sc->sc_regs[i]); + } + + for (i = 0; i < 32; ++i) { + __get_user(regs->active_fpu.fpr[i].d, &sc->sc_fpregs[i].val64[0]); + } +} + +/* + * Determine which stack to use.. + */ +static inline abi_ulong get_sigframe(struct target_sigaction *ka, + CPULOONGARCHState *regs, + size_t frame_size) +{ + unsigned long sp; + + /* + * FPU emulator may have its own trampoline active just + * above the user stack, 16-bytes before the next lowest + * 16 byte boundary. Try to avoid trashing it. + */ + sp = target_sigsp(get_sp_from_cpustate(regs) - 32, ka); + + return (sp - frame_size) & ~7; +} + +void setup_rt_frame(int sig, struct target_sigaction *ka, + target_siginfo_t *info, target_sigset_t *set, + CPULOONGARCHState *env) +{ + struct target_rt_sigframe *frame; + abi_ulong frame_addr; + int i; + + frame_addr = get_sigframe(ka, env, sizeof(*frame)); + trace_user_setup_rt_frame(env, frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + goto give_sigsegv; + } + + /* ori a7, $r0, TARGET_NR_rt_sigreturn */ + /* syscall 0 */ + __put_user(0x0380000b + (TARGET_NR_rt_sigreturn << 10), + &frame->rs_code[0]); + __put_user(0x002b0000, &frame->rs_code[1]); + + tswap_siginfo(&frame->rs_info, info); + + __put_user(0, &frame->rs_uc.tuc_flags); + __put_user(0, &frame->rs_uc.tuc_link); + target_save_altstack(&frame->rs_uc.tuc_stack, env); + + setup_sigcontext(env, &frame->rs_uc.tuc_mcontext); + + for (i = 0; i < TARGET_NSIG_WORDS; i++) { + __put_user(set->sig[i], &frame->rs_uc.tuc_sigmask.sig[i]); + } + + /* + * Arguments to signal handler: + * + * a0 = signal number + * a1 = pointer to siginfo_t + * a2 = pointer to ucontext_t + * + * $25 and PC point to the signal handler, $29 points to the + * struct sigframe. 
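+     * ($25/$29 are register names inherited from the MIPS version of this
+     * comment; in the code below the handler address goes into $r20 and PC,
+     * and $r3 holds the sigframe address.)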
+     */
+    env->active_tc.gpr[4] = sig;
+    env->active_tc.gpr[5] =
+        frame_addr + offsetof(struct target_rt_sigframe, rs_info);
+    env->active_tc.gpr[6] =
+        frame_addr + offsetof(struct target_rt_sigframe, rs_uc);
+    env->active_tc.gpr[3] = frame_addr;
+    env->active_tc.gpr[1] =
+        frame_addr + offsetof(struct target_rt_sigframe, rs_code);
+    /*
+     * The original kernel code sets CSR_ERA to the handler, since it
+     * returns to userland using ertn; we cannot do this here, so we
+     * must set PC directly.
+     */
+    env->active_tc.PC = env->active_tc.gpr[20] = ka->_sa_handler;
+    unlock_user_struct(frame, frame_addr, 1);
+    return;
+
+give_sigsegv:
+    unlock_user_struct(frame, frame_addr, 1);
+    force_sigsegv(sig);
+}
+
+long do_rt_sigreturn(CPULOONGARCHState *env)
+{
+    struct target_rt_sigframe *frame;
+    abi_ulong frame_addr;
+    sigset_t blocked;
+
+    frame_addr = env->active_tc.gpr[3];
+    trace_user_do_rt_sigreturn(env, frame_addr);
+    if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) {
+        goto badframe;
+    }
+
+    target_to_host_sigset(&blocked, &frame->rs_uc.tuc_sigmask);
+    set_sigmask(&blocked);
+
+    restore_sigcontext(env, &frame->rs_uc.tuc_mcontext);
+
+    if (do_sigaltstack(
+            frame_addr + offsetof(struct target_rt_sigframe, rs_uc.tuc_stack),
+            0, get_sp_from_cpustate(env)) == -EFAULT)
+        goto badframe;
+
+    env->active_tc.PC = env->CSR_ERA;
+    /*
+     * I am not sure this is right, but it seems to work;
+     * maybe a problem with nested signals?
+     */
+    env->CSR_ERA = 0;
+    return -TARGET_QEMU_ESIGRETURN;
+
+badframe:
+    force_sig(TARGET_SIGSEGV);
+    return -TARGET_QEMU_ESIGRETURN;
+}
diff --git a/linux-user/loongarch64/sockbits.h b/linux-user/loongarch64/sockbits.h
new file mode 100644
index 0000000000000000000000000000000000000000..8bcbfcb060bdaa7dc30a28e8b5885826d260fbd6
--- /dev/null
+++ b/linux-user/loongarch64/sockbits.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ *
+ */
+
+#include "../generic/sockbits.h"
diff --git a/linux-user/loongarch64/syscall_nr.h b/linux-user/loongarch64/syscall_nr.h
new file mode 100644
index 0000000000000000000000000000000000000000..0217ad77f96b364b09eed60463801fa4ff92363c
--- /dev/null
+++ b/linux-user/loongarch64/syscall_nr.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ * + */ + +#ifndef LINUX_USER_LOONGARCH_SYSCALL_NR_H +#define LINUX_USER_LOONGARCH_SYSCALL_NR_H + +#define TARGET_NR_io_setup 0 +#define TARGET_NR_io_destroy 1 +#define TARGET_NR_io_submit 2 +#define TARGET_NR_io_cancel 3 +#define TARGET_NR_io_getevents 4 +#define TARGET_NR_setxattr 5 +#define TARGET_NR_lsetxattr 6 +#define TARGET_NR_fsetxattr 7 +#define TARGET_NR_getxattr 8 +#define TARGET_NR_lgetxattr 9 +#define TARGET_NR_fgetxattr 10 +#define TARGET_NR_listxattr 11 +#define TARGET_NR_llistxattr 12 +#define TARGET_NR_flistxattr 13 +#define TARGET_NR_removexattr 14 +#define TARGET_NR_lremovexattr 15 +#define TARGET_NR_fremovexattr 16 +#define TARGET_NR_getcwd 17 +#define TARGET_NR_lookup_dcookie 18 +#define TARGET_NR_eventfd2 19 +#define TARGET_NR_epoll_create1 20 +#define TARGET_NR_epoll_ctl 21 +#define TARGET_NR_epoll_pwait 22 +#define TARGET_NR_dup 23 +#define TARGET_NR_dup3 24 +#define TARGET_NR_fcntl 25 +#define TARGET_NR_inotify_init1 26 +#define TARGET_NR_inotify_add_watch 27 +#define TARGET_NR_inotify_rm_watch 28 +#define TARGET_NR_ioctl 29 +#define TARGET_NR_ioprio_set 30 +#define TARGET_NR_ioprio_get 31 +#define TARGET_NR_flock 32 +#define TARGET_NR_mknodat 33 +#define TARGET_NR_mkdirat 34 +#define TARGET_NR_unlinkat 35 +#define TARGET_NR_symlinkat 36 +#define TARGET_NR_linkat 37 +#define TARGET_NR_renameat 38 +#define TARGET_NR_umount2 39 +#define TARGET_NR_mount 40 +#define TARGET_NR_pivot_root 41 +#define TARGET_NR_nfsservctl 42 +#define TARGET_NR_statfs 43 +#define TARGET_NR_fstatfs 44 +#define TARGET_NR_truncate 45 +#define TARGET_NR_ftruncate 46 +#define TARGET_NR_fallocate 47 +#define TARGET_NR_faccessat 48 +#define TARGET_NR_chdir 49 +#define TARGET_NR_fchdir 50 +#define TARGET_NR_chroot 51 +#define TARGET_NR_fchmod 52 +#define TARGET_NR_fchmodat 53 +#define TARGET_NR_fchownat 54 +#define TARGET_NR_fchown 55 +#define TARGET_NR_openat 56 +#define TARGET_NR_close 57 +#define TARGET_NR_vhangup 58 +#define TARGET_NR_pipe2 59 +#define TARGET_NR_quotactl 60 +#define TARGET_NR_getdents64 61 +#define TARGET_NR_lseek 62 +#define TARGET_NR_read 63 +#define TARGET_NR_write 64 +#define TARGET_NR_readv 65 +#define TARGET_NR_writev 66 +#define TARGET_NR_pread64 67 +#define TARGET_NR_pwrite64 68 +#define TARGET_NR_preadv 69 +#define TARGET_NR_pwritev 70 +#define TARGET_NR_sendfile 71 +#define TARGET_NR_pselect6 72 +#define TARGET_NR_ppoll 73 +#define TARGET_NR_signalfd4 74 +#define TARGET_NR_vmsplice 75 +#define TARGET_NR_splice 76 +#define TARGET_NR_tee 77 +#define TARGET_NR_readlinkat 78 +#define TARGET_NR_newfstatat 79 +#define TARGET_NR_fstat 80 +#define TARGET_NR_sync 81 +#define TARGET_NR_fsync 82 +#define TARGET_NR_fdatasync 83 +#define TARGET_NR_sync_file_range 84 +#define TARGET_NR_timerfd_create 85 +#define TARGET_NR_timerfd_settime 86 +#define TARGET_NR_timerfd_gettime 87 +#define TARGET_NR_utimensat 88 +#define TARGET_NR_acct 89 +#define TARGET_NR_capget 90 +#define TARGET_NR_capset 91 +#define TARGET_NR_personality 92 +#define TARGET_NR_exit 93 +#define TARGET_NR_exit_group 94 +#define TARGET_NR_waitid 95 +#define TARGET_NR_set_tid_address 96 +#define TARGET_NR_unshare 97 +#define TARGET_NR_futex 98 +#define TARGET_NR_set_robust_list 99 +#define TARGET_NR_get_robust_list 100 +#define TARGET_NR_nanosleep 101 +#define TARGET_NR_getitimer 102 +#define TARGET_NR_setitimer 103 +#define TARGET_NR_kexec_load 104 +#define TARGET_NR_init_module 105 +#define TARGET_NR_delete_module 106 +#define TARGET_NR_timer_create 107 +#define TARGET_NR_timer_gettime 108 +#define 
TARGET_NR_timer_getoverrun 109 +#define TARGET_NR_timer_settime 110 +#define TARGET_NR_timer_delete 111 +#define TARGET_NR_clock_settime 112 +#define TARGET_NR_clock_gettime 113 +#define TARGET_NR_clock_getres 114 +#define TARGET_NR_clock_nanosleep 115 +#define TARGET_NR_syslog 116 +#define TARGET_NR_ptrace 117 +#define TARGET_NR_sched_setparam 118 +#define TARGET_NR_sched_setscheduler 119 +#define TARGET_NR_sched_getscheduler 120 +#define TARGET_NR_sched_getparam 121 +#define TARGET_NR_sched_setaffinity 122 +#define TARGET_NR_sched_getaffinity 123 +#define TARGET_NR_sched_yield 124 +#define TARGET_NR_sched_get_priority_max 125 +#define TARGET_NR_sched_get_priority_min 126 +#define TARGET_NR_sched_rr_get_interval 127 +#define TARGET_NR_restart_syscall 128 +#define TARGET_NR_kill 129 +#define TARGET_NR_tkill 130 +#define TARGET_NR_tgkill 131 +#define TARGET_NR_sigaltstack 132 +#define TARGET_NR_rt_sigsuspend 133 +#define TARGET_NR_rt_sigaction 134 +#define TARGET_NR_rt_sigprocmask 135 +#define TARGET_NR_rt_sigpending 136 +#define TARGET_NR_rt_sigtimedwait 137 +#define TARGET_NR_rt_sigqueueinfo 138 +#define TARGET_NR_rt_sigreturn 139 +#define TARGET_NR_setpriority 140 +#define TARGET_NR_getpriority 141 +#define TARGET_NR_reboot 142 +#define TARGET_NR_setregid 143 +#define TARGET_NR_setgid 144 +#define TARGET_NR_setreuid 145 +#define TARGET_NR_setuid 146 +#define TARGET_NR_setresuid 147 +#define TARGET_NR_getresuid 148 +#define TARGET_NR_setresgid 149 +#define TARGET_NR_getresgid 150 +#define TARGET_NR_setfsuid 151 +#define TARGET_NR_setfsgid 152 +#define TARGET_NR_times 153 +#define TARGET_NR_setpgid 154 +#define TARGET_NR_getpgid 155 +#define TARGET_NR_getsid 156 +#define TARGET_NR_setsid 157 +#define TARGET_NR_getgroups 158 +#define TARGET_NR_setgroups 159 +#define TARGET_NR_uname 160 +#define TARGET_NR_sethostname 161 +#define TARGET_NR_setdomainname 162 +#define TARGET_NR_getrlimit 163 +#define TARGET_NR_setrlimit 164 +#define TARGET_NR_getrusage 165 +#define TARGET_NR_umask 166 +#define TARGET_NR_prctl 167 +#define TARGET_NR_getcpu 168 +#define TARGET_NR_gettimeofday 169 +#define TARGET_NR_settimeofday 170 +#define TARGET_NR_adjtimex 171 +#define TARGET_NR_getpid 172 +#define TARGET_NR_getppid 173 +#define TARGET_NR_getuid 174 +#define TARGET_NR_geteuid 175 +#define TARGET_NR_getgid 176 +#define TARGET_NR_getegid 177 +#define TARGET_NR_gettid 178 +#define TARGET_NR_sysinfo 179 +#define TARGET_NR_mq_open 180 +#define TARGET_NR_mq_unlink 181 +#define TARGET_NR_mq_timedsend 182 +#define TARGET_NR_mq_timedreceive 183 +#define TARGET_NR_mq_notify 184 +#define TARGET_NR_mq_getsetattr 185 +#define TARGET_NR_msgget 186 +#define TARGET_NR_msgctl 187 +#define TARGET_NR_msgrcv 188 +#define TARGET_NR_msgsnd 189 +#define TARGET_NR_semget 190 +#define TARGET_NR_semctl 191 +#define TARGET_NR_semtimedop 192 +#define TARGET_NR_semop 193 +#define TARGET_NR_shmget 194 +#define TARGET_NR_shmctl 195 +#define TARGET_NR_shmat 196 +#define TARGET_NR_shmdt 197 +#define TARGET_NR_socket 198 +#define TARGET_NR_socketpair 199 +#define TARGET_NR_bind 200 +#define TARGET_NR_listen 201 +#define TARGET_NR_accept 202 +#define TARGET_NR_connect 203 +#define TARGET_NR_getsockname 204 +#define TARGET_NR_getpeername 205 +#define TARGET_NR_sendto 206 +#define TARGET_NR_recvfrom 207 +#define TARGET_NR_setsockopt 208 +#define TARGET_NR_getsockopt 209 +#define TARGET_NR_shutdown 210 +#define TARGET_NR_sendmsg 211 +#define TARGET_NR_recvmsg 212 +#define TARGET_NR_readahead 213 +#define TARGET_NR_brk 214 +#define 
TARGET_NR_munmap 215 +#define TARGET_NR_mremap 216 +#define TARGET_NR_add_key 217 +#define TARGET_NR_request_key 218 +#define TARGET_NR_keyctl 219 +#define TARGET_NR_clone 220 +#define TARGET_NR_execve 221 +#define TARGET_NR_mmap 222 +#define TARGET_NR_fadvise64 223 +#define TARGET_NR_swapon 224 +#define TARGET_NR_swapoff 225 +#define TARGET_NR_mprotect 226 +#define TARGET_NR_msync 227 +#define TARGET_NR_mlock 228 +#define TARGET_NR_munlock 229 +#define TARGET_NR_mlockall 230 +#define TARGET_NR_munlockall 231 +#define TARGET_NR_mincore 232 +#define TARGET_NR_madvise 233 +#define TARGET_NR_remap_file_pages 234 +#define TARGET_NR_mbind 235 +#define TARGET_NR_get_mempolicy 236 +#define TARGET_NR_set_mempolicy 237 +#define TARGET_NR_migrate_pages 238 +#define TARGET_NR_move_pages 239 +#define TARGET_NR_rt_tgsigqueueinfo 240 +#define TARGET_NR_perf_event_open 241 +#define TARGET_NR_accept4 242 +#define TARGET_NR_recvmmsg 243 +#define TARGET_NR_arch_specific_syscall 244 +#define TARGET_NR_wait4 260 +#define TARGET_NR_prlimit64 261 +#define TARGET_NR_fanotify_init 262 +#define TARGET_NR_fanotify_mark 263 +#define TARGET_NR_name_to_handle_at 264 +#define TARGET_NR_open_by_handle_at 265 +#define TARGET_NR_clock_adjtime 266 +#define TARGET_NR_syncfs 267 +#define TARGET_NR_setns 268 +#define TARGET_NR_sendmmsg 269 +#define TARGET_NR_process_vm_readv 270 +#define TARGET_NR_process_vm_writev 271 +#define TARGET_NR_kcmp 272 +#define TARGET_NR_finit_module 273 +#define TARGET_NR_sched_setattr 274 +#define TARGET_NR_sched_getattr 275 +#define TARGET_NR_renameat2 276 +#define TARGET_NR_seccomp 277 +#define TARGET_NR_getrandom 278 +#define TARGET_NR_memfd_create 279 +#define TARGET_NR_bpf 280 +#define TARGET_NR_execveat 281 +#define TARGET_NR_userfaultfd 282 +#define TARGET_NR_membarrier 283 +#define TARGET_NR_mlock2 284 +#define TARGET_NR_copy_file_range 285 +#define TARGET_NR_preadv2 286 +#define TARGET_NR_pwritev2 287 +#define TARGET_NR_pkey_mprotect 288 +#define TARGET_NR_pkey_alloc 289 +#define TARGET_NR_pkey_free 290 +#define TARGET_NR_statx 291 +#define TARGET_NR_io_pgetevents 292 +#define TARGET_NR_rseq 293 +#define TARGET_NR_kexec_file_load 294 + +#define TARGET_NR_syscalls (TARGET_NR_kexec_file_load + 1) + +#endif diff --git a/linux-user/loongarch64/target_cpu.h b/linux-user/loongarch64/target_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..c4bdb4648bd3abccd01fecadd080247f78331cd2 --- /dev/null +++ b/linux-user/loongarch64/target_cpu.h @@ -0,0 +1,47 @@ +/* + * loongarch specific CPU ABI and functions for linux-user + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#ifndef LOONGARCH_TARGET_CPU_H +#define LOONGARCH_TARGET_CPU_H + +static inline void cpu_clone_regs_child(CPULOONGARCHState *env, + target_ulong newsp, unsigned flags) +{ + if (newsp) { + env->active_tc.gpr[3] = newsp; + } + env->active_tc.gpr[7] = 0; + env->active_tc.gpr[4] = 0; +} + +static inline void cpu_clone_regs_parent(CPULOONGARCHState *env, + unsigned flags) +{ +} + +static inline void cpu_set_tls(CPULOONGARCHState *env, target_ulong newtls) +{ + env->active_tc.gpr[2] = newtls; +} + +static inline abi_ulong get_sp_from_cpustate(CPULOONGARCHState *state) +{ + return state->active_tc.gpr[3]; +} +#endif diff --git a/linux-user/loongarch64/target_elf.h b/linux-user/loongarch64/target_elf.h new file mode 100644 index 0000000000000000000000000000000000000000..2290a9a6d1755b22ac436912cd3175bc28522957 --- /dev/null +++ b/linux-user/loongarch64/target_elf.h @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef LOONGARCH_TARGET_ELF_H +#define LOONGARCH_TARGET_ELF_H +static inline const char *cpu_get_model(uint32_t eflags) +{ + return "Loongson-3A5000"; +} +#endif diff --git a/linux-user/loongarch64/target_fcntl.h b/linux-user/loongarch64/target_fcntl.h new file mode 100644 index 0000000000000000000000000000000000000000..9a2bc1cef5687b95eef79ff06a1a4c3e19f3bd92 --- /dev/null +++ b/linux-user/loongarch64/target_fcntl.h @@ -0,0 +1,23 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef LOONGARCH_TARGET_FCNTL_H +#define LOONGARCH_TARGET_FCNTL_H + +#include "../generic/fcntl.h" + +#endif /* LOONGARCH_TARGET_FCNTL_H */ diff --git a/linux-user/loongarch64/target_signal.h b/linux-user/loongarch64/target_signal.h new file mode 100644 index 0000000000000000000000000000000000000000..be98151723318e34ada4d305e3ba6082d06399b1 --- /dev/null +++ b/linux-user/loongarch64/target_signal.h @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef LOONGARCH_TARGET_SIGNAL_H +#define LOONGARCH_TARGET_SIGNAL_H + +/* this struct defines a stack used during syscall handling */ + +typedef struct target_sigaltstack { + abi_long ss_sp; + abi_int ss_flags; + abi_ulong ss_size; +} target_stack_t; + +/* + * sigaltstack controls + */ +#define TARGET_SS_ONSTACK 1 +#define TARGET_SS_DISABLE 2 + +#define TARGET_MINSIGSTKSZ 2048 +#define TARGET_SIGSTKSZ 8192 + +#include "../generic/signal.h" + +#endif /* LOONGARCH_TARGET_SIGNAL_H */ diff --git a/linux-user/loongarch64/target_structs.h b/linux-user/loongarch64/target_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..53e7b3e0e202714a415f5b859cd13204ca235e28 --- /dev/null +++ b/linux-user/loongarch64/target_structs.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef LOONGARCH_TARGET_STRUCTS_H +#define LOONGARCH_TARGET_STRUCTS_H + +struct target_ipc_perm { + abi_int __key; /* Key. */ + abi_uint uid; /* Owner's user ID. */ + abi_uint gid; /* Owner's group ID. */ + abi_uint cuid; /* Creator's user ID. */ + abi_uint cgid; /* Creator's group ID. */ + abi_uint mode; /* Read/write permission. */ + abi_ushort __seq; /* Sequence number. */ + abi_ushort __pad1; + abi_ulong __unused1; + abi_ulong __unused2; +}; + +struct target_shmid_ds { + struct target_ipc_perm shm_perm; /* operation permission struct */ + abi_long shm_segsz; /* size of segment in bytes */ + abi_ulong shm_atime; /* time of last shmat() */ + abi_ulong shm_dtime; /* time of last shmdt() */ + abi_ulong shm_ctime; /* time of last change by shmctl() */ + abi_int shm_cpid; /* pid of creator */ + abi_int shm_lpid; /* pid of last shmop */ + abi_ulong shm_nattch; /* number of current attaches */ + abi_ulong __unused1; + abi_ulong __unused2; +}; + +#define TARGET_SEMID64_DS + +/* + * The semid64_ds structure for the MIPS architecture. + * Note extra padding because this structure is passed back and forth + * between kernel and user space. + */ +struct target_semid64_ds { + struct target_ipc_perm sem_perm; + abi_ulong sem_otime; + abi_ulong sem_ctime; + abi_ulong sem_nsems; + abi_ulong __unused1; + abi_ulong __unused2; +}; + +#endif diff --git a/linux-user/loongarch64/target_syscall.h b/linux-user/loongarch64/target_syscall.h new file mode 100644 index 0000000000000000000000000000000000000000..6acc015b85121036a877a1587a72d8a30eeb4a58 --- /dev/null +++ b/linux-user/loongarch64/target_syscall.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ *
+ */
+
+#ifndef LOONGARCH_TARGET_SYSCALL_H
+#define LOONGARCH_TARGET_SYSCALL_H
+
+/*
+ * this struct defines the way the registers are stored on the
+ * stack during a system call.
+ */
+
+struct target_pt_regs {
+    /* Saved main processor registers. */
+    target_ulong regs[32];
+
+    /* Saved special registers. */
+    target_ulong csr_crmd;
+    target_ulong csr_prmd;
+    target_ulong csr_euen;
+    target_ulong csr_ecfg;
+    target_ulong csr_estat;
+    target_ulong csr_era;
+    target_ulong csr_badvaddr;
+    target_ulong orig_a0;
+    target_ulong __last[0];
+};
+
+#define UNAME_MACHINE "loongarch"
+#define UNAME_MINIMUM_RELEASE "2.6.32"
+
+#define TARGET_CLONE_BACKWARDS
+#define TARGET_MINSIGSTKSZ 2048
+#define TARGET_MLOCKALL_MCL_CURRENT 1
+#define TARGET_MLOCKALL_MCL_FUTURE 2
+
+#define TARGET_FORCE_SHMLBA
+
+static inline abi_ulong target_shmlba(CPULOONGARCHState *env)
+{
+    return 0x40000;
+}
+
+#define TARGET_PR_SET_FP_MODE 45
+#define TARGET_PR_GET_FP_MODE 46
+#define TARGET_PR_FP_MODE_FR (1 << 0)
+#define TARGET_PR_FP_MODE_FRE (1 << 1)
+
+#endif /* LOONGARCH_TARGET_SYSCALL_H */
diff --git a/linux-user/loongarch64/termbits.h b/linux-user/loongarch64/termbits.h
new file mode 100644
index 0000000000000000000000000000000000000000..dd251e14b36e8f7ec95e9d4994dc59cd1366c067
--- /dev/null
+++ b/linux-user/loongarch64/termbits.h
@@ -0,0 +1,241 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ * + */ + +#ifndef LINUX_USER_LOONGARCH_TERMBITS_H +#define LINUX_USER_LOONGARCH_TERMBITS_H + +#define TARGET_NCCS 19 + +struct target_termios { + unsigned int c_iflag; /* input mode flags */ + unsigned int c_oflag; /* output mode flags */ + unsigned int c_cflag; /* control mode flags */ + unsigned int c_lflag; /* local mode flags */ + unsigned char c_line; /* line discipline */ + unsigned char c_cc[TARGET_NCCS]; /* control characters */ +}; + +/* c_iflag bits */ +#define TARGET_IGNBRK 0000001 +#define TARGET_BRKINT 0000002 +#define TARGET_IGNPAR 0000004 +#define TARGET_PARMRK 0000010 +#define TARGET_INPCK 0000020 +#define TARGET_ISTRIP 0000040 +#define TARGET_INLCR 0000100 +#define TARGET_IGNCR 0000200 +#define TARGET_ICRNL 0000400 +#define TARGET_IUCLC 0001000 +#define TARGET_IXON 0002000 +#define TARGET_IXANY 0004000 +#define TARGET_IXOFF 0010000 +#define TARGET_IMAXBEL 0020000 +#define TARGET_IUTF8 0040000 + +/* c_oflag bits */ +#define TARGET_OPOST 0000001 +#define TARGET_OLCUC 0000002 +#define TARGET_ONLCR 0000004 +#define TARGET_OCRNL 0000010 +#define TARGET_ONOCR 0000020 +#define TARGET_ONLRET 0000040 +#define TARGET_OFILL 0000100 +#define TARGET_OFDEL 0000200 +#define TARGET_NLDLY 0000400 +#define TARGET_NL0 0000000 +#define TARGET_NL1 0000400 +#define TARGET_CRDLY 0003000 +#define TARGET_CR0 0000000 +#define TARGET_CR1 0001000 +#define TARGET_CR2 0002000 +#define TARGET_CR3 0003000 +#define TARGET_TABDLY 0014000 +#define TARGET_TAB0 0000000 +#define TARGET_TAB1 0004000 +#define TARGET_TAB2 0010000 +#define TARGET_TAB3 0014000 +#define TARGET_XTABS 0014000 +#define TARGET_BSDLY 0020000 +#define TARGET_BS0 0000000 +#define TARGET_BS1 0020000 +#define TARGET_VTDLY 0040000 +#define TARGET_VT0 0000000 +#define TARGET_VT1 0040000 +#define TARGET_FFDLY 0100000 +#define TARGET_FF0 0000000 +#define TARGET_FF1 0100000 + +/* c_cflag bit meaning */ +#define TARGET_CBAUD 0010017 +#define TARGET_B0 0000000 /* hang up */ +#define TARGET_B50 0000001 +#define TARGET_B75 0000002 +#define TARGET_B110 0000003 +#define TARGET_B134 0000004 +#define TARGET_B150 0000005 +#define TARGET_B200 0000006 +#define TARGET_B300 0000007 +#define TARGET_B600 0000010 +#define TARGET_B1200 0000011 +#define TARGET_B1800 0000012 +#define TARGET_B2400 0000013 +#define TARGET_B4800 0000014 +#define TARGET_B9600 0000015 +#define TARGET_B19200 0000016 +#define TARGET_B38400 0000017 +#define TARGET_EXTA B19200 +#define TARGET_EXTB B38400 +#define TARGET_CSIZE 0000060 +#define TARGET_CS5 0000000 +#define TARGET_CS6 0000020 +#define TARGET_CS7 0000040 +#define TARGET_CS8 0000060 +#define TARGET_CSTOPB 0000100 +#define TARGET_CREAD 0000200 +#define TARGET_PARENB 0000400 +#define TARGET_PARODD 0001000 +#define TARGET_HUPCL 0002000 +#define TARGET_CLOCAL 0004000 +#define TARGET_CBAUDEX 0010000 +#define TARGET_B57600 0010001 +#define TARGET_B115200 0010002 +#define TARGET_B230400 0010003 +#define TARGET_B460800 0010004 +#define TARGET_CIBAUD 002003600000 /* input baud rate (not used) */ +#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */ +#define TARGET_CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ +#define TARGET_ISIG 0000001 +#define TARGET_ICANON 0000002 +#define TARGET_XCASE 0000004 +#define TARGET_ECHO 0000010 +#define TARGET_ECHOE 0000020 +#define TARGET_ECHOK 0000040 +#define TARGET_ECHONL 0000100 +#define TARGET_NOFLSH 0000200 +#define TARGET_TOSTOP 0000400 +#define TARGET_ECHOCTL 0001000 +#define TARGET_ECHOPRT 0002000 +#define TARGET_ECHOKE 0004000 +#define TARGET_FLUSHO 0010000 
+#define TARGET_PENDIN 0040000 +#define TARGET_IEXTEN 0100000 + +/* c_cc character offsets */ +#define TARGET_VINTR 0 +#define TARGET_VQUIT 1 +#define TARGET_VERASE 2 +#define TARGET_VKILL 3 +#define TARGET_VEOF 4 +#define TARGET_VTIME 5 +#define TARGET_VMIN 6 +#define TARGET_VSWTC 7 +#define TARGET_VSTART 8 +#define TARGET_VSTOP 9 +#define TARGET_VSUSP 10 +#define TARGET_VEOL 11 +#define TARGET_VREPRINT 12 +#define TARGET_VDISCARD 13 +#define TARGET_VWERASE 14 +#define TARGET_VLNEXT 15 +#define TARGET_VEOL2 16 + +/* ioctls */ + +#define TARGET_TCGETS 0x5401 +#define TARGET_TCSETS 0x5402 +#define TARGET_TCSETSW 0x5403 +#define TARGET_TCSETSF 0x5404 +#define TARGET_TCGETA 0x5405 +#define TARGET_TCSETA 0x5406 +#define TARGET_TCSETAW 0x5407 +#define TARGET_TCSETAF 0x5408 +#define TARGET_TCSBRK 0x5409 +#define TARGET_TCXONC 0x540A +#define TARGET_TCFLSH 0x540B + +#define TARGET_TIOCEXCL 0x540C +#define TARGET_TIOCNXCL 0x540D +#define TARGET_TIOCSCTTY 0x540E +#define TARGET_TIOCGPGRP 0x540F +#define TARGET_TIOCSPGRP 0x5410 +#define TARGET_TIOCOUTQ 0x5411 +#define TARGET_TIOCSTI 0x5412 +#define TARGET_TIOCGWINSZ 0x5413 +#define TARGET_TIOCSWINSZ 0x5414 +#define TARGET_TIOCMGET 0x5415 +#define TARGET_TIOCMBIS 0x5416 +#define TARGET_TIOCMBIC 0x5417 +#define TARGET_TIOCMSET 0x5418 +#define TARGET_TIOCGSOFTCAR 0x5419 +#define TARGET_TIOCSSOFTCAR 0x541A +#define TARGET_FIONREAD 0x541B +#define TARGET_TIOCINQ TARGET_FIONREAD +#define TARGET_TIOCLINUX 0x541C +#define TARGET_TIOCCONS 0x541D +#define TARGET_TIOCGSERIAL 0x541E +#define TARGET_TIOCSSERIAL 0x541F +#define TARGET_TIOCPKT 0x5420 +#define TARGET_FIONBIO 0x5421 +#define TARGET_TIOCNOTTY 0x5422 +#define TARGET_TIOCSETD 0x5423 +#define TARGET_TIOCGETD 0x5424 +#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TARGET_TIOCTTYGSTRUCT 0x5426 /* For debugging only */ +#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */ +#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */ +#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */ +/* Get Pty Number (of pty-mux device) */ +#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int) +/* Lock/unlock Pty */ +#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int) +/* Safely open the slave */ +#define TARGET_TIOCGPTPEER TARGET_IO('T', 0x41) + +#define TARGET_FIONCLEX 0x5450 /* these numbers need to be adjusted. 
*/ +#define TARGET_FIOCLEX 0x5451 +#define TARGET_FIOASYNC 0x5452 +#define TARGET_TIOCSERCONFIG 0x5453 +#define TARGET_TIOCSERGWILD 0x5454 +#define TARGET_TIOCSERSWILD 0x5455 +#define TARGET_TIOCGLCKTRMIOS 0x5456 +#define TARGET_TIOCSLCKTRMIOS 0x5457 +#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */ +#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */ + +/* wait for a change on serial input line(s) */ +#define TARGET_TIOCMIWAIT 0x545C +/* read serial port inline interrupt counts */ +#define TARGET_TIOCGICOUNT 0x545D +#define TARGET_TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */ +#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ + +/* Used for packet mode */ +#define TARGET_TIOCPKT_DATA 0 +#define TARGET_TIOCPKT_FLUSHREAD 1 +#define TARGET_TIOCPKT_FLUSHWRITE 2 +#define TARGET_TIOCPKT_STOP 4 +#define TARGET_TIOCPKT_START 8 +#define TARGET_TIOCPKT_NOSTOP 16 +#define TARGET_TIOCPKT_DOSTOP 32 + +#define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */ + +#endif diff --git a/linux-user/meson.build b/linux-user/meson.build index bf62c13e378957087adc1d44e585bb14e83dd369..8b8edefa6ea30cba64117713db682e70339c4e0f 100644 --- a/linux-user/meson.build +++ b/linux-user/meson.build @@ -37,5 +37,7 @@ subdir('ppc') subdir('s390x') subdir('sh4') subdir('sparc') +subdir('sw64') subdir('x86_64') subdir('xtensa') +subdir('loongarch64') diff --git a/linux-user/mmap.c b/linux-user/mmap.c index c125031b9046b3e05b71893131f0002f574de7bb..d674d5b00d4bb3219fddca4c8c835be4e0e7f785 100644 --- a/linux-user/mmap.c +++ b/linux-user/mmap.c @@ -251,8 +251,12 @@ static int mmap_frag(abi_ulong real_start, # define TASK_UNMAPPED_BASE (1ul << 38) #endif #else +#ifdef TARGET_HPPA +# define TASK_UNMAPPED_BASE 0xfa000000 +#else # define TASK_UNMAPPED_BASE 0x40000000 #endif +#endif abi_ulong mmap_next_start = TASK_UNMAPPED_BASE; unsigned long last_brk; diff --git a/linux-user/qemu.h b/linux-user/qemu.h index 5c713fa8ab26f8566cb820777772b09fa830c22e..66ddb25d1c88ce7a22d04cf8d41341b3f59d51bb 100644 --- a/linux-user/qemu.h +++ b/linux-user/qemu.h @@ -61,7 +61,7 @@ struct image_info { /* For target-specific processing of NT_GNU_PROPERTY_TYPE_0. */ uint32_t note_flags; -#ifdef TARGET_MIPS +#if defined(TARGET_MIPS) || defined(TARGET_LOONGARCH64) int fp_abi; int interp_fp_abi; #endif diff --git a/linux-user/riscv/signal.c b/linux-user/riscv/signal.c index a0f9542ce39a7ebc297efee8bf39df6ecad00e04..c50ac6d0aaab8e1ff30fcdb503b3751979caa748 100644 --- a/linux-user/riscv/signal.c +++ b/linux-user/riscv/signal.c @@ -64,9 +64,7 @@ static abi_ulong get_sigframe(struct target_sigaction *ka, /* This is the X/Open sanctioned signal stack switching. */ sp = target_sigsp(sp, ka) - framesize; - - /* XXX: kernel aligns with 0xf ? 
*/ - sp &= ~3UL; /* align sp on 4-byte boundary */ + sp &= ~0xf; return sp; } diff --git a/linux-user/signal-common.h b/linux-user/signal-common.h index 7457f8025c477a5623b29448361af14eeff3c3b5..00d9e04d74bbc5284bc7ed8914a1134eed3de644 100644 --- a/linux-user/signal-common.h +++ b/linux-user/signal-common.h @@ -90,4 +90,50 @@ abi_long do_swapcontext(CPUArchState *env, abi_ulong uold_ctx, */ int block_signals(void); /* Returns non zero if signal pending */ +#if defined(SIGSTKFLT) && defined(TARGET_SIGSTKFLT) +#define MAKE_SIG_ENTRY_SIGSTKFLT MAKE_SIG_ENTRY(SIGSTKFLT) +#else +#define MAKE_SIG_ENTRY_SIGSTKFLT +#endif + +#if defined(SIGIOT) && defined(TARGET_SIGIOT) +#define MAKE_SIG_ENTRY_SIGIOT MAKE_SIG_ENTRY(SIGIOT) +#else +#define MAKE_SIG_ENTRY_SIGIOT +#endif + +#define MAKE_SIGNAL_LIST \ + MAKE_SIG_ENTRY(SIGHUP) \ + MAKE_SIG_ENTRY(SIGINT) \ + MAKE_SIG_ENTRY(SIGQUIT) \ + MAKE_SIG_ENTRY(SIGILL) \ + MAKE_SIG_ENTRY(SIGTRAP) \ + MAKE_SIG_ENTRY(SIGABRT) \ + MAKE_SIG_ENTRY(SIGBUS) \ + MAKE_SIG_ENTRY(SIGFPE) \ + MAKE_SIG_ENTRY(SIGKILL) \ + MAKE_SIG_ENTRY(SIGUSR1) \ + MAKE_SIG_ENTRY(SIGSEGV) \ + MAKE_SIG_ENTRY(SIGUSR2) \ + MAKE_SIG_ENTRY(SIGPIPE) \ + MAKE_SIG_ENTRY(SIGALRM) \ + MAKE_SIG_ENTRY(SIGTERM) \ + MAKE_SIG_ENTRY(SIGCHLD) \ + MAKE_SIG_ENTRY(SIGCONT) \ + MAKE_SIG_ENTRY(SIGSTOP) \ + MAKE_SIG_ENTRY(SIGTSTP) \ + MAKE_SIG_ENTRY(SIGTTIN) \ + MAKE_SIG_ENTRY(SIGTTOU) \ + MAKE_SIG_ENTRY(SIGURG) \ + MAKE_SIG_ENTRY(SIGXCPU) \ + MAKE_SIG_ENTRY(SIGXFSZ) \ + MAKE_SIG_ENTRY(SIGVTALRM) \ + MAKE_SIG_ENTRY(SIGPROF) \ + MAKE_SIG_ENTRY(SIGWINCH) \ + MAKE_SIG_ENTRY(SIGIO) \ + MAKE_SIG_ENTRY(SIGPWR) \ + MAKE_SIG_ENTRY(SIGSYS) \ + MAKE_SIG_ENTRY_SIGSTKFLT \ + MAKE_SIG_ENTRY_SIGIOT + #endif diff --git a/linux-user/signal.c b/linux-user/signal.c index 6d5e5b698cc827416b4fdf78aac36d39894af2ff..f65d6cff2f2f4a88b08ac5f2615176c92447b540 100644 --- a/linux-user/signal.c +++ b/linux-user/signal.c @@ -53,40 +53,9 @@ abi_ulong default_rt_sigreturn; QEMU_BUILD_BUG_ON(__SIGRTMAX + 1 != _NSIG); #endif static uint8_t host_to_target_signal_table[_NSIG] = { - [SIGHUP] = TARGET_SIGHUP, - [SIGINT] = TARGET_SIGINT, - [SIGQUIT] = TARGET_SIGQUIT, - [SIGILL] = TARGET_SIGILL, - [SIGTRAP] = TARGET_SIGTRAP, - [SIGABRT] = TARGET_SIGABRT, -/* [SIGIOT] = TARGET_SIGIOT,*/ - [SIGBUS] = TARGET_SIGBUS, - [SIGFPE] = TARGET_SIGFPE, - [SIGKILL] = TARGET_SIGKILL, - [SIGUSR1] = TARGET_SIGUSR1, - [SIGSEGV] = TARGET_SIGSEGV, - [SIGUSR2] = TARGET_SIGUSR2, - [SIGPIPE] = TARGET_SIGPIPE, - [SIGALRM] = TARGET_SIGALRM, - [SIGTERM] = TARGET_SIGTERM, -#ifdef SIGSTKFLT - [SIGSTKFLT] = TARGET_SIGSTKFLT, -#endif - [SIGCHLD] = TARGET_SIGCHLD, - [SIGCONT] = TARGET_SIGCONT, - [SIGSTOP] = TARGET_SIGSTOP, - [SIGTSTP] = TARGET_SIGTSTP, - [SIGTTIN] = TARGET_SIGTTIN, - [SIGTTOU] = TARGET_SIGTTOU, - [SIGURG] = TARGET_SIGURG, - [SIGXCPU] = TARGET_SIGXCPU, - [SIGXFSZ] = TARGET_SIGXFSZ, - [SIGVTALRM] = TARGET_SIGVTALRM, - [SIGPROF] = TARGET_SIGPROF, - [SIGWINCH] = TARGET_SIGWINCH, - [SIGIO] = TARGET_SIGIO, - [SIGPWR] = TARGET_SIGPWR, - [SIGSYS] = TARGET_SIGSYS, +#define MAKE_SIG_ENTRY(sig) [sig] = TARGET_##sig, + MAKE_SIGNAL_LIST +#undef MAKE_SIG_ENTRY /* next signals stay the same */ }; diff --git a/linux-user/strace.c b/linux-user/strace.c index 2cdbf030ba44fb2ed37794f2cab136bd44272871..b66a645cb02d97bc765db4a8dfd72769e58ac0a9 100644 --- a/linux-user/strace.c +++ b/linux-user/strace.c @@ -17,6 +17,7 @@ #include "qemu.h" #include "user-internals.h" #include "strace.h" +#include "signal-common.h" struct syscallname { int nr; @@ -81,6 +82,7 @@ UNUSED static void 
print_buf(abi_long addr, abi_long len, int last); UNUSED static void print_raw_param(const char *, abi_long, int); UNUSED static void print_timeval(abi_ulong, int); UNUSED static void print_timespec(abi_ulong, int); +UNUSED static void print_timespec64(abi_ulong, int); UNUSED static void print_timezone(abi_ulong, int); UNUSED static void print_itimerval(abi_ulong, int); UNUSED static void print_number(abi_long, int); @@ -141,30 +143,21 @@ if( cmd == val ) { \ qemu_log("%d", cmd); } +static const char * const target_signal_name[] = { +#define MAKE_SIG_ENTRY(sig) [TARGET_##sig] = #sig, + MAKE_SIGNAL_LIST +#undef MAKE_SIG_ENTRY +}; + static void print_signal(abi_ulong arg, int last) { const char *signal_name = NULL; - switch(arg) { - case TARGET_SIGHUP: signal_name = "SIGHUP"; break; - case TARGET_SIGINT: signal_name = "SIGINT"; break; - case TARGET_SIGQUIT: signal_name = "SIGQUIT"; break; - case TARGET_SIGILL: signal_name = "SIGILL"; break; - case TARGET_SIGABRT: signal_name = "SIGABRT"; break; - case TARGET_SIGFPE: signal_name = "SIGFPE"; break; - case TARGET_SIGKILL: signal_name = "SIGKILL"; break; - case TARGET_SIGSEGV: signal_name = "SIGSEGV"; break; - case TARGET_SIGPIPE: signal_name = "SIGPIPE"; break; - case TARGET_SIGALRM: signal_name = "SIGALRM"; break; - case TARGET_SIGTERM: signal_name = "SIGTERM"; break; - case TARGET_SIGUSR1: signal_name = "SIGUSR1"; break; - case TARGET_SIGUSR2: signal_name = "SIGUSR2"; break; - case TARGET_SIGCHLD: signal_name = "SIGCHLD"; break; - case TARGET_SIGCONT: signal_name = "SIGCONT"; break; - case TARGET_SIGSTOP: signal_name = "SIGSTOP"; break; - case TARGET_SIGTTIN: signal_name = "SIGTTIN"; break; - case TARGET_SIGTTOU: signal_name = "SIGTTOU"; break; + + if (arg < ARRAY_SIZE(target_signal_name)) { + signal_name = target_signal_name[arg]; } + if (signal_name == NULL) { print_raw_param("%ld", arg, last); return; @@ -803,6 +796,24 @@ print_syscall_ret_clock_gettime(void *cpu_env, const struct syscallname *name, #define print_syscall_ret_clock_getres print_syscall_ret_clock_gettime #endif +#if defined(TARGET_NR_clock_gettime64) +static void +print_syscall_ret_clock_gettime64(CPUArchState *cpu_env, const struct syscallname *name, + abi_long ret, abi_long arg0, abi_long arg1, + abi_long arg2, abi_long arg3, abi_long arg4, + abi_long arg5) +{ + if (!print_syscall_err(ret)) { + qemu_log(TARGET_ABI_FMT_ld, ret); + qemu_log(" ("); + print_timespec64(arg1, 1); + qemu_log(")"); + } + + qemu_log("\n"); +} +#endif + #ifdef TARGET_NR_gettimeofday static void print_syscall_ret_gettimeofday(void *cpu_env, const struct syscallname *name, @@ -1368,7 +1379,8 @@ UNUSED static struct flags termios_lflags[] = { FLAG_END, }; -UNUSED static struct flags mlockall_flags[] = { +#ifdef TARGET_NR_mlockall +static struct flags mlockall_flags[] = { FLAG_TARGET(MCL_CURRENT), FLAG_TARGET(MCL_FUTURE), #ifdef MCL_ONFAULT @@ -1376,6 +1388,7 @@ UNUSED static struct flags mlockall_flags[] = { #endif FLAG_END, }; +#endif /* IDs of the various system clocks */ #define TARGET_CLOCK_REALTIME 0 @@ -1494,6 +1507,11 @@ print_file_mode(abi_long mode, int last) const char *sep = ""; const struct flags *m; + if (mode == 0) { + qemu_log("000%s", get_comma(last)); + return; + } + for (m = &mode_flags[0]; m->f_string != NULL; m++) { if ((m->f_value & mode) == m->f_value) { qemu_log("%s%s", m->f_string, sep); @@ -1660,6 +1678,27 @@ print_timespec(abi_ulong ts_addr, int last) } } +static void +print_timespec64(abi_ulong ts_addr, int last) +{ + if (ts_addr) { + struct target__kernel_timespec *ts; + + 
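
The print_timespec64() helper (continued just below) prints a guest struct __kernel_timespec, whose two fields are 64-bit regardless of the target's word size, so each field goes through tswap64() before printing. A rough standalone sketch of the same idea, with a hand-rolled stand-in for QEMU's swap helper and an assumed guest struct layout:

#include <stdio.h>
#include <stdint.h>

/* Guest layout of Linux's struct __kernel_timespec: both fields
 * are 64 bits even on 32-bit targets (assumed here for the demo). */
struct guest_timespec64 {
    int64_t tv_sec;
    int64_t tv_nsec;
};

/* Stand-in for QEMU's tswap64(): swap only if guest and host
 * endianness differ. */
static uint64_t maybe_swap64(uint64_t v, int cross_endian)
{
    return cross_endian ? __builtin_bswap64(v) : v;
}

static void print_guest_ts64(const struct guest_timespec64 *ts, int cross_endian)
{
    printf("{tv_sec = %lld, tv_nsec = %lld}\n",
           (long long)maybe_swap64(ts->tv_sec, cross_endian),
           (long long)maybe_swap64(ts->tv_nsec, cross_endian));
}

int main(void)
{
    struct guest_timespec64 ts = { 1700000000, 500 };
    print_guest_ts64(&ts, 0);   /* same-endian guest */
    return 0;
}
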
ts = lock_user(VERIFY_READ, ts_addr, sizeof(*ts), 1); + if (!ts) { + print_pointer(ts_addr, last); + return; + } + qemu_log("{tv_sec = %lld" + ",tv_nsec = %lld}%s", + (long long)tswap64(ts->tv_sec), (long long)tswap64(ts->tv_nsec), + get_comma(last)); + unlock_user(ts, ts_addr, 0); + } else { + qemu_log("NULL%s", get_comma(last)); + } +} + static void print_timezone(abi_ulong tz_addr, int last) { @@ -2275,6 +2314,19 @@ print_clock_gettime(void *cpu_env, const struct syscallname *name, #define print_clock_getres print_clock_gettime #endif +#if defined(TARGET_NR_clock_gettime64) +static void +print_clock_gettime64(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + print_syscall_prologue(name); + print_enums(clockids, arg0, 0); + print_pointer(arg1, 1); + print_syscall_epilogue(name); +} +#endif + #ifdef TARGET_NR_clock_settime static void print_clock_settime(void *cpu_env, const struct syscallname *name, @@ -3272,6 +3324,34 @@ print_openat(void *cpu_env, const struct syscallname *name, } #endif +#ifdef TARGET_NR_pidfd_send_signal +static void +print_pidfd_send_signal(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + void *p; + target_siginfo_t uinfo; + + print_syscall_prologue(name); + print_raw_param("%d", arg0, 0); + print_signal(arg1, 0); + + p = lock_user(VERIFY_READ, arg2, sizeof(target_siginfo_t), 1); + if (p) { + get_target_siginfo(&uinfo, p); + print_siginfo(&uinfo); + + unlock_user(p, arg2, 0); + } else { + print_pointer(arg2, 1); + } + + print_raw_param("%u", arg3, 0); + print_syscall_epilogue(name); +} +#endif + #ifdef TARGET_NR_mq_unlink static void print_mq_unlink(void *cpu_env, const struct syscallname *name, @@ -3489,6 +3569,21 @@ print_unshare(void *cpu_env, const struct syscallname *name, } #endif +#ifdef TARGET_NR_clock_nanosleep +static void +print_clock_nanosleep(CPUArchState *cpu_env, const struct syscallname *name, + abi_long arg0, abi_long arg1, abi_long arg2, + abi_long arg3, abi_long arg4, abi_long arg5) +{ + print_syscall_prologue(name); + print_enums(clockids, arg0, 0); + print_raw_param("%d", arg1, 0); + print_timespec(arg2, 0); + print_timespec(arg3, 1); + print_syscall_epilogue(name); +} +#endif + #ifdef TARGET_NR_utime static void print_utime(void *cpu_env, const struct syscallname *name, @@ -3621,11 +3716,20 @@ print_futex(void *cpu_env, const struct syscallname *name, abi_long arg0, abi_long arg1, abi_long arg2, abi_long arg3, abi_long arg4, abi_long arg5) { + abi_long op = arg1 & FUTEX_CMD_MASK; print_syscall_prologue(name); print_pointer(arg0, 0); print_futex_op(arg1, 0); print_raw_param(",%d", arg2, 0); - print_pointer(arg3, 0); /* struct timespec */ + switch (op) { + case FUTEX_WAIT: + case FUTEX_WAIT_BITSET: + print_timespec(arg3, 0); + break; + default: + print_pointer(arg3, 0); + break; + } print_pointer(arg4, 0); print_raw_param("%d", arg4, 1); print_syscall_epilogue(name); diff --git a/linux-user/strace.list b/linux-user/strace.list index 278596acd131d327a0f19f308f328bccbc262eb9..8d5ab6dfac31206960f9e56468fd2c19aa92f1b1 100644 --- a/linux-user/strace.list +++ b/linux-user/strace.list @@ -91,7 +91,8 @@ print_syscall_ret_clock_gettime }, #endif #ifdef TARGET_NR_clock_nanosleep -{ TARGET_NR_clock_nanosleep, "clock_nanosleep" , NULL, NULL, NULL }, +{ TARGET_NR_clock_nanosleep, "clock_nanosleep" , NULL, print_clock_nanosleep, + NULL }, #endif #ifdef 
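
The print_futex() change above is worth calling out: the op word carries FUTEX_PRIVATE_FLAG and FUTEX_CLOCK_REALTIME modifier bits, so it must be masked with FUTEX_CMD_MASK before deciding whether the fourth argument is a timespec. A small sketch of that classification using the kernel's own flag definitions:

#include <stdio.h>
#include <linux/futex.h>

/* Classify futex(2)'s fourth argument the way print_futex() now
 * does: strip the PRIVATE/CLOCK_REALTIME modifier bits, then only
 * WAIT-style commands take a struct timespec pointer. */
static const char *futex_arg3_kind(int op)
{
    switch (op & FUTEX_CMD_MASK) {
    case FUTEX_WAIT:
    case FUTEX_WAIT_BITSET:
        return "struct timespec *";
    default:
        return "raw value or pointer";
    }
}

int main(void)
{
    /* Modifier bits must not defeat the match. */
    printf("%s\n", futex_arg3_kind(FUTEX_WAIT | FUTEX_PRIVATE_FLAG));
    printf("%s\n", futex_arg3_kind(FUTEX_WAKE));
    return 0;
}
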
TARGET_NR_clock_settime
 { TARGET_NR_clock_settime, "clock_settime" , NULL, print_clock_settime,
   NULL },
@@ -278,10 +279,10 @@
 { TARGET_NR_getcwd, "getcwd" , "%s(%p,%d)", NULL, NULL },
 #endif
 #ifdef TARGET_NR_getdents
-{ TARGET_NR_getdents, "getdents" , NULL, NULL, NULL },
+{ TARGET_NR_getdents, "getdents" , "%s(%d,%p,%u)", NULL, NULL },
 #endif
 #ifdef TARGET_NR_getdents64
-{ TARGET_NR_getdents64, "getdents64" , NULL, NULL, NULL },
+{ TARGET_NR_getdents64, "getdents64" , "%s(%d,%p,%u)", NULL, NULL },
 #endif
 #ifdef TARGET_NR_getdomainname
 { TARGET_NR_getdomainname, "getdomainname" , NULL, NULL, NULL },
@@ -1522,7 +1523,10 @@
 { TARGET_NR_timer_gettime, "timer_gettime" , NULL, NULL, NULL },
 #endif
 #ifdef TARGET_NR_timer_settime
-{ TARGET_NR_timer_settime, "timer_settime" , NULL, NULL, NULL },
+{ TARGET_NR_timer_settime, "timer_settime" , "%s(%d,%d,%p,%p)", NULL, NULL },
+#endif
+#ifdef TARGET_NR_timer_settime64
+{ TARGET_NR_timer_settime64, "timer_settime64" , "%s(%d,%d,%p,%p)", NULL, NULL },
 #endif
 #ifdef TARGET_NR_timerfd
 { TARGET_NR_timerfd, "timerfd" , NULL, NULL, NULL },
@@ -1659,6 +1663,15 @@
 #ifdef TARGET_NR_pipe2
 { TARGET_NR_pipe2, "pipe2", NULL, NULL, NULL },
 #endif
+#ifdef TARGET_NR_pidfd_open
+{ TARGET_NR_pidfd_open, "pidfd_open", "%s(%d,%u)", NULL, NULL },
+#endif
+#ifdef TARGET_NR_pidfd_send_signal
+{ TARGET_NR_pidfd_send_signal, "pidfd_send_signal", NULL, print_pidfd_send_signal, NULL },
+#endif
+#ifdef TARGET_NR_pidfd_getfd
+{ TARGET_NR_pidfd_getfd, "pidfd_getfd", "%s(%d,%d,%u)", NULL, NULL },
+#endif
 #ifdef TARGET_NR_atomic_cmpxchg_32
 { TARGET_NR_atomic_cmpxchg_32, "atomic_cmpxchg_32", NULL, NULL, NULL },
 #endif
@@ -1671,3 +1684,7 @@
 #ifdef TARGET_NR_copy_file_range
 { TARGET_NR_copy_file_range, "copy_file_range", "%s(%d,%p,%d,%p,"TARGET_ABI_FMT_lu",%u)", NULL, NULL },
 #endif
+#ifdef TARGET_NR_clock_gettime64
+{ TARGET_NR_clock_gettime64, "clock_gettime64" , NULL, print_clock_gettime64,
+  print_syscall_ret_clock_gettime64 },
+#endif
diff --git a/linux-user/sw64/cpu_loop.c b/linux-user/sw64/cpu_loop.c
new file mode 100644
index 0000000000000000000000000000000000000000..389b753401ca1ccaaf53f8ba793095bb0e6b4bc0
--- /dev/null
+++ b/linux-user/sw64/cpu_loop.c
@@ -0,0 +1,111 @@
+/*
+ * qemu user cpu loop
+ *
+ * Copyright (c) 2003-2008 Fabrice Bellard
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "qemu.h" +#include "user-internals.h" +#include "cpu_loop-common.h" +#include "signal-common.h" + +void cpu_loop(CPUSW64State *env) +{ + CPUState *cs = CPU(sw64_env_get_cpu(env)); + int trapnr; + target_siginfo_t info; + abi_long sysret; + + while (1) { + cpu_exec_start(cs); + trapnr = cpu_exec(cs); + cpu_exec_end(cs); + process_queued_cpu_work(cs); + + switch (trapnr) { + case EXCP_OPCDEC: + cpu_abort(cs, "ILLEGAL SW64 insn at line %d!", __LINE__); + case EXCP_CALL_SYS: + switch (env->error_code) { + case 0x83: + /* CALLSYS */ + trapnr = env->ir[IDX_V0]; + sysret = do_syscall(env, trapnr, + env->ir[IDX_A0], env->ir[IDX_A1], + env->ir[IDX_A2], env->ir[IDX_A3], + env->ir[IDX_A4], env->ir[IDX_A5], + 0, 0); + if (sysret == -TARGET_ERESTARTSYS) { + env->pc -= 4; + break; + } + if (sysret == -TARGET_QEMU_ESIGRETURN) { + break; + } + /* Syscall writes 0 to V0 to bypass error check, similar + to how this is handled internal to Linux kernel. + (Ab)use trapnr temporarily as boolean indicating error. */ + trapnr = (env->ir[IDX_V0] != 0 && sysret < 0); + env->ir[IDX_V0] = (trapnr ? -sysret : sysret); + env->ir[IDX_A3] = trapnr; + break; + default: + printf("UNDO sys_call %lx\n", env->error_code); + exit(-1); + } + break; + case EXCP_MMFAULT: + info.si_signo = TARGET_SIGSEGV; + info.si_errno = 0; + info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID + ? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR); + info._sifields._sigfault._addr = env->trap_arg0; + queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + break; + case EXCP_ARITH: + info.si_signo = TARGET_SIGFPE; + info.si_errno = 0; + info.si_code = TARGET_FPE_FLTINV; + info._sifields._sigfault._addr = env->pc; + queue_signal(env, info.si_signo, QEMU_SI_FAULT, &info); + break; + case EXCP_INTERRUPT: + /* just indicate that signals should be handled asap */ + break; + default: + cpu_abort(cs, "UNDO"); + } + process_pending_signals (env); + + /* Most of the traps imply a transition through hmcode, which + implies an REI instruction has been executed. Which means + that RX and LOCK_ADDR should be cleared. But there are a + few exceptions for traps internal to QEMU. */ + } +} + +void target_cpu_copy_regs(CPUArchState *env, struct target_pt_regs *regs) +{ + int i; + + for(i = 0; i < 28; i++) { + env->ir[i] = ((abi_ulong *)regs)[i]; + } + env->ir[IDX_SP] = regs->usp; + env->pc = regs->pc; +} diff --git a/linux-user/sw64/meson.build b/linux-user/sw64/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..eda005678228e860e02badbe3499aed24a502db0 --- /dev/null +++ b/linux-user/sw64/meson.build @@ -0,0 +1,5 @@ +syscall_nr_generators += { + 'sw64': generator(sh, + arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ], + output: '@BASENAME@_nr.h') +} diff --git a/linux-user/sw64/signal.c b/linux-user/sw64/signal.c new file mode 100644 index 0000000000000000000000000000000000000000..572e192a951562aedfc1ac2d64109c1120b0af6b --- /dev/null +++ b/linux-user/sw64/signal.c @@ -0,0 +1,288 @@ +/* + * Emulation of Linux signals + * + * Copyright (c) 2003 Fabrice Bellard + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
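
The CALLSYS case in cpu_loop() above implements the Alpha/OSF-style return convention: v0 carries the (positive) result or errno and a3 is a separate error flag, except when the guest pre-cleared v0 to bypass the check, as the in-code comment notes. A standalone sketch of that packing; the helper name is mine, not QEMU's:

#include <stdio.h>

/* Mimic the v0/a3 packing cpu_loop() performs after do_syscall():
 * a negative result is an error unless the guest cleared v0 first
 * (the kernel-internal bypass mentioned in the comment above). */
static void pack_osf_return(long sysret, unsigned long *v0, unsigned long *a3)
{
    int is_err = (*v0 != 0 && sysret < 0);

    *v0 = is_err ? -sysret : sysret;  /* errno is returned positive */
    *a3 = is_err;                     /* secondary error flag */
}

int main(void)
{
    unsigned long v0 = 1, a3 = 0;

    pack_osf_return(-9 /* e.g. -EBADF */, &v0, &a3);
    printf("v0=%lu a3=%lu\n", v0, a3);  /* prints v0=9 a3=1 */
    return 0;
}
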
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see . + */ +#include "qemu/osdep.h" +#include "qemu.h" +#include "user-internals.h" +#include "signal-common.h" +#include "linux-user/trace.h" + +struct target_sigcontext { + abi_long sc_onstack; + abi_long sc_mask; + abi_long sc_pc; + abi_long sc_ps; + abi_long sc_regs[32]; + abi_long sc_ownedfp; + abi_long sc_fpregs[32]; + abi_ulong sc_fpcr; + abi_ulong sc_fp_control; + abi_ulong sc_reserved1; + abi_ulong sc_reserved2; + abi_ulong sc_ssize; + abi_ulong sc_sbase; + abi_ulong sc_traparg_a0; + abi_ulong sc_traparg_a1; + abi_ulong sc_traparg_a2; + abi_ulong sc_fp_trap_pc; + abi_ulong sc_fp_trigger_sum; + abi_ulong sc_fp_trigger_inst; +}; + +struct target_ucontext { + abi_ulong tuc_flags; + abi_ulong tuc_link; + abi_ulong tuc_osf_sigmask; + target_stack_t tuc_stack; + struct target_sigcontext tuc_mcontext; + target_sigset_t tuc_sigmask; +}; + +struct target_sigframe { + struct target_sigcontext sc; + unsigned int retcode[3]; +}; + +struct target_rt_sigframe { + target_siginfo_t info; + struct target_ucontext uc; + unsigned int retcode[3]; +}; + +#define INSN_MOV_R30_R16 0x47fe0410 +#define INSN_LDI_R0 0x201f0000 +#define INSN_CALLSYS 0x00000083 + +static void setup_sigcontext(struct target_sigcontext *sc, CPUSW64State *env, + abi_ulong frame_addr, target_sigset_t *set) +{ + int i; + + __put_user(on_sig_stack(frame_addr), &sc->sc_onstack); + __put_user(set->sig[0], &sc->sc_mask); + __put_user(env->pc, &sc->sc_pc); + __put_user(8, &sc->sc_ps); + + for (i = 0; i < 31; ++i) { + __put_user(env->ir[i], &sc->sc_regs[i]); + } + __put_user(0, &sc->sc_regs[31]); + + for (i = 0; i < 31; ++i) { + __put_user(env->fr[i], &sc->sc_fpregs[i]); + } + __put_user(0, &sc->sc_fpregs[31]); + __put_user(cpu_sw64_load_fpcr(env), &sc->sc_fpcr); + + __put_user(0, &sc->sc_traparg_a0); /* FIXME */ + __put_user(0, &sc->sc_traparg_a1); /* FIXME */ + __put_user(0, &sc->sc_traparg_a2); /* FIXME */ +} + +static void restore_sigcontext(CPUSW64State *env, + struct target_sigcontext *sc) +{ + uint64_t fpcr; + int i; + + __get_user(env->pc, &sc->sc_pc); + + for (i = 0; i < 31; ++i) { + __get_user(env->ir[i], &sc->sc_regs[i]); + } + for (i = 0; i < 31; ++i) { + __get_user(env->fr[i], &sc->sc_fpregs[i]); + } + + __get_user(fpcr, &sc->sc_fpcr); + cpu_sw64_store_fpcr(env, fpcr); +} + +static inline abi_ulong get_sigframe(struct target_sigaction *sa, + CPUSW64State *env, + unsigned long framesize) +{ + abi_ulong sp; + + sp = target_sigsp(get_sp_from_cpustate(env), sa); + + return (sp - framesize) & -32; +} + +void setup_frame(int sig, struct target_sigaction *ka, + target_sigset_t *set, CPUSW64State *env) +{ + abi_ulong frame_addr, r26; + struct target_sigframe *frame; + int err = 0; + + frame_addr = get_sigframe(ka, env, sizeof(*frame)); + trace_user_setup_frame(env, frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + goto give_sigsegv; + } + + setup_sigcontext(&frame->sc, env, frame_addr, set); + + if (ka->ka_restorer) { + r26 = ka->ka_restorer; + } else { + __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); + __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, + &frame->retcode[1]); + __put_user(INSN_CALLSYS, &frame->retcode[2]); + /* imb() */ + r26 = 
frame_addr + offsetof(struct target_sigframe, retcode); + } + + unlock_user_struct(frame, frame_addr, 1); + + if (err) { +give_sigsegv: + force_sigsegv(sig); + return; + } + + env->ir[IDX_RA] = r26; + env->ir[IDX_PV] = env->pc = ka->_sa_handler; + env->ir[IDX_A0] = sig; + env->ir[IDX_A1] = 0; + env->ir[IDX_A2] = frame_addr + offsetof(struct target_sigframe, sc); + env->ir[IDX_SP] = frame_addr; +} + +void setup_rt_frame(int sig, struct target_sigaction *ka, + target_siginfo_t *info, + target_sigset_t *set, CPUSW64State *env) +{ + abi_ulong frame_addr, r26; + struct target_rt_sigframe *frame; + int i, err = 0; + + frame_addr = get_sigframe(ka, env, sizeof(*frame)); + trace_user_setup_rt_frame(env, frame_addr); + if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 0)) { + goto give_sigsegv; + } + + tswap_siginfo(&frame->info, info); + + __put_user(0, &frame->uc.tuc_flags); + __put_user(0, &frame->uc.tuc_link); + __put_user(set->sig[0], &frame->uc.tuc_osf_sigmask); + + target_save_altstack(&frame->uc.tuc_stack, env); + + setup_sigcontext(&frame->uc.tuc_mcontext, env, frame_addr, set); + for (i = 0; i < TARGET_NSIG_WORDS; ++i) { + __put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]); + } + + if (ka->ka_restorer) { + r26 = ka->ka_restorer; + } else { + __put_user(INSN_MOV_R30_R16, &frame->retcode[0]); + __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, + &frame->retcode[1]); + __put_user(INSN_CALLSYS, &frame->retcode[2]); + r26 = frame_addr + offsetof(struct target_sigframe, retcode); + } + + if (err) { +give_sigsegv: + force_sigsegv(sig); + return; + } + + env->ir[IDX_RA] = r26; + env->ir[IDX_PV] = env->pc = ka->_sa_handler; + env->ir[IDX_A0] = sig; + env->ir[IDX_A1] = frame_addr + offsetof(struct target_rt_sigframe, info); + env->ir[IDX_A2] = frame_addr + offsetof(struct target_rt_sigframe, uc); + env->ir[IDX_SP] = frame_addr; +} + +long do_sigreturn(CPUSW64State *env) +{ + struct target_sigcontext *sc; + abi_ulong sc_addr = env->ir[IDX_A0]; + target_sigset_t target_set; + sigset_t set; + + if (!lock_user_struct(VERIFY_READ, sc, sc_addr, 1)) { + goto badframe; + } + + target_sigemptyset(&target_set); + __get_user(target_set.sig[0], &sc->sc_mask); + + target_to_host_sigset_internal(&set, &target_set); + set_sigmask(&set); + + restore_sigcontext(env, sc); + unlock_user_struct(sc, sc_addr, 0); + return -TARGET_QEMU_ESIGRETURN; + +badframe: + force_sig(TARGET_SIGSEGV); + return -TARGET_QEMU_ESIGRETURN; +} + +long do_rt_sigreturn(CPUSW64State *env) +{ + abi_ulong frame_addr = env->ir[IDX_A0]; + struct target_rt_sigframe *frame; + sigset_t set; + + trace_user_do_rt_sigreturn(env, frame_addr); + if (!lock_user_struct(VERIFY_READ, frame, frame_addr, 1)) { + goto badframe; + } + target_to_host_sigset(&set, &frame->uc.tuc_sigmask); + set_sigmask(&set); + + restore_sigcontext(env, &frame->uc.tuc_mcontext); + target_restore_altstack(&frame->uc.tuc_stack, env); + + unlock_user_struct(frame, frame_addr, 0); + return -TARGET_QEMU_ESIGRETURN; + + +badframe: + unlock_user_struct(frame, frame_addr, 0); + force_sig(TARGET_SIGSEGV); + return -TARGET_QEMU_ESIGRETURN; +} + +void setup_sigtramp(abi_ulong sigtramp_page) +{ + uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6 * 4, 0); + assert(tramp != NULL); + + default_sigreturn = sigtramp_page; + __put_user(INSN_MOV_R30_R16, &tramp[0]); + __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, &tramp[1]); + __put_user(INSN_CALLSYS, &tramp[2]); + + default_rt_sigreturn = sigtramp_page + 3 * 4; + __put_user(INSN_MOV_R30_R16, &tramp[3]); + __put_user(INSN_LDI_R0 + 
TARGET_NR_rt_sigreturn, &tramp[4]); + __put_user(INSN_CALLSYS, &tramp[5]); + + unlock_user(tramp, sigtramp_page, 6 * 4); +} diff --git a/linux-user/sw64/sockbits.h b/linux-user/sw64/sockbits.h new file mode 100644 index 0000000000000000000000000000000000000000..0e4c8f012d781261da71333ae360abe22ca8083b --- /dev/null +++ b/linux-user/sw64/sockbits.h @@ -0,0 +1 @@ +#include "../generic/sockbits.h" diff --git a/linux-user/sw64/syscall.tbl b/linux-user/sw64/syscall.tbl new file mode 100644 index 0000000000000000000000000000000000000000..d007c7bb0761fa471f17575d13eb48cfe53bcef8 --- /dev/null +++ b/linux-user/sw64/syscall.tbl @@ -0,0 +1,488 @@ +# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note +# +# system call numbers and entry vectors for sw64 +# +# The format is: +# +# +# The is always "common" for this file +# +0 common osf_syscall sw64_syscall_zero +1 common exit sys_exit +2 common fork sw64_fork +3 common read sys_read +4 common write sys_write +5 common osf_old_open sys_ni_syscall +6 common close sys_close +7 common osf_wait4 sys_osf_wait4 +8 common osf_old_creat sys_ni_syscall +9 common link sys_link +10 common unlink sys_unlink +11 common osf_execve sys_ni_syscall +12 common chdir sys_chdir +13 common fchdir sys_fchdir +14 common mknod sys_mknod +15 common chmod sys_chmod +16 common chown sys_chown +17 common brk sys_osf_brk +18 common osf_getfsstat sys_ni_syscall +19 common lseek sys_lseek +20 common getxpid sys_getxpid +21 common osf_mount sys_osf_mount +22 common umount2 sys_umount +23 common setuid sys_setuid +24 common getxuid sys_getxuid +25 common exec_with_loader sys_ni_syscall +26 common ptrace sys_ptrace +27 common osf_nrecvmsg sys_ni_syscall +28 common osf_nsendmsg sys_ni_syscall +29 common osf_nrecvfrom sys_ni_syscall +30 common osf_naccept sys_ni_syscall +31 common osf_ngetpeername sys_ni_syscall +32 common osf_ngetsockname sys_ni_syscall +33 common access sys_access +34 common osf_chflags sys_ni_syscall +35 common osf_fchflags sys_ni_syscall +36 common sync sys_sync +37 common kill sys_kill +38 common osf_old_stat sys_ni_syscall +39 common setpgid sys_setpgid +40 common osf_old_lstat sys_ni_syscall +41 common dup sys_dup +42 common pipe sys_sw64_pipe +43 common osf_set_program_attributes sys_osf_set_program_attributes +44 common osf_profil sys_ni_syscall +45 common open sys_open +46 common osf_old_sigaction sys_ni_syscall +47 common getxgid sys_getxgid +48 common osf_sigprocmask sys_osf_sigprocmask +49 common osf_getlogin sys_ni_syscall +50 common osf_setlogin sys_ni_syscall +51 common acct sys_acct +52 common sigpending sys_sigpending +54 common ioctl sys_ioctl +55 common osf_reboot sys_ni_syscall +56 common osf_revoke sys_ni_syscall +57 common symlink sys_symlink +58 common readlink sys_readlink +59 common execve sys_execve +60 common umask sys_umask +61 common chroot sys_chroot +62 common osf_old_fstat sys_ni_syscall +63 common getpgrp sys_getpgrp +64 common getpagesize sys_getpagesize +65 common osf_mremap sys_ni_syscall +66 common vfork sw64_vfork +67 common stat sys_newstat +68 common lstat sys_newlstat +69 common osf_sbrk sys_ni_syscall +70 common osf_sstk sys_ni_syscall +71 common mmap sys_osf_mmap +72 common osf_old_vadvise sys_ni_syscall +73 common munmap sys_munmap +74 common mprotect sys_mprotect +75 common madvise sys_madvise +76 common vhangup sys_vhangup +77 common osf_kmodcall sys_ni_syscall +78 common osf_mincore sys_ni_syscall +79 common getgroups sys_getgroups +80 common setgroups sys_setgroups +81 common osf_old_getpgrp sys_ni_syscall +82 common 
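
setup_sigtramp() above, at the end of sw64/signal.c, builds each trampoline from three fixed instruction words; the syscall number is simply added into the low 16-bit field of the ldi encoding, which works because sigreturn is number 103 on this target. A sketch that assembles and sanity-checks the words, assuming the Alpha-style encodings the constants suggest:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Instruction words from sw64/signal.c above; the mnemonics are my
 * reading of the (Alpha-like) encoding, not taken from the patch. */
#define INSN_MOV_R30_R16 0x47fe0410u  /* mov $sp, $a0: pass sigframe */
#define INSN_LDI_R0      0x201f0000u  /* ldi $v0, 0($zero) */
#define INSN_CALLSYS     0x00000083u  /* trap to the kernel */

int main(void)
{
    unsigned nr_sigreturn = 103;  /* TARGET_NR_sigreturn in this patch */
    uint32_t tramp[3] = {
        INSN_MOV_R30_R16,
        INSN_LDI_R0 + nr_sigreturn,  /* number lands in the disp field */
        INSN_CALLSYS,
    };

    /* The addition only works while the number fits 16 bits. */
    assert(nr_sigreturn < 0x10000);
    printf("ldi word = 0x%08x\n", tramp[1]);  /* 0x201f0067 */
    return 0;
}
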
setpgrp sys_setpgid +83 common osf_setitimer compat_sys_setitimer +84 common osf_old_wait sys_ni_syscall +85 common osf_table sys_ni_syscall +86 common osf_getitimer compat_sys_getitimer +87 common gethostname sys_gethostname +88 common sethostname sys_sethostname +89 common getdtablesize sys_getdtablesize +90 common dup2 sys_dup2 +91 common fstat sys_newfstat +92 common fcntl sys_fcntl +93 common osf_select sys_osf_select +94 common poll sys_poll +95 common fsync sys_fsync +96 common setpriority sys_setpriority +97 common socket sys_socket +98 common connect sys_connect +99 common accept sys_accept +100 common getpriority sys_osf_getpriority +101 common send sys_send +102 common recv sys_recv +103 common sigreturn sys_sigreturn +104 common bind sys_bind +105 common setsockopt sys_setsockopt +106 common listen sys_listen +107 common osf_plock sys_ni_syscall +108 common osf_old_sigvec sys_ni_syscall +109 common osf_old_sigblock sys_ni_syscall +110 common osf_old_sigsetmask sys_ni_syscall +111 common sigsuspend sys_sigsuspend +112 common osf_sigstack sys_osf_sigstack +113 common recvmsg sys_recvmsg +114 common sendmsg sys_sendmsg +115 common osf_old_vtrace sys_ni_syscall +116 common osf_gettimeofday sys_osf_gettimeofday +117 common osf_getrusage sys_osf_getrusage +118 common getsockopt sys_getsockopt +120 common readv sys_osf_readv +121 common writev sys_osf_writev +122 common osf_settimeofday sys_osf_settimeofday +123 common fchown sys_fchown +124 common fchmod sys_fchmod +125 common recvfrom sys_recvfrom +126 common setreuid sys_setreuid +127 common setregid sys_setregid +128 common rename sys_rename +129 common truncate sys_truncate +130 common ftruncate sys_ftruncate +131 common flock sys_flock +132 common setgid sys_setgid +133 common sendto sys_sendto +134 common shutdown sys_shutdown +135 common socketpair sys_socketpair +136 common mkdir sys_mkdir +137 common rmdir sys_rmdir +138 common osf_utimes sys_osf_utimes +139 common osf_old_sigreturn sys_ni_syscall +140 common osf_adjtime sys_ni_syscall +141 common getpeername sys_getpeername +142 common osf_gethostid sys_ni_syscall +143 common osf_sethostid sys_ni_syscall +144 common getrlimit sys_getrlimit +145 common setrlimit sys_setrlimit +146 common osf_old_killpg sys_ni_syscall +147 common setsid sys_setsid +148 common quotactl sys_quotactl +149 common osf_oldquota sys_ni_syscall +150 common getsockname sys_getsockname +153 common osf_pid_block sys_ni_syscall +154 common osf_pid_unblock sys_ni_syscall +156 common sigaction sys_osf_sigaction +157 common osf_sigwaitprim sys_ni_syscall +158 common osf_nfssvc sys_ni_syscall +159 common osf_getdirentries sys_osf_getdirentries +160 common osf_statfs sys_osf_statfs +161 common osf_fstatfs sys_osf_fstatfs +163 common osf_asynch_daemon sys_ni_syscall +164 common osf_getfh sys_ni_syscall +165 common osf_getdomainname sys_osf_getdomainname +166 common setdomainname sys_setdomainname +169 common osf_exportfs sys_ni_syscall +181 common osf_alt_plock sys_ni_syscall +184 common osf_getmnt sys_ni_syscall +187 common osf_alt_sigpending sys_ni_syscall +188 common osf_alt_setsid sys_ni_syscall +199 common osf_swapon sys_swapon +200 common msgctl sys_old_msgctl +201 common msgget sys_msgget +202 common msgrcv sys_msgrcv +203 common msgsnd sys_msgsnd +204 common semctl sys_old_semctl +205 common semget sys_semget +206 common semop sys_semop +207 common osf_utsname sys_osf_utsname +208 common lchown sys_lchown +209 common shmat sys_shmat +210 common shmctl sys_old_shmctl +211 common shmdt sys_shmdt +212 
common shmget sys_shmget +213 common osf_mvalid sys_ni_syscall +214 common osf_getaddressconf sys_ni_syscall +215 common osf_msleep sys_ni_syscall +216 common osf_mwakeup sys_ni_syscall +217 common msync sys_msync +218 common osf_signal sys_ni_syscall +219 common osf_utc_gettime sys_ni_syscall +220 common osf_utc_adjtime sys_ni_syscall +222 common osf_security sys_ni_syscall +223 common osf_kloadcall sys_ni_syscall +224 common osf_stat sys_osf_stat +225 common osf_lstat sys_osf_lstat +226 common osf_fstat sys_osf_fstat +227 common osf_statfs64 sys_osf_statfs64 +228 common osf_fstatfs64 sys_osf_fstatfs64 +233 common getpgid sys_getpgid +234 common getsid sys_getsid +235 common sigaltstack sys_sigaltstack +236 common osf_waitid sys_ni_syscall +237 common osf_priocntlset sys_ni_syscall +238 common osf_sigsendset sys_ni_syscall +239 common osf_set_speculative sys_ni_syscall +240 common osf_msfs_syscall sys_ni_syscall +241 common osf_sysinfo sys_osf_sysinfo +242 common osf_uadmin sys_ni_syscall +243 common osf_fuser sys_ni_syscall +244 common osf_proplist_syscall sys_osf_proplist_syscall +245 common osf_ntp_adjtime sys_ni_syscall +246 common osf_ntp_gettime sys_ni_syscall +247 common osf_pathconf sys_ni_syscall +248 common osf_fpathconf sys_ni_syscall +250 common osf_uswitch sys_ni_syscall +251 common osf_usleep_thread sys_osf_usleep_thread +252 common osf_audcntl sys_ni_syscall +253 common osf_audgen sys_ni_syscall +254 common sysfs sys_sysfs +255 common osf_subsys_info sys_ni_syscall +256 common osf_getsysinfo sys_osf_getsysinfo +257 common osf_setsysinfo sys_osf_setsysinfo +258 common osf_afs_syscall sys_ni_syscall +259 common osf_swapctl sys_ni_syscall +260 common osf_memcntl sys_ni_syscall +261 common osf_fdatasync sys_ni_syscall +300 common bdflush sys_bdflush +301 common sethae sys_sethae +302 common mount sys_mount +303 common old_adjtimex sys_old_adjtimex +304 common swapoff sys_swapoff +305 common getdents sys_getdents +306 common create_module sys_ni_syscall +307 common init_module sys_init_module +308 common delete_module sys_delete_module +309 common get_kernel_syms sys_ni_syscall +310 common syslog sys_syslog +311 common reboot sys_reboot +312 common clone sw64_clone +313 common uselib sys_uselib +314 common mlock sys_mlock +315 common munlock sys_munlock +316 common mlockall sys_mlockall +317 common munlockall sys_munlockall +318 common sysinfo sys_sysinfo +319 common _sysctl sys_ni_syscall +# 320 was sys_idle +321 common oldumount sys_oldumount +322 common swapon sys_swapon +323 common times sys_times +324 common personality sys_personality +325 common setfsuid sys_setfsuid +326 common setfsgid sys_setfsgid +327 common ustat sys_ustat +328 common statfs sys_statfs +329 common fstatfs sys_fstatfs +330 common sched_setparam sys_sched_setparam +331 common sched_getparam sys_sched_getparam +332 common sched_setscheduler sys_sched_setscheduler +333 common sched_getscheduler sys_sched_getscheduler +334 common sched_yield sys_sched_yield +335 common sched_get_priority_max sys_sched_get_priority_max +336 common sched_get_priority_min sys_sched_get_priority_min +337 common sched_rr_get_interval sys_sched_rr_get_interval +338 common afs_syscall sys_ni_syscall +339 common uname sys_newuname +340 common nanosleep sys_nanosleep +341 common mremap sys_mremap +342 common nfsservctl sys_ni_syscall +343 common setresuid sys_setresuid +344 common getresuid sys_getresuid +345 common pciconfig_read sys_pciconfig_read +346 common pciconfig_write sys_pciconfig_write +347 common query_module 
sys_ni_syscall +348 common prctl sys_prctl +349 common pread64 sys_pread64 +350 common pwrite64 sys_pwrite64 +351 common rt_sigreturn sys_rt_sigreturn +352 common rt_sigaction sys_rt_sigaction +353 common rt_sigprocmask sys_rt_sigprocmask +354 common rt_sigpending sys_rt_sigpending +355 common rt_sigtimedwait sys_rt_sigtimedwait +356 common rt_sigqueueinfo sys_rt_sigqueueinfo +357 common rt_sigsuspend sys_rt_sigsuspend +358 common select sys_select +359 common gettimeofday sys_gettimeofday +360 common settimeofday sys_settimeofday +361 common getitimer sys_getitimer +362 common setitimer sys_setitimer +363 common utimes sys_utimes +364 common getrusage sys_getrusage +365 common wait4 sys_wait4 +366 common adjtimex sys_adjtimex +367 common getcwd sys_getcwd +368 common capget sys_capget +369 common capset sys_capset +370 common sendfile sys_sendfile64 +371 common setresgid sys_setresgid +372 common getresgid sys_getresgid +373 common dipc sys_ni_syscall +374 common pivot_root sys_pivot_root +375 common mincore sys_mincore +376 common pciconfig_iobase sys_pciconfig_iobase +377 common getdents64 sys_getdents64 +378 common gettid sys_gettid +379 common readahead sys_readahead +# 380 is unused +381 common tkill sys_tkill +382 common setxattr sys_setxattr +383 common lsetxattr sys_lsetxattr +384 common fsetxattr sys_fsetxattr +385 common getxattr sys_getxattr +386 common lgetxattr sys_lgetxattr +387 common fgetxattr sys_fgetxattr +388 common listxattr sys_listxattr +389 common llistxattr sys_llistxattr +390 common flistxattr sys_flistxattr +391 common removexattr sys_removexattr +392 common lremovexattr sys_lremovexattr +393 common fremovexattr sys_fremovexattr +394 common futex sys_futex +395 common sched_setaffinity sys_sched_setaffinity +396 common sched_getaffinity sys_sched_getaffinity +397 common tuxcall sys_ni_syscall +398 common io_setup sys_io_setup +399 common io_destroy sys_io_destroy +400 common io_getevents sys_io_getevents +401 common io_submit sys_io_submit +402 common io_cancel sys_io_cancel +405 common exit_group sys_exit_group +406 common lookup_dcookie sys_lookup_dcookie +407 common epoll_create sys_epoll_create +408 common epoll_ctl sys_epoll_ctl +409 common epoll_wait sys_epoll_wait +410 common remap_file_pages sys_remap_file_pages +411 common set_tid_address sys_set_tid_address +412 common restart_syscall sys_restart_syscall +413 common fadvise64 sys_fadvise64 +414 common timer_create sys_timer_create +415 common timer_settime sys_timer_settime +416 common timer_gettime sys_timer_gettime +417 common timer_getoverrun sys_timer_getoverrun +418 common timer_delete sys_timer_delete +419 common clock_settime sys_clock_settime +420 common clock_gettime sys_clock_gettime +421 common clock_getres sys_clock_getres +422 common clock_nanosleep sys_clock_nanosleep +423 common semtimedop sys_semtimedop +424 common tgkill sys_tgkill +425 common stat64 sys_stat64 +426 common lstat64 sys_lstat64 +427 common fstat64 sys_fstat64 +428 common vserver sys_ni_syscall +429 common mbind sys_ni_syscall +430 common get_mempolicy sys_ni_syscall +431 common set_mempolicy sys_ni_syscall +432 common mq_open sys_mq_open +433 common mq_unlink sys_mq_unlink +434 common mq_timedsend sys_mq_timedsend +435 common mq_timedreceive sys_mq_timedreceive +436 common mq_notify sys_mq_notify +437 common mq_getsetattr sys_mq_getsetattr +438 common waitid sys_waitid +439 common add_key sys_add_key +440 common request_key sys_request_key +441 common keyctl sys_keyctl +442 common ioprio_set sys_ioprio_set +443 common 
ioprio_get sys_ioprio_get +444 common inotify_init sys_inotify_init +445 common inotify_add_watch sys_inotify_add_watch +446 common inotify_rm_watch sys_inotify_rm_watch +447 common fdatasync sys_fdatasync +448 common kexec_load sys_kexec_load +449 common migrate_pages sys_migrate_pages +450 common openat sys_openat +451 common mkdirat sys_mkdirat +452 common mknodat sys_mknodat +453 common fchownat sys_fchownat +454 common futimesat sys_futimesat +455 common fstatat64 sys_fstatat64 +456 common unlinkat sys_unlinkat +457 common renameat sys_renameat +458 common linkat sys_linkat +459 common symlinkat sys_symlinkat +460 common readlinkat sys_readlinkat +461 common fchmodat sys_fchmodat +462 common faccessat sys_faccessat +463 common pselect6 sys_pselect6 +464 common ppoll sys_ppoll +465 common unshare sys_unshare +466 common set_robust_list sys_set_robust_list +467 common get_robust_list sys_get_robust_list +468 common splice sys_splice +469 common sync_file_range sys_sync_file_range +470 common tee sys_tee +471 common vmsplice sys_vmsplice +472 common move_pages sys_move_pages +473 common getcpu sys_getcpu +474 common epoll_pwait sys_epoll_pwait +475 common utimensat sys_utimensat +476 common signalfd sys_signalfd +477 common timerfd sys_ni_syscall +478 common eventfd sys_eventfd +479 common recvmmsg sys_recvmmsg +480 common fallocate sys_fallocate +481 common timerfd_create sys_timerfd_create +482 common timerfd_settime sys_timerfd_settime +483 common timerfd_gettime sys_timerfd_gettime +484 common signalfd4 sys_signalfd4 +485 common eventfd2 sys_eventfd2 +486 common epoll_create1 sys_epoll_create1 +487 common dup3 sys_dup3 +488 common pipe2 sys_pipe2 +489 common inotify_init1 sys_inotify_init1 +490 common preadv sys_preadv +491 common pwritev sys_pwritev +492 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo +493 common perf_event_open sys_perf_event_open +494 common fanotify_init sys_fanotify_init +495 common fanotify_mark sys_fanotify_mark +496 common prlimit64 sys_prlimit64 +497 common name_to_handle_at sys_name_to_handle_at +498 common open_by_handle_at sys_open_by_handle_at +499 common clock_adjtime sys_clock_adjtime +500 common syncfs sys_syncfs +501 common setns sys_setns +502 common accept4 sys_accept4 +503 common sendmmsg sys_sendmmsg +504 common process_vm_readv sys_process_vm_readv +505 common process_vm_writev sys_process_vm_writev +506 common kcmp sys_kcmp +507 common finit_module sys_finit_module +508 common sched_setattr sys_sched_setattr +509 common sched_getattr sys_sched_getattr +510 common renameat2 sys_renameat2 +511 common getrandom sys_getrandom +512 common memfd_create sys_memfd_create +513 common execveat sys_execveat +514 common seccomp sys_seccomp +515 common bpf sys_bpf +516 common userfaultfd sys_userfaultfd +517 common membarrier sys_membarrier +518 common mlock2 sys_mlock2 +519 common copy_file_range sys_copy_file_range +520 common preadv2 sys_preadv2 +521 common pwritev2 sys_pwritev2 +522 common statx sys_statx +523 common io_pgetevents sys_io_pgetevents +524 common pkey_mprotect sys_pkey_mprotect +525 common pkey_alloc sys_pkey_alloc +526 common pkey_free sys_pkey_free +527 common rseq sys_rseq +528 common statfs64 sys_statfs64 +529 common fstatfs64 sys_fstatfs64 +530 common getegid sys_getegid +531 common geteuid sys_geteuid +532 common getppid sys_getppid +# all other architectures have common numbers for new syscall, sw64 +# is the exception. 
+534 common pidfd_send_signal sys_pidfd_send_signal +535 common io_uring_setup sys_io_uring_setup +536 common io_uring_enter sys_io_uring_enter +537 common io_uring_register sys_io_uring_register +538 common open_tree sys_open_tree +539 common move_mount sys_move_mount +540 common fsopen sys_fsopen +541 common fsconfig sys_fsconfig +542 common fsmount sys_fsmount +543 common fspick sys_fspick +544 common pidfd_open sys_pidfd_open +# 545 reserved for clone3 +546 common close_range sys_close_range +547 common openat2 sys_openat2 +548 common pidfd_getfd sys_pidfd_getfd +549 common faccessat2 sys_faccessat2 +550 common process_madvise sys_process_madvise +551 common epoll_pwait2 sys_epoll_pwait2 +552 common mount_setattr sys_mount_setattr +# 553 reserved for quotactl_path +554 common landlock_create_ruleset sys_landlock_create_ruleset +555 common landlock_add_rule sys_landlock_add_rule +556 common landlock_restrict_self sys_landlock_restrict_self diff --git a/linux-user/sw64/syscall_nr.h b/linux-user/sw64/syscall_nr.h new file mode 100644 index 0000000000000000000000000000000000000000..91737af322e64376faaadd1c5ba9a5f0d9de2cfc --- /dev/null +++ b/linux-user/sw64/syscall_nr.h @@ -0,0 +1,471 @@ +/* + * This file contains the system call numbers. + */ +#define TARGET_NR_osf_syscall 0 /* not implemented */ +#define TARGET_NR_exit 1 +#define TARGET_NR_fork 2 +#define TARGET_NR_read 3 +#define TARGET_NR_write 4 +#define TARGET_NR_osf_old_open 5 /* not implemented */ +#define TARGET_NR_close 6 +#define TARGET_NR_osf_wait4 7 +#define TARGET_NR_osf_old_creat 8 /* not implemented */ +#define TARGET_NR_link 9 +#define TARGET_NR_unlink 10 +#define TARGET_NR_osf_execve 11 /* not implemented */ +#define TARGET_NR_chdir 12 +#define TARGET_NR_fchdir 13 +#define TARGET_NR_mknod 14 +#define TARGET_NR_chmod 15 +#define TARGET_NR_chown 16 +#define TARGET_NR_brk 17 +#define TARGET_NR_osf_getfsstat 18 /* not implemented */ +#define TARGET_NR_lseek 19 +#define TARGET_NR_getxpid 20 +#define TARGET_NR_osf_mount 21 +#define TARGET_NR_umount 22 +#define TARGET_NR_setuid 23 +#define TARGET_NR_getxuid 24 +#define TARGET_NR_exec_with_loader 25 /* not implemented */ +#define TARGET_NR_ptrace 26 +#define TARGET_NR_osf_nrecvmsg 27 /* not implemented */ +#define TARGET_NR_osf_nsendmsg 28 /* not implemented */ +#define TARGET_NR_osf_nrecvfrom 29 /* not implemented */ +#define TARGET_NR_osf_naccept 30 /* not implemented */ +#define TARGET_NR_osf_ngetpeername 31 /* not implemented */ +#define TARGET_NR_osf_ngetsockname 32 /* not implemented */ +#define TARGET_NR_access 33 +#define TARGET_NR_osf_chflags 34 /* not implemented */ +#define TARGET_NR_osf_fchflags 35 /* not implemented */ +#define TARGET_NR_sync 36 +#define TARGET_NR_kill 37 +#define TARGET_NR_osf_old_stat 38 /* not implemented */ +#define TARGET_NR_setpgid 39 +#define TARGET_NR_osf_old_lstat 40 /* not implemented */ +#define TARGET_NR_dup 41 +#define TARGET_NR_pipe 42 +#define TARGET_NR_osf_set_program_attributes 43 +#define TARGET_NR_osf_profil 44 /* not implemented */ +#define TARGET_NR_open 45 +#define TARGET_NR_osf_old_sigaction 46 /* not implemented */ +#define TARGET_NR_getxgid 47 +#define TARGET_NR_osf_sigprocmask 48 +#define TARGET_NR_osf_getlogin 49 /* not implemented */ +#define TARGET_NR_osf_setlogin 50 /* not implemented */ +#define TARGET_NR_acct 51 +#define TARGET_NR_sigpending 52 + +#define TARGET_NR_ioctl 54 +#define TARGET_NR_osf_reboot 55 /* not implemented */ +#define TARGET_NR_osf_revoke 56 /* not implemented */ +#define TARGET_NR_symlink 57 
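
The syscall.tbl just completed is consumed at build time: each "<number> <abi> <name> <entry point>" row becomes a TARGET_NR_<name> define via the syscallhdr.sh script added later in this patch. A C sketch of that row-to-macro step; the real script does it with grep, sort and printf:

#include <stdio.h>
#include <string.h>

/* Emit the TARGET_NR_* define for one syscall.tbl row, roughly what
 * syscallhdr.sh does (no abi filtering beyond "common", no sorting,
 * no optional offset handling). */
static void emit_define(const char *row)
{
    unsigned nr;
    char abi[16], name[64];

    /* Row format: "<number> <abi> <name> <entry point>" */
    if (sscanf(row, "%u %15s %63s", &nr, abi, name) == 3 &&
        strcmp(abi, "common") == 0) {
        printf("#define TARGET_NR_%s %u\n", name, nr);
    }
}

int main(void)
{
    emit_define("544 common pidfd_open sys_pidfd_open");
    return 0;
}
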
+#define TARGET_NR_readlink 58 +#define TARGET_NR_execve 59 +#define TARGET_NR_umask 60 +#define TARGET_NR_chroot 61 +#define TARGET_NR_osf_old_fstat 62 /* not implemented */ +#define TARGET_NR_getpgrp 63 +#define TARGET_NR_getpagesize 64 +#define TARGET_NR_osf_mremap 65 /* not implemented */ +#define TARGET_NR_vfork 66 +#define TARGET_NR_stat 67 +#define TARGET_NR_lstat 68 +#define TARGET_NR_osf_sbrk 69 /* not implemented */ +#define TARGET_NR_osf_sstk 70 /* not implemented */ +#define TARGET_NR_mmap 71 /* OSF/1 mmap is superset of Linux */ +#define TARGET_NR_osf_old_vadvise 72 /* not implemented */ +#define TARGET_NR_munmap 73 +#define TARGET_NR_mprotect 74 +#define TARGET_NR_madvise 75 +#define TARGET_NR_vhangup 76 +#define TARGET_NR_osf_kmodcall 77 /* not implemented */ +#define TARGET_NR_osf_mincore 78 /* not implemented */ +#define TARGET_NR_getgroups 79 +#define TARGET_NR_setgroups 80 +#define TARGET_NR_osf_old_getpgrp 81 /* not implemented */ +#define TARGET_NR_setpgrp 82 /* BSD alias for setpgid */ +#define TARGET_NR_osf_setitimer 83 +#define TARGET_NR_osf_old_wait 84 /* not implemented */ +#define TARGET_NR_osf_table 85 /* not implemented */ +#define TARGET_NR_osf_getitimer 86 +#define TARGET_NR_gethostname 87 +#define TARGET_NR_sethostname 88 +#define TARGET_NR_getdtablesize 89 +#define TARGET_NR_dup2 90 +#define TARGET_NR_fstat 91 +#define TARGET_NR_fcntl 92 +#define TARGET_NR_osf_select 93 +#define TARGET_NR_poll 94 +#define TARGET_NR_fsync 95 +#define TARGET_NR_setpriority 96 +#define TARGET_NR_socket 97 +#define TARGET_NR_connect 98 +#define TARGET_NR_accept 99 +#define TARGET_NR_getpriority 100 +#define TARGET_NR_send 101 +#define TARGET_NR_recv 102 +#define TARGET_NR_sigreturn 103 +#define TARGET_NR_bind 104 +#define TARGET_NR_setsockopt 105 +#define TARGET_NR_listen 106 +#define TARGET_NR_osf_plock 107 /* not implemented */ +#define TARGET_NR_osf_old_sigvec 108 /* not implemented */ +#define TARGET_NR_osf_old_sigblock 109 /* not implemented */ +#define TARGET_NR_osf_old_sigsetmask 110 /* not implemented */ +#define TARGET_NR_sigsuspend 111 +#define TARGET_NR_osf_sigstack 112 +#define TARGET_NR_recvmsg 113 +#define TARGET_NR_sendmsg 114 +#define TARGET_NR_osf_old_vtrace 115 /* not implemented */ +#define TARGET_NR_osf_gettimeofday 116 +#define TARGET_NR_osf_getrusage 117 +#define TARGET_NR_getsockopt 118 + +#define TARGET_NR_readv 120 +#define TARGET_NR_writev 121 +#define TARGET_NR_osf_settimeofday 122 +#define TARGET_NR_fchown 123 +#define TARGET_NR_fchmod 124 +#define TARGET_NR_recvfrom 125 +#define TARGET_NR_setreuid 126 +#define TARGET_NR_setregid 127 +#define TARGET_NR_rename 128 +#define TARGET_NR_truncate 129 +#define TARGET_NR_ftruncate 130 +#define TARGET_NR_flock 131 +#define TARGET_NR_setgid 132 +#define TARGET_NR_sendto 133 +#define TARGET_NR_shutdown 134 +#define TARGET_NR_socketpair 135 +#define TARGET_NR_mkdir 136 +#define TARGET_NR_rmdir 137 +#define TARGET_NR_osf_utimes 138 +#define TARGET_NR_osf_old_sigreturn 139 /* not implemented */ +#define TARGET_NR_osf_adjtime 140 /* not implemented */ +#define TARGET_NR_getpeername 141 +#define TARGET_NR_osf_gethostid 142 /* not implemented */ +#define TARGET_NR_osf_sethostid 143 /* not implemented */ +#define TARGET_NR_getrlimit 144 +#define TARGET_NR_setrlimit 145 +#define TARGET_NR_osf_old_killpg 146 /* not implemented */ +#define TARGET_NR_setsid 147 +#define TARGET_NR_quotactl 148 +#define TARGET_NR_osf_oldquota 149 /* not implemented */ +#define TARGET_NR_getsockname 150 + +#define TARGET_NR_osf_pid_block 153 
/* not implemented */ +#define TARGET_NR_osf_pid_unblock 154 /* not implemented */ + +#define TARGET_NR_sigaction 156 +#define TARGET_NR_osf_sigwaitprim 157 /* not implemented */ +#define TARGET_NR_osf_nfssvc 158 /* not implemented */ +#define TARGET_NR_osf_getdirentries 159 +#define TARGET_NR_osf_statfs 160 +#define TARGET_NR_osf_fstatfs 161 + +#define TARGET_NR_osf_asynch_daemon 163 /* not implemented */ +#define TARGET_NR_osf_getfh 164 /* not implemented */ +#define TARGET_NR_osf_getdomainname 165 +#define TARGET_NR_setdomainname 166 + +#define TARGET_NR_osf_exportfs 169 /* not implemented */ + +#define TARGET_NR_osf_alt_plock 181 /* not implemented */ + +#define TARGET_NR_osf_getmnt 184 /* not implemented */ + +#define TARGET_NR_osf_alt_sigpending 187 /* not implemented */ +#define TARGET_NR_osf_alt_setsid 188 /* not implemented */ + +#define TARGET_NR_osf_swapon 199 +#define TARGET_NR_msgctl 200 +#define TARGET_NR_msgget 201 +#define TARGET_NR_msgrcv 202 +#define TARGET_NR_msgsnd 203 +#define TARGET_NR_semctl 204 +#define TARGET_NR_semget 205 +#define TARGET_NR_semop 206 +#define TARGET_NR_osf_utsname 207 +#define TARGET_NR_lchown 208 +#define TARGET_NR_osf_shmat 209 +#define TARGET_NR_shmctl 210 +#define TARGET_NR_shmdt 211 +#define TARGET_NR_shmget 212 +#define TARGET_NR_osf_mvalid 213 /* not implemented */ +#define TARGET_NR_osf_getaddressconf 214 /* not implemented */ +#define TARGET_NR_osf_msleep 215 /* not implemented */ +#define TARGET_NR_osf_mwakeup 216 /* not implemented */ +#define TARGET_NR_msync 217 +#define TARGET_NR_osf_signal 218 /* not implemented */ +#define TARGET_NR_osf_utc_gettime 219 /* not implemented */ +#define TARGET_NR_osf_utc_adjtime 220 /* not implemented */ + +#define TARGET_NR_osf_security 222 /* not implemented */ +#define TARGET_NR_osf_kloadcall 223 /* not implemented */ + +#define TARGET_NR_osf_stat 224 +#define TARGET_NR_osf_lstat 225 +#define TARGET_NR_osf_fstat 226 +#define TARGET_NR_osf_statfs64 227 +#define TARGET_NR_osf_fstatfs64 228 + +#define TARGET_NR_getpgid 233 +#define TARGET_NR_getsid 234 +#define TARGET_NR_sigaltstack 235 +#define TARGET_NR_osf_waitid 236 /* not implemented */ +#define TARGET_NR_osf_priocntlset 237 /* not implemented */ +#define TARGET_NR_osf_sigsendset 238 /* not implemented */ +#define TARGET_NR_osf_set_speculative 239 /* not implemented */ +#define TARGET_NR_osf_msfs_syscall 240 /* not implemented */ +#define TARGET_NR_osf_sysinfo 241 +#define TARGET_NR_osf_uadmin 242 /* not implemented */ +#define TARGET_NR_osf_fuser 243 /* not implemented */ +#define TARGET_NR_osf_proplist_syscall 244 +#define TARGET_NR_osf_ntp_adjtime 245 /* not implemented */ +#define TARGET_NR_osf_ntp_gettime 246 /* not implemented */ +#define TARGET_NR_osf_pathconf 247 /* not implemented */ +#define TARGET_NR_osf_fpathconf 248 /* not implemented */ + +#define TARGET_NR_osf_uswitch 250 /* not implemented */ +#define TARGET_NR_osf_usleep_thread 251 +#define TARGET_NR_osf_audcntl 252 /* not implemented */ +#define TARGET_NR_osf_audgen 253 /* not implemented */ +#define TARGET_NR_sysfs 254 +#define TARGET_NR_osf_subsys_info 255 /* not implemented */ +#define TARGET_NR_osf_getsysinfo 256 +#define TARGET_NR_osf_setsysinfo 257 +#define TARGET_NR_osf_afs_syscall 258 /* not implemented */ +#define TARGET_NR_osf_swapctl 259 /* not implemented */ +#define TARGET_NR_osf_memcntl 260 /* not implemented */ +#define TARGET_NR_osf_fdatasync 261 /* not implemented */ + +/* + * Ignore legacy syscalls that we don't use. 
+ */ +#define TARGET_IGNORE_alarm +#define TARGET_IGNORE_creat +#define TARGET_IGNORE_getegid +#define TARGET_IGNORE_geteuid +#define TARGET_IGNORE_getgid +#define TARGET_IGNORE_getpid +#define TARGET_IGNORE_getppid +#define TARGET_IGNORE_getuid +#define TARGET_IGNORE_pause +#define TARGET_IGNORE_time +#define TARGET_IGNORE_utime +#define TARGET_IGNORE_umount2 + +/* + * Linux-specific system calls begin at 300 + */ +#define TARGET_NR_bdflush 300 +#define TARGET_NR_sethae 301 +#define TARGET_NR_mount 302 +#define TARGET_NR_old_adjtimex 303 +#define TARGET_NR_swapoff 304 +#define TARGET_NR_getdents 305 +#define TARGET_NR_create_module 306 +#define TARGET_NR_init_module 307 +#define TARGET_NR_delete_module 308 +#define TARGET_NR_get_kernel_syms 309 +#define TARGET_NR_syslog 310 +#define TARGET_NR_reboot 311 +#define TARGET_NR_clone 312 +#define TARGET_NR_uselib 313 +#define TARGET_NR_mlock 314 +#define TARGET_NR_munlock 315 +#define TARGET_NR_mlockall 316 +#define TARGET_NR_munlockall 317 +#define TARGET_NR_sysinfo 318 +#define TARGET_NR__sysctl 319 +/* 320 was sysTARGETidle. */ +#define TARGET_NR_oldumount 321 +#define TARGET_NR_swapon 322 +#define TARGET_NR_times 323 +#define TARGET_NR_personality 324 +#define TARGET_NR_setfsuid 325 +#define TARGET_NR_setfsgid 326 +#define TARGET_NR_ustat 327 +#define TARGET_NR_statfs 328 +#define TARGET_NR_fstatfs 329 +#define TARGET_NR_sched_setparam 330 +#define TARGET_NR_sched_getparam 331 +#define TARGET_NR_sched_setscheduler 332 +#define TARGET_NR_sched_getscheduler 333 +#define TARGET_NR_sched_yield 334 +#define TARGET_NR_sched_get_priority_max 335 +#define TARGET_NR_sched_get_priority_min 336 +#define TARGET_NR_sched_rr_get_interval 337 +#define TARGET_NR_afs_syscall 338 +#define TARGET_NR_uname 339 +#define TARGET_NR_nanosleep 340 +#define TARGET_NR_mremap 341 +#define TARGET_NR_nfsservctl 342 +#define TARGET_NR_setresuid 343 +#define TARGET_NR_getresuid 344 +#define TARGET_NR_pciconfig_read 345 +#define TARGET_NR_pciconfig_write 346 +#define TARGET_NR_query_module 347 +#define TARGET_NR_prctl 348 +#define TARGET_NR_pread64 349 +#define TARGET_NR_pwrite64 350 +#define TARGET_NR_rt_sigreturn 351 +#define TARGET_NR_rt_sigaction 352 +#define TARGET_NR_rt_sigprocmask 353 +#define TARGET_NR_rt_sigpending 354 +#define TARGET_NR_rt_sigtimedwait 355 +#define TARGET_NR_rt_sigqueueinfo 356 +#define TARGET_NR_rt_sigsuspend 357 +#define TARGET_NR_select 358 +#define TARGET_NR_gettimeofday 359 +#define TARGET_NR_settimeofday 360 +#define TARGET_NR_getitimer 361 +#define TARGET_NR_setitimer 362 +#define TARGET_NR_utimes 363 +#define TARGET_NR_getrusage 364 +#define TARGET_NR_wait4 365 +#define TARGET_NR_adjtimex 366 +#define TARGET_NR_getcwd 367 +#define TARGET_NR_capget 368 +#define TARGET_NR_capset 369 +#define TARGET_NR_sendfile 370 +#define TARGET_NR_setresgid 371 +#define TARGET_NR_getresgid 372 +#define TARGET_NR_dipc 373 +#define TARGET_NR_pivot_root 374 +#define TARGET_NR_mincore 375 +#define TARGET_NR_pciconfig_iobase 376 +#define TARGET_NR_getdents64 377 +#define TARGET_NR_gettid 378 +#define TARGET_NR_readahead 379 +/* 380 is unused */ +#define TARGET_NR_tkill 381 +#define TARGET_NR_setxattr 382 +#define TARGET_NR_lsetxattr 383 +#define TARGET_NR_fsetxattr 384 +#define TARGET_NR_getxattr 385 +#define TARGET_NR_lgetxattr 386 +#define TARGET_NR_fgetxattr 387 +#define TARGET_NR_listxattr 388 +#define TARGET_NR_llistxattr 389 +#define TARGET_NR_flistxattr 390 +#define TARGET_NR_removexattr 391 +#define TARGET_NR_lremovexattr 392 +#define 
TARGET_NR_fremovexattr 393 +#define TARGET_NR_futex 394 +#define TARGET_NR_sched_setaffinity 395 +#define TARGET_NR_sched_getaffinity 396 +#define TARGET_NR_tuxcall 397 +#define TARGET_NR_io_setup 398 +#define TARGET_NR_io_destroy 399 +#define TARGET_NR_io_getevents 400 +#define TARGET_NR_io_submit 401 +#define TARGET_NR_io_cancel 402 +#define TARGET_NR_exit_group 405 +#define TARGET_NR_lookup_dcookie 406 +#define TARGET_NR_epoll_create 407 +#define TARGET_NR_epoll_ctl 408 +#define TARGET_NR_epoll_wait 409 +/* Feb 2007: These three sysTARGETepoll defines shouldn't be here but culling + * them would break userspace apps ... we'll kill them off in 2010 :) */ +#define TARGET_NR_sys_epoll_create TARGET_NR_epoll_create +#define TARGET_NR_sys_epoll_ctl TARGET_NR_epoll_ctl +#define TARGET_NR_sys_epoll_wait TARGET_NR_epoll_wait +#define TARGET_NR_remap_file_pages 410 +#define TARGET_NR_set_tid_address 411 +#define TARGET_NR_restart_syscall 412 +#define TARGET_NR_fadvise64 413 +#define TARGET_NR_timer_create 414 +#define TARGET_NR_timer_settime 415 +#define TARGET_NR_timer_gettime 416 +#define TARGET_NR_timer_getoverrun 417 +#define TARGET_NR_timer_delete 418 +#define TARGET_NR_clock_settime 419 +#define TARGET_NR_clock_gettime 420 +#define TARGET_NR_clock_getres 421 +#define TARGET_NR_clock_nanosleep 422 +#define TARGET_NR_semtimedop 423 +#define TARGET_NR_tgkill 424 +#define TARGET_NR_stat64 425 +#define TARGET_NR_lstat64 426 +#define TARGET_NR_fstat64 427 +#define TARGET_NR_vserver 428 +#define TARGET_NR_mbind 429 +#define TARGET_NR_get_mempolicy 430 +#define TARGET_NR_set_mempolicy 431 +#define TARGET_NR_mq_open 432 +#define TARGET_NR_mq_unlink 433 +#define TARGET_NR_mq_timedsend 434 +#define TARGET_NR_mq_timedreceive 435 +#define TARGET_NR_mq_notify 436 +#define TARGET_NR_mq_getsetattr 437 +#define TARGET_NR_waitid 438 +#define TARGET_NR_add_key 439 +#define TARGET_NR_request_key 440 +#define TARGET_NR_keyctl 441 +#define TARGET_NR_ioprio_set 442 +#define TARGET_NR_ioprio_get 443 +#define TARGET_NR_inotify_init 444 +#define TARGET_NR_inotify_add_watch 445 +#define TARGET_NR_inotify_rm_watch 446 +#define TARGET_NR_fdatasync 447 +#define TARGET_NR_kexec_load 448 +#define TARGET_NR_migrate_pages 449 +#define TARGET_NR_openat 450 +#define TARGET_NR_mkdirat 451 +#define TARGET_NR_mknodat 452 +#define TARGET_NR_fchownat 453 +#define TARGET_NR_futimesat 454 +#define TARGET_NR_fstatat64 455 +#define TARGET_NR_unlinkat 456 +#define TARGET_NR_renameat 457 +#define TARGET_NR_linkat 458 +#define TARGET_NR_symlinkat 459 +#define TARGET_NR_readlinkat 460 +#define TARGET_NR_fchmodat 461 +#define TARGET_NR_faccessat 462 +#define TARGET_NR_pselect6 463 +#define TARGET_NR_ppoll 464 +#define TARGET_NR_unshare 465 +#define TARGET_NR_set_robust_list 466 +#define TARGET_NR_get_robust_list 467 +#define TARGET_NR_splice 468 +#define TARGET_NR_sync_file_range 469 +#define TARGET_NR_tee 470 +#define TARGET_NR_vmsplice 471 +#define TARGET_NR_move_pages 472 +#define TARGET_NR_getcpu 473 +#define TARGET_NR_epoll_pwait 474 +#define TARGET_NR_utimensat 475 +#define TARGET_NR_signalfd 476 +#define TARGET_NR_timerfd 477 +#define TARGET_NR_eventfd 478 +#define TARGET_NR_recvmmsg 479 +#define TARGET_NR_fallocate 480 +#define TARGET_NR_timerfd_create 481 +#define TARGET_NR_timerfd_settime 482 +#define TARGET_NR_timerfd_gettime 483 +#define TARGET_NR_signalfd4 484 +#define TARGET_NR_eventfd2 485 +#define TARGET_NR_epoll_create1 486 +#define TARGET_NR_dup3 487 +#define TARGET_NR_pipe2 488 +#define TARGET_NR_inotify_init1 489 
+#define TARGET_NR_preadv 490 +#define TARGET_NR_pwritev 491 +#define TARGET_NR_rt_tgsigqueueinfo 492 +#define TARGET_NR_perf_event_open 493 +#define TARGET_NR_fanotify_init 494 +#define TARGET_NR_fanotify_mark 495 +#define TARGET_NR_prlimit64 496 +#define TARGET_NR_name_to_handle_at 497 +#define TARGET_NR_open_by_handle_at 498 +#define TARGET_NR_clock_adjtime 499 +#define TARGET_NR_syncfs 500 +#define TARGET_NR_setns 501 +#define TARGET_NR_accept4 502 +#define TARGET_NR_sendmmsg 503 +#define TARGET_NR_process_vm_readv 504 +#define TARGET_NR_process_vm_writev 505 +#define TARGET_NR_sw_slave_rwperfmons 506 +#define TARGET_NR_sys_get_vmflags 507 diff --git a/linux-user/sw64/syscallhdr.sh b/linux-user/sw64/syscallhdr.sh new file mode 100644 index 0000000000000000000000000000000000000000..46c166d8ae096898b70371ed610599e80ee16af4 --- /dev/null +++ b/linux-user/sw64/syscallhdr.sh @@ -0,0 +1,32 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +in="$1" +out="$2" +my_abis=`echo "($3)" | tr ',' '|'` +prefix="$4" +offset="$5" + +fileguard=LINUX_USER_SW64_`basename "$out" | sed \ + -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ + -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` +grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( + printf "#ifndef %s\n" "${fileguard}" + printf "#define %s\n" "${fileguard}" + printf "\n" + + nxt=0 + while read nr abi name entry ; do + if [ -z "$offset" ]; then + printf "#define TARGET_NR_%s%s\t%s\n" \ + "${prefix}" "${name}" "${nr}" + else + printf "#define TARGET_NR_%s%s\t(%s + %s)\n" \ + "${prefix}" "${name}" "${offset}" "${nr}" + fi + nxt=$((nr+1)) + done + + printf "\n" + printf "#endif /* %s */" "${fileguard}" +) > "$out" diff --git a/linux-user/sw64/target_cpu.h b/linux-user/sw64/target_cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..63afa699c3b5c243b3e7ac2117e7416827dfa84a --- /dev/null +++ b/linux-user/sw64/target_cpu.h @@ -0,0 +1,51 @@ +/* + * SW64 specific CPU ABI and functions for linux-user + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ +#ifndef SW64_TARGET_CPU_H +#define SW64_TARGET_CPU_H + +static inline void cpu_clone_regs_child(CPUSW64State *env, target_ulong newsp, unsigned flags) +{ + if (newsp) { + env->ir[IDX_SP] = newsp; + } + env->ir[IDX_V0] = 0; + env->ir[IDX_A3] = 0; + env->ir[IDX_A4] = 1; /* OSF/1 secondary return: child */ +} + +static inline void cpu_clone_regs_parent(CPUSW64State *env, unsigned flags) +{ + /* + * OSF/1 secondary return: parent + * Note that the kernel does not do this if SETTLS, because the + * settls argument register is still live after copy_thread. 
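
The cpu_clone_regs_child()/cpu_clone_regs_parent() pair in target_cpu.h implements the OSF/1 dual return for fork and clone: v0 holds the child's pid in both processes, while a4 is the secondary return (1 in the child, 0 in the parent, and left untouched for CLONE_SETTLS for the reason the comment gives). A sketch of how a guest libc wrapper would fold that pair into the usual fork() result; illustrative only, not real libc code:

#include <stdio.h>

/* The register pair a guest sees after the syscall returns. */
struct osf_fork_ret {
    long v0;  /* child pid in both parent and child */
    long a4;  /* secondary return: 1 = child, 0 = parent */
};

/* Fold the OSF/1 dual return into the POSIX fork() result. */
static long fork_result(struct osf_fork_ret r)
{
    return r.a4 ? 0 : r.v0;
}

int main(void)
{
    struct osf_fork_ret parent = { 1234, 0 }, child = { 1234, 1 };

    printf("parent sees %ld, child sees %ld\n",
           fork_result(parent), fork_result(child));
    return 0;
}
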
+ */ + if (!(flags & CLONE_SETTLS)) { + env->ir[IDX_A4] = 0; + } +} + +static inline void cpu_set_tls(CPUSW64State *env, target_ulong newtls) +{ + env->unique = newtls; +} + +static inline abi_ulong get_sp_from_cpustate(CPUSW64State *state) +{ + return state->ir[IDX_SP]; +} +#endif diff --git a/linux-user/sw64/target_elf.h b/linux-user/sw64/target_elf.h new file mode 100644 index 0000000000000000000000000000000000000000..be48b6dee3e27d5f518263a0924ced93bd3b0115 --- /dev/null +++ b/linux-user/sw64/target_elf.h @@ -0,0 +1,14 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or (at your option) any + * later version. See the COPYING file in the top-level directory. + */ + +#ifndef SW64_TARGET_ELF_H +#define SW64_TARGET_ELF_H +static inline const char *cpu_get_model(uint32_t eflags) +{ + return "any"; +} +#endif diff --git a/linux-user/sw64/target_errno_defs.h b/linux-user/sw64/target_errno_defs.h new file mode 100644 index 0000000000000000000000000000000000000000..fd637f5bc965414b5153c2eb8bb5369a1b652f1a --- /dev/null +++ b/linux-user/sw64/target_errno_defs.h @@ -0,0 +1,204 @@ +#ifndef sw64_TARGET_ERRNO_DEFS_H +#define sw64_TARGET_ERRNO_DEFS_H + +#include "../generic/target_errno_defs.h" + +/* + * Generic target errno overridden with definitions taken + * from asm-sw64/errno.h + */ +#undef TARGET_EWOULDBLOCK +#define TARGET_EWOULDBLOCK TARGET_EAGAIN +#undef TARGET_EDEADLK +#define TARGET_EDEADLK 11 +#undef TARGET_EAGAIN +#define TARGET_EAGAIN 35 +#undef TARGET_EINPROGRESS +#define TARGET_EINPROGRESS 36 +#undef TARGET_EALREADY +#define TARGET_EALREADY 37 +#undef TARGET_ENOTSOCK +#define TARGET_ENOTSOCK 38 +#undef TARGET_EDESTADDRREQ +#define TARGET_EDESTADDRREQ 39 +#undef TARGET_EMSGSIZE +#define TARGET_EMSGSIZE 40 +#undef TARGET_EPROTOTYPE +#define TARGET_EPROTOTYPE 41 +#undef TARGET_ENOPROTOOPT +#define TARGET_ENOPROTOOPT 42 +#undef TARGET_EPROTONOSUPPORT +#define TARGET_EPROTONOSUPPORT 43 +#undef TARGET_ESOCKTNOSUPPORT +#define TARGET_ESOCKTNOSUPPORT 44 +#undef TARGET_EOPNOTSUPP +#define TARGET_EOPNOTSUPP 45 +#undef TARGET_EPFNOSUPPORT +#define TARGET_EPFNOSUPPORT 46 +#undef TARGET_EAFNOSUPPORT +#define TARGET_EAFNOSUPPORT 47 +#undef TARGET_EADDRINUSE +#define TARGET_EADDRINUSE 48 +#undef TARGET_EADDRNOTAVAIL +#define TARGET_EADDRNOTAVAIL 49 +#undef TARGET_ENETDOWN +#define TARGET_ENETDOWN 50 +#undef TARGET_ENETUNREACH +#define TARGET_ENETUNREACH 51 +#undef TARGET_ENETRESET +#define TARGET_ENETRESET 52 +#undef TARGET_ECONNABORTED +#define TARGET_ECONNABORTED 53 +#undef TARGET_ECONNRESET +#define TARGET_ECONNRESET 54 +#undef TARGET_ENOBUFS +#define TARGET_ENOBUFS 55 +#undef TARGET_EISCONN +#define TARGET_EISCONN 56 +#undef TARGET_ENOTCONN +#define TARGET_ENOTCONN 57 +#undef TARGET_ESHUTDOWN +#define TARGET_ESHUTDOWN 58 +#undef TARGET_ETOOMANYREFS +#define TARGET_ETOOMANYREFS 59 +#undef TARGET_ETIMEDOUT +#define TARGET_ETIMEDOUT 60 +#undef TARGET_ECONNREFUSED +#define TARGET_ECONNREFUSED 61 +#undef TARGET_ELOOP +#define TARGET_ELOOP 62 +#undef TARGET_ENAMETOOLONG +#define TARGET_ENAMETOOLONG 63 +#undef TARGET_EHOSTDOWN +#define TARGET_EHOSTDOWN 64 +#undef TARGET_EHOSTUNREACH +#define TARGET_EHOSTUNREACH 65 +#undef TARGET_ENOTEMPTY +#define TARGET_ENOTEMPTY 66 +/* Unused 67 */ +#undef TARGET_EUSERS +#define TARGET_EUSERS 68 +#undef TARGET_EDQUOT +#define TARGET_EDQUOT 69 +#undef TARGET_ESTALE +#define TARGET_ESTALE 70 +#undef TARGET_EREMOTE +#define 
TARGET_EREMOTE 71 +/* Unused 72-76 */ +#undef TARGET_ENOLCK +#define TARGET_ENOLCK 77 +#undef TARGET_ENOSYS +#define TARGET_ENOSYS 78 +/* Unused 79 */ +#undef TARGET_ENOMSG +#define TARGET_ENOMSG 80 +#undef TARGET_EIDRM +#define TARGET_EIDRM 81 +#undef TARGET_ENOSR +#define TARGET_ENOSR 82 +#undef TARGET_ETIME +#define TARGET_ETIME 83 +#undef TARGET_EBADMSG +#define TARGET_EBADMSG 84 +#undef TARGET_EPROTO +#define TARGET_EPROTO 85 +#undef TARGET_ENODATA +#define TARGET_ENODATA 86 +#undef TARGET_ENOSTR +#define TARGET_ENOSTR 87 +#undef TARGET_ECHRNG +#define TARGET_ECHRNG 88 +#undef TARGET_EL2NSYNC +#define TARGET_EL2NSYNC 89 +#undef TARGET_EL3HLT +#define TARGET_EL3HLT 90 +#undef TARGET_EL3RST +#define TARGET_EL3RST 91 +#undef TARGET_ENOPKG +#define TARGET_ENOPKG 92 +#undef TARGET_ELNRNG +#define TARGET_ELNRNG 93 +#undef TARGET_EUNATCH +#define TARGET_EUNATCH 94 +#undef TARGET_ENOCSI +#define TARGET_ENOCSI 95 +#undef TARGET_EL2HLT +#define TARGET_EL2HLT 96 +#undef TARGET_EBADE +#define TARGET_EBADE 97 +#undef TARGET_EBADR +#define TARGET_EBADR 98 +#undef TARGET_EXFULL +#define TARGET_EXFULL 99 +#undef TARGET_ENOANO +#define TARGET_ENOANO 100 +#undef TARGET_EBADRQC +#define TARGET_EBADRQC 101 +#undef TARGET_EBADSLT +#define TARGET_EBADSLT 102 +/* Unused 103 */ +#undef TARGET_EBFONT +#define TARGET_EBFONT 104 +#undef TARGET_ENONET +#define TARGET_ENONET 105 +#undef TARGET_ENOLINK +#define TARGET_ENOLINK 106 +#undef TARGET_EADV +#define TARGET_EADV 107 +#undef TARGET_ESRMNT +#define TARGET_ESRMNT 108 +#undef TARGET_ECOMM +#define TARGET_ECOMM 109 +#undef TARGET_EMULTIHOP +#define TARGET_EMULTIHOP 110 +#undef TARGET_EDOTDOT +#define TARGET_EDOTDOT 111 +#undef TARGET_EOVERFLOW +#define TARGET_EOVERFLOW 112 +#undef TARGET_ENOTUNIQ +#define TARGET_ENOTUNIQ 113 +#undef TARGET_EBADFD +#define TARGET_EBADFD 114 +#undef TARGET_EREMCHG +#define TARGET_EREMCHG 115 +#undef TARGET_EILSEQ +#define TARGET_EILSEQ 116 +/* Same as default 117-121 */ +#undef TARGET_ELIBACC +#define TARGET_ELIBACC 122 +#undef TARGET_ELIBBAD +#define TARGET_ELIBBAD 123 +#undef TARGET_ELIBSCN +#define TARGET_ELIBSCN 124 +#undef TARGET_ELIBMAX +#define TARGET_ELIBMAX 125 +#undef TARGET_ELIBEXEC +#define TARGET_ELIBEXEC 126 +#undef TARGET_ERESTART +#define TARGET_ERESTART 127 +#undef TARGET_ESTRPIPE +#define TARGET_ESTRPIPE 128 +#undef TARGET_ENOMEDIUM +#define TARGET_ENOMEDIUM 129 +#undef TARGET_EMEDIUMTYPE +#define TARGET_EMEDIUMTYPE 130 +#undef TARGET_ECANCELED +#define TARGET_ECANCELED 131 +#undef TARGET_ENOKEY +#define TARGET_ENOKEY 132 +#undef TARGET_EKEYEXPIRED +#define TARGET_EKEYEXPIRED 133 +#undef TARGET_EKEYREVOKED +#define TARGET_EKEYREVOKED 134 +#undef TARGET_EKEYREJECTED +#define TARGET_EKEYREJECTED 135 +#undef TARGET_EOWNERDEAD +#define TARGET_EOWNERDEAD 136 +#undef TARGET_ENOTRECOVERABLE +#define TARGET_ENOTRECOVERABLE 137 +#undef TARGET_ERFKILL +#define TARGET_ERFKILL 138 +#undef TARGET_EHWPOISON +#define TARGET_EHWPOISON 139 + +#endif diff --git a/linux-user/sw64/target_fcntl.h b/linux-user/sw64/target_fcntl.h new file mode 100644 index 0000000000000000000000000000000000000000..9721e3de39bf6570f809436aee1d09fd14fc9042 --- /dev/null +++ b/linux-user/sw64/target_fcntl.h @@ -0,0 +1,11 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation, or (at your option) any + * later version. See the COPYING file in the top-level directory. 
+ */ + +#ifndef SW64_TARGET_FCNTL_H +#define SW64_TARGET_FCNTL_H +#include "../generic/fcntl.h" +#endif diff --git a/linux-user/sw64/target_signal.h b/linux-user/sw64/target_signal.h new file mode 100644 index 0000000000000000000000000000000000000000..8cc1693b0582b9d16bebd7071e19e675e37a0d19 --- /dev/null +++ b/linux-user/sw64/target_signal.h @@ -0,0 +1,100 @@ +#ifndef SW64_TARGET_SIGNAL_H +#define SW64_TARGET_SIGNAL_H + +#include "cpu.h" + +#define TARGET_SIGHUP 1 +#define TARGET_SIGINT 2 +#define TARGET_SIGQUIT 3 +#define TARGET_SIGILL 4 +#define TARGET_SIGTRAP 5 +#define TARGET_SIGABRT 6 +#define TARGET_SIGSTKFLT 7 /* actually SIGEMT */ +#define TARGET_SIGFPE 8 +#define TARGET_SIGKILL 9 +#define TARGET_SIGBUS 10 +#define TARGET_SIGSEGV 11 +#define TARGET_SIGSYS 12 +#define TARGET_SIGPIPE 13 +#define TARGET_SIGALRM 14 +#define TARGET_SIGTERM 15 +#define TARGET_SIGURG 16 +#define TARGET_SIGSTOP 17 +#define TARGET_SIGTSTP 18 +#define TARGET_SIGCONT 19 +#define TARGET_SIGCHLD 20 +#define TARGET_SIGTTIN 21 +#define TARGET_SIGTTOU 22 +#define TARGET_SIGIO 23 +#define TARGET_SIGXCPU 24 +#define TARGET_SIGXFSZ 25 +#define TARGET_SIGVTALRM 26 +#define TARGET_SIGPROF 27 +#define TARGET_SIGWINCH 28 +#define TARGET_SIGPWR 29 /* actually SIGINFO */ +#define TARGET_SIGUSR1 30 +#define TARGET_SIGUSR2 31 +#define TARGET_SIGRTMIN 32 + +#define TARGET_SIG_BLOCK 1 +#define TARGET_SIG_UNBLOCK 2 +#define TARGET_SIG_SETMASK 3 + +/* this struct defines a stack used during syscall handling */ + +typedef struct target_sigaltstack { + abi_ulong ss_sp; + int32_t ss_flags; + int32_t dummy; + abi_ulong ss_size; +} target_stack_t; + + +/* + * sigaltstack controls + */ +#define TARGET_SS_ONSTACK 1 +#define TARGET_SS_DISABLE 2 + +#define TARGET_SA_ONSTACK 0x00000001 +#define TARGET_SA_RESTART 0x00000002 +#define TARGET_SA_NOCLDSTOP 0x00000004 +#define TARGET_SA_NODEFER 0x00000008 +#define TARGET_SA_RESETHAND 0x00000010 +#define TARGET_SA_NOCLDWAIT 0x00000020 /* not supported yet */ +#define TARGET_SA_SIGINFO 0x00000040 + +#define TARGET_MINSIGSTKSZ 4096 +#define TARGET_SIGSTKSZ 16384 + +/* From <asm/gentrap.h>. */ +#define TARGET_GEN_INTOVF -1 /* integer overflow */ +#define TARGET_GEN_INTDIV -2 /* integer division by zero */ +#define TARGET_GEN_FLTOVF -3 /* fp overflow */ +#define TARGET_GEN_FLTDIV -4 /* fp division by zero */ +#define TARGET_GEN_FLTUND -5 /* fp underflow */ +#define TARGET_GEN_FLTINV -6 /* invalid fp operand */ +#define TARGET_GEN_FLTINE -7 /* inexact fp operand */ +#define TARGET_GEN_DECOVF -8 /* decimal overflow (for COBOL??)
*/ +#define TARGET_GEN_DECDIV -9 /* decimal division by zero */ +#define TARGET_GEN_DECINV -10 /* invalid decimal operand */ +#define TARGET_GEN_ROPRAND -11 /* reserved operand */ +#define TARGET_GEN_ASSERTERR -12 /* assertion error */ +#define TARGET_GEN_NULPTRERR -13 /* null pointer error */ +#define TARGET_GEN_STKOVF -14 /* stack overflow */ +#define TARGET_GEN_STRLENERR -15 /* string length error */ +#define TARGET_GEN_SUBSTRERR -16 /* substring error */ +#define TARGET_GEN_RANGERR -17 /* range error */ +#define TARGET_GEN_SUBRNG -18 +#define TARGET_GEN_SUBRNG1 -19 +#define TARGET_GEN_SUBRNG2 -20 +#define TARGET_GEN_SUBRNG3 -21 +#define TARGET_GEN_SUBRNG4 -22 +#define TARGET_GEN_SUBRNG5 -23 +#define TARGET_GEN_SUBRNG6 -24 +#define TARGET_GEN_SUBRNG7 -25 + +#define TARGET_ARCH_HAS_SETUP_FRAME +#define TARGET_ARCH_HAS_KA_RESTORER +#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1 +#endif /* SW64_TARGET_SIGNAL_H */ diff --git a/linux-user/sw64/target_structs.h b/linux-user/sw64/target_structs.h new file mode 100644 index 0000000000000000000000000000000000000000..7c13dc4bac0841cb2e649f35e7ca8ecf0933836e --- /dev/null +++ b/linux-user/sw64/target_structs.h @@ -0,0 +1,47 @@ +/* + * SW64 specific structures for linux-user + * + * Copyright (c) 2018 Lin Hainan + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + */ +#ifndef SW64_TARGET_STRUCTS_H +#define SW64_TARGET_STRUCTS_H + +/* TODO: Maybe this should be updated; it currently differs from other arches. */ +struct target_ipc_perm { + abi_int __key; /* Key. */ + abi_uint uid; /* Owner's user ID. */ + abi_uint gid; /* Owner's group ID. */ + abi_uint cuid; /* Creator's user ID. */ + abi_uint cgid; /* Creator's group ID. */ + abi_uint mode; /* Read/write permission. */ + abi_ushort __seq; /* Sequence number.
*/ + abi_ushort __pad1; + abi_ulong __unused1; + abi_ulong __unused2; +}; + +struct target_shmid_ds { + struct target_ipc_perm shm_perm; /* operation permission struct */ + abi_long shm_segsz; /* size of segment in bytes */ + abi_ulong shm_atime; /* time of last shmat() */ + abi_ulong shm_dtime; /* time of last shmdt() */ + abi_ulong shm_ctime; /* time of last change by shmctl() */ + abi_int shm_cpid; /* pid of creator */ + abi_int shm_lpid; /* pid of last shmop */ + abi_ulong shm_nattch; /* number of current attaches */ + abi_ulong __unused1; + abi_ulong __unused2; +}; + +#endif diff --git a/linux-user/sw64/target_syscall.h b/linux-user/sw64/target_syscall.h new file mode 100644 index 0000000000000000000000000000000000000000..418905110cbe6dc880f8b8f4e9d3836559768395 --- /dev/null +++ b/linux-user/sw64/target_syscall.h @@ -0,0 +1,125 @@ +#ifndef SW64_TARGET_SYSCALL_H +#define SW64_TARGET_SYSCALL_H + +/* TODO */ +struct target_pt_regs { + abi_ulong r0; + abi_ulong r1; + abi_ulong r2; + abi_ulong r3; + abi_ulong r4; + abi_ulong r5; + abi_ulong r6; + abi_ulong r7; + abi_ulong r8; + abi_ulong r19; + abi_ulong r20; + abi_ulong r21; + abi_ulong r22; + abi_ulong r23; + abi_ulong r24; + abi_ulong r25; + abi_ulong r26; + abi_ulong r27; + abi_ulong r28; + abi_ulong hae; +/* JRP - These are the values provided to a0-a2 by hmcode */ + abi_ulong trap_a0; + abi_ulong trap_a1; + abi_ulong trap_a2; +/* These are saved by hmcode: */ + abi_ulong ps; + abi_ulong pc; + abi_ulong gp; + abi_ulong r16; + abi_ulong r17; + abi_ulong r18; +/* These are needed by qemu to temporarily store the user stack pointer */ + abi_ulong usp; + abi_ulong unique; +}; + + +#define TARGET_MCL_CURRENT 0x2000 +#define TARGET_MCL_FUTURE 0x4000 +#define TARGET_MCL_ONFAULT 0x8000 + +#define UNAME_MACHINE "sw64" +#define UNAME_MINIMUM_RELEASE "2.6.32" +#undef TARGET_EOPNOTSUPP +#define TARGET_EOPNOTSUPP 45 /* Operation not supported on transport endpoint */ +#define SWCR_STATUS_INV0 (1UL<<17) +#define SWCR_STATUS_DZE0 (1UL<<18) +#define SWCR_STATUS_OVF0 (1UL<<19) +#define SWCR_STATUS_UNF0 (1UL<<20) +#define SWCR_STATUS_INE0 (1UL<<21) +#define SWCR_STATUS_DNO0 (1UL<<22) + +#define SWCR_STATUS_MASK0 (SWCR_STATUS_INV0 | SWCR_STATUS_DZE0 | \ + SWCR_STATUS_OVF0 | SWCR_STATUS_UNF0 | \ + SWCR_STATUS_INE0 | SWCR_STATUS_DNO0) + +#define SWCR_STATUS0_TO_EXCSUM_SHIFT 16 + +#define SWCR_STATUS_INV1 (1UL<<23) +#define SWCR_STATUS_DZE1 (1UL<<24) +#define SWCR_STATUS_OVF1 (1UL<<25) +#define SWCR_STATUS_UNF1 (1UL<<26) +#define SWCR_STATUS_INE1 (1UL<<27) +#define SWCR_STATUS_DNO1 (1UL<<28) + +#define SWCR_STATUS_MASK1 (SWCR_STATUS_INV1 | SWCR_STATUS_DZE1 | \ + SWCR_STATUS_OVF1 | SWCR_STATUS_UNF1 | \ + SWCR_STATUS_INE1 | SWCR_STATUS_DNO1) + +#define SWCR_STATUS1_TO_EXCSUM_SHIFT 22 +#define SWCR_STATUS_INV2 (1UL<<34) +#define SWCR_STATUS_DZE2 (1UL<<35) +#define SWCR_STATUS_OVF2 (1UL<<36) +#define SWCR_STATUS_UNF2 (1UL<<37) +#define SWCR_STATUS_INE2 (1UL<<38) +#define SWCR_STATUS_DNO2 (1UL<<39) + +#define SWCR_STATUS_MASK2 (SWCR_STATUS_INV2 | SWCR_STATUS_DZE2 | \ + SWCR_STATUS_OVF2 | SWCR_STATUS_UNF2 | \ + SWCR_STATUS_INE2 | SWCR_STATUS_DNO2) +#define SWCR_STATUS_INV3 (1UL<<40) +#define SWCR_STATUS_DZE3 (1UL<<41) +#define SWCR_STATUS_OVF3 (1UL<<42) +#define SWCR_STATUS_UNF3 (1UL<<43) +#define SWCR_STATUS_INE3 (1UL<<44) +#define SWCR_STATUS_DNO3 (1UL<<45) + +#define SWCR_STATUS_MASK3 (SWCR_STATUS_INV3 | SWCR_STATUS_DZE3 | \ + SWCR_STATUS_OVF3 | SWCR_STATUS_UNF3 | \ + SWCR_STATUS_INE3 | SWCR_STATUS_DNO3) +#define SWCR_TRAP_ENABLE_INV (1UL<<1) /* invalid op */
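In the floating-point control word laid out above, the lane-0 status flags occupy bits 17-22, and SWCR_STATUS0_TO_EXCSUM_SHIFT drops them onto bits 1-6, which is exactly where the SWCR_TRAP_ENABLE_* bits that continue just below are defined. A small sketch of that alignment; the constants are copied from the header, while the matching logic is only an illustration of how the masks compose, not code from the patch:

#include <stdint.h>
#include <stdio.h>

/* Copied from the target_syscall.h defines above. */
#define SWCR_STATUS_INV0              (1UL << 17)
#define SWCR_STATUS_DZE0              (1UL << 18)
#define SWCR_STATUS_OVF0              (1UL << 19)
#define SWCR_STATUS_UNF0              (1UL << 20)
#define SWCR_STATUS_INE0              (1UL << 21)
#define SWCR_STATUS_DNO0              (1UL << 22)
#define SWCR_STATUS_MASK0             (SWCR_STATUS_INV0 | SWCR_STATUS_DZE0 | \
                                       SWCR_STATUS_OVF0 | SWCR_STATUS_UNF0 | \
                                       SWCR_STATUS_INE0 | SWCR_STATUS_DNO0)
#define SWCR_STATUS0_TO_EXCSUM_SHIFT  16
#define SWCR_TRAP_ENABLE_INV          (1UL << 1)

int main(void)
{
    uint64_t swcr = SWCR_STATUS_INV0 | SWCR_STATUS_OVF0; /* pretend both raised */

    /* Shifting the lane-0 status field down by 16 lines bit 17 up with
     * SWCR_TRAP_ENABLE_INV (bit 1), bit 18 with the DZE enable (bit 2),
     * and so on, so pending flags can be tested against enabled traps. */
    uint64_t excsum = (swcr & SWCR_STATUS_MASK0) >> SWCR_STATUS0_TO_EXCSUM_SHIFT;

    if (excsum & SWCR_TRAP_ENABLE_INV) {
        printf("invalid-operation flag pending for lane 0\n");
    }
    return 0;
}

The lane-1 field behaves the same way with its 22-bit shift: bits 23-28 likewise land on bits 1-6.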
+#define SWCR_TRAP_ENABLE_DZE (1UL<<2) /* division by zero */ +#define SWCR_TRAP_ENABLE_OVF (1UL<<3) /* overflow */ +#define SWCR_TRAP_ENABLE_UNF (1UL<<4) /* underflow */ +#define SWCR_TRAP_ENABLE_INE (1UL<<5) /* inexact */ +#define SWCR_TRAP_ENABLE_DNO (1UL<<6) /* denorm */ +#define SWCR_TRAP_ENABLE_MASK (SWCR_TRAP_ENABLE_INV | SWCR_TRAP_ENABLE_DZE | \ + SWCR_TRAP_ENABLE_OVF | SWCR_TRAP_ENABLE_UNF | \ + SWCR_TRAP_ENABLE_INE | SWCR_TRAP_ENABLE_DNO) + +/* Denorm and Underflow flushing */ +#define SWCR_MAP_DMZ (1UL<<12) /* Map denorm inputs to zero */ +#define SWCR_MAP_UMZ (1UL<<13) /* Map underflowed outputs to zero */ + +#define SWCR_MAP_MASK (SWCR_MAP_DMZ | SWCR_MAP_UMZ) + +/* status bits coming from fpcr: */ +#define SWCR_STATUS_INV (1UL<<17) +#define SWCR_STATUS_DZE (1UL<<18) +#define SWCR_STATUS_OVF (1UL<<19) +#define SWCR_STATUS_UNF (1UL<<20) +#define SWCR_STATUS_INE (1UL<<21) +#define SWCR_STATUS_DNO (1UL<<22) + +#define SWCR_STATUS_MASK (SWCR_STATUS_INV | SWCR_STATUS_DZE | \ + SWCR_STATUS_OVF | SWCR_STATUS_UNF | \ + SWCR_STATUS_INE | SWCR_STATUS_DNO) +#define TARGET_GSI_IEEE_FP_CONTROL 45 +#define TARGET_SSI_IEEE_FP_CONTROL 14 +#endif diff --git a/linux-user/sw64/termbits.h b/linux-user/sw64/termbits.h new file mode 100644 index 0000000000000000000000000000000000000000..5c40efcb2074787e34c8c296d4b46951a400550e --- /dev/null +++ b/linux-user/sw64/termbits.h @@ -0,0 +1,266 @@ +typedef unsigned char target_cc_t; +typedef unsigned int target_speed_t; +typedef unsigned int target_tcflag_t; + +#define TARGET_NCCS 19 +struct target_termios { + target_tcflag_t c_iflag; /* input mode flags */ + target_tcflag_t c_oflag; /* output mode flags */ + target_tcflag_t c_cflag; /* control mode flags */ + target_tcflag_t c_lflag; /* local mode flags */ + target_cc_t c_cc[TARGET_NCCS]; /* control characters */ + target_cc_t c_line; /* line discipline (== c_cc[19]) */ + target_speed_t c_ispeed; /* input speed */ + target_speed_t c_ospeed; /* output speed */ +}; + +/* c_cc characters */ +#define TARGET_VEOF 0 +#define TARGET_VEOL 1 +#define TARGET_VEOL2 2 +#define TARGET_VERASE 3 +#define TARGET_VWERASE 4 +#define TARGET_VKILL 5 +#define TARGET_VREPRINT 6 +#define TARGET_VSWTC 7 +#define TARGET_VINTR 8 +#define TARGET_VQUIT 9 +#define TARGET_VSUSP 10 +#define TARGET_VSTART 12 +#define TARGET_VSTOP 13 +#define TARGET_VLNEXT 14 +#define TARGET_VDISCARD 15 +#define TARGET_VMIN 16 +#define TARGET_VTIME 17 + +/* c_iflag bits */ +#define TARGET_IGNBRK 0000001 +#define TARGET_BRKINT 0000002 +#define TARGET_IGNPAR 0000004 +#define TARGET_PARMRK 0000010 +#define TARGET_INPCK 0000020 +#define TARGET_ISTRIP 0000040 +#define TARGET_INLCR 0000100 +#define TARGET_IGNCR 0000200 +#define TARGET_ICRNL 0000400 +#define TARGET_IXON 0001000 +#define TARGET_IXOFF 0002000 +#define TARGET_IXANY 0004000 +#define TARGET_IUCLC 0010000 +#define TARGET_IMAXBEL 0020000 +#define TARGET_IUTF8 0040000 + +/* c_oflag bits */ +#define TARGET_OPOST 0000001 +#define TARGET_ONLCR 0000002 +#define TARGET_OLCUC 0000004 + +#define TARGET_OCRNL 0000010 +#define TARGET_ONOCR 0000020 +#define TARGET_ONLRET 0000040 + +#define TARGET_OFILL 00000100 +#define TARGET_OFDEL 00000200 +#define TARGET_NLDLY 00001400 +#define TARGET_NL0 00000000 +#define TARGET_NL1 00000400 +#define TARGET_NL2 00001000 +#define TARGET_NL3 00001400 +#define TARGET_TABDLY 00006000 +#define TARGET_TAB0 00000000 +#define TARGET_TAB1 00002000 +#define TARGET_TAB2 00004000 +#define TARGET_TAB3 00006000 +#define TARGET_CRDLY 00030000 +#define TARGET_CR0 00000000 +#define 
TARGET_CR1 00010000 +#define TARGET_CR2 00020000 +#define TARGET_CR3 00030000 +#define TARGET_FFDLY 00040000 +#define TARGET_FF0 00000000 +#define TARGET_FF1 00040000 +#define TARGET_BSDLY 00100000 +#define TARGET_BS0 00000000 +#define TARGET_BS1 00100000 +#define TARGET_VTDLY 00200000 +#define TARGET_VT0 00000000 +#define TARGET_VT1 00200000 +#define TARGET_XTABS 01000000 /* Hmm.. Linux/i386 considers this part of TABDLY.. */ + +/* c_cflag bit meaning */ +#define TARGET_CBAUD 0000037 +#define TARGET_B0 0000000 /* hang up */ +#define TARGET_B50 0000001 +#define TARGET_B75 0000002 +#define TARGET_B110 0000003 +#define TARGET_B134 0000004 +#define TARGET_B150 0000005 +#define TARGET_B200 0000006 +#define TARGET_B300 0000007 +#define TARGET_B600 0000010 +#define TARGET_B1200 0000011 +#define TARGET_B1800 0000012 +#define TARGET_B2400 0000013 +#define TARGET_B4800 0000014 +#define TARGET_B9600 0000015 +#define TARGET_B19200 0000016 +#define TARGET_B38400 0000017 +#define TARGET_EXTA B19200 +#define TARGET_EXTB B38400 +#define TARGET_CBAUDEX 0000000 +#define TARGET_B57600 00020 +#define TARGET_B115200 00021 +#define TARGET_B230400 00022 +#define TARGET_B460800 00023 +#define TARGET_B500000 00024 +#define TARGET_B576000 00025 +#define TARGET_B921600 00026 +#define TARGET_B1000000 00027 +#define TARGET_B1152000 00030 +#define TARGET_B1500000 00031 +#define TARGET_B2000000 00032 +#define TARGET_B2500000 00033 +#define TARGET_B3000000 00034 +#define TARGET_B3500000 00035 +#define TARGET_B4000000 00036 + +#define TARGET_CSIZE 00001400 +#define TARGET_CS5 00000000 +#define TARGET_CS6 00000400 +#define TARGET_CS7 00001000 +#define TARGET_CS8 00001400 + +#define TARGET_CSTOPB 00002000 +#define TARGET_CREAD 00004000 +#define TARGET_PARENB 00010000 +#define TARGET_PARODD 00020000 +#define TARGET_HUPCL 00040000 + +#define TARGET_CLOCAL 00100000 +#define TARGET_CMSPAR 010000000000 /* mark or space (stick) parity */ +#define TARGET_CRTSCTS 020000000000 /* flow control */ + +/* c_lflag bits */ +#define TARGET_ISIG 0x00000080 +#define TARGET_ICANON 0x00000100 +#define TARGET_XCASE 0x00004000 +#define TARGET_ECHO 0x00000008 +#define TARGET_ECHOE 0x00000002 +#define TARGET_ECHOK 0x00000004 +#define TARGET_ECHONL 0x00000010 +#define TARGET_NOFLSH 0x80000000 +#define TARGET_TOSTOP 0x00400000 +#define TARGET_ECHOCTL 0x00000040 +#define TARGET_ECHOPRT 0x00000020 +#define TARGET_ECHOKE 0x00000001 +#define TARGET_FLUSHO 0x00800000 +#define TARGET_PENDIN 0x20000000 +#define TARGET_IEXTEN 0x00000400 +#define TARGET_EXTPROC 0x10000000 + +#define TARGET_FIOCLEX TARGET_IO('f', 1) +#define TARGET_FIONCLEX TARGET_IO('f', 2) +#define TARGET_FIOASYNC TARGET_IOW('f', 125, int) +#define TARGET_FIONBIO TARGET_IOW('f', 126, int) +#define TARGET_FIONREAD TARGET_IOR('f', 127, int) +#define TARGET_TIOCINQ FIONREAD +#define TARGET_FIOQSIZE TARGET_IOR('f', 128, loff_t) + +#define TARGET_TIOCGETP TARGET_IOR('t', 8, struct target_sgttyb) +#define TARGET_TIOCSETP TARGET_IOW('t', 9, struct target_sgttyb) +#define TARGET_TIOCSETN TARGET_IOW('t', 10, struct target_sgttyb) /* TIOCSETP wo flush */ + +#define TARGET_TIOCSETC TARGET_IOW('t', 17, struct target_tchars) +#define TARGET_TIOCGETC TARGET_IOR('t', 18, struct target_tchars) +#define TARGET_TCGETS TARGET_IOR('t', 19, struct target_termios) +#define TARGET_TCSETS TARGET_IOW('t', 20, struct target_termios) +#define TARGET_TCSETSW TARGET_IOW('t', 21, struct target_termios) +#define TARGET_TCSETSF TARGET_IOW('t', 22, struct target_termios) + +#define TARGET_TCGETA TARGET_IOR('t', 23, struct 
target_termio) +#define TARGET_TCSETA TARGET_IOW('t', 24, struct target_termio) +#define TARGET_TCSETAW TARGET_IOW('t', 25, struct target_termio) +#define TARGET_TCSETAF TARGET_IOW('t', 28, struct target_termio) + +#define TARGET_TCSBRK TARGET_IO('t', 29) +#define TARGET_TCXONC TARGET_IO('t', 30) +#define TARGET_TCFLSH TARGET_IO('t', 31) + +#define TARGET_TIOCSWINSZ TARGET_IOW('t', 103, struct target_winsize) +#define TARGET_TIOCGWINSZ TARGET_IOR('t', 104, struct target_winsize) +#define TARGET_TIOCSTART TARGET_IO('t', 110) /* start output, like ^Q */ +#define TARGET_TIOCSTOP TARGET_IO('t', 111) /* stop output, like ^S */ +#define TARGET_TIOCOUTQ TARGET_IOR('t', 115, int) /* output queue size */ + +#define TARGET_TIOCGLTC TARGET_IOR('t', 116, struct target_ltchars) +#define TARGET_TIOCSLTC TARGET_IOW('t', 117, struct target_ltchars) +#define TARGET_TIOCSPGRP TARGET_IOW('t', 118, int) +#define TARGET_TIOCGPGRP TARGET_IOR('t', 119, int) + +#define TARGET_TIOCEXCL 0x540C +#define TARGET_TIOCNXCL 0x540D +#define TARGET_TIOCSCTTY 0x540E + +#define TARGET_TIOCSTI 0x5412 +#define TARGET_TIOCMGET 0x5415 +#define TARGET_TIOCMBIS 0x5416 +#define TARGET_TIOCMBIC 0x5417 +#define TARGET_TIOCMSET 0x5418 +# define TARGET_TIOCM_LE 0x001 +# define TARGET_TIOCM_DTR 0x002 +# define TARGET_TIOCM_RTS 0x004 +# define TARGET_TIOCM_ST 0x008 +# define TARGET_TIOCM_SR 0x010 +# define TARGET_TIOCM_CTS 0x020 +# define TARGET_TIOCM_CAR 0x040 +# define TARGET_TIOCM_RNG 0x080 +# define TARGET_TIOCM_DSR 0x100 +# define TARGET_TIOCM_CD TIOCM_CAR +# define TARGET_TIOCM_RI TIOCM_RNG +# define TARGET_TIOCM_OUT1 0x2000 +# define TARGET_TIOCM_OUT2 0x4000 +# define TARGET_TIOCM_LOOP 0x8000 + +#define TARGET_TIOCGSOFTCAR 0x5419 +#define TARGET_TIOCSSOFTCAR 0x541A +#define TARGET_TIOCLINUX 0x541C +#define TARGET_TIOCCONS 0x541D +#define TARGET_TIOCGSERIAL 0x541E +#define TARGET_TIOCSSERIAL 0x541F +#define TARGET_TIOCPKT 0x5420 +# define TARGET_TIOCPKT_DATA 0 +# define TARGET_TIOCPKT_FLUSHREAD 1 +# define TARGET_TIOCPKT_FLUSHWRITE 2 +# define TARGET_TIOCPKT_STOP 4 +# define TARGET_TIOCPKT_START 8 +# define TARGET_TIOCPKT_NOSTOP 16 +# define TARGET_TIOCPKT_DOSTOP 32 + + +#define TARGET_TIOCNOTTY 0x5422 +#define TARGET_TIOCSETD 0x5423 +#define TARGET_TIOCGETD 0x5424 +#define TARGET_TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */ +#define TARGET_TIOCSBRK 0x5427 /* BSD compatibility */ +#define TARGET_TIOCCBRK 0x5428 /* BSD compatibility */ +#define TARGET_TIOCGSID 0x5429 /* Return the session ID of FD */ +#define TARGET_TIOCGPTN TARGET_IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ +#define TARGET_TIOCSPTLCK TARGET_IOW('T', 0x31, int) /* Lock/unlock Pty */ +#define TARGET_TIOCGPTPEER TARGET_IO('T', 0x41) /* Safely open the slave */ + +#define TARGET_TIOCSERCONFIG 0x5453 +#define TARGET_TIOCSERGWILD 0x5454 +#define TARGET_TIOCSERSWILD 0x5455 +#define TARGET_TIOCGLCKTRMIOS 0x5456 +#define TARGET_TIOCSLCKTRMIOS 0x5457 +#define TARGET_TIOCSERGSTRUCT 0x5458 /* For debugging only */ +#define TARGET_TIOCSERGETLSR 0x5459 /* Get line status register */ + /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ +# define TARGET_TIOCSER_TEMT 0x01 /* Transmitter physically empty */ +#define TARGET_TIOCSERGETMULTI 0x545A /* Get multiport config */ +#define TARGET_TIOCSERSETMULTI 0x545B /* Set multiport config */ + +#define TARGET_TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */ +#define TARGET_TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ +#define TARGET_TIOCGHAYESESP 
0x545E /* Get Hayes ESP configuration */ +#define TARGET_TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */ diff --git a/linux-user/syscall.c b/linux-user/syscall.c index f1cfcc81048695222abc0a0f546d258f92e8f328..5f1bdfe8576dcd9bc6ace62e6d6f15bb12eb47d4 100644 --- a/linux-user/syscall.c +++ b/linux-user/syscall.c @@ -333,6 +333,16 @@ _syscall6(int,sys_futex,int *,uaddr,int,op,int,val, _syscall6(int,sys_futex_time64,int *,uaddr,int,op,int,val, const struct timespec *,timeout,int *,uaddr2,int,val3) #endif +#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) +_syscall2(int, pidfd_open, pid_t, pid, unsigned int, flags); +#endif +#if defined(__NR_pidfd_send_signal) && defined(TARGET_NR_pidfd_send_signal) +_syscall4(int, pidfd_send_signal, int, pidfd, int, sig, siginfo_t *, info, + unsigned int, flags); +#endif +#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) +_syscall3(int, pidfd_getfd, int, pidfd, int, targetfd, unsigned int, flags); +#endif #define __NR_sys_sched_getaffinity __NR_sched_getaffinity _syscall3(int, sys_sched_getaffinity, pid_t, pid, unsigned int, len, unsigned long *, user_mask_ptr); @@ -1614,6 +1624,9 @@ static abi_long do_pipe(void *cpu_env, abi_ulong pipedes, #elif defined(TARGET_MIPS) ((CPUMIPSState*)cpu_env)->active_tc.gpr[3] = host_pipe[1]; return host_pipe[0]; +#elif defined(TARGET_LOONGARCH64) + ((CPULOONGARCHState *)cpu_env)->active_tc.gpr[5] = host_pipe[1]; + return host_pipe[0]; #elif defined(TARGET_SH4) ((CPUSH4State*)cpu_env)->gregs[1] = host_pipe[1]; return host_pipe[0]; @@ -1698,6 +1711,11 @@ static inline abi_long target_to_host_sockaddr(int fd, struct sockaddr *addr, lladdr = (struct target_sockaddr_ll *)addr; lladdr->sll_ifindex = tswap32(lladdr->sll_ifindex); lladdr->sll_hatype = tswap16(lladdr->sll_hatype); + } else if (sa_family == AF_INET6) { + struct sockaddr_in6 *in6addr; + + in6addr = (struct sockaddr_in6 *)addr; + in6addr->sin6_scope_id = tswap32(in6addr->sin6_scope_id); } unlock_user(target_saddr, target_addr, 0); @@ -3296,7 +3314,8 @@ static abi_long do_sendrecvmsg_locked(int fd, struct target_msghdr *msgp, if (fd_trans_host_to_target_data(fd)) { ret = fd_trans_host_to_target_data(fd)(msg.msg_iov->iov_base, MIN(msg.msg_iov->iov_len, len)); - } else { + } + if (!is_error(ret)) { ret = host_to_target_cmsg(msgp, &msg); } if (!is_error(ret)) { @@ -8336,7 +8355,13 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, if (CPU_NEXT(first_cpu)) { TaskState *ts = cpu->opaque; - object_property_set_bool(OBJECT(cpu), "realized", false, NULL); + if (ts->child_tidptr) { + put_user_u32(0, ts->child_tidptr); + do_sys_futex(g2h(cpu, ts->child_tidptr), + FUTEX_WAKE, INT_MAX, NULL, NULL, 0); + } + + object_unparent(OBJECT(cpu)); object_unref(OBJECT(cpu)); /* * At this point the CPU should be unrealized and removed @@ -8346,11 +8371,6 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, pthread_mutex_unlock(&clone_lock); - if (ts->child_tidptr) { - put_user_u32(0, ts->child_tidptr); - do_sys_futex(g2h(cpu, ts->child_tidptr), - FUTEX_WAKE, INT_MAX, NULL, NULL, 0); - } thread_cpu = NULL; g_free(ts); rcu_unregister_thread(); @@ -8425,6 +8445,30 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, ret = do_open_by_handle_at(arg1, arg2, arg3); fd_trans_unregister(ret); return ret; +#endif +#if defined(__NR_pidfd_open) && defined(TARGET_NR_pidfd_open) + case TARGET_NR_pidfd_open: + return get_errno(pidfd_open(arg1, arg2)); +#endif +#if defined(__NR_pidfd_send_signal) && 
defined(TARGET_NR_pidfd_send_signal) + case TARGET_NR_pidfd_send_signal: + { + siginfo_t uinfo; + + p = lock_user(VERIFY_READ, arg3, sizeof(target_siginfo_t), 1); + if (!p) { + return -TARGET_EFAULT; + } + target_to_host_siginfo(&uinfo, p); + unlock_user(p, arg3, 0); + ret = get_errno(pidfd_send_signal(arg1, target_to_host_signal(arg2), + &uinfo, arg4)); + } + return ret; +#endif +#if defined(__NR_pidfd_getfd) && defined(TARGET_NR_pidfd_getfd) + case TARGET_NR_pidfd_getfd: + return get_errno(pidfd_getfd(arg1, arg2, arg3)); #endif case TARGET_NR_close: fd_trans_unregister(arg1); @@ -8450,14 +8494,24 @@ static abi_long do_syscall1(void *cpu_env, int num, abi_long arg1, #ifdef TARGET_NR_waitid case TARGET_NR_waitid: { + struct rusage ru; siginfo_t info; - info.si_pid = 0; - ret = get_errno(safe_waitid(arg1, arg2, &info, arg4, NULL)); - if (!is_error(ret) && arg3 && info.si_pid != 0) { - if (!(p = lock_user(VERIFY_WRITE, arg3, sizeof(target_siginfo_t), 0))) + + ret = get_errno(safe_waitid(arg1, arg2, (arg3 ? &info : NULL), + arg4, (arg5 ? &ru : NULL))); + if (!is_error(ret)) { + if (arg3) { + p = lock_user(VERIFY_WRITE, arg3, + sizeof(target_siginfo_t), 0); + if (!p) { + return -TARGET_EFAULT; + } + host_to_target_siginfo(p, &info); + unlock_user(p, arg3, sizeof(target_siginfo_t)); + } + if (arg5 && host_to_target_rusage(arg5, &ru)) { return -TARGET_EFAULT; - host_to_target_siginfo(p, &info); - unlock_user(p, arg3, sizeof(target_siginfo_t)); + } } } return ret; diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h index 0b139759377b6b3a1d09173fd549f28d22f6736e..077a059701758e21dfb1157f1d181423ea42463f 100644 --- a/linux-user/syscall_defs.h +++ b/linux-user/syscall_defs.h @@ -74,7 +74,7 @@ || defined(TARGET_M68K) || defined(TARGET_CRIS) \ || defined(TARGET_S390X) || defined(TARGET_OPENRISC) \ || defined(TARGET_NIOS2) || defined(TARGET_RISCV) \ - || defined(TARGET_XTENSA) + || defined(TARGET_XTENSA) || defined(TARGET_LOONGARCH64) #define TARGET_IOC_SIZEBITS 14 #define TARGET_IOC_DIRBITS 2 @@ -85,7 +85,7 @@ #elif defined(TARGET_PPC) || defined(TARGET_ALPHA) || \ defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) || \ - defined(TARGET_MIPS) + defined(TARGET_MIPS) || defined(TARGET_SW64) #define TARGET_IOC_SIZEBITS 13 #define TARGET_IOC_DIRBITS 3 @@ -450,7 +450,7 @@ struct target_dirent64 { #define TARGET_SIG_IGN ((abi_long)1) /* ignore signal */ #define TARGET_SIG_ERR ((abi_long)-1) /* error return from signal */ -#ifdef TARGET_MIPS +#if defined(TARGET_MIPS) || defined(TARGET_LOONGARCH64) #define TARGET_NSIG 128 #else #define TARGET_NSIG 64 @@ -1295,7 +1295,7 @@ struct target_winsize { #include "termbits.h" -#if defined(TARGET_MIPS) +#if defined(TARGET_MIPS) || defined(TARGET_XTENSA) #define TARGET_PROT_SEM 0x10 #else #define TARGET_PROT_SEM 0x08 @@ -2133,7 +2133,8 @@ struct target_stat64 { abi_ulong __unused5; }; -#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || defined(TARGET_RISCV) +#elif defined(TARGET_OPENRISC) || defined(TARGET_NIOS2) || \ + defined(TARGET_RISCV) || defined(TARGET_LOONGARCH64) /* These are the asm-generic versions of the stat and stat64 structures */ @@ -2161,7 +2162,7 @@ struct target_stat { unsigned int __unused5; }; -#if !defined(TARGET_RISCV64) +#if !(defined(TARGET_RISCV64) || defined(TARGET_LOONGARCH64)) #define TARGET_HAS_STRUCT_STAT64 struct target_stat64 { uint64_t st_dev; @@ -2269,6 +2270,50 @@ struct target_stat { int __unused[2]; }; +#elif defined(TARGET_SW64) + +struct target_stat { + unsigned int st_dev; + unsigned int st_ino; + unsigned 
int st_mode; + unsigned int st_nlink; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_rdev; + abi_long st_size; + abi_ulong target_st_atime; + abi_ulong target_st_mtime; + abi_ulong target_st_ctime; + unsigned int st_blksize; + unsigned int st_blocks; + unsigned int st_flags; + unsigned int st_gen; +}; + +#define TARGET_HAS_STRUCT_STAT64 +struct target_stat64 { + abi_ulong st_dev; + abi_ulong st_ino; + abi_ulong st_rdev; + abi_long st_size; + abi_ulong st_blocks; + + unsigned int st_mode; + unsigned int st_uid; + unsigned int st_gid; + unsigned int st_blksize; + unsigned int st_nlink; + unsigned int __pad0; + + abi_ulong target_st_atime; + abi_ulong target_st_atime_nsec; + abi_ulong target_st_mtime; + abi_ulong target_st_mtime_nsec; + abi_ulong target_st_ctime; + abi_ulong target_st_ctime_nsec; + abi_long __unused[3]; +}; + #else #error unsupported CPU #endif @@ -2331,6 +2376,7 @@ struct target_statfs64 { }; #elif (defined(TARGET_PPC64) || defined(TARGET_X86_64) || \ defined(TARGET_SPARC64) || defined(TARGET_AARCH64) || \ + defined(TARGET_LOONGARCH64) || \ defined(TARGET_RISCV)) && !defined(TARGET_ABI32) struct target_statfs { abi_long f_type; diff --git a/linux-user/x86_64/target_elf.h b/linux-user/x86_64/target_elf.h index 7b76a90de8805a84b4983f3b2bb9f1b967d77090..3f628f8d66197faae698cbec4e244ba4e4950f19 100644 --- a/linux-user/x86_64/target_elf.h +++ b/linux-user/x86_64/target_elf.h @@ -9,6 +9,6 @@ #define X86_64_TARGET_ELF_H static inline const char *cpu_get_model(uint32_t eflags) { - return "qemu64"; + return "max"; } #endif diff --git a/meson.build b/meson.build index 96de1a6ef948542aa93bd03242005907643e6c47..1a225b51f38cde3121a33c6beb30dae67a68c5e2 100644 --- a/meson.build +++ b/meson.build @@ -56,7 +56,7 @@ python = import('python').find_installation() supported_oses = ['windows', 'freebsd', 'netbsd', 'openbsd', 'darwin', 'sunos', 'linux'] supported_cpus = ['ppc', 'ppc64', 's390x', 'riscv', 'x86', 'x86_64', - 'arm', 'aarch64', 'mips', 'mips64', 'sparc', 'sparc64'] + 'arm', 'aarch64', 'mips', 'mips64', 'sparc', 'sparc64', 'sw64', 'loongarch64'] cpu = host_machine.cpu_family() @@ -65,6 +65,10 @@ if cpu in ['riscv32', 'riscv64'] cpu = 'riscv' endif +if cpu == 'sw_64' + cpu = 'sw64' +endif + targetos = host_machine.system() if cpu in ['x86', 'x86_64'] @@ -77,6 +81,10 @@ elif cpu in ['ppc', 'ppc64'] kvm_targets = ['ppc-softmmu', 'ppc64-softmmu'] elif cpu in ['mips', 'mips64'] kvm_targets = ['mips-softmmu', 'mipsel-softmmu', 'mips64-softmmu', 'mips64el-softmmu'] +elif cpu == 'sw64' + kvm_targets = ['sw64-softmmu'] +elif cpu == 'loongarch64' + kvm_targets = ['loongarch64-softmmu'] else kvm_targets = [] endif @@ -359,6 +367,10 @@ if not get_option('tcg').disabled() tcg_arch = 'i386' elif config_host['ARCH'] == 'ppc64' tcg_arch = 'ppc' + elif config_host['ARCH'] in ['sw64'] + tcg_arch = 'sw64' + elif config_host['ARCH'] == 'loongarch64' + tcg_arch = 'loongarch64' endif add_project_arguments('-iquote', meson.current_source_dir() / 'tcg' / tcg_arch, language: ['c', 'cpp', 'objc']) @@ -998,6 +1010,7 @@ endif # gcrypt over nettle for performance reasons. 
gcrypt = not_found nettle = not_found +crypto_sm4 = not_found xts = 'none' if get_option('nettle').enabled() and get_option('gcrypt').enabled() @@ -1023,6 +1036,17 @@ if not gnutls_crypto.found() gcrypt, cc.find_library('gpg-error', required: true, kwargs: static_kwargs)]) endif + crypto_sm4 = gcrypt + # SM4 ALG is available in libgcrypt >= 1.9 + if gcrypt.found() and not cc.links(''' + #include + int main(void) { + gcry_cipher_hd_t handler; + gcry_cipher_open(&handler, GCRY_CIPHER_SM4, GCRY_CIPHER_MODE_ECB, 0); + return 0; + }''', dependencies: gcrypt) + crypto_sm4 = not_found + endif endif if (not get_option('nettle').auto() or have_system) and not gcrypt.found() nettle = dependency('nettle', version: '>=3.4', @@ -1032,6 +1056,18 @@ if not gnutls_crypto.found() if nettle.found() and not cc.has_header('nettle/xts.h', dependencies: nettle) xts = 'private' endif + crypto_sm4 = nettle + # SM4 ALG is available in nettle >= 3.9 + if nettle.found() and not cc.links(''' + #include + int main(void) { + struct sm4_ctx ctx; + unsigned char key[16] = {0}; + sm4_set_encrypt_key(&ctx, key); + return 0; + }''', dependencies: nettle) + crypto_sm4 = not_found + endif endif endif @@ -1399,6 +1435,17 @@ if get_option('virtfs').enabled() elif not have_system error('virtio-9p (virtfs) needs system emulation support') endif + crypto_sm4 = gcrypt + # SM4 ALG is available in libgcrypt >= 1.9 + if gcrypt.found() and not cc.links(''' + #include + int main(void) { + gcry_cipher_hd_t handler; + gcry_cipher_open(&handler, GCRY_CIPHER_SM4, GCRY_CIPHER_MODE_ECB, 0); + return 0; + }''', dependencies: gcrypt) + crypto_sm4 = not_found + endif endif elif get_option('virtfs').disabled() have_virtfs = false @@ -1475,6 +1522,7 @@ config_host_data.set('CONFIG_GNUTLS', gnutls.found()) config_host_data.set('CONFIG_GNUTLS_CRYPTO', gnutls_crypto.found()) config_host_data.set('CONFIG_GCRYPT', gcrypt.found()) config_host_data.set('CONFIG_NETTLE', nettle.found()) +config_host_data.set('CONFIG_CRYPTO_SM4', crypto_sm4.found()) config_host_data.set('CONFIG_QEMU_PRIVATE_XTS', xts == 'private') config_host_data.set('CONFIG_MALLOC_TRIM', has_malloc_trim) config_host_data.set('CONFIG_STATX', has_statx) @@ -1738,6 +1786,52 @@ config_host_data.set('CONFIG_GETAUXVAL', cc.links(gnu_source_prefix + ''' return getauxval(AT_HWCAP) == 0; }''')) +have_cpuid_h = cc.links(''' + #include + int main(void) { + unsigned a, b, c, d; + unsigned max = __get_cpuid_max(0, 0); + + if (max >= 1) { + __cpuid(1, a, b, c, d); + } + + if (max >= 7) { + __cpuid_count(7, 0, a, b, c, d); + } + + return 0; + }''') +config_host_data.set('CONFIG_CPUID_H', have_cpuid_h) + +config_host_data.set('CONFIG_AVX2_OPT', get_option('avx2') \ + .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX2') \ + .require(cc.links(''' + #pragma GCC push_options + #pragma GCC target("avx2") + #include + #include + static int bar(void *a) { + __m256i x = *(__m256i *)a; + return _mm256_testz_si256(x, x); + } + int main(int argc, char *argv[]) { return bar(argv[0]); } + '''), error_message: 'AVX2 not available').allowed()) + +config_host_data.set('CONFIG_AVX512F_OPT', get_option('avx512f') \ + .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512F') \ + .require(cc.links(''' + #pragma GCC push_options + #pragma GCC target("avx512f") + #include + #include + static int bar(void *a) { + __m512i x = *(__m512i *)a; + return _mm512_test_epi64_mask(x, x); + } + int main(int argc, char *argv[]) { return bar(argv[0]); } + '''), error_message: 
config_host_data.set('CONFIG_AF_VSOCK', cc.compiles(gnu_source_prefix + ''' #include #include @@ -1758,6 +1852,22 @@ config_host_data.set('CONFIG_AF_VSOCK', cc.compiles(gnu_source_prefix + ''' return -1; }''')) +config_host_data.set('CONFIG_AVX512BW_OPT', get_option('avx512bw') \ + .require(have_cpuid_h, error_message: 'cpuid.h not available, cannot enable AVX512BW') \ + .require(cc.links(''' + #pragma GCC push_options + #pragma GCC target("avx512bw") + #include <cpuid.h> + #include <immintrin.h> + static int bar(void *a) { + + __m512i *x = a; + __m512i res = _mm512_abs_epi8(*x); + return res[1]; + } + int main(int argc, char *argv[]) { return bar(argv[0]); } + '''), error_message: 'AVX512BW not available').allowed()) + ignored = ['CONFIG_QEMU_INTERP_PREFIX', # actually per-target 'HAVE_GDB_BIN'] arrays = ['CONFIG_BDRV_RW_WHITELIST', 'CONFIG_BDRV_RO_WHITELIST'] @@ -1814,6 +1924,8 @@ disassemblers = { 'sh4' : ['CONFIG_SH4_DIS'], 'sparc' : ['CONFIG_SPARC_DIS'], 'xtensa' : ['CONFIG_XTENSA_DIS'], + 'loongarch64' : ['CONFIG_LOONGARCH_DIS'], + 'sw64' : ['CONFIG_SW64_DIS'], } if link_language == 'cpp' disassemblers += { @@ -2466,6 +2590,7 @@ if have_system 'hw/sparc', 'hw/sparc64', 'hw/ssi', + 'hw/sw64', 'hw/timer', 'hw/tpm', 'hw/usb', @@ -3256,8 +3381,9 @@ summary_info += {'membarrier': config_host.has_key('CONFIG_MEMBARRIER')} summary_info += {'debug stack usage': config_host.has_key('CONFIG_DEBUG_STACK_USAGE')} summary_info += {'mutex debugging': config_host.has_key('CONFIG_DEBUG_MUTEX')} summary_info += {'memory allocator': get_option('malloc')} -summary_info += {'avx2 optimization': config_host.has_key('CONFIG_AVX2_OPT')} -summary_info += {'avx512f optimization': config_host.has_key('CONFIG_AVX512F_OPT')} +summary_info += {'avx2 optimization': config_host_data.get('CONFIG_AVX2_OPT')} +summary_info += {'avx512bw optimization': config_host_data.get('CONFIG_AVX512BW_OPT')} +summary_info += {'avx512f optimization': config_host_data.get('CONFIG_AVX512F_OPT')} summary_info += {'gprof enabled': config_host.has_key('CONFIG_GPROF')} summary_info += {'gcov': get_option('b_coverage')} summary_info += {'thread sanitizer': config_host.has_key('CONFIG_TSAN')} @@ -3354,6 +3480,7 @@ summary_info += {'nettle': nettle} if nettle.found() summary_info += {' XTS': xts != 'private'} endif +summary_info += {'SM4 ALG support': crypto_sm4} summary_info += {'crypto afalg': config_host.has_key('CONFIG_AF_ALG')} summary_info += {'rng-none': config_host.has_key('CONFIG_RNG_NONE')} summary_info += {'Linux keyring': config_host.has_key('CONFIG_SECRET_KEYRING')} diff --git a/meson_options.txt b/meson_options.txt index e3923237322a7bac13915c439d4944136afe61bb..ec9c3c0a05e2e6ab545320242e2e105bffa4f38f 100644 --- a/meson_options.txt +++ b/meson_options.txt @@ -66,6 +66,12 @@ option('cfi_debug', type: 'boolean', value: 'false', description: 'Verbose errors in case of CFI violation') option('multiprocess', type: 'feature', value: 'auto', description: 'Out of process device emulation support') +option('avx2', type: 'feature', value:
'auto', + description: 'AVX2 optimizations') +option('avx512f', type: 'feature', value: 'disabled', + description: 'AVX512F optimizations') +option('avx512bw', type: 'feature', value: 'auto', + description: 'AVX512BW optimizations') option('attr', type : 'feature', value : 'auto', description: 'attr/xattr support') diff --git a/migration/block.c b/migration/block.c index a950977855281bb0b3de321f1432462485a9d62f..4055a6bb60d69afab63dc79604d46b0e74cb33d6 100644 --- a/migration/block.c +++ b/migration/block.c @@ -376,7 +376,9 @@ static void unset_dirty_tracking(void) BlkMigDevState *bmds; QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) { - bdrv_release_dirty_bitmap(bmds->dirty_bitmap); + if (bmds->dirty_bitmap) { + bdrv_release_dirty_bitmap(bmds->dirty_bitmap); + } } } @@ -413,7 +415,10 @@ static int init_blk_migration(QEMUFile *f) } sectors = bdrv_nb_sectors(bs); - if (sectors <= 0) { + if (sectors == 0) { + continue; + } + if (sectors < 0) { ret = sectors; bdrv_next_cleanup(&it); goto out; @@ -684,13 +689,18 @@ static int64_t get_remaining_dirty(void) static void block_migration_cleanup_bmds(void) { BlkMigDevState *bmds; + BlockDriverState *bs; AioContext *ctx; unset_dirty_tracking(); while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) { QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry); - bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker); + + bs = blk_bs(bmds->blk); + if (bs) { + bdrv_op_unblock_all(bs, bmds->blocker); + } error_free(bmds->blocker); /* Save ctx, because bmds->blk can disappear during blk_unref. */ diff --git a/migration/colo.c b/migration/colo.c index 2415325262be071429f86ee4452de386e6e41450..2a855049666118646966d7a21891f098da4dfcda 100644 --- a/migration/colo.c +++ b/migration/colo.c @@ -530,7 +530,6 @@ static void colo_process_checkpoint(MigrationState *s) { QIOChannelBuffer *bioc; QEMUFile *fb = NULL; - int64_t current_time = qemu_clock_get_ms(QEMU_CLOCK_HOST); Error *local_err = NULL; int ret; @@ -578,8 +577,8 @@ static void colo_process_checkpoint(MigrationState *s) qemu_mutex_unlock_iothread(); trace_colo_vm_state_change("stop", "run"); - timer_mod(s->colo_delay_timer, - current_time + s->parameters.x_checkpoint_delay); + timer_mod(s->colo_delay_timer, qemu_clock_get_ms(QEMU_CLOCK_HOST) + + s->parameters.x_checkpoint_delay); while (s->state == MIGRATION_STATUS_COLO) { if (failover_get_state() != FAILOVER_STATUS_NONE) { @@ -820,6 +819,26 @@ static void colo_wait_handle_message(MigrationIncomingState *mis, } } +void colo_shutdown(void) +{ + MigrationIncomingState *mis = NULL; + MigrationState *s = NULL; + + switch (get_colo_mode()) { + case COLO_MODE_PRIMARY: + s = migrate_get_current(); + qemu_event_set(&s->colo_checkpoint_event); + qemu_sem_post(&s->colo_exit_sem); + break; + case COLO_MODE_SECONDARY: + mis = migration_incoming_get_current(); + qemu_sem_post(&mis->colo_incoming_sem); + break; + default: + break; + } +} + void *colo_process_incoming_thread(void *opaque) { MigrationIncomingState *mis = opaque; diff --git a/migration/dirtyrate.c b/migration/dirtyrate.c index d65e744af920158c61f22b74e2246641cf86c2ab..c449095fc31776acea96c737d5a499f3ea635656 100644 --- a/migration/dirtyrate.c +++ b/migration/dirtyrate.c @@ -46,7 +46,7 @@ static struct DirtyRateStat DirtyStat; static DirtyRateMeasureMode dirtyrate_mode = DIRTY_RATE_MEASURE_MODE_PAGE_SAMPLING; -static int64_t set_sample_page_period(int64_t msec, int64_t initial_time) +static int64_t dirty_stat_wait(int64_t msec, int64_t initial_time) { int64_t current_time; @@ -60,6
+60,132 @@ static int64_t set_sample_page_period(int64_t msec, int64_t initial_time) return msec; } +static inline void record_dirtypages(DirtyPageRecord *dirty_pages, + CPUState *cpu, bool start) +{ + if (start) { + dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages; + } else { + dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages; + } +} + +static int64_t do_calculate_dirtyrate(DirtyPageRecord dirty_pages, + int64_t calc_time_ms) +{ + uint64_t memory_size_MB; + uint64_t increased_dirty_pages = + dirty_pages.end_pages - dirty_pages.start_pages; + + memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20; + + return memory_size_MB * 1000 / calc_time_ms; +} + +void global_dirty_log_change(unsigned int flag, bool start) +{ + qemu_mutex_lock_iothread(); + if (start) { + memory_global_dirty_log_start(flag); + } else { + memory_global_dirty_log_stop(flag); + } + qemu_mutex_unlock_iothread(); +} + +/* + * global_dirty_log_sync + * 1. sync dirty log from kvm + * 2. stop dirty tracking if needed. + */ +static void global_dirty_log_sync(unsigned int flag, bool one_shot) +{ + qemu_mutex_lock_iothread(); + memory_global_dirty_log_sync(); + if (one_shot) { + memory_global_dirty_log_stop(flag); + } + qemu_mutex_unlock_iothread(); +} + +static DirtyPageRecord *vcpu_dirty_stat_alloc(VcpuStat *stat) +{ + CPUState *cpu; + DirtyPageRecord *records; + int nvcpu = 0; + + CPU_FOREACH(cpu) { + nvcpu++; + } + + stat->nvcpu = nvcpu; + stat->rates = g_malloc0(sizeof(DirtyRateVcpu) * nvcpu); + + records = g_malloc0(sizeof(DirtyPageRecord) * nvcpu); + + return records; +} + +static void vcpu_dirty_stat_collect(VcpuStat *stat, + DirtyPageRecord *records, + bool start) +{ + CPUState *cpu; + + CPU_FOREACH(cpu) { + record_dirtypages(records, cpu, start); + } +} + +int64_t vcpu_calculate_dirtyrate(int64_t calc_time_ms, + VcpuStat *stat, + unsigned int flag, + bool one_shot) +{ + DirtyPageRecord *records; + int64_t init_time_ms; + int64_t duration; + int64_t dirtyrate; + int i = 0; + unsigned int gen_id; + +retry: + init_time_ms = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + + cpu_list_lock(); + gen_id = cpu_list_generation_id_get(); + records = vcpu_dirty_stat_alloc(stat); + vcpu_dirty_stat_collect(stat, records, true); + cpu_list_unlock(); + + duration = dirty_stat_wait(calc_time_ms, init_time_ms); + + global_dirty_log_sync(flag, one_shot); + + cpu_list_lock(); + if (gen_id != cpu_list_generation_id_get()) { + g_free(records); + g_free(stat->rates); + cpu_list_unlock(); + goto retry; + } + vcpu_dirty_stat_collect(stat, records, false); + cpu_list_unlock(); + + for (i = 0; i < stat->nvcpu; i++) { + dirtyrate = do_calculate_dirtyrate(records[i], duration); + + stat->rates[i].id = i; + stat->rates[i].dirty_rate = dirtyrate; + + trace_dirtyrate_do_calculate_vcpu(i, dirtyrate); + } + + g_free(records); + + return duration; +} + static bool is_sample_period_valid(int64_t sec) { if (sec < MIN_FETCH_DIRTYRATE_TIME_SEC || @@ -157,7 +283,7 @@ static void cleanup_dirtyrate_stat(struct DirtyRateConfig config) { /* last calc-dirty-rate qmp use dirty ring mode */ if (dirtyrate_mode == DIRTY_RATE_MEASURE_MODE_DIRTY_RING) { - free(DirtyStat.dirty_ring.rates); + g_free(DirtyStat.dirty_ring.rates); DirtyStat.dirty_ring.rates = NULL; } } @@ -396,44 +522,6 @@ static bool compare_page_hash_info(struct RamblockDirtyInfo *info, return true; } -static inline void record_dirtypages(DirtyPageRecord *dirty_pages, - CPUState *cpu, bool start) -{ - if (start) { - dirty_pages[cpu->cpu_index].start_pages = cpu->dirty_pages; - } 
else { - dirty_pages[cpu->cpu_index].end_pages = cpu->dirty_pages; - } -} - -static void dirtyrate_global_dirty_log_start(void) -{ - qemu_mutex_lock_iothread(); - memory_global_dirty_log_start(GLOBAL_DIRTY_DIRTY_RATE); - qemu_mutex_unlock_iothread(); -} - -static void dirtyrate_global_dirty_log_stop(void) -{ - qemu_mutex_lock_iothread(); - memory_global_dirty_log_sync(); - memory_global_dirty_log_stop(GLOBAL_DIRTY_DIRTY_RATE); - qemu_mutex_unlock_iothread(); -} - -static int64_t do_calculate_dirtyrate_vcpu(DirtyPageRecord dirty_pages) -{ - uint64_t memory_size_MB; - int64_t time_s; - uint64_t increased_dirty_pages = - dirty_pages.end_pages - dirty_pages.start_pages; - - memory_size_MB = (increased_dirty_pages * TARGET_PAGE_SIZE) >> 20; - time_s = DirtyStat.calc_time; - - return memory_size_MB / time_s; -} - static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages, bool start) { @@ -444,11 +532,6 @@ static inline void record_dirtypages_bitmap(DirtyPageRecord *dirty_pages, } } -static void do_calculate_dirtyrate_bitmap(DirtyPageRecord dirty_pages) -{ - DirtyStat.dirty_rate = do_calculate_dirtyrate_vcpu(dirty_pages); -} - static inline void dirtyrate_manual_reset_protect(void) { RAMBlock *block = NULL; @@ -492,71 +575,49 @@ static void calculate_dirtyrate_dirty_bitmap(struct DirtyRateConfig config) DirtyStat.start_time = start_time / 1000; msec = config.sample_period_seconds * 1000; - msec = set_sample_page_period(msec, start_time); + msec = dirty_stat_wait(msec, start_time); DirtyStat.calc_time = msec / 1000; /* - * dirtyrate_global_dirty_log_stop do two things. + * The global_dirty_log_sync() below does two things. * 1. fetch dirty bitmap from kvm * 2. stop dirty tracking */ - dirtyrate_global_dirty_log_stop(); + global_dirty_log_sync(GLOBAL_DIRTY_DIRTY_RATE, true); record_dirtypages_bitmap(&dirty_pages, false); - do_calculate_dirtyrate_bitmap(dirty_pages); + DirtyStat.dirty_rate = do_calculate_dirtyrate(dirty_pages, msec); } static void calculate_dirtyrate_dirty_ring(struct DirtyRateConfig config) { - CPUState *cpu; - int64_t msec = 0; - int64_t start_time; + int64_t duration; uint64_t dirtyrate = 0; uint64_t dirtyrate_sum = 0; - DirtyPageRecord *dirty_pages; - int nvcpu = 0; int i = 0; - CPU_FOREACH(cpu) { - nvcpu++; - } - - dirty_pages = malloc(sizeof(*dirty_pages) * nvcpu); - - DirtyStat.dirty_ring.nvcpu = nvcpu; - DirtyStat.dirty_ring.rates = malloc(sizeof(DirtyRateVcpu) * nvcpu); - - dirtyrate_global_dirty_log_start(); - - CPU_FOREACH(cpu) { - record_dirtypages(dirty_pages, cpu, true); - } - - start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); - DirtyStat.start_time = start_time / 1000; + /* start log sync */ + global_dirty_log_change(GLOBAL_DIRTY_DIRTY_RATE, true); - msec = config.sample_period_seconds * 1000; - msec = set_sample_page_period(msec, start_time); - DirtyStat.calc_time = msec / 1000; + DirtyStat.start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) / 1000; - dirtyrate_global_dirty_log_stop(); + /* calculate vcpu dirtyrate */ + duration = vcpu_calculate_dirtyrate(config.sample_period_seconds * 1000, + &DirtyStat.dirty_ring, + GLOBAL_DIRTY_DIRTY_RATE, + true); - CPU_FOREACH(cpu) { - record_dirtypages(dirty_pages, cpu, false); - } + DirtyStat.calc_time = duration / 1000; + /* calculate vm dirtyrate */ for (i = 0; i < DirtyStat.dirty_ring.nvcpu; i++) { - dirtyrate = do_calculate_dirtyrate_vcpu(dirty_pages[i]); - trace_dirtyrate_do_calculate_vcpu(i, dirtyrate); - - DirtyStat.dirty_ring.rates[i].id = i; + dirtyrate = DirtyStat.dirty_ring.rates[i].dirty_rate;
DirtyStat.dirty_ring.rates[i].dirty_rate = dirtyrate; dirtyrate_sum += dirtyrate; } DirtyStat.dirty_rate = dirtyrate_sum; - free(dirty_pages); } static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config) @@ -574,7 +635,7 @@ static void calculate_dirtyrate_sample_vm(struct DirtyRateConfig config) rcu_read_unlock(); msec = config.sample_period_seconds * 1000; - msec = set_sample_page_period(msec, initial_time); + msec = dirty_stat_wait(msec, initial_time); DirtyStat.start_time = initial_time / 1000; DirtyStat.calc_time = msec / 1000; diff --git a/migration/dirtyrate.h b/migration/dirtyrate.h index 69d4c5b8655f5fa6ae7bd31a97e46e05cf260d27..594a5c0bb64c296bd294ed37949e095ddf9209cd 100644 --- a/migration/dirtyrate.h +++ b/migration/dirtyrate.h @@ -13,6 +13,8 @@ #ifndef QEMU_MIGRATION_DIRTYRATE_H #define QEMU_MIGRATION_DIRTYRATE_H +#include "sysemu/dirtyrate.h" + /* * Sample 512 pages per GB as default. */ @@ -65,11 +67,6 @@ typedef struct SampleVMStat { uint64_t total_block_mem_MB; /* size of total sampled pages in MB */ } SampleVMStat; -typedef struct VcpuStat { - int nvcpu; /* number of vcpu */ - DirtyRateVcpu *rates; /* array of dirty rate for each vcpu */ -} VcpuStat; - /* * Store calculation statistics for each measure. */ diff --git a/migration/migration.c b/migration/migration.c index abaf6f9e3d790fa2d0d9528ca0fcce070f0429cf..353b7c9a09935260cd7f77474b7cd64b0aa74aea 100644 --- a/migration/migration.c +++ b/migration/migration.c @@ -60,6 +60,8 @@ #include "qemu/yank.h" #include "sysemu/cpus.h" #include "yank_functions.h" +#include "sysemu/kvm.h" +#include "sysemu/dirtylimit.h" #define MAX_THROTTLE (128 << 20) /* Migration transfer speed throttling */ @@ -83,6 +85,7 @@ #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2 /*0: means nocompress, 1: best speed, ... 9: best compress ratio */ #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1 +#define DEFAULT_MIGRATE_COMPRESS_METHOD COMPRESS_METHOD_ZLIB /* Define default autoconverge cpu throttle migration parameters */ #define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50 #define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20 @@ -115,6 +118,11 @@ #define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS 5 #define DEFAULT_MIGRATE_ANNOUNCE_STEP 100 +#define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD 1000 /* milliseconds */ +#define DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT 1 /* MB/s */ + +#define DEFAULT_FD_MAX 4096 + static NotifierList migration_state_notifiers = NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); @@ -177,7 +185,6 @@ static bool migration_object_check(MigrationState *ms, Error **errp); static int migration_maybe_pause(MigrationState *s, int *current_active_state, int new_state); -static void migrate_fd_cancel(MigrationState *s); static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp) { @@ -220,11 +227,20 @@ void migration_cancel(const Error *error) if (error) { migrate_set_error(current_migration, error); } + if (migrate_dirty_limit()) { + qmp_cancel_vcpu_dirty_limit(false, -1, NULL); + } migrate_fd_cancel(current_migration); } void migration_shutdown(void) { + /* + * When the QEMU main thread exits, the COLO thread + * may be waiting on a semaphore, so wake up the + * COLO thread before migration shutdown.
@@ -855,6 +871,8 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
params->compress_wait_thread = s->parameters.compress_wait_thread;
params->has_decompress_threads = true;
params->decompress_threads = s->parameters.decompress_threads;
+ params->has_compress_method = true;
+ params->compress_method = s->parameters.compress_method;
params->has_throttle_trigger_threshold = true;
params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
params->has_cpu_throttle_initial = true;
@@ -908,6 +926,11 @@ MigrationParameters *qmp_query_migrate_parameters(Error **errp)
s->parameters.block_bitmap_mapping);
}
+ params->has_x_vcpu_dirty_limit_period = true;
+ params->x_vcpu_dirty_limit_period = s->parameters.x_vcpu_dirty_limit_period;
+ params->has_vcpu_dirty_limit = true;
+ params->vcpu_dirty_limit = s->parameters.vcpu_dirty_limit;
+
return params;
}
@@ -1040,6 +1063,15 @@ static void populate_ram_info(MigrationInfo *info, MigrationState *s)
info->ram->remaining = ram_bytes_remaining();
info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
}
+
+ if (migrate_dirty_limit() && dirtylimit_in_service()) {
+ info->has_dirty_limit_throttle_time_per_round = true;
+ info->dirty_limit_throttle_time_per_round =
+ dirtylimit_throttle_time_per_round();
+
+ info->has_dirty_limit_ring_full_time = true;
+ info->dirty_limit_ring_full_time = dirtylimit_ring_full_time();
+ }
}
static void populate_disk_info(MigrationInfo *info)
@@ -1249,6 +1281,20 @@ static bool migrate_caps_check(bool *cap_list,
error_setg(errp, "multifd is not supported by current protocol");
return false;
}
+
+ if (cap_list[MIGRATION_CAPABILITY_DIRTY_LIMIT]) {
+ if (cap_list[MIGRATION_CAPABILITY_AUTO_CONVERGE]) {
+ error_setg(errp, "dirty-limit conflicts with auto-converge,"
+ " only one of them can be enabled at a time");
+ return false;
+ }
+
+ if (!kvm_enabled() || !kvm_dirty_ring_enabled()) {
+ error_setg(errp, "dirty-limit requires KVM with accelerator"
+ " property 'dirty-ring-size' set");
+ return false;
+ }
+ }
return true;
}
@@ -1317,14 +1363,38 @@ void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
}
}
+static bool compress_level_check(MigrationParameters *params, Error **errp)
+{
+ switch (params->compress_method) {
+ case COMPRESS_METHOD_ZLIB:
+ if (params->compress_level > 9 || params->compress_level < 1) {
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
+ "a value in the range of 1 to 9 for the zlib method");
+ return false;
+ }
+ break;
+#ifdef CONFIG_ZSTD
+ case COMPRESS_METHOD_ZSTD:
+ if (params->compress_level > 19 || params->compress_level < 1) {
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
+ "a value in the range of 1 to 19 for the zstd method");
+ return false;
+ }
+ break;
+#endif
+ default:
+ error_setg(errp, "Checking compress_level failed for unknown reason");
+ return false;
+ }
+
+ return true;
+}
+
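With per-method ranges, a level that is valid for one method can be out of range for another. A hypothetical probe of compress_level_check(), purely to illustrate the accepted ranges (not part of the patch; passing a NULL errp simply discards the error):

/* Illustration only: zlib accepts 1..9, zstd accepts 1..19. */
static void compress_level_ranges_demo(void)
{
    MigrationParameters p = { .has_compress_level = true };

    p.compress_method = COMPRESS_METHOD_ZLIB;
    p.compress_level = 9;
    g_assert(compress_level_check(&p, NULL));  /* top of the zlib range */

    p.compress_level = 15;
    g_assert(!compress_level_check(&p, NULL)); /* too high for zlib... */

#ifdef CONFIG_ZSTD
    p.compress_method = COMPRESS_METHOD_ZSTD;
    g_assert(compress_level_check(&p, NULL));  /* ...but fine for zstd */
#endif
}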
/*
* Check whether the parameters are valid. Error will be put into errp
* (if provided). Return true if valid, otherwise false.
*/
static bool migrate_params_check(MigrationParameters *params, Error **errp)
{
- if (params->has_compress_level &&
- (params->compress_level > 9)) {
- error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
- "a value between 0 and 9");
+ if (params->has_compress_level && !compress_level_check(params, errp)) {
return false;
}
@@ -1465,6 +1537,23 @@ static bool migrate_params_check(MigrationParameters *params, Error **errp)
return false;
}
+ if (params->has_x_vcpu_dirty_limit_period &&
+ (params->x_vcpu_dirty_limit_period < 1 ||
+ params->x_vcpu_dirty_limit_period > 1000)) {
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
+ "x-vcpu-dirty-limit-period",
+ "a value between 1 and 1000");
+ return false;
+ }
+
+ if (params->has_vcpu_dirty_limit &&
+ (params->vcpu_dirty_limit < 1)) {
+ error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
+ "vcpu_dirty_limit",
+ "a value greater than or equal to 1 MB/s");
+ return false;
+ }
+
return true;
}
@@ -1491,6 +1580,10 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
dest->decompress_threads = params->decompress_threads;
}
+ if (params->has_compress_method) {
+ dest->compress_method = params->compress_method;
+ }
+
if (params->has_throttle_trigger_threshold) {
dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
}
@@ -1564,6 +1657,14 @@ static void migrate_params_test_apply(MigrateSetParameters *params,
dest->has_block_bitmap_mapping = true;
dest->block_bitmap_mapping = params->block_bitmap_mapping;
}
+
+ if (params->has_x_vcpu_dirty_limit_period) {
+ dest->x_vcpu_dirty_limit_period =
+ params->x_vcpu_dirty_limit_period;
+ }
+ if (params->has_vcpu_dirty_limit) {
+ dest->vcpu_dirty_limit = params->vcpu_dirty_limit;
+ }
}
static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
@@ -1588,6 +1689,10 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
s->parameters.decompress_threads = params->decompress_threads;
}
+ if (params->has_compress_method) {
+ s->parameters.compress_method = params->compress_method;
+ }
+
if (params->has_throttle_trigger_threshold) {
s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
}
@@ -1686,6 +1791,14 @@ static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
QAPI_CLONE(BitmapMigrationNodeAliasList,
params->block_bitmap_mapping);
}
+
+ if (params->has_x_vcpu_dirty_limit_period) {
+ s->parameters.x_vcpu_dirty_limit_period =
+ params->x_vcpu_dirty_limit_period;
+ }
+ if (params->has_vcpu_dirty_limit) {
+ s->parameters.vcpu_dirty_limit = params->vcpu_dirty_limit;
+ }
}
void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
@@ -1877,7 +1990,7 @@ void migrate_fd_error(MigrationState *s, const Error *error)
migrate_set_error(s, error);
}
-static void migrate_fd_cancel(MigrationState *s)
+void migrate_fd_cancel(MigrationState *s)
{
int old_state;
QEMUFile *f = migrate_get_current()->to_dst_file;
@@ -2085,6 +2198,31 @@ void migrate_del_blocker(Error *reason)
migration_blockers = g_slist_remove(migration_blockers, reason);
}
+/*
+ * The kernel expands the fdtable allocated to the QEMU process whenever
+ * the number of fds held by the process exceeds a power of 2 (starting from 64).
+ * Each expansion introduces tens of ms of latency due to RCU synchronization.
+ * Do the expansion during QEMU process initialization to avoid
+ * triggering it during the migration downtime phase.
+ */ +static void qemu_pre_extend_fdtable(void) +{ + int buffer[DEFAULT_FD_MAX] = {0}; + int i; + + /* expand fdtable */ + for (i = 0; i < DEFAULT_FD_MAX; i++) { + buffer[i] = qemu_dup(STDIN_FILENO); + } + + /* close tmp fd */ + for (i = 0; i < DEFAULT_FD_MAX; i++) { + if (buffer[i] > 0) { + (void)qemu_close(buffer[i]); + } + } +} + void qmp_migrate_incoming(const char *uri, Error **errp) { Error *local_err = NULL; @@ -2103,6 +2241,8 @@ void qmp_migrate_incoming(const char *uri, Error **errp) return; } + qemu_pre_extend_fdtable(); + qemu_start_incoming_migration(uri, &local_err); if (local_err) { @@ -2449,6 +2589,15 @@ int migrate_decompress_threads(void) return s->parameters.decompress_threads; } +CompressMethod migrate_compress_method(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->parameters.compress_method; +} + bool migrate_dirty_bitmaps(void) { MigrationState *s; @@ -2458,6 +2607,15 @@ bool migrate_dirty_bitmaps(void) return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS]; } +bool migrate_dirty_limit(void) +{ + MigrationState *s; + + s = migrate_get_current(); + + return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_LIMIT]; +} + bool migrate_ignore_shared(void) { MigrationState *s; @@ -2938,7 +3096,6 @@ static int postcopy_start(MigrationState *ms) int ret; QIOChannelBuffer *bioc; QEMUFile *fb; - int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); int64_t bandwidth = migrate_max_postcopy_bandwidth(); bool restart_block = false; int cur_state = MIGRATION_STATUS_ACTIVE; @@ -2951,6 +3108,8 @@ static int postcopy_start(MigrationState *ms) qemu_mutex_lock_iothread(); trace_postcopy_start_set_run(); + ms->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); + qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL); global_state_store(); ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); @@ -3061,7 +3220,7 @@ static int postcopy_start(MigrationState *ms) ms->postcopy_after_devices = true; notifier_list_notify(&migration_state_notifiers, ms); - ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop; + ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - ms->downtime_start; qemu_mutex_unlock_iothread(); @@ -3781,6 +3940,9 @@ static void *migration_thread(void *opaque) MigThrError thr_error; bool urgent = false; + /* report migration thread pid to libvirt */ + qapi_event_send_migration_pid(qemu_get_thread_id()); + rcu_register_thread(); object_ref(OBJECT(s)); @@ -4159,6 +4321,9 @@ static Property migration_properties[] = { DEFINE_PROP_UINT8("x-decompress-threads", MigrationState, parameters.decompress_threads, DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT), + DEFINE_PROP_COMPRESS_METHOD("compress-method", MigrationState, + parameters.compress_method, + DEFAULT_MIGRATE_COMPRESS_METHOD), DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState, parameters.throttle_trigger_threshold, DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD), @@ -4211,6 +4376,13 @@ static Property migration_properties[] = { DEFINE_PROP_SIZE("announce-step", MigrationState, parameters.announce_step, DEFAULT_MIGRATE_ANNOUNCE_STEP), + DEFINE_PROP_UINT64("x-vcpu-dirty-limit-period", MigrationState, + parameters.x_vcpu_dirty_limit_period, + DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT_PERIOD), + DEFINE_PROP_UINT64("vcpu-dirty-limit", MigrationState, + parameters.vcpu_dirty_limit, + DEFAULT_MIGRATE_VCPU_DIRTY_LIMIT), + DEFINE_PROP_MIG_CAP("x-dirty-limit", MIGRATION_CAPABILITY_DIRTY_LIMIT), /* Migration capabilities */ DEFINE_PROP_MIG_CAP("x-xbzrle", 
MIGRATION_CAPABILITY_XBZRLE), @@ -4257,17 +4429,8 @@ static void migration_instance_finalize(Object *obj) error_free(ms->error); } -static void migration_instance_init(Object *obj) +void migrate_params_init(MigrationParameters *params) { - MigrationState *ms = MIGRATION_OBJ(obj); - MigrationParameters *params = &ms->parameters; - - ms->state = MIGRATION_STATUS_NONE; - ms->mbps = -1; - ms->pages_per_second = -1; - qemu_sem_init(&ms->pause_sem, 0); - qemu_mutex_init(&ms->error_mutex); - params->tls_hostname = g_strdup(""); params->tls_creds = g_strdup(""); @@ -4275,6 +4438,7 @@ static void migration_instance_init(Object *obj) params->has_compress_level = true; params->has_compress_threads = true; params->has_decompress_threads = true; + params->has_compress_method = true; params->has_throttle_trigger_threshold = true; params->has_cpu_throttle_initial = true; params->has_cpu_throttle_increment = true; @@ -4294,6 +4458,22 @@ static void migration_instance_init(Object *obj) params->has_announce_max = true; params->has_announce_rounds = true; params->has_announce_step = true; + params->has_x_vcpu_dirty_limit_period = true; + params->has_vcpu_dirty_limit = true; +} + + +static void migration_instance_init(Object *obj) +{ + MigrationState *ms = MIGRATION_OBJ(obj); + + ms->state = MIGRATION_STATUS_NONE; + ms->mbps = -1; + ms->pages_per_second = -1; + qemu_sem_init(&ms->pause_sem, 0); + qemu_mutex_init(&ms->error_mutex); + + migrate_params_init(&ms->parameters); qemu_sem_init(&ms->postcopy_pause_sem, 0); qemu_sem_init(&ms->postcopy_pause_rp_sem, 0); diff --git a/migration/migration.h b/migration/migration.h index 8130b703eb958e0c5091151ca6c775bbce0ec3f6..9ce1fc8ea042c07770e308de1927750c72dce8f6 100644 --- a/migration/migration.h +++ b/migration/migration.h @@ -317,6 +317,7 @@ bool migration_is_setup_or_active(int state); bool migration_is_running(int state); void migrate_init(MigrationState *s); +void migrate_params_init(MigrationParameters *params); bool migration_is_blocked(Error **errp); /* True if outgoing migration has entered postcopy phase */ bool migration_in_postcopy(void); @@ -328,6 +329,7 @@ bool migrate_release_ram(void); bool migrate_postcopy_ram(void); bool migrate_zero_blocks(void); bool migrate_dirty_bitmaps(void); +bool migrate_dirty_limit(void); bool migrate_ignore_shared(void); bool migrate_validate_uuid(void); @@ -355,6 +357,7 @@ int migrate_compress_level(void); int migrate_compress_threads(void); int migrate_compress_wait_thread(void); int migrate_decompress_threads(void); +CompressMethod migrate_compress_method(void); bool migrate_use_events(void); bool migrate_postcopy_blocktime(void); bool migrate_background_snapshot(void); @@ -392,4 +395,6 @@ void migration_cancel(const Error *error); void populate_vfio_info(MigrationInfo *info); +void migrate_fd_cancel(MigrationState *s); + #endif diff --git a/migration/multifd.c b/migration/multifd.c index 7c9deb1921d4a14e4aea60ab00bbef3ce826256a..4befde5cad981f9959d6085849910bc6c042cb8c 100644 --- a/migration/multifd.c +++ b/migration/multifd.c @@ -17,6 +17,7 @@ #include "exec/ramblock.h" #include "qemu/error-report.h" #include "qapi/error.h" +#include "qapi/qapi-events-migration.h" #include "ram.h" #include "migration.h" #include "socket.h" @@ -629,6 +630,9 @@ static void *multifd_send_thread(void *opaque) int ret = 0; uint32_t flags = 0; + /* report multifd thread pid to libvirt */ + qapi_event_send_migration_multifd_pid(qemu_get_thread_id()); + trace_multifd_send_thread_start(p->id); rcu_register_thread(); @@ -925,12 +929,10 @@ int 
multifd_save_setup(Error **errp) for (i = 0; i < thread_count; i++) { MultiFDSendParams *p = &multifd_send_state->params[i]; - Error *local_err = NULL; int ret; - ret = multifd_send_state->ops->send_setup(p, &local_err); + ret = multifd_send_state->ops->send_setup(p, errp); if (ret) { - error_propagate(errp, local_err); return ret; } } @@ -1163,12 +1165,10 @@ int multifd_load_setup(Error **errp) for (i = 0; i < thread_count; i++) { MultiFDRecvParams *p = &multifd_recv_state->params[i]; - Error *local_err = NULL; int ret; - ret = multifd_recv_state->ops->recv_setup(p, &local_err); + ret = multifd_recv_state->ops->recv_setup(p, errp); if (ret) { - error_propagate(errp, local_err); return ret; } } diff --git a/migration/qemu-file.c b/migration/qemu-file.c index 6338d8e2ff532c6b0bebe12b867887386ddb42e6..e07026da4f26a99422354901092746c283bf67e3 100644 --- a/migration/qemu-file.c +++ b/migration/qemu-file.c @@ -745,55 +745,6 @@ uint64_t qemu_get_be64(QEMUFile *f) return v; } -/* return the size after compression, or negative value on error */ -static int qemu_compress_data(z_stream *stream, uint8_t *dest, size_t dest_len, - const uint8_t *source, size_t source_len) -{ - int err; - - err = deflateReset(stream); - if (err != Z_OK) { - return -1; - } - - stream->avail_in = source_len; - stream->next_in = (uint8_t *)source; - stream->avail_out = dest_len; - stream->next_out = dest; - - err = deflate(stream, Z_FINISH); - if (err != Z_STREAM_END) { - return -1; - } - - return stream->next_out - dest; -} - -/* Compress size bytes of data start at p and store the compressed - * data to the buffer of f. - * - * Since the file is dummy file with empty_ops, return -1 if f has no space to - * save the compressed data. - */ -ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream, - const uint8_t *p, size_t size) -{ - ssize_t blen = IO_BUF_SIZE - f->buf_index - sizeof(int32_t); - - if (blen < compressBound(size)) { - return -1; - } - - blen = qemu_compress_data(stream, f->buf + f->buf_index + sizeof(int32_t), - blen, p, size); - if (blen < 0) { - return -1; - } - - qemu_put_be32(f, blen); - add_buf_to_iovec(f, blen); - return blen + sizeof(int32_t); -} /* Put the data in the buffer of f_src to the buffer of f_des, and * then reset the buf_index of f_src to 0. @@ -866,3 +817,15 @@ QIOChannel *qemu_file_get_ioc(QEMUFile *file) { return file->has_ioc ? 
QIO_CHANNEL(file->opaque) : NULL;
}
+
+ssize_t qemu_put_compress_start(QEMUFile *f, uint8_t **dest_ptr)
+{
+ *dest_ptr = f->buf + f->buf_index + sizeof(int32_t);
+ return IO_BUF_SIZE - f->buf_index - sizeof(int32_t);
+}
+
+void qemu_put_compress_end(QEMUFile *f, unsigned int v)
+{
+ qemu_put_be32(f, v);
+ add_buf_to_iovec(f, v);
+}
diff --git a/migration/qemu-file.h b/migration/qemu-file.h
index 3f36d4dc8c4b4109a3897d82b898789c9b10b88a..617a1373adcc767d95c48e6ba248b82e7ef178ef 100644
--- a/migration/qemu-file.h
+++ b/migration/qemu-file.h
@@ -139,8 +139,6 @@ bool qemu_file_is_writable(QEMUFile *f);
size_t qemu_peek_buffer(QEMUFile *f, uint8_t **buf, size_t size, size_t offset);
size_t qemu_get_buffer_in_place(QEMUFile *f, uint8_t **buf, size_t size);
-ssize_t qemu_put_compression_data(QEMUFile *f, z_stream *stream,
- const uint8_t *p, size_t size);
int qemu_put_qemu_file(QEMUFile *f_des, QEMUFile *f_src);
/*
@@ -167,6 +165,8 @@ void ram_control_before_iterate(QEMUFile *f, uint64_t flags);
void ram_control_after_iterate(QEMUFile *f, uint64_t flags);
void ram_control_load_hook(QEMUFile *f, uint64_t flags, void *data);
+ssize_t qemu_put_compress_start(QEMUFile *f, uint8_t **dest_ptr);
+void qemu_put_compress_end(QEMUFile *f, unsigned int v);
/* Whenever this is found in the data stream, the flags
* will be passed to ram_control_load_hook in the incoming-migration
* side. This lets before_ram_iterate/after_ram_iterate add
diff --git a/migration/ram.c b/migration/ram.c
index 863035d23517c17265593a2d4465f27bcaddab07..ff5f4ab07af551b823beb8f3c65ca7e2d4a95854 100644
--- a/migration/ram.c
+++ b/migration/ram.c
@@ -43,6 +43,7 @@
#include "qapi/error.h"
#include "qapi/qapi-types-migration.h"
#include "qapi/qapi-events-migration.h"
+#include "qapi/qapi-commands-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
@@ -55,6 +56,8 @@
#include "qemu/iov.h"
#include "multifd.h"
#include "sysemu/runstate.h"
+#include "sysemu/dirtylimit.h"
+#include "sysemu/kvm.h"
#include "hw/boards.h" /* for machine_dump_guest_core() */
@@ -62,6 +65,11 @@
#include "qemu/userfaultfd.h"
#endif /* defined(__linux__) */
+#ifdef CONFIG_ZSTD
+#include <zstd.h>
+#include <zstd_errors.h>
+#endif
+
/***********************************************************/
/* ram save/restore */
@@ -86,6 +94,34 @@ static inline bool is_zero_range(uint8_t *p, uint64_t size)
return buffer_is_zero(p, size);
}
+int (*xbzrle_encode_buffer_func)(uint8_t *, uint8_t *, int,
+ uint8_t *, int) = xbzrle_encode_buffer;
+#if defined(CONFIG_AVX512BW_OPT)
+#include "qemu/cpuid.h"
+static void __attribute__((constructor)) init_cpu_flag(void)
+{
+ unsigned max = __get_cpuid_max(0, NULL);
+ int a, b, c, d;
+ if (max >= 1) {
+ __cpuid(1, a, b, c, d);
+ /* We must check that AVX is not just available, but usable.
*/ + if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) { + int bv; + __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); + __cpuid_count(7, 0, a, b, c, d); + /* 0xe6: + * XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 + * and ZMM16-ZMM31 state are enabled by OS) + * XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS) + */ + if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512BW)) { + xbzrle_encode_buffer_func = xbzrle_encode_buffer_avx512; + } + } + } +} +#endif + XBZRLECacheStats xbzrle_counters; /* struct contains XBZRLE cache and a static page @@ -415,8 +451,16 @@ struct CompressParam { ram_addr_t offset; /* internally used fields */ - z_stream stream; uint8_t *originbuf; + + /* for zlib compression */ + z_stream stream; + +#ifdef CONFIG_ZSTD + ZSTD_CStream *zstd_cs; + ZSTD_inBuffer in; + ZSTD_outBuffer out; +#endif }; typedef struct CompressParam CompressParam; @@ -428,12 +472,34 @@ struct DecompressParam { void *des; uint8_t *compbuf; int len; + + /* for zlib compression */ z_stream stream; +#ifdef CONFIG_ZSTD + ZSTD_DStream *zstd_ds; + ZSTD_inBuffer in; + ZSTD_outBuffer out; +#endif }; typedef struct DecompressParam DecompressParam; +typedef struct { + int (*save_setup)(CompressParam *param); + void (*save_cleanup)(CompressParam *param); + ssize_t (*compress_data)(CompressParam *param, size_t size); +} MigrationCompressOps; + +typedef struct { + int (*load_setup)(DecompressParam *param); + void (*load_cleanup)(DecompressParam *param); + int (*decompress_data)(DecompressParam *param, uint8_t *dest, size_t size); + int (*check_len)(int len); +} MigrationDecompressOps; + static CompressParam *comp_param; static QemuThread *compress_threads; +static MigrationCompressOps *compress_ops; +static MigrationDecompressOps *decompress_ops; /* comp_done_cond is used to wake up the migration thread when * one of the compression threads has finished the compression. * comp_done_lock is used to co-work with comp_done_cond. 
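The MigrationCompressOps/MigrationDecompressOps split above is a plain function-pointer table: the worker threads only ever dispatch through compress_ops/decompress_ops, so wiring in a new codec means filling one more table in set_compress_ops() later in this file. A self-contained toy of the same pattern (illustrative names, not the patch's types):

#include <stddef.h>
#include <string.h>

typedef struct {
    const char *name;
    size_t (*compress)(void *dst, const void *src, size_t len);
} ToyCompressOps;

/* A "null codec" standing in for deflate()/ZSTD_compressStream2(). */
static size_t null_compress(void *dst, const void *src, size_t len)
{
    memcpy(dst, src, len);
    return len;
}

static const ToyCompressOps toy_ops = { "null", null_compress };

/* Call sites never name the codec, mirroring the
 * compress_ops->compress_data(param, size) calls below. */
static size_t compress_page(const ToyCompressOps *ops,
                            void *dst, const void *src, size_t len)
{
    return ops->compress(dst, src, len);
}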
@@ -449,26 +515,288 @@ static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;
-static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
- ram_addr_t offset, uint8_t *source_buf);
+static bool do_compress_ram_page(CompressParam *param, RAMBlock *block);
+
+static int zlib_save_setup(CompressParam *param)
+{
+ if (deflateInit(&param->stream,
+ migrate_compress_level()) != Z_OK) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static ssize_t zlib_compress_data(CompressParam *param, size_t size)
+{
+ int err;
+ uint8_t *dest = NULL;
+ z_stream *stream = &param->stream;
+ uint8_t *p = param->originbuf;
+ QEMUFile *f = param->file;
+ ssize_t blen = qemu_put_compress_start(f, &dest);
+
+ if (blen < compressBound(size)) {
+ return -1;
+ }
+
+ err = deflateReset(stream);
+ if (err != Z_OK) {
+ return -1;
+ }
+
+ stream->avail_in = size;
+ stream->next_in = p;
+ stream->avail_out = blen;
+ stream->next_out = dest;
+
+ err = deflate(stream, Z_FINISH);
+ if (err != Z_STREAM_END) {
+ return -1;
+ }
+
+ blen = stream->next_out - dest;
+ if (blen < 0) {
+ return -1;
+ }
+
+ qemu_put_compress_end(f, blen);
+ return blen + sizeof(int32_t);
+}
+
+static void zlib_save_cleanup(CompressParam *param)
+{
+ deflateEnd(&param->stream);
+}
+
+static int zlib_load_setup(DecompressParam *param)
+{
+ if (inflateInit(&param->stream) != Z_OK) {
+ return -1;
+ }
+
+ return 0;
+}
+
+static int
+zlib_decompress_data(DecompressParam *param, uint8_t *dest, size_t size)
+{
+ int err;
+
+ z_stream *stream = &param->stream;
+
+ err = inflateReset(stream);
+ if (err != Z_OK) {
+ return -1;
+ }
+
+ stream->avail_in = param->len;
+ stream->next_in = param->compbuf;
+ stream->avail_out = size;
+ stream->next_out = dest;
+
+ err = inflate(stream, Z_NO_FLUSH);
+ if (err != Z_STREAM_END) {
+ return -1;
+ }
+
+ return stream->total_out;
+}
+
+static void zlib_load_cleanup(DecompressParam *param)
+{
+ inflateEnd(&param->stream);
+}
+
+static int zlib_check_len(int len)
+{
+ return len < 0 || len > compressBound(TARGET_PAGE_SIZE);
+}
+
+#ifdef CONFIG_ZSTD
+static int zstd_save_setup(CompressParam *param)
+{
+ int res;
+ param->zstd_cs = ZSTD_createCStream();
+ if (!param->zstd_cs) {
+ return -1;
+ }
+ res = ZSTD_initCStream(param->zstd_cs, migrate_compress_level());
+ if (ZSTD_isError(res)) {
+ return -1;
+ }
+ return 0;
+}
+static void zstd_save_cleanup(CompressParam *param)
+{
+ ZSTD_freeCStream(param->zstd_cs);
+ param->zstd_cs = NULL;
+}
+static ssize_t zstd_compress_data(CompressParam *param, size_t size)
+{
+ int ret;
+ uint8_t *dest = NULL;
+ uint8_t *p = param->originbuf;
+ QEMUFile *f = param->file;
+ ssize_t blen = qemu_put_compress_start(f, &dest);
+ if (blen < ZSTD_compressBound(size)) {
+ return -1;
+ }
+ param->out.dst = dest;
+ param->out.size = blen;
+ param->out.pos = 0;
+ param->in.src = p;
+ param->in.size = size;
+ param->in.pos = 0;
+ do {
+ ret = ZSTD_compressStream2(param->zstd_cs, &param->out,
+ &param->in, ZSTD_e_end);
+ } while (ret > 0 && (param->in.size - param->in.pos > 0)
+ && (param->out.size - param->out.pos > 0));
+ if (ret > 0 && (param->in.size - param->in.pos > 0)) {
+ return -1;
+ }
+ if (ZSTD_isError(ret)) {
+ return -1;
+ }
+ blen = param->out.pos;
+ qemu_put_compress_end(f, blen);
+ return blen + sizeof(int32_t);
+}
+
+static int zstd_load_setup(DecompressParam *param)
+{
+ int ret;
+ param->zstd_ds = ZSTD_createDStream();
+ if (!param->zstd_ds) {
+ return -1;
+ }
+ ret = ZSTD_initDStream(param->zstd_ds);
+ if (ZSTD_isError(ret)) {
+ return -1;
+ }
+ return 0;
+}
+static void zstd_load_cleanup(DecompressParam *param)
+{
+ ZSTD_freeDStream(param->zstd_ds);
+ param->zstd_ds = NULL;
+}
+static int
+zstd_decompress_data(DecompressParam *param, uint8_t *dest, size_t size)
+{
+ int ret;
+ param->out.dst = dest;
+ param->out.size = size;
+ param->out.pos = 0;
+ param->in.src = param->compbuf;
+ param->in.size = param->len;
+ param->in.pos = 0;
+ do {
+ ret = ZSTD_decompressStream(param->zstd_ds, &param->out, &param->in);
+ } while (ret > 0 && (param->in.size - param->in.pos > 0)
+ && (param->out.size - param->out.pos > 0));
+ if (ret > 0 && (param->in.size - param->in.pos > 0)) {
+ return -1;
+ }
+ if (ZSTD_isError(ret)) {
+ return -1;
+ }
+ return ret;
+}
+static int zstd_check_len(int len)
+{
+ return len < 0 || len > ZSTD_compressBound(TARGET_PAGE_SIZE);
+}
+#endif
+
+static int set_compress_ops(void)
+{
+ compress_ops = g_new0(MigrationCompressOps, 1);
+
+ switch (migrate_compress_method()) {
+ case COMPRESS_METHOD_ZLIB:
+ compress_ops->save_setup = zlib_save_setup;
+ compress_ops->save_cleanup = zlib_save_cleanup;
+ compress_ops->compress_data = zlib_compress_data;
+ break;
+#ifdef CONFIG_ZSTD
+ case COMPRESS_METHOD_ZSTD:
+ compress_ops->save_setup = zstd_save_setup;
+ compress_ops->save_cleanup = zstd_save_cleanup;
+ compress_ops->compress_data = zstd_compress_data;
+ break;
+#endif
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int set_decompress_ops(void)
+{
+ decompress_ops = g_new0(MigrationDecompressOps, 1);
+
+ switch (migrate_compress_method()) {
+ case COMPRESS_METHOD_ZLIB:
+ decompress_ops->load_setup = zlib_load_setup;
+ decompress_ops->load_cleanup = zlib_load_cleanup;
+ decompress_ops->decompress_data = zlib_decompress_data;
+ decompress_ops->check_len = zlib_check_len;
+ break;
+#ifdef CONFIG_ZSTD
+ case COMPRESS_METHOD_ZSTD:
+ decompress_ops->load_setup = zstd_load_setup;
+ decompress_ops->load_cleanup = zstd_load_cleanup;
+ decompress_ops->decompress_data = zstd_decompress_data;
+ decompress_ops->check_len = zstd_check_len;
+ break;
+#endif
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static void clean_compress_ops(void)
+{
+ compress_ops->save_setup = NULL;
+ compress_ops->save_cleanup = NULL;
+ compress_ops->compress_data = NULL;
+
+ g_free(compress_ops);
+ compress_ops = NULL;
+}
+
+static void clean_decompress_ops(void)
+{
+ decompress_ops->load_setup = NULL;
+ decompress_ops->load_cleanup = NULL;
+ decompress_ops->decompress_data = NULL;
+
+ g_free(decompress_ops);
+ decompress_ops = NULL;
+}

static void *do_data_compress(void *opaque)
{
CompressParam *param = opaque;
RAMBlock *block;
- ram_addr_t offset;
bool zero_page;
+ /* report compress thread pids to libvirt */
+ qapi_event_send_migration_compress_pid(qemu_get_thread_id());
+
qemu_mutex_lock(&param->mutex);
while (!param->quit) {
if (param->block) {
block = param->block;
- offset = param->offset;
param->block = NULL;
qemu_mutex_unlock(&param->mutex);
- zero_page = do_compress_ram_page(param->file, &param->stream,
- block, offset, param->originbuf);
+ zero_page = do_compress_ram_page(param, block);
qemu_mutex_lock(&comp_done_lock);
param->done = true;
@@ -512,7 +840,7 @@ static void compress_threads_save_cleanup(void)
qemu_thread_join(compress_threads + i);
qemu_mutex_destroy(&comp_param[i].mutex);
qemu_cond_destroy(&comp_param[i].cond);
- deflateEnd(&comp_param[i].stream);
+ compress_ops->save_cleanup(&comp_param[i]);
g_free(comp_param[i].originbuf);
qemu_fclose(comp_param[i].file);
comp_param[i].file = NULL;
@@ -523,6 +851,7 @@ static void compress_threads_save_cleanup(void)
g_free(comp_param);
compress_threads = NULL;
comp_param = NULL;
+ clean_compress_ops();
}

static int compress_threads_save_setup(void)
@@ -532,6 +861,12 @@ static int compress_threads_save_setup(void)
if (!migrate_use_compression()) {
return 0;
}
+
+ if (set_compress_ops() < 0) {
+ clean_compress_ops();
+ return -1;
+ }
+
thread_count = migrate_compress_threads();
compress_threads = g_new0(QemuThread, thread_count);
comp_param = g_new0(CompressParam, thread_count);
@@ -543,8 +878,7 @@ static int compress_threads_save_setup(void)
goto exit;
}
- if (deflateInit(&comp_param[i].stream,
- migrate_compress_level()) != Z_OK) {
+ if (compress_ops->save_setup(&comp_param[i]) < 0) {
g_free(comp_param[i].originbuf);
goto exit;
}
@@ -731,9 +1065,9 @@ static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
/* XBZRLE encoding (if there is no overflow) */
- encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
- TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
- TARGET_PAGE_SIZE);
+ encoded_len = xbzrle_encode_buffer_func(prev_cached_page, XBZRLE.current_buf,
+ TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
+ TARGET_PAGE_SIZE);
/*
* Update the cache contents, so that it corresponds to the data
@@ -1068,6 +1402,37 @@ static void migration_update_rates(RAMState *rs, int64_t end_time)
}
}
+/*
+ * Enable dirty-limit to throttle down the guest
+ */
+static void migration_dirty_limit_guest(void)
+{
+ /*
+ * dirty page rate quota for all vCPUs fetched from
+ * migration parameter 'vcpu_dirty_limit'
+ */
+ static int64_t quota_dirtyrate;
+ MigrationState *s = migrate_get_current();
+
+ /*
+ * If the dirty limit is already in service and the migration
+ * parameter vcpu-dirty-limit is unchanged, there is nothing to do.
+ */
+ if (dirtylimit_in_service() &&
+ quota_dirtyrate == s->parameters.vcpu_dirty_limit) {
+ return;
+ }
+
+ quota_dirtyrate = s->parameters.vcpu_dirty_limit;
+
+ /*
+ * Set a quota dirty page rate for all vCPUs; the second parameter
+ * is ignored when the limit is applied to every vCPU of the VM.
+ */
+ qmp_set_vcpu_dirty_limit(false, -1, quota_dirtyrate, NULL);
+ trace_migration_dirty_limit_guest(quota_dirtyrate);
+}
+
static void migration_trigger_throttle(RAMState *rs)
{
MigrationState *s = migrate_get_current();
@@ -1080,19 +1445,26 @@ static void migration_trigger_throttle(RAMState *rs)
/* During block migration the auto-converge logic incorrectly detects
* that ram migration makes no progress. Avoid this by disabling the
* throttling logic during the bulk phase of block migration. */
- if (migrate_auto_converge() && !blk_mig_bulk_active()) {
- /* The following detection logic can be refined later. For now:
- Check to see if the ratio between dirtied bytes and the approx.
- amount of bytes that just got transferred since the last time
- we were in this routine reaches the threshold. If that happens
- twice, start or increase throttling. */
-
- if ((bytes_dirty_period > bytes_dirty_threshold) &&
- (++rs->dirty_rate_high_cnt >= 2)) {
+ if (blk_mig_bulk_active()) {
+ return;
+ }
+
+ /*
+ * The following detection logic can be refined later. For now:
+ * Check to see if the ratio between dirtied bytes and the approx.
+ * amount of bytes that just got transferred since the last time
+ * we were in this routine reaches the threshold. If that happens
+ * twice, start or increase throttling.
+ */
+ if ((bytes_dirty_period > bytes_dirty_threshold) &&
+ (++rs->dirty_rate_high_cnt >= 2)) {
+ rs->dirty_rate_high_cnt = 0;
+ if (migrate_auto_converge()) {
trace_migration_throttle();
- rs->dirty_rate_high_cnt = 0;
mig_throttle_guest_down(bytes_dirty_period,
bytes_dirty_threshold);
+ } else if (migrate_dirty_limit()) {
+ migration_dirty_limit_guest();
}
}
}
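The trigger above is easiest to read with numbers. Assuming bytes_dirty_threshold is derived, outside this hunk, from the bytes transferred in the same period and the throttle-trigger-threshold percentage (which is what the comment describes), the decision reduces to:

/* Sketch of the assumed trigger arithmetic: with a 50% threshold,
 * dirtying more than half of what was sent in the same period,
 * twice in a row, starts or increases throttling. */
static bool throttle_should_trigger(uint64_t bytes_dirty_period,
                                    uint64_t bytes_xfer_period,
                                    unsigned threshold_pct)
{
    uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold_pct / 100;

    /* e.g. 300 MB dirtied vs 400 MB sent: 300 > 200, so trigger */
    return bytes_dirty_period > bytes_dirty_threshold;
}

Whether the response is auto-converge's vCPU throttle or the new dirty-limit quota is then just the branch shown above.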
@@ -1342,28 +1714,29 @@ static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
return 1;
}
-static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
- ram_addr_t offset, uint8_t *source_buf)
+static bool do_compress_ram_page(CompressParam *param, RAMBlock *block)
{
RAMState *rs = ram_state;
+ ram_addr_t offset = param->offset;
uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
bool zero_page = false;
int ret;
- if (save_zero_page_to_file(rs, f, block, offset)) {
+ if (save_zero_page_to_file(rs, param->file, block, offset)) {
zero_page = true;
goto exit;
}
- save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
+ save_page_header(rs, param->file, block,
+ offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
/*
* copy it to an internal buffer to avoid it being modified by the VM,
* so that we can catch errors during compression and
* decompression
*/
- memcpy(source_buf, p, TARGET_PAGE_SIZE);
- ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
+ memcpy(param->originbuf, p, TARGET_PAGE_SIZE);
+ ret = compress_ops->compress_data(param, TARGET_PAGE_SIZE);
if (ret < 0) {
qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
error_report("compressed data failed!");
@@ -1688,13 +2061,15 @@ out:
static inline void populate_read_range(RAMBlock *block, ram_addr_t offset,
ram_addr_t size)
{
+ const ram_addr_t end = offset + size;
+
/*
* We read one byte of each page; this will preallocate page tables if
* required and populate the shared zeropage on MAP_PRIVATE anonymous memory
* where no page was populated yet. This might require adaptation when
* supporting other mappings, like shmem.
*/
- for (; offset < size; offset += block->page_size) {
+ for (; offset < end; offset += block->page_size) {
char tmp = *((char *)block->host + offset);
/* Don't optimize the read out */
@@ -1808,13 +2183,14 @@ int ram_write_tracking_start(void)
block->max_length, UFFDIO_REGISTER_MODE_WP, NULL)) {
goto fail;
}
+ block->flags |= RAM_UF_WRITEPROTECT;
+ memory_region_ref(block->mr);
+
/* Apply UFFD write protection to the block memory range */
if (uffd_change_protection(rs->uffdio_fd, block->host,
block->max_length, true, false)) {
goto fail;
}
- block->flags |= RAM_UF_WRITEPROTECT;
- memory_region_ref(block->mr);
trace_ram_write_tracking_ramblock_start(block->idstr, block->page_size,
block->host, block->max_length);
@@ -3372,50 +3748,20 @@ void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
}
}
-/* return the size after decompression, or negative value on error */
-static int
-qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
- const uint8_t *source, size_t source_len)
-{
- int err;
-
- err = inflateReset(stream);
- if (err != Z_OK) {
- return -1;
- }
-
- stream->avail_in = source_len;
- stream->next_in = (uint8_t *)source;
- stream->avail_out = dest_len;
- stream->next_out = dest;
-
- err = inflate(stream, Z_NO_FLUSH);
- if (err != Z_STREAM_END) {
- return -1;
- }
-
- return stream->total_out;
-}
-
static void *do_data_decompress(void *opaque)
{
DecompressParam *param = opaque;
- unsigned long pagesize;
uint8_t *des;
- int len, ret;
+ int ret;
qemu_mutex_lock(&param->mutex);
while (!param->quit) {
if (param->des) {
des = param->des;
- len = param->len;
param->des = 0;
qemu_mutex_unlock(&param->mutex);
- pagesize = TARGET_PAGE_SIZE;
-
- ret = qemu_uncompress_data(&param->stream, des, pagesize,
- param->compbuf, len);
+ ret = decompress_ops->decompress_data(param, des, TARGET_PAGE_SIZE);
if (ret < 0 && migrate_get_current()->decompress_error_check) {
error_report("decompress data failed");
qemu_file_set_error(decomp_file, ret);
@@ -3485,7 +3831,7 @@ static void compress_threads_load_cleanup(void)
qemu_thread_join(decompress_threads + i);
qemu_mutex_destroy(&decomp_param[i].mutex);
qemu_cond_destroy(&decomp_param[i].cond);
- inflateEnd(&decomp_param[i].stream);
+ decompress_ops->load_cleanup(&decomp_param[i]);
g_free(decomp_param[i].compbuf);
decomp_param[i].compbuf = NULL;
}
@@ -3494,6 +3840,7 @@ static void compress_threads_load_cleanup(void)
decompress_threads = NULL;
decomp_param = NULL;
decomp_file = NULL;
+ clean_decompress_ops();
}
static int compress_threads_load_setup(QEMUFile *f)
@@ -3504,6 +3851,11 @@ static int compress_threads_load_setup(QEMUFile *f)
return 0;
}
+ if (set_decompress_ops() < 0) {
+ clean_decompress_ops();
+ return -1;
+ }
+
thread_count = migrate_decompress_threads();
decompress_threads = g_new0(QemuThread, thread_count);
decomp_param = g_new0(DecompressParam, thread_count);
@@ -3511,7 +3863,7 @@ static int compress_threads_load_setup(QEMUFile *f)
qemu_cond_init(&decomp_done_cond);
decomp_file = f;
for (i = 0; i < thread_count; i++) {
- if (inflateInit(&decomp_param[i].stream) != Z_OK) {
+ if (decompress_ops->load_setup(&decomp_param[i]) < 0) {
goto exit;
}
@@ -3682,7 +4034,9 @@ static int ram_load_cleanup(void *opaque)
RAMBlock *rb;
RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
- qemu_ram_block_writeback(rb);
+ if (memory_region_is_nonvolatile(rb->mr)) {
+ qemu_ram_block_writeback(rb);
+ }
}
xbzrle_load_cleanup();
@@ -4119,7 +4473,7 @@ static int ram_load_precopy(QEMUFile *f)
case RAM_SAVE_FLAG_COMPRESS_PAGE:
len = qemu_get_be32(f);
- if (len < 0 ||
len > compressBound(TARGET_PAGE_SIZE)) { + if (decompress_ops->check_len(len)) { error_report("Invalid compressed data length: %d", len); ret = -EINVAL; break; @@ -4368,6 +4722,11 @@ static void ram_mig_ram_block_resized(RAMBlockNotifier *n, void *host, RAMBlock *rb = qemu_ram_block_from_host(host, false, &offset); Error *err = NULL; + if (!rb) { + error_report("RAM block not found"); + return; + } + if (ramblock_is_ignored(rb)) { return; } diff --git a/migration/rdma.c b/migration/rdma.c index f5d3bbe7e9c095463c2a128c2384a90152cf6b8a..60c856dd2fd7d8e5cb5219372afcd460c9b30a0e 100644 --- a/migration/rdma.c +++ b/migration/rdma.c @@ -2866,7 +2866,7 @@ static ssize_t qio_channel_rdma_writev(QIOChannel *ioc, size_t remaining = iov[i].iov_len; uint8_t * data = (void *)iov[i].iov_base; while (remaining) { - RDMAControlHeader head; + RDMAControlHeader head = {}; len = MIN(remaining, RDMA_SEND_INCREMENT); remaining -= len; diff --git a/migration/savevm.c b/migration/savevm.c index d59e976d50e7c81c20bbf5b930d1ee19251c36e2..b501504bd51cc6e400276ccb7d4cdc97c720af9d 100644 --- a/migration/savevm.c +++ b/migration/savevm.c @@ -3022,6 +3022,7 @@ bool load_snapshot(const char *name, const char *vmstate, ret = bdrv_snapshot_find(bs_vm_state, &sn, name); aio_context_release(aio_context); if (ret < 0) { + error_setg(errp, "Snapshot can not be found"); return false; } else if (sn.vm_state_size == 0) { error_setg(errp, "This is a disk-only snapshot. Revert to it " diff --git a/migration/target.c b/migration/target.c index 907ebf0a0affe6d88a20a23228670e5ee42d6904..00ca007f978470f2bbbdf70dc18c4e9efe98787f 100644 --- a/migration/target.c +++ b/migration/target.c @@ -8,6 +8,7 @@ #include "qemu/osdep.h" #include "qapi/qapi-types-migration.h" #include "migration.h" +#include CONFIG_DEVICES #ifdef CONFIG_VFIO #include "hw/vfio/vfio-common.h" @@ -17,7 +18,6 @@ void populate_vfio_info(MigrationInfo *info) { #ifdef CONFIG_VFIO if (vfio_mig_active()) { - info->has_vfio = true; info->vfio = g_malloc0(sizeof(*info->vfio)); info->vfio->transferred = vfio_mig_bytes_transferred(); } diff --git a/migration/trace-events b/migration/trace-events index b48d873b8a0313a855a43337a3d3aa2513f47fca..246710f4886f128db570130235ab6705b2f9355a 100644 --- a/migration/trace-events +++ b/migration/trace-events @@ -92,6 +92,7 @@ migration_bitmap_sync_start(void) "" migration_bitmap_sync_end(uint64_t dirty_pages) "dirty_pages %" PRIu64 migration_bitmap_clear_dirty(char *str, uint64_t start, uint64_t size, unsigned long page) "rb %s start 0x%"PRIx64" size 0x%"PRIx64" page 0x%lx" migration_throttle(void) "" +migration_dirty_limit_guest(int64_t dirtyrate) "guest dirty page rate limit %" PRIi64 " MB/s" ram_discard_range(const char *rbname, uint64_t start, size_t len) "%s: start: %" PRIx64 " %zx" ram_load_loop(const char *rbname, uint64_t addr, int flags, void *host) "%s: addr: 0x%" PRIx64 " flags: 0x%x host: %p" ram_load_postcopy_loop(uint64_t addr, int flags) "@%" PRIx64 " %x" diff --git a/migration/vmstate.c b/migration/vmstate.c index 05f87cdddc5d3e5174935c718c856a5093c27394..c9db071beef60a8da0c597345e478c89029852db 100644 --- a/migration/vmstate.c +++ b/migration/vmstate.c @@ -454,7 +454,7 @@ static int vmstate_subsection_load(QEMUFile *f, const VMStateDescription *vmsd, len = qemu_peek_byte(f, 1); if (len < strlen(vmsd->name) + 1) { - /* subsection name has be be "section_name/a" */ + /* subsection name has to be "section_name/a" */ trace_vmstate_subsection_load_bad(vmsd->name, "(short)", ""); return 0; } diff --git a/migration/xbzrle.c 
b/migration/xbzrle.c
index 1ba482ded9c4ffcfb48fe774396e379bb7726284..c6f8b209175acc31ca77d5fae758a14b9ac2d5f3 100644
--- a/migration/xbzrle.c
+++ b/migration/xbzrle.c
@@ -12,6 +12,7 @@
*/
#include "qemu/osdep.h"
#include "qemu/cutils.h"
+#include "qemu/host-utils.h"
#include "xbzrle.h"
/*
@@ -174,3 +175,126 @@ int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen)
return d;
}
+
+#if defined(CONFIG_AVX512BW_OPT)
+#pragma GCC push_options
+#pragma GCC target("avx512bw")
+#include <immintrin.h>
+int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen,
+ uint8_t *dst, int dlen)
+{
+ uint32_t zrun_len = 0, nzrun_len = 0;
+ int d = 0, i = 0, num = 0;
+ uint8_t *nzrun_start = NULL;
+ /* add 1 to include residual part in main loop */
+ uint32_t count512s = (slen >> 6) + 1;
+ /* countResidual is tail of data, i.e., countResidual = slen % 64 */
+ uint32_t count_residual = slen & 0b111111;
+ bool never_same = true;
+ uint64_t mask_residual = 1;
+ mask_residual <<= count_residual;
+ mask_residual -= 1;
+ __m512i r = _mm512_set1_epi32(0);
+
+ while (count512s) {
+ int bytes_to_check = 64;
+ uint64_t mask = 0xffffffffffffffff;
+ if (count512s == 1) {
+ bytes_to_check = count_residual;
+ mask = mask_residual;
+ }
+ __m512i old_data = _mm512_mask_loadu_epi8(r,
+ mask, old_buf + i);
+ __m512i new_data = _mm512_mask_loadu_epi8(r,
+ mask, new_buf + i);
+ uint64_t comp = _mm512_cmpeq_epi8_mask(old_data, new_data);
+ count512s--;
+
+ bool is_same = (comp & 0x1);
+ while (bytes_to_check) {
+ if (d + 2 > dlen) {
+ return -1;
+ }
+ if (is_same) {
+ if (nzrun_len) {
+ d += uleb128_encode_small(dst + d, nzrun_len);
+ if (d + nzrun_len > dlen) {
+ return -1;
+ }
+ nzrun_start = new_buf + i - nzrun_len;
+ memcpy(dst + d, nzrun_start, nzrun_len);
+ d += nzrun_len;
+ nzrun_len = 0;
+ }
+ /* 64 data at a time for speed */
+ if (count512s && (comp == 0xffffffffffffffff)) {
+ i += 64;
+ zrun_len += 64;
+ break;
+ }
+ never_same = false;
+ num = ctz64(~comp);
+ num = (num < bytes_to_check) ? num : bytes_to_check;
+ zrun_len += num;
+ bytes_to_check -= num;
+ comp >>= num;
+ i += num;
+ if (bytes_to_check) {
+ /* still has different data after same data */
+ d += uleb128_encode_small(dst + d, zrun_len);
+ zrun_len = 0;
+ } else {
+ break;
+ }
+ }
+ if (never_same || zrun_len) {
+ /*
+ * never_same only acts if
+ * data begins with diff in first count512s
+ */
+ d += uleb128_encode_small(dst + d, zrun_len);
+ zrun_len = 0;
+ never_same = false;
+ }
+ /* has diff, 64 data at a time for speed */
+ if ((bytes_to_check == 64) && (comp == 0x0)) {
+ i += 64;
+ nzrun_len += 64;
+ break;
+ }
+ num = ctz64(comp);
+ num = (num < bytes_to_check) ?
num : bytes_to_check; + nzrun_len += num; + bytes_to_check -= num; + comp >>= num; + i += num; + if (bytes_to_check) { + /* mask like 111000 */ + d += uleb128_encode_small(dst + d, nzrun_len); + /* overflow */ + if (d + nzrun_len > dlen) { + return -1; + } + nzrun_start = new_buf + i - nzrun_len; + memcpy(dst + d, nzrun_start, nzrun_len); + d += nzrun_len; + nzrun_len = 0; + is_same = true; + } + } + } + + if (nzrun_len != 0) { + d += uleb128_encode_small(dst + d, nzrun_len); + /* overflow */ + if (d + nzrun_len > dlen) { + return -1; + } + nzrun_start = new_buf + i - nzrun_len; + memcpy(dst + d, nzrun_start, nzrun_len); + d += nzrun_len; + } + return d; +} +#pragma GCC pop_options +#endif diff --git a/migration/xbzrle.h b/migration/xbzrle.h index a0db507b9cd9475277749de89a91d2cb98315c21..6feb49160adfff0fd1cdf52e16bf0d6952117954 100644 --- a/migration/xbzrle.h +++ b/migration/xbzrle.h @@ -18,4 +18,8 @@ int xbzrle_encode_buffer(uint8_t *old_buf, uint8_t *new_buf, int slen, uint8_t *dst, int dlen); int xbzrle_decode_buffer(uint8_t *src, int slen, uint8_t *dst, int dlen); +#if defined(CONFIG_AVX512BW_OPT) +int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen, + uint8_t *dst, int dlen); +#endif #endif diff --git a/monitor/hmp-cmds.c b/monitor/hmp-cmds.c index 9c91bf93e94cda6baccc53e9c0a3216e870e4020..4abd4a8aa00115e9e229d3d7676161e50cb30670 100644 --- a/monitor/hmp-cmds.c +++ b/monitor/hmp-cmds.c @@ -45,7 +45,7 @@ #include "qapi/qapi-visit-net.h" #include "qapi/qapi-visit-migration.h" #include "qapi/qmp/qdict.h" -#include "qapi/qmp/qerror.h" +#include "qapi/qapi-visit-migration.h" #include "qapi/string-input-visitor.h" #include "qapi/string-output-visitor.h" #include "qom/object_interfaces.h" @@ -60,6 +60,8 @@ #include #endif +#include "hw/pci/pci.h" + bool hmp_handle_error(Monitor *mon, Error *err) { if (err) { @@ -339,6 +341,16 @@ void hmp_info_migrate(Monitor *mon, const QDict *qdict) info->cpu_throttle_percentage); } + if (info->has_dirty_limit_throttle_time_per_round) { + monitor_printf(mon, "dirty-limit throttle time: %" PRIu64 " us\n", + info->dirty_limit_throttle_time_per_round); + } + + if (info->has_dirty_limit_ring_full_time) { + monitor_printf(mon, "dirty-limit ring full time: %" PRIu64 " us\n", + info->dirty_limit_ring_full_time); + } + if (info->has_postcopy_blocktime) { monitor_printf(mon, "postcopy blocktime: %u\n", info->postcopy_blocktime); @@ -429,6 +441,9 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) MigrationParameter_str(MIGRATION_PARAMETER_DECOMPRESS_THREADS), params->decompress_threads); assert(params->has_throttle_trigger_threshold); + monitor_printf(mon, "%s: %s\n", + MigrationParameter_str(MIGRATION_PARAMETER_COMPRESS_METHOD), + CompressMethod_str(params->compress_method)); monitor_printf(mon, "%s: %u\n", MigrationParameter_str(MIGRATION_PARAMETER_THROTTLE_TRIGGER_THRESHOLD), params->throttle_trigger_threshold); @@ -513,6 +528,14 @@ void hmp_info_migrate_parameters(Monitor *mon, const QDict *qdict) } } } + + monitor_printf(mon, "%s: %" PRIu64 " ms\n", + MigrationParameter_str(MIGRATION_PARAMETER_X_VCPU_DIRTY_LIMIT_PERIOD), + params->x_vcpu_dirty_limit_period); + + monitor_printf(mon, "%s: %" PRIu64 " MB/s\n", + MigrationParameter_str(MIGRATION_PARAMETER_VCPU_DIRTY_LIMIT), + params->vcpu_dirty_limit); } qapi_free_MigrationParameters(params); @@ -760,15 +783,25 @@ static void hmp_info_pci_device(Monitor *mon, const PciDeviceInfo *dev) monitor_printf(mon, " BAR%" PRId64 ": ", region->value->bar); if 
(!strcmp(region->value->type, "io")) { - monitor_printf(mon, "I/O at 0x%04" PRIx64 - " [0x%04" PRIx64 "].\n", - addr, addr + size - 1); - } else { - monitor_printf(mon, "%d bit%s memory at 0x%08" PRIx64 - " [0x%08" PRIx64 "].\n", - region->value->mem_type_64 ? 64 : 32, - region->value->prefetch ? " prefetchable" : "", - addr, addr + size - 1); + if (addr != PCI_BAR_UNMAPPED) { + monitor_printf(mon, "I/O at 0x%04" PRIx64 + " [0x%04" PRIx64 "]\n", + addr, addr + size - 1); + } else { + monitor_printf(mon, "I/O (not mapped)\n"); + } + } else { + if (addr != PCI_BAR_UNMAPPED) { + monitor_printf(mon, "%d bit%s memory at 0x%08" PRIx64 + " [0x%08" PRIx64 "]\n", + region->value->mem_type_64 ? 64 : 32, + region->value->prefetch ? " prefetchable" : "", + addr, addr + size - 1); + } else { + monitor_printf(mon, "%d bit%s memory (not mapped)\n", + region->value->mem_type_64 ? 64 : 32, + region->value->prefetch ? " prefetchable" : ""); + } } } @@ -916,7 +949,8 @@ void hmp_sync_profile(Monitor *mon, const QDict *qdict) } else { Error *err = NULL; - error_setg(&err, QERR_INVALID_PARAMETER, op); + error_setg(&err, "invalid parameter '%s'," + " expecting 'on', 'off', or 'reset'", op); hmp_handle_error(mon, err); } } @@ -1191,6 +1225,7 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) MigrateSetParameters *p = g_new0(MigrateSetParameters, 1); uint64_t valuebw = 0; uint64_t cache_size; + CompressMethod compress_method; Error *err = NULL; int val, ret; @@ -1216,6 +1251,14 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) p->has_decompress_threads = true; visit_type_uint8(v, param, &p->decompress_threads, &err); break; + case MIGRATION_PARAMETER_COMPRESS_METHOD: + p->has_compress_method = true; + visit_type_CompressMethod(v, param, &compress_method, &err); + if (err) { + break; + } + p->compress_method = compress_method; + break; case MIGRATION_PARAMETER_THROTTLE_TRIGGER_THRESHOLD: p->has_throttle_trigger_threshold = true; visit_type_uint8(v, param, &p->throttle_trigger_threshold, &err); @@ -1332,6 +1375,14 @@ void hmp_migrate_set_parameter(Monitor *mon, const QDict *qdict) error_setg(&err, "The block-bitmap-mapping parameter can only be set " "through QMP"); break; + case MIGRATION_PARAMETER_X_VCPU_DIRTY_LIMIT_PERIOD: + p->has_x_vcpu_dirty_limit_period = true; + visit_type_size(v, param, &p->x_vcpu_dirty_limit_period, &err); + break; + case MIGRATION_PARAMETER_VCPU_DIRTY_LIMIT: + p->has_vcpu_dirty_limit = true; + visit_type_size(v, param, &p->vcpu_dirty_limit, &err); + break; default: assert(0); } @@ -1810,6 +1861,7 @@ void hmp_info_memory_devices(Monitor *mon, const QDict *qdict) se->id ? 
se->id : "");
monitor_printf(mon, " memaddr: 0x%" PRIx64 "\n", se->memaddr);
monitor_printf(mon, " size: %" PRIu64 "\n", se->size);
+ monitor_printf(mon, " node: %" PRId64 "\n", se->node);
monitor_printf(mon, " memdev: %s\n", se->memdev);
break;
default:
diff --git a/monitor/hmp.c b/monitor/hmp.c
index b20737e63c3bbc1b1d9615a971dffa45bb0f7c9d..9cbbe528127222af3eb965ebac56f95d6ebf4856 100644
--- a/monitor/hmp.c
+++ b/monitor/hmp.c
@@ -285,10 +285,15 @@ void help_cmd(Monitor *mon, const char *name)
if (!strcmp(name, "log")) {
const QEMULogItem *item;
monitor_printf(mon, "Log items (comma separated):\n");
- monitor_printf(mon, "%-10s %s\n", "none", "remove all logs");
+ monitor_printf(mon, "%-15s %s\n", "none", "remove all logs");
for (item = qemu_log_items; item->mask != 0; item++) {
- monitor_printf(mon, "%-10s %s\n", item->name, item->help);
+ monitor_printf(mon, "%-15s %s\n", item->name, item->help);
}
+#ifdef CONFIG_TRACE_LOG
+ monitor_printf(mon, "trace:PATTERN enable trace events\n");
+ monitor_printf(mon, "\nUse \"log trace:help\" to get a list of "
+ "trace events.\n\n");
+#endif
return;
}
diff --git a/monitor/misc.c b/monitor/misc.c
index a3a6e478444e80200d325e28075cf26c7a5b793b..25a23e22909d061fbfed5ade2b87ed36a2397c7e 100644
--- a/monitor/misc.c
+++ b/monitor/misc.c
@@ -397,7 +397,7 @@ void qmp_client_migrate_info(const char *protocol, const char *hostname,
}
if (!has_port && !has_tls_port) {
- error_setg(errp, QERR_MISSING_PARAMETER, "port/tls-port");
+ error_setg(errp, "parameter 'port' or 'tls-port' is required");
return;
}
diff --git a/monitor/monitor-internal.h b/monitor/monitor-internal.h
index 3da3f86c6ae691d5dfaf7fd587132b96969d4350..5435864add2cb5f4060428b4d9618de5c549e02e 100644
--- a/monitor/monitor-internal.h
+++ b/monitor/monitor-internal.h
@@ -144,6 +144,7 @@ typedef struct {
const QmpCommandList *commands;
bool capab_offered[QMP_CAPABILITY__MAX]; /* capabilities offered */
bool capab[QMP_CAPABILITY__MAX]; /* offered and accepted */
+ uint64_t qmp_client_id; /* QMP client id, updated when the peer disconnects */
/*
* Protects qmp request/response queue.
* Take monitor_lock first when you need both.
diff --git a/monitor/monitor.c b/monitor/monitor.c
index 21c7a68758f552cfe17cfcf7d5c946b19e112c93..257ef4ee5430b0ceca4a9f055831ef74630de803 100644
--- a/monitor/monitor.c
+++ b/monitor/monitor.c
@@ -23,16 +23,19 @@
*/
#include "qemu/osdep.h"
+#include "qemu/log.h"
#include "monitor-internal.h"
#include "qapi/error.h"
#include "qapi/opts-visitor.h"
#include "qapi/qapi-emit-events.h"
#include "qapi/qapi-visit-control.h"
#include "qapi/qmp/qdict.h"
+#include "qapi/qmp/qjson.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "sysemu/qtest.h"
#include "trace.h"
+#include "qapi/qmp/qobject.h"
/*
* To prevent flooding clients, events can be throttled.
The @@ -318,6 +323,7 @@ static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict) { Monitor *mon; MonitorQMP *qmp_mon; + GString *json; trace_monitor_protocol_event_emit(event, qdict); QTAILQ_FOREACH(mon, &mon_list, entry) { @@ -328,6 +334,13 @@ static void monitor_qapi_event_emit(QAPIEvent event, QDict *qdict) qmp_mon = container_of(mon, MonitorQMP, common); if (qmp_mon->commands != &qmp_cap_negotiation_commands) { qmp_send_response(qmp_mon, qdict); + json = qobject_to_json(QOBJECT(qdict)); + if (json) { + if (!strstr(json->str, "RTC_CHANGE")) { + qemu_log("%s\n", json->str); + } + g_string_free(json, true); + } } } } @@ -758,6 +771,33 @@ int monitor_init_opts(QemuOpts *opts, Error **errp) return ret; } +void monitor_qapi_event_discard_io_error(void) +{ + GHashTableIter event_iter; + MonitorQAPIEventState *evstate; + gpointer key, value; + GString *json; + + qemu_mutex_lock(&monitor_lock); + g_hash_table_iter_init(&event_iter, monitor_qapi_event_state); + while (g_hash_table_iter_next(&event_iter, &key, &value)) { + evstate = key; + /* Only QAPI_EVENT_BLOCK_IO_ERROR is discarded */ + if (evstate->event == QAPI_EVENT_BLOCK_IO_ERROR) { + g_hash_table_iter_remove(&event_iter); + json = qobject_to_json(QOBJECT(evstate->qdict)); + qemu_log(" %s event discarded\n", json->str); + timer_del(evstate->timer); + timer_free(evstate->timer); + qobject_unref(evstate->data); + qobject_unref(evstate->qdict); + g_string_free(json, true); + g_free(evstate); + } + } + qemu_mutex_unlock(&monitor_lock); +} + QemuOptsList qemu_mon_opts = { .name = "mon", .implied_opt_name = "chardev", diff --git a/monitor/qmp-cmds.c b/monitor/qmp-cmds.c index 343353e27a7acaafe661f17a6002144d817a116e..b44cca8234a2c9a24a65a465361a17a940eb6d90 100644 --- a/monitor/qmp-cmds.c +++ b/monitor/qmp-cmds.c @@ -21,6 +21,7 @@ #include "sysemu/sysemu.h" #include "qemu/config-file.h" #include "qemu/uuid.h" +#include "qemu/log.h" #include "chardev/char.h" #include "ui/qemu-spice.h" #include "ui/console.h" @@ -150,8 +151,10 @@ void qmp_cont(Error **errp) } if (runstate_check(RUN_STATE_INMIGRATE)) { + qemu_log("qmp cont is received in migration\n"); autostart = 1; } else { + qemu_log("qmp cont is received and vm is started\n"); vm_start(); } } @@ -196,7 +199,8 @@ void qmp_set_password(const char *protocol, const char *password, } else if (strcmp(protocol, "vnc") == 0) { if (fail_if_connected || disconnect_if_connected) { /* vnc supports "connected=keep" only */ - error_setg(errp, QERR_INVALID_PARAMETER, "connected"); + error_setg(errp, "parameter 'connected' must be 'keep'" + " when 'protocol' is 'vnc'"); return; } /* Note that setting an empty password will not disable login through @@ -466,3 +470,8 @@ HumanReadableText *qmp_x_query_irq(Error **errp) return human_readable_text_from_str(buf); } + +int64_t qmp_query_rtc_date_diff(Error **errp) +{ + return get_rtc_date_diff(); +} diff --git a/monitor/qmp.c b/monitor/qmp.c index 092c527b6fc9c6363f4bf81d85736144b656d038..4d1ac66785d673a28521e3e30bfb118dff6191db 100644 --- a/monitor/qmp.c +++ b/monitor/qmp.c @@ -125,18 +125,19 @@ void qmp_send_response(MonitorQMP *mon, const QDict *rsp) * Null @rsp can only happen for commands with QCO_NO_SUCCESS_RESP. * Nothing is emitted then. 
*/ -static void monitor_qmp_respond(MonitorQMP *mon, QDict *rsp) +static void monitor_qmp_respond(MonitorQMP *mon, QDict *rsp, uint64_t req_client_id) { - if (rsp) { - qmp_send_response(mon, rsp); + if (!rsp || (mon->qmp_client_id != req_client_id)) { + return; } + qmp_send_response(mon, rsp); } /* * Runs outside of coroutine context for OOB commands, but in * coroutine context for everything else. */ -static void monitor_qmp_dispatch(MonitorQMP *mon, QObject *req) +static void monitor_qmp_dispatch(MonitorQMP *mon, QObject *req, uint64_t req_client_id) { QDict *rsp; QDict *error; @@ -156,7 +157,7 @@ static void monitor_qmp_dispatch(MonitorQMP *mon, QObject *req) } } - monitor_qmp_respond(mon, rsp); + monitor_qmp_respond(mon, rsp, req_client_id); qobject_unref(rsp); } @@ -315,13 +316,13 @@ void coroutine_fn monitor_qmp_dispatcher_co(void *data) trace_monitor_qmp_cmd_in_band(id_json->str); g_string_free(id_json, true); } - monitor_qmp_dispatch(mon, req_obj->req); + monitor_qmp_dispatch(mon, req_obj->req, mon->qmp_client_id); } else { assert(req_obj->err); trace_monitor_qmp_err_in_band(error_get_pretty(req_obj->err)); rsp = qmp_error_response(req_obj->err); req_obj->err = NULL; - monitor_qmp_respond(mon, rsp); + monitor_qmp_respond(mon, rsp, mon->qmp_client_id); qobject_unref(rsp); } @@ -366,7 +367,7 @@ static void handle_qmp_command(void *opaque, QObject *req, Error *err) trace_monitor_qmp_cmd_out_of_band(id_json->str); g_string_free(id_json, true); } - monitor_qmp_dispatch(mon, req); + monitor_qmp_dispatch(mon, req, mon->qmp_client_id); qobject_unref(req); return; } @@ -452,6 +453,7 @@ static void monitor_qmp_event(void *opaque, QEMUChrEvent event) mon_refcount++; break; case CHR_EVENT_CLOSED: + mon->qmp_client_id++; /* * Note: this is only useful when the output of the chardev * backend is still open. 
For example, when the backend is @@ -505,6 +507,7 @@ void monitor_init_qmp(Chardev *chr, bool pretty, Error **errp) } qemu_chr_fe_set_echo(&mon->common.chr, true); + mon->qmp_client_id = 1; /* Note: we run QMP monitor in I/O thread when @chr supports that */ monitor_data_init(&mon->common, true, false, qemu_chr_has_feature(chr, QEMU_CHAR_FEATURE_GCONTEXT)); diff --git a/nbd/client.c b/nbd/client.c index 30d5383cb1952fbc4ee5ad57f930217d46e11852..8ed50140f2d4b3fa1627ee511d819b6578a5539c 100644 --- a/nbd/client.c +++ b/nbd/client.c @@ -24,6 +24,8 @@ #include "nbd-internal.h" #include "qemu/cutils.h" +#define NBD_TIMEOUT_SECONDS 30 + /* Definitions for opaque data types */ static QTAILQ_HEAD(, NBDExport) exports = QTAILQ_HEAD_INITIALIZER(exports); @@ -1301,6 +1303,12 @@ int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info, } } + if (ioctl(fd, NBD_SET_TIMEOUT, NBD_TIMEOUT_SECONDS) < 0) { + int serrno = errno; + error_setg(errp, "Failed setting timeout"); + return -serrno; + } + trace_nbd_init_finish(); return 0; diff --git a/nbd/server.c b/nbd/server.c index 4630dd732250bea820157bbc7b1a9027fbb7773c..026e0b5919ae2fbc163c5930a9c9bcea28b72643 100644 --- a/nbd/server.c +++ b/nbd/server.c @@ -120,10 +120,12 @@ typedef struct NBDExportMetaContexts { struct NBDClient { int refcount; void (*close_fn)(NBDClient *client, bool negotiated); + void *owner; NBDExport *exp; QCryptoTLSCreds *tlscreds; char *tlsauthz; + uint32_t handshake_max_secs; QIOChannelSocket *sioc; /* The underlying data channel */ QIOChannel *ioc; /* The current I/O channel which may differ (eg TLS) */ @@ -1801,7 +1803,7 @@ static void nbd_export_request_shutdown(BlockExport *blk_exp) blk_exp_ref(&exp->common); /* - * TODO: Should we expand QMP NbdServerRemoveNode enum to allow a + * TODO: Should we expand QMP BlockExportRemoveMode enum to allow a * close mode that stops advertising the export to new clients but * still permits existing clients to run to completion? Because of * that possibility, nbd_export_close() can be called more than @@ -2606,6 +2608,7 @@ static coroutine_fn void nbd_trip(void *opaque) NBDRequestData *req; NBDRequest request = { 0 }; /* GCC thinks it can be used uninitialized */ int ret; + bool client_closing; Error *local_err = NULL; trace_nbd_trip(); @@ -2681,8 +2684,11 @@ disconnect: if (local_err) { error_reportf_err(local_err, "Disconnect client, due to: "); } + client_closing = client->closing; nbd_request_put(req); - client_close(client, true); + if (!client_closing) { + client_close(client, true); + } nbd_client_put(client); } @@ -2696,33 +2702,63 @@ static void nbd_client_receive_next_request(NBDClient *client) } } +static void nbd_handshake_timer_cb(void *opaque) +{ + QIOChannel *ioc = opaque; + + trace_nbd_handshake_timer_cb(); + qio_channel_shutdown(ioc, QIO_CHANNEL_SHUTDOWN_BOTH, NULL); +} + static coroutine_fn void nbd_co_client_start(void *opaque) { NBDClient *client = opaque; Error *local_err = NULL; + QEMUTimer *handshake_timer = NULL; qemu_co_mutex_init(&client->send_lock); + /* + * Create a timer to bound the time spent in negotiation. If the + * timer expires, it is likely nbd_negotiate will fail because the + * socket was shutdown. 
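The handshake timer above bounds NBD negotiation by shutting the socket down when it fires, so the blocked negotiation fails promptly instead of pinning the server forever. A standalone POSIX sketch of the same trick, assuming a reader blocked on a socket the peer never serves:

    #include <pthread.h>
    #include <stdio.h>
    #include <sys/socket.h>
    #include <unistd.h>

    struct deadline { int fd; unsigned secs; };

    static void *deadline_thread(void *opaque)
    {
        struct deadline *d = opaque;
        sleep(d->secs);
        shutdown(d->fd, SHUT_RDWR);   /* unblocks the stuck read() below */
        return NULL;
    }

    int main(void)
    {
        int sv[2];
        char byte;
        pthread_t thr;
        struct deadline d = { .secs = 2 };

        socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
        d.fd = sv[0];
        pthread_create(&thr, NULL, deadline_thread, &d);

        ssize_t n = read(sv[0], &byte, 1);   /* "handshake": peer is silent */
        printf("read returned %zd: negotiation aborted by deadline\n", n);

        pthread_join(thr, NULL);
        close(sv[0]);
        close(sv[1]);
        return 0;
    }

In the patch itself the timer is freed on both the success and failure paths once nbd_negotiate() returns, so a handshake that completes in time never sees the shutdown.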
+ */ + if (client->handshake_max_secs > 0) { + handshake_timer = aio_timer_new(qemu_get_aio_context(), + QEMU_CLOCK_REALTIME, + SCALE_NS, + nbd_handshake_timer_cb, + client->sioc); + timer_mod(handshake_timer, + qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + + client->handshake_max_secs * NANOSECONDS_PER_SECOND); + } + if (nbd_negotiate(client, &local_err)) { if (local_err) { error_report_err(local_err); } + timer_free(handshake_timer); client_close(client, false); return; } + timer_free(handshake_timer); nbd_client_receive_next_request(client); } /* - * Create a new client listener using the given channel @sioc. + * Create a new client listener using the given channel @sioc and @owner. * Begin servicing it in a coroutine. When the connection closes, call - * @close_fn with an indication of whether the client completed negotiation. + * @close_fn with an indication of whether the client completed negotiation + * within @handshake_max_secs seconds (0 for unbounded). */ void nbd_client_new(QIOChannelSocket *sioc, + uint32_t handshake_max_secs, QCryptoTLSCreds *tlscreds, const char *tlsauthz, - void (*close_fn)(NBDClient *, bool)) + void (*close_fn)(NBDClient *, bool), + void *owner) { NBDClient *client; Coroutine *co; @@ -2734,12 +2770,20 @@ void nbd_client_new(QIOChannelSocket *sioc, object_ref(OBJECT(client->tlscreds)); } client->tlsauthz = g_strdup(tlsauthz); + client->handshake_max_secs = handshake_max_secs; client->sioc = sioc; object_ref(OBJECT(client->sioc)); client->ioc = QIO_CHANNEL(sioc); object_ref(OBJECT(client->ioc)); client->close_fn = close_fn; + client->owner = owner; co = qemu_coroutine_create(nbd_co_client_start, client); qemu_coroutine_enter(co); } + +void * +nbd_client_owner(NBDClient *client) +{ + return client->owner; +} diff --git a/nbd/trace-events b/nbd/trace-events index c4919a2dd581b642acb0b1e7cd1d4105ca4d0477..553546f1f273a6875cad9803a89841713d2c9c7c 100644 --- a/nbd/trace-events +++ b/nbd/trace-events @@ -73,3 +73,4 @@ nbd_co_receive_request_decode_type(uint64_t handle, uint16_t type, const char *n nbd_co_receive_request_payload_received(uint64_t handle, uint32_t len) "Payload received: handle = %" PRIu64 ", len = %" PRIu32 nbd_co_receive_align_compliance(const char *op, uint64_t from, uint32_t len, uint32_t align) "client sent non-compliant unaligned %s request: from=0x%" PRIx64 ", len=0x%" PRIx32 ", align=0x%" PRIx32 nbd_trip(void) "Reading request" +nbd_handshake_timer_cb(void) "client took too long to negotiate" diff --git a/net/colo-compare.c b/net/colo-compare.c index b966e7e514d031f67d7ddd9a482d7d1ee4cb4d1d..e845c63f2e522e4f8f4818d23155a359fa258099 100644 --- a/net/colo-compare.c +++ b/net/colo-compare.c @@ -414,8 +414,7 @@ static void colo_compare_tcp(CompareState *s, Connection *conn) * can ensure that the packet's payload is acknowledged by * primary and secondary. */ - uint32_t min_ack = conn->pack - conn->sack > 0 ? 
- conn->sack : conn->pack; + uint32_t min_ack = MIN(conn->pack, conn->sack); pri: if (g_queue_is_empty(&conn->primary_list)) { diff --git a/net/dump.c b/net/dump.c index a07ba624011c741dafe9690d795950aee81a1258..c32d3bf4e613c38a3cf540f16dfd57bcc89611cc 100644 --- a/net/dump.c +++ b/net/dump.c @@ -86,7 +86,7 @@ static ssize_t dump_receive_iov(DumpState *s, const struct iovec *iov, int cnt) dumpiov[0].iov_len = sizeof(hdr); cnt = iov_copy(&dumpiov[1], cnt, iov, cnt, 0, caplen); - if (writev(s->fd, dumpiov, cnt + 1) != sizeof(hdr) + caplen) { + if (writev(s->fd, &dumpiov[0], cnt + 1) != sizeof(hdr) + caplen) { error_report("network dump write error - stopping dump"); close(s->fd); s->fd = -1; diff --git a/net/eth.c b/net/eth.c index fe876d1a554dce47113aa9b87891c14b4e501bd3..f074b2f9f3bb60733ceff1e105f7fe28db026c9d 100644 --- a/net/eth.c +++ b/net/eth.c @@ -389,7 +389,6 @@ eth_is_ip6_extension_header_type(uint8_t hdr_type) case IP6_HOP_BY_HOP: case IP6_ROUTING: case IP6_FRAGMENT: - case IP6_ESP: case IP6_AUTHENTICATION: case IP6_DESTINATON: case IP6_MOBILITY: diff --git a/net/net.c b/net/net.c index f0d14dbfc1f0f929eb93da9e789dcb4560366054..abdb9dfdc50a1e8eb02fdb7de8310dac288155ed 100644 --- a/net/net.c +++ b/net/net.c @@ -299,6 +299,7 @@ NICState *qemu_new_nic(NetClientInfo *info, NICConf *conf, const char *model, const char *name, + MemReentrancyGuard *reentrancy_guard, void *opaque) { NetClientState **peers = conf->peers.ncs; @@ -311,6 +312,7 @@ NICState *qemu_new_nic(NetClientInfo *info, nic = g_malloc0(info->size + sizeof(NetClientState) * queues); nic->ncs = (void *)nic + info->size; nic->conf = conf; + nic->reentrancy_guard = reentrancy_guard, nic->opaque = opaque; for (i = 0; i < queues; i++) { @@ -766,6 +768,7 @@ static ssize_t qemu_deliver_packet_iov(NetClientState *sender, int iovcnt, void *opaque) { + MemReentrancyGuard *owned_reentrancy_guard; NetClientState *nc = opaque; int ret; @@ -778,12 +781,24 @@ static ssize_t qemu_deliver_packet_iov(NetClientState *sender, return 0; } + if (nc->info->type != NET_CLIENT_DRIVER_NIC || + qemu_get_nic(nc)->reentrancy_guard->engaged_in_io) { + owned_reentrancy_guard = NULL; + } else { + owned_reentrancy_guard = qemu_get_nic(nc)->reentrancy_guard; + owned_reentrancy_guard->engaged_in_io = true; + } + if (nc->info->receive_iov && !(flags & QEMU_NET_PACKET_FLAG_RAW)) { ret = nc->info->receive_iov(nc, iov, iovcnt); } else { ret = nc_sendv_compat(nc, iov, iovcnt, flags); } + if (owned_reentrancy_guard) { + owned_reentrancy_guard->engaged_in_io = false; + } + if (ret == 0) { nc->receive_disabled = 1; } @@ -1122,7 +1137,7 @@ static int net_client_init(QemuOpts *opts, bool is_netdev, Error **errp) int ret = -1; Visitor *v = opts_visitor_new(opts); - /* Parse convenience option format ip6-net=fec0::0[/64] */ + /* Parse convenience option format ipv6-net=fec0::0[/64] */ const char *ip6_net = qemu_opt_get(opts, "ipv6-net"); if (ip6_net) { @@ -1142,8 +1157,8 @@ static int net_client_init(QemuOpts *opts, bool is_netdev, Error **errp) if (substrings[1] && qemu_strtoul(substrings[1], NULL, 10, &prefix_len)) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, - "ipv6-prefixlen", "a number"); + error_setg(errp, + "parameter 'ipv6-net' expects a number after '/'"); goto out; } @@ -1202,6 +1217,12 @@ void qmp_netdev_del(const char *id, Error **errp) return; } + if (nc->info->type == NET_CLIENT_DRIVER_VHOST_USER && nc->peer) { + error_setg(errp, "Device '%s' is a netdev for vhostuser," + "please delete the peer front-end device (virtio-net) first.", id); + return; 
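The colo-compare hunk above is a correctness fix, not just a cleanup: the sequence counters are unsigned, so `conn->pack - conn->sack > 0` holds whenever the two values differ, and the old expression returned the larger value whenever pack < sack. A standalone demonstration:

    /* Why `a - b > 0` is not `a > b` for unsigned integers. */
    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        uint32_t pack = 100, sack = 200;

        /* 100u - 200u wraps to 4294967196, which is still "> 0". */
        uint32_t old_min = pack - sack > 0 ? sack : pack;
        uint32_t new_min = MIN(pack, sack);

        printf("old: %u (wrong), new: %u (correct)\n", old_min, new_min);
        return 0;
    }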
+ } + qemu_del_net_client(nc); /* diff --git a/net/tap-linux.c b/net/tap-linux.c index 95847697401009aa5b146e2012c2da987694cc68..5e70b9303710d3c2eef4bf6f2b0170f5314714a8 100644 --- a/net/tap-linux.c +++ b/net/tap-linux.c @@ -150,6 +150,7 @@ void tap_set_sndbuf(int fd, const NetdevTapOptions *tap, Error **errp) int tap_probe_vnet_hdr(int fd, Error **errp) { struct ifreq ifr; + memset(&ifr, 0, sizeof(ifr)); if (ioctl(fd, TUNGETIFF, &ifr) != 0) { /* TUNGETIFF is available since kernel v2.6.27 */ diff --git a/net/tap.c b/net/tap.c index f716be3e3fba2383eddf8391c728ddc4938b6f17..3f79cd06c2622e3c387ab4bf307e654069ed96dd 100644 --- a/net/tap.c +++ b/net/tap.c @@ -684,7 +684,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, tap_set_sndbuf(s->fd, tap, &err); if (err) { error_propagate(errp, err); - return; + goto fail; } if (tap->has_fd || tap->has_fds) { @@ -726,13 +726,13 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, } else { warn_report_err(err); } - return; + goto fail; } ret = qemu_try_set_nonblock(vhostfd); if (ret < 0) { error_setg_errno(errp, -ret, "%s: Can't use file descriptor %d", name, fd); - return; + goto fail; } } else { vhostfd = open("/dev/vhost-net", O_RDWR); @@ -744,7 +744,7 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, warn_report("tap: open vhost char device failed: %s", strerror(errno)); } - return; + goto fail; } qemu_set_nonblock(vhostfd); } @@ -758,11 +758,17 @@ static void net_init_tap_one(const NetdevTapOptions *tap, NetClientState *peer, } else { warn_report(VHOST_NET_INIT_FAILED); } - return; + goto fail; } } else if (vhostfdname) { error_setg(errp, "vhostfd(s)= is not valid without vhost"); + goto fail; } + + return; + +fail: + qemu_del_net_client(&s->nc); } static int get_fds(char *str, char *fds[], int max) @@ -900,6 +906,7 @@ int net_init_tap(const Netdev *netdev, const char *name, if (i == 0) { vnet_hdr = tap_probe_vnet_hdr(fd, errp); if (vnet_hdr < 0) { + ret = -1; goto free_fail; } } else if (vnet_hdr != tap_probe_vnet_hdr(fd, NULL)) { diff --git a/net/vhost-user.c b/net/vhost-user.c index b1a0247b59818ca5ee29a1c2ed5663c4469cf865..e3680b769f4222fb163357c43c642b0644a7833b 100644 --- a/net/vhost-user.c +++ b/net/vhost-user.c @@ -20,6 +20,9 @@ #include "qemu/error-report.h" #include "qemu/option.h" #include "trace.h" +#include "include/hw/virtio/vhost.h" + +#define VHOST_USER_RECONNECT_TIME (3) typedef struct NetVhostUserState { NetClientState nc; @@ -45,10 +48,23 @@ uint64_t vhost_user_get_acked_features(NetClientState *nc) return s->acked_features; } -static void vhost_user_stop(int queues, NetClientState *ncs[]) +void vhost_user_save_acked_features(NetClientState *nc) { NetVhostUserState *s; + + s = DO_UPCAST(NetVhostUserState, nc, nc); + if (s->vhost_net) { + uint64_t features = vhost_net_get_acked_features(s->vhost_net); + if (features) { + s->acked_features = features; + } + } +} + +static void vhost_user_stop(int queues, NetClientState *ncs[]) +{ int i; + NetVhostUserState *s; for (i = 0; i < queues; i++) { assert(ncs[i]->info->type == NET_CLIENT_DRIVER_VHOST_USER); @@ -56,11 +72,7 @@ static void vhost_user_stop(int queues, NetClientState *ncs[]) s = DO_UPCAST(NetVhostUserState, nc, ncs[i]); if (s->vhost_net) { - /* save acked features */ - uint64_t features = vhost_net_get_acked_features(s->vhost_net); - if (features) { - s->acked_features = features; - } + vhost_user_save_acked_features(ncs[i]); vhost_net_cleanup(s->vhost_net); } } @@ -251,11 +263,7 
@@ static void chr_closed_bh(void *opaque) s = DO_UPCAST(NetVhostUserState, nc, ncs[0]); for (i = queues -1; i >= 0; i--) { - s = DO_UPCAST(NetVhostUserState, nc, ncs[i]); - - if (s->vhost_net) { - s->acked_features = vhost_net_get_acked_features(s->vhost_net); - } + vhost_user_save_acked_features(ncs[i]); } qmp_set_link(name, false, &err); @@ -287,6 +295,7 @@ static void net_vhost_user_event(void *opaque, QEMUChrEvent event) trace_vhost_user_event(chr->label, event); switch (event) { case CHR_EVENT_OPENED: + qemu_chr_set_reconnect_time(chr, VHOST_USER_RECONNECT_TIME); if (vhost_user_start(queues, ncs, s->vhost_user) < 0) { qemu_chr_fe_disconnect(&s->chr); return; @@ -366,6 +375,11 @@ static int net_vhost_user_init(NetClientState *peer, const char *device, qemu_chr_fe_set_handlers(&s->chr, NULL, NULL, net_vhost_user_event, NULL, nc0->name, NULL, true); + if (used_memslots_is_exceeded()) { + error_report("used memslots exceeded the backend limit, quit " + "loop"); + goto err; + } } while (!s->started); assert(s->vhost_net); diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c index 25dd6dd97547c66201f092040eba87fad66a5dff..a1b931ae2c34e16a5fe4e7a3582fc606cd5b336e 100644 --- a/net/vhost-vdpa.c +++ b/net/vhost-vdpa.c @@ -11,11 +11,13 @@ #include "qemu/osdep.h" #include "clients.h" +#include "hw/virtio/virtio-net.h" #include "net/vhost_net.h" #include "net/vhost-vdpa.h" #include "hw/virtio/vhost-vdpa.h" #include "qemu/config-file.h" #include "qemu/error-report.h" +#include "qemu/log.h" #include "qemu/option.h" #include "qapi/error.h" #include @@ -30,6 +32,13 @@ typedef struct VhostVDPAState { NetClientState nc; struct vhost_vdpa vhost_vdpa; VHostNetState *vhost_net; + + /* Control commands shadow buffers */ + void *cvq_cmd_out_buffer; + virtio_net_ctrl_ack *status; + + /* The device always have SVQ enabled */ + bool always_svq; bool started; } VhostVDPAState; @@ -55,7 +64,6 @@ const int vdpa_feature_bits[] = { VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_RX_EXTRA, VIRTIO_NET_F_CTRL_VLAN, - VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_CTRL_MAC_ADDR, VIRTIO_NET_F_RSS, VIRTIO_NET_F_MQ, @@ -64,11 +72,35 @@ const int vdpa_feature_bits[] = { VIRTIO_F_RING_PACKED, VIRTIO_NET_F_RSS, VIRTIO_NET_F_HASH_REPORT, - VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_STATUS, VHOST_INVALID_FEATURE_BIT }; +/** Supported device specific feature bits with SVQ */ +static const uint64_t vdpa_svq_device_features = + BIT_ULL(VIRTIO_NET_F_CSUM) | + BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) | + BIT_ULL(VIRTIO_NET_F_MTU) | + BIT_ULL(VIRTIO_NET_F_MAC) | + BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) | + BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) | + BIT_ULL(VIRTIO_NET_F_GUEST_ECN) | + BIT_ULL(VIRTIO_NET_F_GUEST_UFO) | + BIT_ULL(VIRTIO_NET_F_HOST_TSO4) | + BIT_ULL(VIRTIO_NET_F_HOST_TSO6) | + BIT_ULL(VIRTIO_NET_F_HOST_ECN) | + BIT_ULL(VIRTIO_NET_F_HOST_UFO) | + BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) | + BIT_ULL(VIRTIO_NET_F_STATUS) | + BIT_ULL(VIRTIO_NET_F_CTRL_VQ) | + BIT_ULL(VIRTIO_NET_F_MQ) | + BIT_ULL(VIRTIO_F_ANY_LAYOUT) | + BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) | + BIT_ULL(VIRTIO_NET_F_RSC_EXT) | + BIT_ULL(VIRTIO_NET_F_STANDBY); + +#define VHOST_VDPA_NET_CVQ_ASID 1 + VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); @@ -76,6 +108,23 @@ VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc) return s->vhost_net; } +static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp) +{ + uint64_t invalid_dev_features = + features & ~vdpa_svq_device_features & + /* Transport are all accepted at 
this point */ + ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START, + VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START); + + if (invalid_dev_features) { + error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64, + invalid_dev_features); + return false; + } + + return vhost_svq_valid_features(features, errp); +} + static int vhost_vdpa_net_check_device_id(struct vhost_net *net) { uint32_t device_id; @@ -127,7 +176,22 @@ err_init: static void vhost_vdpa_cleanup(NetClientState *nc) { VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + struct vhost_dev *dev = &s->vhost_net->dev; + + /* + * If a peer NIC is attached, do not cleanup anything. + * Cleanup will happen as a part of qemu_cleanup() -> net_cleanup() + * when the guest is shutting down. + */ + if (nc->peer && nc->peer->info->type == NET_CLIENT_DRIVER_NIC) { + return; + } + qemu_vfree(s->cvq_cmd_out_buffer); + qemu_vfree(s->status); + if (dev->vq_index + dev->nvqs == dev->vq_index_end) { + g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); + } if (s->vhost_net) { vhost_net_cleanup(s->vhost_net); g_free(s->vhost_net); @@ -174,7 +238,7 @@ static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc, static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf, size_t size) { - return 0; + return size; } static NetClientInfo net_vhost_vdpa_info = { @@ -187,13 +251,444 @@ static NetClientInfo net_vhost_vdpa_info = { .check_peer_type = vhost_vdpa_check_peer_type, }; +static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index) +{ + struct vhost_vring_state state = { + .index = vq_index, + }; + int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state); + + if (unlikely(r < 0)) { + error_report("Cannot get VQ %u group: %s", vq_index, + g_strerror(errno)); + return r; + } + + return state.num; +} + +static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v, + unsigned vq_group, + unsigned asid_num) +{ + struct vhost_vring_state asid = { + .index = vq_group, + .num = asid_num, + }; + int r; + + r = ioctl(v->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid); + if (unlikely(r < 0)) { + error_report("Can't set vq group %u asid %u, errno=%d (%s)", + asid.index, asid.num, errno, g_strerror(errno)); + } + return r; +} + +static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr) +{ + VhostIOVATree *tree = v->iova_tree; + DMAMap needle = { + /* + * No need to specify size or to look for more translations since + * this contiguous chunk was allocated by us. + */ + .translated_addr = (hwaddr)(uintptr_t)addr, + }; + const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle); + int r; + + if (unlikely(!map)) { + error_report("Cannot locate expected map"); + return; + } + + r = vhost_vdpa_dma_unmap(v, v->address_space_id, map->iova, map->size + 1); + if (unlikely(r != 0)) { + error_report("Device cannot unmap: %s(%d)", g_strerror(r), r); + } + + vhost_iova_tree_remove(tree, *map); +} + +static size_t vhost_vdpa_net_cvq_cmd_len(void) +{ + /* + * MAC_TABLE_SET is the ctrl command that produces the longer out buffer. + * In buffer is always 1 byte, so it should fit here + */ + return sizeof(struct virtio_net_ctrl_hdr) + + 2 * sizeof(struct virtio_net_ctrl_mac) + + MAC_TABLE_ENTRIES * ETH_ALEN; +} + +static size_t vhost_vdpa_net_cvq_cmd_page_len(void) +{ + return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size); +} + +/** Map CVQ buffer. 
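vhost_vdpa_net_valid_svq_features() above clears both the supported device-feature set and the entire transport-feature window before reporting anything, so only genuinely unsupported device bits surface in the error. A standalone sketch of the masking (the supported bits are chosen for illustration; 28..38 is the virtio transport window):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT_ULL(n)  (1ULL << (n))
    #define MAKE_64BIT_MASK(shift, length) \
        (((~0ULL) >> (64 - (length))) << (shift))

    #define TRANSPORT_F_START 28
    #define TRANSPORT_F_END   38

    int main(void)
    {
        uint64_t supported = BIT_ULL(0) | BIT_ULL(5);  /* e.g. CSUM, MAC */
        uint64_t features  = BIT_ULL(0) | BIT_ULL(32) | BIT_ULL(40);

        /* Bit 32 falls in the transport window and is ignored here;
         * bit 40 is a device feature nobody claimed to support. */
        uint64_t invalid = features & ~supported &
                           ~MAKE_64BIT_MASK(TRANSPORT_F_START,
                                            TRANSPORT_F_END - TRANSPORT_F_START);

        if (invalid) {
            printf("unsupported feature bits: 0x%" PRIx64 "\n", invalid);
            return 1;
        }
        return 0;
    }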
*/ +static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size, + bool write) +{ + DMAMap map = {}; + int r; + + map.translated_addr = (hwaddr)(uintptr_t)buf; + map.size = size - 1; + map.perm = write ? IOMMU_RW : IOMMU_RO, + r = vhost_iova_tree_map_alloc(v->iova_tree, &map); + if (unlikely(r != IOVA_OK)) { + error_report("Cannot map injected element"); + return r; + } + + r = vhost_vdpa_dma_map(v, v->address_space_id, map.iova, + vhost_vdpa_net_cvq_cmd_page_len(), buf, !write); + if (unlikely(r < 0)) { + goto dma_map_err; + } + + return 0; + +dma_map_err: + vhost_iova_tree_remove(v->iova_tree, map); + return r; +} + +static int vhost_vdpa_net_cvq_start(NetClientState *nc) +{ + VhostVDPAState *s; + struct vhost_vdpa *v; + uint64_t backend_features; + int64_t cvq_group; + int cvq_index, r; + + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + + s = DO_UPCAST(VhostVDPAState, nc, nc); + v = &s->vhost_vdpa; + + v->shadow_data = s->always_svq; + v->shadow_vqs_enabled = s->always_svq; + s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID; + + if (s->always_svq) { + /* SVQ is already configured for all virtqueues */ + goto out; + } + + /* + * If we early return in these cases SVQ will not be enabled. The migration + * will be blocked as long as vhost-vdpa backends will not offer _F_LOG. + * + * Calling VHOST_GET_BACKEND_FEATURES as they are not available in v->dev + * yet. + */ + r = ioctl(v->device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features); + if (unlikely(r < 0)) { + error_report("Cannot get vdpa backend_features: %s(%d)", + g_strerror(errno), errno); + return -1; + } + if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID)) || + !vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) { + return 0; + } + + /* + * Check if all the virtqueues of the virtio device are in a different vq + * than the last vq. VQ group of last group passed in cvq_group. + */ + cvq_index = v->dev->vq_index_end - 1; + cvq_group = vhost_vdpa_get_vring_group(v->device_fd, cvq_index); + if (unlikely(cvq_group < 0)) { + return cvq_group; + } + for (int i = 0; i < cvq_index; ++i) { + int64_t group = vhost_vdpa_get_vring_group(v->device_fd, i); + + if (unlikely(group < 0)) { + return group; + } + + if (group == cvq_group) { + return 0; + } + } + + r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID); + if (unlikely(r < 0)) { + return r; + } + + v->iova_tree = vhost_iova_tree_new(v->iova_range.first, + v->iova_range.last); + v->shadow_vqs_enabled = true; + s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID; + +out: + if (!s->vhost_vdpa.shadow_vqs_enabled) { + return 0; + } + + r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, + vhost_vdpa_net_cvq_cmd_page_len(), false); + if (unlikely(r < 0)) { + return r; + } + + r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status, + vhost_vdpa_net_cvq_cmd_page_len(), true); + if (unlikely(r < 0)) { + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); + } + + return r; +} + +static void vhost_vdpa_net_cvq_stop(NetClientState *nc) +{ + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + + if (s->vhost_vdpa.shadow_vqs_enabled) { + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); + vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status); + if (!s->always_svq) { + /* + * If only the CVQ is shadowed we can delete this safely. 
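vhost_vdpa_net_cvq_start() above only shadows the control queue when no data queue shares its virtqueue group, because a shared group cannot be moved into a separate address space on its own. A logic-only sketch, with get_group() standing in for the VHOST_VDPA_GET_VRING_GROUP ioctl (the group layout below is hypothetical):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for ioctl(fd, VHOST_VDPA_GET_VRING_GROUP, &state). */
    static int64_t get_group(unsigned vq_index)
    {
        /* Two data queue pairs in group 0, CVQ alone in group 1. */
        static const int64_t groups[] = { 0, 0, 0, 0, 1 };
        return groups[vq_index];
    }

    static bool cvq_group_is_isolated(unsigned cvq_index)
    {
        int64_t cvq_group = get_group(cvq_index);

        for (unsigned i = 0; i < cvq_index; i++) {
            if (get_group(i) == cvq_group) {
                return false;   /* shared group: CVQ cannot get its own ASID */
            }
        }
        return true;
    }

    int main(void)
    {
        printf("CVQ isolated: %s\n", cvq_group_is_isolated(4) ? "yes" : "no");
        return 0;
    }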
+ * If all the VQs are shadows this will be needed by the time the + * device is started again to register SVQ vrings and similar. + */ + g_clear_pointer(&s->vhost_vdpa.iova_tree, vhost_iova_tree_delete); + } + } +} + +static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s, size_t out_len, + size_t in_len) +{ + /* Buffers for the device */ + const struct iovec out = { + .iov_base = s->cvq_cmd_out_buffer, + .iov_len = out_len, + }; + const struct iovec in = { + .iov_base = s->status, + .iov_len = sizeof(virtio_net_ctrl_ack), + }; + VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); + int r; + + r = vhost_svq_add(svq, &out, 1, &in, 1, NULL); + if (unlikely(r != 0)) { + if (unlikely(r == -ENOSPC)) { + qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n", + __func__); + } + return r; + } + + /* + * We can poll here since we've had BQL from the time we sent the + * descriptor. Also, we need to take the answer before SVQ pulls by itself, + * when BQL is released + */ + return vhost_svq_poll(svq); +} + +static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s, uint8_t class, + uint8_t cmd, const void *data, + size_t data_size) +{ + const struct virtio_net_ctrl_hdr ctrl = { + .class = class, + .cmd = cmd, + }; + + assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); + + memcpy(s->cvq_cmd_out_buffer, &ctrl, sizeof(ctrl)); + memcpy(s->cvq_cmd_out_buffer + sizeof(ctrl), data, data_size); + + return vhost_vdpa_net_cvq_add(s, sizeof(ctrl) + data_size, + sizeof(virtio_net_ctrl_ack)); +} + +static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n) +{ + uint64_t features = n->parent_obj.guest_features; + if (features & BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR)) { + ssize_t dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MAC, + VIRTIO_NET_CTRL_MAC_ADDR_SET, + n->mac, sizeof(n->mac)); + if (unlikely(dev_written < 0)) { + return dev_written; + } + if (*s->status != VIRTIO_NET_OK) { + return -EIO; + } + } + + return 0; +} + +static int vhost_vdpa_net_load_mq(VhostVDPAState *s, + const VirtIONet *n) +{ + struct virtio_net_ctrl_mq mq; + uint64_t features = n->parent_obj.guest_features; + ssize_t dev_written; + + if (!(features & BIT_ULL(VIRTIO_NET_F_MQ))) { + return 0; + } + + mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs); + dev_written = vhost_vdpa_net_load_cmd(s, VIRTIO_NET_CTRL_MQ, + VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &mq, + sizeof(mq)); + if (unlikely(dev_written < 0)) { + return dev_written; + } + if (*s->status != VIRTIO_NET_OK) { + return -EIO; + } + + return 0; +} + +static int vhost_vdpa_net_load(NetClientState *nc) +{ + VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc); + struct vhost_vdpa *v = &s->vhost_vdpa; + const VirtIONet *n; + int r; + + assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); + + if (!v->shadow_vqs_enabled) { + return 0; + } + + n = VIRTIO_NET(v->dev->vdev); + r = vhost_vdpa_net_load_mac(s, n); + if (unlikely(r < 0)) { + return r; + } + r = vhost_vdpa_net_load_mq(s, n); + if (unlikely(r)) { + return r; + } + + return 0; +} + +static NetClientInfo net_vhost_vdpa_cvq_info = { + .type = NET_CLIENT_DRIVER_VHOST_VDPA, + .size = sizeof(VhostVDPAState), + .receive = vhost_vdpa_receive, + .start = vhost_vdpa_net_cvq_start, + .load = vhost_vdpa_net_load, + .stop = vhost_vdpa_net_cvq_stop, + .cleanup = vhost_vdpa_cleanup, + .has_vnet_hdr = vhost_vdpa_has_vnet_hdr, + .has_ufo = vhost_vdpa_has_ufo, + .check_peer_type = vhost_vdpa_check_peer_type, +}; + +/** + * Validate and copy control virtqueue commands. 
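vhost_vdpa_net_load_cmd() above serializes every control command as the two-byte {class, cmd} header followed by the payload, and the device answers with a single virtio_net_ctrl_ack byte. A standalone sketch of the out-buffer layout (header struct and constants reproduced locally; the values come from the virtio-net spec):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct virtio_net_ctrl_hdr {
        uint8_t class;
        uint8_t cmd;
    };

    #define VIRTIO_NET_CTRL_MAC          1
    #define VIRTIO_NET_CTRL_MAC_ADDR_SET 1
    #define ETH_ALEN                     6

    int main(void)
    {
        const uint8_t mac[ETH_ALEN] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
        struct virtio_net_ctrl_hdr ctrl = {
            .class = VIRTIO_NET_CTRL_MAC,
            .cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET,
        };
        uint8_t out[64];

        memcpy(out, &ctrl, sizeof(ctrl));             /* header first */
        memcpy(out + sizeof(ctrl), mac, sizeof(mac)); /* then payload */

        printf("out length: %zu, in length: 1 (ack byte)\n",
               sizeof(ctrl) + sizeof(mac));
        return 0;
    }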
+ * + * Following QEMU guidelines, we offer a copy of the buffers to the device to + * prevent TOCTOU bugs. + */ +static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq, + VirtQueueElement *elem, + void *opaque) +{ + VhostVDPAState *s = opaque; + size_t in_len; + virtio_net_ctrl_ack status = VIRTIO_NET_ERR; + /* Out buffer sent to both the vdpa device and the device model */ + struct iovec out = { + .iov_base = s->cvq_cmd_out_buffer, + }; + /* in buffer used for device model */ + const struct iovec in = { + .iov_base = &status, + .iov_len = sizeof(status), + }; + ssize_t dev_written = -EINVAL; + + out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0, + s->cvq_cmd_out_buffer, + vhost_vdpa_net_cvq_cmd_len()); + if (*(uint8_t *)s->cvq_cmd_out_buffer == VIRTIO_NET_CTRL_ANNOUNCE) { + /* + * Guest announce capability is emulated by qemu, so don't forward to + * the device. + */ + dev_written = sizeof(status); + *s->status = VIRTIO_NET_OK; + } else { + dev_written = vhost_vdpa_net_cvq_add(s, out.iov_len, sizeof(status)); + if (unlikely(dev_written < 0)) { + goto out; + } + } + + if (unlikely(dev_written < sizeof(status))) { + error_report("Insufficient written data (%zu)", dev_written); + goto out; + } + + if (*s->status != VIRTIO_NET_OK) { + goto out; + } + + status = VIRTIO_NET_ERR; + virtio_net_handle_ctrl_iov(svq->vdev, &in, 1, &out, 1); + if (status != VIRTIO_NET_OK) { + error_report("Bad CVQ processing in model"); + } + +out: + in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, + sizeof(status)); + if (unlikely(in_len < sizeof(status))) { + error_report("Bad device CVQ written length"); + } + vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status))); + /* + * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when + * the function successfully forwards the CVQ command, indicated + * by a non-negative value of `dev_written`. Otherwise, it still + * belongs to SVQ. + * This function should only free the `elem` when it owns. + */ + if (dev_written >= 0) { + g_free(elem); + } + return dev_written < 0 ? 
dev_written : 0; +} + +static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = { + .avail_handler = vhost_vdpa_net_handle_ctrl_avail, +}; + static NetClientState *net_vhost_vdpa_init(NetClientState *peer, - const char *device, - const char *name, - int vdpa_device_fd, - int queue_pair_index, - int nvqs, - bool is_datapath) + const char *device, + const char *name, + int vdpa_device_fd, + int queue_pair_index, + int nvqs, + bool is_datapath, + bool svq, + struct vhost_vdpa_iova_range iova_range, + VhostIOVATree *iova_tree) { NetClientState *nc = NULL; VhostVDPAState *s; @@ -203,7 +698,7 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device, name); } else { - nc = qemu_new_net_control_client(&net_vhost_vdpa_info, peer, + nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer, device, name); } snprintf(nc->info_str, sizeof(nc->info_str), TYPE_VHOST_VDPA); @@ -211,6 +706,34 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, s->vhost_vdpa.device_fd = vdpa_device_fd; s->vhost_vdpa.index = queue_pair_index; + s->always_svq = svq; + s->vhost_vdpa.shadow_vqs_enabled = svq; + s->vhost_vdpa.iova_range = iova_range; + s->vhost_vdpa.shadow_data = svq; + s->vhost_vdpa.iova_tree = iova_tree; + if (!is_datapath) { + s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size, + vhost_vdpa_net_cvq_cmd_page_len()); + memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len()); + s->status = qemu_memalign(qemu_real_host_page_size, + vhost_vdpa_net_cvq_cmd_page_len()); + memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len()); + + s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; + s->vhost_vdpa.shadow_vq_ops_opaque = s; + + /* + * TODO: We cannot migrate devices with CVQ and no x-svq enabled as + * there is no way to set the device state (MAC, MQ, etc) before + * starting the datapath. + * + * Migration blocker ownership now belongs to s->vhost_vdpa. 
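The CVQ bounce buffers above are allocated with qemu_memalign() and sized by vhost_vdpa_net_cvq_cmd_page_len(), i.e. rounded up to whole host pages, because they are later DMA-mapped with page granularity. A standalone equivalent using posix_memalign():

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    #define ROUND_UP(n, d) ((((n) + (d) - 1) / (d)) * (d))

    int main(void)
    {
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t len = ROUND_UP(102, page);  /* short command, one full page */
        void *buf = NULL;

        if (posix_memalign(&buf, page, len) != 0) {
            return 1;
        }
        memset(buf, 0, len);   /* zeroed, like qemu_memalign() + memset() */

        printf("buffer %p: %zu bytes, page size %zu\n", buf, len, page);
        free(buf);
        return 0;
    }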
+ */ + if (!svq) { + error_setg(&s->vhost_vdpa.migration_blocker, + "net vdpa cannot migrate with CVQ feature"); + } + } ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); if (ret) { qemu_del_net_client(nc); @@ -219,20 +742,24 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer, return nc; } -static int vhost_vdpa_get_max_queue_pairs(int fd, int *has_cvq, Error **errp) +static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp) +{ + int ret = ioctl(fd, VHOST_GET_FEATURES, features); + if (unlikely(ret < 0)) { + error_setg_errno(errp, errno, + "Fail to query features from vhost-vDPA device"); + } + return ret; +} + +static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features, + int *has_cvq, Error **errp) { unsigned long config_size = offsetof(struct vhost_vdpa_config, buf); g_autofree struct vhost_vdpa_config *config = NULL; __virtio16 *max_queue_pairs; - uint64_t features; int ret; - ret = ioctl(fd, VHOST_GET_FEATURES, &features); - if (ret) { - error_setg(errp, "Fail to query features from vhost-vDPA device"); - return ret; - } - if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) { *has_cvq = 1; } else { @@ -262,54 +789,100 @@ int net_init_vhost_vdpa(const Netdev *netdev, const char *name, NetClientState *peer, Error **errp) { const NetdevVhostVDPAOptions *opts; + uint64_t features; int vdpa_device_fd; - NetClientState **ncs, *nc; - int queue_pairs, i, has_cvq = 0; + g_autofree NetClientState **ncs = NULL; + g_autoptr(VhostIOVATree) iova_tree = NULL; + struct vhost_vdpa_iova_range iova_range; + NetClientState *nc; + int queue_pairs, r, i = 0, has_cvq = 0; assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); opts = &netdev->u.vhost_vdpa; - if (!opts->vhostdev) { - error_setg(errp, "vdpa character device not specified with vhostdev"); + if (!opts->has_vhostdev && !opts->has_vhostfd) { + error_setg(errp, + "vhost-vdpa: neither vhostdev= nor vhostfd= was specified"); return -1; } - vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp); - if (vdpa_device_fd == -1) { - return -errno; + if (opts->has_vhostdev && opts->has_vhostfd) { + error_setg(errp, + "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive"); + return -1; + } + + if (opts->has_vhostdev) { + vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp); + if (vdpa_device_fd == -1) { + return -errno; + } + } else { + /* has_vhostfd */ + vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp); + if (vdpa_device_fd == -1) { + error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: "); + return -1; + } + } + + r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp); + if (unlikely(r < 0)) { + goto err; } - queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, + queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features, &has_cvq, errp); if (queue_pairs < 0) { qemu_close(vdpa_device_fd); return queue_pairs; } + r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range); + if (unlikely(r < 0)) { + error_setg(errp, "vhost-vdpa: get iova range failed: %s", + strerror(-r)); + goto err; + } + + if (opts->x_svq) { + if (!vhost_vdpa_net_valid_svq_features(features, errp)) { + goto err_svq; + } + + iova_tree = vhost_iova_tree_new(iova_range.first, iova_range.last); + } + ncs = g_malloc0(sizeof(*ncs) * queue_pairs); for (i = 0; i < queue_pairs; i++) { ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, - vdpa_device_fd, i, 2, true); + vdpa_device_fd, i, 2, true, opts->x_svq, + iova_range, iova_tree); if (!ncs[i]) goto err; } if (has_cvq) 
{ nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name, - vdpa_device_fd, i, 1, false); + vdpa_device_fd, i, 1, false, + opts->x_svq, iova_range, iova_tree); if (!nc) goto err; } - g_free(ncs); + /* iova_tree ownership belongs to last NetClientState */ + g_steal_pointer(&iova_tree); return 0; err: if (i) { - qemu_del_net_client(ncs[0]); + for (i--; i >= 0; i--) { + qemu_del_net_client(ncs[i]); + } } + +err_svq: qemu_close(vdpa_device_fd); - g_free(ncs); return -1; } diff --git a/os-posix.c b/os-posix.c index ae6c9f2a5e983772745067ea9db74ac44b93f9ac..306c442bc87c6ab87ba5fcbfbae00d76650aac71 100644 --- a/os-posix.c +++ b/os-posix.c @@ -322,6 +322,7 @@ int os_mlock(void) #ifdef HAVE_MLOCKALL int ret = 0; + qemu_log("do mlockall\n"); ret = mlockall(MCL_CURRENT | MCL_FUTURE); if (ret < 0) { error_report("mlockall: %s", strerror(errno)); diff --git a/pc-bios/core3-hmcode b/pc-bios/core3-hmcode new file mode 100644 index 0000000000000000000000000000000000000000..a488707bba0d2db7762636a8e2c112bf69189376 Binary files /dev/null and b/pc-bios/core3-hmcode differ diff --git a/pc-bios/core3-reset b/pc-bios/core3-reset new file mode 100755 index 0000000000000000000000000000000000000000..5880780b3663f402d99b6fab68ed461c32f58566 Binary files /dev/null and b/pc-bios/core3-reset differ diff --git a/pc-bios/core4-hmcode b/pc-bios/core4-hmcode new file mode 100755 index 0000000000000000000000000000000000000000..668a9e97b3a39e9bd44273860d1f35505025ec71 Binary files /dev/null and b/pc-bios/core4-hmcode differ diff --git a/pc-bios/keymaps/meson.build b/pc-bios/keymaps/meson.build index 44247a12b54a365177ebc8edd94b16e8fe42f196..6e025cddf2da591a17f9e2fbeb1e4dced827a417 100644 --- a/pc-bios/keymaps/meson.build +++ b/pc-bios/keymaps/meson.build @@ -1,5 +1,5 @@ keymaps = { - 'ar': '-l ar', + 'ar': '-l ara', 'bepo': '-l fr -v dvorak', 'cz': '-l cz', 'da': '-l dk', diff --git a/pc-bios/loongarch_bios.bin b/pc-bios/loongarch_bios.bin new file mode 100755 index 0000000000000000000000000000000000000000..bba1c940a0502a2ae10adaf189e946cd3a459b61 Binary files /dev/null and b/pc-bios/loongarch_bios.bin differ diff --git a/pc-bios/loongarch_vars.bin b/pc-bios/loongarch_vars.bin new file mode 100755 index 0000000000000000000000000000000000000000..65bdb77af90b92dc268c0c5c70c054dee71599f4 Binary files /dev/null and b/pc-bios/loongarch_vars.bin differ diff --git a/pc-bios/meson.build b/pc-bios/meson.build index b40ff3f2bd395f6ef79f49ba4ac85a566ac7307d..f2a1d111a1518b32ff26abd45e90cc9161f3f0bf 100644 --- a/pc-bios/meson.build +++ b/pc-bios/meson.build @@ -38,6 +38,9 @@ blobs = files( 'vgabios-ramfb.bin', 'vgabios-bochs-display.bin', 'vgabios-ati.bin', + 'uefi-bios-sw', + 'core3-reset', + 'core3-hmcode', 'openbios-sparc32', 'openbios-sparc64', 'openbios-ppc', @@ -83,6 +86,8 @@ blobs = files( 'opensbi-riscv32-generic-fw_dynamic.elf', 'opensbi-riscv64-generic-fw_dynamic.elf', 'npcm7xx_bootrom.bin', + 'loongarch_bios.bin', + 'loongarch_vars.bin', ) if get_option('install_blobs') diff --git a/pc-bios/s390-ccw/cio.h b/pc-bios/s390-ccw/cio.h index 1e5d4e92e12c418289d27c160c7c0d8e8b2e7c07..88a88adfd2fdfba2dfa630d451c71bb33ec78ac4 100644 --- a/pc-bios/s390-ccw/cio.h +++ b/pc-bios/s390-ccw/cio.h @@ -20,7 +20,7 @@ struct pmcw { __u32 intparm; /* interruption parameter */ __u32 qf:1; /* qdio facility */ __u32 w:1; - __u32 isc:3; /* interruption sublass */ + __u32 isc:3; /* interruption subclass */ __u32 res5:3; /* reserved zeros */ __u32 ena:1; /* enabled */ __u32 lm:2; /* limit mode */ diff --git a/pc-bios/s390-ccw/iplb.h 
b/pc-bios/s390-ccw/iplb.h index 772d5c57c94ecc4bffb4b65f0e1ec5c74febfddc..cb6ac8a880a08c27ce5bd3d8c9fb4a0159e0a875 100644 --- a/pc-bios/s390-ccw/iplb.h +++ b/pc-bios/s390-ccw/iplb.h @@ -81,7 +81,7 @@ extern IplParameterBlock iplb __attribute__((__aligned__(PAGE_SIZE))); #define QIPL_FLAG_BM_OPTS_ZIPL 0x40 /* - * This definition must be kept in sync with the defininition + * This definition must be kept in sync with the definition * in hw/s390x/ipl.h */ struct QemuIplParameters { diff --git a/pc-bios/s390-ccw/start.S b/pc-bios/s390-ccw/start.S index 4d5ad21653d0992d4e65847258167d7aa419d46d..6072906df48c635f8a8db346f609485e34ecdbe0 100644 --- a/pc-bios/s390-ccw/start.S +++ b/pc-bios/s390-ccw/start.S @@ -19,7 +19,7 @@ _start: larl %r2, __bss_start larl %r3, _end slgr %r3, %r2 /* get sizeof bss */ - ltgr %r3,%r3 /* bss emtpy? */ + ltgr %r3,%r3 /* bss empty? */ jz done aghi %r3,-1 srlg %r4,%r3,8 /* how many 256 byte chunks? */ diff --git a/pc-bios/s390-ccw/virtio-blkdev.c b/pc-bios/s390-ccw/virtio-blkdev.c index 7d35050292dea3befb9a14c88af417252cf81d71..263a84d391b425e5ae6070eeeb8148504c683aa6 100644 --- a/pc-bios/s390-ccw/virtio-blkdev.c +++ b/pc-bios/s390-ccw/virtio-blkdev.c @@ -214,7 +214,7 @@ int virtio_get_block_size(void) switch (vdev->senseid.cu_model) { case VIRTIO_ID_BLOCK: - return vdev->config.blk.blk_size << vdev->config.blk.physical_block_exp; + return vdev->config.blk.blk_size; case VIRTIO_ID_SCSI: return vdev->scsi_block_size; } diff --git a/pc-bios/uefi-bios-sw b/pc-bios/uefi-bios-sw new file mode 100755 index 0000000000000000000000000000000000000000..8be24e6d73dac711276eb8bcc66625d689ce1e3f Binary files /dev/null and b/pc-bios/uefi-bios-sw differ diff --git a/plugins/core.c b/plugins/core.c index 792262da0839e1288b061112685d52f22a5a3617..e935e3c0c97d72eb941bf51131288ae87d6839c0 100644 --- a/plugins/core.c +++ b/plugins/core.c @@ -500,10 +500,18 @@ void qemu_plugin_user_exit(void) enum qemu_plugin_event ev; CPUState *cpu; - QEMU_LOCK_GUARD(&plugin.lock); + /* + * Locking order: we must acquire locks in an order that is consistent + * with the one in fork_start(). That is: + * - start_exclusive(), which acquires qemu_cpu_list_lock, + * must be called before acquiring plugin.lock. + * - tb_flush(), which acquires mmap_lock(), must be called + * while plugin.lock is not held. 
+ */ start_exclusive(); + qemu_rec_mutex_lock(&plugin.lock); /* un-register all callbacks except the final AT_EXIT one */ for (ev = 0; ev < QEMU_PLUGIN_EV_MAX; ev++) { if (ev != QEMU_PLUGIN_EV_ATEXIT) { @@ -514,12 +522,12 @@ void qemu_plugin_user_exit(void) } } - tb_flush(current_cpu); - CPU_FOREACH(cpu) { qemu_plugin_disable_mem_helpers(cpu); } + qemu_rec_mutex_unlock(&plugin.lock); + tb_flush(current_cpu); end_exclusive(); /* now it's safe to handle the exit case */ diff --git a/python/qemu/aqmp/aqmp_tui.py b/python/qemu/aqmp/aqmp_tui.py index a2929f771cfb8fa6041661c96ce3ed7f67e8a25e..f1e926dd7565dd10636e5c34beaa15168c39dc60 100644 --- a/python/qemu/aqmp/aqmp_tui.py +++ b/python/qemu/aqmp/aqmp_tui.py @@ -35,7 +35,8 @@ import urwid import urwid_readline -from ..qmp import QEMUMonitorProtocol, QMPBadPortError +from qemu.qmp import QEMUMonitorProtocol, QMPBadPortError + from .error import ProtocolError from .message import DeserializationError, Message, UnexpectedTypeError from .protocol import ConnectError, Runstate diff --git a/python/qemu/aqmp/protocol.py b/python/qemu/aqmp/protocol.py index 5190b33b13df24fc2ca4aed934ed1f184a4888e1..c4fbe35a0e41c589059ec4fa37a816f4b76a3bd8 100644 --- a/python/qemu/aqmp/protocol.py +++ b/python/qemu/aqmp/protocol.py @@ -43,8 +43,8 @@ T = TypeVar('T') +_U = TypeVar('_U') _TaskFN = Callable[[], Awaitable[None]] # aka ``async def func() -> None`` -_FutureT = TypeVar('_FutureT', bound=Optional['asyncio.Future[Any]']) class Runstate(Enum): @@ -591,7 +591,8 @@ def _cleanup(self) -> None: """ Fully reset this object to a clean state and return to `IDLE`. """ - def _paranoid_task_erase(task: _FutureT) -> Optional[_FutureT]: + def _paranoid_task_erase(task: Optional['asyncio.Future[_U]'] + ) -> Optional['asyncio.Future[_U]']: # Help to erase a task, ENSURING it is fully quiesced first. assert (task is None) or task.done() return None if (task and task.done()) else task diff --git a/python/qemu/qmp/qom_common.py b/python/qemu/qmp/qom_common.py index a59ae1a2a1883cb4d89b0e44507c5001f44357a0..2e4c741f776240957b8a59c1a316d53e9a8ca985 100644 --- a/python/qemu/qmp/qom_common.py +++ b/python/qemu/qmp/qom_common.py @@ -30,10 +30,6 @@ from . import QEMUMonitorProtocol, QMPError -# The following is needed only for a type alias. -Subparsers = argparse._SubParsersAction # pylint: disable=protected-access - - class ObjectPropertyInfo: """ Represents the return type from e.g. qom-list. @@ -89,7 +85,7 @@ def __init__(self, args: argparse.Namespace): self.qmp.connect() @classmethod - def register(cls, subparsers: Subparsers) -> None: + def register(cls, subparsers: Any) -> None: """ Register this command with the argument parser. diff --git a/qapi/block-core.json b/qapi/block-core.json index 1d3dd9cb48edd36aa238002ae7a8624a58d45c33..618e417135776fe0c4d693d90ae6d935a0953ec0 100644 --- a/qapi/block-core.json +++ b/qapi/block-core.json @@ -1146,7 +1146,7 @@ # Since: 1.3 ## { 'enum': 'BlockdevOnError', - 'data': ['report', 'ignore', 'enospc', 'stop', 'auto'] } + 'data': ['report', 'ignore', 'enospc', 'stop', 'auto', 'retry'] } ## # @MirrorSyncMode: @@ -4096,6 +4096,12 @@ # future requests before a successful reconnect will # immediately fail. Default 0 (Since 4.2) # +# @open-timeout: In seconds. If zero, the nbd driver tries the connection +# only once, and fails to open if the connection fails. +# If non-zero, the nbd driver will repeat connection attempts +# until successful or until @open-timeout seconds have elapsed. 
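The qemu_plugin_user_exit() reordering in the plugins/core.c hunk above pins down one global acquisition order: start_exclusive() (which takes qemu_cpu_list_lock) before plugin.lock, and tb_flush() (which takes mmap_lock) only after plugin.lock is dropped, matching fork_start(). Two code paths taking the same locks in opposite orders is the standard deadlock recipe; a minimal sketch of the discipline:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER; /* cpu list */
    static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER; /* plugin   */

    static void *worker(void *name)
    {
        /* Canonical order on every path: A, then B.  If another path
         * took B then A, each thread could block holding the lock the
         * other needs next. */
        pthread_mutex_lock(&lock_a);
        pthread_mutex_lock(&lock_b);
        printf("%s: holding both locks\n", (const char *)name);
        pthread_mutex_unlock(&lock_b);
        pthread_mutex_unlock(&lock_a);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;
        pthread_create(&t1, NULL, worker, "thread1");
        pthread_create(&t2, NULL, worker, "thread2");
        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }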
+# Default 0 (Since 7.0) +# # Features: # @unstable: Member @x-dirty-bitmap is experimental. # @@ -4106,7 +4112,8 @@ '*export': 'str', '*tls-creds': 'str', '*x-dirty-bitmap': { 'type': 'str', 'features': [ 'unstable' ] }, - '*reconnect-delay': 'uint32' } } + '*reconnect-delay': 'uint32', + '*open-timeout': 'uint32' } } ## # @BlockdevOptionsRaw: @@ -4437,6 +4444,8 @@ # @nocow: Turn off copy-on-write (valid only on btrfs; default: off) # @extent-size-hint: Extent size hint to add to the image file; 0 for not # adding an extent size hint (default: 1 MB, since 5.1) +# @cache: Cache mode used to write the output disk image +# @buffersize: Buffer size for creating image # # Since: 2.12 ## @@ -4445,7 +4454,9 @@ 'size': 'size', '*preallocation': 'PreallocMode', '*nocow': 'bool', - '*extent-size-hint': 'size'} } + '*extent-size-hint': 'size', + '*cache': 'str', + '*buffersize': 'size'} } ## # @BlockdevCreateOptionsGluster: @@ -4952,7 +4963,7 @@ # Since: 2.1 ## { 'enum': 'BlockErrorAction', - 'data': [ 'ignore', 'report', 'stop' ] } + 'data': [ 'ignore', 'report', 'stop', 'retry' ] } ## diff --git a/qapi/block-export.json b/qapi/block-export.json index c1b92ce1c1c8e804eaade25896095f6d696c218d..181d7238feefd5ad12831e5b52f1dcc9a4610091 100644 --- a/qapi/block-export.json +++ b/qapi/block-export.json @@ -21,7 +21,7 @@ # recreated on the fly while the NBD server is active. # If missing, it will default to denying access (since 4.0). # @max-connections: The maximum number of connections to allow at the same -# time, 0 for unlimited. (since 5.2; default: 0) +# time, 0 for unlimited. (since 5.2; default: 100) # # Since: 4.2 ## @@ -50,7 +50,7 @@ # recreated on the fly while the NBD server is active. # If missing, it will default to denying access (since 4.0). # @max-connections: The maximum number of connections to allow at the same -# time, 0 for unlimited. (since 5.2; default: 0) +# time, 0 for unlimited. (since 5.2; default: 100) # # Returns: error if the server is already running. 
# diff --git a/qapi/block.json b/qapi/block.json index 82fcf2c914c83d8e2c7d55e6b7b828ae7114cb83..71136db77712759745b664cafb3160a789b1fe62 100644 --- a/qapi/block.json +++ b/qapi/block.json @@ -529,7 +529,8 @@ # Since: 4.0 # # Example: -# set new histograms for all io types with intervals + +# Set new histograms for all io types with intervals # [0, 10), [10, 50), [50, 100), [100, +inf): # # -> { "execute": "block-latency-histogram-set", @@ -538,7 +539,8 @@ # <- { "return": {} } # # Example: -# set new histogram only for write, other histograms will remain + +# Set new histogram only for write, other histograms will remain # not changed (or not created): # # -> { "execute": "block-latency-histogram-set", @@ -547,7 +549,8 @@ # <- { "return": {} } # # Example: -# set new histograms with the following intervals: + +# Set new histograms with the following intervals: # read, flush: [0, 10), [10, 50), [50, 100), [100, +inf) # write: [0, 1000), [1000, 5000), [5000, +inf) # @@ -558,7 +561,8 @@ # <- { "return": {} } # # Example: -# remove all latency histograms: + +# Remove all latency histograms: # # -> { "execute": "block-latency-histogram-set", # "arguments": { "id": "drive0" } } diff --git a/qapi/crypto.json b/qapi/crypto.json index 1ec54c15ca5f148c62d08d5c650af205415bf4ec..60e706ae099100c77705d1c892adc1aea8e55fee 100644 --- a/qapi/crypto.json +++ b/qapi/crypto.json @@ -75,6 +75,8 @@ # @twofish-128: Twofish with 128 bit / 16 byte keys # @twofish-192: Twofish with 192 bit / 24 byte keys # @twofish-256: Twofish with 256 bit / 32 byte keys +# @sm4: SM4 with 128 bit / 16 byte keys (since 9.0) +# # Since: 2.6 ## { 'enum': 'QCryptoCipherAlgorithm', @@ -83,7 +85,8 @@ 'des', '3des', 'cast5-128', 'serpent-128', 'serpent-192', 'serpent-256', - 'twofish-128', 'twofish-192', 'twofish-256']} + 'twofish-128', 'twofish-192', 'twofish-256', + 'sm4']} ## diff --git a/qapi/machine-target.json b/qapi/machine-target.json index f5ec4bc172b0dc48f73d8452945404cec3d77ae5..682dc86b427c02ddba0387cb84529d0ffc8a324d 100644 --- a/qapi/machine-target.json +++ b/qapi/machine-target.json @@ -324,7 +324,8 @@ 'TARGET_ARM', 'TARGET_I386', 'TARGET_S390X', - 'TARGET_MIPS' ] } } + 'TARGET_MIPS', + 'TARGET_LOONGARCH64' ] } } ## # @query-cpu-definitions: @@ -340,4 +341,5 @@ 'TARGET_ARM', 'TARGET_I386', 'TARGET_S390X', - 'TARGET_MIPS' ] } } + 'TARGET_MIPS', + 'TARGET_LOONGARCH64' ] } } diff --git a/qapi/machine.json b/qapi/machine.json index 067e3f53787928d38566e1e26e1c94ec1481ebf0..c12fa1e39930397180f50a1e2caf35eadf2905a5 100644 --- a/qapi/machine.json +++ b/qapi/machine.json @@ -29,12 +29,12 @@ # Since: 3.0 ## { 'enum' : 'SysEmuTarget', - 'data' : [ 'aarch64', 'alpha', 'arm', 'avr', 'cris', 'hppa', 'i386', + 'data' : [ 'aarch64', 'alpha', 'sw64', 'arm', 'avr', 'cris', 'hppa', 'i386', 'm68k', 'microblaze', 'microblazeel', 'mips', 'mips64', 'mips64el', 'mipsel', 'nios2', 'or1k', 'ppc', 'ppc64', 'riscv32', 'riscv64', 'rx', 's390x', 'sh4', 'sh4eb', 'sparc', 'sparc64', 'tricore', - 'x86_64', 'xtensa', 'xtensaeb' ] } + 'x86_64', 'xtensa', 'xtensaeb', 'loongarch64' ] } ## # @CpuS390State: @@ -867,7 +867,8 @@ # # @node-id: NUMA node ID the CPU belongs to # @socket-id: socket number within node/board the CPU belongs to -# @die-id: die number within node/board the CPU belongs to (Since 4.1) +# @die-id: die number within socket the CPU belongs to (since 4.1) +# @cluster-id: cluster number within die the CPU belongs to (since 6.2) # @core-id: core number within die the CPU belongs to # @thread-id: thread number within core the CPU belongs to # 
@@ -883,6 +884,7 @@ 'data': { '*node-id': 'int', '*socket-id': 'int', '*die-id': 'int', + '*cluster-id': 'int', '*core-id': 'int', '*thread-id': 'int' } @@ -1207,12 +1209,15 @@ # # @memdev: memory backend linked with device # +# @node: the numa node (Since: 7.0) +# # Since: 6.2 ## { 'struct': 'SgxEPCDeviceInfo', 'data': { '*id': 'str', 'memaddr': 'size', 'size': 'size', + 'node': 'int', 'memdev': 'str' } } @@ -1285,10 +1290,15 @@ # # @memdev: memory backend linked with device # +# @node: the numa node (Since: 7.0) +# # Since: 6.2 ## { 'struct': 'SgxEPC', - 'data': { 'memdev': 'str' } } + 'data': { 'memdev': 'str', + 'node': 'int' + } +} ## # @SgxEPCProperties: @@ -1396,7 +1406,9 @@ # # @dies: number of dies per socket in the CPU topology # -# @cores: number of cores per die in the CPU topology +# @clusters: number of clusters per die in the CPU topology (since 7.0) +# +# @cores: number of cores per cluster in the CPU topology # # @threads: number of threads per core in the CPU topology # @@ -1408,6 +1420,7 @@ '*cpus': 'int', '*sockets': 'int', '*dies': 'int', + '*clusters': 'int', '*cores': 'int', '*threads': 'int', '*maxcpus': 'int' } } @@ -1557,3 +1570,52 @@ { 'command': 'x-query-usb', 'returns': 'HumanReadableText', 'features': [ 'unstable' ] } + +## +# @CacheLevelAndType: +# +# Caches a system may have. The enumeration value here is the +# combination of cache level and cache type. +# +# @l1d: L1 data cache. +# +# @l1i: L1 instruction cache. +# +# @l1: L1 (unified) cache. +# +# @l2: L2 (unified) cache. +# +# @l3: L3 (unified) cache +# +# Since: 6.2 +## +{ 'enum': 'CacheLevelAndType', + 'data': [ 'l1d', 'l1i', 'l1', 'l2', 'l3' ] } + +## +# @SmpCacheProperties: +# +# Cache information for SMP system. +# +# @cache: Cache name, which is the combination of cache level and cache type. +# +# @size: Cache size in units of Byte. +# +# Since: 6.2 +## +{ 'struct': 'SmpCacheProperties', + 'data': { + 'cache': 'CacheLevelAndType', + 'size': 'uint64' } } + +## +# @SmpCachePropertiesWrapper: +# +# List wrapper of SmpCacheProperties. +# +# @caches: the list of SmpCacheProperties. +# +# Since 6.2 +## +{ 'struct': 'SmpCachePropertiesWrapper', + 'data': { 'caches': ['SmpCacheProperties'] } } diff --git a/qapi/migration.json b/qapi/migration.json index bbfd48cf0b170f8400fa33e8261a6d18cb41dc82..a36a4e943b514bd5da8fd5d847872839e0c2db78 100644 --- a/qapi/migration.json +++ b/qapi/migration.json @@ -228,6 +228,18 @@ # Present and non-empty when migration is blocked. # (since 6.0) # +# @dirty-limit-throttle-time-per-round: Maximum throttle time (in microseconds) of virtual +# CPUs each dirty ring full round, which shows how +# MigrationCapability dirty-limit affects the guest +# during live migration. (since 8.1) +# +# @dirty-limit-ring-full-time: Estimated average dirty ring full time (in microseconds) +# each dirty ring full round, note that the value equals +# dirty ring memory size divided by average dirty page rate +# of virtual CPU, which can be used to observe the average +# memory load of virtual CPU indirectly. Note that zero +# means guest doesn't dirty memory (since 8.1) +# # Since: 0.14 ## { 'struct': 'MigrationInfo', @@ -245,7 +257,9 @@ '*postcopy-blocktime' : 'uint32', '*postcopy-vcpu-blocktime': ['uint32'], '*compression': 'CompressionStats', - '*socket-address': ['SocketAddress'] } } + '*socket-address': ['SocketAddress'], + '*dirty-limit-throttle-time-per-round': 'uint64', + '*dirty-limit-ring-full-time': 'uint64'} } ## # @query-migrate: @@ -452,6 +466,16 @@ # procedure starts. 
The VM RAM is saved with running VM. # (since 6.0) # +# @dirty-limit: If enabled, migration will use the dirty-limit algo to +# throttle down guest instead of auto-converge algo. +# Throttle algo only works when vCPU's dirtyrate greater +# than 'vcpu-dirty-limit', read processes in guest os +# aren't penalized any more, so this algo can improve +# performance of vCPU during live migration. This is an +# optional performance feature and should not affect the +# correctness of the existing auto-converge algo. +# (since 8.1) +# # Features: # @unstable: Members @x-colo and @x-ignore-shared are experimental. # @@ -465,7 +489,8 @@ 'block', 'return-path', 'pause-before-switchover', 'multifd', 'dirty-bitmaps', 'postcopy-blocktime', 'late-block-activate', { 'name': 'x-ignore-shared', 'features': [ 'unstable' ] }, - 'validate-uuid', 'background-snapshot'] } + 'validate-uuid', 'background-snapshot', + 'dirty-limit'] } ## # @MigrationCapabilityStatus: @@ -596,6 +621,20 @@ 'bitmaps': [ 'BitmapMigrationBitmapAlias' ] } } +## +# @CompressMethod: +# +# An enumeration of multi-thread compression methods. +# +# @zlib: use zlib compression method. +# @zstd: use zstd compression method. +# +# Since: 5.0 +# +## +{ 'enum': 'CompressMethod', + 'data': [ 'zlib', { 'name': 'zstd', 'if': 'CONFIG_ZSTD' } ] } + ## # @MigrationParameter: # @@ -632,6 +671,9 @@ # compression, so set the decompress-threads to the number about 1/4 # of compress-threads is adequate. # +# @compress-method: Which multi-thread compression method to use. +# Defaults to none. (Since 5.0) +# # @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period # to trigger throttling. It is expressed as percentage. # The default value is 50. (Since 5.0) @@ -749,16 +791,23 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # -# Features: -# @unstable: Member @x-checkpoint-delay is experimental. +# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty limit during +# live migration. Should be in the range 1 to 1000ms, +# defaults to 1000ms. (Since 8.1) # +# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. +# Defaults to 1. (Since 8.1) +# +# Features: +# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period +# are experimental. # Since: 2.4 ## { 'enum': 'MigrationParameter', 'data': ['announce-initial', 'announce-max', 'announce-rounds', 'announce-step', 'compress-level', 'compress-threads', 'decompress-threads', - 'compress-wait-thread', 'throttle-trigger-threshold', + 'compress-wait-thread', 'compress-method', 'throttle-trigger-threshold', 'cpu-throttle-initial', 'cpu-throttle-increment', 'cpu-throttle-tailslow', 'tls-creds', 'tls-hostname', 'tls-authz', 'max-bandwidth', @@ -768,8 +817,10 @@ 'multifd-channels', 'xbzrle-cache-size', 'max-postcopy-bandwidth', 'max-cpu-throttle', 'multifd-compression', - 'multifd-zlib-level' ,'multifd-zstd-level', - 'block-bitmap-mapping' ] } + 'multifd-zlib-level', 'multifd-zstd-level', + 'block-bitmap-mapping', + { 'name': 'x-vcpu-dirty-limit-period', 'features': ['unstable'] }, + 'vcpu-dirty-limit'] } ## # @MigrateSetParameters: @@ -797,6 +848,9 @@ # # @decompress-threads: decompression thread count # +# @compress-method: Set compression method to use in multi-thread compression. +# Defaults to none. (Since 5.0) +# # @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period # to trigger throttling. It is expressed as percentage. # The default value is 50. 
(Since 5.0) @@ -914,8 +968,16 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # +# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty limit during +# live migration. Should be in the range 1 to 1000ms, +# defaults to 1000ms. (Since 8.1) +# +# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. +# Defaults to 1. (Since 8.1) +# # Features: -# @unstable: Member @x-checkpoint-delay is experimental. +# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period +# are experimental. # # Since: 2.4 ## @@ -930,6 +992,7 @@ '*compress-threads': 'uint8', '*compress-wait-thread': 'bool', '*decompress-threads': 'uint8', + '*compress-method': 'CompressMethod', '*throttle-trigger-threshold': 'uint8', '*cpu-throttle-initial': 'uint8', '*cpu-throttle-increment': 'uint8', @@ -949,7 +1012,10 @@ '*multifd-compression': 'MultiFDCompression', '*multifd-zlib-level': 'uint8', '*multifd-zstd-level': 'uint8', - '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } } + '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], + '*x-vcpu-dirty-limit-period': { 'type': 'uint64', + 'features': [ 'unstable' ] }, + '*vcpu-dirty-limit': 'uint64'} } ## # @migrate-set-parameters: @@ -995,6 +1061,9 @@ # # @decompress-threads: decompression thread count # +# @compress-method: Which multi-thread compression method to use. +# Defaults to none. (Since 5.0) +# # @throttle-trigger-threshold: The ratio of bytes_dirty_period and bytes_xfer_period # to trigger throttling. It is expressed as percentage. # The default value is 50. (Since 5.0) @@ -1114,8 +1183,16 @@ # block device name if there is one, and to their node name # otherwise. (Since 5.2) # +# @x-vcpu-dirty-limit-period: Periodic time (in milliseconds) of dirty limit during +# live migration. Should be in the range 1 to 1000ms, +# defaults to 1000ms. (Since 8.1) +# +# @vcpu-dirty-limit: Dirtyrate limit (MB/s) during live migration. +# Defaults to 1. (Since 8.1) +# # Features: -# @unstable: Member @x-checkpoint-delay is experimental. +# @unstable: Members @x-checkpoint-delay and @x-vcpu-dirty-limit-period +# are experimental. 
# # Since: 2.4 ## @@ -1128,6 +1205,7 @@ '*compress-threads': 'uint8', '*compress-wait-thread': 'bool', '*decompress-threads': 'uint8', + '*compress-method': 'CompressMethod', '*throttle-trigger-threshold': 'uint8', '*cpu-throttle-initial': 'uint8', '*cpu-throttle-increment': 'uint8', @@ -1147,7 +1225,10 @@ '*multifd-compression': 'MultiFDCompression', '*multifd-zlib-level': 'uint8', '*multifd-zstd-level': 'uint8', - '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ] } } + '*block-bitmap-mapping': [ 'BitmapMigrationNodeAlias' ], + '*x-vcpu-dirty-limit-period': { 'type': 'uint64', + 'features': [ 'unstable' ] }, + '*vcpu-dirty-limit': 'uint64'} } ## # @query-migrate-parameters: # @@ -1259,6 +1340,43 @@ { 'event': 'MIGRATION_PASS', 'data': { 'pass': 'int' } } +## +# @MIGRATION_MULTIFD_PID: +# +# Emitted when a multifd thread appears +# +# @pid: pid of the multifd thread +# +# Since: EulerOS Virtual +## +{ 'event': 'MIGRATION_MULTIFD_PID', + 'data': { 'pid': 'int' } } + +## +# @MIGRATION_PID: +# +# Emitted when the migration thread appears +# +# @pid: pid of the migration thread +# +# Since: EulerOS Virtual +## +{ 'event': 'MIGRATION_PID', + 'data': { 'pid': 'int' } } + +## +# @MIGRATION_COMPRESS_PID: +# +# Emitted when a compress thread appears +# +# @pid: pid of the compress thread +# +# Since: 6.2 +## +{ 'event': 'MIGRATION_COMPRESS_PID', + 'data': { 'pid': 'int' } } + + ## # @COLOMessage: # @@ -1849,6 +1967,86 @@ ## { 'command': 'query-dirty-rate', 'returns': 'DirtyRateInfo' } +## +# @DirtyLimitInfo: +# +# Dirty page rate limit information of a virtual CPU. +# +# @cpu-index: index of a virtual CPU. +# +# @limit-rate: upper limit of dirty page rate (MB/s) for a virtual +# CPU, 0 means unlimited. +# +# @current-rate: current dirty page rate (MB/s) for a virtual CPU. +# +# Since: 6.2 +# +## +{ 'struct': 'DirtyLimitInfo', + 'data': { 'cpu-index': 'int', + 'limit-rate': 'uint64', + 'current-rate': 'uint64' } } + +## +# @set-vcpu-dirty-limit: +# +# Set the upper limit of dirty page rate for virtual CPUs. +# +# Requires KVM with accelerator property "dirty-ring-size" set. +# A virtual CPU's dirty page rate is a measure of its memory load. +# To observe dirty page rates, use @calc-dirty-rate. +# +# @cpu-index: index of a virtual CPU, default is all. +# +# @dirty-rate: upper limit of dirty page rate (MB/s) for virtual CPUs. +# +# Since: 6.2 +# +# Example: +# {"execute": "set-vcpu-dirty-limit", +# "arguments": { "dirty-rate": 200, +# "cpu-index": 1 } } +# +## +{ 'command': 'set-vcpu-dirty-limit', + 'data': { '*cpu-index': 'int', + 'dirty-rate': 'uint64' } } + +## +# @cancel-vcpu-dirty-limit: +# +# Cancel the upper limit of dirty page rate for virtual CPUs. +# +# Cancel the dirty page limit for the vCPU which has been set with +# set-vcpu-dirty-limit command. Note that this command requires +# dirty ring support, the same as "set-vcpu-dirty-limit". +# +# @cpu-index: index of a virtual CPU, default is all. +# +# Since: 6.2 +# +# Example: +# {"execute": "cancel-vcpu-dirty-limit", +# "arguments": { "cpu-index": 1 } } +# +## +{ 'command': 'cancel-vcpu-dirty-limit', + 'data': { '*cpu-index': 'int'} } + +## +# @query-vcpu-dirty-limit: +# +# Returns information about virtual CPU dirty page rate limits, if any.
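For reference, the three dirty-limit commands documented above correspond to the qmp_set_vcpu_dirty_limit(), qmp_cancel_vcpu_dirty_limit() and qmp_query_vcpu_dirty_limit() handlers implemented in softmmu/dirtylimit.c later in this patch. A minimal C usage sketch mirroring the example arguments::

    /* Sketch: limit vCPU 1 to 200 MB/s, then cancel the limit again */
    Error *err = NULL;

    qmp_set_vcpu_dirty_limit(true, 1, 200, &err); /* has_cpu_index, index, MB/s */
    if (err) {
        error_report_err(err);
        return;
    }

    qmp_cancel_vcpu_dirty_limit(true, 1, &err);   /* cancel for vCPU 1 only */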
+# +# Since: 6.2 +# +# Example: +# {"execute": "query-vcpu-dirty-limit"} +# +## +{ 'command': 'query-vcpu-dirty-limit', + 'returns': [ 'DirtyLimitInfo' ] } + ## # @snapshot-save: # diff --git a/qapi/misc-target.json b/qapi/misc-target.json index 5aa2b95b7d4aa09a4b11a9bbabafa2b30ce4f2ae..63cebef5733c470bbec2e83a7383ec6d31a4b7dd 100644 --- a/qapi/misc-target.json +++ b/qapi/misc-target.json @@ -33,6 +33,7 @@ 'TARGET_PPC64', 'TARGET_S390X', 'TARGET_SH4', + 'TARGET_LOONGARCH64', 'TARGET_SPARC' ] } } ## @@ -337,6 +338,21 @@ 'if': 'TARGET_ARM' } +## +# @SGXEPCSection: +# +# Information about an Intel SGX EPC section +# +# @node: the NUMA node +# +# @size: the size of the EPC section +# +# Since: 7.0 +## +{ 'struct': 'SGXEPCSection', + 'data': { 'node': 'int', + 'size': 'uint64'}} + ## # @SGXInfo: # @@ -351,6 +367,12 @@ # @flc: true if FLC is supported # # @section-size: The EPC section size for guest +# Redundant with @sections. Just for backward compatibility. +# +# @sections: The EPC sections info for guest (Since: 7.0) +# +# Features: +# @deprecated: Member @section-size is deprecated. Use @sections instead. # # Since: 6.2 ## @@ -359,7 +381,9 @@ 'sgx1': 'bool', 'sgx2': 'bool', 'flc': 'bool', - 'section-size': 'uint64'}, + 'section-size': { 'type': 'uint64', + 'features': [ 'deprecated' ] }, + 'sections': ['SGXEPCSection']}, 'if': 'TARGET_I386' } ## @@ -375,7 +399,9 @@ # # -> { "execute": "query-sgx" } # <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, -# "flc": true, "section-size" : 0 } } +# "flc": true, "section-size" : 96468992, +# "sections": [{"node": 0, "size": 67108864}, +# {"node": 1, "size": 29360128}]} } # ## { 'command': 'query-sgx', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } @@ -393,7 +419,9 @@ # # -> { "execute": "query-sgx-capabilities" } # <- { "return": { "sgx": true, "sgx1" : true, "sgx2" : true, -# "flc": true, "section-size" : 0 } } +# "flc": true, "section-size" : 96468992, +# "sections" : [{"node": 0, "size": 67108864}, +# {"node": 1, "size": 29360128}]} } # ## { 'command': 'query-sgx-capabilities', 'returns': 'SGXInfo', 'if': 'TARGET_I386' } diff --git a/qapi/misc.json b/qapi/misc.json index 358548abe1adc80bbd252c1d38a84fb3c282a92d..5b6d653682e1cd51760368fef90bb46bd923c767 100644 --- a/qapi/misc.json +++ b/qapi/misc.json @@ -527,3 +527,12 @@ 'data': { '*option': 'str' }, 'returns': ['CommandLineOptionInfo'], 'allow-preconfig': true } + +## +# @query-rtc-date-diff: +# +# Get the VM's time offset +# +# Since: 2.8 +## +{ 'command': 'query-rtc-date-diff', 'returns': 'int64' } diff --git a/qapi/net.json b/qapi/net.json index 7fab2e7cd8aa5f5c504d3bad0c1e3d67037f117e..a38a7b611b11bf9644a8a5534641fc7fe121796a 100644 --- a/qapi/net.json +++ b/qapi/net.json @@ -442,15 +442,25 @@ # @vhostdev: path of vhost-vdpa device # (default:'/dev/vhost-vdpa-0') # +# @vhostfd: file descriptor of an already opened vhost vdpa device +# # @queues: number of queues to be created for multiqueue vhost-vdpa # (default: 1) # +# @x-svq: Start device with (experimental) shadow virtqueue. (Since 7.1) +# (default: false) +# +# Features: +# @unstable: Member @x-svq is experimental.
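A plausible sketch of how the new @vhostfd option could be resolved alongside @vhostdev; monitor_fd_param() and qemu_open() are the usual QEMU primitives for this, though the actual net/vhost-vdpa.c wiring may differ::

    /* Sketch: open the vhost-vdpa device either by path or by passed-in fd */
    static int vhost_vdpa_get_fd(const NetdevVhostVDPAOptions *opts, Error **errp)
    {
        if (opts->has_vhostfd) {
            /* fd passed via 'getfd'/SCM_RIGHTS, or a plain numeric string */
            return monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
        }
        return qemu_open(opts->vhostdev, O_RDWR, errp);
    }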
+# # Since: 5.1 ## { 'struct': 'NetdevVhostVDPAOptions', 'data': { '*vhostdev': 'str', - '*queues': 'int' } } + '*vhostfd': 'str', + '*queues': 'int', + '*x-svq': {'type': 'bool', 'features' : [ 'unstable'] } } } ## # @NetClientDriver: diff --git a/qapi/pragma.json b/qapi/pragma.json index 3bc0335d1f9a4d7d6dd5337ddae65556f89b3e98..b37f6de4452f39ccb570b2b1a09a3b4fb9111a06 100644 --- a/qapi/pragma.json +++ b/qapi/pragma.json @@ -26,7 +26,8 @@ 'qom-get', 'query-tpm-models', 'query-tpm-types', - 'ringbuf-read' ], + 'ringbuf-read', + 'query-rtc-date-diff' ], # Externally visible types whose member names may use uppercase 'member-name-exceptions': [ # visible in: 'ACPISlotType', # query-acpi-ospm-status diff --git a/qapi/qdev.json b/qapi/qdev.json index 69656b14df2a956b454c142f4f09a3df383f2f76..ca96a0c6eb3cd97170875bd7f3ec51d206611293 100644 --- a/qapi/qdev.json +++ b/qapi/qdev.json @@ -47,12 +47,12 @@ # # Notes: # -# Additional arguments depend on the type. +# 1. Additional arguments depend on the type. # -# 1. For detailed information about this command, please refer to the +# 2. For detailed information about this command, please refer to the # 'docs/qdev-device-use.txt' file. # -# 2. It's possible to list device properties by running QEMU with the +# 3. It's possible to list device properties by running QEMU with the # "-device DEVICE,help" command-line argument, where DEVICE is the # device's name # diff --git a/qapi/qmp-dispatch.c b/qapi/qmp-dispatch.c index d378bccac73b4886f8ba1b9e5b14e50fc6a80dd3..e9ea5a70d4870d9347090a25adbbae3ce228a1f4 100644 --- a/qapi/qmp-dispatch.c +++ b/qapi/qmp-dispatch.c @@ -25,6 +25,8 @@ #include "qapi/qmp/qbool.h" #include "qemu/coroutine.h" #include "qemu/main-loop.h" +#include "qemu/log.h" +#include "qapi/qmp/qstring.h" Visitor *qobject_input_visitor_new_qmp(QObject *obj) { @@ -147,6 +149,7 @@ QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request, QObject *id; QObject *ret = NULL; QDict *rsp = NULL; + GString *json; dict = qobject_to(QDict, request); if (!dict) { @@ -204,6 +207,19 @@ QDict *qmp_dispatch(const QmpCommandList *cmds, QObject *request, qobject_ref(args); } + json = qobject_to_json(QOBJECT(args)); + if (json) { + if ((strcmp(command, "query-block-jobs") != 0) + && (strcmp(command, "query-migrate") != 0) + && (strcmp(command, "query-blockstats") != 0) + && (strcmp(command, "query-balloon") != 0) + && (strcmp(command, "set_password") != 0)) { + qemu_log("qmp_cmd_name: %s, arguments: %s\n", + command, json->str); + } + g_string_free(json, true); + } + assert(!(oob && qemu_in_coroutine())); assert(monitor_cur() == NULL); if (!!(cmd->options & QCO_COROUTINE) == qemu_in_coroutine()) { diff --git a/qapi/qom.json b/qapi/qom.json index eeb5395ff3b765d6bbaa56df6e786e28d3112121..a0450f7494bd2cb883307ebacba3dd9feae1b04c 100644 --- a/qapi/qom.json +++ b/qapi/qom.json @@ -785,6 +785,30 @@ 'reduced-phys-bits': 'uint32', '*kernel-hashes': 'bool' } } +## +# @TmmGuestMeasurementAlgo: +# +# Algorithm to use for cvm measurements +# +# Since: FIXME +## +{ 'enum': 'TmmGuestMeasurementAlgo', +'data': ['default', 'sha256', 'sha512'] } + +## +# @TmmGuestProperties: +# +# Properties for tmm-guest objects. 
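Looking back at the qmp_dispatch() logging hunk above, the strcmp() chain that keeps noisy queries out of the log could equally be written as a table scan; a sketch of that equivalent form, shown only to make the filter's intent explicit::

    /* Sketch: table-driven form of the command-logging filter above */
    static const char *const qmp_log_suppressed[] = {
        "query-block-jobs", "query-migrate", "query-blockstats",
        "query-balloon", "set_password",
    };

    static bool qmp_should_log(const char *command)
    {
        for (size_t i = 0; i < ARRAY_SIZE(qmp_log_suppressed); i++) {
            if (strcmp(command, qmp_log_suppressed[i]) == 0) {
                return false; /* frequent or sensitive commands stay unlogged */
            }
        }
        return true;
    }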
+# +# @sve-vector-length: SVE vector length (default: 0, SVE disabled) +# +# @num-pmu-counters: number of PMU counters +# +# @measurement-algo: algorithm to use for measurements +# +# Since: FIXME +## +{ 'struct': 'TmmGuestProperties', + 'data': { '*sve-vector-length': 'uint32', + '*num-pmu-counters': 'uint32', + '*measurement-algo': 'TmmGuestMeasurementAlgo' } } + ## # @ObjectType: # @@ -842,7 +866,8 @@ 'tls-creds-psk', 'tls-creds-x509', 'tls-cipher-suites', - { 'name': 'x-remote-object', 'features': [ 'unstable' ] } + { 'name': 'x-remote-object', 'features': [ 'unstable' ] }, + 'tmm-guest' ] } ## @@ -905,7 +930,8 @@ 'tls-creds-psk': 'TlsCredsPskProperties', 'tls-creds-x509': 'TlsCredsX509Properties', 'tls-cipher-suites': 'TlsCredsProperties', - 'x-remote-object': 'RemoteObjectProperties' + 'x-remote-object': 'RemoteObjectProperties', + 'tmm-guest': 'TmmGuestProperties' } } ## diff --git a/qapi/run-state.json b/qapi/run-state.json index 43d66d700fcd202e50b33c9743eee8e9f9f7a98b..08c38b2c67bfa894313d068415cd415bd9c2a379 100644 --- a/qapi/run-state.json +++ b/qapi/run-state.json @@ -638,3 +638,20 @@ { 'struct': 'MemoryFailureFlags', 'data': { 'action-required': 'bool', 'recursive': 'bool'} } + +## +# @NotifyVmexitOption: +# +# An enumeration of the options specified when enabling notify VM exit +# +# @run: enable the feature, do nothing and continue if the notify VM exit happens. +# +# @internal-error: enable the feature, raise an internal error if the notify +# VM exit happens. +# +# @disable: disable the feature. +# +# Since: 7.2 +## +{ 'enum': 'NotifyVmexitOption', + 'data': [ 'run', 'internal-error', 'disable' ] } \ No newline at end of file diff --git a/qemu-edid.c b/qemu-edid.c index c3a9fba10dc494e404eb461f34bec369b950bccf..20c958d9c7eb627075ebe8982957ce01ce2b7950 100644 --- a/qemu-edid.c +++ b/qemu-edid.c @@ -10,8 +10,8 @@ #include "hw/display/edid.h" static qemu_edid_info info = { - .prefx = 1024, - .prefy = 768, + .prefx = 1280, + .prefy = 800, }; static void usage(FILE *out) diff --git a/qemu-img-cmds.hx b/qemu-img-cmds.hx index 72bcdcfbfad9e32eb4fae431d52d3ef0137058fe..ec6aa2886a7f50b69ae59b91f2d7b004474470a0 100644 --- a/qemu-img-cmds.hx +++ b/qemu-img-cmds.hx @@ -52,9 +52,9 @@ SRST ERST DEF("create", img_create, - "create [--object objectdef] [-q] [-f fmt] [-b backing_file] [-F backing_fmt] [-u] [-o options] filename [size]") + "create [--object objectdef] [-q] [-f fmt] [-b backing_file] [-F backing_fmt] [-u] [-t cache] [-o options] filename [size]") SRST -..
option:: create [--object OBJECTDEF] [-q] [-f FMT] [-b BACKING_FILE] [-F BACKING_FMT] [-u] [-t CACHE] [-o OPTIONS] FILENAME [SIZE] ERST DEF("dd", img_dd, diff --git a/qemu-img.c b/qemu-img.c index f036a1d428db21205ded31bd3035d7295cf3582b..059bf42fc1bd7919ad2c0459e504afec7f9bf0b4 100644 --- a/qemu-img.c +++ b/qemu-img.c @@ -496,6 +496,22 @@ static int64_t cvtnum(const char *name, const char *value) return cvtnum_full(name, value, 0, INT64_MAX); } +static bool is_reg_file(const char *filename) +{ + struct stat st; + + /* file does not exist; it will be created later, so treat it as a regular file */ + if (access(filename, F_OK) == -1) { + return true; + } + + /* file exists, check its type */ + if (stat(filename, &st) >= 0 && S_ISREG(st.st_mode)) { + return true; + } + return false; +} + static int img_create(int argc, char **argv) { int c; @@ -504,6 +520,7 @@ static int img_create(int argc, char **argv) const char *base_fmt = NULL; const char *filename; const char *base_filename = NULL; + const char *cache = BDRV_DEFAULT_CACHE; char *options = NULL; Error *local_err = NULL; bool quiet = false; @@ -515,7 +532,7 @@ static int img_create(int argc, char **argv) {"object", required_argument, 0, OPTION_OBJECT}, {0, 0, 0, 0} }; - c = getopt_long(argc, argv, ":F:b:f:ho:qu", + c = getopt_long(argc, argv, ":F:b:f:t:ho:qu", long_options, NULL); if (c == -1) { break; @@ -539,6 +556,9 @@ static int img_create(int argc, char **argv) case 'f': fmt = optarg; break; + case 't': + cache = optarg; + break; case 'o': if (accumulate_options(&options, optarg) < 0) { goto fail; @@ -582,6 +602,16 @@ static int img_create(int argc, char **argv) error_exit("Unexpected argument: %s", argv[optind]); } + if (is_reg_file(filename)) { + if (!options) { + options = g_strdup_printf(BLOCK_OPT_CACHE"=%s", cache); + } else { + char *old_options = options; + options = g_strdup_printf("%s,"BLOCK_OPT_CACHE"=%s", options, cache); + g_free(old_options); + } + } + bdrv_img_create(filename, fmt, base_filename, base_fmt, options, img_size, flags, quiet, &local_err); if (local_err) { diff --git a/qemu-keymap.c b/qemu-keymap.c index 536e8f2385d7e6cce64d777d029c516613d215cf..4095b654a6073cfd9722d863e5d45e0d0af0d520 100644 --- a/qemu-keymap.c +++ b/qemu-keymap.c @@ -187,6 +187,7 @@ int main(int argc, char *argv[]) } fprintf(outfile, + "# SPDX-License-Identifier: GPL-2.0-or-later\n" "#\n" "# generated by qemu-keymap\n" "# model : %s\n" diff --git a/qemu-nbd.c b/qemu-nbd.c index c6c20df68a4dcb49bd47d02bcb5a674e8b9ec729..69fd101cd179eaf6884deaedfb750eae25a4d7e8 100644 --- a/qemu-nbd.c +++ b/qemu-nbd.c @@ -363,7 +363,9 @@ static void nbd_accept(QIONetListener *listener, QIOChannelSocket *cioc, nb_fds++; nbd_update_server_watch(); - nbd_client_new(cioc, tlscreds, tlsauthz, nbd_client_closed); + /* TODO - expose handshake timeout as command line option */ + nbd_client_new(cioc, NBD_DEFAULT_HANDSHAKE_MAX_SECS, + tlscreds, tlsauthz, nbd_client_closed, NULL); } static void nbd_update_server_watch(void) @@ -561,7 +563,8 @@ int main(int argc, char **argv) pthread_t client_thread; const char *fmt = NULL; Error *local_err = NULL; - BlockdevDetectZeroesOptions detect_zeroes = BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF; + BlockdevDetectZeroesOptions detect_zeroes = + BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF; QDict *options = NULL; const char *export_name = NULL; /* defaults to "" later for server mode */ const char *export_description = NULL; @@ -800,6 +803,10 @@ int main(int argc, char **argv) trace_init_file(); qemu_set_log(LOG_TRACE); + if (!seen_aio && (flags &
BDRV_O_NOCACHE)) { + flags |= BDRV_O_NATIVE_AIO; + } + socket_activation = check_socket_activation(); if (socket_activation == 0) { setup_address_and_port(&bindto, &port); diff --git a/qemu-options.hx b/qemu-options.hx index ae2c6dbbfc005c526026604d988612157dd5a7fd..ab82ad4150d8cb02ba67673843a2f0a67692b2e9 100644 --- a/qemu-options.hx +++ b/qemu-options.hx @@ -127,11 +127,11 @@ SRST ERST DEF("M", HAS_ARG, QEMU_OPTION_M, - " sgx-epc.0.memdev=memid\n", + " sgx-epc.0.memdev=memid,sgx-epc.0.node=numaid\n", QEMU_ARCH_ALL) SRST -``sgx-epc.0.memdev=@var{memid}`` +``sgx-epc.0.memdev=@var{memid},sgx-epc.0.node=@var{numaid}`` Define an SGX EPC section. ERST @@ -152,6 +152,7 @@ DEF("accel", HAS_ARG, QEMU_OPTION_accel, " split-wx=on|off (enable TCG split w^x mapping)\n" " tb-size=n (TCG translation block cache size)\n" " dirty-ring-size=n (KVM dirty ring GFN count, default 0)\n" + " notify-vmexit=run|internal-error|disable,notify-window=n (enable notify VM exit and set notify window, x86 only)\n" " thread=single|multi (enable multi-threaded TCG)\n", QEMU_ARCH_ALL) SRST ``-accel name[,prop=value[,...]]`` @@ -203,20 +204,43 @@ SRST is disabled (dirty-ring-size=0). When enabled, KVM will instead record dirty pages in a bitmap. + ``notify-vmexit=run|internal-error|disable,notify-window=n`` + Enables or disables notify VM exit support on an x86 host and specifies + the corresponding notify window to trigger the VM exit if enabled. + The ``run`` option enables the feature; QEMU does nothing and continues + if the exit happens. The ``internal-error`` option enables the feature + and raises an internal error. The ``disable`` option doesn't enable the feature. + This feature can mitigate CPU stuck issues that occur when event windows + don't open up for a specified period of time (i.e. the notify-window). + Default: notify-vmexit=run,notify-window=0. + ERST DEF("smp", HAS_ARG, QEMU_OPTION_smp, - "-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]\n" - " set the number of CPUs to 'n' [default=1]\n" + "-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,clusters=clusters][,cores=cores][,threads=threads]\n" + " set the number of initial CPUs to 'n' [default=1]\n" " maxcpus= maximum number of total CPUs, including\n" " offline CPUs for hotplug, etc\n" - " sockets= number of discrete sockets in the system\n" - " dies= number of CPU dies on one socket (for PC only)\n" - " cores= number of CPU cores on one socket (for PC, it's on one die)\n" - " threads= number of threads on one CPU core\n", - QEMU_ARCH_ALL) + " sockets= number of sockets on the machine board\n" + " dies= number of dies in one socket\n" + " clusters= number of clusters in one die\n" + " cores= number of cores in one cluster\n" + " threads= number of threads in one core\n" + "Note: Different machines may have different subsets of the CPU topology\n" + " parameters supported, so the actual meaning of the supported parameters\n" + " will vary accordingly. For example, for a machine type that supports a\n" + " three-level CPU hierarchy of sockets/cores/threads, the parameters will\n" + " sequentially mean as below:\n" + " sockets means the number of sockets on the machine board\n" + " cores means the number of cores in one socket\n" + " threads means the number of threads in one core\n" + " For a particular machine type board, an expected CPU topology hierarchy\n" + " can be defined through the supported sub-option.
Unsupported parameters\n" + " can also be provided in addition to the sub-option, but their values\n" + " must be set to 1 for correct parsing.\n", + QEMU_ARCH_ALL) SRST -``-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,cores=cores][,threads=threads]`` +``-smp [[cpus=]n][,maxcpus=maxcpus][,sockets=sockets][,dies=dies][,clusters=clusters][,cores=cores][,threads=threads]`` Simulate a SMP system with '\ ``n``\ ' CPUs initially present on the machine type board. On boards supporting CPU hotplug, the optional '\ ``maxcpus``\ ' parameter can be set to enable further CPUs to be @@ -225,27 +249,67 @@ SRST initial CPU count will match the maximum number. When only one of them is given then the omitted one will be set to its counterpart's value. Both parameters may be specified, but the maximum number of CPUs must - be equal to or greater than the initial CPU count. Both parameters are - subject to an upper limit that is determined by the specific machine - type chosen. - - To control reporting of CPU topology information, the number of sockets, - dies per socket, cores per die, and threads per core can be specified. - The sum `` sockets * cores * dies * threads `` must be equal to the - maximum CPU count. CPU targets may only support a subset of the topology - parameters. Where a CPU target does not support use of a particular - topology parameter, its value should be assumed to be 1 for the purpose - of computing the CPU maximum count. + be equal to or greater than the initial CPU count. The product of the + CPU topology parameters must be equal to the maximum number of CPUs. + Both parameters are subject to an upper limit that is determined by + the specific machine type chosen. + + To control reporting of CPU topology information, values of the topology + parameters can be specified. Machines may only support a subset of the + parameters, and different machines may support different subsets, + depending on the capabilities of the corresponding CPU targets. So + for a particular machine type board, an expected topology hierarchy can + be defined through the supported sub-option. Unsupported parameters can + also be provided in addition to the sub-option, but their values must be + set to 1 for correct parsing. Either the initial CPU count, or at least one of the topology parameters must be specified. The specified parameters must be greater than zero, explicit configuration like "cpus=0" is not allowed. Values for any omitted parameters will be computed from those which are given. + + For example, the following sub-option defines a CPU topology hierarchy + (2 sockets in total on the machine, 2 cores per socket, 2 threads per + core) for a machine that only supports sockets/cores/threads. + Some members of the option can be omitted but their values will be + automatically computed: + + :: + + -smp 8,sockets=2,cores=2,threads=2,maxcpus=8 + + The following sub-option defines a CPU topology hierarchy (2 sockets + in total on the machine, 2 dies per socket, 2 cores per die, 2 threads + per core) for PC machines which support sockets/dies/cores/threads. + Some members of the option can be omitted but their values will be + automatically computed: + + :: + + -smp 16,sockets=2,dies=2,cores=2,threads=2,maxcpus=16 + + The following sub-option defines a CPU topology hierarchy (2 sockets + in total on the machine, 2 clusters per socket, 2 cores per cluster, + 2 threads per core) for ARM virt machines which support sockets/clusters + /cores/threads.
Some members of the option can be omitted but their values + will be automatically computed: + + :: + + -smp 16,sockets=2,clusters=2,cores=2,threads=2,maxcpus=16 + Historically preference was given to the coarsest topology parameters when computing missing values (ie sockets preferred over cores, which were preferred over threads), however, this behaviour is considered liable to change. Prior to 6.2 the preference was sockets over cores over threads. Since 6.2 the preference is cores over sockets over threads. + + For example, the following option defines a machine board with 2 sockets + of 1 core before 6.2 and 1 socket of 2 cores after 6.2: + + :: + + -smp 2 ERST DEF("numa", HAS_ARG, QEMU_OPTION_numa, @@ -265,7 +329,7 @@ SRST \ ``-numa cpu,node-id=node[,socket-id=x][,core-id=y][,thread-id=z]`` \ -``-numa hmat-lb,initiator=node,target=node,hierarchy=hierarchy,data-type=tpye[,latency=lat][,bandwidth=bw]`` +``-numa hmat-lb,initiator=node,target=node,hierarchy=hierarchy,data-type=type[,latency=lat][,bandwidth=bw]`` \ ``-numa hmat-cache,node-id=node,size=size,level=level[,associativity=str][,policy=str][,line=size]`` Define a NUMA node and assign RAM and VCPUs to it. Set the NUMA @@ -1664,7 +1728,7 @@ SRST directory on host is made directly accessible by guest as a pass-through file system by using the 9P network protocol for communication between host and guests, if desired even accessible, shared by several guests - simultaniously. + simultaneously. Note that ``-virtfs`` is actually just a convenience shortcut for its generalized form ``-fsdev -device virtio-9p-pci``. @@ -2275,6 +2339,10 @@ SRST host. It is possible to control the websocket listen address independently, using the syntax ``websocket``\ =host:port. + Websocket could be allowed over UNIX domain socket, using the syntax + ``websocket``\ =unix:path, where path is the location of a unix socket + to listen for connections on. + If no TLS credentials are provided, the websocket connection runs in unencrypted mode. If TLS credentials are provided, the websocket connection requires encrypted client connections. @@ -2480,7 +2548,7 @@ DEF("smbios", HAS_ARG, QEMU_OPTION_smbios, " specify SMBIOS type 17 fields\n" "-smbios type=41[,designation=str][,kind=str][,instance=%d][,pcidev=str]\n" " specify SMBIOS type 41 fields\n", - QEMU_ARCH_I386 | QEMU_ARCH_ARM) + QEMU_ARCH_I386 | QEMU_ARCH_ARM | QEMU_ARCH_LOONGARCH64) SRST ``-smbios file=binary`` Load SMBIOS entry from binary file. @@ -2675,8 +2743,10 @@ DEF("netdev", HAS_ARG, QEMU_OPTION_netdev, " configure a vhost-user network, backed by a chardev 'dev'\n" #endif #ifdef __linux__ - "-netdev vhost-vdpa,id=str,vhostdev=/path/to/dev\n" + "-netdev vhost-vdpa,id=str[,vhostdev=/path/to/dev][,vhostfd=h]\n" " configure a vhost-vdpa network,Establish a vhost-vdpa netdev\n" + " use 'vhostdev=/path/to/dev' to open a vhost vdpa device\n" + " use 'vhostfd=h' to connect to an already opened vhost vdpa device\n" #endif "-netdev hubport,id=str,hubid=n[,netdev=nd]\n" " configure a hub port on the hub with ID 'n'\n", QEMU_ARCH_ALL) @@ -3156,7 +3226,7 @@ SRST -netdev type=vhost-user,id=net0,chardev=chr0 \ -device virtio-net-pci,netdev=net0 -``-netdev vhost-vdpa,vhostdev=/path/to/dev`` +``-netdev vhost-vdpa[,vhostdev=/path/to/dev][,vhostfd=h]`` Establish a vhost-vdpa netdev. vDPA device is a device that uses a datapath which complies with @@ -3717,7 +3787,7 @@ SRST default device is ``vc`` in graphical mode and ``stdio`` in non graphical mode. 
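Returning to the -smp documentation above, the invariant is simply that the product of all topology levels equals maxcpus, with levels a machine does not support pinned to 1. A sketch of that check::

    /* Sketch of the -smp product rule: unsupported levels default to 1 */
    static bool smp_topology_matches(unsigned sockets, unsigned dies,
                                     unsigned clusters, unsigned cores,
                                     unsigned threads, unsigned maxcpus)
    {
        return sockets * dies * clusters * cores * threads == maxcpus;
    }

    /* e.g. -smp 16,sockets=2,clusters=2,cores=2,threads=2 -> 2*1*2*2*2 == 16 */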
- This option can be used several times to simulate up to 4 serial + This option can be used several times to simulate multiple serial ports. Use ``-serial none`` to disable all serial ports. diff --git a/qga/commands-posix.c b/qga/commands-posix.c index 75dbaab68ea99c920ac73523f457446e791c952e..bee7a47ed2435ade37b4db1ddda1697e101ae798 100644 --- a/qga/commands-posix.c +++ b/qga/commands-posix.c @@ -1201,7 +1201,15 @@ static void build_guest_fsinfo_for_device(char const *devpath, syspath = realpath(devpath, NULL); if (!syspath) { - error_setg_errno(errp, errno, "realpath(\"%s\")", devpath); + if (errno != ENOENT) { + error_setg_errno(errp, errno, "realpath(\"%s\")", devpath); + return; + } + + /* ENOENT: This devpath may not exist because of container config */ + if (!fs->name) { + fs->name = g_path_get_basename(devpath); + } return; } @@ -2104,10 +2112,10 @@ static void guest_suspend(SuspendMode mode, Error **errp) if (systemd_supports_mode(mode, &local_err)) { mode_supported = true; systemd_suspend(mode, &local_err); - } - if (!local_err) { - return; + if (!local_err) { + return; + } } error_free(local_err); @@ -2116,10 +2124,10 @@ static void guest_suspend(SuspendMode mode, Error **errp) if (pmutils_supports_mode(mode, &local_err)) { mode_supported = true; pmutils_suspend(mode, &local_err); - } - if (!local_err) { - return; + if (!local_err) { + return; + } } error_free(local_err); diff --git a/qga/commands-win32.c b/qga/commands-win32.c index 4e84afd83beeb48d9cfb2204dbe5a650d45e66ed..d2ca36564da18e20e3de3c5847a397b2f1c97e4f 100644 --- a/qga/commands-win32.c +++ b/qga/commands-win32.c @@ -514,7 +514,7 @@ DEFINE_GUID(GUID_DEVINTERFACE_STORAGEPORT, static GuestPCIAddress *get_pci_info(int number, Error **errp) { - HDEVINFO dev_info; + HDEVINFO dev_info = INVALID_HANDLE_VALUE; SP_DEVINFO_DATA dev_info_data; SP_DEVICE_INTERFACE_DATA dev_iface_data; HANDLE dev_file; @@ -749,7 +749,9 @@ static GuestPCIAddress *get_pci_info(int number, Error **errp) } free_dev_info: - SetupDiDestroyDeviceInfoList(dev_info); + if (dev_info != INVALID_HANDLE_VALUE) { + SetupDiDestroyDeviceInfoList(dev_info); + } out: return pci; } @@ -904,6 +906,8 @@ static GuestDiskAddressList *build_guest_disk_info(char *guid, Error **errp) DWORD last_err = GetLastError(); if (last_err == ERROR_MORE_DATA) { /* Try once more with big enough buffer */ + size = sizeof(VOLUME_DISK_EXTENTS) + + (sizeof(DISK_EXTENT) * (extents->NumberOfDiskExtents - 1)); g_free(extents); extents = g_malloc0(size); if (!DeviceIoControl( diff --git a/qga/commands.c b/qga/commands.c index 80501e4a737ced60f337059104c8438735b81fda..9fe73786fcb9cc07de6ea225e39b5564351d1da4 100644 --- a/qga/commands.c +++ b/qga/commands.c @@ -156,7 +156,7 @@ GuestExecStatus *qmp_guest_exec_status(int64_t pid, Error **errp) gei = guest_exec_info_find(pid); if (gei == NULL) { - error_setg(errp, QERR_INVALID_PARAMETER, "pid"); + error_setg(errp, "PID %" PRId64 " does not exist", pid); return NULL; } @@ -210,16 +210,16 @@ GuestExecStatus *qmp_guest_exec_status(int64_t pid, Error **errp) if (gei->out.length > 0) { ges->has_out_data = true; ges->out_data = g_base64_encode(gei->out.data, gei->out.length); - g_free(gei->out.data); ges->has_out_truncated = gei->out.truncated; } + g_free(gei->out.data); if (gei->err.length > 0) { ges->has_err_data = true; ges->err_data = g_base64_encode(gei->err.data, gei->err.length); - g_free(gei->err.data); ges->has_err_truncated = gei->err.truncated; } + g_free(gei->err.data); QTAILQ_REMOVE(&guest_exec_state.processes, gei, next); g_free(gei); diff
--git a/qga/installer/qemu-ga.wxs b/qga/installer/qemu-ga.wxs index 0950e8c6becc3b8922f68097beca977c190a39d0..11b66a22e6f45b52c27066efcb3ba9aa94693865 100644 --- a/qga/installer/qemu-ga.wxs +++ b/qga/installer/qemu-ga.wxs @@ -58,6 +58,7 @@ /> + @@ -142,22 +143,22 @@ - + diff --git a/qga/main.c b/qga/main.c index 15fd3a4149f4a5565c6bd5c9ed5d2e79ec452b34..6f09a689ac6d957299e91351cd1d691e3a6efd70 100644 --- a/qga/main.c +++ b/qga/main.c @@ -1283,7 +1283,7 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) if (g_mkdir_with_parents(config->state_dir, S_IRWXU) == -1) { g_critical("unable to create (an ancestor of) the state directory" " '%s': %s", config->state_dir, strerror(errno)); - return NULL; + goto failed; } #endif @@ -1308,7 +1308,7 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) if (!log_file) { g_critical("unable to open specified log file: %s", strerror(errno)); - return NULL; + goto failed; } s->log_file = log_file; } @@ -1319,7 +1319,7 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) s->pstate_filepath, ga_is_frozen(s))) { g_critical("failed to load persistent state"); - return NULL; + goto failed; } config->blacklist = ga_command_blacklist_init(config->blacklist); @@ -1340,7 +1340,7 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) #ifndef _WIN32 if (!register_signal_handlers()) { g_critical("failed to register signal handlers"); - return NULL; + goto failed; } #endif @@ -1353,12 +1353,20 @@ static GAState *initialize_agent(GAConfig *config, int socket_activation) s->wakeup_event = CreateEvent(NULL, TRUE, FALSE, TEXT("WakeUp")); if (s->wakeup_event == NULL) { g_critical("CreateEvent failed"); - return NULL; + goto failed; } #endif ga_state = s; return s; +failed: + g_free(s->pstate_filepath); + g_free(s->state_filepath_isfrozen); + if (s->log_file) { + fclose(s->log_file); + } + g_free(s); + return NULL; } static void cleanup_agent(GAState *s) diff --git a/qga/vss-win32/install.cpp b/qga/vss-win32/install.cpp index 40de1337744b9aef6423f2c8c3fda89f46a46642..8b7400e4e584a977535261a38413a340240cdd19 100644 --- a/qga/vss-win32/install.cpp +++ b/qga/vss-win32/install.cpp @@ -352,6 +352,15 @@ out: return hr; } +STDAPI_(void) CALLBACK DLLCOMRegister(HWND, HINSTANCE, LPSTR, int) +{ + COMRegister(); +} + +STDAPI_(void) CALLBACK DLLCOMUnregister(HWND, HINSTANCE, LPSTR, int) +{ + COMUnregister(); +} static BOOL CreateRegistryKey(LPCTSTR key, LPCTSTR value, LPCTSTR data) { @@ -513,7 +522,7 @@ namespace _com_util /* Stop QGA VSS provider service using Winsvc API */ STDAPI StopService(void) { - HRESULT hr; + HRESULT hr = S_OK; SC_HANDLE manager = OpenSCManager(NULL, NULL, SC_MANAGER_ALL_ACCESS); SC_HANDLE service = NULL; diff --git a/qga/vss-win32/meson.build b/qga/vss-win32/meson.build index 90825edef334ea2dc261c8444cdca23e551c95dd..290796556c58f058bdeff8efe44036053bf971b2 100644 --- a/qga/vss-win32/meson.build +++ b/qga/vss-win32/meson.build @@ -3,7 +3,7 @@ if add_languages('cpp', required: false) link_args = cc.get_supported_link_arguments(['-fstack-protector-all', '-fstack-protector-strong', '-Wl,--add-stdcall-alias', '-Wl,--enable-stdcall-fixup']) - qga_vss = shared_module('qga-vss', ['requester.cpp', 'provider.cpp', 'install.cpp'], + qga_vss = shared_module('qga-vss', ['requester.cpp', 'provider.cpp', 'install.cpp', 'vss-debug.cpp'], name_prefix: '', cpp_args: ['-Wno-unknown-pragmas', '-Wno-delete-non-virtual-dtor', '-Wno-non-virtual-dtor'], link_args: link_args, diff --git 
a/qga/vss-win32/qga-vss.def b/qga/vss-win32/qga-vss.def index 927782c31b3e02192b154dba47d952f5c4c3b178..ee97a814275c6405d6537c497964e6927b751bc0 100644 --- a/qga/vss-win32/qga-vss.def +++ b/qga/vss-win32/qga-vss.def @@ -1,6 +1,8 @@ LIBRARY "QGA-PROVIDER.DLL" EXPORTS + DLLCOMRegister + DLLCOMUnregister COMRegister PRIVATE COMUnregister PRIVATE DllCanUnloadNow PRIVATE diff --git a/qga/vss-win32/requester.cpp b/qga/vss-win32/requester.cpp index 940a2c8f558328becff67f527d9cf4f1917bdfb2..418b9b6e4e7936ffc4ddf2c23a84ce4e6eca9009 100644 --- a/qga/vss-win32/requester.cpp +++ b/qga/vss-win32/requester.cpp @@ -248,6 +248,7 @@ void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset) int num_fixed_drives = 0, i; int num_mount_points = 0; + VSS_BACKUP_TYPE vss_bt = get_vss_backup_type(); if (vss_ctx.pVssbc) { /* already frozen */ *num_vols = 0; return; @@ -294,7 +295,7 @@ void requester_freeze(int *num_vols, void *mountpoints, ErrorSet *errset) goto out; } - hr = vss_ctx.pVssbc->SetBackupState(true, true, VSS_BT_FULL, false); + hr = vss_ctx.pVssbc->SetBackupState(true, true, vss_bt, false); if (FAILED(hr)) { err_set(errset, hr, "failed to set backup state"); goto out; diff --git a/qga/vss-win32/vss-debug.cpp b/qga/vss-win32/vss-debug.cpp new file mode 100644 index 0000000000000000000000000000000000000000..820b1c6667a979e02fdffe894be733d3282c096c --- /dev/null +++ b/qga/vss-win32/vss-debug.cpp @@ -0,0 +1,39 @@ +/* + * QEMU Guest Agent VSS debug declarations + * + * Copyright (C) 2023 Red Hat Inc + * + * Authors: + * Konstantin Kostiuk + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "vss-debug.h" +#include "vss-common.h" + +void qga_debug_internal(const char *funcname, const char *fmt, ...) +{ + char user_string[512] = {0}; + char full_string[640] = {0}; + + va_list args; + va_start(args, fmt); + if (vsnprintf(user_string, _countof(user_string), fmt, args) <= 0) { + va_end(args); + return; + } + + va_end(args); + + if (snprintf(full_string, _countof(full_string), + QGA_PROVIDER_NAME "[%lu]: %s %s\n", + GetCurrentThreadId(), funcname, user_string) <= 0) { + return; + } + + OutputDebugString(full_string); + fputs(full_string, stderr); +} diff --git a/qga/vss-win32/vss-debug.h b/qga/vss-win32/vss-debug.h new file mode 100644 index 0000000000000000000000000000000000000000..7800457392d977a360f5e850e8ac145b9ebd5e98 --- /dev/null +++ b/qga/vss-win32/vss-debug.h @@ -0,0 +1,25 @@ +/* + * QEMU Guest Agent VSS debug declarations + * + * Copyright (C) 2023 Red Hat Inc + * + * Authors: + * Konstantin Kostiuk + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include + +#ifndef VSS_DEBUG_H +#define VSS_DEBUG_H + +void qga_debug_internal(const char *funcname, const char *fmt, ...) G_GNUC_PRINTF(2, 3); + +#define qga_debug(fmt, ...) 
qga_debug_internal(__func__, fmt, ## __VA_ARGS__) +#define qga_debug_begin qga_debug("begin") +#define qga_debug_end qga_debug("end") + +#endif diff --git a/qom/object.c b/qom/object.c index 4f0677cca9e494a3eb20d9dabd0c69ab1b121b04..5db3974f042d863c970c54e5e625201aea69b793 100644 --- a/qom/object.c +++ b/qom/object.c @@ -1167,10 +1167,14 @@ GSList *object_class_get_list_sorted(const char *implements_type, Object *object_ref(void *objptr) { Object *obj = OBJECT(objptr); + uint32_t ref; + if (!obj) { return NULL; } - qatomic_inc(&obj->ref); + ref = qatomic_fetch_inc(&obj->ref); + /* Assert waaay before the integer overflows */ + g_assert(ref < INT_MAX); return obj; } diff --git a/replay/replay-internal.h b/replay/replay-internal.h index 97649ed8d77b0d8785f498c5ae4a26596764e402..b4238226f47b0766df0863235bc2cce69574246f 100644 --- a/replay/replay-internal.h +++ b/replay/replay-internal.h @@ -141,7 +141,7 @@ bool replay_next_event_is(int event); /*! Reads next clock value from the file. If clock kind read from the file is different from the parameter, the value is not used. */ -void replay_read_next_clock(unsigned int kind); +void replay_read_next_clock(ReplayClockKind kind); /* Asynchronous events queue */ diff --git a/replay/replay.c b/replay/replay.c index 6df2abc18c7ff3e0a757ac0e5c01a6bb3ec32f6a..2d3607998ac18100b10344b53080af7d7d9871ff 100644 --- a/replay/replay.c +++ b/replay/replay.c @@ -387,9 +387,8 @@ void replay_finish(void) g_free(replay_snapshot); replay_snapshot = NULL; - replay_mode = REPLAY_MODE_NONE; - replay_finish_events(); + replay_mode = REPLAY_MODE_NONE; } void replay_add_blocker(Error *reason) diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index cb8eff233e0656e8d64344e2325467cb8671f114..b2428e80ccfd90eb9b2f1a1563d28a3ddfa0170b 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -2858,6 +2858,14 @@ sub process { if ($line =~ /\bsignal\s*\(/ && !($line =~ /SIG_(?:IGN|DFL)/)) { ERROR("use sigaction to establish signal handlers; signal is not portable\n" . $herecurr); } +# recommend qemu_bh_new_guarded instead of qemu_bh_new + if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\bqemu_bh_new\s*\(/) { + ERROR("use qemu_bh_new_guarded() instead of qemu_bh_new() to avoid reentrancy problems\n" . $herecurr); + } +# recommend aio_bh_new_guarded instead of aio_bh_new + if ($realfile =~ /.*\/hw\/.*/ && $line =~ /\baio_bh_new\s*\(/) { + ERROR("use aio_bh_new_guarded() instead of aio_bh_new() to avoid reentrancy problems\n" . $herecurr); + } # check for module_init(), use category-specific init macros explicitly please if ($line =~ /^module_init\s*\(/) { ERROR("please use block_init(), type_init() etc. instead of module_init()\n" . 
$herecurr); diff --git a/scripts/entitlement.sh b/scripts/entitlement.sh index e2c956a3ac9a56eb8cece06958fb5185eb9de863..0f412949ec6c286c9655a7b0d8cd2d5f68271f9e 100755 --- a/scripts/entitlement.sh +++ b/scripts/entitlement.sh @@ -15,7 +15,7 @@ ENTITLEMENT="$4" if $in_place; then trap 'rm "$DST.tmp"' exit - cp -af "$SRC" "$DST.tmp" + cp -pPf "$SRC" "$DST.tmp" SRC="$DST.tmp" else cd "$MESON_INSTALL_DESTDIR_PREFIX" diff --git a/scripts/meson-buildoptions.sh b/scripts/meson-buildoptions.sh index 7a17ff42182fc78b3b88c0551d4108caaa0d2d9c..8c00cce41183632396acb2163f6a1df2b14c9497 100644 --- a/scripts/meson-buildoptions.sh +++ b/scripts/meson-buildoptions.sh @@ -25,6 +25,9 @@ meson_options_help() { printf "%s\n" ' alsa ALSA sound support' printf "%s\n" ' attr attr/xattr support' printf "%s\n" ' auth-pam PAM access control' + printf "%s\n" ' avx2 AVX2 optimizations' + printf "%s\n" ' avx512bw AVX512BW optimizations' + printf "%s\n" ' avx512f AVX512F optimizations' printf "%s\n" ' bpf eBPF support' printf "%s\n" ' brlapi brlapi character device driver' printf "%s\n" ' bzip2 bzip2 support for DMG images' @@ -107,6 +110,12 @@ _meson_option_parse() { --disable-attr) printf "%s" -Dattr=disabled ;; --enable-auth-pam) printf "%s" -Dauth_pam=enabled ;; --disable-auth-pam) printf "%s" -Dauth_pam=disabled ;; + --enable-avx2) printf "%s" -Davx2=enabled ;; + --disable-avx2) printf "%s" -Davx2=disabled ;; + --enable-avx512bw) printf "%s" -Davx512bw=enabled ;; + --disable-avx512bw) printf "%s" -Davx512bw=disabled ;; + --enable-avx512f) printf "%s" -Davx512f=enabled ;; + --disable-avx512f) printf "%s" -Davx512f=disabled ;; --enable-bpf) printf "%s" -Dbpf=enabled ;; --disable-bpf) printf "%s" -Dbpf=disabled ;; --enable-brlapi) printf "%s" -Dbrlapi=enabled ;; diff --git a/scripts/minikconf.py b/scripts/minikconf.py index bcd91015d34a69f793efab42c6915d6348e30abb..6f7f43b291816d2549eb85c69d253605b83103ee 100644 --- a/scripts/minikconf.py +++ b/scripts/minikconf.py @@ -112,7 +112,7 @@ def has_value(self): def set_value(self, val, clause): self.clauses_for_var.append(clause) if self.has_value() and self.value != val: - print("The following clauses were found for " + self.name) + print("The following clauses were found for " + self.name, file=sys.stderr) for i in self.clauses_for_var: print(" " + str(i), file=sys.stderr) raise KconfigDataError('contradiction between clauses when setting %s' % self) diff --git a/scripts/qemu-binfmt-conf.sh b/scripts/qemu-binfmt-conf.sh index 7de996d536eaf9c41255ae9695a57ee3069d96c0..e9bfeb94d3140261e0b3e043a2c8773d63ef93af 100755 --- a/scripts/qemu-binfmt-conf.sh +++ b/scripts/qemu-binfmt-conf.sh @@ -340,7 +340,9 @@ PERSISTENT=no PRESERVE_ARG0=no QEMU_SUFFIX="" -options=$(getopt -o ds:Q:S:e:hc:p:g: -l debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,persistent:,preserve-argv0: -- "$@") +_longopts="debian,systemd:,qemu-path:,qemu-suffix:,exportdir:,help,credential:,\ +persistent:,preserve-argv0:" +options=$(getopt -o ds:Q:S:e:hc:p:g:F: -l ${_longopts} -- "$@") eval set -- "$options" while true ; do diff --git a/scripts/simplebench/bench-example.py b/scripts/simplebench/bench-example.py index 4864435f396f9b7e49cad2c38bf4119ad6d1af20..fc370691e04f73a472c58e6141619e30548c76f9 100644 --- a/scripts/simplebench/bench-example.py +++ b/scripts/simplebench/bench-example.py @@ -25,7 +25,7 @@ def bench_func(env, case): """ Handle one "cell" of benchmarking table. 
""" - return bench_block_copy(env['qemu_binary'], env['cmd'], {} + return bench_block_copy(env['qemu_binary'], env['cmd'], {}, case['source'], case['target']) diff --git a/scripts/tracetool/__init__.py b/scripts/tracetool/__init__.py index 5bc94d95cfc760e41d0c7d55c1c03d6715981c32..630e85a5d687d1ec029bb7a6976ace406700710b 100644 --- a/scripts/tracetool/__init__.py +++ b/scripts/tracetool/__init__.py @@ -94,7 +94,7 @@ def out(*lines, **kwargs): def validate_type(name): bits = name.split(" ") for bit in bits: - bit = re.sub("\*", "", bit) + bit = re.sub(r"\*", "", bit) if bit == "": continue if bit == "const": diff --git a/scripts/vmstate-static-checker.py b/scripts/vmstate-static-checker.py index 539ead62b498202fc40b42bff05f133469ed4bb8..6838bf7e7c8de3402385a3a85a309393e7a79a3a 100755 --- a/scripts/vmstate-static-checker.py +++ b/scripts/vmstate-static-checker.py @@ -367,7 +367,6 @@ def check_machine_type(s, d): if s["Name"] != d["Name"]: print("Warning: checking incompatible machine types:", end=' ') print("\"" + s["Name"] + "\", \"" + d["Name"] + "\"") - return def main(): diff --git a/scsi/qemu-pr-helper.c b/scsi/qemu-pr-helper.c index f281daeced8de939154e431b59645e493f1ec02d..bbb9b5774189995a612b1f440398d8a3115cb266 100644 --- a/scsi/qemu-pr-helper.c +++ b/scsi/qemu-pr-helper.c @@ -288,9 +288,12 @@ static void multipath_pr_init(void) static int is_mpath(int fd) { - struct dm_ioctl dm = { .flags = DM_NOFLUSH_FLAG }; + struct dm_ioctl dm; struct dm_target_spec *tgt; + memset(&dm, 0, sizeof(struct dm_ioctl)); + dm.flags = DM_NOFLUSH_FLAG; + tgt = dm_dev_ioctl(fd, DM_TABLE_STATUS, &dm); if (!tgt) { if (errno == ENXIO) { diff --git a/semihosting/config.c b/semihosting/config.c index 137171b717b20a2be1e44137604939f5e4e97753..3cbb94fac6cbd54094c077bcc205854b2469575d 100644 --- a/semihosting/config.c +++ b/semihosting/config.c @@ -27,6 +27,7 @@ QemuOptsList qemu_semihosting_config_opts = { .name = "semihosting-config", + .merge_lists = true, .implied_opt_name = "enable", .head = QTAILQ_HEAD_INITIALIZER(qemu_semihosting_config_opts.head), .desc = { @@ -109,12 +110,13 @@ static int add_semihosting_arg(void *opaque, void semihosting_arg_fallback(const char *file, const char *cmd) { char *cmd_token; + g_autofree char *cmd_dup = g_strdup(cmd); /* argv[0] */ add_semihosting_arg(&semihosting, "arg", file, NULL); /* split -append and initialize argv[1..n] */ - cmd_token = strtok(g_strdup(cmd), " "); + cmd_token = strtok(cmd_dup, " "); while (cmd_token) { add_semihosting_arg(&semihosting, "arg", cmd_token, NULL); cmd_token = strtok(NULL, " "); diff --git a/softmmu/balloon.c b/softmmu/balloon.c index e0e8969a4b926a89f2cc9e2cae060bac63e62428..fda7af832e40821ca0aecb0c60bc51362c3a5220 100644 --- a/softmmu/balloon.c +++ b/softmmu/balloon.c @@ -90,17 +90,17 @@ BalloonInfo *qmp_query_balloon(Error **errp) return info; } -void qmp_balloon(int64_t target, Error **errp) +void qmp_balloon(int64_t value, Error **errp) { if (!have_balloon(errp)) { return; } - if (target <= 0) { - error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "target", "a size"); + if (value <= 0) { + error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "value", "a size"); return; } - trace_balloon_event(balloon_opaque, target); - balloon_event_fn(balloon_opaque, target); + trace_balloon_event(balloon_opaque, value); + balloon_event_fn(balloon_opaque, value); } diff --git a/softmmu/cpus.c b/softmmu/cpus.c index 071085f840b02ce35327031386aec911639f2de6..38bae59024af62cf45199a42e6dbfc6dae8d79f9 100644 --- a/softmmu/cpus.c +++ b/softmmu/cpus.c @@ -259,6 
+259,8 @@ static int do_vm_stop(RunState state, bool send_stop) runstate_set(state); cpu_disable_ticks(); pause_all_vcpus(); + trace_all_vcpus_paused(); + vm_state_notify(0, state); if (send_stop) { qapi_event_send_stop(); @@ -481,6 +483,11 @@ bool qemu_mutex_iothread_locked(void) return iothread_locked; } +bool qemu_in_main_thread(void) +{ + return qemu_mutex_iothread_locked(); +} + /* * The BQL is taken from so many places that it is worth profiling the * callers directly, instead of funneling them all through a single function. @@ -689,6 +696,7 @@ int vm_prepare_start(void) cpu_enable_ticks(); runstate_set(RUN_STATE_RUNNING); vm_state_notify(1, RUN_STATE_RUNNING); + trace_all_vcpus_prepared(); return 0; } diff --git a/softmmu/device_tree.c b/softmmu/device_tree.c index 3965c834ca629f04266c7389b26e57ff6a60e345..8897c79ea4347409f1f6249db73afb0d07684fb1 100644 --- a/softmmu/device_tree.c +++ b/softmmu/device_tree.c @@ -556,7 +556,6 @@ int qemu_fdt_add_subnode(void *fdt, const char *name) int qemu_fdt_add_path(void *fdt, const char *path) { const char *name; - const char *p = path; int namelen, retval; int parent = 0; @@ -564,10 +563,10 @@ int qemu_fdt_add_path(void *fdt, const char *path) return -1; } - while (p) { - name = p + 1; - p = strchr(name, '/'); - namelen = p != NULL ? p - name : strlen(name); + do { + name = path + 1; + path = strchr(name, '/'); + namelen = path != NULL ? path - name : strlen(name); retval = fdt_subnode_offset_namelen(fdt, parent, name, namelen); if (retval < 0 && retval != -FDT_ERR_NOTFOUND) { @@ -584,7 +583,7 @@ int qemu_fdt_add_path(void *fdt, const char *path) } parent = retval; - } + } while (path); return retval; } diff --git a/softmmu/dirtylimit.c b/softmmu/dirtylimit.c new file mode 100644 index 0000000000000000000000000000000000000000..5e6e5aba679368d074864a6d24daa615d4da30f7 --- /dev/null +++ b/softmmu/dirtylimit.c @@ -0,0 +1,679 @@ +/* + * Dirty page rate limit implementation code + * + * Copyright (c) 2022 CHINA TELECOM CO.,LTD. + * + * Authors: + * Hyman Huang(黄勇) + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/main-loop.h" +#include "qapi/qapi-commands-migration.h" +#include "qapi/qmp/qdict.h" +#include "qapi/error.h" +#include "sysemu/dirtyrate.h" +#include "sysemu/dirtylimit.h" +#include "monitor/hmp.h" +#include "monitor/monitor.h" +#include "exec/memory.h" +#include "hw/boards.h" +#include "sysemu/kvm.h" +#include "trace.h" +#include "migration/misc.h" +#include "migration/migration.h" + +/* + * Dirtylimit stop working if dirty page rate error + * value less than DIRTYLIMIT_TOLERANCE_RANGE + */ +#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */ +/* + * Plus or minus vcpu sleep time linearly if dirty + * page rate error value percentage over + * DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT. + * Otherwise, plus or minus a fixed vcpu sleep time. + */ +#define DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT 50 +/* + * Max vcpu sleep time percentage during a cycle + * composed of dirty ring full and sleep time. + */ +#define DIRTYLIMIT_THROTTLE_PCT_MAX 99 + +struct { + VcpuStat stat; + bool running; + QemuThread thread; +} *vcpu_dirty_rate_stat; + +typedef struct VcpuDirtyLimitState { + int cpu_index; + bool enabled; + /* + * Quota dirty page rate, unit is MB/s + * zero if not enabled. 
+ */ + uint64_t quota; +} VcpuDirtyLimitState; + +struct { + VcpuDirtyLimitState *states; + /* Max cpus number configured by user */ + int max_cpus; + /* Number of vcpu under dirtylimit */ + int limited_nvcpu; +} *dirtylimit_state; + +/* protect dirtylimit_state */ +static QemuMutex dirtylimit_mutex; + +/* dirtylimit thread quit if dirtylimit_quit is true */ +static bool dirtylimit_quit; + +static void vcpu_dirty_rate_stat_collect(void) +{ + MigrationState *s = migrate_get_current(); + VcpuStat stat; + int i = 0; + int64_t period = DIRTYLIMIT_CALC_TIME_MS; + + if (migrate_dirty_limit() && + migration_is_active(s)) { + period = s->parameters.x_vcpu_dirty_limit_period; + } + + /* calculate vcpu dirtyrate */ + vcpu_calculate_dirtyrate(period, + &stat, + GLOBAL_DIRTY_LIMIT, + false); + + for (i = 0; i < stat.nvcpu; i++) { + vcpu_dirty_rate_stat->stat.rates[i].id = i; + vcpu_dirty_rate_stat->stat.rates[i].dirty_rate = + stat.rates[i].dirty_rate; + } + + free(stat.rates); +} + +static void *vcpu_dirty_rate_stat_thread(void *opaque) +{ + rcu_register_thread(); + + /* start log sync */ + global_dirty_log_change(GLOBAL_DIRTY_LIMIT, true); + + while (qatomic_read(&vcpu_dirty_rate_stat->running)) { + vcpu_dirty_rate_stat_collect(); + if (dirtylimit_in_service()) { + dirtylimit_process(); + } + } + + /* stop log sync */ + global_dirty_log_change(GLOBAL_DIRTY_LIMIT, false); + + rcu_unregister_thread(); + return NULL; +} + +int64_t vcpu_dirty_rate_get(int cpu_index) +{ + DirtyRateVcpu *rates = vcpu_dirty_rate_stat->stat.rates; + return qatomic_read_i64(&rates[cpu_index].dirty_rate); +} + +void vcpu_dirty_rate_stat_start(void) +{ + if (qatomic_read(&vcpu_dirty_rate_stat->running)) { + return; + } + + qatomic_set(&vcpu_dirty_rate_stat->running, 1); + qemu_thread_create(&vcpu_dirty_rate_stat->thread, + "dirtyrate-stat", + vcpu_dirty_rate_stat_thread, + NULL, + QEMU_THREAD_JOINABLE); +} + +void vcpu_dirty_rate_stat_stop(void) +{ + qatomic_set(&vcpu_dirty_rate_stat->running, 0); + dirtylimit_state_unlock(); + qemu_mutex_unlock_iothread(); + qemu_thread_join(&vcpu_dirty_rate_stat->thread); + qemu_mutex_lock_iothread(); + dirtylimit_state_lock(); +} + +void vcpu_dirty_rate_stat_initialize(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + + vcpu_dirty_rate_stat = + g_malloc0(sizeof(*vcpu_dirty_rate_stat)); + + vcpu_dirty_rate_stat->stat.nvcpu = max_cpus; + vcpu_dirty_rate_stat->stat.rates = + g_malloc0(sizeof(DirtyRateVcpu) * max_cpus); + + vcpu_dirty_rate_stat->running = false; +} + +void vcpu_dirty_rate_stat_finalize(void) +{ + free(vcpu_dirty_rate_stat->stat.rates); + vcpu_dirty_rate_stat->stat.rates = NULL; + + free(vcpu_dirty_rate_stat); + vcpu_dirty_rate_stat = NULL; +} + +void dirtylimit_state_lock(void) +{ + qemu_mutex_lock(&dirtylimit_mutex); +} + +void dirtylimit_state_unlock(void) +{ + qemu_mutex_unlock(&dirtylimit_mutex); +} + +static void +__attribute__((__constructor__)) dirtylimit_mutex_init(void) +{ + qemu_mutex_init(&dirtylimit_mutex); +} + +static inline VcpuDirtyLimitState *dirtylimit_vcpu_get_state(int cpu_index) +{ + return &dirtylimit_state->states[cpu_index]; +} + +void dirtylimit_state_initialize(void) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + int i; + + dirtylimit_state = g_malloc0(sizeof(*dirtylimit_state)); + + dirtylimit_state->states = + g_malloc0(sizeof(VcpuDirtyLimitState) * max_cpus); + + for (i = 0; i < max_cpus; i++) { + dirtylimit_state->states[i].cpu_index = i; + } + + 
dirtylimit_state->max_cpus = max_cpus; + trace_dirtylimit_state_initialize(max_cpus); +} + +void dirtylimit_state_finalize(void) +{ + free(dirtylimit_state->states); + dirtylimit_state->states = NULL; + + free(dirtylimit_state); + dirtylimit_state = NULL; + + trace_dirtylimit_state_finalize(); +} + +bool dirtylimit_in_service(void) +{ + return !!dirtylimit_state; +} + +bool dirtylimit_vcpu_index_valid(int cpu_index) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + + return !(cpu_index < 0 || + cpu_index >= ms->smp.max_cpus); +} + +static inline int64_t dirtylimit_dirty_ring_full_time(uint64_t dirtyrate) +{ + static uint64_t max_dirtyrate; + uint32_t dirty_ring_size = kvm_dirty_ring_size(); + uint64_t dirty_ring_size_memory_MB = + dirty_ring_size * TARGET_PAGE_SIZE >> 20; + + if (max_dirtyrate < dirtyrate) { + max_dirtyrate = dirtyrate; + } + + return dirty_ring_size_memory_MB * 1000000 / max_dirtyrate; +} + +static inline bool dirtylimit_done(uint64_t quota, + uint64_t current) +{ + uint64_t min, max; + + min = MIN(quota, current); + max = MAX(quota, current); + + return ((max - min) <= DIRTYLIMIT_TOLERANCE_RANGE) ? true : false; +} + +static inline bool +dirtylimit_need_linear_adjustment(uint64_t quota, + uint64_t current) +{ + uint64_t min, max; + + min = MIN(quota, current); + max = MAX(quota, current); + + return ((max - min) * 100 / max) > DIRTYLIMIT_LINEAR_ADJUSTMENT_PCT; +} + +static void dirtylimit_set_throttle(CPUState *cpu, + uint64_t quota, + uint64_t current) +{ + int64_t ring_full_time_us = 0; + uint64_t sleep_pct = 0; + uint64_t throttle_us = 0; + + if (current == 0) { + cpu->throttle_us_per_full = 0; + return; + } + + ring_full_time_us = dirtylimit_dirty_ring_full_time(current); + + if (dirtylimit_need_linear_adjustment(quota, current)) { + if (quota < current) { + sleep_pct = (current - quota) * 100 / current; + throttle_us = + ring_full_time_us * sleep_pct / (double)(100 - sleep_pct); + cpu->throttle_us_per_full += throttle_us; + } else { + sleep_pct = (quota - current) * 100 / quota; + throttle_us = + ring_full_time_us * sleep_pct / (double)(100 - sleep_pct); + cpu->throttle_us_per_full -= throttle_us; + } + + trace_dirtylimit_throttle_pct(cpu->cpu_index, + sleep_pct, + throttle_us); + } else { + if (quota < current) { + cpu->throttle_us_per_full += ring_full_time_us / 10; + } else { + cpu->throttle_us_per_full -= ring_full_time_us / 10; + } + } + + /* + * TODO: in the big kvm_dirty_ring_size case (eg: 65536, or other scenario), + * current dirty page rate may never reach the quota, we should stop + * increasing sleep time?
+ */ + cpu->throttle_us_per_full = MIN(cpu->throttle_us_per_full, + ring_full_time_us * DIRTYLIMIT_THROTTLE_PCT_MAX); + + cpu->throttle_us_per_full = MAX(cpu->throttle_us_per_full, 0); +} + +static void dirtylimit_adjust_throttle(CPUState *cpu) +{ + uint64_t quota = 0; + uint64_t current = 0; + int cpu_index = cpu->cpu_index; + + quota = dirtylimit_vcpu_get_state(cpu_index)->quota; + current = vcpu_dirty_rate_get(cpu_index); + + if (!dirtylimit_done(quota, current)) { + dirtylimit_set_throttle(cpu, quota, current); + } + + return; +} + +void dirtylimit_process(void) +{ + CPUState *cpu; + + if (!qatomic_read(&dirtylimit_quit)) { + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_state_unlock(); + return; + } + + CPU_FOREACH(cpu) { + if (!dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled) { + continue; + } + dirtylimit_adjust_throttle(cpu); + } + dirtylimit_state_unlock(); + } +} + +void dirtylimit_change(bool start) +{ + if (start) { + qatomic_set(&dirtylimit_quit, 0); + } else { + qatomic_set(&dirtylimit_quit, 1); + } +} + +void dirtylimit_set_vcpu(int cpu_index, + uint64_t quota, + bool enable) +{ + trace_dirtylimit_set_vcpu(cpu_index, quota); + + if (enable) { + dirtylimit_state->states[cpu_index].quota = quota; + if (!dirtylimit_vcpu_get_state(cpu_index)->enabled) { + dirtylimit_state->limited_nvcpu++; + } + } else { + dirtylimit_state->states[cpu_index].quota = 0; + if (dirtylimit_state->states[cpu_index].enabled) { + dirtylimit_state->limited_nvcpu--; + } + } + + dirtylimit_state->states[cpu_index].enabled = enable; +} + +void dirtylimit_set_all(uint64_t quota, + bool enable) +{ + MachineState *ms = MACHINE(qdev_get_machine()); + int max_cpus = ms->smp.max_cpus; + int i; + + for (i = 0; i < max_cpus; i++) { + dirtylimit_set_vcpu(i, quota, enable); + } +} + +void dirtylimit_vcpu_execute(CPUState *cpu) +{ + if (dirtylimit_in_service() && + dirtylimit_vcpu_get_state(cpu->cpu_index)->enabled && + cpu->throttle_us_per_full) { + trace_dirtylimit_vcpu_execute(cpu->cpu_index, + cpu->throttle_us_per_full); + usleep(cpu->throttle_us_per_full); + } +} + +static void dirtylimit_init(void) +{ + dirtylimit_state_initialize(); + dirtylimit_change(true); + vcpu_dirty_rate_stat_initialize(); + vcpu_dirty_rate_stat_start(); +} + +static void dirtylimit_cleanup(void) +{ + vcpu_dirty_rate_stat_stop(); + vcpu_dirty_rate_stat_finalize(); + dirtylimit_change(false); + dirtylimit_state_finalize(); +} + +/* + * dirty page rate limit is not allowed to set if migration + * is running with dirty-limit capability enabled. 
+ */ +static bool dirtylimit_is_allowed(void) +{ + MigrationState *ms = migrate_get_current(); + + if (migration_is_running(ms->state) && + (!qemu_thread_is_self(&ms->thread)) && + migrate_dirty_limit() && + dirtylimit_in_service()) { + return false; + } + return true; +} + +void qmp_cancel_vcpu_dirty_limit(bool has_cpu_index, + int64_t cpu_index, + Error **errp) +{ + if (!kvm_enabled() || !kvm_dirty_ring_enabled()) { + return; + } + + if (has_cpu_index && !dirtylimit_vcpu_index_valid(cpu_index)) { + error_setg(errp, "incorrect cpu index specified"); + return; + } + + if (!dirtylimit_is_allowed()) { + error_setg(errp, "can't cancel dirty page rate limit while" + " migration is running"); + return; + } + + if (!dirtylimit_in_service()) { + return; + } + + dirtylimit_state_lock(); + + if (has_cpu_index) { + dirtylimit_set_vcpu(cpu_index, 0, false); + } else { + dirtylimit_set_all(0, false); + } + + if (!dirtylimit_state->limited_nvcpu) { + dirtylimit_cleanup(); + } + + dirtylimit_state_unlock(); +} + +void hmp_cancel_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + int64_t cpu_index = qdict_get_try_int(qdict, "cpu_index", -1); + Error *err = NULL; + + qmp_cancel_vcpu_dirty_limit(!!(cpu_index != -1), cpu_index, &err); + if (err) { + hmp_handle_error(mon, err); + return; + } + + monitor_printf(mon, "[Please use 'info vcpu_dirty_limit' to query " + "dirty limit for virtual CPU]\n"); +} + +void qmp_set_vcpu_dirty_limit(bool has_cpu_index, + int64_t cpu_index, + uint64_t dirty_rate, + Error **errp) +{ + if (!kvm_enabled() || !kvm_dirty_ring_enabled()) { + error_setg(errp, "dirty page limit feature requires KVM with" + " accelerator property 'dirty-ring-size' set"); + return; + } + + if (has_cpu_index && !dirtylimit_vcpu_index_valid(cpu_index)) { + error_setg(errp, "incorrect cpu index specified"); + return; + } + + if (!dirtylimit_is_allowed()) { + error_setg(errp, "can't set dirty page rate limit while" + " migration is running"); + return; + } + + if (!dirty_rate) { + qmp_cancel_vcpu_dirty_limit(has_cpu_index, cpu_index, errp); + return; + } + + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_init(); + } + + if (has_cpu_index) { + dirtylimit_set_vcpu(cpu_index, dirty_rate, true); + } else { + dirtylimit_set_all(dirty_rate, true); + } + + dirtylimit_state_unlock(); +} + +void hmp_set_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + int64_t dirty_rate = qdict_get_int(qdict, "dirty_rate"); + int64_t cpu_index = qdict_get_try_int(qdict, "cpu_index", -1); + Error *err = NULL; + + if (dirty_rate < 0) { + error_setg(&err, "invalid dirty page limit %" PRId64, dirty_rate); + goto out; + } + + qmp_set_vcpu_dirty_limit(!!(cpu_index != -1), cpu_index, dirty_rate, &err); + +out: + hmp_handle_error(mon, err); +} + +/* Return the max throttle time of each virtual CPU */ +uint64_t dirtylimit_throttle_time_per_round(void) +{ + CPUState *cpu; + int64_t max = 0; + + CPU_FOREACH(cpu) { + if (cpu->throttle_us_per_full > max) { + max = cpu->throttle_us_per_full; + } + } + + return max; +} + +/* + * Estimate the average dirty ring full time of each virtual CPU. + * Return 0 if the guest doesn't dirty memory.
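+ * The returned value is in microseconds (ring capacity in MB divided by + * the mean dirty rate in MB/s), the same unit as cpu->throttle_us_per_full.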
+ */ +uint64_t dirtylimit_ring_full_time(void) +{ + CPUState *cpu; + uint64_t curr_rate = 0; + int nvcpus = 0; + + CPU_FOREACH(cpu) { + if (cpu->running) { + nvcpus++; + curr_rate += vcpu_dirty_rate_get(cpu->cpu_index); + } + } + + if (!curr_rate || !nvcpus) { + return 0; + } + + return dirtylimit_dirty_ring_full_time(curr_rate / nvcpus); +} + +static struct DirtyLimitInfo *dirtylimit_query_vcpu(int cpu_index) +{ + DirtyLimitInfo *info = NULL; + + info = g_malloc0(sizeof(*info)); + info->cpu_index = cpu_index; + info->limit_rate = dirtylimit_vcpu_get_state(cpu_index)->quota; + info->current_rate = vcpu_dirty_rate_get(cpu_index); + + return info; +} + +static struct DirtyLimitInfoList *dirtylimit_query_all(void) +{ + int i, index; + DirtyLimitInfo *info = NULL; + DirtyLimitInfoList *head = NULL, **tail = &head; + + dirtylimit_state_lock(); + + if (!dirtylimit_in_service()) { + dirtylimit_state_unlock(); + return NULL; + } + + for (i = 0; i < dirtylimit_state->max_cpus; i++) { + index = dirtylimit_state->states[i].cpu_index; + if (dirtylimit_vcpu_get_state(index)->enabled) { + info = dirtylimit_query_vcpu(index); + QAPI_LIST_APPEND(tail, info); + } + } + + dirtylimit_state_unlock(); + + return head; +} + +struct DirtyLimitInfoList *qmp_query_vcpu_dirty_limit(Error **errp) +{ + if (!dirtylimit_in_service()) { + return NULL; + } + + return dirtylimit_query_all(); +} + +void hmp_info_vcpu_dirty_limit(Monitor *mon, const QDict *qdict) +{ + DirtyLimitInfoList *limit, *head, *info = NULL; + Error *err = NULL; + + if (!dirtylimit_in_service()) { + monitor_printf(mon, "Dirty page limit not enabled!\n"); + return; + } + + info = qmp_query_vcpu_dirty_limit(&err); + if (err) { + hmp_handle_error(mon, err); + return; + } + + head = info; + for (limit = head; limit != NULL; limit = limit->next) { + monitor_printf(mon, "vcpu[%"PRIi64"], limit rate %"PRIi64 " (MB/s)," + " current rate %"PRIi64 " (MB/s)\n", + limit->value->cpu_index, + limit->value->limit_rate, + limit->value->current_rate); + } + + g_free(info); +} diff --git a/softmmu/dma-helpers.c b/softmmu/dma-helpers.c index 7d766a5e89a3e7321a501ca8ce06e0faf922e6a8..3f664156dbe3ab563207644612a0059d526c007f 100644 --- a/softmmu/dma-helpers.c +++ b/softmmu/dma-helpers.c @@ -19,25 +19,11 @@ /* #define DEBUG_IOMMU */ MemTxResult dma_memory_set(AddressSpace *as, dma_addr_t addr, - uint8_t c, dma_addr_t len) + uint8_t c, dma_addr_t len, MemTxAttrs attrs) { dma_barrier(as, DMA_DIRECTION_FROM_DEVICE); -#define FILLBUF_SIZE 512 - uint8_t fillbuf[FILLBUF_SIZE]; - int l; - MemTxResult error = MEMTX_OK; - - memset(fillbuf, c, FILLBUF_SIZE); - while (len > 0) { - l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE; - error |= address_space_write(as, addr, MEMTXATTRS_UNSPECIFIED, - fillbuf, l); - len -= l; - addr += l; - } - - return error; + return address_space_set(as, addr, c, len, attrs); } void qemu_sglist_init(QEMUSGList *qsg, DeviceState *dev, int alloc_hint, @@ -144,7 +130,8 @@ static void dma_blk_cb(void *opaque, int ret) while (dbs->sg_cur_index < dbs->sg->nsg) { cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte; cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte; - mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir); + mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir, + MEMTXATTRS_UNSPECIFIED); /* * Make reads deterministic in icount mode. Windows sometimes issues * disk read requests with overlapping SGs. 
It leads @@ -178,7 +165,7 @@ static void dma_blk_cb(void *opaque, int ret) if (dbs->iov.size == 0) { trace_dma_map_wait(dbs); dbs->bh = aio_bh_new(dbs->ctx, reschedule_dma, dbs); - cpu_register_map_client(dbs->bh); + address_space_register_map_client(dbs->sg->as, dbs->bh); return; } @@ -208,7 +195,7 @@ static void dma_aio_cancel(BlockAIOCB *acb) } if (dbs->bh) { - cpu_unregister_map_client(dbs->bh); + address_space_unregister_map_client(dbs->sg->as, dbs->bh); qemu_bh_delete(dbs->bh); dbs->bh = NULL; } @@ -294,11 +281,14 @@ BlockAIOCB *dma_blk_write(BlockBackend *blk, } -static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, - DMADirection dir) +static MemTxResult dma_buf_rw(void *buf, int32_t len, uint64_t *residp, + QEMUSGList *sg, DMADirection dir, + MemTxAttrs attrs) { + uint8_t *ptr = buf; uint64_t resid; int sg_cur_index; + MemTxResult res = MEMTX_OK; resid = sg->size; sg_cur_index = 0; @@ -306,23 +296,34 @@ static uint64_t dma_buf_rw(uint8_t *ptr, int32_t len, QEMUSGList *sg, while (len > 0) { ScatterGatherEntry entry = sg->sg[sg_cur_index++]; int32_t xfer = MIN(len, entry.len); - dma_memory_rw(sg->as, entry.base, ptr, xfer, dir); + res |= dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs); ptr += xfer; len -= xfer; resid -= xfer; } - return resid; + if (residp) { + *residp = resid; + } + return res; } -uint64_t dma_buf_read(uint8_t *ptr, int32_t len, QEMUSGList *sg) +uint64_t dma_buf_read(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs) { - return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_FROM_DEVICE); + uint64_t resid; + + dma_buf_rw(ptr, len, &resid, sg, DMA_DIRECTION_FROM_DEVICE, attrs); + + return resid; } -uint64_t dma_buf_write(uint8_t *ptr, int32_t len, QEMUSGList *sg) +uint64_t dma_buf_write(void *ptr, int32_t len, QEMUSGList *sg, MemTxAttrs attrs) { - return dma_buf_rw(ptr, len, sg, DMA_DIRECTION_TO_DEVICE); + uint64_t resid; + + dma_buf_rw(ptr, len, &resid, sg, DMA_DIRECTION_TO_DEVICE, attrs); + + return resid; } void dma_acct_start(BlockBackend *blk, BlockAcctCookie *cookie, diff --git a/softmmu/main.c b/softmmu/main.c index 639c67ff489356d51534d16665d7a4cf46414b5f..0acb41bd300dc261b639b8d3752f50d4ee2f9eb2 100644 --- a/softmmu/main.c +++ b/softmmu/main.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu-common.h" #include "sysemu/sysemu.h" @@ -47,6 +48,7 @@ int main(int argc, char **argv) int main(int argc, char **argv, char **envp) { qemu_init(argc, argv, envp); + qemu_log("qemu enter main_loop\n"); qemu_main_loop(); qemu_cleanup(); diff --git a/softmmu/memory.c b/softmmu/memory.c index 7340e19ff5e24b6776ddb5737443de18019a8414..0bb49da90497a667ac8b25cbd1a9695a1dafead7 100644 --- a/softmmu/memory.c +++ b/softmmu/memory.c @@ -541,6 +541,18 @@ static MemTxResult access_with_adjusted_size(hwaddr addr, access_size_max = 4; } + /* Do not allow more than one simultaneous access to a device's IO Regions */ + if (mr->dev && !mr->disable_reentrancy_guard && + !mr->ram_device && !mr->ram && !mr->rom_device && !mr->readonly) { + if (mr->dev->mem_reentrancy_guard.engaged_in_io) { + warn_report_once("Blocked re-entrant IO on MemoryRegion: " + "%s at addr: 0x%" HWADDR_PRIX, + memory_region_name(mr), addr); + return MEMTX_ACCESS_ERROR; + } + mr->dev->mem_reentrancy_guard.engaged_in_io = true; + } + /* FIXME: support unaligned access? 
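For now the size is clamped to [access_size_min, access_size_max] and the loop below splits the access into aligned pieces of that size.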
*/ access_size = MAX(MIN(size, access_size_max), access_size_min); access_mask = MAKE_64BIT_MASK(0, access_size * 8); @@ -555,6 +567,9 @@ static MemTxResult access_with_adjusted_size(hwaddr addr, access_mask, attrs); } } + if (mr->dev) { + mr->dev->mem_reentrancy_guard.engaged_in_io = false; + } return r; } @@ -1169,6 +1184,7 @@ static void memory_region_do_init(MemoryRegion *mr, } mr->name = g_strdup(name); mr->owner = owner; + mr->dev = (DeviceState *) object_dynamic_cast(mr->owner, TYPE_DEVICE); mr->ram_block = NULL; if (name) { @@ -1309,22 +1325,7 @@ static uint64_t memory_region_ram_device_read(void *opaque, hwaddr addr, unsigned size) { MemoryRegion *mr = opaque; - uint64_t data = (uint64_t)~0; - - switch (size) { - case 1: - data = *(uint8_t *)(mr->ram_block->host + addr); - break; - case 2: - data = *(uint16_t *)(mr->ram_block->host + addr); - break; - case 4: - data = *(uint32_t *)(mr->ram_block->host + addr); - break; - case 8: - data = *(uint64_t *)(mr->ram_block->host + addr); - break; - } + uint64_t data = ldn_he_p(mr->ram_block->host + addr, size); trace_memory_region_ram_device_read(get_cpu_index(), mr, addr, data, size); @@ -1338,20 +1339,7 @@ static void memory_region_ram_device_write(void *opaque, hwaddr addr, trace_memory_region_ram_device_write(get_cpu_index(), mr, addr, data, size); - switch (size) { - case 1: - *(uint8_t *)(mr->ram_block->host + addr) = (uint8_t)data; - break; - case 2: - *(uint16_t *)(mr->ram_block->host + addr) = (uint16_t)data; - break; - case 4: - *(uint32_t *)(mr->ram_block->host + addr) = (uint32_t)data; - break; - case 8: - *(uint64_t *)(mr->ram_block->host + addr) = data; - break; - } + stn_he_p(mr->ram_block->host + addr, size, data); } static const MemoryRegionOps ram_device_mem_ops = { @@ -2958,6 +2946,10 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) as->ioeventfds = NULL; QTAILQ_INIT(&as->listeners); QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link); + as->max_bounce_buffer_size = DEFAULT_MAX_BOUNCE_BUFFER_SIZE; + as->bounce_buffer_size = 0; + qemu_mutex_init(&as->map_client_list_lock); + QLIST_INIT(&as->map_client_list); as->name = g_strdup(name ? 
name : "anonymous"); address_space_update_topology(as); address_space_update_ioeventfds(as); @@ -2965,6 +2957,10 @@ void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name) static void do_address_space_destroy(AddressSpace *as) { + assert(qatomic_read(&as->bounce_buffer_size) == 0); + assert(QLIST_EMPTY(&as->map_client_list)); + qemu_mutex_destroy(&as->map_client_list_lock); + assert(QTAILQ_EMPTY(&as->listeners)); flatview_unref(as->current_map); diff --git a/softmmu/meson.build b/softmmu/meson.build index d8e03018abf5e711079516b0fb48d5c8fa9f5cb1..95029a5db21ae0cc209356f1a76ed32eb7bb558d 100644 --- a/softmmu/meson.build +++ b/softmmu/meson.build @@ -15,6 +15,7 @@ specific_ss.add(when: 'CONFIG_SOFTMMU', if_true: [files( 'vl.c', 'cpu-timers.c', 'runstate-action.c', + 'dirtylimit.c', )]) specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: [files( diff --git a/softmmu/physmem.c b/softmmu/physmem.c index 3524c04c2a162b717c3975cc15da300c7a033710..45d290cde5872ae141b7b2375e9ba2629cd407d4 100644 --- a/softmmu/physmem.c +++ b/softmmu/physmem.c @@ -41,6 +41,7 @@ #include "qemu/config-file.h" #include "qemu/error-report.h" #include "qemu/qemu-print.h" +#include "qemu/log.h" #include "exec/memory.h" #include "exec/ioport.h" #include "sysemu/dma.h" @@ -667,7 +668,7 @@ void tcg_iommu_init_notifier_list(CPUState *cpu) /* Called from RCU critical section */ MemoryRegionSection * -address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, +address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr orig_addr, hwaddr *xlat, hwaddr *plen, MemTxAttrs attrs, int *prot) { @@ -676,6 +677,7 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, IOMMUMemoryRegionClass *imrc; IOMMUTLBEntry iotlb; int iommu_idx; + hwaddr addr = orig_addr; AddressSpaceDispatch *d = qatomic_rcu_read(&cpu->cpu_ases[asidx].memory_dispatch); @@ -720,6 +722,16 @@ address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr, return section; translate_fail: + /* + * We should be given a page-aligned address -- certainly + * tlb_set_page_with_attrs() does so. The page offset of xlat + * is used to index sections[], and PHYS_SECTION_UNASSIGNED = 0. + * The page portion of xlat will be logged by memory_region_access_valid() + * when this memory access is rejected, so use the original untranslated + * physical address. 
+ */ + assert((orig_addr & ~TARGET_PAGE_MASK) == 0); + *xlat = orig_addr; return &d->map.sections[PHYS_SECTION_UNASSIGNED]; } @@ -1496,7 +1508,14 @@ static int file_ram_open(const char *path, /* @path names a file that doesn't exist, create it */ fd = open(path, O_RDWR | O_CREAT | O_EXCL, 0644); if (fd >= 0) { - *created = true; + info_report("opened %s successfully", path); + /* If the file is on hugetlbfs (HUGETLBFS_MAGIC), unlink it right away */ + /* so that no stale file is left behind if QEMU is killed. */ + if (qemu_fd_getfiletype(fd) == HUGETLBFS_MAGIC) { + unlink(path); + } else { + *created = true; + } break; } } else if (errno == EISDIR) { @@ -1515,6 +1534,7 @@ static int file_ram_open(const char *path, fd = mkstemp(filename); if (fd >= 0) { + info_report("mkstemp %s succeeded", filename); unlink(filename); g_free(filename); break; @@ -2759,6 +2779,33 @@ static bool prepare_mmio_access(MemoryRegion *mr) return release_lock; } +/** + * flatview_access_allowed + * @mr: #MemoryRegion to be accessed + * @attrs: memory transaction attributes + * @addr: address within that memory region + * @len: the number of bytes to access + * + * Check if a memory transaction is allowed. + * + * Returns: true if transaction is allowed, false if denied. + */ +static bool flatview_access_allowed(MemoryRegion *mr, MemTxAttrs attrs, + hwaddr addr, hwaddr len) +{ + if (likely(!attrs.memory)) { + return true; + } + if (memory_region_is_ram(mr)) { + return true; + } + qemu_log_mask(LOG_GUEST_ERROR, + "Invalid access to non-RAM device at " + "addr 0x%" HWADDR_PRIX ", size %" HWADDR_PRIu ", " + "region '%s'\n", addr, len, memory_region_name(mr)); + return false; +} + /* Called within RCU critical section. */ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, MemTxAttrs attrs, @@ -2773,7 +2820,10 @@ static MemTxResult flatview_write_continue(FlatView *fv, hwaddr addr, const uint8_t *buf = ptr; for (;;) { - if (!memory_access_is_direct(mr, true)) { + if (!flatview_access_allowed(mr, attrs, addr1, l)) { + result |= MEMTX_ACCESS_ERROR; + /* Keep going. */ + } else if (!memory_access_is_direct(mr, true)) { release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); /* XXX: could force current_cpu to NULL to avoid @@ -2815,14 +2865,14 @@ static MemTxResult flatview_write(FlatView *fv, hwaddr addr, MemTxAttrs attrs, hwaddr l; hwaddr addr1; MemoryRegion *mr; - MemTxResult result = MEMTX_OK; l = len; mr = flatview_translate(fv, addr, &addr1, &l, true, attrs); - result = flatview_write_continue(fv, addr, attrs, buf, len, - addr1, l, mr); - - return result; + if (!flatview_access_allowed(mr, attrs, addr, len)) { + return MEMTX_ACCESS_ERROR; + } + return flatview_write_continue(fv, addr, attrs, buf, len, + addr1, l, mr); } /* Called within RCU critical section. */ @@ -2839,7 +2889,10 @@ MemTxResult flatview_read_continue(FlatView *fv, hwaddr addr, fuzz_dma_read_cb(addr, len, mr); for (;;) { - if (!memory_access_is_direct(mr, false)) { + if (!flatview_access_allowed(mr, attrs, addr1, l)) { + result |= MEMTX_ACCESS_ERROR; + /* Keep going. 
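Accumulate the error in result and keep processing the remaining bytes, mirroring the write path above.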
*/ + } else if (!memory_access_is_direct(mr, false)) { /* I/O case */ release_lock |= prepare_mmio_access(mr); l = memory_access_size(mr, l, addr1); @@ -2882,6 +2935,9 @@ static MemTxResult flatview_read(FlatView *fv, hwaddr addr, l = len; mr = flatview_translate(fv, addr, &addr1, &l, false, attrs); + if (!flatview_access_allowed(mr, attrs, addr, len)) { + return MEMTX_ACCESS_ERROR; + } return flatview_read_continue(fv, addr, attrs, buf, len, addr1, l, mr); } @@ -2927,6 +2983,25 @@ MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs, } } +MemTxResult address_space_set(AddressSpace *as, hwaddr addr, + uint8_t c, hwaddr len, MemTxAttrs attrs) +{ +#define FILLBUF_SIZE 512 + uint8_t fillbuf[FILLBUF_SIZE]; + int l; + MemTxResult error = MEMTX_OK; + + memset(fillbuf, c, FILLBUF_SIZE); + while (len > 0) { + l = len < FILLBUF_SIZE ? len : FILLBUF_SIZE; + error |= address_space_write(as, addr, attrs, fillbuf, l); + len -= l; + addr += l; + } + + return error; +} + void cpu_physical_memory_rw(hwaddr addr, void *buf, hwaddr len, bool is_write) { @@ -3006,53 +3081,49 @@ void cpu_flush_icache_range(hwaddr start, hwaddr len) NULL, len, FLUSH_CACHE); } +/* + * A magic value stored in the first 8 bytes of the bounce buffer struct. Used + * to detect illegal pointers passed to address_space_unmap. + */ +#define BOUNCE_BUFFER_MAGIC 0xb4017ceb4ffe12ed + typedef struct { + uint64_t magic; MemoryRegion *mr; - void *buffer; hwaddr addr; - hwaddr len; - bool in_use; + size_t len; + uint8_t buffer[]; } BounceBuffer; -static BounceBuffer bounce; - -typedef struct MapClient { - QEMUBH *bh; - QLIST_ENTRY(MapClient) link; -} MapClient; - -QemuMutex map_client_list_lock; -static QLIST_HEAD(, MapClient) map_client_list - = QLIST_HEAD_INITIALIZER(map_client_list); - -static void cpu_unregister_map_client_do(MapClient *client) +static void +address_space_unregister_map_client_do(AddressSpaceMapClient *client) { QLIST_REMOVE(client, link); g_free(client); } -static void cpu_notify_map_clients_locked(void) +static void address_space_notify_map_clients_locked(AddressSpace *as) { - MapClient *client; + AddressSpaceMapClient *client; - while (!QLIST_EMPTY(&map_client_list)) { - client = QLIST_FIRST(&map_client_list); + while (!QLIST_EMPTY(&as->map_client_list)) { + client = QLIST_FIRST(&as->map_client_list); qemu_bh_schedule(client->bh); - cpu_unregister_map_client_do(client); + address_space_unregister_map_client_do(client); } } -void cpu_register_map_client(QEMUBH *bh) +void address_space_register_map_client(AddressSpace *as, QEMUBH *bh) { - MapClient *client = g_malloc(sizeof(*client)); + AddressSpaceMapClient *client = g_malloc(sizeof(*client)); - qemu_mutex_lock(&map_client_list_lock); + qemu_mutex_lock(&as->map_client_list_lock); client->bh = bh; - QLIST_INSERT_HEAD(&map_client_list, client, link); - if (!qatomic_read(&bounce.in_use)) { - cpu_notify_map_clients_locked(); + QLIST_INSERT_HEAD(&as->map_client_list, client, link); + if (qatomic_read(&as->bounce_buffer_size) < as->max_bounce_buffer_size) { + address_space_notify_map_clients_locked(as); } - qemu_mutex_unlock(&map_client_list_lock); + qemu_mutex_unlock(&as->map_client_list_lock); } void cpu_exec_init_all(void) @@ -3068,28 +3139,27 @@ void cpu_exec_init_all(void) finalize_target_page_bits(); io_mem_init(); memory_map_init(); - qemu_mutex_init(&map_client_list_lock); } -void cpu_unregister_map_client(QEMUBH *bh) +void address_space_unregister_map_client(AddressSpace *as, QEMUBH *bh) { - MapClient *client; + AddressSpaceMapClient *client; 
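+ /* Registration, removal and notification are all serialized by the + * per-AddressSpace map_client_list_lock taken below. */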
- qemu_mutex_lock(&map_client_list_lock); - QLIST_FOREACH(client, &map_client_list, link) { + qemu_mutex_lock(&as->map_client_list_lock); + QLIST_FOREACH(client, &as->map_client_list, link) { if (client->bh == bh) { - cpu_unregister_map_client_do(client); + address_space_unregister_map_client_do(client); break; } } - qemu_mutex_unlock(&map_client_list_lock); + qemu_mutex_unlock(&as->map_client_list_lock); } -static void cpu_notify_map_clients(void) +static void address_space_notify_map_clients(AddressSpace *as) { - qemu_mutex_lock(&map_client_list_lock); - cpu_notify_map_clients_locked(); - qemu_mutex_unlock(&map_client_list_lock); + qemu_mutex_lock(&as->map_client_list_lock); + address_space_notify_map_clients_locked(as); + qemu_mutex_unlock(&as->map_client_list_lock); } static bool flatview_access_valid(FlatView *fv, hwaddr addr, hwaddr len, @@ -3119,12 +3189,10 @@ bool address_space_access_valid(AddressSpace *as, hwaddr addr, MemTxAttrs attrs) { FlatView *fv; - bool result; RCU_READ_LOCK_GUARD(); fv = address_space_to_flatview(as); - result = flatview_access_valid(fv, addr, len, is_write, attrs); - return result; + return flatview_access_valid(fv, addr, len, is_write, attrs); } static hwaddr @@ -3158,8 +3226,8 @@ flatview_extend_translation(FlatView *fv, hwaddr addr, * May map a subset of the requested range, given by and returned in *plen. * May return NULL if resources needed to perform the mapping are exhausted. * Use only for reads OR writes - not for read-modify-write operations. - * Use cpu_register_map_client() to know when retrying the map operation is - * likely to succeed. + * Use address_space_register_map_client() to know when retrying the map + * operation is likely to succeed. */ void *address_space_map(AddressSpace *as, hwaddr addr, @@ -3183,28 +3251,40 @@ void *address_space_map(AddressSpace *as, mr = flatview_translate(fv, addr, &xlat, &l, is_write, attrs); if (!memory_access_is_direct(mr, is_write)) { - if (qatomic_xchg(&bounce.in_use, true)) { + size_t used = qatomic_read(&as->bounce_buffer_size); + for (;;) { + hwaddr alloc = MIN(as->max_bounce_buffer_size - used, l); + size_t new_size = used + alloc; + size_t actual = + qatomic_cmpxchg(&as->bounce_buffer_size, used, new_size); + if (actual == used) { + l = alloc; + break; + } + used = actual; + } + + if (l == 0) { *plen = 0; return NULL; } - /* Avoid unbounded allocations */ - l = MIN(l, TARGET_PAGE_SIZE); - bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l); - bounce.addr = addr; - bounce.len = l; + BounceBuffer *bounce = g_malloc0(l + sizeof(BounceBuffer)); + bounce->magic = BOUNCE_BUFFER_MAGIC; memory_region_ref(mr); - bounce.mr = mr; + bounce->mr = mr; + bounce->addr = addr; + bounce->len = l; + if (!is_write) { flatview_read(fv, addr, MEMTXATTRS_UNSPECIFIED, - bounce.buffer, l); + bounce->buffer, l); } *plen = l; - return bounce.buffer; + return bounce->buffer; } - memory_region_ref(mr); *plen = flatview_extend_translation(fv, addr, len, mr, xlat, l, is_write, attrs); @@ -3221,12 +3301,11 @@ void *address_space_map(AddressSpace *as, void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len, bool is_write, hwaddr access_len) { - if (buffer != bounce.buffer) { - MemoryRegion *mr; - ram_addr_t addr1; + MemoryRegion *mr; + ram_addr_t addr1; - mr = memory_region_from_host(buffer, &addr1); - assert(mr != NULL); + mr = memory_region_from_host(buffer, &addr1); + if (mr != NULL) { if (is_write) { invalidate_and_set_dirty(mr, addr1, access_len); } @@ -3236,15 +3315,23 @@ void address_space_unmap(AddressSpace 
*as, void *buffer, hwaddr len, memory_region_unref(mr); return; } + + + BounceBuffer *bounce = container_of(buffer, BounceBuffer, buffer); + assert(bounce->magic == BOUNCE_BUFFER_MAGIC); + if (is_write) { - address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED, - bounce.buffer, access_len); + address_space_write(as, bounce->addr, MEMTXATTRS_UNSPECIFIED, + bounce->buffer, access_len); } - qemu_vfree(bounce.buffer); - bounce.buffer = NULL; - memory_region_unref(bounce.mr); - qatomic_mb_set(&bounce.in_use, false); - cpu_notify_map_clients(); + + qatomic_sub(&as->bounce_buffer_size, bounce->len); + bounce->magic = ~BOUNCE_BUFFER_MAGIC; + memory_region_unref(bounce->mr); + g_free(bounce); + /* Write bounce_buffer_size before reading map_client_list. */ + smp_mb(); + address_space_notify_map_clients(as); } void *cpu_physical_memory_map(hwaddr addr, @@ -3646,7 +3733,7 @@ void mtree_print_dispatch(AddressSpaceDispatch *d, MemoryRegion *root) " %s%s%s%s%s", i, s->offset_within_address_space, - s->offset_within_address_space + MR_SIZE(s->mr->size), + s->offset_within_address_space + MR_SIZE(s->size), s->mr->name ? s->mr->name : "(noname)", i < ARRAY_SIZE(names) ? names[i] : "", s->mr == root ? " [ROOT]" : "", diff --git a/softmmu/qdev-monitor.c b/softmmu/qdev-monitor.c index 01f3834db575a6932c8e292068d4ab71125abe45..14efb3701476953f6ddb36ac74ee52db6f212478 100644 --- a/softmmu/qdev-monitor.c +++ b/softmmu/qdev-monitor.c @@ -36,6 +36,7 @@ #include "qemu/option.h" #include "qemu/qemu-print.h" #include "qemu/option_int.h" +#include "qemu/log.h" #include "sysemu/block-backend.h" #include "migration/misc.h" #include "migration/migration.h" @@ -60,7 +61,8 @@ typedef struct QDevAlias QEMU_ARCH_HPPA | QEMU_ARCH_I386 | \ QEMU_ARCH_MIPS | QEMU_ARCH_PPC | \ QEMU_ARCH_RISCV | QEMU_ARCH_SH4 | \ - QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA) + QEMU_ARCH_SPARC | QEMU_ARCH_XTENSA | \ + QEMU_ARCH_SW64 | QEMU_ARCH_LOONGARCH64) #define QEMU_ARCH_VIRTIO_CCW (QEMU_ARCH_S390X) #define QEMU_ARCH_VIRTIO_MMIO (QEMU_ARCH_M68K) @@ -635,6 +637,7 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts, if (path != NULL) { bus = qbus_find(path, errp); if (!bus) { + qemu_log("cannot find bus for %s\n", driver); return NULL; } if (!object_dynamic_cast(OBJECT(bus), dc->bus_type)) { @@ -705,9 +708,11 @@ DeviceState *qdev_device_add_from_qdict(const QDict *opts, object_set_properties_from_keyval(&dev->parent_obj, dev->opts, from_json, errp); if (*errp) { + qemu_log("failed to set properties for bus %s, driver %s\n", + bus ? bus->name : "None", driver); goto err_del_dev; } - + qemu_log("added qdev %s:%s successfully\n", driver, dev->id ? dev->id : "none"); if (!qdev_realize(DEVICE(dev), bus, errp)) { goto err_del_dev; } @@ -729,6 +734,8 @@ DeviceState *qdev_device_add(QemuOpts *opts, Error **errp) ret = qdev_device_add_from_qdict(qdict, false, errp); if (ret) { + qemu_log("added qdev %s:%s successfully\n", qemu_opt_get(opts, "driver"), + qemu_opts_id(opts) ? 
qemu_opts_id(opts) : "none"); qemu_opts_del(opts); } qobject_unref(qdict); @@ -1034,6 +1041,13 @@ int qemu_global_option(const char *str) if (!opts) { return -1; } + if (!qemu_opt_get(opts, "driver") + || !qemu_opt_get(opts, "property") + || !qemu_opt_get(opts, "value")) { + error_report("options 'driver', 'property', and 'value'" + " are required"); + return -1; + } return 0; } diff --git a/softmmu/rtc.c b/softmmu/rtc.c index 5632684fc9d5bbc51507b5a89d03ce4ee208adcf..57bb8bba7c3a03c94f0a12ae6eab84ed4a985562 100644 --- a/softmmu/rtc.c +++ b/softmmu/rtc.c @@ -43,6 +43,7 @@ static time_t rtc_ref_start_datetime; static int rtc_realtime_clock_offset; /* used only with QEMU_CLOCK_REALTIME */ static int rtc_host_datetime_offset = -1; /* valid & used only with RTC_BASE_DATETIME */ +static time_t rtc_date_diff = 0; QEMUClockType rtc_clock; /***********************************************************/ /* RTC reference time/date access */ @@ -84,7 +85,7 @@ void qemu_get_timedate(struct tm *tm, int offset) } } -int qemu_timedate_diff(struct tm *tm) +time_t qemu_timedate_diff(struct tm *tm) { time_t seconds; @@ -107,6 +108,16 @@ int qemu_timedate_diff(struct tm *tm) return seconds - qemu_ref_timedate(QEMU_CLOCK_HOST); } +time_t get_rtc_date_diff(void) +{ + return rtc_date_diff; +} + +void set_rtc_date_diff(time_t diff) +{ + rtc_date_diff = diff; +} + static void configure_rtc_base_datetime(const char *startdate) { time_t rtc_start_datetime; diff --git a/softmmu/runstate.c b/softmmu/runstate.c index 10d9b7365aa78430266858fb82abc7165398b2a9..0757d7f26a79835c771c658fcbb509f3762f4000 100644 --- a/softmmu/runstate.c +++ b/softmmu/runstate.c @@ -115,6 +115,7 @@ static const RunStateTransition runstate_transitions_def[] = { { RUN_STATE_PRELAUNCH, RUN_STATE_RUNNING }, { RUN_STATE_PRELAUNCH, RUN_STATE_FINISH_MIGRATE }, { RUN_STATE_PRELAUNCH, RUN_STATE_INMIGRATE }, + { RUN_STATE_PRELAUNCH, RUN_STATE_POSTMIGRATE }, { RUN_STATE_FINISH_MIGRATE, RUN_STATE_RUNNING }, { RUN_STATE_FINISH_MIGRATE, RUN_STATE_PAUSED }, @@ -448,6 +449,7 @@ void qemu_system_reset(ShutdownCause reason) qapi_event_send_reset(shutdown_caused_by_guest(reason), reason); } cpu_synchronize_all_post_reset(); + monitor_qapi_event_discard_io_error(); } /* @@ -707,9 +709,11 @@ static bool main_loop_should_exit(void) } if (qemu_powerdown_requested()) { qemu_system_powerdown(); + qemu_log("domain is powered down by outside operation\n"); } if (qemu_vmstop_requested(&r)) { vm_stop(r); + qemu_log("domain is stopped by outside operation\n"); } return false; } diff --git a/softmmu/trace-events b/softmmu/trace-events index 9c88887b3c643e10cef84c312296a6a92b457a12..998d870fe22b91f37aaf1d641757ad946d2c5086 100644 --- a/softmmu/trace-events +++ b/softmmu/trace-events @@ -23,6 +23,8 @@ global_dirty_changed(unsigned int bitmask) "bitmask 0x%"PRIx32 # softmmu.c vm_stop_flush_all(int ret) "ret %d" +all_vcpus_paused(void) "" +all_vcpus_prepared(void) "" # vl.c vm_state_notify(int running, int reason, const char *reason_str) "running %d reason %d (%s)" @@ -31,3 +33,10 @@ runstate_set(int current_state, const char *current_state_str, int new_state, co system_wakeup_request(int reason) "reason=%d" qemu_system_shutdown_request(int reason) "reason=%d" qemu_system_powerdown_request(void) "" + +# dirtylimit.c +dirtylimit_state_initialize(int max_cpus) "dirtylimit state initialize: max cpus %d" +dirtylimit_state_finalize(void) "" +dirtylimit_throttle_pct(int cpu_index, uint64_t pct, int64_t time_us) "CPU[%d] throttle percent: %" PRIu64 ", throttle adjust time %"PRIi64 " us" 
+dirtylimit_set_vcpu(int cpu_index, uint64_t quota) "CPU[%d] set dirty page rate limit %"PRIu64 +dirtylimit_vcpu_execute(int cpu_index, int64_t sleep_time_us) "CPU[%d] sleep %"PRIi64 " us" diff --git a/softmmu/vl.c b/softmmu/vl.c index 620a1f1367e2b033bfec541619d33257920eaa4f..9dcbc3b266a6e315d51d29ca0f4ae554e22ef889 100644 --- a/softmmu/vl.c +++ b/softmmu/vl.c @@ -26,6 +26,7 @@ #include "qemu-common.h" #include "qemu/datadir.h" #include "qemu/units.h" +#include "qemu/log.h" #include "exec/cpu-common.h" #include "hw/qdev-properties.h" #include "qapi/compat-policy.h" @@ -726,6 +727,9 @@ static QemuOptsList qemu_smp_opts = { }, { .name = "dies", .type = QEMU_OPT_NUMBER, + }, { + .name = "clusters", + .type = QEMU_OPT_NUMBER, }, { .name = "cores", .type = QEMU_OPT_NUMBER, @@ -970,7 +974,8 @@ static void select_vgahw(const MachineClass *machine_class, const char *p) if (vga_interface_available(t) && ti->opt_name) { printf("%-20s %s%s\n", ti->opt_name, ti->name ?: "", - g_str_equal(ti->opt_name, def) ? " (default)" : ""); + (def && g_str_equal(ti->opt_name, def)) ? + " (default)" : ""); } } exit(0); @@ -2469,6 +2474,10 @@ static void qemu_validate_options(const QDict *machine_opts) } } + if (loadvm && incoming) { + error_report("'incoming' and 'loadvm' options are mutually exclusive"); + exit(EXIT_FAILURE); + } if (loadvm && preconfig_requested) { error_report("'preconfig' and 'loadvm' options are " "mutually exclusive"); @@ -2677,6 +2686,7 @@ static void qemu_create_cli_devices(void) } /* init generic devices */ + qemu_log("device init start\n"); rom_set_order_override(FW_CFG_ORDER_OVERRIDE_DEVICE); qemu_opts_foreach(qemu_find_opts("device"), device_init_func, NULL, &error_fatal); @@ -2816,6 +2826,7 @@ void qemu_init(int argc, char **argv, char **envp) qemu_init_subsystems(); + qemu_log("qemu pid is %d, options parsing start\n", getpid()); /* first pass of option parsing */ optind = 1; while (optind < argc) { @@ -3024,6 +3035,7 @@ void qemu_init(int argc, char **argv, char **envp) exit(0); break; case QEMU_OPTION_m: + qemu_log("memory options parse start\n"); opts = qemu_opts_parse_noisily(qemu_find_opts("memory"), optarg, true); if (!opts) { @@ -3741,6 +3753,7 @@ void qemu_init(int argc, char **argv, char **envp) */ machine_class = MACHINE_GET_CLASS(current_machine); + qemu_log("configure accelerator %s start\n", machine_class->name); if (!qtest_enabled() && machine_class->deprecation_reason) { error_report("Machine type '%s' is deprecated: %s", machine_class->name, machine_class->deprecation_reason); @@ -3754,6 +3767,7 @@ void qemu_init(int argc, char **argv, char **envp) qemu_create_late_backends(); + qemu_log("machine init start\n"); /* parse features once if machine provides default cpu_type */ current_machine->cpu_type = machine_class->default_cpu_type; if (cpu_option) { diff --git a/storage-daemon/qemu-storage-daemon.c b/storage-daemon/qemu-storage-daemon.c index 52cf17e8acee159f4235312e276443637a64d88f..f3d8c4ca11fada5443a713de120be446d0188042 100644 --- a/storage-daemon/qemu-storage-daemon.c +++ b/storage-daemon/qemu-storage-daemon.c @@ -60,6 +60,7 @@ #include "trace/control.h" static const char *pid_file; +static char *pid_file_realpath; static volatile bool exit_requested = false; void qemu_system_killed(int signal, pid_t pid) @@ -292,7 +293,7 @@ static void process_options(int argc, char *argv[]) static void pid_file_cleanup(void) { - unlink(pid_file); + unlink(pid_file_realpath); } static void pid_file_init(void) @@ -308,6 +309,14 @@ static void pid_file_init(void) 
exit(EXIT_FAILURE); } + pid_file_realpath = g_malloc(PATH_MAX); + if (!realpath(pid_file, pid_file_realpath)) { + error_report("cannot resolve PID file path: %s: %s", + pid_file, strerror(errno)); + unlink(pid_file); + exit(EXIT_FAILURE); + } + atexit(pid_file_cleanup); } diff --git a/stubs/iothread-lock-block.c b/stubs/iothread-lock-block.c new file mode 100644 index 0000000000000000000000000000000000000000..c88ed70462514bf65ca98e9e749f387901f5cbc4 --- /dev/null +++ b/stubs/iothread-lock-block.c @@ -0,0 +1,8 @@ +#include "qemu/osdep.h" +#include "qemu/main-loop.h" + +bool qemu_in_main_thread(void) +{ + return qemu_get_current_aio_context() == qemu_get_aio_context(); +} + diff --git a/stubs/meson.build b/stubs/meson.build index 71469c1d50a18967812c7f46078715782a135f5d..3aca1d67c1ab520a93a05fd5a0453b33c9001466 100644 --- a/stubs/meson.build +++ b/stubs/meson.build @@ -18,6 +18,9 @@ if linux_io_uring.found() stub_ss.add(files('io_uring.c')) endif stub_ss.add(files('iothread-lock.c')) +if have_block + stub_ss.add(files('iothread-lock-block.c')) +endif stub_ss.add(files('isa-bus.c')) stub_ss.add(files('is-daemonized.c')) if libaio.found() diff --git a/subprojects/libvhost-user/libvhost-user.c b/subprojects/libvhost-user/libvhost-user.c index 787f4d2d4fef8311d994399e55dd36223f881f9b..79563ec597c3a7d73ee2f9605d69c7eb6fa727b3 100644 --- a/subprojects/libvhost-user/libvhost-user.c +++ b/subprojects/libvhost-user/libvhost-user.c @@ -756,15 +756,9 @@ vu_add_mem_reg(VuDev *dev, VhostUserMsg *vmsg) { /* Send the message back to qemu with the addresses filled in. */ vmsg->fd_num = 0; - if (!vu_send_reply(dev, dev->sock, vmsg)) { - vu_panic(dev, "failed to respond to add-mem-region for postcopy"); - return false; - } - DPRINT("Successfully added new region in postcopy\n"); dev->nregions++; - return false; - + return true; } else { for (i = 0; i < dev->max_queues; i++) { if (dev->vq[i].vring.desc) { @@ -1788,18 +1782,11 @@ vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg) static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg) { - vmsg->flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION; - vmsg->size = sizeof(vmsg->payload.u64); - vmsg->payload.u64 = VHOST_USER_MAX_RAM_SLOTS; - vmsg->fd_num = 0; - - if (!vu_message_write(dev, dev->sock, vmsg)) { - vu_panic(dev, "Failed to send max ram slots: %s\n", strerror(errno)); - } + vmsg_set_reply_u64(vmsg, VHOST_USER_MAX_RAM_SLOTS); DPRINT("u64: 0x%016"PRIx64"\n", (uint64_t) VHOST_USER_MAX_RAM_SLOTS); - return false; + return true; } static bool diff --git a/target/Kconfig b/target/Kconfig index ae7f24fc66b00dea4318be4793fbf315dde02b90..b2abc7b60b14ac3bc8b261a991ff6fde3a7c8402 100644 --- a/target/Kconfig +++ b/target/Kconfig @@ -4,6 +4,7 @@ source avr/Kconfig source cris/Kconfig source hppa/Kconfig source i386/Kconfig +source loongarch64/Kconfig source m68k/Kconfig source microblaze/Kconfig source mips/Kconfig @@ -17,3 +18,4 @@ source sh4/Kconfig source sparc/Kconfig source tricore/Kconfig source xtensa/Kconfig +source sw64/Kconfig diff --git a/target/arm/cpu.c b/target/arm/cpu.c index a211804fd3df9f3694ddb7a9c8ed7a754fea29ac..d550022f18eb3d256ae6185c8d85b37a3044e50d 100644 --- a/target/arm/cpu.c +++ b/target/arm/cpu.c @@ -25,6 +25,8 @@ #include "qemu/module.h" #include "qapi/error.h" #include "qapi/visitor.h" +#include "qapi/qmp/qdict.h" +#include "qom/qom-qobject.h" #include "cpu.h" #ifdef CONFIG_TCG #include "hw/core/tcg-cpu-ops.h" @@ -176,9 +178,9 @@ static void arm_cpu_reset(DeviceState *dev) g_hash_table_foreach(cpu->cp_regs, 
cp_reg_check_reset, cpu); env->vfp.xregs[ARM_VFP_FPSID] = cpu->reset_fpsid; - env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.mvfr0; - env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.mvfr1; - env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.mvfr2; + env->vfp.xregs[ARM_VFP_MVFR0] = cpu->isar.regs[MVFR0]; + env->vfp.xregs[ARM_VFP_MVFR1] = cpu->isar.regs[MVFR1]; + env->vfp.xregs[ARM_VFP_MVFR2] = cpu->isar.regs[MVFR2]; cpu->power_state = s->start_powered_off ? PSCI_OFF : PSCI_ON; @@ -1211,6 +1213,509 @@ unsigned int gt_cntfrq_period_ns(ARMCPU *cpu) NANOSECONDS_PER_SECOND / cpu->gt_cntfrq_hz : 1; } +/** + * CPUFeatureInfo: + * @reg: The ID register where the ID field is in. + * @name: The name of the CPU feature. + * @length: The bit length of the ID field. + * @shift: The bit shift of the ID field in the ID register. + * @min_value: The minimum value equal to or larger than which means the CPU + * feature is implemented. + * @ni_value: Not-implemented value. It will be set to the ID field when + * disabling the CPU feature. Usually, it's min_value - 1. + * @sign: Whether the ID field is signed. + * @is_32bit: Whether the CPU feature is for 32-bit. + * + * In ARM, a CPU feature is described by an ID field, which is a 4-bit field in + * an ID register. + */ +typedef struct CPUFeatureInfo { + CPUIDReg reg; + const char *name; + int length; + int shift; + int min_value; + int ni_value; + bool sign; + bool is_32bit; +} CPUFeatureInfo; + +#define FIELD_INFO(feature_name, id_reg, field, s, min_val, ni_val, is32bit) { \ + .reg = id_reg, \ + .length = R_ ## id_reg ## _ ## field ## _LENGTH, \ + .shift = R_ ## id_reg ## _ ## field ## _SHIFT, \ + .sign = s, \ + .min_value = min_val, \ + .ni_value = ni_val, \ + .name = feature_name, \ + .is_32bit = is32bit, \ +} + +static struct CPUFeatureInfo cpu_features[] = { + FIELD_INFO("swap", ID_ISAR0, SWAP, false, 1, 0, true), + FIELD_INFO("bitcount", ID_ISAR0, BITCOUNT, false, 1, 0, true), + FIELD_INFO("bitfield", ID_ISAR0, BITFIELD, false, 1, 0, true), + FIELD_INFO("cmpbranch", ID_ISAR0, CMPBRANCH, false, 1, 0, true), + FIELD_INFO("coproc", ID_ISAR0, COPROC, false, 1, 0, true), + FIELD_INFO("debug", ID_ISAR0, DEBUG, false, 1, 0, true), + FIELD_INFO("device", ID_ISAR0, DIVIDE, false, 1, 0, true), + + FIELD_INFO("endian", ID_ISAR1, ENDIAN, false, 1, 0, true), + FIELD_INFO("except", ID_ISAR1, EXCEPT, false, 1, 0, true), + FIELD_INFO("except_ar", ID_ISAR1, EXCEPT_AR, false, 1, 0, true), + FIELD_INFO("extend", ID_ISAR1, EXTEND, false, 1, 0, true), + FIELD_INFO("ifthen", ID_ISAR1, IFTHEN, false, 1, 0, true), + FIELD_INFO("immediate", ID_ISAR1, IMMEDIATE, false, 1, 0, true), + FIELD_INFO("interwork", ID_ISAR1, INTERWORK, false, 1, 0, true), + FIELD_INFO("jazelle", ID_ISAR1, JAZELLE, false, 1, 0, true), + + FIELD_INFO("loadstore", ID_ISAR2, LOADSTORE, false, 1, 0, true), + FIELD_INFO("memhint", ID_ISAR2, MEMHINT, false, 1, 0, true), + FIELD_INFO("multiaccessint", ID_ISAR2, MULTIACCESSINT, false, 1, 0, true), + FIELD_INFO("mult", ID_ISAR2, MULT, false, 1, 0, true), + FIELD_INFO("mults", ID_ISAR2, MULTS, false, 1, 0, true), + FIELD_INFO("multu", ID_ISAR2, MULTU, false, 1, 0, true), + FIELD_INFO("psr_ar", ID_ISAR2, PSR_AR, false, 1, 0, true), + FIELD_INFO("reversal", ID_ISAR2, REVERSAL, false, 1, 0, true), + + FIELD_INFO("saturate", ID_ISAR3, SATURATE, false, 1, 0, true), + FIELD_INFO("simd", ID_ISAR3, SIMD, false, 1, 0, true), + FIELD_INFO("svc", ID_ISAR3, SVC, false, 1, 0, true), + FIELD_INFO("synchprim", ID_ISAR3, SYNCHPRIM, false, 1, 0, true), + FIELD_INFO("tabbranch", ID_ISAR3, 
TABBRANCH, false, 1, 0, true), + FIELD_INFO("t32copy", ID_ISAR3, T32COPY, false, 1, 0, true), + FIELD_INFO("truenop", ID_ISAR3, TRUENOP, false, 1, 0, true), + FIELD_INFO("t32ee", ID_ISAR3, T32EE, false, 1, 0, true), + + FIELD_INFO("unpriv", ID_ISAR4, UNPRIV, false, 1, 0, true), + FIELD_INFO("withshifts", ID_ISAR4, WITHSHIFTS, false, 1, 0, true), + FIELD_INFO("writeback", ID_ISAR4, WRITEBACK, false, 1, 0, true), + FIELD_INFO("smc", ID_ISAR4, SMC, false, 1, 0, true), + FIELD_INFO("barrier", ID_ISAR4, BARRIER, false, 1, 0, true), + FIELD_INFO("synchprim_frac", ID_ISAR4, SYNCHPRIM_FRAC, false, 1, 0, true), + FIELD_INFO("psr_m", ID_ISAR4, PSR_M, false, 1, 0, true), + FIELD_INFO("swp_frac", ID_ISAR4, SWP_FRAC, false, 1, 0, true), + + FIELD_INFO("sevl", ID_ISAR5, SEVL, false, 1, 0, true), + FIELD_INFO("aes", ID_ISAR5, AES, false, 1, 0, true), + FIELD_INFO("sha1", ID_ISAR5, SHA1, false, 1, 0, true), + FIELD_INFO("sha2", ID_ISAR5, SHA2, false, 1, 0, true), + FIELD_INFO("crc32", ID_ISAR5, CRC32, false, 1, 0, true), + FIELD_INFO("rdm", ID_ISAR5, RDM, false, 1, 0, true), + FIELD_INFO("vcma", ID_ISAR5, VCMA, false, 1, 0, true), + + FIELD_INFO("jscvt", ID_ISAR6, JSCVT, false, 1, 0, true), + FIELD_INFO("dp", ID_ISAR6, DP, false, 1, 0, true), + FIELD_INFO("fhm", ID_ISAR6, FHM, false, 1, 0, true), + FIELD_INFO("sb", ID_ISAR6, SB, false, 1, 0, true), + FIELD_INFO("specres", ID_ISAR6, SPECRES, false, 1, 0, true), + FIELD_INFO("i8mm", ID_AA64ISAR1, I8MM, false, 1, 0, false), + FIELD_INFO("bf16", ID_AA64ISAR1, BF16, false, 1, 0, false), + FIELD_INFO("dgh", ID_AA64ISAR1, DGH, false, 1, 0, false), + + FIELD_INFO("cmaintva", ID_MMFR3, CMAINTVA, false, 1, 0, true), + FIELD_INFO("cmaintsw", ID_MMFR3, CMAINTSW, false, 1, 0, true), + FIELD_INFO("bpmaint", ID_MMFR3, BPMAINT, false, 1, 0, true), + FIELD_INFO("maintbcst", ID_MMFR3, MAINTBCST, false, 1, 0, true), + FIELD_INFO("pan", ID_MMFR3, PAN, false, 1, 0, true), + FIELD_INFO("cohwalk", ID_MMFR3, COHWALK, false, 1, 0, true), + FIELD_INFO("cmemsz", ID_MMFR3, CMEMSZ, false, 1, 0, true), + FIELD_INFO("supersec", ID_MMFR3, SUPERSEC, false, 1, 0, true), + + FIELD_INFO("specsei", ID_MMFR4, SPECSEI, false, 1, 0, true), + FIELD_INFO("ac2", ID_MMFR4, AC2, false, 1, 0, true), + FIELD_INFO("xnx", ID_MMFR4, XNX, false, 1, 0, true), + FIELD_INFO("cnp", ID_MMFR4, CNP, false, 1, 0, true), + FIELD_INFO("hpds", ID_MMFR4, HPDS, false, 1, 0, true), + FIELD_INFO("lsm", ID_MMFR4, LSM, false, 1, 0, true), + FIELD_INFO("ccidx", ID_MMFR4, CCIDX, false, 1, 0, true), + FIELD_INFO("evt", ID_MMFR4, EVT, false, 1, 0, true), + + FIELD_INFO("simdreg", MVFR0, SIMDREG, false, 1, 0, true), + FIELD_INFO("fpsp", MVFR0, FPSP, false, 1, 0, true), + FIELD_INFO("fpdp", MVFR0, FPDP, false, 1, 0, true), + FIELD_INFO("fptrap", MVFR0, FPTRAP, false, 1, 0, true), + FIELD_INFO("fpdivide", MVFR0, FPDIVIDE, false, 1, 0, true), + FIELD_INFO("fpsqrt", MVFR0, FPSQRT, false, 1, 0, true), + FIELD_INFO("fpshvec", MVFR0, FPSHVEC, false, 1, 0, true), + FIELD_INFO("fpround", MVFR0, FPROUND, false, 1, 0, true), + + FIELD_INFO("fpftz", MVFR1, FPFTZ, false, 1, 0, true), + FIELD_INFO("fpdnan", MVFR1, FPDNAN, false, 1, 0, true), + FIELD_INFO("simdls", MVFR1, SIMDLS, false, 1, 0, true), + FIELD_INFO("simdint", MVFR1, SIMDINT, false, 1, 0, true), + FIELD_INFO("simdsp", MVFR1, SIMDSP, false, 1, 0, true), + FIELD_INFO("simdhp", MVFR1, SIMDHP, false, 1, 0, true), + FIELD_INFO("fphp", MVFR1, FPHP, false, 1, 0, true), + FIELD_INFO("simdfmac", MVFR1, SIMDFMAC, false, 1, 0, true), + + FIELD_INFO("simdmisc", MVFR2, SIMDMISC, false, 1, 0, 
true), + FIELD_INFO("fpmisc", MVFR2, FPMISC, false, 1, 0, true), + + FIELD_INFO("debugver", ID_AA64DFR0, DEBUGVER, false, 1, 0, false), + FIELD_INFO("tracever", ID_AA64DFR0, TRACEVER, false, 1, 0, false), + FIELD_INFO("pmuver", ID_AA64DFR0, PMUVER, false, 1, 0, false), + FIELD_INFO("brps", ID_AA64DFR0, BRPS, false, 1, 0, false), + FIELD_INFO("wrps", ID_AA64DFR0, WRPS, false, 1, 0, false), + FIELD_INFO("ctx_cmps", ID_AA64DFR0, CTX_CMPS, false, 1, 0, false), + FIELD_INFO("pmsver", ID_AA64DFR0, PMSVER, false, 1, 0, false), + FIELD_INFO("doublelock", ID_AA64DFR0, DOUBLELOCK, false, 1, 0, false), + FIELD_INFO("tracefilt", ID_AA64DFR0, TRACEFILT, false, 1, 0, false), + + FIELD_INFO("aes", ID_AA64ISAR0, AES, false, 1, 0, false), + FIELD_INFO("sha1", ID_AA64ISAR0, SHA1, false, 1, 0, false), + FIELD_INFO("sha2", ID_AA64ISAR0, SHA2, false, 1, 0, false), + FIELD_INFO("crc32", ID_AA64ISAR0, CRC32, false, 1, 0, false), + FIELD_INFO("atomics", ID_AA64ISAR0, ATOMIC, false, 1, 0, false), + FIELD_INFO("asimdrdm", ID_AA64ISAR0, RDM, false, 1, 0, false), + FIELD_INFO("sha3", ID_AA64ISAR0, SHA3, false, 1, 0, false), + FIELD_INFO("sm3", ID_AA64ISAR0, SM3, false, 1, 0, false), + FIELD_INFO("sm4", ID_AA64ISAR0, SM4, false, 1, 0, false), + FIELD_INFO("asimddp", ID_AA64ISAR0, DP, false, 1, 0, false), + FIELD_INFO("asimdfhm", ID_AA64ISAR0, FHM, false, 1, 0, false), + FIELD_INFO("flagm", ID_AA64ISAR0, TS, false, 1, 0, false), + FIELD_INFO("tlb", ID_AA64ISAR0, TLB, false, 1, 0, false), + FIELD_INFO("rng", ID_AA64ISAR0, RNDR, false, 1, 0, false), + + FIELD_INFO("dcpop", ID_AA64ISAR1, DPB, false, 1, 0, false), + FIELD_INFO("papa", ID_AA64ISAR1, APA, false, 1, 0, false), + FIELD_INFO("api", ID_AA64ISAR1, API, false, 1, 0, false), + FIELD_INFO("jscvt", ID_AA64ISAR1, JSCVT, false, 1, 0, false), + FIELD_INFO("fcma", ID_AA64ISAR1, FCMA, false, 1, 0, false), + FIELD_INFO("lrcpc", ID_AA64ISAR1, LRCPC, false, 1, 0, false), + FIELD_INFO("pacg", ID_AA64ISAR1, GPA, false, 1, 0, false), + FIELD_INFO("gpi", ID_AA64ISAR1, GPI, false, 1, 0, false), + FIELD_INFO("frint", ID_AA64ISAR1, FRINTTS, false, 1, 0, false), + FIELD_INFO("sb", ID_AA64ISAR1, SB, false, 1, 0, false), + FIELD_INFO("specres", ID_AA64ISAR1, SPECRES, false, 1, 0, false), + + FIELD_INFO("el0", ID_AA64PFR0, EL0, false, 1, 0, false), + FIELD_INFO("el1", ID_AA64PFR0, EL1, false, 1, 0, false), + FIELD_INFO("el2", ID_AA64PFR0, EL2, false, 1, 0, false), + FIELD_INFO("el3", ID_AA64PFR0, EL3, false, 1, 0, false), + FIELD_INFO("fp", ID_AA64PFR0, FP, true, 0, 0xf, false), + FIELD_INFO("asimd", ID_AA64PFR0, ADVSIMD, true, 0, 0xf, false), + FIELD_INFO("gic", ID_AA64PFR0, GIC, false, 1, 0, false), + FIELD_INFO("ras", ID_AA64PFR0, RAS, false, 1, 0, false), + FIELD_INFO("sve", ID_AA64PFR0, SVE, false, 1, 0, false), + + FIELD_INFO("bti", ID_AA64PFR1, BT, false, 1, 0, false), + FIELD_INFO("ssbs", ID_AA64PFR1, SSBS, false, 1, 0, false), + FIELD_INFO("mte", ID_AA64PFR1, MTE, false, 1, 0, false), + FIELD_INFO("ras_frac", ID_AA64PFR1, RAS_FRAC, false, 1, 0, false), + + FIELD_INFO("parange", ID_AA64MMFR0, PARANGE, false, 1, 0, false), + FIELD_INFO("asidbits", ID_AA64MMFR0, ASIDBITS, false, 1, 0, false), + FIELD_INFO("bigend", ID_AA64MMFR0, BIGEND, false, 1, 0, false), + FIELD_INFO("snsmem", ID_AA64MMFR0, SNSMEM, false, 1, 0, false), + FIELD_INFO("bigendel0", ID_AA64MMFR0, BIGENDEL0, false, 1, 0, false), + FIELD_INFO("tgran16", ID_AA64MMFR0, TGRAN16, false, 1, 0, false), + FIELD_INFO("tgran64", ID_AA64MMFR0, TGRAN64, false, 1, 0, false), + FIELD_INFO("tgran4", ID_AA64MMFR0, TGRAN4, false, 1, 
0, false), + FIELD_INFO("tgran16_2", ID_AA64MMFR0, TGRAN16_2, false, 1, 0, false), + FIELD_INFO("tgran64_2", ID_AA64MMFR0, TGRAN64_2, false, 1, 0, false), + FIELD_INFO("tgran4_2", ID_AA64MMFR0, TGRAN4_2, false, 1, 0, false), + FIELD_INFO("exs", ID_AA64MMFR0, EXS, false, 1, 0, false), + + FIELD_INFO("hafdbs", ID_AA64MMFR1, HAFDBS, false, 1, 0, false), + FIELD_INFO("vmidbits", ID_AA64MMFR1, VMIDBITS, false, 1, 0, false), + FIELD_INFO("vh", ID_AA64MMFR1, VH, false, 1, 0, false), + FIELD_INFO("hpds", ID_AA64MMFR1, HPDS, false, 1, 0, false), + FIELD_INFO("lo", ID_AA64MMFR1, LO, false, 1, 0, false), + FIELD_INFO("pan", ID_AA64MMFR1, PAN, false, 1, 0, false), + FIELD_INFO("specsei", ID_AA64MMFR1, SPECSEI, false, 1, 0, false), + FIELD_INFO("xnx", ID_AA64MMFR1, XNX, false, 1, 0, false), + + FIELD_INFO("cnp", ID_AA64MMFR2, CNP, false, 1, 0, false), + FIELD_INFO("uao", ID_AA64MMFR2, UAO, false, 1, 0, false), + FIELD_INFO("lsm", ID_AA64MMFR2, LSM, false, 1, 0, false), + FIELD_INFO("iesb", ID_AA64MMFR2, IESB, false, 1, 0, false), + FIELD_INFO("varange", ID_AA64MMFR2, VARANGE, false, 1, 0, false), + FIELD_INFO("ccidx", ID_AA64MMFR2, CCIDX, false, 1, 0, false), + FIELD_INFO("nv", ID_AA64MMFR2, NV, false, 1, 0, false), + FIELD_INFO("st", ID_AA64MMFR2, ST, false, 1, 0, false), + FIELD_INFO("uscat", ID_AA64MMFR2, AT, false, 1, 0, false), + FIELD_INFO("ids", ID_AA64MMFR2, IDS, false, 1, 0, false), + FIELD_INFO("fwb", ID_AA64MMFR2, FWB, false, 1, 0, false), + FIELD_INFO("ttl", ID_AA64MMFR2, TTL, false, 1, 0, false), + FIELD_INFO("bbm", ID_AA64MMFR2, BBM, false, 1, 0, false), + FIELD_INFO("evt", ID_AA64MMFR2, EVT, false, 1, 0, false), + FIELD_INFO("e0pd", ID_AA64MMFR2, E0PD, false, 1, 0, false), + + FIELD_INFO("copdbg", ID_DFR0, COPDBG, false, 1, 0, false), + FIELD_INFO("copsdbg", ID_DFR0, COPSDBG, false, 1, 0, false), + FIELD_INFO("mmapdbg", ID_DFR0, MMAPDBG, false, 1, 0, false), + FIELD_INFO("coptrc", ID_DFR0, COPTRC, false, 1, 0, false), + FIELD_INFO("mmaptrc", ID_DFR0, MMAPTRC, false, 1, 0, false), + FIELD_INFO("mprofdbg", ID_DFR0, MPROFDBG, false, 1, 0, false), + FIELD_INFO("perfmon", ID_DFR0, PERFMON, false, 1, 0, false), + FIELD_INFO("tracefilt", ID_DFR0, TRACEFILT, false, 1, 0, false), + + { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_FP_LENGTH, + .shift = R_ID_AA64PFR0_FP_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "fphp", .is_32bit = false, + }, + { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_ADVSIMD_LENGTH, + .shift = R_ID_AA64PFR0_ADVSIMD_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "asimdhp", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_AES_LENGTH, + .shift = R_ID_AA64ISAR0_AES_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "pmull", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_SHA2_LENGTH, + .shift = R_ID_AA64ISAR0_SHA2_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "sha512", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_TS_LENGTH, + .shift = R_ID_AA64ISAR0_TS_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "flagm2", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR1, .length = R_ID_AA64ISAR1_DPB_LENGTH, + .shift = R_ID_AA64ISAR1_DPB_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "dcpodp", .is_32bit = false, + }, + { + .reg = ID_AA64ISAR1, .length = R_ID_AA64ISAR1_LRCPC_LENGTH, + .shift = R_ID_AA64ISAR1_LRCPC_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "ilrcpc", .is_32bit = false, + }, +}; 
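+ +/* + * Illustrative reading of one table entry, assuming the R_*_SHIFT/_LENGTH + * constants generated by FIELD() in cpu.h: FIELD_INFO("aes", ID_AA64ISAR0, + * AES, false, 1, 0, false) describes ID_AA64ISAR0_EL1.AES, an unsigned + * 4-bit field at bits [7:4]; a field value >= 1 means the feature is + * implemented, and writing 0 marks it not implemented. The generic property + * get/set handlers below work purely from such descriptions. + */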
+ +typedef struct CPUFeatureDep { + CPUFeatureInfo from, to; +} CPUFeatureDep; + +static const CPUFeatureDep feature_dependencies[] = { + { + .from = FIELD_INFO("fp", ID_AA64PFR0, FP, true, 0, 0xf, false), + .to = FIELD_INFO("asimd", ID_AA64PFR0, ADVSIMD, true, 0, 0xf, false), + }, + { + .from = FIELD_INFO("asimd", ID_AA64PFR0, ADVSIMD, true, 0, 0xf, false), + .to = FIELD_INFO("fp", ID_AA64PFR0, FP, true, 0, 0xf, false), + }, + { + .from = { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_FP_LENGTH, + .shift = R_ID_AA64PFR0_FP_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "fphp", .is_32bit = false, + }, + .to = { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_ADVSIMD_LENGTH, + .shift = R_ID_AA64PFR0_ADVSIMD_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "asimdhp", .is_32bit = false, + }, + }, + { + .from = { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_ADVSIMD_LENGTH, + .shift = R_ID_AA64PFR0_ADVSIMD_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "asimdhp", .is_32bit = false, + }, + .to = { + .reg = ID_AA64PFR0, .length = R_ID_AA64PFR0_FP_LENGTH, + .shift = R_ID_AA64PFR0_FP_SHIFT, .sign = true, .min_value = 1, + .ni_value = 0, .name = "fphp", .is_32bit = false, + }, + }, + { + + .from = FIELD_INFO("aes", ID_AA64ISAR0, AES, false, 1, 0, false), + .to = { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_AES_LENGTH, + .shift = R_ID_AA64ISAR0_AES_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "pmull", .is_32bit = false, + }, + }, + { + + .from = FIELD_INFO("sha2", ID_AA64ISAR0, SHA2, false, 1, 0, false), + .to = { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_SHA2_LENGTH, + .shift = R_ID_AA64ISAR0_SHA2_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "sha512", .is_32bit = false, + }, + }, + { + .from = FIELD_INFO("lrcpc", ID_AA64ISAR1, LRCPC, false, 1, 0, false), + .to = { + .reg = ID_AA64ISAR1, .length = R_ID_AA64ISAR1_LRCPC_LENGTH, + .shift = R_ID_AA64ISAR1_LRCPC_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "ilrcpc", .is_32bit = false, + }, + }, + { + .from = FIELD_INFO("sm3", ID_AA64ISAR0, SM3, false, 1, 0, false), + .to = FIELD_INFO("sm4", ID_AA64ISAR0, SM4, false, 1, 0, false), + }, + { + .from = FIELD_INFO("sm4", ID_AA64ISAR0, SM4, false, 1, 0, false), + .to = FIELD_INFO("sm3", ID_AA64ISAR0, SM3, false, 1, 0, false), + }, + { + .from = FIELD_INFO("sha1", ID_AA64ISAR0, SHA1, false, 1, 0, false), + .to = FIELD_INFO("sha2", ID_AA64ISAR0, SHA2, false, 1, 0, false), + }, + { + .from = FIELD_INFO("sha1", ID_AA64ISAR0, SHA1, false, 1, 0, false), + .to = FIELD_INFO("sha3", ID_AA64ISAR0, SHA3, false, 1, 0, false), + }, + { + .from = FIELD_INFO("sha3", ID_AA64ISAR0, SHA3, false, 1, 0, false), + .to = { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_SHA2_LENGTH, + .shift = R_ID_AA64ISAR0_SHA2_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "sha512", .is_32bit = false, + }, + }, + { + .from = { + .reg = ID_AA64ISAR0, .length = R_ID_AA64ISAR0_SHA2_LENGTH, + .shift = R_ID_AA64ISAR0_SHA2_SHIFT, .sign = false, .min_value = 2, + .ni_value = 1, .name = "sha512", .is_32bit = false, + }, + .to = FIELD_INFO("sha3", ID_AA64ISAR0, SHA3, false, 1, 0, false), + }, +}; + +void arm_cpu_features_to_dict(ARMCPU *cpu, QDict *features) +{ + Object *obj = OBJECT(cpu); + const char *name; + ObjectProperty *prop; + bool is_32bit = !arm_feature(&cpu->env, ARM_FEATURE_AARCH64); + int i; + + for (i = 0; i < ARRAY_SIZE(cpu_features); ++i) { + if (is_32bit != cpu_features[i].is_32bit) { + continue; + } + + name = 
cpu_features[i].name; + prop = object_property_find(obj, name); + if (prop) { + QObject *value; + + assert(prop->get); + value = object_property_get_qobject(obj, name, &error_abort); + qdict_put_obj(features, name, value); + } + } +} + +static void arm_cpu_get_bit_prop(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + ARMCPU *cpu = ARM_CPU(obj); + CPUFeatureInfo *feat = opaque; + int field_value = feat->sign ? sextract64(cpu->isar.regs[feat->reg], + feat->shift, feat->length) : + extract64(cpu->isar.regs[feat->reg], + feat->shift, feat->length); + bool value = field_value >= feat->min_value; + + visit_type_bool(v, name, &value, errp); +} + +static void arm_cpu_set_bit_prop(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + DeviceState *dev = DEVICE(obj); + ARMCPU *cpu = ARM_CPU(obj); + ARMISARegisters *isar = &cpu->isar; + CPUFeatureInfo *feat = opaque; + Error *local_err = NULL; + bool value; + + if (!kvm_arm_cpu_feature_supported()) { + warn_report("KVM doesn't support to set CPU feature in arm. " + "Setting to `%s` is ignored.", name); + return; + } + if (dev->realized) { + qdev_prop_set_after_realize(dev, name, errp); + return; + } + + visit_type_bool(v, name, &value, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + + if (value) { + if (object_property_get_bool(obj, feat->name, NULL)) { + return; + } + isar->regs[feat->reg] = deposit64(isar->regs[feat->reg], + feat->shift, feat->length, + feat->min_value); + /* Auto enable the features which current feature is dependent on. */ + for (int i = 0; i < ARRAY_SIZE(feature_dependencies); ++i) { + const CPUFeatureDep *d = &feature_dependencies[i]; + if (strcmp(d->to.name, feat->name) != 0) { + continue; + } + + object_property_set_bool(obj, d->from.name, true, &local_err); + if (local_err) { + error_propagate(errp, local_err); + return; + } + } + } else { + if (!object_property_get_bool(obj, feat->name, NULL)) { + return; + } + isar->regs[feat->reg] = deposit64(isar->regs[feat->reg], + feat->shift, feat->length, + feat->ni_value); + /* Auto disable the features which are dependent on current feature. 
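For example, per feature_dependencies[] above, clearing "aes" also clears "pmull" and clearing "sha2" clears "sha512".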
+        for (int i = 0; i < ARRAY_SIZE(feature_dependencies); ++i) {
+            const CPUFeatureDep *d = &feature_dependencies[i];
+            if (strcmp(d->from.name, feat->name) != 0) {
+                continue;
+            }
+
+            object_property_set_bool(obj, d->to.name, false, &local_err);
+            if (local_err) {
+                error_propagate(errp, local_err);
+                return;
+            }
+        }
+    }
+}
+
+static void arm_cpu_register_feature_props(ARMCPU *cpu)
+{
+    int i;
+    int num = ARRAY_SIZE(cpu_features);
+    ObjectProperty *op;
+    CPUARMState *env = &cpu->env;
+
+    for (i = 0; i < num; i++) {
+        if ((arm_feature(env, ARM_FEATURE_AARCH64) && cpu_features[i].is_32bit)
+            || (!arm_feature(env, ARM_FEATURE_AARCH64) &&
+                !cpu_features[i].is_32bit)) {
+            continue;
+        }
+        op = object_property_find(OBJECT(cpu), cpu_features[i].name);
+        if (!op) {
+            object_property_add(OBJECT(cpu), cpu_features[i].name, "bool",
+                                arm_cpu_get_bit_prop,
+                                arm_cpu_set_bit_prop,
+                                NULL, &cpu_features[i]);
+        }
+    }
+}
+
 void arm_cpu_post_init(Object *obj)
 {
     ARMCPU *cpu = ARM_CPU(obj);
@@ -1319,6 +1824,8 @@ void arm_cpu_post_init(Object *obj)

     qdev_property_add_static(DEVICE(obj), &arm_cpu_cfgend_property);

+    arm_cpu_register_feature_props(cpu);
+
     if (arm_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER)) {
         qdev_property_add_static(DEVICE(cpu), &arm_cpu_gt_cntfrq_property);
     }
@@ -1520,20 +2027,20 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
         uint64_t t;
         uint32_t u;

-        t = cpu->isar.id_aa64isar1;
+        t = cpu->isar.regs[ID_AA64ISAR1];
         t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 0);
-        cpu->isar.id_aa64isar1 = t;
+        cpu->isar.regs[ID_AA64ISAR1] = t;

-        t = cpu->isar.id_aa64pfr0;
+        t = cpu->isar.regs[ID_AA64PFR0];
         t = FIELD_DP64(t, ID_AA64PFR0, FP, 0xf);
-        cpu->isar.id_aa64pfr0 = t;
+        cpu->isar.regs[ID_AA64PFR0] = t;

-        u = cpu->isar.id_isar6;
+        u = cpu->isar.regs[ID_ISAR6];
         u = FIELD_DP32(u, ID_ISAR6, JSCVT, 0);
         u = FIELD_DP32(u, ID_ISAR6, BF16, 0);
-        cpu->isar.id_isar6 = u;
+        cpu->isar.regs[ID_ISAR6] = u;

-        u = cpu->isar.mvfr0;
+        u = cpu->isar.regs[MVFR0];
         u = FIELD_DP32(u, MVFR0, FPSP, 0);
         u = FIELD_DP32(u, MVFR0, FPDP, 0);
         u = FIELD_DP32(u, MVFR0, FPDIVIDE, 0);
@@ -1543,20 +2050,20 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)
             u = FIELD_DP32(u, MVFR0, FPTRAP, 0);
             u = FIELD_DP32(u, MVFR0, FPSHVEC, 0);
         }
-        cpu->isar.mvfr0 = u;
+        cpu->isar.regs[MVFR0] = u;

-        u = cpu->isar.mvfr1;
+        u = cpu->isar.regs[MVFR1];
         u = FIELD_DP32(u, MVFR1, FPFTZ, 0);
         u = FIELD_DP32(u, MVFR1, FPDNAN, 0);
         u = FIELD_DP32(u, MVFR1, FPHP, 0);
         if (arm_feature(env, ARM_FEATURE_M)) {
             u = FIELD_DP32(u, MVFR1, FP16, 0);
         }
-        cpu->isar.mvfr1 = u;
+        cpu->isar.regs[MVFR1] = u;

-        u = cpu->isar.mvfr2;
+        u = cpu->isar.regs[MVFR2];
         u = FIELD_DP32(u, MVFR2, FPMISC, 0);
-        cpu->isar.mvfr2 = u;
+        cpu->isar.regs[MVFR2] = u;
     }

     if (!cpu->has_neon) {
@@ -1565,43 +2072,43 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp)

         unset_feature(env, ARM_FEATURE_NEON);

-        t = cpu->isar.id_aa64isar0;
+        t = cpu->isar.regs[ID_AA64ISAR0];
         t = FIELD_DP64(t, ID_AA64ISAR0, DP, 0);
-        cpu->isar.id_aa64isar0 = t;
+        cpu->isar.regs[ID_AA64ISAR0] = t;

-        t = cpu->isar.id_aa64isar1;
+        t = cpu->isar.regs[ID_AA64ISAR1];
         t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 0);
         t = FIELD_DP64(t, ID_AA64ISAR1, BF16, 0);
         t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 0);
-        cpu->isar.id_aa64isar1 = t;
+        cpu->isar.regs[ID_AA64ISAR1] = t;

-        t = cpu->isar.id_aa64pfr0;
+        t = cpu->isar.regs[ID_AA64PFR0];
         t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 0xf);
-        cpu->isar.id_aa64pfr0 = t;
+        cpu->isar.regs[ID_AA64PFR0] = t;

-        u = cpu->isar.id_isar5;
+        u = 
FIELD_DP32(u, ID_ISAR5, RDM, 0); u = FIELD_DP32(u, ID_ISAR5, VCMA, 0); - cpu->isar.id_isar5 = u; + cpu->isar.regs[ID_ISAR5] = u; - u = cpu->isar.id_isar6; + u = cpu->isar.regs[ID_ISAR6]; u = FIELD_DP32(u, ID_ISAR6, DP, 0); u = FIELD_DP32(u, ID_ISAR6, FHM, 0); u = FIELD_DP32(u, ID_ISAR6, BF16, 0); u = FIELD_DP32(u, ID_ISAR6, I8MM, 0); - cpu->isar.id_isar6 = u; + cpu->isar.regs[ID_ISAR6] = u; if (!arm_feature(env, ARM_FEATURE_M)) { - u = cpu->isar.mvfr1; + u = cpu->isar.regs[MVFR1]; u = FIELD_DP32(u, MVFR1, SIMDLS, 0); u = FIELD_DP32(u, MVFR1, SIMDINT, 0); u = FIELD_DP32(u, MVFR1, SIMDSP, 0); u = FIELD_DP32(u, MVFR1, SIMDHP, 0); - cpu->isar.mvfr1 = u; + cpu->isar.regs[MVFR1] = u; - u = cpu->isar.mvfr2; + u = cpu->isar.regs[MVFR2]; u = FIELD_DP32(u, MVFR2, SIMDMISC, 0); - cpu->isar.mvfr2 = u; + cpu->isar.regs[MVFR2] = u; } } @@ -1609,22 +2116,22 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) uint64_t t; uint32_t u; - t = cpu->isar.id_aa64isar0; + t = cpu->isar.regs[ID_AA64ISAR0]; t = FIELD_DP64(t, ID_AA64ISAR0, FHM, 0); - cpu->isar.id_aa64isar0 = t; + cpu->isar.regs[ID_AA64ISAR0] = t; - t = cpu->isar.id_aa64isar1; + t = cpu->isar.regs[ID_AA64ISAR1]; t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 0); - cpu->isar.id_aa64isar1 = t; + cpu->isar.regs[ID_AA64ISAR1] = t; - u = cpu->isar.mvfr0; + u = cpu->isar.regs[MVFR0]; u = FIELD_DP32(u, MVFR0, SIMDREG, 0); - cpu->isar.mvfr0 = u; + cpu->isar.regs[MVFR0] = u; /* Despite the name, this field covers both VFP and Neon */ - u = cpu->isar.mvfr1; + u = cpu->isar.regs[MVFR1]; u = FIELD_DP32(u, MVFR1, SIMDFMAC, 0); - cpu->isar.mvfr1 = u; + cpu->isar.regs[MVFR1] = u; } if (arm_feature(env, ARM_FEATURE_M) && !cpu->has_dsp) { @@ -1632,19 +2139,19 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) unset_feature(env, ARM_FEATURE_THUMB_DSP); - u = cpu->isar.id_isar1; + u = cpu->isar.regs[ID_ISAR1]; u = FIELD_DP32(u, ID_ISAR1, EXTEND, 1); - cpu->isar.id_isar1 = u; + cpu->isar.regs[ID_ISAR1] = u; - u = cpu->isar.id_isar2; + u = cpu->isar.regs[ID_ISAR2]; u = FIELD_DP32(u, ID_ISAR2, MULTU, 1); u = FIELD_DP32(u, ID_ISAR2, MULTS, 1); - cpu->isar.id_isar2 = u; + cpu->isar.regs[ID_ISAR2] = u; - u = cpu->isar.id_isar3; + u = cpu->isar.regs[ID_ISAR3]; u = FIELD_DP32(u, ID_ISAR3, SIMD, 1); u = FIELD_DP32(u, ID_ISAR3, SATURATE, 0); - cpu->isar.id_isar3 = u; + cpu->isar.regs[ID_ISAR3] = u; } /* Some features automatically imply others: */ @@ -1776,7 +2283,7 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) } } - if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3) { + if (!arm_feature(env, ARM_FEATURE_M) && !cpu->has_el3 && !kvm_enabled()) { /* If the has_el3 CPU property is disabled then we need to disable the * feature. */ @@ -1785,8 +2292,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) /* Disable the security extension feature bits in the processor feature * registers as well. These are id_pfr1[7:4] and id_aa64pfr0[15:12]. 
*/ - cpu->isar.id_pfr1 &= ~0xf0; - cpu->isar.id_aa64pfr0 &= ~0xf000; + cpu->isar.regs[ID_PFR1] &= ~0xf0; + cpu->isar.regs[ID_AA64PFR0] &= ~0xf000; } if (!cpu->has_el2) { @@ -1809,20 +2316,21 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) cpu); #endif } else { - cpu->isar.id_aa64dfr0 = - FIELD_DP64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, PMUVER, 0); - cpu->isar.id_dfr0 = FIELD_DP32(cpu->isar.id_dfr0, ID_DFR0, PERFMON, 0); + cpu->isar.regs[ID_AA64DFR0] = + FIELD_DP64(cpu->isar.regs[ID_AA64DFR0], ID_AA64DFR0, PMUVER, 0); + cpu->isar.regs[ID_DFR0] = FIELD_DP32(cpu->isar.regs[ID_DFR0], ID_DFR0, + PERFMON, 0); cpu->pmceid0 = 0; cpu->pmceid1 = 0; } - if (!arm_feature(env, ARM_FEATURE_EL2)) { + if (!arm_feature(env, ARM_FEATURE_EL2) && !kvm_enabled()) { /* Disable the hypervisor feature bits in the processor feature * registers if we don't have EL2. These are id_pfr1[15:12] and * id_aa64pfr0_el1[11:8]. */ - cpu->isar.id_aa64pfr0 &= ~0xf00; - cpu->isar.id_pfr1 &= ~0xf000; + cpu->isar.regs[ID_AA64PFR0] &= ~0xf00; + cpu->isar.regs[ID_PFR1] &= ~0xf000; } #ifndef CONFIG_USER_ONLY @@ -1831,8 +2339,8 @@ static void arm_cpu_realizefn(DeviceState *dev, Error **errp) * Disable the MTE feature bits if we do not have tag-memory * provided by the machine. */ - cpu->isar.id_aa64pfr1 = - FIELD_DP64(cpu->isar.id_aa64pfr1, ID_AA64PFR1, MTE, 0); + cpu->isar.regs[ID_AA64PFR1] = + FIELD_DP64(cpu->isar.regs[ID_AA64PFR1], ID_AA64PFR1, MTE, 0); } #endif @@ -1999,6 +2507,10 @@ static Property arm_cpu_properties[] = { DEFINE_PROP_UINT64("mp-affinity", ARMCPU, mp_affinity, ARM64_AFFINITY_INVALID), DEFINE_PROP_INT32("node-id", ARMCPU, node_id, CPU_UNSET_NUMA_NODE_ID), + DEFINE_PROP_INT32("socket-id", ARMCPU, socket_id, -1), + DEFINE_PROP_INT32("cluster-id", ARMCPU, cluster_id, -1), + DEFINE_PROP_INT32("core-id", ARMCPU, core_id, -1), + DEFINE_PROP_INT32("thread-id", ARMCPU, thread_id, -1), DEFINE_PROP_INT32("core-count", ARMCPU, core_count, -1), DEFINE_PROP_END_OF_LIST() }; @@ -2049,6 +2561,13 @@ static const struct TCGCPUOps arm_tcg_ops = { }; #endif /* CONFIG_TCG */ +static int64_t arm_cpu_get_arch_id(CPUState *cs) +{ + ARMCPU *cpu = ARM_CPU(cs); + + return cpu->mp_affinity; +} + static void arm_cpu_class_init(ObjectClass *oc, void *data) { ARMCPUClass *acc = ARM_CPU_CLASS(oc); @@ -2061,12 +2580,15 @@ static void arm_cpu_class_init(ObjectClass *oc, void *data) device_class_set_props(dc, arm_cpu_properties); device_class_set_parent_reset(dc, arm_cpu_reset, &acc->parent_reset); + dc->user_creatable = true; + cc->class_by_name = arm_cpu_class_by_name; cc->has_work = arm_cpu_has_work; cc->dump_state = arm_cpu_dump_state; cc->set_pc = arm_cpu_set_pc; cc->gdb_read_register = arm_cpu_gdb_read_register; cc->gdb_write_register = arm_cpu_gdb_write_register; + cc->get_arch_id = arm_cpu_get_arch_id; #ifndef CONFIG_USER_ONLY cc->sysemu_ops = &arm_sysemu_ops; #endif diff --git a/target/arm/cpu.h b/target/arm/cpu.h index e33f37b70adaef0a49debcf703e4e1be66dc3055..eb804dffaa65e4f6badf69fa0bd7a74c109b7d04 100644 --- a/target/arm/cpu.h +++ b/target/arm/cpu.h @@ -69,6 +69,41 @@ #define ARMV7M_EXCP_PENDSV 14 #define ARMV7M_EXCP_SYSTICK 15 +typedef enum CPUIDReg { + MIDR_EL1, + ID_ISAR0, + ID_ISAR1, + ID_ISAR2, + ID_ISAR3, + ID_ISAR4, + ID_ISAR5, + ID_ISAR6, + ID_PFR0, + ID_PFR1, + ID_PFR2, + ID_MMFR0, + ID_MMFR1, + ID_MMFR2, + ID_MMFR3, + ID_MMFR4, + ID_AA64ISAR0, + ID_AA64ISAR1, + ID_AA64PFR0, + ID_AA64PFR1, + ID_AA64MMFR0, + ID_AA64MMFR1, + ID_AA64MMFR2, + ID_AA64DFR0, + ID_AA64DFR1, + ID_AA64ZFR0, + ID_DFR0, + MVFR0, + MVFR1, 
+ MVFR2, + DBGDIDR, + ID_MAX, +} CPUIDReg; + /* For M profile, some registers are banked secure vs non-secure; * these are represented as a 2-element array where the first element * is the non-secure copy and the second is the secure copy. @@ -922,36 +957,7 @@ struct ARMCPU { * field by reading the value from the KVM vCPU. */ struct ARMISARegisters { - uint32_t id_isar0; - uint32_t id_isar1; - uint32_t id_isar2; - uint32_t id_isar3; - uint32_t id_isar4; - uint32_t id_isar5; - uint32_t id_isar6; - uint32_t id_mmfr0; - uint32_t id_mmfr1; - uint32_t id_mmfr2; - uint32_t id_mmfr3; - uint32_t id_mmfr4; - uint32_t id_pfr0; - uint32_t id_pfr1; - uint32_t id_pfr2; - uint32_t mvfr0; - uint32_t mvfr1; - uint32_t mvfr2; - uint32_t id_dfr0; - uint32_t dbgdidr; - uint64_t id_aa64isar0; - uint64_t id_aa64isar1; - uint64_t id_aa64pfr0; - uint64_t id_aa64pfr1; - uint64_t id_aa64mmfr0; - uint64_t id_aa64mmfr1; - uint64_t id_aa64mmfr2; - uint64_t id_aa64dfr0; - uint64_t id_aa64dfr1; - uint64_t id_aa64zfr0; + uint64_t regs[ID_MAX]; } isar; uint64_t midr; uint32_t revidr; @@ -1000,6 +1006,10 @@ struct ARMCPU { QLIST_HEAD(, ARMELChangeHook) el_change_hooks; int32_t node_id; /* NUMA node this CPU belongs to */ + int32_t socket_id; + int32_t cluster_id; + int32_t core_id; + int32_t thread_id; /* Used to synchronize KVM and QEMU in-kernel device levels */ uint8_t device_irq_level; @@ -3729,103 +3739,103 @@ static inline target_ulong cpu_untagged_addr(CPUState *cs, target_ulong x) */ static inline bool isar_feature_aa32_thumb_div(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) != 0; + return FIELD_EX32(id->regs[ID_ISAR0], ID_ISAR0, DIVIDE) != 0; } static inline bool isar_feature_aa32_arm_div(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar0, ID_ISAR0, DIVIDE) > 1; + return FIELD_EX32(id->regs[ID_ISAR0], ID_ISAR0, DIVIDE) > 1; } static inline bool isar_feature_aa32_lob(const ARMISARegisters *id) { /* (M-profile) low-overhead loops and branch future */ - return FIELD_EX32(id->id_isar0, ID_ISAR0, CMPBRANCH) >= 3; + return FIELD_EX32(id->regs[ID_ISAR0], ID_ISAR0, CMPBRANCH) >= 3; } static inline bool isar_feature_aa32_jazelle(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar1, ID_ISAR1, JAZELLE) != 0; + return FIELD_EX32(id->regs[ID_ISAR1], ID_ISAR1, JAZELLE) != 0; } static inline bool isar_feature_aa32_aes(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, AES) != 0; } static inline bool isar_feature_aa32_pmull(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, AES) > 1; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, AES) > 1; } static inline bool isar_feature_aa32_sha1(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA1) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, SHA1) != 0; } static inline bool isar_feature_aa32_sha2(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, SHA2) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, SHA2) != 0; } static inline bool isar_feature_aa32_crc32(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, CRC32) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, CRC32) != 0; } static inline bool isar_feature_aa32_rdm(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar5, ID_ISAR5, RDM) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, RDM) != 0; } static inline bool isar_feature_aa32_vcma(const ARMISARegisters *id) { - return 
FIELD_EX32(id->id_isar5, ID_ISAR5, VCMA) != 0; + return FIELD_EX32(id->regs[ID_ISAR5], ID_ISAR5, VCMA) != 0; } static inline bool isar_feature_aa32_jscvt(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, JSCVT) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, JSCVT) != 0; } static inline bool isar_feature_aa32_dp(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, DP) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, DP) != 0; } static inline bool isar_feature_aa32_fhm(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, FHM) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, FHM) != 0; } static inline bool isar_feature_aa32_sb(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, SB) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, SB) != 0; } static inline bool isar_feature_aa32_predinv(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, SPECRES) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, SPECRES) != 0; } static inline bool isar_feature_aa32_bf16(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, BF16) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, BF16) != 0; } static inline bool isar_feature_aa32_i8mm(const ARMISARegisters *id) { - return FIELD_EX32(id->id_isar6, ID_ISAR6, I8MM) != 0; + return FIELD_EX32(id->regs[ID_ISAR6], ID_ISAR6, I8MM) != 0; } static inline bool isar_feature_aa32_ras(const ARMISARegisters *id) { - return FIELD_EX32(id->id_pfr0, ID_PFR0, RAS) != 0; + return FIELD_EX32(id->regs[ID_PFR0], ID_PFR0, RAS) != 0; } static inline bool isar_feature_aa32_mprofile(const ARMISARegisters *id) { - return FIELD_EX32(id->id_pfr1, ID_PFR1, MPROGMOD) != 0; + return FIELD_EX32(id->regs[ID_PFR1], ID_PFR1, MPROGMOD) != 0; } static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id) @@ -3834,16 +3844,16 @@ static inline bool isar_feature_aa32_m_sec_state(const ARMISARegisters *id) * Return true if M-profile state handling insns * (VSCCLRM, CLRM, FPCTX access insns) are implemented */ - return FIELD_EX32(id->id_pfr1, ID_PFR1, SECURITY) >= 3; + return FIELD_EX32(id->regs[ID_PFR1], ID_PFR1, SECURITY) >= 3; } static inline bool isar_feature_aa32_fp16_arith(const ARMISARegisters *id) { /* Sadly this is encoded differently for A-profile and M-profile */ if (isar_feature_aa32_mprofile(id)) { - return FIELD_EX32(id->mvfr1, MVFR1, FP16) > 0; + return FIELD_EX32(id->regs[MVFR1], MVFR1, FP16) > 0; } else { - return FIELD_EX32(id->mvfr1, MVFR1, FPHP) >= 3; + return FIELD_EX32(id->regs[MVFR1], MVFR1, FPHP) >= 3; } } @@ -3855,7 +3865,7 @@ static inline bool isar_feature_aa32_mve(const ARMISARegisters *id) * else for A-profile. */ return isar_feature_aa32_mprofile(id) && - FIELD_EX32(id->mvfr1, MVFR1, MVE) > 0; + FIELD_EX32(id->regs[MVFR1], MVFR1, MVE) > 0; } static inline bool isar_feature_aa32_mve_fp(const ARMISARegisters *id) @@ -3866,7 +3876,7 @@ static inline bool isar_feature_aa32_mve_fp(const ARMISARegisters *id) * else for A-profile. */ return isar_feature_aa32_mprofile(id) && - FIELD_EX32(id->mvfr1, MVFR1, MVE) >= 2; + FIELD_EX32(id->regs[MVFR1], MVFR1, MVE) >= 2; } static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id) @@ -3875,42 +3885,42 @@ static inline bool isar_feature_aa32_vfp_simd(const ARMISARegisters *id) * Return true if either VFP or SIMD is implemented. * In this case, a minimum of VFP w/ D0-D15. 
*/ - return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) > 0; + return FIELD_EX32(id->regs[MVFR0], MVFR0, SIMDREG) > 0; } static inline bool isar_feature_aa32_simd_r32(const ARMISARegisters *id) { /* Return true if D16-D31 are implemented */ - return FIELD_EX32(id->mvfr0, MVFR0, SIMDREG) >= 2; + return FIELD_EX32(id->regs[MVFR0], MVFR0, SIMDREG) >= 2; } static inline bool isar_feature_aa32_fpshvec(const ARMISARegisters *id) { - return FIELD_EX32(id->mvfr0, MVFR0, FPSHVEC) > 0; + return FIELD_EX32(id->regs[MVFR0], MVFR0, FPSHVEC) > 0; } static inline bool isar_feature_aa32_fpsp_v2(const ARMISARegisters *id) { /* Return true if CPU supports single precision floating point, VFPv2 */ - return FIELD_EX32(id->mvfr0, MVFR0, FPSP) > 0; + return FIELD_EX32(id->regs[MVFR0], MVFR0, FPSP) > 0; } static inline bool isar_feature_aa32_fpsp_v3(const ARMISARegisters *id) { /* Return true if CPU supports single precision floating point, VFPv3 */ - return FIELD_EX32(id->mvfr0, MVFR0, FPSP) >= 2; + return FIELD_EX32(id->regs[MVFR0], MVFR0, FPSP) >= 2; } static inline bool isar_feature_aa32_fpdp_v2(const ARMISARegisters *id) { /* Return true if CPU supports double precision floating point, VFPv2 */ - return FIELD_EX32(id->mvfr0, MVFR0, FPDP) > 0; + return FIELD_EX32(id->regs[MVFR0], MVFR0, FPDP) > 0; } static inline bool isar_feature_aa32_fpdp_v3(const ARMISARegisters *id) { /* Return true if CPU supports double precision floating point, VFPv3 */ - return FIELD_EX32(id->mvfr0, MVFR0, FPDP) >= 2; + return FIELD_EX32(id->regs[MVFR0], MVFR0, FPDP) >= 2; } static inline bool isar_feature_aa32_vfp(const ARMISARegisters *id) @@ -3925,12 +3935,12 @@ static inline bool isar_feature_aa32_vfp(const ARMISARegisters *id) */ static inline bool isar_feature_aa32_fp16_spconv(const ARMISARegisters *id) { - return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 0; + return FIELD_EX32(id->regs[MVFR1], MVFR1, FPHP) > 0; } static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id) { - return FIELD_EX32(id->mvfr1, MVFR1, FPHP) > 1; + return FIELD_EX32(id->regs[MVFR1], MVFR1, FPHP) > 1; } /* @@ -3942,86 +3952,86 @@ static inline bool isar_feature_aa32_fp16_dpconv(const ARMISARegisters *id) */ static inline bool isar_feature_aa32_simdfmac(const ARMISARegisters *id) { - return FIELD_EX32(id->mvfr1, MVFR1, SIMDFMAC) != 0; + return FIELD_EX32(id->regs[MVFR1], MVFR1, SIMDFMAC) != 0; } static inline bool isar_feature_aa32_vsel(const ARMISARegisters *id) { - return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 1; + return FIELD_EX32(id->regs[MVFR2], MVFR2, FPMISC) >= 1; } static inline bool isar_feature_aa32_vcvt_dr(const ARMISARegisters *id) { - return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 2; + return FIELD_EX32(id->regs[MVFR2], MVFR2, FPMISC) >= 2; } static inline bool isar_feature_aa32_vrint(const ARMISARegisters *id) { - return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 3; + return FIELD_EX32(id->regs[MVFR2], MVFR2, FPMISC) >= 3; } static inline bool isar_feature_aa32_vminmaxnm(const ARMISARegisters *id) { - return FIELD_EX32(id->mvfr2, MVFR2, FPMISC) >= 4; + return FIELD_EX32(id->regs[MVFR2], MVFR2, FPMISC) >= 4; } static inline bool isar_feature_aa32_pxn(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr0, ID_MMFR0, VMSA) >= 4; + return FIELD_EX32(id->regs[ID_MMFR0], ID_MMFR0, VMSA) >= 4; } static inline bool isar_feature_aa32_pan(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) != 0; + return FIELD_EX32(id->regs[ID_MMFR3], ID_MMFR3, PAN) != 0; } static inline bool isar_feature_aa32_ats1e1(const 
ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2; + return FIELD_EX32(id->regs[ID_MMFR3], ID_MMFR3, PAN) >= 2; } static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ - return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 && - FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; + return FIELD_EX32(id->regs[ID_DFR0], ID_DFR0, PERFMON) >= 4 && + FIELD_EX32(id->regs[ID_DFR0], ID_DFR0, PERFMON) != 0xf; } static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id) { /* 0xf means "non-standard IMPDEF PMU" */ - return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 && - FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf; + return FIELD_EX32(id->regs[ID_DFR0], ID_DFR0, PERFMON) >= 5 && + FIELD_EX32(id->regs[ID_DFR0], ID_DFR0, PERFMON) != 0xf; } static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0; + return FIELD_EX32(id->regs[ID_MMFR4], ID_MMFR4, HPDS) != 0; } static inline bool isar_feature_aa32_ac2(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, AC2) != 0; + return FIELD_EX32(id->regs[ID_MMFR4], ID_MMFR4, AC2) != 0; } static inline bool isar_feature_aa32_ccidx(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, CCIDX) != 0; + return FIELD_EX32(id->regs[ID_MMFR4], ID_MMFR4, CCIDX) != 0; } static inline bool isar_feature_aa32_tts2uxn(const ARMISARegisters *id) { - return FIELD_EX32(id->id_mmfr4, ID_MMFR4, XNX) != 0; + return FIELD_EX32(id->regs[ID_MMFR4], ID_MMFR4, XNX) != 0; } static inline bool isar_feature_aa32_dit(const ARMISARegisters *id) { - return FIELD_EX32(id->id_pfr0, ID_PFR0, DIT) != 0; + return FIELD_EX32(id->regs[ID_PFR0], ID_PFR0, DIT) != 0; } static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id) { - return FIELD_EX32(id->id_pfr2, ID_PFR2, SSBS) != 0; + return FIELD_EX32(id->regs[ID_PFR2], ID_PFR2, SSBS) != 0; } /* @@ -4029,92 +4039,92 @@ static inline bool isar_feature_aa32_ssbs(const ARMISARegisters *id) */ static inline bool isar_feature_aa64_aes(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, AES) != 0; } static inline bool isar_feature_aa64_pmull(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, AES) > 1; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, AES) > 1; } static inline bool isar_feature_aa64_sha1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA1) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SHA1) != 0; } static inline bool isar_feature_aa64_sha256(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SHA2) != 0; } static inline bool isar_feature_aa64_sha512(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA2) > 1; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SHA2) > 1; } static inline bool isar_feature_aa64_crc32(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, CRC32) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, CRC32) != 0; } static inline bool isar_feature_aa64_atomics(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, ATOMIC) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, ATOMIC) != 0; } static inline bool 
isar_feature_aa64_rdm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RDM) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, RDM) != 0; } static inline bool isar_feature_aa64_sha3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SHA3) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SHA3) != 0; } static inline bool isar_feature_aa64_sm3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM3) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SM3) != 0; } static inline bool isar_feature_aa64_sm4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, SM4) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, SM4) != 0; } static inline bool isar_feature_aa64_dp(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, DP) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, DP) != 0; } static inline bool isar_feature_aa64_fhm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, FHM) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, FHM) != 0; } static inline bool isar_feature_aa64_condm_4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, TS) != 0; } static inline bool isar_feature_aa64_condm_5(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TS) >= 2; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, TS) >= 2; } static inline bool isar_feature_aa64_rndr(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, RNDR) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, RNDR) != 0; } static inline bool isar_feature_aa64_jscvt(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, JSCVT) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, JSCVT) != 0; } static inline bool isar_feature_aa64_fcma(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FCMA) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, FCMA) != 0; } static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) @@ -4123,7 +4133,7 @@ static inline bool isar_feature_aa64_pauth(const ARMISARegisters *id) * Return true if any form of pauth is enabled, as this * predicate controls migration of the 128-bit keys. */ - return (id->id_aa64isar1 & + return (id->regs[ID_AA64ISAR1] & (FIELD_DP64(0, ID_AA64ISAR1, APA, 0xf) | FIELD_DP64(0, ID_AA64ISAR1, API, 0xf) | FIELD_DP64(0, ID_AA64ISAR1, GPA, 0xf) | @@ -4136,221 +4146,221 @@ static inline bool isar_feature_aa64_pauth_arch(const ARMISARegisters *id) * Return true if pauth is enabled with the architected QARMA algorithm. * QEMU will always set APA+GPA to the same value. 
*/ - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, APA) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, APA) != 0; } static inline bool isar_feature_aa64_tlbirange(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) == 2; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, TLB) == 2; } static inline bool isar_feature_aa64_tlbios(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar0, ID_AA64ISAR0, TLB) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR0], ID_AA64ISAR0, TLB) != 0; } static inline bool isar_feature_aa64_sb(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SB) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, SB) != 0; } static inline bool isar_feature_aa64_predinv(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, SPECRES) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, SPECRES) != 0; } static inline bool isar_feature_aa64_frint(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, FRINTTS) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, FRINTTS) != 0; } static inline bool isar_feature_aa64_dcpop(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, DPB) != 0; } static inline bool isar_feature_aa64_dcpodp(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, DPB) >= 2; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, DPB) >= 2; } static inline bool isar_feature_aa64_bf16(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, BF16) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, BF16) != 0; } static inline bool isar_feature_aa64_fp_simd(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically. */ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) != 0xf; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, FP) != 0xf; } static inline bool isar_feature_aa64_fp16(const ARMISARegisters *id) { /* We always set the AdvSIMD and FP fields identically wrt FP16. 
*/ - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, FP) == 1; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, FP) == 1; } static inline bool isar_feature_aa64_aa32(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL0) >= 2; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, EL0) >= 2; } static inline bool isar_feature_aa64_aa32_el1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, EL1) >= 2; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, EL1) >= 2; } static inline bool isar_feature_aa64_sve(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SVE) != 0; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, SVE) != 0; } static inline bool isar_feature_aa64_sel2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, SEL2) != 0; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, SEL2) != 0; } static inline bool isar_feature_aa64_vh(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, VH) != 0; + return FIELD_EX64(id->regs[ID_AA64MMFR1], ID_AA64MMFR1, VH) != 0; } static inline bool isar_feature_aa64_lor(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, LO) != 0; + return FIELD_EX64(id->regs[ID_AA64MMFR1], ID_AA64MMFR1, LO) != 0; } static inline bool isar_feature_aa64_pan(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) != 0; + return FIELD_EX64(id->regs[ID_AA64MMFR1], ID_AA64MMFR1, PAN) != 0; } static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2; + return FIELD_EX64(id->regs[ID_AA64MMFR1], ID_AA64MMFR1, PAN) >= 2; } static inline bool isar_feature_aa64_uao(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0; + return FIELD_EX64(id->regs[ID_AA64MMFR2], ID_AA64MMFR2, UAO) != 0; } static inline bool isar_feature_aa64_st(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0; + return FIELD_EX64(id->regs[ID_AA64MMFR2], ID_AA64MMFR2, ST) != 0; } static inline bool isar_feature_aa64_bti(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0; + return FIELD_EX64(id->regs[ID_AA64PFR1], ID_AA64PFR1, BT) != 0; } static inline bool isar_feature_aa64_mte_insn_reg(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) != 0; + return FIELD_EX64(id->regs[ID_AA64PFR1], ID_AA64PFR1, MTE) != 0; } static inline bool isar_feature_aa64_mte(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, MTE) >= 2; + return FIELD_EX64(id->regs[ID_AA64PFR1], ID_AA64PFR1, MTE) >= 2; } static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 && - FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; + return FIELD_EX64(id->regs[ID_AA64DFR0], ID_AA64DFR0, PMUVER) >= 4 && + FIELD_EX64(id->regs[ID_AA64DFR0], ID_AA64DFR0, PMUVER) != 0xf; } static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 && - FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf; + return FIELD_EX64(id->regs[ID_AA64DFR0], ID_AA64DFR0, PMUVER) >= 5 && + FIELD_EX64(id->regs[ID_AA64DFR0], ID_AA64DFR0, PMUVER) != 0xf; } static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, 
ID_AA64ISAR1, LRCPC) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, LRCPC) != 0; } static inline bool isar_feature_aa64_rcpc_8_4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) >= 2; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, LRCPC) >= 2; } static inline bool isar_feature_aa64_i8mm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, I8MM) != 0; + return FIELD_EX64(id->regs[ID_AA64ISAR1], ID_AA64ISAR1, I8MM) != 0; } static inline bool isar_feature_aa64_ccidx(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, CCIDX) != 0; + return FIELD_EX64(id->regs[ID_AA64MMFR2], ID_AA64MMFR2, CCIDX) != 0; } static inline bool isar_feature_aa64_tts2uxn(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, XNX) != 0; + return FIELD_EX64(id->regs[ID_AA64MMFR1], ID_AA64MMFR1, XNX) != 0; } static inline bool isar_feature_aa64_dit(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr0, ID_AA64PFR0, DIT) != 0; + return FIELD_EX64(id->regs[ID_AA64PFR0], ID_AA64PFR0, DIT) != 0; } static inline bool isar_feature_aa64_ssbs(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SSBS) != 0; + return FIELD_EX64(id->regs[ID_AA64PFR1], ID_AA64PFR1, SSBS) != 0; } static inline bool isar_feature_aa64_sve2(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SVEVER) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, SVEVER) != 0; } static inline bool isar_feature_aa64_sve2_aes(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, AES) != 0; } static inline bool isar_feature_aa64_sve2_pmull128(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, AES) >= 2; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, AES) >= 2; } static inline bool isar_feature_aa64_sve2_bitperm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BITPERM) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, BITPERM) != 0; } static inline bool isar_feature_aa64_sve_bf16(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, BFLOAT16) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, BFLOAT16) != 0; } static inline bool isar_feature_aa64_sve2_sha3(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SHA3) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, SHA3) != 0; } static inline bool isar_feature_aa64_sve2_sm4(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, SM4) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, SM4) != 0; } static inline bool isar_feature_aa64_sve_i8mm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, I8MM) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, I8MM) != 0; } static inline bool isar_feature_aa64_sve_f32mm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F32MM) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, F32MM) != 0; } static inline bool isar_feature_aa64_sve_f64mm(const ARMISARegisters *id) { - return FIELD_EX64(id->id_aa64zfr0, ID_AA64ZFR0, F64MM) != 0; + return FIELD_EX64(id->regs[ID_AA64ZFR0], ID_AA64ZFR0, F64MM) != 0; } /* @@ -4392,4 +4402,6 @@ static inline bool isar_feature_any_tts2uxn(const ARMISARegisters *id) #define 
cpu_isar_feature(name, cpu) \ ({ ARMCPU *cpu_ = (cpu); isar_feature_##name(&cpu_->isar); }) +void arm_cpu_features_to_dict(ARMCPU *cpu, QDict *features); + #endif diff --git a/target/arm/cpu64.c b/target/arm/cpu64.c index 15245a60a8c701bf598a0d493e38d859663ecc67..b4522a337f45a6170e81e09232dec625bbbd0322 100644 --- a/target/arm/cpu64.c +++ b/target/arm/cpu64.c @@ -108,31 +108,31 @@ static void aarch64_a57_initfn(Object *obj) cpu->midr = 0x411fd070; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034070; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x12111111; + cpu->isar.regs[MVFR2] = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; + cpu->isar.regs[ID_PFR0] = 0x00000131; + cpu->isar.regs[ID_PFR1] = 0x00011011; + cpu->isar.regs[ID_DFR0] = 0x03010066; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001124; - cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.regs[ID_MMFR0] = 0x10101105; + cpu->isar.regs[ID_MMFR1] = 0x40000000; + cpu->isar.regs[ID_MMFR2] = 0x01260000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232042; + cpu->isar.regs[ID_ISAR3] = 0x01112131; + cpu->isar.regs[ID_ISAR4] = 0x00011142; + cpu->isar.regs[ID_ISAR5] = 0x00011121; + cpu->isar.regs[ID_ISAR6] = 0; + cpu->isar.regs[ID_AA64PFR0] = 0x00002222; + cpu->isar.regs[ID_AA64DFR0] = 0x10305106; + cpu->isar.regs[ID_AA64ISAR0] = 0x00011120; + cpu->isar.regs[ID_AA64MMFR0] = 0x00001124; + cpu->isar.regs[DBGDIDR] = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ @@ -161,31 +161,31 @@ static void aarch64_a53_initfn(Object *obj) cpu->midr = 0x410fd034; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034070; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x12111111; + cpu->isar.regs[MVFR2] = 0x00000043; cpu->ctr = 0x84448004; /* L1Ip = VIPT */ cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; + cpu->isar.regs[ID_PFR0] = 0x00000131; + cpu->isar.regs[ID_PFR1] = 0x00011011; + cpu->isar.regs[ID_DFR0] = 0x03010066; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_isar6 = 0; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - 
cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */ - cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.regs[ID_MMFR0] = 0x10101105; + cpu->isar.regs[ID_MMFR1] = 0x40000000; + cpu->isar.regs[ID_MMFR2] = 0x01260000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232042; + cpu->isar.regs[ID_ISAR3] = 0x01112131; + cpu->isar.regs[ID_ISAR4] = 0x00011142; + cpu->isar.regs[ID_ISAR5] = 0x00011121; + cpu->isar.regs[ID_ISAR6] = 0; + cpu->isar.regs[ID_AA64PFR0] = 0x00002222; + cpu->isar.regs[ID_AA64DFR0] = 0x10305106; + cpu->isar.regs[ID_AA64ISAR0] = 0x00011120; + cpu->isar.regs[ID_AA64MMFR0] = 0x00001122; /* 40 bit physical addr */ + cpu->isar.regs[DBGDIDR] = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */ @@ -202,6 +202,7 @@ static void aarch64_a72_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); cpu->dtb_compatible = "arm,cortex-a72"; + cpu->kvm_target = QEMU_KVM_ARM_TARGET_GENERIC_V8; set_feature(&cpu->env, ARM_FEATURE_V8); set_feature(&cpu->env, ARM_FEATURE_NEON); set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER); @@ -213,30 +214,30 @@ static void aarch64_a72_initfn(Object *obj) cpu->midr = 0x410fd083; cpu->revidr = 0x00000000; cpu->reset_fpsid = 0x41034080; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x12111111; - cpu->isar.mvfr2 = 0x00000043; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x12111111; + cpu->isar.regs[MVFR2] = 0x00000043; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50838; - cpu->isar.id_pfr0 = 0x00000131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x03010066; + cpu->isar.regs[ID_PFR0] = 0x00000131; + cpu->isar.regs[ID_PFR1] = 0x00011011; + cpu->isar.regs[ID_DFR0] = 0x03010066; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01260000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232042; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x00011142; - cpu->isar.id_isar5 = 0x00011121; - cpu->isar.id_aa64pfr0 = 0x00002222; - cpu->isar.id_aa64dfr0 = 0x10305106; - cpu->isar.id_aa64isar0 = 0x00011120; - cpu->isar.id_aa64mmfr0 = 0x00001124; - cpu->isar.dbgdidr = 0x3516d000; + cpu->isar.regs[ID_MMFR0] = 0x10201105; + cpu->isar.regs[ID_MMFR1] = 0x40000000; + cpu->isar.regs[ID_MMFR2] = 0x01260000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232042; + cpu->isar.regs[ID_ISAR3] = 0x01112131; + cpu->isar.regs[ID_ISAR4] = 0x00011142; + cpu->isar.regs[ID_ISAR5] = 0x00011121; + cpu->isar.regs[ID_AA64PFR0] = 0x00002222; + cpu->isar.regs[ID_AA64DFR0] = 0x10305106; + cpu->isar.regs[ID_AA64ISAR0] = 0x00011120; + cpu->isar.regs[ID_AA64MMFR0] = 0x00001124; + cpu->isar.regs[DBGDIDR] = 0x3516d000; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */ cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */ @@ -248,6 +249,48 @@ static void aarch64_a72_initfn(Object *obj) define_arm_cp_regs(cpu, cortex_a72_a57_a53_cp_reginfo); } +static void aarch64_kunpeng_920_initfn(Object *obj) +{ + ARMCPU *cpu = ARM_CPU(obj); + + /* + * Hisilicon Kunpeng-920 CPU is similar to cortex-a72, + * so first initialize cpu data as cortex-a72, + * and then update the special register. 
+     */
+    aarch64_a72_initfn(obj);
+
+    cpu->midr = 0x480fd010;
+    cpu->ctr = 0x84448004;
+    cpu->isar.regs[ID_ISAR0] = 0;
+    cpu->isar.regs[ID_ISAR1] = 0;
+    cpu->isar.regs[ID_ISAR2] = 0;
+    cpu->isar.regs[ID_ISAR3] = 0;
+    cpu->isar.regs[ID_ISAR4] = 0;
+    cpu->isar.regs[ID_ISAR5] = 0;
+    cpu->isar.regs[ID_MMFR0] = 0;
+    cpu->isar.regs[ID_MMFR1] = 0;
+    cpu->isar.regs[ID_MMFR2] = 0;
+    cpu->isar.regs[ID_MMFR3] = 0;
+    cpu->isar.regs[ID_MMFR4] = 0;
+    cpu->isar.regs[MVFR0] = 0;
+    cpu->isar.regs[MVFR1] = 0;
+    cpu->isar.regs[MVFR2] = 0;
+    cpu->isar.regs[ID_DFR0] = 0;
+    cpu->isar.regs[ID_PFR0] = 0;
+    cpu->isar.regs[ID_PFR1] = 0;
+    cpu->isar.regs[ID_AA64PFR0] = 0x0000010011111111;
+    cpu->isar.regs[ID_AA64DFR0] = 0x110305408;
+    cpu->isar.regs[ID_AA64ISAR0] = 0x0001100010211120;
+    cpu->isar.regs[ID_AA64ISAR1] = 0x00011001;
+    cpu->isar.regs[ID_AA64MMFR0] = 0x101125;
+    cpu->isar.regs[ID_AA64MMFR1] = 0x10211122;
+    cpu->isar.regs[ID_AA64MMFR2] = 0x00001011;
+}
+
 void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
 {
     /*
@@ -546,9 +589,9 @@ static void cpu_arm_set_sve(Object *obj, bool value, Error **errp)
         return;
     }

-    t = cpu->isar.id_aa64pfr0;
+    t = cpu->isar.regs[ID_AA64PFR0];
     t = FIELD_DP64(t, ID_AA64PFR0, SVE, value);
-    cpu->isar.id_aa64pfr0 = t;
+    cpu->isar.regs[ID_AA64PFR0] = t;
 }

 #ifdef CONFIG_USER_ONLY
@@ -642,12 +685,12 @@ void arm_cpu_pauth_finalize(ARMCPU *cpu, Error **errp)
         error_append_hint(errp, "Add pauth=on to the CPU property list.\n");
     }

-    t = cpu->isar.id_aa64isar1;
+    t = cpu->isar.regs[ID_AA64ISAR1];
     t = FIELD_DP64(t, ID_AA64ISAR1, APA, arch_val);
     t = FIELD_DP64(t, ID_AA64ISAR1, GPA, arch_val);
     t = FIELD_DP64(t, ID_AA64ISAR1, API, impdef_val);
     t = FIELD_DP64(t, ID_AA64ISAR1, GPI, impdef_val);
-    cpu->isar.id_aa64isar1 = t;
+    cpu->isar.regs[ID_AA64ISAR1] = t;
 }

 static Property arm_cpu_pauth_property =
@@ -655,6 +698,66 @@ static Property arm_cpu_pauth_impdef_property =
     DEFINE_PROP_BOOL("pauth-impdef", ARMCPU, prop_pauth_impdef, false);

+static void aarch64_max_ft2000plus_initfn(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    if (kvm_enabled()) {
+        kvm_arm_set_cpu_features_from_host(cpu);
+        kvm_arm_add_vcpu_properties(obj);
+    } else {
+        aarch64_a72_initfn(obj);
+        cpu->midr = 0x70186622;
+    }
+}
+
+static void aarch64_max_tengyun_s2500_initfn(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    if (kvm_enabled()) {
+        kvm_arm_set_cpu_features_from_host(cpu);
+        kvm_arm_add_vcpu_properties(obj);
+    } else {
+        aarch64_a72_initfn(obj);
+        cpu->midr = 0x70186632;
+    }
+}
+
+static void aarch64_tengyun_s5000c_initfn(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    aarch64_a72_initfn(obj);
+
+    cpu->midr = 0x700f8620;
+    cpu->ctr = 0x9444c004;
+    cpu->isar.regs[ID_ISAR0] = 0x2101110;
+    cpu->isar.regs[ID_ISAR1] = 0x1311211;
+    cpu->isar.regs[ID_ISAR2] = 0x21232042;
+    cpu->isar.regs[ID_ISAR3] = 0x1112131;
+    cpu->isar.regs[ID_ISAR4] = 0x10142;
+    cpu->isar.regs[ID_ISAR5] = 0x1011121;
+    cpu->isar.regs[ID_MMFR0] = 0x10201105;
+    cpu->isar.regs[ID_MMFR1] = 0x40000000;
+    cpu->isar.regs[ID_MMFR2] = 0x1260000;
+    cpu->isar.regs[ID_MMFR3] = 0x2122211;
+    cpu->isar.regs[ID_MMFR4] = 0x21110;
+    cpu->isar.regs[MVFR0] = 0x10110222;
+    cpu->isar.regs[MVFR1] = 0x13211111;
+    cpu->isar.regs[MVFR2] = 0x43;
+    cpu->isar.regs[ID_DFR0] = 0x4010088;
+    cpu->isar.regs[ID_PFR0] = 0x10131;
+    cpu->isar.regs[ID_PFR1] = 0x10010000;
+    cpu->isar.regs[ID_AA64PFR0] = 0x1100000011111112;
+    cpu->isar.regs[ID_AA64DFR0] = 0x10305408;
+    
cpu->isar.regs[ID_AA64ISAR0] = 0x111110212120; + cpu->isar.regs[ID_AA64ISAR1] = 0x100001; + cpu->isar.regs[ID_AA64MMFR0] = 0x101125; + cpu->isar.regs[ID_AA64MMFR1] = 0x10212122; + cpu->isar.regs[ID_AA64MMFR2] = 0x1011; +} + /* -cpu max: if KVM is enabled, like -cpu host (best possible with this host); * otherwise, a CPU with as many features enabled as our emulation supports. * The version of '-cpu max' for qemu-system-arm is defined in cpu.c; @@ -690,7 +793,7 @@ static void aarch64_max_initfn(Object *obj) t = FIELD_DP64(t, MIDR_EL1, REVISION, 0); cpu->midr = t; - t = cpu->isar.id_aa64isar0; + t = cpu->isar.regs[ID_AA64ISAR0]; t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* AES + PMULL */ t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); t = FIELD_DP64(t, ID_AA64ISAR0, SHA2, 2); /* SHA512 */ @@ -705,9 +808,9 @@ static void aarch64_max_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ISAR0, TS, 2); /* v8.5-CondM */ t = FIELD_DP64(t, ID_AA64ISAR0, TLB, 2); /* FEAT_TLBIRANGE */ t = FIELD_DP64(t, ID_AA64ISAR0, RNDR, 1); - cpu->isar.id_aa64isar0 = t; + cpu->isar.regs[ID_AA64ISAR0] = t; - t = cpu->isar.id_aa64isar1; + t = cpu->isar.regs[ID_AA64ISAR1]; t = FIELD_DP64(t, ID_AA64ISAR1, DPB, 2); t = FIELD_DP64(t, ID_AA64ISAR1, JSCVT, 1); t = FIELD_DP64(t, ID_AA64ISAR1, FCMA, 1); @@ -717,17 +820,17 @@ static void aarch64_max_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ISAR1, FRINTTS, 1); t = FIELD_DP64(t, ID_AA64ISAR1, LRCPC, 2); /* ARMv8.4-RCPC */ t = FIELD_DP64(t, ID_AA64ISAR1, I8MM, 1); - cpu->isar.id_aa64isar1 = t; + cpu->isar.regs[ID_AA64ISAR1] = t; - t = cpu->isar.id_aa64pfr0; + t = cpu->isar.regs[ID_AA64PFR0]; t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); t = FIELD_DP64(t, ID_AA64PFR0, FP, 1); t = FIELD_DP64(t, ID_AA64PFR0, ADVSIMD, 1); t = FIELD_DP64(t, ID_AA64PFR0, SEL2, 1); t = FIELD_DP64(t, ID_AA64PFR0, DIT, 1); - cpu->isar.id_aa64pfr0 = t; + cpu->isar.regs[ID_AA64PFR0] = t; - t = cpu->isar.id_aa64pfr1; + t = cpu->isar.regs[ID_AA64PFR1]; t = FIELD_DP64(t, ID_AA64PFR1, BT, 1); t = FIELD_DP64(t, ID_AA64PFR1, SSBS, 2); /* @@ -736,28 +839,28 @@ static void aarch64_max_initfn(Object *obj) * we do for EL2 with the virtualization=on property. 
*/ t = FIELD_DP64(t, ID_AA64PFR1, MTE, 3); - cpu->isar.id_aa64pfr1 = t; + cpu->isar.regs[ID_AA64PFR1] = t; - t = cpu->isar.id_aa64mmfr0; + t = cpu->isar.regs[ID_AA64MMFR0]; t = FIELD_DP64(t, ID_AA64MMFR0, PARANGE, 5); /* PARange: 48 bits */ - cpu->isar.id_aa64mmfr0 = t; + cpu->isar.regs[ID_AA64MMFR0] = t; - t = cpu->isar.id_aa64mmfr1; + t = cpu->isar.regs[ID_AA64MMFR1]; t = FIELD_DP64(t, ID_AA64MMFR1, HPDS, 1); /* HPD */ t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); t = FIELD_DP64(t, ID_AA64MMFR1, VH, 1); t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* ATS1E1 */ t = FIELD_DP64(t, ID_AA64MMFR1, VMIDBITS, 2); /* VMID16 */ t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* TTS2UXN */ - cpu->isar.id_aa64mmfr1 = t; + cpu->isar.regs[ID_AA64MMFR1] = t; - t = cpu->isar.id_aa64mmfr2; + t = cpu->isar.regs[ID_AA64MMFR2]; t = FIELD_DP64(t, ID_AA64MMFR2, UAO, 1); t = FIELD_DP64(t, ID_AA64MMFR2, CNP, 1); /* TTCNP */ t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* TTST */ - cpu->isar.id_aa64mmfr2 = t; + cpu->isar.regs[ID_AA64MMFR2] = t; - t = cpu->isar.id_aa64zfr0; + t = cpu->isar.regs[ID_AA64ZFR0]; t = FIELD_DP64(t, ID_AA64ZFR0, SVEVER, 1); t = FIELD_DP64(t, ID_AA64ZFR0, AES, 2); /* PMULL */ t = FIELD_DP64(t, ID_AA64ZFR0, BITPERM, 1); @@ -767,19 +870,19 @@ static void aarch64_max_initfn(Object *obj) t = FIELD_DP64(t, ID_AA64ZFR0, I8MM, 1); t = FIELD_DP64(t, ID_AA64ZFR0, F32MM, 1); t = FIELD_DP64(t, ID_AA64ZFR0, F64MM, 1); - cpu->isar.id_aa64zfr0 = t; + cpu->isar.regs[ID_AA64ZFR0] = t; /* Replicate the same data to the 32-bit id registers. */ - u = cpu->isar.id_isar5; + u = cpu->isar.regs[ID_ISAR5]; u = FIELD_DP32(u, ID_ISAR5, AES, 2); /* AES + PMULL */ u = FIELD_DP32(u, ID_ISAR5, SHA1, 1); u = FIELD_DP32(u, ID_ISAR5, SHA2, 1); u = FIELD_DP32(u, ID_ISAR5, CRC32, 1); u = FIELD_DP32(u, ID_ISAR5, RDM, 1); u = FIELD_DP32(u, ID_ISAR5, VCMA, 1); - cpu->isar.id_isar5 = u; + cpu->isar.regs[ID_ISAR5] = u; - u = cpu->isar.id_isar6; + u = cpu->isar.regs[ID_ISAR6]; u = FIELD_DP32(u, ID_ISAR6, JSCVT, 1); u = FIELD_DP32(u, ID_ISAR6, DP, 1); u = FIELD_DP32(u, ID_ISAR6, FHM, 1); @@ -787,39 +890,39 @@ static void aarch64_max_initfn(Object *obj) u = FIELD_DP32(u, ID_ISAR6, SPECRES, 1); u = FIELD_DP32(u, ID_ISAR6, BF16, 1); u = FIELD_DP32(u, ID_ISAR6, I8MM, 1); - cpu->isar.id_isar6 = u; + cpu->isar.regs[ID_ISAR6] = u; - u = cpu->isar.id_pfr0; + u = cpu->isar.regs[ID_PFR0]; u = FIELD_DP32(u, ID_PFR0, DIT, 1); - cpu->isar.id_pfr0 = u; + cpu->isar.regs[ID_PFR0] = u; - u = cpu->isar.id_pfr2; + u = cpu->isar.regs[ID_PFR2]; u = FIELD_DP32(u, ID_PFR2, SSBS, 1); - cpu->isar.id_pfr2 = u; + cpu->isar.regs[ID_PFR2] = u; - u = cpu->isar.id_mmfr3; + u = cpu->isar.regs[ID_MMFR3]; u = FIELD_DP32(u, ID_MMFR3, PAN, 2); /* ATS1E1 */ - cpu->isar.id_mmfr3 = u; + cpu->isar.regs[ID_MMFR3] = u; - u = cpu->isar.id_mmfr4; + u = cpu->isar.regs[ID_MMFR4]; u = FIELD_DP32(u, ID_MMFR4, HPDS, 1); /* AA32HPD */ u = FIELD_DP32(u, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ u = FIELD_DP32(u, ID_MMFR4, CNP, 1); /* TTCNP */ u = FIELD_DP32(u, ID_MMFR4, XNX, 1); /* TTS2UXN */ - cpu->isar.id_mmfr4 = u; + cpu->isar.regs[ID_MMFR4] = u; - t = cpu->isar.id_aa64dfr0; + t = cpu->isar.regs[ID_AA64DFR0]; t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* v8.4-PMU */ - cpu->isar.id_aa64dfr0 = t; + cpu->isar.regs[ID_AA64DFR0] = t; - u = cpu->isar.id_dfr0; + u = cpu->isar.regs[ID_DFR0]; u = FIELD_DP32(u, ID_DFR0, PERFMON, 5); /* v8.4-PMU */ - cpu->isar.id_dfr0 = u; + cpu->isar.regs[ID_DFR0] = u; - u = cpu->isar.mvfr1; + u = cpu->isar.regs[MVFR1]; u = FIELD_DP32(u, MVFR1, FPHP, 3); /* v8.2-FP16 */ u = 
FIELD_DP32(u, MVFR1, SIMDHP, 2); /* v8.2-FP16 */ - cpu->isar.mvfr1 = u; + cpu->isar.regs[MVFR1] = u; #ifdef CONFIG_USER_ONLY /* For usermode -cpu max we can use a larger and more efficient DCZ @@ -857,18 +960,18 @@ static void aarch64_a64fx_initfn(Object *obj) cpu->revidr = 0x00000000; cpu->ctr = 0x86668006; cpu->reset_sctlr = 0x30000180; - cpu->isar.id_aa64pfr0 = 0x0000000101111111; /* No RAS Extensions */ - cpu->isar.id_aa64pfr1 = 0x0000000000000000; - cpu->isar.id_aa64dfr0 = 0x0000000010305408; - cpu->isar.id_aa64dfr1 = 0x0000000000000000; + cpu->isar.regs[ID_AA64PFR0] = 0x0000000101111111; /* No RAS Extensions */ + cpu->isar.regs[ID_AA64PFR1] = 0x0000000000000000; + cpu->isar.regs[ID_AA64DFR0] = 0x0000000010305408; + cpu->isar.regs[ID_AA64DFR1] = 0x0000000000000000; cpu->id_aa64afr0 = 0x0000000000000000; cpu->id_aa64afr1 = 0x0000000000000000; - cpu->isar.id_aa64mmfr0 = 0x0000000000001122; - cpu->isar.id_aa64mmfr1 = 0x0000000011212100; - cpu->isar.id_aa64mmfr2 = 0x0000000000001011; - cpu->isar.id_aa64isar0 = 0x0000000010211120; - cpu->isar.id_aa64isar1 = 0x0000000000010001; - cpu->isar.id_aa64zfr0 = 0x0000000000000000; + cpu->isar.regs[ID_AA64MMFR0] = 0x0000000000001122; + cpu->isar.regs[ID_AA64MMFR1] = 0x0000000011212100; + cpu->isar.regs[ID_AA64MMFR2] = 0x0000000000001011; + cpu->isar.regs[ID_AA64ISAR0] = 0x0000000010211120; + cpu->isar.regs[ID_AA64ISAR1] = 0x0000000000010001; + cpu->isar.regs[ID_AA64ZFR0] = 0x0000000000000000; cpu->clidr = 0x0000000080000023; cpu->ccsidr[0] = 0x7007e01c; /* 64KB L1 dcache */ cpu->ccsidr[1] = 0x2007e01c; /* 64KB L1 icache */ @@ -892,6 +995,10 @@ static const ARMCPUInfo aarch64_cpus[] = { { .name = "cortex-a57", .initfn = aarch64_a57_initfn }, { .name = "cortex-a53", .initfn = aarch64_a53_initfn }, { .name = "cortex-a72", .initfn = aarch64_a72_initfn }, + { .name = "Kunpeng-920", .initfn = aarch64_kunpeng_920_initfn}, + { .name = "FT-2000+", .initfn = aarch64_max_ft2000plus_initfn }, + { .name = "Tengyun-S2500", .initfn = aarch64_max_tengyun_s2500_initfn }, + { .name = "Tengyun-S5000C", .initfn = aarch64_tengyun_s5000c_initfn }, { .name = "a64fx", .initfn = aarch64_a64fx_initfn }, { .name = "max", .initfn = aarch64_max_initfn }, }; @@ -933,6 +1040,115 @@ static gchar *aarch64_gdb_arch_name(CPUState *cs) return g_strdup("aarch64"); } +static const char *unconfigurable_feats[] = { + "evtstrm", + "cpuid", + NULL +}; + +static bool is_configurable_feat(const char *name) +{ + int i; + + for (i = 0; unconfigurable_feats[i]; ++i) { + if (g_strcmp0(unconfigurable_feats[i], name) == 0) { + return false; + } + } + + return true; +} + +static void +cpu_add_feat_as_prop(const char *typename, const char *name, const char *val) +{ + GlobalProperty *prop; + + if (!is_configurable_feat(name)) { + info_report("CPU feature '%s' is not configurable by QEMU. 
Ignore it.", + name); + return; + } + + prop = g_new0(typeof(*prop), 1); + prop->driver = typename; + prop->property = g_strdup(name); + prop->value = g_strdup(val); + qdev_prop_register_global(prop); +} + +static gint compare_string(gconstpointer a, gconstpointer b) +{ + return g_strcmp0(a, b); +} + +static GList *plus_features, *minus_features; + +static void aarch64_cpu_parse_features(const char *typename, char *features, + Error **errp) +{ + GList *l; + char *featurestr; /* Single 'key=value" string being parsed */ + static bool cpu_globals_initialized; + + if (cpu_globals_initialized) { + return; + } + cpu_globals_initialized = true; + + if (!features) { + return; + } + for (featurestr = strtok(features, ","); + featurestr; + featurestr = strtok(NULL, ",")) { + const char *name; + const char *val = NULL; + char *eq = NULL; + + /* Compatibility syntax: */ + if (featurestr[0] == '+') { + plus_features = g_list_append(plus_features, + g_strdup(featurestr + 1)); + continue; + } else if (featurestr[0] == '-') { + minus_features = g_list_append(minus_features, + g_strdup(featurestr + 1)); + continue; + } + + eq = strchr(featurestr, '='); + name = featurestr; + if (eq) { + *eq++ = 0; + val = eq; + } else { + error_setg(errp, "Unsupported property format: %s", name); + return; + } + + if (g_list_find_custom(plus_features, name, compare_string)) { + warn_report("Ambiguous CPU model string. " + "Don't mix both \"+%s\" and \"%s=%s\"", + name, name, val); + } + if (g_list_find_custom(minus_features, name, compare_string)) { + warn_report("Ambiguous CPU model string. " + "Don't mix both \"-%s\" and \"%s=%s\"", + name, name, val); + } + cpu_add_feat_as_prop(typename, name, val); + } + + for (l = plus_features; l; l = l->next) { + cpu_add_feat_as_prop(typename, l->data, "on"); + } + + for (l = minus_features; l; l = l->next) { + cpu_add_feat_as_prop(typename, l->data, "off"); + } +} + static void aarch64_cpu_class_init(ObjectClass *oc, void *data) { CPUClass *cc = CPU_CLASS(oc); @@ -942,6 +1158,7 @@ static void aarch64_cpu_class_init(ObjectClass *oc, void *data) cc->gdb_num_core_regs = 34; cc->gdb_core_xml_file = "aarch64-core.xml"; cc->gdb_arch_name = aarch64_gdb_arch_name; + cc->parse_features = aarch64_cpu_parse_features; object_class_property_add_bool(oc, "aarch64", aarch64_cpu_get_aarch64, aarch64_cpu_set_aarch64); diff --git a/target/arm/cpu_tcg.c b/target/arm/cpu_tcg.c index 13d0e9b1954a826737d0975034d3e4d46f63e5aa..be9c3166fb6373325bcc7fe1168c549d4ea8424c 100644 --- a/target/arm/cpu_tcg.c +++ b/target/arm/cpu_tcg.c @@ -65,14 +65,16 @@ static void arm926_initfn(Object *obj) * ARMv5 does not have the ID_ISAR registers, but we can still * set the field to indicate Jazelle support within QEMU. */ - cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); + cpu->isar.regs[ID_ISAR1] = FIELD_DP32(cpu->isar.regs[ID_ISAR1], ID_ISAR1, + JAZELLE, 1); /* * Similarly, we need to set MVFR0 fields to enable vfp and short vector * support even though ARMv5 doesn't have this register. 
*/ - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1); - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPSHVEC, + 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPSP, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPDP, 1); } static void arm946_initfn(Object *obj) @@ -107,14 +109,16 @@ static void arm1026_initfn(Object *obj) * ARMv5 does not have the ID_ISAR registers, but we can still * set the field to indicate Jazelle support within QEMU. */ - cpu->isar.id_isar1 = FIELD_DP32(cpu->isar.id_isar1, ID_ISAR1, JAZELLE, 1); + cpu->isar.regs[ID_ISAR1] = FIELD_DP32(cpu->isar.regs[ID_ISAR1], ID_ISAR1, + JAZELLE, 1); /* * Similarly, we need to set MVFR0 fields to enable vfp and short vector * support even though ARMv5 doesn't have this register. */ - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSP, 1); - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPDP, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPSHVEC, + 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPSP, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPDP, 1); { /* The 1026 had an IFAR at c6,c0,0,1 rather than the ARMv6 c6,c0,0,2 */ @@ -147,22 +151,22 @@ static void arm1136_r2_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); cpu->midr = 0x4107b362; cpu->reset_fpsid = 0x410120b4; - cpu->isar.mvfr0 = 0x11111111; - cpu->isar.mvfr1 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x11111111; + cpu->isar.regs[MVFR1] = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; - cpu->isar.id_pfr0 = 0x111; - cpu->isar.id_pfr1 = 0x1; - cpu->isar.id_dfr0 = 0x2; + cpu->isar.regs[ID_PFR0] = 0x111; + cpu->isar.regs[ID_PFR1] = 0x1; + cpu->isar.regs[ID_DFR0] = 0x2; cpu->id_afr0 = 0x3; - cpu->isar.id_mmfr0 = 0x01130003; - cpu->isar.id_mmfr1 = 0x10030302; - cpu->isar.id_mmfr2 = 0x01222110; - cpu->isar.id_isar0 = 0x00140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231111; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + cpu->isar.regs[ID_MMFR0] = 0x01130003; + cpu->isar.regs[ID_MMFR1] = 0x10030302; + cpu->isar.regs[ID_MMFR2] = 0x01222110; + cpu->isar.regs[ID_ISAR0] = 0x00140011; + cpu->isar.regs[ID_ISAR1] = 0x12002111; + cpu->isar.regs[ID_ISAR2] = 0x11231111; + cpu->isar.regs[ID_ISAR3] = 0x01102131; + cpu->isar.regs[ID_ISAR4] = 0x141; cpu->reset_auxcr = 7; } @@ -178,22 +182,22 @@ static void arm1136_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CACHE_BLOCK_OPS); cpu->midr = 0x4117b363; cpu->reset_fpsid = 0x410120b4; - cpu->isar.mvfr0 = 0x11111111; - cpu->isar.mvfr1 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x11111111; + cpu->isar.regs[MVFR1] = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; - cpu->isar.id_pfr0 = 0x111; - cpu->isar.id_pfr1 = 0x1; - cpu->isar.id_dfr0 = 0x2; + cpu->isar.regs[ID_PFR0] = 0x111; + cpu->isar.regs[ID_PFR1] = 0x1; + cpu->isar.regs[ID_DFR0] = 0x2; cpu->id_afr0 = 0x3; - cpu->isar.id_mmfr0 = 0x01130003; - cpu->isar.id_mmfr1 = 0x10030302; - cpu->isar.id_mmfr2 = 0x01222110; - cpu->isar.id_isar0 = 0x00140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231111; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + cpu->isar.regs[ID_MMFR0] = 0x01130003; + 
cpu->isar.regs[ID_MMFR1] = 0x10030302; + cpu->isar.regs[ID_MMFR2] = 0x01222110; + cpu->isar.regs[ID_ISAR0] = 0x00140011; + cpu->isar.regs[ID_ISAR1] = 0x12002111; + cpu->isar.regs[ID_ISAR2] = 0x11231111; + cpu->isar.regs[ID_ISAR3] = 0x01102131; + cpu->isar.regs[ID_ISAR4] = 0x141; cpu->reset_auxcr = 7; } @@ -210,22 +214,22 @@ static void arm1176_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL3); cpu->midr = 0x410fb767; cpu->reset_fpsid = 0x410120b5; - cpu->isar.mvfr0 = 0x11111111; - cpu->isar.mvfr1 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x11111111; + cpu->isar.regs[MVFR1] = 0x00000000; cpu->ctr = 0x1dd20d2; cpu->reset_sctlr = 0x00050078; - cpu->isar.id_pfr0 = 0x111; - cpu->isar.id_pfr1 = 0x11; - cpu->isar.id_dfr0 = 0x33; + cpu->isar.regs[ID_PFR0] = 0x111; + cpu->isar.regs[ID_PFR1] = 0x11; + cpu->isar.regs[ID_DFR0] = 0x33; cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x01130003; - cpu->isar.id_mmfr1 = 0x10030302; - cpu->isar.id_mmfr2 = 0x01222100; - cpu->isar.id_isar0 = 0x0140011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11231121; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x01141; + cpu->isar.regs[ID_MMFR0] = 0x01130003; + cpu->isar.regs[ID_MMFR1] = 0x10030302; + cpu->isar.regs[ID_MMFR2] = 0x01222100; + cpu->isar.regs[ID_ISAR0] = 0x0140011; + cpu->isar.regs[ID_ISAR1] = 0x12002111; + cpu->isar.regs[ID_ISAR2] = 0x11231121; + cpu->isar.regs[ID_ISAR3] = 0x01102131; + cpu->isar.regs[ID_ISAR4] = 0x01141; cpu->reset_auxcr = 7; } @@ -240,21 +244,21 @@ static void arm11mpcore_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_DUMMY_C15_REGS); cpu->midr = 0x410fb022; cpu->reset_fpsid = 0x410120b4; - cpu->isar.mvfr0 = 0x11111111; - cpu->isar.mvfr1 = 0x00000000; + cpu->isar.regs[MVFR0] = 0x11111111; + cpu->isar.regs[MVFR1] = 0x00000000; cpu->ctr = 0x1d192992; /* 32K icache 32K dcache */ - cpu->isar.id_pfr0 = 0x111; - cpu->isar.id_pfr1 = 0x1; - cpu->isar.id_dfr0 = 0; + cpu->isar.regs[ID_PFR0] = 0x111; + cpu->isar.regs[ID_PFR1] = 0x1; + cpu->isar.regs[ID_DFR0] = 0; cpu->id_afr0 = 0x2; - cpu->isar.id_mmfr0 = 0x01100103; - cpu->isar.id_mmfr1 = 0x10020302; - cpu->isar.id_mmfr2 = 0x01222000; - cpu->isar.id_isar0 = 0x00100011; - cpu->isar.id_isar1 = 0x12002111; - cpu->isar.id_isar2 = 0x11221011; - cpu->isar.id_isar3 = 0x01102131; - cpu->isar.id_isar4 = 0x141; + cpu->isar.regs[ID_MMFR0] = 0x01100103; + cpu->isar.regs[ID_MMFR1] = 0x10020302; + cpu->isar.regs[ID_MMFR2] = 0x01222000; + cpu->isar.regs[ID_ISAR0] = 0x00100011; + cpu->isar.regs[ID_ISAR1] = 0x12002111; + cpu->isar.regs[ID_ISAR2] = 0x11221011; + cpu->isar.regs[ID_ISAR3] = 0x01102131; + cpu->isar.regs[ID_ISAR4] = 0x141; cpu->reset_auxcr = 1; } @@ -278,24 +282,24 @@ static void cortex_a8_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_EL3); cpu->midr = 0x410fc080; cpu->reset_fpsid = 0x410330c0; - cpu->isar.mvfr0 = 0x11110222; - cpu->isar.mvfr1 = 0x00011111; + cpu->isar.regs[MVFR0] = 0x11110222; + cpu->isar.regs[MVFR1] = 0x00011111; cpu->ctr = 0x82048004; cpu->reset_sctlr = 0x00c50078; - cpu->isar.id_pfr0 = 0x1031; - cpu->isar.id_pfr1 = 0x11; - cpu->isar.id_dfr0 = 0x400; + cpu->isar.regs[ID_PFR0] = 0x1031; + cpu->isar.regs[ID_PFR1] = 0x11; + cpu->isar.regs[ID_DFR0] = 0x400; cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x31100003; - cpu->isar.id_mmfr1 = 0x20000000; - cpu->isar.id_mmfr2 = 0x01202000; - cpu->isar.id_mmfr3 = 0x11; - cpu->isar.id_isar0 = 0x00101111; - cpu->isar.id_isar1 = 0x12112111; - cpu->isar.id_isar2 = 0x21232031; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x00111142; - 
cpu->isar.dbgdidr = 0x15141000; + cpu->isar.regs[ID_MMFR0] = 0x31100003; + cpu->isar.regs[ID_MMFR1] = 0x20000000; + cpu->isar.regs[ID_MMFR2] = 0x01202000; + cpu->isar.regs[ID_MMFR3] = 0x11; + cpu->isar.regs[ID_ISAR0] = 0x00101111; + cpu->isar.regs[ID_ISAR1] = 0x12112111; + cpu->isar.regs[ID_ISAR2] = 0x21232031; + cpu->isar.regs[ID_ISAR3] = 0x11112131; + cpu->isar.regs[ID_ISAR4] = 0x00111142; + cpu->isar.regs[DBGDIDR] = 0x15141000; cpu->clidr = (1 << 27) | (2 << 24) | 3; cpu->ccsidr[0] = 0xe007e01a; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */ @@ -352,24 +356,24 @@ static void cortex_a9_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_CBAR); cpu->midr = 0x410fc090; cpu->reset_fpsid = 0x41033090; - cpu->isar.mvfr0 = 0x11110222; - cpu->isar.mvfr1 = 0x01111111; + cpu->isar.regs[MVFR0] = 0x11110222; + cpu->isar.regs[MVFR1] = 0x01111111; cpu->ctr = 0x80038003; cpu->reset_sctlr = 0x00c50078; - cpu->isar.id_pfr0 = 0x1031; - cpu->isar.id_pfr1 = 0x11; - cpu->isar.id_dfr0 = 0x000; + cpu->isar.regs[ID_PFR0] = 0x1031; + cpu->isar.regs[ID_PFR1] = 0x11; + cpu->isar.regs[ID_DFR0] = 0x000; cpu->id_afr0 = 0; - cpu->isar.id_mmfr0 = 0x00100103; - cpu->isar.id_mmfr1 = 0x20000000; - cpu->isar.id_mmfr2 = 0x01230000; - cpu->isar.id_mmfr3 = 0x00002111; - cpu->isar.id_isar0 = 0x00101111; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x00111142; - cpu->isar.dbgdidr = 0x35141000; + cpu->isar.regs[ID_MMFR0] = 0x00100103; + cpu->isar.regs[ID_MMFR1] = 0x20000000; + cpu->isar.regs[ID_MMFR2] = 0x01230000; + cpu->isar.regs[ID_MMFR3] = 0x00002111; + cpu->isar.regs[ID_ISAR0] = 0x00101111; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232041; + cpu->isar.regs[ID_ISAR3] = 0x11112131; + cpu->isar.regs[ID_ISAR4] = 0x00111142; + cpu->isar.regs[DBGDIDR] = 0x35141000; cpu->clidr = (1 << 27) | (1 << 24) | 3; cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */ cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */ @@ -417,28 +421,28 @@ static void cortex_a7_initfn(Object *obj) cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A7; cpu->midr = 0x410fc075; cpu->reset_fpsid = 0x41023075; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x11111111; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x11111111; cpu->ctr = 0x84448003; cpu->reset_sctlr = 0x00c50078; - cpu->isar.id_pfr0 = 0x00001131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x02010555; + cpu->isar.regs[ID_PFR0] = 0x00001131; + cpu->isar.regs[ID_PFR1] = 0x00011011; + cpu->isar.regs[ID_DFR0] = 0x02010555; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10101105; - cpu->isar.id_mmfr1 = 0x40000000; - cpu->isar.id_mmfr2 = 0x01240000; - cpu->isar.id_mmfr3 = 0x02102211; + cpu->isar.regs[ID_MMFR0] = 0x10101105; + cpu->isar.regs[ID_MMFR1] = 0x40000000; + cpu->isar.regs[ID_MMFR2] = 0x01240000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; /* * a7_mpcore_r0p5_trm, page 4-4 gives 0x01101110; but * table 4-41 gives 0x02101110, which includes the arm div insns. 
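     * For illustration, the Divide field is ID_ISAR0[27:24]: 0x01101110
     * encodes Divide == 1 (SDIV/UDIV in Thumb only), while 0x02101110
     * encodes Divide == 2 (in the Arm instruction set as well). A minimal
     * extraction sketch:
     *
     *     uint32_t id_isar0 = 0x02101110;
     *     unsigned divide = (id_isar0 >> 24) & 0xf;   // == 2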
*/ - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x10011142; - cpu->isar.dbgdidr = 0x3515f005; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232041; + cpu->isar.regs[ID_ISAR3] = 0x11112131; + cpu->isar.regs[ID_ISAR4] = 0x10011142; + cpu->isar.regs[DBGDIDR] = 0x3515f005; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ @@ -463,24 +467,24 @@ static void cortex_a15_initfn(Object *obj) cpu->kvm_target = QEMU_KVM_ARM_TARGET_CORTEX_A15; cpu->midr = 0x412fc0f1; cpu->reset_fpsid = 0x410430f0; - cpu->isar.mvfr0 = 0x10110222; - cpu->isar.mvfr1 = 0x11111111; + cpu->isar.regs[MVFR0] = 0x10110222; + cpu->isar.regs[MVFR1] = 0x11111111; cpu->ctr = 0x8444c004; cpu->reset_sctlr = 0x00c50078; - cpu->isar.id_pfr0 = 0x00001131; - cpu->isar.id_pfr1 = 0x00011011; - cpu->isar.id_dfr0 = 0x02010555; + cpu->isar.regs[ID_PFR0] = 0x00001131; + cpu->isar.regs[ID_PFR1] = 0x00011011; + cpu->isar.regs[ID_DFR0] = 0x02010555; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x10201105; - cpu->isar.id_mmfr1 = 0x20000000; - cpu->isar.id_mmfr2 = 0x01240000; - cpu->isar.id_mmfr3 = 0x02102211; - cpu->isar.id_isar0 = 0x02101110; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232041; - cpu->isar.id_isar3 = 0x11112131; - cpu->isar.id_isar4 = 0x10011142; - cpu->isar.dbgdidr = 0x3515f021; + cpu->isar.regs[ID_MMFR0] = 0x10201105; + cpu->isar.regs[ID_MMFR1] = 0x20000000; + cpu->isar.regs[ID_MMFR2] = 0x01240000; + cpu->isar.regs[ID_MMFR3] = 0x02102211; + cpu->isar.regs[ID_ISAR0] = 0x02101110; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232041; + cpu->isar.regs[ID_ISAR3] = 0x11112131; + cpu->isar.regs[ID_ISAR4] = 0x10011142; + cpu->isar.regs[DBGDIDR] = 0x3515f021; cpu->clidr = 0x0a200023; cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */ cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */ @@ -504,21 +508,21 @@ static void cortex_m0_initfn(Object *obj) * by looking at ID register fields. We use the same values as * for the M3. 
*/ - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; + cpu->isar.regs[ID_PFR0] = 0x00000030; + cpu->isar.regs[ID_PFR1] = 0x00000200; + cpu->isar.regs[ID_DFR0] = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00000030; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x00000000; + cpu->isar.regs[ID_MMFR3] = 0x00000000; + cpu->isar.regs[ID_ISAR0] = 0x01141110; + cpu->isar.regs[ID_ISAR1] = 0x02111000; + cpu->isar.regs[ID_ISAR2] = 0x21112231; + cpu->isar.regs[ID_ISAR3] = 0x01111110; + cpu->isar.regs[ID_ISAR4] = 0x01310102; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; } static void cortex_m3_initfn(Object *obj) @@ -529,21 +533,21 @@ static void cortex_m3_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_M_MAIN); cpu->midr = 0x410fc231; cpu->pmsav7_dregion = 8; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; + cpu->isar.regs[ID_PFR0] = 0x00000030; + cpu->isar.regs[ID_PFR1] = 0x00000200; + cpu->isar.regs[ID_DFR0] = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00000030; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x00000000; + cpu->isar.regs[ID_MMFR3] = 0x00000000; + cpu->isar.regs[ID_ISAR0] = 0x01141110; + cpu->isar.regs[ID_ISAR1] = 0x02111000; + cpu->isar.regs[ID_ISAR2] = 0x21112231; + cpu->isar.regs[ID_ISAR3] = 0x01111110; + cpu->isar.regs[ID_ISAR4] = 0x01310102; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; } static void cortex_m4_initfn(Object *obj) @@ -556,24 +560,24 @@ static void cortex_m4_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); cpu->midr = 0x410fc240; /* r0p0 */ cpu->pmsav7_dregion = 8; - cpu->isar.mvfr0 = 0x10110021; - cpu->isar.mvfr1 = 0x11000011; - cpu->isar.mvfr2 = 0x00000000; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; + cpu->isar.regs[MVFR0] = 0x10110021; + cpu->isar.regs[MVFR1] = 0x11000011; + cpu->isar.regs[MVFR2] = 0x00000000; + cpu->isar.regs[ID_PFR0] = 0x00000030; + cpu->isar.regs[ID_PFR1] = 0x00000200; + cpu->isar.regs[ID_DFR0] = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00000030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x00000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01141110; - cpu->isar.id_isar1 = 0x02111000; - cpu->isar.id_isar2 = 0x21112231; - cpu->isar.id_isar3 = 0x01111110; - cpu->isar.id_isar4 = 0x01310102; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00000030; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 
0x00000000; + cpu->isar.regs[ID_MMFR3] = 0x00000000; + cpu->isar.regs[ID_ISAR0] = 0x01141110; + cpu->isar.regs[ID_ISAR1] = 0x02111000; + cpu->isar.regs[ID_ISAR2] = 0x21112231; + cpu->isar.regs[ID_ISAR3] = 0x01111110; + cpu->isar.regs[ID_ISAR4] = 0x01310102; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; } static void cortex_m7_initfn(Object *obj) @@ -586,24 +590,24 @@ static void cortex_m7_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_THUMB_DSP); cpu->midr = 0x411fc272; /* r1p2 */ cpu->pmsav7_dregion = 8; - cpu->isar.mvfr0 = 0x10110221; - cpu->isar.mvfr1 = 0x12000011; - cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000200; - cpu->isar.id_dfr0 = 0x00100000; + cpu->isar.regs[MVFR0] = 0x10110221; + cpu->isar.regs[MVFR1] = 0x12000011; + cpu->isar.regs[MVFR2] = 0x00000040; + cpu->isar.regs[ID_PFR0] = 0x00000030; + cpu->isar.regs[ID_PFR1] = 0x00000200; + cpu->isar.regs[ID_DFR0] = 0x00100000; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00100030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01101110; - cpu->isar.id_isar1 = 0x02112000; - cpu->isar.id_isar2 = 0x20232231; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00100030; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x01000000; + cpu->isar.regs[ID_MMFR3] = 0x00000000; + cpu->isar.regs[ID_ISAR0] = 0x01101110; + cpu->isar.regs[ID_ISAR1] = 0x02112000; + cpu->isar.regs[ID_ISAR2] = 0x20232231; + cpu->isar.regs[ID_ISAR3] = 0x01111131; + cpu->isar.regs[ID_ISAR4] = 0x01310132; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; } static void cortex_m33_initfn(Object *obj) @@ -618,24 +622,24 @@ static void cortex_m33_initfn(Object *obj) cpu->midr = 0x410fd213; /* r0p3 */ cpu->pmsav7_dregion = 16; cpu->sau_sregion = 8; - cpu->isar.mvfr0 = 0x10110021; - cpu->isar.mvfr1 = 0x11000011; - cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x00000030; - cpu->isar.id_pfr1 = 0x00000210; - cpu->isar.id_dfr0 = 0x00200000; + cpu->isar.regs[MVFR0] = 0x10110021; + cpu->isar.regs[MVFR1] = 0x11000011; + cpu->isar.regs[MVFR2] = 0x00000040; + cpu->isar.regs[ID_PFR0] = 0x00000030; + cpu->isar.regs[ID_PFR1] = 0x00000210; + cpu->isar.regs[ID_DFR0] = 0x00200000; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00101F40; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000000; - cpu->isar.id_isar0 = 0x01101110; - cpu->isar.id_isar1 = 0x02212000; - cpu->isar.id_isar2 = 0x20232232; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00101F40; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x01000000; + cpu->isar.regs[ID_MMFR3] = 0x00000000; + cpu->isar.regs[ID_ISAR0] = 0x01101110; + cpu->isar.regs[ID_ISAR1] = 0x02212000; + cpu->isar.regs[ID_ISAR2] = 0x20232232; + cpu->isar.regs[ID_ISAR3] = 0x01111131; + cpu->isar.regs[ID_ISAR4] = 0x01310132; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; cpu->clidr = 0x00000000; cpu->ctr = 0x8000c000; } @@ -655,24 +659,24 @@ static void cortex_m55_initfn(Object *obj) cpu->pmsav7_dregion = 16; cpu->sau_sregion = 8; /* These are the MVFR* values for the FPU + full MVE 
configuration */ - cpu->isar.mvfr0 = 0x10110221; - cpu->isar.mvfr1 = 0x12100211; - cpu->isar.mvfr2 = 0x00000040; - cpu->isar.id_pfr0 = 0x20000030; - cpu->isar.id_pfr1 = 0x00000230; - cpu->isar.id_dfr0 = 0x10200000; + cpu->isar.regs[MVFR0] = 0x10110221; + cpu->isar.regs[MVFR1] = 0x12100211; + cpu->isar.regs[MVFR2] = 0x00000040; + cpu->isar.regs[ID_PFR0] = 0x20000030; + cpu->isar.regs[ID_PFR1] = 0x00000230; + cpu->isar.regs[ID_DFR0] = 0x10200000; cpu->id_afr0 = 0x00000000; - cpu->isar.id_mmfr0 = 0x00111040; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01000000; - cpu->isar.id_mmfr3 = 0x00000011; - cpu->isar.id_isar0 = 0x01103110; - cpu->isar.id_isar1 = 0x02212000; - cpu->isar.id_isar2 = 0x20232232; - cpu->isar.id_isar3 = 0x01111131; - cpu->isar.id_isar4 = 0x01310132; - cpu->isar.id_isar5 = 0x00000000; - cpu->isar.id_isar6 = 0x00000000; + cpu->isar.regs[ID_MMFR0] = 0x00111040; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x01000000; + cpu->isar.regs[ID_MMFR3] = 0x00000011; + cpu->isar.regs[ID_ISAR0] = 0x01103110; + cpu->isar.regs[ID_ISAR1] = 0x02212000; + cpu->isar.regs[ID_ISAR2] = 0x20232232; + cpu->isar.regs[ID_ISAR3] = 0x01111131; + cpu->isar.regs[ID_ISAR4] = 0x01310132; + cpu->isar.regs[ID_ISAR5] = 0x00000000; + cpu->isar.regs[ID_ISAR6] = 0x00000000; cpu->clidr = 0x00000000; /* caches not implemented */ cpu->ctr = 0x8303c003; } @@ -697,21 +701,21 @@ static void cortex_r5_initfn(Object *obj) set_feature(&cpu->env, ARM_FEATURE_PMSA); set_feature(&cpu->env, ARM_FEATURE_PMU); cpu->midr = 0x411fc153; /* r1p3 */ - cpu->isar.id_pfr0 = 0x0131; - cpu->isar.id_pfr1 = 0x001; - cpu->isar.id_dfr0 = 0x010400; + cpu->isar.regs[ID_PFR0] = 0x0131; + cpu->isar.regs[ID_PFR1] = 0x001; + cpu->isar.regs[ID_DFR0] = 0x010400; cpu->id_afr0 = 0x0; - cpu->isar.id_mmfr0 = 0x0210030; - cpu->isar.id_mmfr1 = 0x00000000; - cpu->isar.id_mmfr2 = 0x01200000; - cpu->isar.id_mmfr3 = 0x0211; - cpu->isar.id_isar0 = 0x02101111; - cpu->isar.id_isar1 = 0x13112111; - cpu->isar.id_isar2 = 0x21232141; - cpu->isar.id_isar3 = 0x01112131; - cpu->isar.id_isar4 = 0x0010142; - cpu->isar.id_isar5 = 0x0; - cpu->isar.id_isar6 = 0x0; + cpu->isar.regs[ID_MMFR0] = 0x0210030; + cpu->isar.regs[ID_MMFR1] = 0x00000000; + cpu->isar.regs[ID_MMFR2] = 0x01200000; + cpu->isar.regs[ID_MMFR3] = 0x0211; + cpu->isar.regs[ID_ISAR0] = 0x02101111; + cpu->isar.regs[ID_ISAR1] = 0x13112111; + cpu->isar.regs[ID_ISAR2] = 0x21232141; + cpu->isar.regs[ID_ISAR3] = 0x01112131; + cpu->isar.regs[ID_ISAR4] = 0x0010142; + cpu->isar.regs[ID_ISAR5] = 0x0; + cpu->isar.regs[ID_ISAR6] = 0x0; cpu->mp_is_up = true; cpu->pmsav7_dregion = 16; define_arm_cp_regs(cpu, cortexr5_cp_reginfo); @@ -722,8 +726,8 @@ static void cortex_r5f_initfn(Object *obj) ARMCPU *cpu = ARM_CPU(obj); cortex_r5_initfn(obj); - cpu->isar.mvfr0 = 0x10110221; - cpu->isar.mvfr1 = 0x00000011; + cpu->isar.regs[MVFR0] = 0x10110221; + cpu->isar.regs[MVFR1] = 0x00000011; } static void ti925t_initfn(Object *obj) @@ -942,7 +946,8 @@ static void arm_max_initfn(Object *obj) cortex_a15_initfn(obj); /* old-style VFP short-vector support */ - cpu->isar.mvfr0 = FIELD_DP32(cpu->isar.mvfr0, MVFR0, FPSHVEC, 1); + cpu->isar.regs[MVFR0] = FIELD_DP32(cpu->isar.regs[MVFR0], MVFR0, FPSHVEC, + 1); #ifdef CONFIG_USER_ONLY /* @@ -954,16 +959,16 @@ static void arm_max_initfn(Object *obj) { uint32_t t; - t = cpu->isar.id_isar5; + t = cpu->isar.regs[ID_ISAR5]; t = FIELD_DP32(t, ID_ISAR5, AES, 2); t = FIELD_DP32(t, ID_ISAR5, SHA1, 1); t = FIELD_DP32(t, ID_ISAR5, SHA2, 1); t = FIELD_DP32(t, ID_ISAR5, 
CRC32, 1); t = FIELD_DP32(t, ID_ISAR5, RDM, 1); t = FIELD_DP32(t, ID_ISAR5, VCMA, 1); - cpu->isar.id_isar5 = t; + cpu->isar.regs[ID_ISAR5] = t; - t = cpu->isar.id_isar6; + t = cpu->isar.regs[ID_ISAR6]; t = FIELD_DP32(t, ID_ISAR6, JSCVT, 1); t = FIELD_DP32(t, ID_ISAR6, DP, 1); t = FIELD_DP32(t, ID_ISAR6, FHM, 1); @@ -971,36 +976,36 @@ static void arm_max_initfn(Object *obj) t = FIELD_DP32(t, ID_ISAR6, SPECRES, 1); t = FIELD_DP32(t, ID_ISAR6, BF16, 1); t = FIELD_DP32(t, ID_ISAR6, I8MM, 1); - cpu->isar.id_isar6 = t; + cpu->isar.regs[ID_ISAR6] = t; - t = cpu->isar.mvfr1; + t = cpu->isar.regs[MVFR1]; t = FIELD_DP32(t, MVFR1, FPHP, 3); /* v8.2-FP16 */ t = FIELD_DP32(t, MVFR1, SIMDHP, 2); /* v8.2-FP16 */ - cpu->isar.mvfr1 = t; + cpu->isar.regs[MVFR1] = t; - t = cpu->isar.mvfr2; + t = cpu->isar.regs[MVFR2]; t = FIELD_DP32(t, MVFR2, SIMDMISC, 3); /* SIMD MaxNum */ t = FIELD_DP32(t, MVFR2, FPMISC, 4); /* FP MaxNum */ - cpu->isar.mvfr2 = t; + cpu->isar.regs[MVFR2] = t; - t = cpu->isar.id_mmfr3; + t = cpu->isar.regs[ID_MMFR3]; t = FIELD_DP32(t, ID_MMFR3, PAN, 2); /* ATS1E1 */ - cpu->isar.id_mmfr3 = t; + cpu->isar.regs[ID_MMFR3] = t; - t = cpu->isar.id_mmfr4; + t = cpu->isar.regs[ID_MMFR4]; t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* AA32HPD */ t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */ t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* TTCNP */ t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* TTS2UXN */ - cpu->isar.id_mmfr4 = t; + cpu->isar.regs[ID_MMFR4] = t; - t = cpu->isar.id_pfr0; + t = cpu->isar.regs[ID_PFR0]; t = FIELD_DP32(t, ID_PFR0, DIT, 1); - cpu->isar.id_pfr0 = t; + cpu->isar.regs[ID_PFR0] = t; - t = cpu->isar.id_pfr2; + t = cpu->isar.regs[ID_PFR2]; t = FIELD_DP32(t, ID_PFR2, SSBS, 1); - cpu->isar.id_pfr2 = t; + cpu->isar.regs[ID_PFR2] = t; } #endif /* CONFIG_USER_ONLY */ } diff --git a/target/arm/helper.c b/target/arm/helper.c index 9b317899a66b66fbe7203b8df8b490b69236d54a..1854c6586329136900536ec3cf9a8ce7582c3ed6 100644 --- a/target/arm/helper.c +++ b/target/arm/helper.c @@ -35,6 +35,7 @@ #include "exec/cpu_ldst.h" #include "semihosting/common-semi.h" #endif +#include "kvm_arm.h" #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */ #define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */ @@ -133,6 +134,16 @@ static bool raw_accessors_invalid(const ARMCPRegInfo *ri) return true; } +static bool is_id_reg(const ARMCPRegInfo *ri) +{ + /* + * (Op0, Op1, CRn, CRm, Op2) of ID registers is (3, 0, 0, crm, op2), + * where 1<=crm<8, 0<=op2<8. + */ + return ri->opc0 == 3 && ri->opc1 == 0 && ri->crn == 0 && + ri->crm > 0 && ri->crm < 8; +} + bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) { /* Write the coprocessor state from cpu->env to the (index,value) list. */ @@ -149,30 +160,53 @@ bool write_cpustate_to_list(ARMCPU *cpu, bool kvm_sync) ok = false; continue; } - if (ri->type & ARM_CP_NO_RAW) { + if ((ri->type & ARM_CP_NO_RAW) && !(kvm_sync && is_id_reg(ri))) { continue; } newval = read_raw_cp_reg(&cpu->env, ri); if (kvm_sync) { - /* - * Only sync if the previous list->cpustate sync succeeded. - * Rather than tracking the success/failure state for every - * item in the list, we just recheck "does the raw write we must - * have made in write_list_to_cpustate() read back OK" here. - */ - uint64_t oldval = cpu->cpreg_values[i]; + if (is_id_reg(ri)) { + /* Only sync if we can sync to KVM successfully. 
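 * The id-register path below probes whether KVM will accept the new
 * value: read the current value, skip if unchanged, write the new value,
 * read it back to confirm it stuck, then restore the original (the
 * actual sync to KVM happens later from the cpreg list). A minimal
 * sketch of that pattern, with a toy backend standing in for the KVM
 * accessors (the toy_* names are invented for illustration):
 *
 *     static uint64_t toy_reg;
 *     static int toy_get(uint64_t *v) { *v = toy_reg; return 0; }
 *     static int toy_set(uint64_t v) { toy_reg = v; return 0; }
 *
 *     static bool toy_can_sync(uint64_t newval)
 *     {
 *         uint64_t oldval, chk;
 *
 *         if (toy_get(&oldval)) {
 *             return false;
 *         }
 *         if (oldval == newval) {
 *             return true;                // nothing to probe
 *         }
 *         if (toy_set(newval) || toy_get(&chk) || chk != newval) {
 *             return false;
 *         }
 *         toy_set(oldval);                // put the original value back
 *         return true;
 *     }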
*/ + uint64_t oldval = 0; + uint64_t kvmval = 0; - if (oldval == newval) { - continue; - } + if (kvm_arm_get_one_reg(cpu, cpu->cpreg_indexes[i], &oldval)) { + continue; + } + if (oldval == newval) { + continue; + } - write_raw_cp_reg(&cpu->env, ri, oldval); - if (read_raw_cp_reg(&cpu->env, ri) != oldval) { - continue; - } + if (kvm_arm_set_one_reg(cpu, cpu->cpreg_indexes[i], &newval)) { + continue; + } + if (kvm_arm_get_one_reg(cpu, cpu->cpreg_indexes[i], &kvmval) || + kvmval != newval) { + continue; + } + + kvm_arm_set_one_reg(cpu, cpu->cpreg_indexes[i], &oldval); + } else { + /* + * Only sync if the previous list->cpustate sync succeeded. + * Rather than tracking the success/failure state for every + * item in the list, we just recheck "does the raw write we must + * have made in write_list_to_cpustate() read back OK" here. + */ + uint64_t oldval = cpu->cpreg_values[i]; + + if (oldval == newval) { + continue; + } + + write_raw_cp_reg(&cpu->env, ri, oldval); + if (read_raw_cp_reg(&cpu->env, ri) != oldval) { + continue; + } - write_raw_cp_reg(&cpu->env, ri, newval); + write_raw_cp_reg(&cpu->env, ri, newval); + } } cpu->cpreg_values[i] = newval; } @@ -6547,12 +6581,12 @@ static void define_debug_regs(ARMCPU *cpu) * use AArch32. Given that bit 15 is RES1, if the value is 0 then * the register must not exist for this cpu. */ - if (cpu->isar.dbgdidr != 0) { + if (cpu->isar.regs[DBGDIDR] != 0) { ARMCPRegInfo dbgdidr = { .name = "DBGDIDR", .cp = 14, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 0, .access = PL0_R, .accessfn = access_tda, - .type = ARM_CP_CONST, .resetvalue = cpu->isar.dbgdidr, + .type = ARM_CP_CONST, .resetvalue = cpu->isar.regs[DBGDIDR], }; define_one_arm_cp_reg(cpu, &dbgdidr); } @@ -6707,9 +6741,9 @@ static void define_pmu_regs(ARMCPU *cpu) static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - uint64_t pfr1 = cpu->isar.id_pfr1; + uint64_t pfr1 = cpu->isar.regs[ID_PFR1]; - if (env->gicv3state) { + if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64) && env->gicv3state) { pfr1 |= 1 << 28; } return pfr1; @@ -6719,7 +6753,7 @@ static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri) static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri) { ARMCPU *cpu = env_archcpu(env); - uint64_t pfr0 = cpu->isar.id_aa64pfr0; + uint64_t pfr0 = cpu->isar.regs[ID_AA64PFR0]; if (env->gicv3state) { pfr0 |= 1 << 24; @@ -6964,18 +6998,42 @@ static const ARMCPRegInfo tlbios_reginfo[] = { .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 0, .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 1, + .access = PL1_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, { .name = "TLBI_ASIDE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 2, .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_vmalle1is_write }, + { .name = "TLBI_VAAE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 3, + .access = PL1_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VALE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL1_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae1is_write }, + { .name = "TLBI_VAALE1OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 0, .crn = 8, .crm = 1, .opc2 = 7, + .access = PL1_W, .type = ARM_CP_NO_RAW, + .writefn 
= tlbi_aa64_vae1is_write }, { .name = "TLBI_ALLE2OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 0, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle2is_write }, + { .name = "TLBI_VAE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 1, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae2is_write }, { .name = "TLBI_ALLE1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 4, .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle1is_write }, + { .name = "TLBI_VALE2OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL2_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae2is_write }, { .name = "TLBI_VMALLS12E1OS", .state = ARM_CP_STATE_AA64, .opc0 = 1, .opc1 = 4, .crn = 8, .crm = 1, .opc2 = 6, .access = PL2_W, .type = ARM_CP_NO_RAW, @@ -6996,6 +7054,14 @@ static const ARMCPRegInfo tlbios_reginfo[] = { .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 0, .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = tlbi_aa64_alle3is_write }, + { .name = "TLBI_VAE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 1, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae3is_write }, + { .name = "TLBI_VALE3OS", .state = ARM_CP_STATE_AA64, + .opc0 = 1, .opc1 = 6, .crn = 8, .crm = 1, .opc2 = 5, + .access = PL3_W, .type = ARM_CP_NO_RAW, + .writefn = tlbi_aa64_vae3is_write }, REGINFO_SENTINEL }; @@ -7501,7 +7567,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_pfr0 }, + .resetvalue = cpu->isar.regs[ID_PFR0] }, /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know * the value of the GIC field until after we define these regs. 
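 * The GIC field is ID_PFR1[31:28]; the id_pfr1_read hook earlier in this
 * patch ORs in 1 << 28 (a nonzero GIC field) when the CPU is AArch32 and
 * a GICv3 is present. A minimal sketch of that field update, under an
 * assumed base value:
 *
 *     uint32_t pfr1 = 0x00000011;   // assumed example reset value
 *     bool have_gicv3 = true;       // assumption for illustration
 *     if (have_gicv3) {
 *         pfr1 |= 1u << 28;         // ID_PFR1.GIC = 1
 *     }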
*/ @@ -7515,7 +7581,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_dfr0 }, + .resetvalue = cpu->isar.regs[ID_DFR0] }, { .name = "ID_AFR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -7525,62 +7591,62 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr0 }, + .resetvalue = cpu->isar.regs[ID_MMFR0] }, { .name = "ID_MMFR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr1 }, + .resetvalue = cpu->isar.regs[ID_MMFR1] }, { .name = "ID_MMFR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr2 }, + .resetvalue = cpu->isar.regs[ID_MMFR2] }, { .name = "ID_MMFR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr3 }, + .resetvalue = cpu->isar.regs[ID_MMFR3] }, { .name = "ID_ISAR0", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar0 }, + .resetvalue = cpu->isar.regs[ID_ISAR0] }, { .name = "ID_ISAR1", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar1 }, + .resetvalue = cpu->isar.regs[ID_ISAR1] }, { .name = "ID_ISAR2", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar2 }, + .resetvalue = cpu->isar.regs[ID_ISAR2] }, { .name = "ID_ISAR3", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar3 }, + .resetvalue = cpu->isar.regs[ID_ISAR3] }, { .name = "ID_ISAR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar4 }, + .resetvalue = cpu->isar.regs[ID_ISAR4] }, { .name = "ID_ISAR5", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar5 }, + .resetvalue = cpu->isar.regs[ID_ISAR5] }, { .name = "ID_MMFR4", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 6, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_mmfr4 }, + .resetvalue = cpu->isar.regs[ID_MMFR4] }, { .name = "ID_ISAR6", .state = ARM_CP_STATE_BOTH, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 7, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa32_tid3, - .resetvalue = cpu->isar.id_isar6 }, + .resetvalue = cpu->isar.regs[ID_ISAR6] }, REGINFO_SENTINEL }; define_arm_cp_regs(cpu, v6_idregs); @@ -7630,7 
+7696,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .access = PL1_R, #ifdef CONFIG_USER_ONLY .type = ARM_CP_CONST, - .resetvalue = cpu->isar.id_aa64pfr0 + .resetvalue = cpu->isar.regs[ID_AA64PFR0] #else .type = ARM_CP_NO_RAW, .accessfn = access_aa64_tid3, @@ -7642,7 +7708,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64pfr1}, + .resetvalue = cpu->isar.regs[ID_AA64PFR1]}, { .name = "ID_AA64PFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -7657,7 +7723,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64zfr0 }, + .resetvalue = cpu->isar.regs[ID_AA64ZFR0] }, { .name = "ID_AA64PFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, @@ -7677,12 +7743,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64dfr0 }, + .resetvalue = cpu->isar.regs[ID_AA64DFR0] }, { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64dfr1 }, + .resetvalue = cpu->isar.regs[ID_AA64DFR1] }, { .name = "ID_AA64DFR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -7717,12 +7783,12 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64isar0 }, + .resetvalue = cpu->isar.regs[ID_AA64ISAR0] }, { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64isar1 }, + .resetvalue = cpu->isar.regs[ID_AA64ISAR1] }, { .name = "ID_AA64ISAR2_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, @@ -7757,17 +7823,17 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr0 }, + .resetvalue = cpu->isar.regs[ID_AA64MMFR0] }, { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr1 }, + .resetvalue = cpu->isar.regs[ID_AA64MMFR1] }, { .name = "ID_AA64MMFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_aa64mmfr2 }, + .resetvalue = cpu->isar.regs[ID_AA64MMFR2] }, { .name = "ID_AA64MMFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -7797,17 
+7863,17 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.mvfr0 }, + .resetvalue = cpu->isar.regs[MVFR0] }, { .name = "MVFR1_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.mvfr1 }, + .resetvalue = cpu->isar.regs[MVFR1] }, { .name = "MVFR2_EL1", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.mvfr2 }, + .resetvalue = cpu->isar.regs[MVFR2] }, { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3, .access = PL1_R, .type = ARM_CP_CONST, @@ -7817,7 +7883,7 @@ void register_cp_regs_for_features(ARMCPU *cpu) .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 4, .access = PL1_R, .type = ARM_CP_CONST, .accessfn = access_aa64_tid3, - .resetvalue = cpu->isar.id_pfr2 }, + .resetvalue = cpu->isar.regs[ID_PFR2] }, { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64, .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5, .access = PL1_R, .type = ARM_CP_CONST, diff --git a/target/arm/hvf/hvf.c b/target/arm/hvf/hvf.c index 0dc96560d3469316907f70efcc971b540687a570..66ad698df118acd7faa5c276772a542815d34c12 100644 --- a/target/arm/hvf/hvf.c +++ b/target/arm/hvf/hvf.c @@ -449,15 +449,15 @@ static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) int reg; uint64_t *val; } regs[] = { - { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 }, - { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 }, - { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 }, - { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 }, - { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 }, - { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 }, - { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 }, - { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 }, - { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 }, + { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.regs[ID_AA64PFR0] }, + { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.regs[ID_AA64PFR1] }, + { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.regs[ID_AA64DFR0] }, + { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.regs[ID_AA64DFR1] }, + { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.regs[ID_AA64ISAR0] }, + { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.regs[ID_AA64ISAR1] }, + { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.regs[ID_AA64MMFR0] }, + { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.regs[ID_AA64MMFR1] }, + { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.regs[ID_AA64MMFR2] }, }; hv_vcpu_t fd; hv_return_t r = HV_SUCCESS; @@ -593,7 +593,7 @@ int hvf_arch_init_vcpu(CPUState *cpu) /* We're limited to underlying hardware caps, override internal versions */ ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1, - &arm_cpu->isar.id_aa64mmfr0); + &arm_cpu->isar.regs[ID_AA64MMFR0]); assert_hvf_ok(ret); return 0; diff --git a/target/arm/internals.h b/target/arm/internals.h index 89f7610ebc542b90aac8513d77e13ba03ba1f3e2..0ea225e480795e27e177c9d032ea79e1d2497030 100644 --- a/target/arm/internals.h +++ b/target/arm/internals.h @@ -254,7 +254,7 @@ static inline unsigned int arm_pamax(ARMCPU *cpu) [5] = 48, }; unsigned int parange = - FIELD_EX64(cpu->isar.id_aa64mmfr0, ID_AA64MMFR0, PARANGE); + 
FIELD_EX64(cpu->isar.regs[ID_AA64MMFR0], ID_AA64MMFR0, PARANGE); /* id_aa64mmfr0 is a read-only register so values outside of the * supported mappings can be considered an implementation error. */ @@ -808,9 +808,9 @@ static inline uint32_t arm_debug_exception_fsr(CPUARMState *env) static inline int arm_num_brps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, BRPS) + 1; + return FIELD_EX64(cpu->isar.regs[ID_AA64DFR0], ID_AA64DFR0, BRPS) + 1; } else { - return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, BRPS) + 1; + return FIELD_EX32(cpu->isar.regs[DBGDIDR], DBGDIDR, BRPS) + 1; } } @@ -822,9 +822,9 @@ static inline int arm_num_brps(ARMCPU *cpu) static inline int arm_num_wrps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, WRPS) + 1; + return FIELD_EX64(cpu->isar.regs[ID_AA64DFR0], ID_AA64DFR0, WRPS) + 1; } else { - return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, WRPS) + 1; + return FIELD_EX32(cpu->isar.regs[DBGDIDR], DBGDIDR, WRPS) + 1; } } @@ -836,9 +836,9 @@ static inline int arm_num_wrps(ARMCPU *cpu) static inline int arm_num_ctx_cmps(ARMCPU *cpu) { if (arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) { - return FIELD_EX64(cpu->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS) + 1; + return FIELD_EX64(cpu->isar.regs[ID_AA64DFR0], ID_AA64DFR0, CTX_CMPS) + 1; } else { - return FIELD_EX32(cpu->isar.dbgdidr, DBGDIDR, CTX_CMPS) + 1; + return FIELD_EX32(cpu->isar.regs[DBGDIDR], DBGDIDR, CTX_CMPS) + 1; } } diff --git a/target/arm/kvm-consts.h b/target/arm/kvm-consts.h index 580f1c1fee0c50455a284fed2bde9df0ad49f40e..5f1311ade74aed935491b1cd60019eb291d7ab71 100644 --- a/target/arm/kvm-consts.h +++ b/target/arm/kvm-consts.h @@ -130,6 +130,8 @@ MISMATCH_CHECK(QEMU_PSCI_RET_DISABLED, PSCI_RET_DISABLED); #define QEMU_KVM_ARM_TARGET_CORTEX_A57 2 #define QEMU_KVM_ARM_TARGET_XGENE_POTENZA 3 #define QEMU_KVM_ARM_TARGET_CORTEX_A53 4 +/* Generic ARM v8 target */ +#define QEMU_KVM_ARM_TARGET_GENERIC_V8 5 /* There's no kernel define for this: sentinel value which * matches no KVM target value for either 64 or 32 bit @@ -141,6 +143,7 @@ MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_FOUNDATION_V8, KVM_ARM_TARGET_FOUNDATION_V8); MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A57, KVM_ARM_TARGET_CORTEX_A57); MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_XGENE_POTENZA, KVM_ARM_TARGET_XGENE_POTENZA); MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_CORTEX_A53, KVM_ARM_TARGET_CORTEX_A53); +MISMATCH_CHECK(QEMU_KVM_ARM_TARGET_GENERIC_V8, KVM_ARM_TARGET_GENERIC_V8); #define CP_REG_ARM64 0x6000000000000000ULL #define CP_REG_ARM_COPROC_MASK 0x000000000FFF0000 diff --git a/target/arm/kvm-tmm.c b/target/arm/kvm-tmm.c new file mode 100644 index 0000000000000000000000000000000000000000..e7df48597992da82107ceae314f460ca1b81c105 --- /dev/null +++ b/target/arm/kvm-tmm.c @@ -0,0 +1,344 @@ +/* + * QEMU add virtcca cvm feature. + * + * Copyright (c) Huawei Technologies Co., Ltd. 2023-2024. All rights reserved. + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. 
+ * + */ + +#include "qemu/osdep.h" +#include "exec/confidential-guest-support.h" +#include "hw/boards.h" +#include "hw/core/cpu.h" +#include "kvm_arm.h" +#include "migration/blocker.h" +#include "qapi/error.h" +#include "qom/object_interfaces.h" +#include "sysemu/kvm.h" +#include "sysemu/runstate.h" +#include "hw/loader.h" + +#define TYPE_TMM_GUEST "tmm-guest" +OBJECT_DECLARE_SIMPLE_TYPE(TmmGuest, TMM_GUEST) + +#define TMM_PAGE_SIZE qemu_real_host_page_size +#define TMM_MAX_PMU_CTRS 0x20 +#define TMM_MAX_CFG 5 + +struct TmmGuest { + ConfidentialGuestSupport parent_obj; + GSList *ram_regions; + TmmGuestMeasurementAlgo measurement_algo; + uint32_t sve_vl; + uint32_t num_pmu_cntrs; +}; + +typedef struct { + hwaddr base1; + hwaddr len1; + hwaddr base2; + hwaddr len2; + bool populate; +} TmmRamRegion; + +static TmmGuest *tmm_guest; + +bool kvm_arm_tmm_enabled(void) +{ + return !!tmm_guest; +} + +static int tmm_configure_one(TmmGuest *guest, uint32_t cfg, Error **errp) +{ + int ret = 1; + const char *cfg_str; + struct kvm_cap_arm_tmm_config_item args = { + .cfg = cfg, + }; + + switch (cfg) { + case KVM_CAP_ARM_TMM_CFG_RPV: + return 0; + case KVM_CAP_ARM_TMM_CFG_HASH_ALGO: + switch (guest->measurement_algo) { + case TMM_GUEST_MEASUREMENT_ALGO_DEFAULT: + return 0; + case TMM_GUEST_MEASUREMENT_ALGO_SHA256: + args.hash_algo = KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA256; + break; + case TMM_GUEST_MEASUREMENT_ALGO_SHA512: + args.hash_algo = KVM_CAP_ARM_TMM_MEASUREMENT_ALGO_SHA512; + break; + default: + g_assert_not_reached(); + } + cfg_str = "hash algorithm"; + break; + case KVM_CAP_ARM_TMM_CFG_SVE: + if (!guest->sve_vl) { + return 0; + } + args.sve_vq = guest->sve_vl / 128; + cfg_str = "SVE"; + break; + case KVM_CAP_ARM_TMM_CFG_DBG: + return 0; + case KVM_CAP_ARM_TMM_CFG_PMU: + if (!guest->num_pmu_cntrs) { + return 0; + } + args.num_pmu_cntrs = guest->num_pmu_cntrs; + cfg_str = "PMU"; + break; + default: + g_assert_not_reached(); + } + + ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, + KVM_CAP_ARM_TMM_CONFIG_CVM, (intptr_t)&args); + if (ret) { + error_setg_errno(errp, -ret, "TMM: failed to configure %s", cfg_str); + } + + return ret; +} + +static gint tmm_compare_ram_regions(gconstpointer a, gconstpointer b) +{ + const TmmRamRegion *ra = a; + const TmmRamRegion *rb = b; + + g_assert(ra->base1 != rb->base1); + return ra->base1 < rb->base1 ? 
-1 : 1; +} + +void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, hwaddr len2, bool populate) +{ + TmmRamRegion *region; + + region = g_new0(TmmRamRegion, 1); + region->base1 = QEMU_ALIGN_DOWN(base1, TMM_PAGE_SIZE); + region->len1 = QEMU_ALIGN_UP(len1, TMM_PAGE_SIZE); + region->base2 = QEMU_ALIGN_DOWN(base2, TMM_PAGE_SIZE); + region->len2 = QEMU_ALIGN_UP(len2, TMM_PAGE_SIZE); + region->populate = populate; + + tmm_guest->ram_regions = g_slist_insert_sorted(tmm_guest->ram_regions, + region, tmm_compare_ram_regions); +} + +static void tmm_populate_region(gpointer data, gpointer unused) +{ + int ret; + const TmmRamRegion *region = data; + struct kvm_cap_arm_tmm_populate_region_args populate_args = { + .populate_ipa_base1 = region->base1, + .populate_ipa_size1 = region->len1, + .populate_ipa_base2 = region->base2, + .populate_ipa_size2 = region->len2, + .flags = KVM_ARM_TMM_POPULATE_FLAGS_MEASURE, + }; + + if (!region->populate) { + return; + } + + ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, + KVM_CAP_ARM_TMM_POPULATE_CVM, + (intptr_t)&populate_args); + if (ret) { + error_report("TMM: failed to populate cvm region (0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx", 0x%"HWADDR_PRIx"): %s", + region->base1, region->len1, region->base2, region->len2, strerror(-ret)); + exit(1); + } +} + +static int tmm_create_rd(Error **errp) +{ + int ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, + KVM_CAP_ARM_TMM_CREATE_RD); + if (ret) { + error_setg_errno(errp, -ret, "TMM: failed to create tmm Descriptor"); + } + return ret; +} + +static void tmm_vm_state_change(void *opaque, bool running, RunState state) +{ + int ret; + CPUState *cs; + + if (!running) { + return; + } + + g_slist_foreach(tmm_guest->ram_regions, tmm_populate_region, NULL); + g_slist_free_full(g_steal_pointer(&tmm_guest->ram_regions), g_free); + + CPU_FOREACH(cs) { + ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_TEC); + if (ret) { + error_report("TMM: failed to finalize vCPU: %s", strerror(-ret)); + exit(1); + } + } + + ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_ARM_TMM, 0, + KVM_CAP_ARM_TMM_ACTIVATE_CVM); + if (ret) { + error_report("TMM: failed to activate cvm: %s", strerror(-ret)); + exit(1); + } +} + +int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp) +{ + int ret; + int cfg; + + if (!tmm_guest) { + return -ENODEV; + } + + if (!kvm_check_extension(kvm_state, KVM_CAP_ARM_TMM)) { + error_setg(errp, "KVM does not support TMM"); + return -ENODEV; + } + + for (cfg = 0; cfg < TMM_MAX_CFG; cfg++) { + ret = tmm_configure_one(tmm_guest, cfg, &error_abort); + if (ret) { + return ret; + } + } + + ret = tmm_create_rd(&error_abort); + if (ret) { + return ret; + } + + qemu_add_vm_change_state_handler(tmm_vm_state_change, NULL); + return 0; +} + +static void tmm_get_sve_vl(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + TmmGuest *guest = TMM_GUEST(obj); + + visit_type_uint32(v, name, &guest->sve_vl, errp); +} + +static void tmm_set_sve_vl(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + TmmGuest *guest = TMM_GUEST(obj); + uint32_t value; + + if (!visit_type_uint32(v, name, &value, errp)) { + return; + } + + if (value & 0x7f || value >= ARM_MAX_VQ * 128) { + error_setg(errp, "invalid SVE vector length"); + return; + } + + guest->sve_vl = value; +} + +static void tmm_get_num_pmu_cntrs(Object *obj, Visitor *v, const char *name, + void *opaque, Error **errp) +{ + TmmGuest *guest = TMM_GUEST(obj); + + visit_type_uint32(v, name, &guest->num_pmu_cntrs, 
errp);
+}
+
+static void tmm_set_num_pmu_cntrs(Object *obj, Visitor *v, const char *name,
+                                  void *opaque, Error **errp)
+{
+    TmmGuest *guest = TMM_GUEST(obj);
+    uint32_t value;
+
+    if (!visit_type_uint32(v, name, &value, errp)) {
+        return;
+    }
+
+    if (value >= TMM_MAX_PMU_CTRS) {
+        error_setg(errp, "invalid number of PMU counters");
+        return;
+    }
+
+    guest->num_pmu_cntrs = value;
+}
+
+static int tmm_get_measurement_algo(Object *obj, Error **errp G_GNUC_UNUSED)
+{
+    TmmGuest *guest = TMM_GUEST(obj);
+
+    return guest->measurement_algo;
+}
+
+static void tmm_set_measurement_algo(Object *obj, int algo, Error **errp G_GNUC_UNUSED)
+{
+    TmmGuest *guest = TMM_GUEST(obj);
+
+    guest->measurement_algo = algo;
+}
+
+static void tmm_guest_class_init(ObjectClass *oc, void *data)
+{
+    object_class_property_add_enum(oc, "measurement-algo",
+                                   "TmmGuestMeasurementAlgo",
+                                   &TmmGuestMeasurementAlgo_lookup,
+                                   tmm_get_measurement_algo,
+                                   tmm_set_measurement_algo);
+    object_class_property_set_description(oc, "measurement-algo",
+            "CVM measurement algorithm ('sha256', 'sha512')");
+    /*
+     * This is not ideal. Normally SVE parameters are given to -cpu, but the
+     * cvm parameters are needed much earlier than CPU initialization. We
+     * also don't have a way to discover what is supported at the moment;
+     * the idea is that the user knows exactly what hardware they are
+     * running on, because these parameters are part of the measurement and
+     * play a part in the attestation.
+     */
+    object_class_property_add(oc, "sve-vector-length", "uint32", tmm_get_sve_vl,
+                              tmm_set_sve_vl, NULL, NULL);
+    object_class_property_set_description(oc, "sve-vector-length",
+            "SVE vector length. 0 disables SVE (the default)");
+    object_class_property_add(oc, "num-pmu-counters", "uint32",
+                              tmm_get_num_pmu_cntrs, tmm_set_num_pmu_cntrs,
+                              NULL, NULL);
+    object_class_property_set_description(oc, "num-pmu-counters",
+            "Number of PMU counters");
+}
+
+static void tmm_guest_instance_init(Object *obj)
+{
+    if (tmm_guest) {
+        error_report("only a single instance of TmmGuest is supported");
+        exit(1);
+    }
+    tmm_guest = TMM_GUEST(obj);
+}
+
+static const TypeInfo tmm_guest_info = {
+    .parent = TYPE_CONFIDENTIAL_GUEST_SUPPORT,
+    .name = TYPE_TMM_GUEST,
+    .instance_size = sizeof(struct TmmGuest),
+    .instance_init = tmm_guest_instance_init,
+    .class_init = tmm_guest_class_init,
+    .interfaces = (InterfaceInfo[]) {
+        { TYPE_USER_CREATABLE },
+        { }
+    }
+};
+
+static void tmm_register_types(void)
+{
+    type_register_static(&tmm_guest_info);
+}
+type_init(tmm_register_types);
diff --git a/target/arm/kvm.c b/target/arm/kvm.c
index bbf1ce7ba3bc337908aa257b259b008a0ed4b5a8..f62d9ece3c4ba2929fe7f5e27fe72a891d5455d8 100644
--- a/target/arm/kvm.c
+++ b/target/arm/kvm.c
@@ -27,6 +27,8 @@
 #include "trace.h"
 #include "internals.h"
 #include "hw/pci/pci.h"
+#include "hw/pci/msi.h"
+#include "hw/pci/msix.h"
 #include "exec/memattrs.h"
 #include "exec/address-spaces.h"
 #include "hw/boards.h"
@@ -262,9 +264,9 @@ int kvm_arch_init(MachineState *ms, KVMState *s)
 
     cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
 
-    if (ms->smp.cpus > 256 &&
+    if (ms->smp.max_cpus > 256 &&
         !kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) {
-        error_report("Using more than 256 vcpus requires a host kernel "
+        error_report("Using more than 256 vcpus (max_cpus) requires a host kernel "
                      "with KVM_CAP_ARM_IRQ_LINE_LAYOUT_2");
         ret = -EINVAL;
     }
@@ -514,6 +516,44 @@ out:
     return ret;
 }
 
+int kvm_arm_get_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *target)
+{
+    uint32_t v32;
+    int ret;
+
+    switch (regidx &
KVM_REG_SIZE_MASK) { + case KVM_REG_SIZE_U32: + ret = kvm_get_one_reg(CPU(cpu), regidx, &v32); + if (ret == 0) { + *target = v32; + } + return ret; + case KVM_REG_SIZE_U64: + return kvm_get_one_reg(CPU(cpu), regidx, target); + default: + return -1; + } +} + +int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *source) +{ + uint32_t v32; + + switch (regidx & KVM_REG_SIZE_MASK) { + case KVM_REG_SIZE_U32: + v32 = *source; + if (v32 != *source) { + error_report("the value of source is too large"); + return -1; + } + return kvm_set_one_reg(CPU(cpu), regidx, &v32); + case KVM_REG_SIZE_U64: + return kvm_set_one_reg(CPU(cpu), regidx, source); + default: + return -1; + } +} + bool write_kvmstate_to_list(ARMCPU *cpu) { CPUState *cs = CPU(cpu); @@ -565,7 +605,9 @@ bool write_list_to_kvmstate(ARMCPU *cpu, int level) if (kvm_arm_cpreg_level(regidx) > level) { continue; } - + if (virtcca_cvm_enabled() && regidx == KVM_REG_ARM_TIMER_CNT) { + continue; + } r.id = regidx; switch (regidx & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U32: @@ -1037,6 +1079,51 @@ int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, return 0; } +int kvm_create_shadow_device(PCIDevice *dev) +{ + KVMState *s = kvm_state; + struct kvm_master_dev_info *mdi; + MSIMessage msg; + uint32_t vector, nvectors = msix_nr_vectors_allocated(dev); + uint32_t request_id; + int ret; + + if (!kvm_vm_check_extension(s, KVM_CAP_ARM_VIRT_MSI_BYPASS) || !nvectors) { + return 0; + } + + mdi = g_malloc0(sizeof(uint32_t) + sizeof(struct kvm_msi) * nvectors); + mdi->nvectors = nvectors; + request_id = pci_requester_id(dev); + + for (vector = 0; vector < nvectors; vector++) { + msg = msix_get_message(dev, vector); + mdi->msi[vector].address_lo = extract64(msg.address, 0, 32); + mdi->msi[vector].address_hi = extract64(msg.address, 32, 32); + mdi->msi[vector].data = le32_to_cpu(msg.data); + mdi->msi[vector].flags = KVM_MSI_VALID_DEVID; + mdi->msi[vector].devid = request_id; + memset(mdi->msi[vector].pad, 0, sizeof(mdi->msi[vector].pad)); + } + + ret = kvm_vm_ioctl(s, KVM_CREATE_SHADOW_DEV, mdi); + g_free(mdi); + return ret; +} + +int kvm_delete_shadow_device(PCIDevice *dev) +{ + KVMState *s = kvm_state; + uint32_t request_id, nvectors = msix_nr_vectors_allocated(dev); + + if (!kvm_vm_check_extension(s, KVM_CAP_ARM_VIRT_MSI_BYPASS) || !nvectors) { + return 0; + } + + request_id = pci_requester_id(dev); + return kvm_vm_ioctl(s, KVM_DEL_SHADOW_DEV, &request_id); +} + int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, int vector, PCIDevice *dev) { @@ -1055,5 +1142,9 @@ int kvm_arch_msi_data_to_gsi(uint32_t data) bool kvm_arch_cpu_check_are_resettable(void) { - return true; + return !virtcca_cvm_enabled(); +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ } diff --git a/target/arm/kvm64.c b/target/arm/kvm64.c index e790d6c9a5732a58a060760154f8514252f4b38c..38d519846c4ea6c4a5c081b5c4387ee3d5eee876 100644 --- a/target/arm/kvm64.c +++ b/target/arm/kvm64.c @@ -468,7 +468,7 @@ void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa) } } -static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id) +static int read_sys_reg32(int fd, uint64_t *pret, uint64_t id) { uint64_t ret; struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret }; @@ -501,7 +501,6 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) int fdarray[3]; bool sve_supported; uint64_t features = 0; - uint64_t t; int err; /* Old kernels may not know about the PREFERRED_TARGET ioctl: however @@ -521,6 +520,15 @@ bool 
kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) */ struct kvm_vcpu_init init = { .target = -1, }; + /* + * Ask for SVE if supported, so that we can query ID_AA64ZFR0, + * which is otherwise RAZ. + */ + sve_supported = kvm_arm_sve_supported(); + if (sve_supported) { + init.features[0] |= 1 << KVM_ARM_VCPU_SVE; + } + if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) { return false; } @@ -528,7 +536,7 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) ahcf->target = init.target; ahcf->dtb_compatible = "arm,arm-v8"; - err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0, + err = read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64PFR0], ARM64_SYS_REG(3, 0, 0, 4, 0)); if (unlikely(err < 0)) { /* @@ -547,24 +555,24 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * ??? Either of these sounds like too much effort just * to work around running a modern host kernel. */ - ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */ + ahcf->isar.regs[ID_AA64PFR0] = 0x00000011; /* EL1&0, AArch64 only */ err = 0; } else { - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64PFR1], ARM64_SYS_REG(3, 0, 0, 4, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64DFR0], ARM64_SYS_REG(3, 0, 0, 5, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64DFR1], ARM64_SYS_REG(3, 0, 0, 5, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64ISAR0], ARM64_SYS_REG(3, 0, 0, 6, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64ISAR1], ARM64_SYS_REG(3, 0, 0, 6, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64MMFR0], ARM64_SYS_REG(3, 0, 0, 7, 0)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64MMFR1], ARM64_SYS_REG(3, 0, 0, 7, 1)); - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2, + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64MMFR2], ARM64_SYS_REG(3, 0, 0, 7, 2)); /* @@ -574,44 +582,44 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * than skipping the reads and leaving 0, as we must avoid * considering the values in every case. 
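For context, kvm_arm_sve_supported(), newly called in the hunk above, is a thin wrapper around the capability probe that the old code open-coded with a raw ioctl on the scratch VM fd (see the removed hunk further below). A sketch of the helper as it exists in the tree, written from memory, so details may differ:

/*
 * Probe KVM_CAP_ARM_SVE on the VM; this pairs with the per-vCPU
 * KVM_ARM_VCPU_SVE init feature requested above, so that
 * ID_AA64ZFR0_EL1 reads as real data rather than failing or RAZ.
 */
bool kvm_arm_sve_supported(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
}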
*/ - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_PFR0], ARM64_SYS_REG(3, 0, 0, 1, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_PFR1], ARM64_SYS_REG(3, 0, 0, 1, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_PFR2], ARM64_SYS_REG(3, 0, 0, 3, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_DFR0], ARM64_SYS_REG(3, 0, 0, 1, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR0], ARM64_SYS_REG(3, 0, 0, 1, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR1], ARM64_SYS_REG(3, 0, 0, 1, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR2], ARM64_SYS_REG(3, 0, 0, 1, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR3], ARM64_SYS_REG(3, 0, 0, 1, 7)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR0], ARM64_SYS_REG(3, 0, 0, 2, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR1], ARM64_SYS_REG(3, 0, 0, 2, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR2], ARM64_SYS_REG(3, 0, 0, 2, 2)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR3], ARM64_SYS_REG(3, 0, 0, 2, 3)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR4], ARM64_SYS_REG(3, 0, 0, 2, 4)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR5], ARM64_SYS_REG(3, 0, 0, 2, 5)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_MMFR4], ARM64_SYS_REG(3, 0, 0, 2, 6)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[ID_ISAR6], ARM64_SYS_REG(3, 0, 0, 2, 7)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[MVFR0], ARM64_SYS_REG(3, 0, 0, 3, 0)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[MVFR1], ARM64_SYS_REG(3, 0, 0, 3, 1)); - err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2, + err |= read_sys_reg32(fdarray[2], &ahcf->isar.regs[MVFR2], ARM64_SYS_REG(3, 0, 0, 3, 2)); /* @@ -624,14 +632,17 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does. * We only do this if the CPU supports AArch32 at EL1. 
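The hunk that follows rebuilds the AArch32 DBGDIDR pseudo-register out of ID_AA64DFR0 fields using the FIELD_EX*/FIELD_DP32 helpers from hw/registerfields.h. For readers unfamiliar with those macros, a minimal self-contained illustration with hypothetical field definitions (not part of this patch):

#include "hw/registerfields.h"

FIELD(DEMO, LO, 0, 4)   /* hypothetical 4-bit field at bits [3:0] */
FIELD(DEMO, HI, 4, 4)   /* hypothetical 4-bit field at bits [7:4] */

static uint32_t demo_pack(void)
{
    uint32_t v = 0;
    v = FIELD_DP32(v, DEMO, LO, 0x3);         /* deposit: v == 0x03 */
    v = FIELD_DP32(v, DEMO, HI, 0x5);         /* deposit: v == 0x53 */
    g_assert(FIELD_EX32(v, DEMO, LO) == 0x3); /* extract the field back */
    return v;
}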
*/ - if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) { - int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS); - int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS); + if (FIELD_EX32(ahcf->isar.regs[ID_AA64PFR0], ID_AA64PFR0, EL1) >= 2) { + int wrps = FIELD_EX64(ahcf->isar.regs[ID_AA64DFR0], ID_AA64DFR0, + WRPS); + int brps = FIELD_EX64(ahcf->isar.regs[ID_AA64DFR0], ID_AA64DFR0, + BRPS); int ctx_cmps = - FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS); + FIELD_EX64(ahcf->isar.regs[ID_AA64DFR0], ID_AA64DFR0, + CTX_CMPS); int version = 6; /* ARMv8 debug architecture */ bool has_el3 = - !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3); + !!FIELD_EX32(ahcf->isar.regs[ID_AA64PFR0], ID_AA64PFR0, EL3); uint32_t dbgdidr = 0; dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps); @@ -641,26 +652,20 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf) dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3); dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3); dbgdidr |= (1 << 15); /* RES1 bit */ - ahcf->isar.dbgdidr = dbgdidr; + ahcf->isar.regs[DBGDIDR] = dbgdidr; } - } - sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0; - - /* Add feature bits that can't appear until after VCPU init. */ - if (sve_supported) { - t = ahcf->isar.id_aa64pfr0; - t = FIELD_DP64(t, ID_AA64PFR0, SVE, 1); - ahcf->isar.id_aa64pfr0 = t; - - /* - * Before v5.1, KVM did not support SVE and did not expose - * ID_AA64ZFR0_EL1 even as RAZ. After v5.1, KVM still does - * not expose the register to "user" requests like this - * unless the host supports SVE. - */ - err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0, - ARM64_SYS_REG(3, 0, 0, 4, 4)); + if (sve_supported) { + /* + * There is a range of kernels between kernel commit 73433762fcae + * and f81cb2c3ad41 which have a bug where the kernel doesn't + * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has + * enabled SVE support, which resulted in an error rather than RAZ. + * So only read the register if we set KVM_ARM_VCPU_SVE above. + */ + err |= read_sys_reg64(fdarray[2], &ahcf->isar.regs[ID_AA64ZFR0], + ARM64_SYS_REG(3, 0, 0, 4, 4)); + } } kvm_arm_destroy_scratch_host_vcpu(fdarray); @@ -813,6 +818,11 @@ static int kvm_arm_sve_set_vls(CPUState *cs) assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX); + if (virtcca_cvm_enabled()) { + /* Already set through tmm config */ + return 0; + } + for (vq = 1; vq <= cpu->sve_max_vq; ++vq) { if (test_bit(vq - 1, cpu->sve_vq_map)) { i = (vq - 1) / 64; @@ -824,6 +834,20 @@ static int kvm_arm_sve_set_vls(CPUState *cs) return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, ®); } +bool kvm_arm_cpu_feature_supported(void) +{ + static bool cpu_feature_initialized; + static bool cpu_feature_supported; + + if (!cpu_feature_initialized) { + cpu_feature_supported = kvm_check_extension(kvm_state, + KVM_CAP_ARM_CPU_FEATURE); + cpu_feature_initialized = true; + } + + return cpu_feature_supported; +} + #define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5 int kvm_arch_init_vcpu(CPUState *cs) diff --git a/target/arm/kvm_arm.h b/target/arm/kvm_arm.h index b7f78b5215456291a582e7ab3dea56ae711e13fe..475531dad43581742d81facd459a24847271efa7 100644 --- a/target/arm/kvm_arm.h +++ b/target/arm/kvm_arm.h @@ -306,6 +306,13 @@ bool kvm_arm_pmu_supported(void); */ bool kvm_arm_sve_supported(void); +/** + * kvm_arm_cpu_feature_supported: + * + * Returns true if KVM can set CPU features and false otherwise. 
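kvm_arm_cpu_feature_supported() caches the result of the KVM_CAP_ARM_CPU_FEATURE probe in function-local statics, so only the first call costs an ioctl. A hedged sketch of the intended call pattern; the feature name below is hypothetical and only illustrates gating an optional vCPU init feature on the capability:

/*
 * Hypothetical call site: request an optional vCPU feature only when
 * the host kernel advertises KVM_CAP_ARM_CPU_FEATURE. The helper is
 * memoized, so calling it once per vCPU is cheap.
 */
if (kvm_arm_cpu_feature_supported()) {
    init.features[0] |= 1 << KVM_ARM_VCPU_SOME_FEATURE; /* hypothetical bit */
}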
+ */ +bool kvm_arm_cpu_feature_supported(void); + /** * kvm_arm_get_max_vm_ipa_size: * @ms: Machine state handle @@ -370,6 +377,14 @@ void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa); int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level); +void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, hwaddr len2, bool populate); + +int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp); +bool kvm_arm_tmm_enabled(void); + +int kvm_arm_get_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *target); +int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx, uint64_t *source); + #else /* @@ -396,6 +411,11 @@ static inline bool kvm_arm_steal_time_supported(void) return false; } +static inline bool kvm_arm_cpu_feature_supported(void) +{ + return false; +} + /* * These functions should never actually be called without KVM support. */ @@ -444,6 +464,29 @@ static inline void kvm_arm_sve_get_vls(CPUState *cs, unsigned long *map) g_assert_not_reached(); } +static inline int kvm_arm_get_one_reg(ARMCPU *cpu, uint64_t regidx, + uint64_t *target) +{ + g_assert_not_reached(); +} + +static inline int kvm_arm_set_one_reg(ARMCPU *cpu, uint64_t regidx, + uint64_t *source) +{ + g_assert_not_reached(); +} + +static inline int kvm_arm_tmm_init(ConfidentialGuestSupport *cgs, Error **errp G_GNUC_UNUSED) +{ + g_assert_not_reached(); +} + +static inline void tmm_add_ram_region(hwaddr base1, hwaddr len1, hwaddr base2, + hwaddr len2, bool populate) +{ + g_assert_not_reached(); +} + #endif static inline const char *gic_class_name(void) diff --git a/target/arm/meson.build b/target/arm/meson.build index 50f152214afedd04fdb321d17a0ca76ef44c2a19..bb950fbffeba954110d5bfff5b90142d0085b890 100644 --- a/target/arm/meson.build +++ b/target/arm/meson.build @@ -39,6 +39,7 @@ arm_ss.add(files( arm_ss.add(zlib) arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c')) +arm_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c', 'kvm64.c', 'kvm-tmm.c'), if_false: files('kvm-stub.c')) arm_ss.add(when: 'TARGET_AARCH64', if_true: files( 'cpu64.c', diff --git a/target/arm/monitor.c b/target/arm/monitor.c index 80c64fa3556d84cc5da1f5d3756fa261852811d7..4c6f1181d9259ff99bebdd01ceb0145d6f899991 100644 --- a/target/arm/monitor.c +++ b/target/arm/monitor.c @@ -217,6 +217,8 @@ CpuModelExpansionInfo *qmp_query_cpu_model_expansion(CpuModelExpansionType type, } } + arm_cpu_features_to_dict(ARM_CPU(obj), qdict_out); + if (!qdict_size(qdict_out)) { qobject_unref(qdict_out); } else { diff --git a/target/arm/sve_helper.c b/target/arm/sve_helper.c index 07be55b7e1adc5cdb292ed8e34c37efd9120cb94..e455fd6ecfc852b77ba31ead01a64024b2f546a5 100644 --- a/target/arm/sve_helper.c +++ b/target/arm/sve_helper.c @@ -3387,10 +3387,10 @@ void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \ /* We produce output faster than we consume input. \ Therefore we must be mindful of possible overlap. */ \ if (unlikely((vn - vd) < (uintptr_t)oprsz)) { \ - vn = memcpy(&tmp_n, vn, oprsz_2); \ + vn = memcpy(&tmp_n, vn, oprsz); \ } \ if (unlikely((vm - vd) < (uintptr_t)oprsz)) { \ - vm = memcpy(&tmp_m, vm, oprsz_2); \ + vm = memcpy(&tmp_m, vm, oprsz); \ } \ for (i = 0; i < oprsz_2; i += sizeof(TYPE)) { \ *(TYPE *)(vd + H(2 * i + 0)) = *(TYPE *)(vn + H(i)); \ @@ -6484,9 +6484,6 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, flags = info.page[0].flags | info.page[1].flags; if (unlikely(flags != 0)) { -#ifdef CONFIG_USER_ONLY - g_assert_not_reached(); -#else /* * At least one page includes MMIO. 
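Returning to the kvm_arm_get_one_reg()/kvm_arm_set_one_reg() accessors declared in the kvm_arm.h hunk above: they dispatch on KVM_REG_SIZE_MASK so that callers can hold every register value in a uint64_t, with the setter rejecting values that would truncate in the U32 case. A minimal usage sketch (error handling elided; KVM_REG_ARM_TIMER_CNT is a real 64-bit register index that also appears earlier in this patch):

/*
 * Read the virtual counter through the generic accessor, then write
 * the same value back; the helpers pick a 32- vs 64-bit transfer from
 * the size field encoded in the register index itself.
 */
uint64_t cnt;
if (kvm_arm_get_one_reg(cpu, KVM_REG_ARM_TIMER_CNT, &cnt) == 0) {
    kvm_arm_set_one_reg(cpu, KVM_REG_ARM_TIMER_CNT, &cnt);
}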
* Any bus operation can fail with cpu_transaction_failed, @@ -6517,7 +6514,6 @@ void sve_stN_r(CPUARMState *env, uint64_t *vg, target_ulong addr, } while (reg_off & 63); } while (reg_off <= reg_last); return; -#endif } mem_off = info.mem_off_first[0]; diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c index cec672f2296cd032fe70e22cca9c2523c48c4320..549a671beae76ad5b8fb85a68a135408544bbf51 100644 --- a/target/arm/translate-a64.c +++ b/target/arm/translate-a64.c @@ -3039,7 +3039,7 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, bool is_store = false; bool is_extended = false; bool is_unpriv = (idx == 2); - bool iss_valid = !is_vector; + bool iss_valid; bool post_index; bool writeback; int memidx; @@ -3092,6 +3092,8 @@ static void disas_ldst_reg_imm9(DisasContext *s, uint32_t insn, g_assert_not_reached(); } + iss_valid = !is_vector && !writeback; + if (rn == 31) { gen_check_sp_alignment(s); } diff --git a/target/arm/translate-neon.c b/target/arm/translate-neon.c index dd43de558e41286f68597309dc13513903208b88..761fd6a755349bc708ba91d1f89aacf88de06580 100644 --- a/target/arm/translate-neon.c +++ b/target/arm/translate-neon.c @@ -586,7 +586,11 @@ static bool trans_VLD_all_lanes(DisasContext *s, arg_VLD_all_lanes *a) case 3: return false; case 4: - align = pow2_align(size + 2); + if (size == 2) { + align = pow2_align(3); + } else { + align = pow2_align(size + 2); + } break; default: g_assert_not_reached(); diff --git a/target/hexagon/imported/encode_pp.def b/target/hexagon/imported/encode_pp.def index 939c6fc55fbc134db4dcc609e0ff049a2f5082e2..d71c04cd30e8b3928381340879987e9935546454 100644 --- a/target/hexagon/imported/encode_pp.def +++ b/target/hexagon/imported/encode_pp.def @@ -944,13 +944,6 @@ MPY_ENC(F2_dfmpyfix, "1000","ddddd","0","0","1","0","11") MPY_ENC(F2_dfmin, "1000","ddddd","0","0","1","1","11") MPY_ENC(F2_dfmax, "1000","ddddd","0","1","0","0","11") MPY_ENC(F2_dfmpyll, "1000","ddddd","0","1","0","1","11") -#ifdef ADD_DP_OPS -MPY_ENC(F2_dfdivcheat, "1000","ddddd","0","0","0","1","00") - -MPY_ENC(F2_dffixupn, "1000","ddddd","0","1","0","1","11") -MPY_ENC(F2_dffixupd, "1000","ddddd","0","1","1","0","11") -MPY_ENC(F2_dfrecipa, "1000","ddddd","0","1","1","1","ee") -#endif MPY_ENC(M7_dcmpyrw, "1000","ddddd","0","0","0","1","10") MPY_ENC(M7_dcmpyrwc, "1000","ddddd","0","0","1","1","10") @@ -1024,15 +1017,6 @@ MPY_ENC(M5_vdmacbsu, "1010","xxxxx","0","1","0","0","01") MPY_ENC(F2_dfmpylh, "1010","xxxxx","0","0","0","0","11") MPY_ENC(F2_dfmpyhh, "1010","xxxxx","0","0","0","1","11") -#ifdef ADD_DP_OPS -MPY_ENC(F2_dfmpyhh, "1010","xxxxx","0","0","1","0","11") -MPY_ENC(F2_dffma, "1010","xxxxx","0","0","0","0","11") -MPY_ENC(F2_dffms, "1010","xxxxx","0","0","0","1","11") - -MPY_ENC(F2_dffma_lib, "1010","xxxxx","0","0","1","0","11") -MPY_ENC(F2_dffms_lib, "1010","xxxxx","0","0","1","1","11") -MPY_ENC(F2_dffma_sc, "1010","xxxxx","0","1","1","1","uu") -#endif MPY_ENC(M7_dcmpyrw_acc, "1010","xxxxx","0","0","0","1","10") @@ -1547,15 +1531,8 @@ SH2_RR_ENC(F2_conv_df2d, "0000","111","0","0 00","ddddd") SH2_RR_ENC(F2_conv_df2ud, "0000","111","0","0 01","ddddd") SH2_RR_ENC(F2_conv_ud2df, "0000","111","0","0 10","ddddd") SH2_RR_ENC(F2_conv_d2df, "0000","111","0","0 11","ddddd") -#ifdef ADD_DP_OPS -SH2_RR_ENC(F2_dffixupr, "0000","111","0","1 00","ddddd") -SH2_RR_ENC(F2_dfsqrtcheat, "0000","111","0","1 01","ddddd") -#endif SH2_RR_ENC(F2_conv_df2d_chop, "0000","111","0","1 10","ddddd") SH2_RR_ENC(F2_conv_df2ud_chop,"0000","111","0","1 11","ddddd") -#ifdef ADD_DP_OPS 
-SH2_RR_ENC(F2_dfinvsqrta, "0000","111","1","0 ee","ddddd") -#endif diff --git a/target/hppa/helper.c b/target/hppa/helper.c index 1ccff5765a80a999a2100ba72a0456b086b44efa..eba133047b8a732239ede3a8efe5eb5436cbf00f 100644 --- a/target/hppa/helper.c +++ b/target/hppa/helper.c @@ -85,9 +85,11 @@ void hppa_cpu_dump_state(CPUState *cs, FILE *f, int flags) char psw_c[20]; int i; - qemu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx "\n", + qemu_fprintf(f, "IA_F " TARGET_FMT_lx " IA_B " TARGET_FMT_lx + " IIR " TREG_FMT_lx "\n", hppa_form_gva_psw(psw, env->iasq_f, env->iaoq_f), - hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b)); + hppa_form_gva_psw(psw, env->iasq_b, env->iaoq_b), + env->cr[CR_IIR]); psw_c[0] = (psw & PSW_W ? 'W' : '-'); psw_c[1] = (psw & PSW_E ? 'E' : '-'); diff --git a/target/hppa/translate.c b/target/hppa/translate.c index 3b9744deb446ceb0c688e725a2821de37c185aac..952027a28e1219cfca162e805c3b42d8d887eb21 100644 --- a/target/hppa/translate.c +++ b/target/hppa/translate.c @@ -140,6 +140,7 @@ #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64 #define tcg_gen_extract_reg tcg_gen_extract_i64 #define tcg_gen_sextract_reg tcg_gen_sextract_i64 +#define tcg_gen_extract2_reg tcg_gen_extract2_i64 #define tcg_const_reg tcg_const_i64 #define tcg_const_local_reg tcg_const_local_i64 #define tcg_constant_reg tcg_constant_i64 @@ -234,6 +235,7 @@ #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32 #define tcg_gen_extract_reg tcg_gen_extract_i32 #define tcg_gen_sextract_reg tcg_gen_sextract_i32 +#define tcg_gen_extract2_reg tcg_gen_extract2_i32 #define tcg_const_reg tcg_const_i32 #define tcg_const_local_reg tcg_const_local_i32 #define tcg_constant_reg tcg_constant_i32 @@ -3204,19 +3206,22 @@ static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a) dest = dest_gpr(ctx, a->t); t2 = load_gpr(ctx, a->r2); - if (a->r1 == a->r2) { + if (a->r1 == 0) { + tcg_gen_extract_reg(dest, t2, sa, 32 - sa); + } else if (TARGET_REGISTER_BITS == 32) { + tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa); + } else if (a->r1 == a->r2) { TCGv_i32 t32 = tcg_temp_new_i32(); tcg_gen_trunc_reg_i32(t32, t2); tcg_gen_rotri_i32(t32, t32, sa); tcg_gen_extu_i32_reg(dest, t32); tcg_temp_free_i32(t32); - } else if (a->r1 == 0) { - tcg_gen_extract_reg(dest, t2, sa, 32 - sa); } else { - TCGv_reg t0 = tcg_temp_new(); - tcg_gen_extract_reg(t0, t2, sa, 32 - sa); - tcg_gen_deposit_reg(dest, t0, cpu_gr[a->r1], 32 - sa, sa); - tcg_temp_free(t0); + TCGv_i64 t64 = tcg_temp_new_i64(); + tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]); + tcg_gen_shri_i64(t64, t64, sa); + tcg_gen_trunc_i64_reg(dest, t64); + tcg_temp_free_i64(t64); } save_gpr(ctx, a->t, dest); diff --git a/target/i386/cpu.c b/target/i386/cpu.c index aa9e6368004c7490ea63cc28ee3f9fe498771899..7aacae15bf1dcfb57550e0b16516a0193e5c976a 100644 --- a/target/i386/cpu.c +++ b/target/i386/cpu.c @@ -574,6 +574,18 @@ static CPUCacheInfo legacy_l3_cache = { #define INTEL_PT_CYCLE_BITMAP 0x1fff /* Support 0,2^(0~11) */ #define INTEL_PT_PSB_BITMAP (0x003f << 16) /* Support 2K,4K,8K,16K,32K,64K */ +/* CPUID Leaf 0x1D constants: */ +#define INTEL_AMX_TILE_MAX_SUBLEAF 0x1 +#define INTEL_AMX_TOTAL_TILE_BYTES 0x2000 +#define INTEL_AMX_BYTES_PER_TILE 0x400 +#define INTEL_AMX_BYTES_PER_ROW 0x40 +#define INTEL_AMX_TILE_MAX_NAMES 0x8 +#define INTEL_AMX_TILE_MAX_ROWS 0x10 + +/* CPUID Leaf 0x1E constants: */ +#define INTEL_AMX_TMUL_MAX_K 0x10 +#define INTEL_AMX_TMUL_MAX_N 0x40 + void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, uint32_t vendor2, uint32_t vendor3) { @@ -648,8 
+660,11 @@ void x86_cpu_vendor_words2str(char *dst, uint32_t vendor1, #define TCG_7_0_ECX_FEATURES (CPUID_7_0_ECX_PKU | \ /* CPUID_7_0_ECX_OSPKE is dynamic */ \ CPUID_7_0_ECX_LA57 | CPUID_7_0_ECX_PKS) -#define TCG_7_0_EDX_FEATURES 0 -#define TCG_7_1_EAX_FEATURES 0 +#define TCG_7_0_EDX_FEATURES CPUID_7_0_EDX_FSRM +#define TCG_7_1_EAX_FEATURES (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | \ + CPUID_7_1_EAX_FSRC) +#define TCG_7_1_EDX_FEATURES 0 +#define TCG_7_2_EDX_FEATURES 0 #define TCG_APM_FEATURES 0 #define TCG_6_EAX_FEATURES CPUID_6_EAX_ARAT #define TCG_XSAVE_FEATURES (CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XGETBV1) @@ -791,7 +806,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "pfthreshold", "avic", NULL, "v-vmsave-vmload", "vgif", NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, + NULL, "vnmi", NULL, NULL, "svme-addr-chk", NULL, NULL, NULL, }, .cpuid = { .eax = 0x8000000A, .reg = R_EDX, }, @@ -843,9 +858,9 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "avx512-vp2intersect", NULL, "md-clear", NULL, NULL, NULL, "serialize", NULL, "tsx-ldtrk", NULL, NULL /* pconfig */, NULL, - NULL, NULL, NULL, "avx512-fp16", - NULL, NULL, "spec-ctrl", "stibp", - NULL, "arch-capabilities", "core-capability", "ssbd", + NULL, NULL, "amx-bf16", "avx512-fp16", + "amx-tile", "amx-int8", "spec-ctrl", "stibp", + "flush-l1d", "arch-capabilities", "core-capability", "ssbd", }, .cpuid = { .eax = 7, @@ -858,7 +873,26 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .type = CPUID_FEATURE_WORD, .feat_names = { NULL, NULL, NULL, NULL, - "avx-vnni", "avx512-bf16", NULL, NULL, + "avx-vnni", "avx512-bf16", NULL, "cmpccxadd", + NULL, NULL, "fzrm", "fsrs", + "fsrc", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, "amx-fp16", NULL, "avx-ifma", + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 1, + .reg = R_EAX, + }, + .tcg_features = TCG_7_1_EAX_FEATURES, + }, + [FEAT_7_2_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, "mcdt-no", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -866,12 +900,31 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }, + .cpuid = { + .eax = 7, + .needs_ecx = true, .ecx = 2, + .reg = R_EDX, + }, + .tcg_features = TCG_7_2_EDX_FEATURES, + }, + [FEAT_7_1_EDX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + "avx-vnni-int8", "avx-ne-convert", NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, "prefetchiti", NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, .cpuid = { .eax = 7, .needs_ecx = true, .ecx = 1, - .reg = R_EAX, + .reg = R_EDX, }, - .tcg_features = TCG_7_1_EAX_FEATURES, + .tcg_features = TCG_7_1_EDX_FEATURES, }, [FEAT_8000_0007_EDX] = { .type = CPUID_FEATURE_WORD, @@ -896,20 +949,36 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { NULL, NULL, NULL, NULL, NULL, "wbnoinvd", NULL, NULL, "ibpb", NULL, "ibrs", "amd-stibp", - NULL, NULL, NULL, NULL, + NULL, "stibp-always-on", NULL, NULL, NULL, NULL, NULL, NULL, "amd-ssbd", "virt-ssbd", "amd-no-ssb", NULL, - NULL, NULL, NULL, NULL, + "amd-psfd", NULL, NULL, NULL, }, .cpuid = { .eax = 0x80000008, .reg = R_EBX, }, .tcg_features = 0, .unmigratable_flags = 0, }, + [FEAT_8000_0021_EAX] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + "no-nested-data-bp", NULL, "lfence-always-serializing", NULL, + NULL, NULL, 
"null-sel-clr-base", NULL, + "auto-ibrs", NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { .eax = 0x80000021, .reg = R_EAX, }, + .tcg_features = 0, + .unmigratable_flags = 0, + }, [FEAT_XSAVE] = { .type = CPUID_FEATURE_WORD, .feat_names = { "xsaveopt", "xsavec", "xgetbv1", "xsaves", - NULL, NULL, NULL, NULL, + "xfd", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -924,6 +993,34 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { }, .tcg_features = TCG_XSAVE_FEATURES, }, + [FEAT_XSAVE_XSS_LO] = { + .type = CPUID_FEATURE_WORD, + .feat_names = { + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, + }, + .cpuid = { + .eax = 0xD, + .needs_ecx = true, + .ecx = 1, + .reg = R_ECX, + }, + }, + [FEAT_XSAVE_XSS_HI] = { + .type = CPUID_FEATURE_WORD, + .cpuid = { + .eax = 0xD, + .needs_ecx = true, + .ecx = 1, + .reg = R_EDX + }, + }, [FEAT_6_EAX] = { .type = CPUID_FEATURE_WORD, .feat_names = { @@ -939,7 +1036,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .cpuid = { .eax = 6, .reg = R_EAX, }, .tcg_features = TCG_6_EAX_FEATURES, }, - [FEAT_XSAVE_COMP_LO] = { + [FEAT_XSAVE_XCR0_LO] = { .type = CPUID_FEATURE_WORD, .cpuid = { .eax = 0xD, @@ -952,7 +1049,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { XSTATE_OPMASK_MASK | XSTATE_ZMM_Hi256_MASK | XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK, }, - [FEAT_XSAVE_COMP_HI] = { + [FEAT_XSAVE_XCR0_HI] = { .type = CPUID_FEATURE_WORD, .cpuid = { .eax = 0xD, @@ -968,11 +1065,11 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { "rdctl-no", "ibrs-all", "rsba", "skip-l1dfl-vmentry", "ssb-no", "mds-no", "pschange-mc-no", "tsx-ctrl", "taa-no", NULL, NULL, NULL, + NULL, "sbdr-ssdp-no", "fbsdp-no", "psdp-no", + NULL, "fb-clear", NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, + "pbrsb-no", NULL, "gds-no", "rfds-no", + "rfds-clear", NULL, NULL, NULL, }, .msr = { .index = MSR_IA32_ARCH_CAPABILITIES, @@ -1192,7 +1289,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .feat_names = { "sgx1", "sgx2", NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, + NULL, NULL, NULL, "sgx-edeccssa", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1232,7 +1329,7 @@ FeatureWordInfo feature_word_info[FEATURE_WORDS] = { .feat_names = { NULL, "sgx-debug", "sgx-mode64", NULL, "sgx-provisionkey", "sgx-tokenkey", NULL, "sgx-kss", - NULL, NULL, NULL, NULL, + NULL, NULL, "sgx-aex-notify", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, @@ -1369,6 +1466,9 @@ static const X86RegisterInfo32 x86_reg_info_32[CPU_NB_REGS32] = { }; #undef REGISTER +/* CPUID feature bits available in XSS */ +#define CPUID_XSTATE_XSS_MASK (0) + ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = { [XSTATE_FP_BIT] = { /* x87 FP state component is always enabled if XSAVE is supported */ @@ -1401,17 +1501,28 @@ ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT] = { [XSTATE_PKRU_BIT] = { .feature = FEAT_7_0_ECX, .bits = CPUID_7_0_ECX_PKU, .size = sizeof(XSavePKRU) }, + [XSTATE_XTILE_CFG_BIT] = { + .feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE, + .size = sizeof(XSaveXTILECFG), + }, + [XSTATE_XTILE_DATA_BIT] = { + 
.feature = FEAT_7_0_EDX, .bits = CPUID_7_0_EDX_AMX_TILE, + .size = sizeof(XSaveXTILEDATA) + }, }; -static uint32_t xsave_area_size(uint64_t mask) +static uint32_t xsave_area_size(uint64_t mask, bool compacted) { + uint64_t ret = x86_ext_save_areas[0].size; + const ExtSaveArea *esa; + uint32_t offset = 0; int i; - uint64_t ret = 0; - for (i = 0; i < ARRAY_SIZE(x86_ext_save_areas); i++) { - const ExtSaveArea *esa = &x86_ext_save_areas[i]; + for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { + esa = &x86_ext_save_areas[i]; if ((mask >> i) & 1) { - ret = MAX(ret, esa->offset + esa->size); + offset = compacted ? ret : esa->offset; + ret = MAX(ret, offset + esa->size); } } return ret; @@ -1422,10 +1533,10 @@ static inline bool accel_uses_host_cpuid(void) return kvm_enabled() || hvf_enabled(); } -static inline uint64_t x86_cpu_xsave_components(X86CPU *cpu) +static inline uint64_t x86_cpu_xsave_xcr0_components(X86CPU *cpu) { - return ((uint64_t)cpu->env.features[FEAT_XSAVE_COMP_HI]) << 32 | - cpu->env.features[FEAT_XSAVE_COMP_LO]; + return ((uint64_t)cpu->env.features[FEAT_XSAVE_XCR0_HI]) << 32 | + cpu->env.features[FEAT_XSAVE_XCR0_LO]; } /* Return name of 32-bit register, from a R_* constant */ @@ -1437,6 +1548,12 @@ static const char *get_register_name_32(unsigned int reg) return x86_reg_info_32[reg].name; } +static inline uint64_t x86_cpu_xsave_xss_components(X86CPU *cpu) +{ + return ((uint64_t)cpu->env.features[FEAT_XSAVE_XSS_HI]) << 32 | + cpu->env.features[FEAT_XSAVE_XSS_LO]; +} + /* * Returns the set of feature flags that are supported and migratable by * QEMU, for a given FeatureWord. @@ -1523,6 +1640,7 @@ typedef struct X86CPUVersionDefinition { const char *alias; const char *note; PropValue *props; + const CPUCaches *const cache_info; } X86CPUVersionDefinition; /* Base definition for a CPU model */ @@ -1631,6 +1749,56 @@ static const CPUCaches epyc_cache_info = { }, }; +static CPUCaches epyc_v4_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 64 * KiB, + .line_size = 64, + .associativity = 4, + .partitions = 1, + .sets = 256, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 8 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 8192, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + }, +}; + static const CPUCaches epyc_rome_cache_info = { .l1d_cache = &(CPUCacheInfo) { .type = DATA_CACHE, @@ -1681,6 +1849,56 @@ static const CPUCaches epyc_rome_cache_info = { }, }; +static const CPUCaches epyc_rome_v3_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + 
.self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 16 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 16384, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + }, +}; + static const CPUCaches epyc_milan_cache_info = { .l1d_cache = &(CPUCacheInfo) { .type = DATA_CACHE, @@ -1731,6 +1949,156 @@ static const CPUCaches epyc_milan_cache_info = { }, }; +static const CPUCaches epyc_milan_v2_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + }, +}; + +static const CPUCaches epyc_genoa_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 1 * MiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 2048, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 32 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 1, + .sets = 32768, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = false, + }, +}; + +static const CPUCaches dharma_cache_info = { + .l1d_cache = &(CPUCacheInfo) { + .type = DATA_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l1i_cache = &(CPUCacheInfo) { + .type = INSTRUCTION_CACHE, + .level = 1, + .size = 32 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 64, + .lines_per_tag = 1, + .self_init = 1, + .no_invd_sharing = true, + }, + .l2_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 2, + .size = 512 * KiB, + .line_size = 64, + .associativity = 8, + .partitions = 1, + .sets = 1024, + .lines_per_tag = 1, + }, + .l3_cache = &(CPUCacheInfo) { + .type = UNIFIED_CACHE, + .level = 3, + .size = 16 * MiB, + .line_size = 64, + .associativity = 16, + .partitions = 
1, + .sets = 16384, + .lines_per_tag = 1, + .self_init = true, + .inclusive = true, + .complex_indexing = true, + }, +}; + /* The following VMX features are not supported by KVM and are left out in the * CPU definitions: * @@ -3505,9 +3873,430 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } }, }, + { + .version = 7, + .note = "TSX, taa-no", + .props = (PropValue[]) { + /* Restore TSX features removed by -v2 above */ + { "hle", "on" }, + { "rtm", "on" }, + { /* end of list */ } + }, + }, { /* end of list */ } } }, + { + .name = "SapphireRapids", + .level = 0x20, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 143, + .stepping = 4, + /* + * please keep the ascending order so that we can have a clear view of + * bit position of each feature. + */ + .features[FEAT_1_EDX] = + CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | + CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | + CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | + CPUID_SSE | CPUID_SSE2, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | + CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | + CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_WBNOINVD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | + CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | + CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | + CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | + CPUID_7_0_EBX_AVX512IFMA | CPUID_7_0_EBX_CLFLUSHOPT | + CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 | + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE | + CPUID_7_0_EDX_TSX_LDTRK | CPUID_7_0_EDX_AMX_BF16 | + CPUID_7_0_EDX_AVX512_FP16 | CPUID_7_0_EDX_AMX_TILE | + CPUID_7_0_EDX_AMX_INT8 | CPUID_7_0_EDX_SPEC_CTRL | + CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | + MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | + MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES | CPUID_D_1_EAX_XFD, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16 | + CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC, + .features[FEAT_VMX_BASIC] = + MSR_VMX_BASIC_INS_OUTS | 
MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = + MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_PAGE_WALK_LENGTH_5 | + MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | + MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | + MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = + VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | + VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | + VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | + VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | + VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | + VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | + VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING | + VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | + VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML | + VMX_SECONDARY_EXEC_XSAVES, + .features[FEAT_VMX_VMFUNC] = + MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (SapphireRapids)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "sbdr-ssdp-no", "on" }, + { "fbsdp-no", "on" }, + { "psdp-no", "on" }, + { /* end of list */ } + } + }, + { + .version = 3, + .props = (PropValue[]) { + { "ss", "on" }, + { "tsc-adjust", "on" }, + { "cldemote", "on" }, + { "movdiri", "on" }, + { "movdir64b", "on" }, + { /* end of list */ } + } + }, + { /* end of list */ } + } + }, + { + .name = "GraniteRapids", + .level = 0x20, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 173, + .stepping = 0, + /* + * please keep the ascending order so that we can have a clear view of + * bit position of 
each feature. + */ + .features[FEAT_1_EDX] = + CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | + CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | + CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | + CPUID_SSE | CPUID_SSE2, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | + CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | + CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_WBNOINVD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_HLE | + CPUID_7_0_EBX_AVX2 | CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | + CPUID_7_0_EBX_ERMS | CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RTM | + CPUID_7_0_EBX_AVX512F | CPUID_7_0_EBX_AVX512DQ | + CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | + CPUID_7_0_EBX_AVX512IFMA | CPUID_7_0_EBX_CLFLUSHOPT | + CPUID_7_0_EBX_CLWB | CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 | + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE | + CPUID_7_0_EDX_TSX_LDTRK | CPUID_7_0_EDX_AMX_BF16 | + CPUID_7_0_EDX_AVX512_FP16 | CPUID_7_0_EDX_AMX_TILE | + CPUID_7_0_EDX_AMX_INT8 | CPUID_7_0_EDX_SPEC_CTRL | + CPUID_7_0_EDX_ARCH_CAPABILITIES | CPUID_7_0_EDX_SPEC_CTRL_SSBD, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | + MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | + MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_TAA_NO | + MSR_ARCH_CAP_SBDR_SSDP_NO | MSR_ARCH_CAP_FBSDP_NO | + MSR_ARCH_CAP_PSDP_NO | MSR_ARCH_CAP_PBRSB_NO, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES | CPUID_D_1_EAX_XFD, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_AVX512_BF16 | + CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC | + CPUID_7_1_EAX_AMX_FP16, + .features[FEAT_7_1_EDX] = + CPUID_7_1_EDX_PREFETCHITI, + .features[FEAT_7_2_EDX] = + CPUID_7_2_EDX_MCDT_NO, + .features[FEAT_VMX_BASIC] = + MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = + MSR_VMX_EPT_EXECONLY | + MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | MSR_VMX_EPT_PAGE_WALK_LENGTH_5 | + MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | + MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | 
MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | + MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] = + VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | + VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | + VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | + VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | + VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | + VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | + VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING | + VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | + VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML | + VMX_SECONDARY_EXEC_XSAVES, + .features[FEAT_VMX_VMFUNC] = + MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (GraniteRapids)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { /* end of list */ }, + }, + }, + { + .name = "SierraForest", + .level = 0x23, + .vendor = CPUID_VENDOR_INTEL, + .family = 6, + .model = 175, + .stepping = 0, + /* + * please keep the ascending order so that we can have a clear view of + * bit position of each feature. 
+ */ + .features[FEAT_1_EDX] = + CPUID_FP87 | CPUID_VME | CPUID_DE | CPUID_PSE | CPUID_TSC | + CPUID_MSR | CPUID_PAE | CPUID_MCE | CPUID_CX8 | CPUID_APIC | + CPUID_SEP | CPUID_MTRR | CPUID_PGE | CPUID_MCA | CPUID_CMOV | + CPUID_PAT | CPUID_PSE36 | CPUID_CLFLUSH | CPUID_MMX | CPUID_FXSR | + CPUID_SSE | CPUID_SSE2, + .features[FEAT_1_ECX] = + CPUID_EXT_SSE3 | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSSE3 | + CPUID_EXT_FMA | CPUID_EXT_CX16 | CPUID_EXT_PCID | CPUID_EXT_SSE41 | + CPUID_EXT_SSE42 | CPUID_EXT_X2APIC | CPUID_EXT_MOVBE | + CPUID_EXT_POPCNT | CPUID_EXT_TSC_DEADLINE_TIMER | CPUID_EXT_AES | + CPUID_EXT_XSAVE | CPUID_EXT_AVX | CPUID_EXT_F16C | CPUID_EXT_RDRAND, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_SYSCALL | CPUID_EXT2_NX | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_RDTSCP | CPUID_EXT2_LM, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_LAHF_LM | CPUID_EXT3_ABM | CPUID_EXT3_3DNOWPREFETCH, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_WBNOINVD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | + CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_SHA_NI, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_RDPID | CPUID_7_0_ECX_BUS_LOCK_DETECT, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM | CPUID_7_0_EDX_SERIALIZE | + CPUID_7_0_EDX_SPEC_CTRL | CPUID_7_0_EDX_ARCH_CAPABILITIES | + CPUID_7_0_EDX_SPEC_CTRL_SSBD, + .features[FEAT_ARCH_CAPABILITIES] = + MSR_ARCH_CAP_RDCL_NO | MSR_ARCH_CAP_IBRS_ALL | + MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY | MSR_ARCH_CAP_MDS_NO | + MSR_ARCH_CAP_PSCHANGE_MC_NO | MSR_ARCH_CAP_SBDR_SSDP_NO | + MSR_ARCH_CAP_FBSDP_NO | MSR_ARCH_CAP_PSDP_NO | + MSR_ARCH_CAP_PBRSB_NO, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX_VNNI | CPUID_7_1_EAX_CMPCCXADD | + CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_AVX_IFMA, + .features[FEAT_7_1_EDX] = + CPUID_7_1_EDX_AVX_VNNI_INT8 | CPUID_7_1_EDX_AVX_NE_CONVERT, + .features[FEAT_7_2_EDX] = + CPUID_7_2_EDX_MCDT_NO, + .features[FEAT_VMX_BASIC] = + MSR_VMX_BASIC_INS_OUTS | MSR_VMX_BASIC_TRUE_CTLS, + .features[FEAT_VMX_ENTRY_CTLS] = + VMX_VM_ENTRY_LOAD_DEBUG_CONTROLS | VMX_VM_ENTRY_IA32E_MODE | + VMX_VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_ENTRY_LOAD_IA32_PAT | VMX_VM_ENTRY_LOAD_IA32_EFER, + .features[FEAT_VMX_EPT_VPID_CAPS] = + MSR_VMX_EPT_EXECONLY | MSR_VMX_EPT_PAGE_WALK_LENGTH_4 | + MSR_VMX_EPT_WB | MSR_VMX_EPT_2MB | MSR_VMX_EPT_1GB | + MSR_VMX_EPT_INVEPT | MSR_VMX_EPT_AD_BITS | + MSR_VMX_EPT_INVEPT_SINGLE_CONTEXT | MSR_VMX_EPT_INVEPT_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID | MSR_VMX_EPT_INVVPID_SINGLE_ADDR | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT | + MSR_VMX_EPT_INVVPID_ALL_CONTEXT | + MSR_VMX_EPT_INVVPID_SINGLE_CONTEXT_NOGLOBALS, + .features[FEAT_VMX_EXIT_CTLS] = + VMX_VM_EXIT_SAVE_DEBUG_CONTROLS | + VMX_VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | + VMX_VM_EXIT_ACK_INTR_ON_EXIT | VMX_VM_EXIT_SAVE_IA32_PAT | + VMX_VM_EXIT_LOAD_IA32_PAT | VMX_VM_EXIT_SAVE_IA32_EFER | + VMX_VM_EXIT_LOAD_IA32_EFER | VMX_VM_EXIT_SAVE_VMX_PREEMPTION_TIMER, + .features[FEAT_VMX_MISC] = + MSR_VMX_MISC_STORE_LMA | MSR_VMX_MISC_ACTIVITY_HLT | + MSR_VMX_MISC_VMWRITE_VMEXIT, + .features[FEAT_VMX_PINBASED_CTLS] 
= + VMX_PIN_BASED_EXT_INTR_MASK | VMX_PIN_BASED_NMI_EXITING | + VMX_PIN_BASED_VIRTUAL_NMIS | VMX_PIN_BASED_VMX_PREEMPTION_TIMER | + VMX_PIN_BASED_POSTED_INTR, + .features[FEAT_VMX_PROCBASED_CTLS] = + VMX_CPU_BASED_VIRTUAL_INTR_PENDING | + VMX_CPU_BASED_USE_TSC_OFFSETING | VMX_CPU_BASED_HLT_EXITING | + VMX_CPU_BASED_INVLPG_EXITING | VMX_CPU_BASED_MWAIT_EXITING | + VMX_CPU_BASED_RDPMC_EXITING | VMX_CPU_BASED_RDTSC_EXITING | + VMX_CPU_BASED_CR3_LOAD_EXITING | VMX_CPU_BASED_CR3_STORE_EXITING | + VMX_CPU_BASED_CR8_LOAD_EXITING | VMX_CPU_BASED_CR8_STORE_EXITING | + VMX_CPU_BASED_TPR_SHADOW | VMX_CPU_BASED_VIRTUAL_NMI_PENDING | + VMX_CPU_BASED_MOV_DR_EXITING | VMX_CPU_BASED_UNCOND_IO_EXITING | + VMX_CPU_BASED_USE_IO_BITMAPS | VMX_CPU_BASED_MONITOR_TRAP_FLAG | + VMX_CPU_BASED_USE_MSR_BITMAPS | VMX_CPU_BASED_MONITOR_EXITING | + VMX_CPU_BASED_PAUSE_EXITING | + VMX_CPU_BASED_ACTIVATE_SECONDARY_CONTROLS, + .features[FEAT_VMX_SECONDARY_CTLS] = + VMX_SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES | + VMX_SECONDARY_EXEC_ENABLE_EPT | VMX_SECONDARY_EXEC_DESC | + VMX_SECONDARY_EXEC_RDTSCP | + VMX_SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | + VMX_SECONDARY_EXEC_ENABLE_VPID | VMX_SECONDARY_EXEC_WBINVD_EXITING | + VMX_SECONDARY_EXEC_UNRESTRICTED_GUEST | + VMX_SECONDARY_EXEC_APIC_REGISTER_VIRT | + VMX_SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | + VMX_SECONDARY_EXEC_RDRAND_EXITING | + VMX_SECONDARY_EXEC_ENABLE_INVPCID | + VMX_SECONDARY_EXEC_ENABLE_VMFUNC | VMX_SECONDARY_EXEC_SHADOW_VMCS | + VMX_SECONDARY_EXEC_RDSEED_EXITING | VMX_SECONDARY_EXEC_ENABLE_PML | + VMX_SECONDARY_EXEC_XSAVES, + .features[FEAT_VMX_VMFUNC] = + MSR_VMX_VMFUNC_EPT_SWITCHING, + .xlevel = 0x80000008, + .model_id = "Intel Xeon Processor (SierraForest)", + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { /* end of list */ }, + }, + }, { .name = "Denverton", .level = 21, @@ -4000,6 +4789,15 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } } }, + { + .version = 4, + .props = (PropValue[]) { + { "model-id", + "AMD EPYC-v4 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_v4_cache_info + }, { /* end of list */ } } }, @@ -4057,6 +4855,20 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } }, }, + { .version = 3, + .props = (PropValue[]) { + { "xsaves", "off" }, + { "perfctr-core", "on" }, + { "clzero", "on" }, + { "xsaveerptr", "on" }, + { "aes", "on" }, + { "pclmulqdq", "on" }, + { "sha-ni", "on" }, + { "model-id", + "Hygon Dhyana-v3 processor" }, + { /* end of list */ } + }, + }, { /* end of list */ } } }, @@ -4119,6 +4931,15 @@ static const X86CPUDefinition builtin_x86_defs[] = { { /* end of list */ } } }, + { + .version = 3, + .props = (PropValue[]) { + { "model-id", + "AMD EPYC-Rome-v3 Processor" }, + { /* end of list */ } + }, + .cache_info = &epyc_rome_v3_cache_info + }, { /* end of list */ } } }, @@ -4176,6 +4997,147 @@ static const X86CPUDefinition builtin_x86_defs[] = { .xlevel = 0x8000001E, .model_id = "AMD EPYC-Milan Processor", .cache_info = &epyc_milan_cache_info, + .versions = (X86CPUVersionDefinition[]) { + { .version = 1 }, + { + .version = 2, + .props = (PropValue[]) { + { "model-id", + "AMD EPYC-Milan-v2 Processor" }, + { "vaes", "on" }, + { "vpclmulqdq", "on" }, + { "stibp-always-on", "on" }, + { "amd-psfd", "on" }, + { "no-nested-data-bp", "on" }, + { "lfence-always-serializing", "on" }, + { "null-sel-clr-base", "on" }, + { /* end of list */ } + }, + .cache_info = &epyc_milan_v2_cache_info + }, + { /* end of list */ } + } + }, + { + .name = "EPYC-Genoa", + 
.level = 0xd, + .vendor = CPUID_VENDOR_AMD, + .family = 25, + .model = 17, + .stepping = 0, + .features[FEAT_1_EDX] = + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | + CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | + CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | + CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | + CPUID_VME | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | + CPUID_EXT_MOVBE | CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_PCID | CPUID_EXT_CX16 | CPUID_EXT_FMA | + CPUID_EXT_SSSE3 | CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | + CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | + CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | + CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | + CPUID_8000_0008_EBX_WBNOINVD | CPUID_8000_0008_EBX_IBPB | + CPUID_8000_0008_EBX_IBRS | CPUID_8000_0008_EBX_STIBP | + CPUID_8000_0008_EBX_STIBP_ALWAYS_ON | + CPUID_8000_0008_EBX_AMD_SSBD | CPUID_8000_0008_EBX_AMD_PSFD, + .features[FEAT_8000_0021_EAX] = + CPUID_8000_0021_EAX_No_NESTED_DATA_BP | + CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING | + CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE | + CPUID_8000_0021_EAX_AUTO_IBRS, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_ERMS | + CPUID_7_0_EBX_INVPCID | CPUID_7_0_EBX_AVX512F | + CPUID_7_0_EBX_AVX512DQ | CPUID_7_0_EBX_RDSEED | CPUID_7_0_EBX_ADX | + CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_AVX512IFMA | + CPUID_7_0_EBX_CLFLUSHOPT | CPUID_7_0_EBX_CLWB | + CPUID_7_0_EBX_AVX512CD | CPUID_7_0_EBX_SHA_NI | + CPUID_7_0_EBX_AVX512BW | CPUID_7_0_EBX_AVX512VL, + .features[FEAT_7_0_ECX] = + CPUID_7_0_ECX_AVX512_VBMI | CPUID_7_0_ECX_UMIP | CPUID_7_0_ECX_PKU | + CPUID_7_0_ECX_AVX512_VBMI2 | CPUID_7_0_ECX_GFNI | + CPUID_7_0_ECX_VAES | CPUID_7_0_ECX_VPCLMULQDQ | + CPUID_7_0_ECX_AVX512VNNI | CPUID_7_0_ECX_AVX512BITALG | + CPUID_7_0_ECX_AVX512_VPOPCNTDQ | CPUID_7_0_ECX_LA57 | + CPUID_7_0_ECX_RDPID, + .features[FEAT_7_0_EDX] = + CPUID_7_0_EDX_FSRM, + .features[FEAT_7_1_EAX] = + CPUID_7_1_EAX_AVX512_BF16, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1 | CPUID_XSAVE_XSAVES, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_SVM] = + CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE | CPUID_SVM_VNMI | + CPUID_SVM_SVME_ADDR_CHK, + .xlevel = 0x80000022, + .model_id = "AMD EPYC-Genoa Processor", + .cache_info = &epyc_genoa_cache_info, + }, + { + .name = "Dharma", + .level = 0xd, + .vendor = CPUID_VENDOR_HYGON, + .family = 24, + .model = 4, + .stepping = 0, + .features[FEAT_1_EDX] = + CPUID_SSE2 | CPUID_SSE | CPUID_FXSR | CPUID_MMX | CPUID_CLFLUSH | + CPUID_PSE36 | CPUID_PAT | CPUID_CMOV | CPUID_MCA | CPUID_PGE | + CPUID_MTRR | CPUID_SEP | CPUID_APIC | CPUID_CX8 | CPUID_MCE | + CPUID_PAE | CPUID_MSR | CPUID_TSC | CPUID_PSE | CPUID_DE | + CPUID_VME | CPUID_FP87, + .features[FEAT_1_ECX] = + CPUID_EXT_RDRAND | CPUID_EXT_F16C | CPUID_EXT_AVX | + CPUID_EXT_XSAVE | CPUID_EXT_AES | CPUID_EXT_POPCNT | + CPUID_EXT_MOVBE 
| CPUID_EXT_SSE42 | CPUID_EXT_SSE41 | + CPUID_EXT_CX16 | CPUID_EXT_FMA | CPUID_EXT_SSSE3 | + CPUID_EXT_MONITOR | CPUID_EXT_PCLMULQDQ | CPUID_EXT_SSE3, + .features[FEAT_8000_0001_EDX] = + CPUID_EXT2_LM | CPUID_EXT2_RDTSCP | CPUID_EXT2_PDPE1GB | + CPUID_EXT2_FFXSR | CPUID_EXT2_MMXEXT | CPUID_EXT2_NX | + CPUID_EXT2_SYSCALL, + .features[FEAT_8000_0001_ECX] = + CPUID_EXT3_OSVW | CPUID_EXT3_3DNOWPREFETCH | + CPUID_EXT3_MISALIGNSSE | CPUID_EXT3_SSE4A | CPUID_EXT3_ABM | + CPUID_EXT3_CR8LEG | CPUID_EXT3_SVM | CPUID_EXT3_LAHF_LM | + CPUID_EXT3_TOPOEXT | CPUID_EXT3_PERFCORE, + .features[FEAT_8000_0008_EBX] = + CPUID_8000_0008_EBX_CLZERO | CPUID_8000_0008_EBX_XSAVEERPTR | + CPUID_8000_0008_EBX_IBPB | CPUID_8000_0008_EBX_IBRS | + CPUID_8000_0008_EBX_STIBP | CPUID_8000_0008_EBX_AMD_SSBD, + .features[FEAT_7_0_EBX] = + CPUID_7_0_EBX_FSGSBASE | CPUID_7_0_EBX_BMI1 | CPUID_7_0_EBX_AVX2 | + CPUID_7_0_EBX_SMEP | CPUID_7_0_EBX_BMI2 | CPUID_7_0_EBX_RDSEED | + CPUID_7_0_EBX_ADX | CPUID_7_0_EBX_SMAP | CPUID_7_0_EBX_CLFLUSHOPT | + CPUID_7_0_EBX_SHA_NI, + .features[FEAT_7_0_ECX] = CPUID_7_0_ECX_UMIP, + .features[FEAT_XSAVE] = + CPUID_XSAVE_XSAVEOPT | CPUID_XSAVE_XSAVEC | + CPUID_XSAVE_XGETBV1, + .features[FEAT_6_EAX] = + CPUID_6_EAX_ARAT, + .features[FEAT_SVM] = + CPUID_SVM_NPT | CPUID_SVM_NRIPSAVE, + .xlevel = 0x8000001E, + .model_id = "Hygon Dharma Processor", + .cache_info = &dharma_cache_info, }, }; @@ -4470,7 +5432,8 @@ static void x86_cpuid_set_vendor(Object *obj, const char *value, int i; if (strlen(value) != CPUID_VENDOR_SZ) { - error_setg(errp, QERR_PROPERTY_VALUE_BAD, "", "vendor", value); + error_setg(errp, "value of property 'vendor' must consist of" + " exactly " stringify(CPUID_VENDOR_SZ) " characters"); return; } @@ -4604,8 +5567,8 @@ static const char *x86_cpu_feature_name(FeatureWord w, int bitnr) /* XSAVE components are automatically enabled by other features, * so return the original feature name instead */ - if (w == FEAT_XSAVE_COMP_LO || w == FEAT_XSAVE_COMP_HI) { - int comp = (w == FEAT_XSAVE_COMP_HI) ? bitnr + 32 : bitnr; + if (w == FEAT_XSAVE_XCR0_LO || w == FEAT_XSAVE_XCR0_HI) { + int comp = (w == FEAT_XSAVE_XCR0_HI) ? 
bitnr + 32 : bitnr; if (comp < ARRAY_SIZE(x86_ext_save_areas) && x86_ext_save_areas[comp].bits) { @@ -4752,6 +5715,7 @@ static void x86_cpu_get_unavailable_features(Object *obj, Visitor *v, x86_cpu_list_feature_names(xc->filtered_features, &result); visit_type_strList(v, "unavailable-features", &result, errp); + qapi_free_strList(result); } /* Check for missing features that may prevent the CPU class from @@ -4878,6 +5842,11 @@ static void x86_cpu_list_entry(gpointer data, gpointer user_data) desc = g_strdup_printf("%s", model_id); } + if (cc->model && cc->model->cpudef->deprecation_note) { + g_autofree char *olddesc = desc; + desc = g_strdup_printf("%s (deprecated)", olddesc); + } + qemu_printf("x86 %-20s %s\n", name, desc); } @@ -4952,8 +5921,8 @@ CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) return cpu_list; } -static uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, - bool migratable_only) +uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, + bool migratable_only) { FeatureWordInfo *wi = &feature_word_info[w]; uint64_t r = 0; @@ -5041,6 +6010,31 @@ static void x86_cpu_apply_version_props(X86CPU *cpu, X86CPUModel *model) assert(vdef->version == version); } +static const CPUCaches *x86_cpu_get_versioned_cache_info(X86CPU *cpu, + X86CPUModel *model) +{ + const X86CPUVersionDefinition *vdef; + X86CPUVersion version = x86_cpu_model_resolve_version(model); + const CPUCaches *cache_info = model->cpudef->cache_info; + + if (version == CPU_VERSION_LEGACY) { + return cache_info; + } + + for (vdef = x86_cpu_def_get_versions(model->cpudef); vdef->version; vdef++) { + if (vdef->cache_info) { + cache_info = vdef->cache_info; + } + + if (vdef->version == version) { + break; + } + } + + assert(vdef->version == version); + return cache_info; +} + /* * Load data from X86CPUDefinition into a X86CPU object. * Only for builtin_x86_defs models initialized with x86_register_cpudef_types. @@ -5073,7 +6067,7 @@ static void x86_cpu_load_model(X86CPU *cpu, X86CPUModel *model) } /* legacy-cache defaults to 'off' if CPU model provides cache info */ - cpu->legacy_cache = !def->cache_info; + cpu->legacy_cache = !x86_cpu_get_versioned_cache_info(cpu, model); env->features[FEAT_1_ECX] |= CPUID_EXT_HYPERVISOR; @@ -5196,7 +6190,7 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, { X86CPU *cpu = env_archcpu(env); CPUState *cs = env_cpu(env); - uint32_t die_offset; + uint32_t die_offset, smt_width; uint32_t limit; uint32_t signature[3]; X86CPUTopoInfo topo_info; @@ -5205,6 +6199,9 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, topo_info.cores_per_die = cs->nr_cores; topo_info.threads_per_core = cs->nr_threads; + die_offset = apicid_die_offset(&topo_info); + smt_width = apicid_smt_width(&topo_info); + /* Calculate & apply limits for different index ranges */ if (index >= 0xC0000000) { limit = env->cpuid_xlevel2; @@ -5272,8 +6269,25 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, /* cache info: needed for Core compatibility */ if (cpu->cache_info_passthrough) { host_cpuid(index, count, eax, ebx, ecx, edx); - /* QEMU gives out its own APIC IDs, never pass down bits 31..26. */ - *eax &= ~0xFC000000; + /* + * QEMU gives out its own APIC IDs, never pass down bits 31..26. + * Update the cache topo bits 25..14, according to the guest + * vCPU topology instead of the host pCPU topology. 
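In CPUID leaf 4, EAX[25:14] encodes "maximum addressable logical processor IDs sharing this cache" minus one, so the hunk that follows rounds the guest's sharing count up to a power of two via an APIC-ID width. A minimal standalone sketch of that encoding (illustrative names, not QEMU code):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative only: rebuild EAX[25:14] of CPUID leaf 4 from an
 * APIC-ID width, mirroring the ((1 << width) - 1) << 14 pattern the
 * patch uses (smt_width for L1/L2, die_offset for L3).
 */
static uint32_t set_cache_sharing_field(uint32_t eax, unsigned apic_id_width)
{
    eax &= ~0xFFFFC000u;                       /* clear bits 31..14 */
    eax |= ((1u << apic_id_width) - 1u) << 14; /* sharing count - 1 */
    return eax;
}

int main(void)
{
    /* 2 threads per core -> SMT width 1 -> field value 1 (2 sharers) */
    printf("0x%08x\n", set_cache_sharing_field(0, 1));
    return 0;
}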
+ */ + *eax &= ~0xFFFFC000; + switch (count) { + case 0: /* L1 dcache info */ + case 1: /* L1 icache info */ + case 2: /* L2 cache info */ + *eax |= ((1 << smt_width) - 1) << 14; + break; + case 3: /* L3 cache info */ + *eax |= ((1 << die_offset) - 1) << 14; + break; + default: /* end of info */ + *eax = *ebx = *ecx = *edx = 0; + break; + } if ((*eax & 31) && cs->nr_cores > 1) { *eax |= (cs->nr_cores - 1) << 26; } @@ -5298,7 +6312,6 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, eax, ebx, ecx, edx); break; case 3: /* L3 cache info */ - die_offset = apicid_die_offset(&topo_info); if (cpu->enable_l3_cache) { encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache, (1 << die_offset), cs->nr_cores, @@ -5358,9 +6371,14 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } } else if (count == 1) { *eax = env->features[FEAT_7_1_EAX]; + *edx = env->features[FEAT_7_1_EDX]; + *ebx = 0; + *ecx = 0; + } else if (count == 2) { + *edx = env->features[FEAT_7_2_EDX]; + *eax = 0; *ebx = 0; *ecx = 0; - *edx = 0; } else { *eax = 0; *ebx = 0; @@ -5470,23 +6488,36 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } if (count == 0) { - *ecx = xsave_area_size(x86_cpu_xsave_components(cpu)); - *eax = env->features[FEAT_XSAVE_COMP_LO]; - *edx = env->features[FEAT_XSAVE_COMP_HI]; + *ecx = xsave_area_size(x86_cpu_xsave_xcr0_components(cpu), false); + *eax = env->features[FEAT_XSAVE_XCR0_LO]; + *edx = env->features[FEAT_XSAVE_XCR0_HI]; /* * The initial value of xcr0 and ebx == 0, On host without kvm * commit 412a3c41(e.g., CentOS 6), the ebx's value always == 0 * even though the guest updates xcr0, this will crash some legacy guest * (e.g., CentOS 6), so set ebx == ecx to work around it. */ - *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0); + *ebx = kvm_enabled() ? *ecx : xsave_area_size(env->xcr0, false); } else if (count == 1) { + uint64_t xstate = x86_cpu_xsave_xcr0_components(cpu) | + x86_cpu_xsave_xss_components(cpu); + *eax = env->features[FEAT_XSAVE]; + *ebx = xsave_area_size(xstate, true); + *ecx = env->features[FEAT_XSAVE_XSS_LO]; + *edx = env->features[FEAT_XSAVE_XSS_HI]; } else if (count < ARRAY_SIZE(x86_ext_save_areas)) { - if ((x86_cpu_xsave_components(cpu) >> count) & 1) { - const ExtSaveArea *esa = &x86_ext_save_areas[count]; + const ExtSaveArea *esa = &x86_ext_save_areas[count]; + + if (x86_cpu_xsave_xcr0_components(cpu) & (1ULL << count)) { *eax = esa->size; *ebx = esa->offset; + *ecx = esa->ecx & + (ESA_FEATURE_ALIGN64_MASK | ESA_FEATURE_XFD_MASK); + } else if (x86_cpu_xsave_xss_components(cpu) & (1ULL << count)) { + *eax = esa->size; + *ebx = 0; + *ecx = 1; + } } break; @@ -5537,8 +6568,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } else { *eax &= env->features[FEAT_SGX_12_1_EAX]; *ebx &= 0; /* ebx reserve */ - *ecx &= env->features[FEAT_XSAVE_COMP_LO]; - *edx &= env->features[FEAT_XSAVE_COMP_HI]; + *ecx &= env->features[FEAT_XSAVE_XCR0_LO]; + *edx &= env->features[FEAT_XSAVE_XCR0_HI]; /* FP and SSE are always allowed regardless of XSAVE/XCR0. 
*/ *ecx |= XSTATE_FP_MASK | XSTATE_SSE_MASK; @@ -5575,6 +6606,43 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; } + case 0x1D: { + /* AMX TILE, for now hardcoded for Sapphire Rapids */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE)) { + break; + } + + if (count == 0) { + /* Highest numbered palette subleaf */ + *eax = INTEL_AMX_TILE_MAX_SUBLEAF; + } else if (count == 1) { + *eax = INTEL_AMX_TOTAL_TILE_BYTES | + (INTEL_AMX_BYTES_PER_TILE << 16); + *ebx = INTEL_AMX_BYTES_PER_ROW | (INTEL_AMX_TILE_MAX_NAMES << 16); + *ecx = INTEL_AMX_TILE_MAX_ROWS; + } + break; + } + case 0x1E: { + /* AMX TMUL, for now hardcoded for Sapphire Rapids */ + *eax = 0; + *ebx = 0; + *ecx = 0; + *edx = 0; + if (!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE)) { + break; + } + + if (count == 0) { + /* TMUL information: maximum K and N dimensions */ + *ebx = INTEL_AMX_TMUL_MAX_K | (INTEL_AMX_TMUL_MAX_N << 8); + } + break; + } case 0x40000000: /* * CPUID code in kvm_arch_init_vcpu() ignores stuff @@ -5705,9 +6773,31 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, } break; case 0x8000001D: + /* Populate AMD Processor Cache Information */ *eax = 0; if (cpu->cache_info_passthrough) { host_cpuid(index, count, eax, ebx, ecx, edx); + + /* + * Clear bits[25:14] and then update them based on the guest + * vCPU topology, like what we do in encode_cache_cpuid8000001d + * when cache_info_passthrough is not enabled. + */ + *eax &= ~0x03FFC000; + switch (count) { + case 0: /* L1 dcache info */ + case 1: /* L1 icache info */ + case 2: /* L2 cache info */ + *eax |= ((topo_info.threads_per_core - 1) << 14); + break; + case 3: /* L3 cache info */ + *eax |= ((topo_info.cores_per_die * + topo_info.threads_per_core - 1) << 14); + break; + default: /* end of info */ + *eax = *ebx = *ecx = *edx = 0; + break; + } break; } switch (count) { @@ -5773,6 +6863,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, *ebx |= sev_get_reduced_phys_bits() << 6; } break; + case 0x80000021: + *eax = env->features[FEAT_8000_0021_EAX]; + *ebx = *ecx = *edx = 0; + break; default: /* reserved values: zero */ *eax = 0; @@ -5884,6 +6978,9 @@ static void x86_cpu_reset(DeviceState *dev) } for (i = 2; i < ARRAY_SIZE(x86_ext_save_areas); i++) { const ExtSaveArea *esa = &x86_ext_save_areas[i]; + if (!((1 << i) & CPUID_XSTATE_XCR0_MASK)) { + continue; + } if (env->features[esa->feature] & esa->bits) { xcr0 |= 1ull << i; } @@ -5917,6 +7014,7 @@ static void x86_cpu_reset(DeviceState *dev) env->exception_has_payload = false; env->exception_payload = 0; env->nmi_injected = false; + env->triple_fault_pending = false; #if !defined(CONFIG_USER_ONLY) /* We hard-wire the BSP to the first CPU. 
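The INTEL_AMX_* constants used in the 0x1D/0x1E leaves above are hardcoded for Sapphire Rapids. Assuming the usual palette-1 values (8 tiles of 1 KiB, 16 rows of 64 bytes), they have to stay consistent with the 8 KiB XTILEDATA save area declared later in this patch; a standalone consistency check:

#include <assert.h>

/* Assumed Sapphire Rapids palette-1 geometry (CPUID 0x1D, subleaf 1). */
#define AMX_TOTAL_TILE_BYTES 8192
#define AMX_BYTES_PER_TILE   1024
#define AMX_BYTES_PER_ROW    64
#define AMX_TILE_MAX_NAMES   8
#define AMX_TILE_MAX_ROWS    16

int main(void)
{
    /* Tile count times tile size must equal the total tile storage... */
    assert(AMX_BYTES_PER_TILE * AMX_TILE_MAX_NAMES == AMX_TOTAL_TILE_BYTES);
    /* ...and row count times row size must equal one tile. */
    assert(AMX_BYTES_PER_ROW * AMX_TILE_MAX_ROWS == AMX_BYTES_PER_TILE);
    return 0;
}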
*/ apic_designate_bsp(cpu->apic_state, s->cpu_index == 0); @@ -5997,10 +7095,13 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) CPUX86State *env = &cpu->env; int i; uint64_t mask; + static bool request_perm; if (!(env->features[FEAT_1_ECX] & CPUID_EXT_XSAVE)) { - env->features[FEAT_XSAVE_COMP_LO] = 0; - env->features[FEAT_XSAVE_COMP_HI] = 0; + env->features[FEAT_XSAVE_XCR0_LO] = 0; + env->features[FEAT_XSAVE_XCR0_HI] = 0; + env->features[FEAT_XSAVE_XSS_LO] = 0; + env->features[FEAT_XSAVE_XSS_HI] = 0; return; } @@ -6012,8 +7113,16 @@ static void x86_cpu_enable_xsave_components(X86CPU *cpu) } } - env->features[FEAT_XSAVE_COMP_LO] = mask; - env->features[FEAT_XSAVE_COMP_HI] = mask >> 32; + /* Only request permission for the first vcpu */ + if (kvm_enabled() && !request_perm) { + kvm_request_xsave_components(cpu, mask); + request_perm = true; + } + + env->features[FEAT_XSAVE_XCR0_LO] = mask & CPUID_XSTATE_XCR0_MASK; + env->features[FEAT_XSAVE_XCR0_HI] = (mask & CPUID_XSTATE_XCR0_MASK) >> 32; + env->features[FEAT_XSAVE_XSS_LO] = mask & CPUID_XSTATE_XSS_MASK; + env->features[FEAT_XSAVE_XSS_HI] = (mask & CPUID_XSTATE_XSS_MASK) >> 32; } /***** Steps involved on loading and filtering CPUID data @@ -6122,6 +7231,8 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) x86_cpu_adjust_feat_level(cpu, FEAT_6_EAX); x86_cpu_adjust_feat_level(cpu, FEAT_7_0_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EAX); + x86_cpu_adjust_feat_level(cpu, FEAT_7_1_EDX); + x86_cpu_adjust_feat_level(cpu, FEAT_7_2_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_EDX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0001_ECX); x86_cpu_adjust_feat_level(cpu, FEAT_8000_0007_EDX); @@ -6163,6 +7274,10 @@ void x86_cpu_expand_features(X86CPU *cpu, Error **errp) x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x8000001F); } + if (env->features[FEAT_8000_0021_EAX]) { + x86_cpu_adjust_level(cpu, &env->cpuid_min_xlevel, 0x80000021); + } + /* SGX requires CPUID[0x12] for EPC enumeration */ if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_SGX) { x86_cpu_adjust_level(cpu, &env->cpuid_min_level, 0x12); @@ -6404,14 +7519,17 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp) /* Cache information initialization */ if (!cpu->legacy_cache) { - if (!xcc->model || !xcc->model->cpudef->cache_info) { + const CPUCaches *cache_info = + x86_cpu_get_versioned_cache_info(cpu, xcc->model); + + if (!xcc->model || !cache_info) { g_autofree char *name = x86_cpu_class_get_model_name(xcc); error_setg(errp, "CPU model '%s' doesn't support legacy-cache=off", name); return; } env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd = - *xcc->model->cpudef->cache_info; + *cache_info; } else { /* Build legacy cache information */ env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache; @@ -6669,6 +7787,23 @@ static bool x86_cpu_get_paging_enabled(const CPUState *cs) return cpu->env.cr[0] & CR0_PG_MASK; } + +/* At present, we check whether the VM is *LARGE*, i.e. whether + * its memory size is more than 4T. + */ +const uint64_t large_vm_mem_size = 0x40000000000UL; +void x86_cpu_adjust_by_ram_size(ram_addr_t ram_size, X86CPU *cpu) +{ + /* If it is not a large VM, we set phys_bits to 42 bits; + * otherwise, we increase phys_bits to 46 bits. 
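The 4T threshold is not arbitrary: 0x40000000000 is exactly 2^42, the limit of a 42-bit physical address space, and 46 bits extend that to 64 TiB. A quick standalone check of the arithmetic:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t large_vm_mem_size = 0x40000000000ULL; /* 4 TiB */

    /* A 42-bit physical address space covers exactly 4 TiB... */
    assert(large_vm_mem_size == (uint64_t)1 << 42);
    /* ...and 46 bits cover 64 TiB, 16x the threshold. */
    assert(((uint64_t)1 << 46) == 16 * large_vm_mem_size);
    return 0;
}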
+ */ + if (ram_size < large_vm_mem_size) { + cpu->phys_bits = DEFAULT_VM_CPU_PHYS_BITS; + } else { + cpu->phys_bits = LARGE_VM_CPU_PHYS_BITS; + cpu->fill_mtrr_mask = true; + } +} #endif /* !CONFIG_USER_ONLY */ static void x86_cpu_set_pc(CPUState *cs, vaddr value) @@ -6862,7 +7997,7 @@ static Property x86_cpu_properties[] = { DEFINE_PROP_UINT32("phys-bits", X86CPU, phys_bits, 0), DEFINE_PROP_BOOL("host-phys-bits", X86CPU, host_phys_bits, false), DEFINE_PROP_UINT8("host-phys-bits-limit", X86CPU, host_phys_bits_limit, 0), - DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, true), + DEFINE_PROP_BOOL("fill-mtrr-mask", X86CPU, fill_mtrr_mask, false), DEFINE_PROP_UINT32("level-func7", X86CPU, env.cpuid_level_func7, UINT32_MAX), DEFINE_PROP_UINT32("level", X86CPU, env.cpuid_level, UINT32_MAX), diff --git a/target/i386/cpu.h b/target/i386/cpu.h index 04f2b790c9fafd9a672b895833dafce082b13ad0..84910db8bb419dd45efd6b33aebf356259a18a16 100644 --- a/target/i386/cpu.h +++ b/target/i386/cpu.h @@ -24,6 +24,7 @@ #include "cpu-qom.h" #include "kvm/hyperv-proto.h" #include "exec/cpu-defs.h" +#include "exec/cpu-common.h" #include "qapi/qapi-types-common.h" /* The x86 has a strong memory model with some store-after-load re-ordering */ @@ -505,6 +506,9 @@ typedef enum X86Seg { #define MSR_VM_HSAVE_PA 0xc0010117 +#define MSR_IA32_XFD 0x000001c4 +#define MSR_IA32_XFD_ERR 0x000001c5 + #define MSR_IA32_BNDCFGS 0x00000d90 #define MSR_IA32_XSS 0x00000da0 #define MSR_IA32_UMWAIT_CONTROL 0xe1 @@ -537,6 +541,8 @@ typedef enum X86Seg { #define XSTATE_ZMM_Hi256_BIT 6 #define XSTATE_Hi16_ZMM_BIT 7 #define XSTATE_PKRU_BIT 9 +#define XSTATE_XTILE_CFG_BIT 17 +#define XSTATE_XTILE_DATA_BIT 18 #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) @@ -547,6 +553,25 @@ typedef enum X86Seg { #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) +#define XSTATE_XTILE_CFG_MASK (1ULL << XSTATE_XTILE_CFG_BIT) +#define XSTATE_XTILE_DATA_MASK (1ULL << XSTATE_XTILE_DATA_BIT) + +#define XSTATE_DYNAMIC_MASK (XSTATE_XTILE_DATA_MASK) + +#define ESA_FEATURE_ALIGN64_BIT 1 +#define ESA_FEATURE_XFD_BIT 2 + +#define ESA_FEATURE_ALIGN64_MASK (1U << ESA_FEATURE_ALIGN64_BIT) +#define ESA_FEATURE_XFD_MASK (1U << ESA_FEATURE_XFD_BIT) + + +/* CPUID feature bits available in XCR0 */ +#define CPUID_XSTATE_XCR0_MASK (XSTATE_FP_MASK | XSTATE_SSE_MASK | \ + XSTATE_YMM_MASK | XSTATE_BNDREGS_MASK | \ + XSTATE_BNDCSR_MASK | XSTATE_OPMASK_MASK | \ + XSTATE_ZMM_Hi256_MASK | \ + XSTATE_Hi16_ZMM_MASK | XSTATE_PKRU_MASK | \ + XSTATE_XTILE_CFG_MASK | XSTATE_XTILE_DATA_MASK) /* CPUID feature words */ typedef enum FeatureWord { @@ -560,14 +585,15 @@ typedef enum FeatureWord { FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ + FEAT_8000_0021_EAX, /* CPUID[8000_0021].EAX */ FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ FEAT_SVM, /* CPUID[8000_000A].EDX */ FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ FEAT_6_EAX, /* CPUID[6].EAX */ - FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ - FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ + FEAT_XSAVE_XCR0_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ + FEAT_XSAVE_XCR0_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ FEAT_ARCH_CAPABILITIES, FEAT_CORE_CAPABILITY, FEAT_PERF_CAPABILITIES, @@ -584,10 
+610,16 @@ typedef enum FeatureWord { FEAT_SGX_12_0_EAX, /* CPUID[EAX=0x12,ECX=0].EAX (SGX) */ FEAT_SGX_12_0_EBX, /* CPUID[EAX=0x12,ECX=0].EBX (SGX MISCSELECT[31:0]) */ FEAT_SGX_12_1_EAX, /* CPUID[EAX=0x12,ECX=1].EAX (SGX ATTRIBUTES[31:0]) */ + FEAT_XSAVE_XSS_LO, /* CPUID[EAX=0xd,ECX=1].ECX */ + FEAT_XSAVE_XSS_HI, /* CPUID[EAX=0xd,ECX=1].EDX */ + FEAT_7_1_EDX, /* CPUID[EAX=7,ECX=1].EDX */ + FEAT_7_2_EDX, /* CPUID[EAX=7,ECX=2].EDX */ FEATURE_WORDS, } FeatureWord; typedef uint64_t FeatureWordArray[FEATURE_WORDS]; +uint64_t x86_cpu_get_supported_feature_word(FeatureWord w, + bool migratable_only); /* cpuid_features bits */ #define CPUID_FP87 (1U << 0) @@ -728,6 +760,7 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_SVM_AVIC (1U << 13) #define CPUID_SVM_V_VMSAVE_VMLOAD (1U << 15) #define CPUID_SVM_VGIF (1U << 16) +#define CPUID_SVM_VNMI (1U << 25) #define CPUID_SVM_SVME_ADDR_CHK (1U << 28) /* Support RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE */ @@ -838,12 +871,20 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_0_EDX_SERIALIZE (1U << 14) /* TSX Suspend Load Address Tracking instruction */ #define CPUID_7_0_EDX_TSX_LDTRK (1U << 16) +/* AMX_BF16 instruction */ +#define CPUID_7_0_EDX_AMX_BF16 (1U << 22) /* AVX512_FP16 instruction */ #define CPUID_7_0_EDX_AVX512_FP16 (1U << 23) +/* AMX tile (two-dimensional register) */ +#define CPUID_7_0_EDX_AMX_TILE (1U << 24) +/* AMX_INT8 instruction */ +#define CPUID_7_0_EDX_AMX_INT8 (1U << 25) /* Speculation Control */ #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Single Thread Indirect Branch Predictors */ #define CPUID_7_0_EDX_STIBP (1U << 27) +/* Flush L1D cache */ +#define CPUID_7_0_EDX_FLUSH_L1D (1U << 28) /* Arch Capabilities */ #define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /* Core Capability */ @@ -855,6 +896,30 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_7_1_EAX_AVX_VNNI (1U << 4) /* AVX512 BFloat16 Instruction */ #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) +/* CMPCCXADD Instructions */ +#define CPUID_7_1_EAX_CMPCCXADD (1U << 7) +/* Fast Zero REP MOVS */ +#define CPUID_7_1_EAX_FZRM (1U << 10) +/* Fast Short REP STOS */ +#define CPUID_7_1_EAX_FSRS (1U << 11) +/* Fast Short REP CMPS/SCAS */ +#define CPUID_7_1_EAX_FSRC (1U << 12) +/* Support Tile Computational Operations on FP16 Numbers */ +#define CPUID_7_1_EAX_AMX_FP16 (1U << 21) +/* Support for VPMADD52[H,L]UQ */ +#define CPUID_7_1_EAX_AVX_IFMA (1U << 23) +/* Support for VPDPB[SU,UU,SS]D[,S] */ +#define CPUID_7_1_EDX_AVX_VNNI_INT8 (1U << 4) +/* AVX NE CONVERT Instructions */ +#define CPUID_7_1_EDX_AVX_NE_CONVERT (1U << 5) +/* PREFETCHIT0/1 Instructions */ +#define CPUID_7_1_EDX_PREFETCHITI (1U << 14) + +/* Do not exhibit MXCSR Configuration Dependent Timing (MCDT) behavior */ +#define CPUID_7_2_EDX_MCDT_NO (1U << 5) + +/* XFD Extend Feature Disabled */ +#define CPUID_D_1_EAX_XFD (1U << 4) /* Packets which contain IP payload have LIP values */ #define CPUID_14_0_ECX_LIP (1U << 31) @@ -871,8 +936,21 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define CPUID_8000_0008_EBX_IBRS (1U << 14) /* Single Thread Indirect Branch Predictors */ #define CPUID_8000_0008_EBX_STIBP (1U << 15) +/* STIBP mode has enhanced performance and may be left always on */ +#define CPUID_8000_0008_EBX_STIBP_ALWAYS_ON (1U << 17) /* Speculative Store Bypass Disable */ #define CPUID_8000_0008_EBX_AMD_SSBD (1U << 24) +/* Predictive Store Forwarding Disable */ +#define CPUID_8000_0008_EBX_AMD_PSFD (1U << 28) + +/* Processor ignores nested data breakpoints */ +#define 
CPUID_8000_0021_EAX_No_NESTED_DATA_BP (1U << 0) +/* LFENCE is always serializing */ +#define CPUID_8000_0021_EAX_LFENCE_ALWAYS_SERIALIZING (1U << 2) +/* Null Selector Clears Base */ +#define CPUID_8000_0021_EAX_NULL_SEL_CLR_BASE (1U << 6) +/* Automatic IBRS */ +#define CPUID_8000_0021_EAX_AUTO_IBRS (1U << 8) #define CPUID_XSAVE_XSAVEOPT (1U << 0) #define CPUID_XSAVE_XSAVEC (1U << 1) @@ -926,6 +1004,11 @@ typedef uint64_t FeatureWordArray[FEATURE_WORDS]; #define MSR_ARCH_CAP_PSCHANGE_MC_NO (1U << 6) #define MSR_ARCH_CAP_TSX_CTRL_MSR (1U << 7) #define MSR_ARCH_CAP_TAA_NO (1U << 8) +#define MSR_ARCH_CAP_SBDR_SSDP_NO (1U << 13) +#define MSR_ARCH_CAP_FBSDP_NO (1U << 14) +#define MSR_ARCH_CAP_PSDP_NO (1U << 15) +#define MSR_ARCH_CAP_FB_CLEAR (1U << 17) +#define MSR_ARCH_CAP_PBRSB_NO (1U << 24) #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) @@ -1343,6 +1426,16 @@ typedef struct XSavePKRU { uint32_t padding; } XSavePKRU; +/* Ext. save area 17: AMX XTILECFG state */ +typedef struct XSaveXTILECFG { + uint8_t xtilecfg[64]; +} XSaveXTILECFG; + +/* Ext. save area 18: AMX XTILEDATA state */ +typedef struct XSaveXTILEDATA { + uint8_t xtiledata[8][1024]; +} XSaveXTILEDATA; + QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); @@ -1350,13 +1443,16 @@ QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); +QEMU_BUILD_BUG_ON(sizeof(XSaveXTILECFG) != 0x40); +QEMU_BUILD_BUG_ON(sizeof(XSaveXTILEDATA) != 0x2000); typedef struct ExtSaveArea { uint32_t feature, bits; uint32_t offset, size; + uint32_t ecx; } ExtSaveArea; -#define XSAVE_STATE_AREA_COUNT (XSTATE_PKRU_BIT + 1) +#define XSAVE_STATE_AREA_COUNT (XSTATE_XTILE_DATA_BIT + 1) extern ExtSaveArea x86_ext_save_areas[XSAVE_STATE_AREA_COUNT]; @@ -1494,6 +1590,10 @@ typedef struct CPUX86State { uint64_t opmask_regs[NB_OPMASK_REGS]; YMMReg zmmh_regs[CPU_NB_REGS]; ZMMReg hi16_zmm_regs[CPU_NB_REGS]; +#ifdef TARGET_X86_64 + uint8_t xtilecfg[64]; + uint8_t xtiledata[8192]; +#endif /* sysenter registers */ uint32_t sysenter_cs; @@ -1579,6 +1679,10 @@ typedef struct CPUX86State { uint64_t msr_rtit_cr3_match; uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; + /* Per-VCPU XFD MSRs */ + uint64_t msr_xfd; + uint64_t msr_xfd_err; + /* exception/interrupt handling */ int error_code; int exception_is_int; @@ -1654,6 +1758,7 @@ typedef struct CPUX86State { uint8_t has_error_code; uint8_t exception_has_payload; uint64_t exception_payload; + uint8_t triple_fault_pending; uint32_t ins_len; uint32_t sipi_vector; bool tsc_valid; @@ -1841,6 +1946,13 @@ struct X86CPU { extern const VMStateDescription vmstate_x86_cpu; #endif +#ifndef CONFIG_USER_ONLY +#define DEFAULT_VM_CPU_PHYS_BITS 42 +#define LARGE_VM_CPU_PHYS_BITS 46 + +void x86_cpu_adjust_by_ram_size(ram_addr_t ram_size, X86CPU *cpu); +#endif + int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, diff --git a/target/i386/host-cpu.c b/target/i386/host-cpu.c index 10f8aba86e5388ef03d38ff0c3272aa471631e0b..5a1bbefa36c2087c474069bb5b2cdbc84489049d 100644 --- a/target/i386/host-cpu.c +++ b/target/i386/host-cpu.c @@ -12,6 +12,7 @@ #include "host-cpu.h" #include "qapi/error.h" #include "sysemu/sysemu.h" +#include "hw/boards.h" /* Note: Only safe for use on x86(-64) hosts */ static uint32_t host_cpu_phys_bits(void) @@ -56,14 +57,14 @@ 
static uint32_t host_cpu_adjust_phys_bits(X86CPU *cpu) uint32_t phys_bits = cpu->phys_bits; static bool warned; - /* - * Print a warning if the user set it to a value that's not the - * host value. - */ - if (phys_bits != host_phys_bits && phys_bits != 0 && + /* adjust x86 cpu phys_bits according to ram_size. */ + x86_cpu_adjust_by_ram_size(current_machine->ram_size, cpu); + + /* Print a warning if the host value is less than the user-set value. */ + if (phys_bits > host_phys_bits && phys_bits != 0 && !warned) { warn_report("Host physical bits (%u)" - " does not match phys-bits property (%u)", + " is less than phys-bits property (%u)", host_phys_bits, phys_bits); warned = true; } diff --git a/target/i386/hvf/x86_task.c b/target/i386/hvf/x86_task.c index 422156128b7ad45c82d6710d642a1b2a862fbc30..1550002341349c21411a1c7f9dd8b288b283b211 100644 --- a/target/i386/hvf/x86_task.c +++ b/target/i386/hvf/x86_task.c @@ -123,7 +123,6 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea load_regs(cpu); struct x86_segment_descriptor curr_tss_desc, next_tss_desc; - int ret; x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR); uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR); uint32_t desc_limit; @@ -139,7 +138,7 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea if (reason == TSR_IDT_GATE && gate_valid) { int dpl; - ret = x86_read_call_gate(cpu, &task_gate_desc, gate); + x86_read_call_gate(cpu, &task_gate_desc, gate); dpl = task_gate_desc.dpl; x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS); @@ -168,11 +167,12 @@ void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int rea x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel); } - if (next_tss_desc.type & 8) - ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); - else + if (next_tss_desc.type & 8) { + task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); + } else { //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); VM_PANIC("task_switch_16"); + } macvm_set_cr0(cpu->hvf->fd, rvmcs(cpu->hvf->fd, VMCS_GUEST_CR0) | CR0_TS); x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg); diff --git a/target/i386/kvm/kvm-cpu.c b/target/i386/kvm/kvm-cpu.c index d95028018e8921bb1d7127861aec06284a609749..7237378a7d4e4121f018bb1977bb88db1f5d8eed 100644 --- a/target/i386/kvm/kvm-cpu.c +++ b/target/i386/kvm/kvm-cpu.c @@ -84,7 +84,7 @@ static void kvm_cpu_max_instance_init(X86CPU *cpu) static void kvm_cpu_xsave_init(void) { static bool first = true; - KVMState *s = kvm_state; + uint32_t eax, ebx, ecx, edx; int i; if (!first) { @@ -99,12 +99,18 @@ static void kvm_cpu_xsave_init(void) for (i = XSTATE_SSE_BIT + 1; i < XSAVE_STATE_AREA_COUNT; i++) { ExtSaveArea *esa = &x86_ext_save_areas[i]; - if (esa->size) { - int sz = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EAX); - if (sz != 0) { - assert(esa->size == sz); - esa->offset = kvm_arch_get_supported_cpuid(s, 0xd, i, R_EBX); - } + if (!esa->size) { + continue; + } + if ((x86_cpu_get_supported_feature_word(esa->feature, false) & esa->bits) + != esa->bits) { + continue; + } + host_cpuid(0xd, i, &eax, &ebx, &ecx, &edx); + if (eax != 0) { + assert(esa->size == eax); + esa->offset = ebx; + esa->ecx = ecx; } } } @@ -165,7 +171,7 @@ static void kvm_cpu_instance_init(CPUState *cs) /* only applies to builtin_x86_defs cpus */ if (!kvm_irqchip_in_kernel()) { x86_cpu_change_kvm_default("x2apic", "off"); - } else if 
(kvm_irqchip_is_split() && kvm_enable_x2apic()) { + } else if (kvm_irqchip_is_split()) { x86_cpu_change_kvm_default("kvm-msi-ext-dest-id", "on"); } diff --git a/target/i386/kvm/kvm-stub.c b/target/i386/kvm/kvm-stub.c index f6e7e4466e1ab738e4a7dcca08a1ac9c1f60dac7..e052f1c7b0ef25f5db673e5717bfe82c94807496 100644 --- a/target/i386/kvm/kvm-stub.c +++ b/target/i386/kvm/kvm-stub.c @@ -44,3 +44,8 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp) { abort(); } + +void kvm_set_max_apic_id(uint32_t max_apic_id) +{ + return; +} diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 5a698bde19ace17be8d3eff255cfa82fa6b4740e..1c019f2f980a1f5ef20fc48b1cc6e36e7a82ea5e 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -15,8 +15,10 @@ #include "qemu/osdep.h" #include "qapi/qapi-events-run-state.h" #include "qapi/error.h" +#include "qapi/visitor.h" #include #include +#include #include #include "standard-headers/asm-x86/kvm_para.h" @@ -122,9 +124,11 @@ static uint32_t num_architectural_pmu_gp_counters; static uint32_t num_architectural_pmu_fixed_counters; static int has_xsave; +static int has_xsave2; static int has_xcrs; static int has_pit_state2; static int has_exception_payload; +static int has_triple_fault_event; static bool has_msr_mcg_ext_ctl; @@ -134,6 +138,7 @@ static struct kvm_msr_list *kvm_feature_msrs; #define BUS_LOCK_SLICE_TIME 1000000000ULL /* ns */ static RateLimit bus_lock_ratelimit_ctrl; +static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value); int kvm_has_pit_state2(void) { @@ -149,7 +154,7 @@ bool kvm_has_adjust_clock_stable(void) { int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK); - return (ret == KVM_CLOCK_TSC_STABLE); + return (ret & KVM_CLOCK_TSC_STABLE); } bool kvm_has_adjust_clock(void) @@ -204,28 +209,21 @@ static int kvm_get_tsc(CPUState *cs) { X86CPU *cpu = X86_CPU(cs); CPUX86State *env = &cpu->env; - struct { - struct kvm_msrs info; - struct kvm_msr_entry entries[1]; - } msr_data = {}; + uint64_t value; int ret; if (env->tsc_valid) { return 0; } - memset(&msr_data, 0, sizeof(msr_data)); - msr_data.info.nmsrs = 1; - msr_data.entries[0].index = MSR_IA32_TSC; env->tsc_valid = !runstate_is_running(); - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); + ret = kvm_get_one_msr(cpu, MSR_IA32_TSC, &value); if (ret < 0) { return ret; } - assert(ret == 1); - env->tsc = msr_data.entries[0].data; + env->tsc = value; return 0; } @@ -346,7 +344,8 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, { struct kvm_cpuid2 *cpuid; uint32_t ret = 0; - uint32_t cpuid_1_edx; + uint32_t cpuid_1_edx, unused; + uint64_t bitmask; cpuid = get_supported_cpuid(s); @@ -392,10 +391,20 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, } else if (function == 6 && reg == R_EAX) { ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */ } else if (function == 7 && index == 0 && reg == R_EBX) { + /* Not new instructions, just an optimization. */ + uint32_t ebx; + host_cpuid(7, 0, &unused, &ebx, &unused, &unused); + ret |= ebx & CPUID_7_0_EBX_ERMS; + if (host_tsx_broken()) { ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE); } } else if (function == 7 && index == 0 && reg == R_EDX) { + /* Not new instructions, just an optimization. */ + uint32_t edx; + host_cpuid(7, 0, &unused, &unused, &unused, &edx); + ret |= edx & CPUID_7_0_EDX_FSRM; + /* * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts. 
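Bits such as ERMS and FSRM describe a performance optimization rather than new architectural state, so KVM_GET_SUPPORTED_CPUID does not report them and the hunks above read them straight from the host. Outside QEMU the same probe can be done with the compiler's cpuid helper (GCC/Clang on x86; ERMS is CPUID.(EAX=7,ECX=0):EBX bit 9):

#include <cpuid.h>
#include <stdio.h>

#define BIT_ERMS (1u << 9)  /* CPUID.(EAX=7,ECX=0):EBX.ERMS */

int main(void)
{
    unsigned int eax, ebx, ecx, edx;

    /* Subleaf-aware CPUID read; returns 0 if leaf 7 is unsupported. */
    if (__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx)) {
        printf("host ERMS: %s\n", (ebx & BIT_ERMS) ? "yes" : "no");
    }
    return 0;
}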
* We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is @@ -404,6 +413,43 @@ uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, if (!has_msr_arch_capabs) { ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES; } + } else if (function == 7 && index == 1 && reg == R_EAX) { + /* Not new instructions, just an optimization. */ + uint32_t eax; + host_cpuid(7, 1, &eax, &unused, &unused, &unused); + ret |= eax & (CPUID_7_1_EAX_FZRM | CPUID_7_1_EAX_FSRS | CPUID_7_1_EAX_FSRC); + } else if (function == 7 && index == 2 && reg == R_EDX) { + uint32_t edx; + host_cpuid(7, 2, &unused, &unused, &unused, &edx); + ret |= edx & CPUID_7_2_EDX_MCDT_NO; + } else if (function == 0xd && index == 0 && + (reg == R_EAX || reg == R_EDX)) { + /* + * The value returned by KVM_GET_SUPPORTED_CPUID does not include + * features that still have to be enabled with the arch_prctl + * system call. QEMU needs the full value, which is retrieved + * with KVM_GET_DEVICE_ATTR. + */ + struct kvm_device_attr attr = { + .group = 0, + .attr = KVM_X86_XCOMP_GUEST_SUPP, + .addr = (unsigned long) &bitmask + }; + + bool sys_attr = kvm_check_extension(s, KVM_CAP_SYS_ATTRIBUTES); + if (!sys_attr) { + return ret; + } + + int rc = kvm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr); + if (rc < 0) { + if (rc != -ENXIO) { + warn_report("KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) " + "error: %d", rc); + } + return ret; + } + ret = (reg == R_EAX) ? bitmask : bitmask >> 32; } else if (function == 0x80000001 && reg == R_ECX) { /* * It's safe to enable TOPOEXT even if it's not returned by @@ -1406,7 +1452,7 @@ static int hyperv_fill_cpuids(CPUState *cs, c->edx = cpu->hyperv_limits[2]; if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) { - __u32 function; + uint32_t function; /* Create zeroed 0x40000006..0x40000009 leaves */ for (function = HV_CPUID_IMPLEMENT_LIMITS + 1; @@ -1477,21 +1523,14 @@ static int hyperv_init_vcpu(X86CPU *cpu) * the kernel doesn't support setting vp_index; assert that its value * is in sync */ - struct { - struct kvm_msrs info; - struct kvm_msr_entry entries[1]; - } msr_data = { - .info.nmsrs = 1, - .entries[0].index = HV_X64_MSR_VP_INDEX, - }; + uint64_t value; - ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data); + ret = kvm_get_one_msr(cpu, HV_X64_MSR_VP_INDEX, &value); if (ret < 0) { return ret; } - assert(ret == 1); - if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) { + if (value != hyperv_vp_index(CPU(cpu))) { error_report("kernel's vp_index != QEMU's vp_index"); return -ENXIO; } @@ -1564,6 +1603,50 @@ static Error *invtsc_mig_blocker; #define KVM_MAX_CPUID_ENTRIES 100 +static void kvm_init_xsave(CPUX86State *env) +{ + if (has_xsave2) { + env->xsave_buf_len = QEMU_ALIGN_UP(has_xsave2, 4096); + } else if (has_xsave) { + env->xsave_buf_len = sizeof(struct kvm_xsave); + } else { + return; + } + + env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len); + memset(env->xsave_buf, 0, env->xsave_buf_len); + /* + * The allocated storage must be large enough for all of the + * possible XSAVE state components. 
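For dynamic states such as AMX's XTILEDATA, the kernel additionally requires a per-process opt-in before the bits show up as permitted; QEMU issues it later in this patch via kvm_request_xsave_components(). A standalone sketch of the same request from plain userspace (the prctl values and the XTILEDATA bit number are taken from the kernel uapi headers; assumes Linux >= 5.17):

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Values from arch/x86/include/uapi/asm/prctl.h (assumed here). */
#define ARCH_GET_XCOMP_GUEST_PERM 0x1024
#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025
#define XFEATURE_XTILEDATA        18

int main(void)
{
    /* Ask the kernel to let this process's guests use AMX tile data;
     * kernels older than 5.17 fail this call. */
    if (syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, XFEATURE_XTILEDATA)) {
        perror("ARCH_REQ_XCOMP_GUEST_PERM");
        return 1;
    }

    unsigned long mask = 0;
    syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &mask);
    printf("guest xstate permissions: 0x%lx\n", mask);
    return 0;
}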
+ */ + assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) <= + env->xsave_buf_len); +} + +static void kvm_init_nested_state(CPUX86State *env) +{ + struct kvm_vmx_nested_state_hdr *vmx_hdr; + uint32_t size; + + if (!env->nested_state) { + return; + } + + size = env->nested_state->size; + + memset(env->nested_state, 0, size); + env->nested_state->size = size; + + if (cpu_has_vmx(env)) { + env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX; + vmx_hdr = &env->nested_state->hdr.vmx; + vmx_hdr->vmxon_pa = -1ull; + vmx_hdr->vmcs12_pa = -1ull; + } else if (cpu_has_svm(env)) { + env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM; + } +} + int kvm_arch_init_vcpu(CPUState *cs) { struct { @@ -1593,6 +1676,8 @@ int kvm_arch_init_vcpu(CPUState *cs) cpuid_i = 0; + has_xsave2 = kvm_check_extension(cs->kvm_state, KVM_CAP_XSAVE2); + r = kvm_arch_set_tsc_khz(cs); if (r < 0) { return r; @@ -1698,6 +1783,7 @@ int kvm_arch_init_vcpu(CPUState *cs) } case 0x1f: if (env->nr_dies < 2) { + cpuid_i--; break; } /* fallthrough */ @@ -1738,7 +1824,6 @@ int kvm_arch_init_vcpu(CPUState *cs) c = &cpuid_data.entries[cpuid_i++]; } break; - case 0x7: case 0x12: for (j = 0; ; j++) { c->function = i; @@ -1758,7 +1843,10 @@ int kvm_arch_init_vcpu(CPUState *cs) c = &cpuid_data.entries[cpuid_i++]; } break; - case 0x14: { + case 0x7: + case 0x14: + case 0x1d: + case 0x1e: { uint32_t times; c->function = i; @@ -1980,38 +2068,17 @@ int kvm_arch_init_vcpu(CPUState *cs) if (r) { goto fail; } - - if (has_xsave) { - env->xsave_buf_len = sizeof(struct kvm_xsave); - env->xsave_buf = qemu_memalign(4096, env->xsave_buf_len); - memset(env->xsave_buf, 0, env->xsave_buf_len); - - /* - * The allocated storage must be large enough for all of the - * possible XSAVE state components. - */ - assert(kvm_arch_get_supported_cpuid(kvm_state, 0xd, 0, R_ECX) - <= env->xsave_buf_len); - } + kvm_init_xsave(env); max_nested_state_len = kvm_max_nested_state_length(); if (max_nested_state_len > 0) { assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data)); if (cpu_has_vmx(env) || cpu_has_svm(env)) { - struct kvm_vmx_nested_state_hdr *vmx_hdr; - env->nested_state = g_malloc0(max_nested_state_len); env->nested_state->size = max_nested_state_len; - if (cpu_has_vmx(env)) { - env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX; - vmx_hdr = &env->nested_state->hdr.vmx; - vmx_hdr->vmxon_pa = -1ull; - vmx_hdr->vmcs12_pa = -1ull; - } else { - env->nested_state->format = KVM_STATE_NESTED_FORMAT_SVM; - } + kvm_init_nested_state(env); } } @@ -2074,6 +2141,8 @@ void kvm_arch_reset_vcpu(X86CPU *cpu) /* enabled by default */ env->poll_control_msr = 1; + kvm_init_nested_state(env); + sev_es_set_reset_vector(CPU(cpu)); } @@ -2337,6 +2406,16 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + has_triple_fault_event = kvm_check_extension(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT); + if (has_triple_fault_event) { + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_TRIPLE_FAULT_EVENT, 0, true); + if (ret < 0) { + error_report("kvm: Failed to enable triple fault event cap: %s", + strerror(-ret)); + return ret; + } + } + ret = kvm_get_supported_msrs(s); if (ret < 0) { return ret; @@ -2442,6 +2521,21 @@ int kvm_arch_init(MachineState *ms, KVMState *s) } } + if (s->notify_vmexit != NOTIFY_VMEXIT_OPTION_DISABLE && + kvm_check_extension(s, KVM_CAP_X86_NOTIFY_VMEXIT)) { + uint64_t notify_window_flags = + ((uint64_t)s->notify_window << 32) | + KVM_X86_NOTIFY_VMEXIT_ENABLED | + KVM_X86_NOTIFY_VMEXIT_USER; + ret = kvm_vm_enable_cap(s, KVM_CAP_X86_NOTIFY_VMEXIT, 0, 
+ notify_window_flags); + if (ret < 0) { + error_report("kvm: Failed to enable notify vmexit cap: %s", + strerror(-ret)); + return ret; + } + } + return 0; } @@ -2677,6 +2771,25 @@ static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value) return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); } +static int kvm_get_one_msr(X86CPU *cpu, int index, uint64_t *value) +{ + int ret; + struct { + struct kvm_msrs info; + struct kvm_msr_entry entries[1]; + } msr_data = { + .info.nmsrs = 1, + .entries[0].index = index, + }; + + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); + if (ret < 0) { + return ret; + } + assert(ret == 1); + *value = msr_data.entries[0].data; + return ret; +} void kvm_put_apicbase(X86CPU *cpu, uint64_t value) { int ret; @@ -3185,6 +3298,13 @@ static int kvm_put_msrs(X86CPU *cpu, int level) env->msr_ia32_sgxlepubkeyhash[3]); } + if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { + kvm_msr_entry_add(cpu, MSR_IA32_XFD, + env->msr_xfd); + kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, + env->msr_xfd_err); + } + /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see * kvm_put_msr_feature_control. */ } @@ -3240,13 +3360,15 @@ static int kvm_get_xsave(X86CPU *cpu) { CPUX86State *env = &cpu->env; void *xsave = env->xsave_buf; + unsigned long type; int ret; if (!has_xsave) { return kvm_get_fpu(cpu); } - ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave); + type = has_xsave2 ? KVM_GET_XSAVE2 : KVM_GET_XSAVE; + ret = kvm_vcpu_ioctl(CPU(cpu), type, xsave); if (ret < 0) { return ret; } @@ -3323,6 +3445,10 @@ static int kvm_get_sregs(X86CPU *cpu) env->cr[4] = sregs.cr4; env->efer = sregs.efer; + if (sev_es_enabled() && env->efer & MSR_EFER_LME && + env->cr[0] & CR0_PG_MASK) { + env->efer |= MSR_EFER_LMA; + } /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ x86_update_hflags(env); @@ -3535,6 +3661,11 @@ static int kvm_get_msrs(X86CPU *cpu) kvm_msr_entry_add(cpu, MSR_IA32_SGXLEPUBKEYHASH3, 0); } + if (env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD) { + kvm_msr_entry_add(cpu, MSR_IA32_XFD, 0); + kvm_msr_entry_add(cpu, MSR_IA32_XFD_ERR, 0); + } + ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); if (ret < 0) { return ret; @@ -3831,6 +3962,12 @@ static int kvm_get_msrs(X86CPU *cpu) env->msr_ia32_sgxlepubkeyhash[index - MSR_IA32_SGXLEPUBKEYHASH0] = msrs[i].data; break; + case MSR_IA32_XFD: + env->msr_xfd = msrs[i].data; + break; + case MSR_IA32_XFD_ERR: + env->msr_xfd_err = msrs[i].data; + break; } } @@ -3942,6 +4079,11 @@ static int kvm_put_vcpu_events(X86CPU *cpu, int level) } } + if (has_triple_fault_event) { + events.flags |= KVM_VCPUEVENT_VALID_TRIPLE_FAULT; + events.triple_fault.pending = env->triple_fault_pending; + } + return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); } @@ -4011,6 +4153,10 @@ static int kvm_get_vcpu_events(X86CPU *cpu) } } + if (events.flags & KVM_VCPUEVENT_VALID_TRIPLE_FAULT) { + env->triple_fault_pending = events.triple_fault.pending; + } + env->sipi_vector = events.sipi_vector; return 0; @@ -4757,6 +4903,9 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) X86CPU *cpu = X86_CPU(cs); uint64_t code; int ret; + bool ctx_invalid; + char str[256]; + KVMState *state; switch (run->exit_reason) { case KVM_EXIT_HLT: @@ -4812,6 +4961,21 @@ int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) /* already handled in kvm_arch_post_run */ ret = 0; break; + case KVM_EXIT_NOTIFY: + ctx_invalid = !!(run->notify.flags & KVM_NOTIFY_CONTEXT_INVALID); + state = KVM_STATE(current_accel()); + 
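These two settings surface KVM_CAP_X86_NOTIFY_VMEXIT as accelerator options (see the kvm_arch_accel_class_init() added at the end of this file). Assuming the QAPI NotifyVmexitOption enum uses the spellings run, internal-error and disable, a plausible invocation would be:

    qemu-system-x86_64 -accel kvm,notify-vmexit=run,notify-window=0

where notify-window is the number of cycles the guest may run without an open event window before the VM exit fires (0 here is only an example value).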
sprintf(str, "Encounter a notify exit with %svalid context in" + " guest. There can be possible misbehaves in guest." + " Please have a look.", ctx_invalid ? "in" : ""); + if (ctx_invalid || + state->notify_vmexit == NOTIFY_VMEXIT_OPTION_INTERNAL_ERROR) { + warn_report("KVM internal error: %s", str); + ret = -1; + } else { + warn_report_once("KVM: %s", str); + ret = 0; + } + break; default: fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason); ret = -1; @@ -5050,3 +5214,112 @@ bool kvm_arch_cpu_check_are_resettable(void) { return !sev_es_enabled(); } + +#define ARCH_REQ_XCOMP_GUEST_PERM 0x1025 + +void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask) +{ + KVMState *s = kvm_state; + uint64_t supported; + + mask &= XSTATE_DYNAMIC_MASK; + if (!mask) { + return; + } + /* + * Just ignore bits that are not in CPUID[EAX=0xD,ECX=0]. + * ARCH_REQ_XCOMP_GUEST_PERM would fail, and QEMU has warned + * about them already because they are not supported features. + */ + supported = kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EAX); + supported |= (uint64_t)kvm_arch_get_supported_cpuid(s, 0xd, 0, R_EDX) << 32; + mask &= supported; + + while (mask) { + int bit = ctz64(mask); + int rc = syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, bit); + if (rc) { + /* + * Older kernel version (<5.17) do not support + * ARCH_REQ_XCOMP_GUEST_PERM, but also do not return + * any dynamic feature from kvm_arch_get_supported_cpuid. + */ + warn_report("prctl(ARCH_REQ_XCOMP_GUEST_PERM) failure " + "for feature bit %d", bit); + } + mask &= ~BIT_ULL(bit); + } +} + +static int kvm_arch_get_notify_vmexit(Object *obj, Error **errp) +{ + KVMState *s = KVM_STATE(obj); + return s->notify_vmexit; +} + +static void kvm_arch_set_notify_vmexit(Object *obj, int value, Error **errp) +{ + KVMState *s = KVM_STATE(obj); + + if (s->fd != -1) { + error_setg(errp, "Cannot set properties after the accelerator has been initialized"); + return; + } + + s->notify_vmexit = value; +} + +static void kvm_arch_get_notify_window(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + uint32_t value = s->notify_window; + + visit_type_uint32(v, name, &value, errp); +} + +static void kvm_arch_set_notify_window(Object *obj, Visitor *v, + const char *name, void *opaque, + Error **errp) +{ + KVMState *s = KVM_STATE(obj); + Error *error = NULL; + uint32_t value; + + if (s->fd != -1) { + error_setg(errp, "Cannot set properties after the accelerator has been initialized"); + return; + } + + visit_type_uint32(v, name, &value, &error); + if (error) { + error_propagate(errp, error); + return; + } + + s->notify_window = value; +} + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ + object_class_property_add_enum(oc, "notify-vmexit", "NotifyVMexitOption", + &NotifyVmexitOption_lookup, + kvm_arch_get_notify_vmexit, + kvm_arch_set_notify_vmexit); + object_class_property_set_description(oc, "notify-vmexit", + "Enable notify VM exit"); + + object_class_property_add(oc, "notify-window", "uint32", + kvm_arch_get_notify_window, + kvm_arch_set_notify_window, + NULL, NULL); + object_class_property_set_description(oc, "notify-window", + "Clock cycles without an event window " + "after which a notification VM exit occurs"); +} + +void kvm_set_max_apic_id(uint32_t max_apic_id) +{ + kvm_vm_enable_cap(kvm_state, KVM_CAP_MAX_VCPU_ID, 0, max_apic_id); +} diff --git a/target/i386/kvm/kvm_i386.h b/target/i386/kvm/kvm_i386.h index 
a978509d507fa3a3333ca92580c7497ce61ea783..58590138e5ee9dbe06a76e201fc6be27d06b4fd5 100644 --- a/target/i386/kvm/kvm_i386.h +++ b/target/i386/kvm/kvm_i386.h @@ -52,5 +52,8 @@ bool kvm_hyperv_expand_features(X86CPU *cpu, Error **errp); uint64_t kvm_swizzle_msi_ext_dest_id(uint64_t address); bool kvm_enable_sgx_provisioning(KVMState *s); +void kvm_request_xsave_components(X86CPU *cpu, uint64_t mask); + +void kvm_set_max_apic_id(uint32_t max_apic_id); #endif diff --git a/target/i386/machine.c b/target/i386/machine.c index 83c2b91529bf1ad17a1d24fe0a6cea4b1d08ac35..41cf5c00534c139c664c77dc2d3b1bbf31596a91 100644 --- a/target/i386/machine.c +++ b/target/i386/machine.c @@ -1455,6 +1455,67 @@ static const VMStateDescription vmstate_msr_intel_sgx = { } }; +static bool xfd_msrs_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_XSAVE] & CPUID_D_1_EAX_XFD); +} + +static const VMStateDescription vmstate_msr_xfd = { + .name = "cpu/msr_xfd", + .version_id = 1, + .minimum_version_id = 1, + .needed = xfd_msrs_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT64(env.msr_xfd, X86CPU), + VMSTATE_UINT64(env.msr_xfd_err, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + +#ifdef TARGET_X86_64 +static bool amx_xtile_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return !!(env->features[FEAT_7_0_EDX] & CPUID_7_0_EDX_AMX_TILE); +} + +static const VMStateDescription vmstate_amx_xtile = { + .name = "cpu/intel_amx_xtile", + .version_id = 1, + .minimum_version_id = 1, + .needed = amx_xtile_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8_ARRAY(env.xtilecfg, X86CPU, 64), + VMSTATE_UINT8_ARRAY(env.xtiledata, X86CPU, 8192), + VMSTATE_END_OF_LIST() + } +}; +#endif + +static bool triple_fault_needed(void *opaque) +{ + X86CPU *cpu = opaque; + CPUX86State *env = &cpu->env; + + return env->triple_fault_pending; +} + +static const VMStateDescription vmstate_triple_fault = { + .name = "cpu/triple_fault", + .version_id = 1, + .minimum_version_id = 1, + .needed = triple_fault_needed, + .fields = (VMStateField[]) { + VMSTATE_UINT8(env.triple_fault_pending, X86CPU), + VMSTATE_END_OF_LIST() + } +}; + const VMStateDescription vmstate_x86_cpu = { .name = "cpu", .version_id = 12, @@ -1593,6 +1654,11 @@ const VMStateDescription vmstate_x86_cpu = { #endif &vmstate_msr_tsx_ctrl, &vmstate_msr_intel_sgx, + &vmstate_msr_xfd, +#ifdef TARGET_X86_64 + &vmstate_amx_xtile, +#endif + &vmstate_triple_fault, NULL } }; diff --git a/target/i386/ops_sse.h b/target/i386/ops_sse.h index 6f1fc174b3243a8fc70709e0f046182c699da9a4..898b7e42926efa6bb421142761581e4c4b642bb0 100644 --- a/target/i386/ops_sse.h +++ b/target/i386/ops_sse.h @@ -899,7 +899,7 @@ static inline uint64_t helper_extrq(uint64_t src, int shift, int len) void helper_extrq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1), s->ZMM_B(0)); + d->ZMM_Q(0) = helper_extrq(d->ZMM_Q(0), s->ZMM_B(1) & 63, s->ZMM_B(0) & 63); } void helper_extrq_i(CPUX86State *env, ZMMReg *d, int index, int length) @@ -921,7 +921,7 @@ static inline uint64_t helper_insertq(uint64_t src, int shift, int len) void helper_insertq_r(CPUX86State *env, ZMMReg *d, ZMMReg *s) { - d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9), s->ZMM_B(8)); + d->ZMM_Q(0) = helper_insertq(s->ZMM_Q(0), s->ZMM_B(9) & 63, s->ZMM_B(8) & 63); } void helper_insertq_i(CPUX86State *env, ZMMReg *d, int index, int length) diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c index 
e9e145154090c93735f783e023ad1e51147b704a..e0716f4049bce4cc88e204d3853baacf5f38d8fd 100644 --- a/target/i386/tcg/translate.c +++ b/target/i386/tcg/translate.c @@ -2282,6 +2282,31 @@ static void gen_ldst_modrm(CPUX86State *env, DisasContext *s, int modrm, } } +static target_ulong insn_get_addr(CPUX86State *env, DisasContext *s, MemOp ot) +{ + target_ulong ret; + + switch (ot) { + case MO_8: + ret = x86_ldub_code(env, s); + break; + case MO_16: + ret = x86_lduw_code(env, s); + break; + case MO_32: + ret = x86_ldl_code(env, s); + break; +#ifdef TARGET_X86_64 + case MO_64: + ret = x86_ldq_code(env, s); + break; +#endif + default: + g_assert_not_reached(); + } + return ret; +} + static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, MemOp ot) { uint32_t ret; @@ -2551,7 +2576,7 @@ static void gen_enter(DisasContext *s, int esp_addend, int level) } /* Copy the FrameTemp value to EBP. */ - gen_op_mov_reg_v(s, a_ot, R_EBP, s->T1); + gen_op_mov_reg_v(s, d_ot, R_EBP, s->T1); /* Compute the final value of ESP. */ tcg_gen_subi_tl(s->T1, s->T1, esp_addend + size * level); @@ -4587,9 +4612,11 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) switch (b) { case 0xf3: prefixes |= PREFIX_REPZ; + prefixes &= ~PREFIX_REPNZ; goto next_byte; case 0xf2: prefixes |= PREFIX_REPNZ; + prefixes &= ~PREFIX_REPZ; goto next_byte; case 0xf0: prefixes |= PREFIX_LOCK; @@ -5703,16 +5730,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) target_ulong offset_addr; ot = mo_b_d(b, dflag); - switch (s->aflag) { -#ifdef TARGET_X86_64 - case MO_64: - offset_addr = x86_ldq_code(env, s); - break; -#endif - default: - offset_addr = insn_get(env, s, s->aflag); - break; - } + offset_addr = insn_get_addr(env, s, s->aflag); tcg_gen_movi_tl(s->A0, offset_addr); gen_add_A0_ds_seg(s); if ((b & 2) == 0) { @@ -7773,7 +7791,7 @@ static target_ulong disas_insn(DisasContext *s, CPUState *cpu) case 0x108: /* invd */ case 0x109: /* wbinvd */ if (check_cpl0(s)) { - gen_svm_check_intercept(s, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD); + gen_svm_check_intercept(s, (b & 1) ? 
SVM_EXIT_WBINVD : SVM_EXIT_INVD); /* nothing to do */ } break; diff --git a/target/i386/xsave_helper.c b/target/i386/xsave_helper.c index ac61a963440627e6d61a647a772a734c971c0a21..996e9f3bfef53b14a55963db4ece3254bb33b470 100644 --- a/target/i386/xsave_helper.c +++ b/target/i386/xsave_helper.c @@ -126,6 +126,20 @@ void x86_cpu_xsave_all_areas(X86CPU *cpu, void *buf, uint32_t buflen) memcpy(pkru, &env->pkru, sizeof(env->pkru)); } + + e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT]; + if (e->size && e->offset) { + XSaveXTILECFG *tilecfg = buf + e->offset; + + memcpy(tilecfg, &env->xtilecfg, sizeof(env->xtilecfg)); + } + + e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT]; + if (e->size && e->offset && buflen >= e->size + e->offset) { + XSaveXTILEDATA *tiledata = buf + e->offset; + + memcpy(tiledata, &env->xtiledata, sizeof(env->xtiledata)); + } #endif } @@ -247,5 +261,19 @@ void x86_cpu_xrstor_all_areas(X86CPU *cpu, const void *buf, uint32_t buflen) pkru = buf + e->offset; memcpy(&env->pkru, pkru, sizeof(env->pkru)); } + + e = &x86_ext_save_areas[XSTATE_XTILE_CFG_BIT]; + if (e->size && e->offset) { + const XSaveXTILECFG *tilecfg = buf + e->offset; + + memcpy(&env->xtilecfg, tilecfg, sizeof(env->xtilecfg)); + } + + e = &x86_ext_save_areas[XSTATE_XTILE_DATA_BIT]; + if (e->size && e->offset && buflen >= e->size + e->offset) { + const XSaveXTILEDATA *tiledata = buf + e->offset; + + memcpy(&env->xtiledata, tiledata, sizeof(env->xtiledata)); + } #endif } diff --git a/target/loongarch64/Kconfig b/target/loongarch64/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..46b26b1a85715e779672bea93152a3c62c170fe2 --- /dev/null +++ b/target/loongarch64/Kconfig @@ -0,0 +1,2 @@ +config LOONGARCH64 + bool diff --git a/target/loongarch64/arch_dump.c b/target/loongarch64/arch_dump.c new file mode 100644 index 0000000000000000000000000000000000000000..adce817d54c1cd34dbc5e68325165cfc0081814e --- /dev/null +++ b/target/loongarch64/arch_dump.c @@ -0,0 +1,179 @@ +/* + * Support for writing ELF notes for the LoongArch architecture + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
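The note-writing code that follows relies on standard ELF note padding: n_namesz counts the terminating NUL ("CORE" gives 5), but the name field itself is padded to a 4-byte multiple (8 here). A standalone check of those offsets (Linux <elf.h>):

#include <assert.h>
#include <elf.h>
#include <stddef.h>

/* Mirror of the note header layout used below: a 12-byte Elf64_Nhdr
 * followed by a 4-byte-aligned name, then the descriptor payload. */
struct note_hdr {
    Elf64_Nhdr hdr;
    char name[8];   /* align_up(sizeof("CORE"), 4) */
};

int main(void)
{
    assert(sizeof(Elf64_Nhdr) == 12);               /* 3 x Elf64_Word */
    assert(offsetof(struct note_hdr, name) == 12);  /* name right after hdr */
    assert(sizeof(struct note_hdr) == 20);          /* header + padded name */
    return 0;
}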
+ * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "elf.h" +#include "sysemu/dump.h" +#include "internal.h" + +/* struct user_pt_regs from arch/loongarch/include/uapi/asm/ptrace.h */ +struct loongarch_user_regs { + uint64_t gpr[32]; + uint64_t lo; + uint64_t hi; + uint64_t csr_era; + uint64_t csr_badvaddr; + uint64_t csr_crmd; + uint64_t csr_ecfg; + uint64_t pad[7]; +} QEMU_PACKED; + +QEMU_BUILD_BUG_ON(sizeof(struct loongarch_user_regs) != 360); + +/* struct elf_prstatus from include/uapi/linux/elfcore.h */ +struct loongarch_elf_prstatus { + char pad1[32]; /* 32 == offsetof(struct elf_prstatus, pr_pid) */ + uint32_t pr_pid; + /* + * 76 == offsetof(struct elf_prstatus, pr_reg) - + * offsetof(struct elf_prstatus, pr_ppid) + */ + char pad2[76]; + struct loongarch_user_regs pr_reg; + uint32_t pr_fpvalid; + char pad3[4]; +} QEMU_PACKED; + +QEMU_BUILD_BUG_ON(sizeof(struct loongarch_elf_prstatus) != 480); + +/* + * struct user_fpsimd_state from arch/arm64/include/uapi/asm/ptrace.h + * + * While the vregs member of user_fpsimd_state is of type __uint128_t, + * QEMU uses an array of uint64_t, where the high half of the 128-bit + * value is always in the 2n+1'th index. Thus we also break the 128- + * bit values into two halves in this reproduction of user_fpsimd_state. + */ + +struct loongarch_fpu_struct { + uint64_t fpr[32]; + unsigned int fir; + unsigned int fcsr; +} QEMU_PACKED; + +QEMU_BUILD_BUG_ON(sizeof(struct loongarch_fpu_struct) != 264); + +struct loongarch_note { + Elf64_Nhdr hdr; + char name[8]; /* align_up(sizeof("CORE"), 4) */ + union + { + struct loongarch_elf_prstatus prstatus; + struct loongarch_fpu_struct fpu; + }; +} QEMU_PACKED; + +#define LOONGARCH_NOTE_HEADER_SIZE offsetof(struct loongarch_note, prstatus) +#define LOONGARCH_PRSTATUS_NOTE_SIZE \ + (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_elf_prstatus)) +#define LOONGARCH_PRFPREG_NOTE_SIZE \ + (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_fpu_struct)) + +static void loongarch_note_init(struct loongarch_note *note, DumpState *s, + const char *name, Elf64_Word namesz, + Elf64_Word type, Elf64_Word descsz) +{ + memset(note, 0, sizeof(*note)); + + note->hdr.n_namesz = cpu_to_dump32(s, namesz); + note->hdr.n_descsz = cpu_to_dump32(s, descsz); + note->hdr.n_type = cpu_to_dump32(s, type); + + memcpy(note->name, name, namesz); +} + +static int loongarch_write_elf64_fprpreg(WriteCoreDumpFunction f, + CPULOONGARCHState *env, int cpuid, + DumpState *s) +{ + struct loongarch_note note; + int ret, i; + + loongarch_note_init(¬e, s, "CORE", 5, NT_PRFPREG, sizeof(note.fpu)); + + note.fpu.fcsr = cpu_to_dump64(s, env->active_fpu.fcsr0); + + for (i = 0; i < 32; ++i) { + note.fpu.fpr[i] = cpu_to_dump64(s, env->active_fpu.fpr[i].fd); + } + + ret = f(¬e, LOONGARCH_PRFPREG_NOTE_SIZE, s); + if (ret < 0) { + return -1; + } + + return 0; +} + +int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs, + int cpuid, void *opaque) +{ + struct loongarch_note note; + CPULOONGARCHState *env = &LOONGARCH_CPU(cs)->env; + DumpState *s = opaque; + int ret, i; + + loongarch_note_init(¬e, s, "CORE", 5, NT_PRSTATUS, + sizeof(note.prstatus)); + + note.prstatus.pr_pid = cpu_to_dump32(s, cpuid); + note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1); + + for (i = 0; i < 32; ++i) { + note.prstatus.pr_reg.gpr[i] = cpu_to_dump64(s, env->active_tc.gpr[i]); + } + note.prstatus.pr_reg.csr_era = cpu_to_dump64(s, env->CSR_ERA); + note.prstatus.pr_reg.csr_badvaddr = cpu_to_dump64(s, env->CSR_BADV); + note.prstatus.pr_reg.csr_crmd = 
+struct loongarch_note {
+    Elf64_Nhdr hdr;
+    char name[8]; /* align_up(sizeof("CORE"), 4) */
+    union
+    {
+        struct loongarch_elf_prstatus prstatus;
+        struct loongarch_fpu_struct fpu;
+    };
+} QEMU_PACKED;
+
+#define LOONGARCH_NOTE_HEADER_SIZE offsetof(struct loongarch_note, prstatus)
+#define LOONGARCH_PRSTATUS_NOTE_SIZE \
+    (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_elf_prstatus))
+#define LOONGARCH_PRFPREG_NOTE_SIZE \
+    (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_fpu_struct))
+
+static void loongarch_note_init(struct loongarch_note *note, DumpState *s,
+                                const char *name, Elf64_Word namesz,
+                                Elf64_Word type, Elf64_Word descsz)
+{
+    memset(note, 0, sizeof(*note));
+
+    note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+    note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+    note->hdr.n_type = cpu_to_dump32(s, type);
+
+    memcpy(note->name, name, namesz);
+}
+
+static int loongarch_write_elf64_fprpreg(WriteCoreDumpFunction f,
+                                         CPULOONGARCHState *env, int cpuid,
+                                         DumpState *s)
+{
+    struct loongarch_note note;
+    int ret, i;
+
+    loongarch_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.fpu));
+
+    note.fpu.fcsr = cpu_to_dump32(s, env->active_fpu.fcsr0);
+
+    for (i = 0; i < 32; ++i) {
+        note.fpu.fpr[i] = cpu_to_dump64(s, env->active_fpu.fpr[i].fd);
+    }
+
+    ret = f(&note, LOONGARCH_PRFPREG_NOTE_SIZE, s);
+    if (ret < 0) {
+        return -1;
+    }
+
+    return 0;
+}
+
+int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+                                   int cpuid, void *opaque)
+{
+    struct loongarch_note note;
+    CPULOONGARCHState *env = &LOONGARCH_CPU(cs)->env;
+    DumpState *s = opaque;
+    int ret, i;
+
+    loongarch_note_init(&note, s, "CORE", 5, NT_PRSTATUS,
+                        sizeof(note.prstatus));
+
+    note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+    note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);
+
+    for (i = 0; i < 32; ++i) {
+        note.prstatus.pr_reg.gpr[i] = cpu_to_dump64(s, env->active_tc.gpr[i]);
+    }
+    note.prstatus.pr_reg.csr_era = cpu_to_dump64(s, env->CSR_ERA);
+    note.prstatus.pr_reg.csr_badvaddr = cpu_to_dump64(s, env->CSR_BADV);
+    note.prstatus.pr_reg.csr_crmd = cpu_to_dump64(s, env->CSR_CRMD);
+    note.prstatus.pr_reg.csr_ecfg = cpu_to_dump64(s, env->CSR_ECFG);
+
+    ret = f(&note, LOONGARCH_PRSTATUS_NOTE_SIZE, s);
+    if (ret < 0) {
+        return -1;
+    }
+
+    ret = loongarch_write_elf64_fprpreg(f, env, cpuid, s);
+    if (ret < 0) {
+        return -1;
+    }
+
+    return ret;
+}
+
+int cpu_get_dump_info(ArchDumpInfo *info,
+                      const GuestPhysBlockList *guest_phys_blocks)
+{
+    info->d_machine = EM_LOONGARCH;
+    info->d_endian = ELFDATA2LSB;
+    info->d_class = ELFCLASS64;
+
+    return 0;
+}
+
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+    size_t note_size = 0;
+
+    if (class == ELFCLASS64) {
+        note_size = LOONGARCH_PRSTATUS_NOTE_SIZE + LOONGARCH_PRFPREG_NOTE_SIZE;
+    }
+
+    return note_size * nr_cpus;
+}
diff --git a/target/loongarch64/cpu-csr.h b/target/loongarch64/cpu-csr.h
new file mode 100644
index 0000000000000000000000000000000000000000..278a66c395ec837327a38d977e0dc480938c5385
--- /dev/null
+++ b/target/loongarch64/cpu-csr.h
@@ -0,0 +1,880 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ *
+ */
+
+#ifndef _CPU_CSR_H_
+#define _CPU_CSR_H_
+
+/* basic CSR register */
+#define LOONGARCH_CSR_CRMD 0x0 /* 32 current mode info */
+#define CSR_CRMD_DACM_SHIFT 7
+#define CSR_CRMD_DACM_WIDTH 2
+#define CSR_CRMD_DACM (0x3UL << CSR_CRMD_DACM_SHIFT)
+#define CSR_CRMD_DACF_SHIFT 5
+#define CSR_CRMD_DACF_WIDTH 2
+#define CSR_CRMD_DACF (0x3UL << CSR_CRMD_DACF_SHIFT)
+#define CSR_CRMD_PG_SHIFT 4
+#define CSR_CRMD_PG (0x1UL << CSR_CRMD_PG_SHIFT)
+#define CSR_CRMD_DA_SHIFT 3
+#define CSR_CRMD_DA (0x1UL << CSR_CRMD_DA_SHIFT)
+#define CSR_CRMD_IE_SHIFT 2
+#define CSR_CRMD_IE (0x1UL << CSR_CRMD_IE_SHIFT)
+#define CSR_CRMD_PLV_SHIFT 0
+#define CSR_CRMD_PLV_WIDTH 2
+#define CSR_CRMD_PLV (0x3UL << CSR_CRMD_PLV_SHIFT)
+
+#define PLV_USER 3
+#define PLV_KERN 0
+#define PLV_MASK 0x3
+
+#define LOONGARCH_CSR_PRMD 0x1 /* 32 prev-exception mode info */
+#define CSR_PRMD_PIE_SHIFT 2
+#define CSR_PRMD_PIE (0x1UL << CSR_PRMD_PIE_SHIFT)
+#define CSR_PRMD_PPLV_SHIFT 0
+#define CSR_PRMD_PPLV_WIDTH 2
+#define CSR_PRMD_PPLV (0x3UL << CSR_PRMD_PPLV_SHIFT)
+
+#define LOONGARCH_CSR_EUEN 0x2 /* 32 coprocessor enable */
+#define CSR_EUEN_LBTEN_SHIFT 3
+#define CSR_EUEN_LBTEN (0x1UL << CSR_EUEN_LBTEN_SHIFT)
+#define CSR_EUEN_LASXEN_SHIFT 2
+#define CSR_EUEN_LASXEN (0x1UL << CSR_EUEN_LASXEN_SHIFT)
+#define CSR_EUEN_LSXEN_SHIFT 1
+#define CSR_EUEN_LSXEN (0x1UL << CSR_EUEN_LSXEN_SHIFT)
+#define CSR_EUEN_FPEN_SHIFT 0
+#define CSR_EUEN_FPEN (0x1UL << CSR_EUEN_FPEN_SHIFT)
+
+#define LOONGARCH_CSR_MISC 0x3 /* 32 misc config */
+
+#define LOONGARCH_CSR_ECFG 0x4 /* 32 exception config */
+#define CSR_ECFG_VS_SHIFT 16
+#define CSR_ECFG_VS_WIDTH 3
+#define CSR_ECFG_VS (0x7UL << CSR_ECFG_VS_SHIFT)
+#define CSR_ECFG_IM_SHIFT 0
+#define CSR_ECFG_IM_WIDTH 13
+#define CSR_ECFG_IM (0x1fffUL << CSR_ECFG_IM_SHIFT)
+
+#define CSR_ECFG_IPMASK 0x00001fff
+
+#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
+#define CSR_ESTAT_ESUBCODE_SHIFT
22 +#define CSR_ESTAT_ESUBCODE_WIDTH 9 +#define CSR_ESTAT_ESUBCODE (0x1ffULL << CSR_ESTAT_ESUBCODE_SHIFT) +#define CSR_ESTAT_EXC_SH 16 +#define CSR_ESTAT_EXC_WIDTH 5 +#define CSR_ESTAT_EXC (0x1fULL << CSR_ESTAT_EXC_SH) +#define CSR_ESTAT_IS_SHIFT 0 +#define CSR_ESTAT_IS_WIDTH 15 +#define CSR_ESTAT_IS (0x7fffULL << CSR_ESTAT_IS_SHIFT) + +#define CSR_ESTAT_IPMASK 0x00001fff + +#define EXCODE_IP 64 +#define EXCCODE_RSV 0 +#define EXCCODE_TLBL 1 +#define EXCCODE_TLBS 2 +#define EXCCODE_TLBI 3 +#define EXCCODE_MOD 4 +#define EXCCODE_TLBRI 5 +#define EXCCODE_TLBXI 6 +#define EXCCODE_TLBPE 7 +#define EXCCODE_ADE 8 +#define EXCCODE_UNALIGN 9 +#define EXCCODE_OOB 10 +#define EXCCODE_SYS 11 +#define EXCCODE_BP 12 +#define EXCCODE_RI 13 +#define EXCCODE_IPE 14 +#define EXCCODE_FPDIS 15 +#define EXCCODE_LSXDIS 16 +#define EXCCODE_LASXDIS 17 +#define EXCCODE_FPE 18 +#define EXCCODE_WATCH 19 +#define EXCCODE_BTDIS 20 +#define EXCCODE_BTE 21 +#define EXCCODE_PSI 22 +#define EXCCODE_HYP 23 +#define EXCCODE_FC 24 +#define EXCCODE_SE 25 + +#define LOONGARCH_CSR_ERA 0x6 /* 64 error PC */ +#define LOONGARCH_CSR_BADV 0x7 /* 64 bad virtual address */ +#define LOONGARCH_CSR_BADI 0x8 /* 32 bad instruction */ +#define LOONGARCH_CSR_EEPN 0xc /* 64 exception enter base address */ +#define LOONGARCH_EEPN_CPUID (0x3ffULL << 0) + +#define CU_FPE 1 +#define CU_LSXE (1 << 1) +#define CU_LASXE (1 << 2) +#define CU_LBTE (1 << 3) + +/* TLB related CSR register : start with TLB if no pagewalk */ +/* 32 TLB Index, EHINV, PageSize, is_gtlb */ +#define LOONGARCH_CSR_TLBIDX 0x10 +#define CSR_TLBIDX_EHINV_SHIFT 31 +#define CSR_TLBIDX_EHINV (0x1ULL << CSR_TLBIDX_EHINV_SHIFT) +#define CSR_TLBIDX_PS_SHIFT 24 +#define CSR_TLBIDX_PS_WIDTH 6 +#define CSR_TLBIDX_PS (0x3fULL << CSR_TLBIDX_PS_SHIFT) +#define CSR_TLBIDX_IDX_SHIFT 0 +#define CSR_TLBIDX_IDX_WIDTH 12 +#define CSR_TLBIDX_IDX (0xfffULL << CSR_TLBIDX_IDX_SHIFT) +#define CSR_TLBIDX_SIZEM 0x3f000000 +#define CSR_TLBIDX_SIZE CSR_TLBIDX_PS_SHIFT +#define CSR_TLBIDX_IDXM 0xfff + +#define LOONGARCH_CSR_TLBEHI 0x11 /* 64 TLB EntryHi without ASID */ +#define LOONGARCH_CSR_TLBELO0 0x12 /* 64 TLB EntryLo0 */ +#define CSR_TLBLO0_RPLV_SHIFT 63 +#define CSR_TLBLO0_RPLV (0x1ULL << CSR_TLBLO0_RPLV_SHIFT) +#define CSR_TLBLO0_XI_SHIFT 62 +#define CSR_TLBLO0_XI (0x1ULL << CSR_TLBLO0_XI_SHIFT) +#define CSR_TLBLO0_RI_SHIFT 61 +#define CSR_TLBLO0_RI (0x1ULL << CSR_TLBLO0_RI_SHIFT) +#define CSR_TLBLO0_PPN_SHIFT 12 +#define CSR_TLBLO0_PPN_WIDTH 36 /* ignore lower 12bits */ +#define CSR_TLBLO0_PPN (0xfffffffffULL << CSR_TLBLO0_PPN_SHIFT) +#define CSR_TLBLO0_GLOBAL_SHIFT 6 +#define CSR_TLBLO0_GLOBAL (0x1ULL << CSR_TLBLO0_GLOBAL_SHIFT) +#define CSR_TLBLO0_CCA_SHIFT 4 +#define CSR_TLBLO0_CCA_WIDTH 2 +#define CSR_TLBLO0_CCA (0x3ULL << CSR_TLBLO0_CCA_SHIFT) +#define CSR_TLBLO0_PLV_SHIFT 2 +#define CSR_TLBLO0_PLV_WIDTH 2 +#define CSR_TLBLO0_PLV (0x3ULL << CSR_TLBLO0_PLV_SHIFT) +#define CSR_TLBLO0_WE_SHIFT 1 +#define CSR_TLBLO0_WE (0x1ULL << CSR_TLBLO0_WE_SHIFT) +#define CSR_TLBLO0_V_SHIFT 0 +#define CSR_TLBLO0_V (0x1ULL << CSR_TLBLO0_V_SHIFT) + +#define LOONGARCH_CSR_TLBELO1 0x13 /* 64 TLB EntryLo1 */ +#define CSR_TLBLO1_RPLV_SHIFT 63 +#define CSR_TLBLO1_RPLV (0x1ULL << CSR_TLBLO1_RPLV_SHIFT) +#define CSR_TLBLO1_XI_SHIFT 62 +#define CSR_TLBLO1_XI (0x1ULL << CSR_TLBLO1_XI_SHIFT) +#define CSR_TLBLO1_RI_SHIFT 61 +#define CSR_TLBLO1_RI (0x1ULL << CSR_TLBLO1_RI_SHIFT) +#define CSR_TLBLO1_PPN_SHIFT 12 +#define CSR_TLBLO1_PPN_WIDTH 36 /* ignore lower 12bits */ +#define CSR_TLBLO1_PPN (0xfffffffffULL << 
CSR_TLBLO1_PPN_SHIFT)
+#define CSR_TLBLO1_GLOBAL_SHIFT 6
+#define CSR_TLBLO1_GLOBAL (0x1ULL << CSR_TLBLO1_GLOBAL_SHIFT)
+#define CSR_TLBLO1_CCA_SHIFT 4
+#define CSR_TLBLO1_CCA_WIDTH 2
+#define CSR_TLBLO1_CCA (0x3ULL << CSR_TLBLO1_CCA_SHIFT)
+#define CSR_TLBLO1_PLV_SHIFT 2
+#define CSR_TLBLO1_PLV_WIDTH 2
+#define CSR_TLBLO1_PLV (0x3ULL << CSR_TLBLO1_PLV_SHIFT)
+#define CSR_TLBLO1_WE_SHIFT 1
+#define CSR_TLBLO1_WE (0x1ULL << CSR_TLBLO1_WE_SHIFT)
+#define CSR_TLBLO1_V_SHIFT 0
+#define CSR_TLBLO1_V (0x1ULL << CSR_TLBLO1_V_SHIFT)
+
+#define LOONGARCH_ENTRYLO_RI (1ULL << 61)
+#define LOONGARCH_ENTRYLO_XI (1ULL << 62)
+
+#define LOONGARCH_CSR_TLBWIRED 0x14 /* 32 TLB wired */
+#define LOONGARCH_CSR_GTLBC 0x15 /* guest-related TLB */
+#define CSR_GTLBC_RID_SHIFT 16
+#define CSR_GTLBC_RID_WIDTH 8
+#define CSR_GTLBC_RID (0xffULL << CSR_GTLBC_RID_SHIFT)
+#define CSR_GTLBC_TOTI_SHIFT 13
+#define CSR_GTLBC_TOTI (0x1ULL << CSR_GTLBC_TOTI_SHIFT)
+#define CSR_GTLBC_USERID_SHIFT 12
+#define CSR_GTLBC_USERID (0x1ULL << CSR_GTLBC_USERID_SHIFT)
+#define CSR_GTLBC_GMTLBSZ_SHIFT 0
+#define CSR_GTLBC_GMTLBSZ_WIDTH 6
+#define CSR_GTLBC_GMTLBSZ (0x3fULL << CSR_GTLBC_GMTLBSZ_SHIFT)
+
+#define LOONGARCH_CSR_TRGP 0x16 /* guest-related TLB */
+#define CSR_TRGP_RID_SHIFT 16
+#define CSR_TRGP_RID_WIDTH 8
+#define CSR_TRGP_RID (0xffULL << CSR_TRGP_RID_SHIFT)
+#define CSR_TRGP_GTLB_SHIFT 0
+#define CSR_TRGP_GTLB (1 << CSR_TRGP_GTLB_SHIFT)
+
+#define LOONGARCH_CSR_ASID 0x18 /* 64 ASID */
+#define CSR_ASID_BIT_SHIFT 16 /* ASIDBits */
+#define CSR_ASID_BIT_WIDTH 8
+#define CSR_ASID_BIT (0xffULL << CSR_ASID_BIT_SHIFT)
+#define CSR_ASID_ASID_SHIFT 0
+#define CSR_ASID_ASID_WIDTH 10
+#define CSR_ASID_ASID (0x3ffULL << CSR_ASID_ASID_SHIFT)
+
+/* 64 page table base address when badv[47] = 0 */
+#define LOONGARCH_CSR_PGDL 0x19
+/* 64 page table base address when badv[47] = 1 */
+#define LOONGARCH_CSR_PGDH 0x1a
+#define LOONGARCH_CSR_PGD 0x1b /* 64 page table base */
+#define LOONGARCH_CSR_PWCTL0 0x1c /* 64 PWCtl0 */
+#define CSR_PWCTL0_PTEW_SHIFT 30
+#define CSR_PWCTL0_PTEW_WIDTH 2
+#define CSR_PWCTL0_PTEW (0x3ULL << CSR_PWCTL0_PTEW_SHIFT)
+#define CSR_PWCTL0_DIR1WIDTH_SHIFT 25
+#define CSR_PWCTL0_DIR1WIDTH_WIDTH 5
+#define CSR_PWCTL0_DIR1WIDTH (0x1fULL << CSR_PWCTL0_DIR1WIDTH_SHIFT)
+#define CSR_PWCTL0_DIR1BASE_SHIFT 20
+#define CSR_PWCTL0_DIR1BASE_WIDTH 5
+#define CSR_PWCTL0_DIR1BASE (0x1fULL << CSR_PWCTL0_DIR1BASE_SHIFT)
+#define CSR_PWCTL0_DIR0WIDTH_SHIFT 15
+#define CSR_PWCTL0_DIR0WIDTH_WIDTH 5
+#define CSR_PWCTL0_DIR0WIDTH (0x1fULL << CSR_PWCTL0_DIR0WIDTH_SHIFT)
+#define CSR_PWCTL0_DIR0BASE_SHIFT 10
+#define CSR_PWCTL0_DIR0BASE_WIDTH 5
+#define CSR_PWCTL0_DIR0BASE (0x1fULL << CSR_PWCTL0_DIR0BASE_SHIFT)
+#define CSR_PWCTL0_PTWIDTH_SHIFT 5
+#define CSR_PWCTL0_PTWIDTH_WIDTH 5
+#define CSR_PWCTL0_PTWIDTH (0x1fULL << CSR_PWCTL0_PTWIDTH_SHIFT)
+#define CSR_PWCTL0_PTBASE_SHIFT 0
+#define CSR_PWCTL0_PTBASE_WIDTH 5
+#define CSR_PWCTL0_PTBASE (0x1fULL << CSR_PWCTL0_PTBASE_SHIFT)
+
+#define LOONGARCH_CSR_PWCTL1 0x1d /* 64 PWCtl1 */
+#define CSR_PWCTL1_DIR3WIDTH_SHIFT 18
+#define CSR_PWCTL1_DIR3WIDTH_WIDTH 5
+#define CSR_PWCTL1_DIR3WIDTH (0x1fULL << CSR_PWCTL1_DIR3WIDTH_SHIFT)
+#define CSR_PWCTL1_DIR3BASE_SHIFT 12
+#define CSR_PWCTL1_DIR3BASE_WIDTH 5
+#define CSR_PWCTL1_DIR3BASE (0x1fULL << CSR_PWCTL1_DIR3BASE_SHIFT)
+#define CSR_PWCTL1_DIR2WIDTH_SHIFT 6
+#define CSR_PWCTL1_DIR2WIDTH_WIDTH 5
+#define CSR_PWCTL1_DIR2WIDTH (0x1fULL << CSR_PWCTL1_DIR2WIDTH_SHIFT)
+#define CSR_PWCTL1_DIR2BASE_SHIFT 0
+#define CSR_PWCTL1_DIR2BASE_WIDTH 5
+#define CSR_PWCTL1_DIR2BASE (0x1fULL << CSR_PWCTL1_DIR2BASE_SHIFT)
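+
+/*
+ * Usage sketch (illustrative only, not used by this header): each field
+ * is described by a *_SHIFT constant, an optional *_WIDTH, and a
+ * pre-shifted mask of the same name, so reading and updating, e.g., the
+ * PTE width in PWCTL0 looks like:
+ *
+ *   ptew = (pwctl0 & CSR_PWCTL0_PTEW) >> CSR_PWCTL0_PTEW_SHIFT;
+ *   pwctl0 = (pwctl0 & ~CSR_PWCTL0_PTEW) |
+ *            ((ptew << CSR_PWCTL0_PTEW_SHIFT) & CSR_PWCTL0_PTEW);
+ */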
+
+#define LOONGARCH_CSR_STLBPGSIZE 0x1e /* 64 */
+#define CSR_STLBPGSIZE_PS_WIDTH 6
+#define CSR_STLBPGSIZE_PS (0x3f)
+
+#define LOONGARCH_CSR_RVACFG 0x1f
+#define CSR_RVACFG_RDVA_WIDTH 4
+#define CSR_RVACFG_RDVA (0xf)
+
+/* read only CSR register : start with CPU */
+#define LOONGARCH_CSR_CPUID 0x20 /* 32 CPU core number */
+#define CSR_CPUID_CID_WIDTH 9
+#define CSR_CPUID_CID (0x1ff)
+
+#define LOONGARCH_CSR_PRCFG1 0x21 /* 32 CPU info */
+#define CSR_CONF1_VSMAX_SHIFT 12
+#define CSR_CONF1_VSMAX_WIDTH 3
+#define CSR_CONF1_VSMAX (7ULL << CSR_CONF1_VSMAX_SHIFT)
+/* stable timer bits - 1, 0x2f = 47 */
+#define CSR_CONF1_TMRBITS_SHIFT 4
+#define CSR_CONF1_TMRBITS_WIDTH 8
+#define CSR_CONF1_TMRBITS (0xffULL << CSR_CONF1_TMRBITS_SHIFT)
+#define CSR_CONF1_KSNUM_SHIFT 0
+#define CSR_CONF1_KSNUM_WIDTH 4
+#define CSR_CONF1_KSNUM (0x8)
+
+#define LOONGARCH_CSR_PRCFG2 0x22
+#define CSR_CONF2_PGMASK_SUPP 0x3ffff000
+
+#define LOONGARCH_CSR_PRCFG3 0x23
+#define CSR_CONF3_STLBIDX_SHIFT 20
+#define CSR_CONF3_STLBIDX_WIDTH 6
+#define CSR_CONF3_STLBIDX (0x3fULL << CSR_CONF3_STLBIDX_SHIFT)
+#define CSR_STLB_SETS 256
+#define CSR_CONF3_STLBWAYS_SHIFT 12
+#define CSR_CONF3_STLBWAYS_WIDTH 8
+#define CSR_CONF3_STLBWAYS (0xffULL << CSR_CONF3_STLBWAYS_SHIFT)
+#define CSR_STLBWAYS_SIZE 8
+#define CSR_CONF3_MTLBSIZE_SHIFT 4
+#define CSR_CONF3_MTLBSIZE_WIDTH 8
+#define CSR_CONF3_MTLBSIZE (0xffULL << CSR_CONF3_MTLBSIZE_SHIFT)
+/* mean VTLB 64 index */
+#define CSR_MTLB_SIZE 64
+#define CSR_CONF3_TLBORG_SHIFT 0
+#define CSR_CONF3_TLBORG_WIDTH 4
+#define CSR_CONF3_TLBORG (0xfULL << CSR_CONF3_TLBORG_SHIFT)
+/* mean use MTLB+STLB */
+#define TLB_ORG 2
+
+/* Kscratch : start with KS */
+#define LOONGARCH_CSR_KS0 0x30 /* 64 */
+#define LOONGARCH_CSR_KS1 0x31 /* 64 */
+#define LOONGARCH_CSR_KS2 0x32 /* 64 */
+#define LOONGARCH_CSR_KS3 0x33 /* 64 */
+#define LOONGARCH_CSR_KS4 0x34 /* 64 */
+#define LOONGARCH_CSR_KS5 0x35 /* 64 */
+#define LOONGARCH_CSR_KS6 0x36 /* 64 */
+#define LOONGARCH_CSR_KS7 0x37 /* 64 */
+#define LOONGARCH_CSR_KS8 0x38 /* 64 */
+
+/* timer : start with TM */
+#define LOONGARCH_CSR_TMID 0x40 /* 32 timer ID */
+
+#define LOONGARCH_CSR_TCFG 0x41 /* 64 timer config */
+#define CSR_TCFG_VAL_SHIFT 2
+#define CSR_TCFG_VAL_WIDTH 48
+#define CSR_TCFG_VAL (0x3fffffffffffULL << CSR_TCFG_VAL_SHIFT)
+#define CSR_TCFG_PERIOD_SHIFT 1
+#define CSR_TCFG_PERIOD (0x1ULL << CSR_TCFG_PERIOD_SHIFT)
+#define CSR_TCFG_EN (0x1)
+
+#define LOONGARCH_CSR_TVAL 0x42 /* 64 timer ticks remain */
+
+#define LOONGARCH_CSR_CNTC 0x43 /* 64 timer offset */
+
+#define LOONGARCH_CSR_TINTCLR 0x44 /* 64 timer interrupt clear */
+#define CSR_TINTCLR_TI_SHIFT 0
+#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT)
+
+/* guest : start with GST */
+#define LOONGARCH_CSR_GSTAT 0x50 /* 32 basic guest info */
+#define CSR_GSTAT_GID_SHIFT 16
+#define CSR_GSTAT_GID_WIDTH 8
+#define CSR_GSTAT_GID (0xffULL << CSR_GSTAT_GID_SHIFT)
+#define CSR_GSTAT_GIDBIT_SHIFT 4
+#define CSR_GSTAT_GIDBIT_WIDTH 6
+#define CSR_GSTAT_GIDBIT (0x3fULL << CSR_GSTAT_GIDBIT_SHIFT)
+#define CSR_GSTAT_PVM_SHIFT 1
+#define CSR_GSTAT_PVM (0x1ULL << CSR_GSTAT_PVM_SHIFT)
+#define CSR_GSTAT_VM_SHIFT 0
+#define CSR_GSTAT_VM (0x1ULL << CSR_GSTAT_VM_SHIFT)
+
+#define LOONGARCH_CSR_GCFG 0x51 /* 32 guest config */
+#define CSR_GCFG_GPERF_SHIFT 24
+#define CSR_GCFG_GPERF_WIDTH 3
+#define CSR_GCFG_GPERF (0x7ULL << CSR_GCFG_GPERF_SHIFT)
+#define CSR_GCFG_GCI_SHIFT 20
+#define CSR_GCFG_GCI_WIDTH 2
+#define CSR_GCFG_GCI
(0x3ULL << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_ALL (0x0ULL << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_HIT (0x1ULL << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCI_SECURE (0x2ULL << CSR_GCFG_GCI_SHIFT) +#define CSR_GCFG_GCIP_SHIFT 16 +#define CSR_GCFG_GCIP (0xfULL << CSR_GCFG_GCIP_SHIFT) +#define CSR_GCFG_GCIP_ALL (0x1ULL << CSR_GCFG_GCIP_SHIFT) +#define CSR_GCFG_GCIP_HIT (0x1ULL << (CSR_GCFG_GCIP_SHIFT + 1)) +#define CSR_GCFG_GCIP_SECURE (0x1ULL << (CSR_GCFG_GCIP_SHIFT + 2)) +#define CSR_GCFG_TORU_SHIFT 15 +#define CSR_GCFG_TORU (0x1ULL << CSR_GCFG_TORU_SHIFT) +#define CSR_GCFG_TORUP_SHIFT 14 +#define CSR_GCFG_TORUP (0x1ULL << CSR_GCFG_TORUP_SHIFT) +#define CSR_GCFG_TOP_SHIFT 13 +#define CSR_GCFG_TOP (0x1ULL << CSR_GCFG_TOP_SHIFT) +#define CSR_GCFG_TOPP_SHIFT 12 +#define CSR_GCFG_TOPP (0x1ULL << CSR_GCFG_TOPP_SHIFT) +#define CSR_GCFG_TOE_SHIFT 11 +#define CSR_GCFG_TOE (0x1ULL << CSR_GCFG_TOE_SHIFT) +#define CSR_GCFG_TOEP_SHIFT 10 +#define CSR_GCFG_TOEP (0x1ULL << CSR_GCFG_TOEP_SHIFT) +#define CSR_GCFG_TIT_SHIFT 9 +#define CSR_GCFG_TIT (0x1ULL << CSR_GCFG_TIT_SHIFT) +#define CSR_GCFG_TITP_SHIFT 8 +#define CSR_GCFG_TITP (0x1ULL << CSR_GCFG_TITP_SHIFT) +#define CSR_GCFG_SIT_SHIFT 7 +#define CSR_GCFG_SIT (0x1ULL << CSR_GCFG_SIT_SHIFT) +#define CSR_GCFG_SITP_SHIFT 6 +#define CSR_GCFG_SITP (0x1ULL << CSR_GCFG_SITP_SHIFT) +#define CSR_GCFG_CACTRL_SHIFT 4 +#define CSR_GCFG_CACTRL_WIDTH 2 +#define CSR_GCFG_CACTRL (0x3ULL << CSR_GCFG_CACTRL_SHIFT) +#define CSR_GCFG_CACTRL_GUEST (0x0ULL << CSR_GCFG_CACTRL_SHIFT) +#define CSR_GCFG_CACTRL_ROOT (0x1ULL << CSR_GCFG_CACTRL_SHIFT) +#define CSR_GCFG_CACTRL_NEST (0x2ULL << CSR_GCFG_CACTRL_SHIFT) +#define CSR_GCFG_CCCP_WIDTH 4 +#define CSR_GCFG_CCCP (0xf) +#define CSR_GCFG_CCCP_GUEST (0x1ULL << 0) +#define CSR_GCFG_CCCP_ROOT (0x1ULL << 1) +#define CSR_GCFG_CCCP_NEST (0x1ULL << 2) + +#define LOONGARCH_CSR_GINTC 0x52 /* 64 guest exception control */ +#define CSR_GINTC_HC_SHIFT 16 +#define CSR_GINTC_HC_WIDTH 8 +#define CSR_GINTC_HC (0xffULL << CSR_GINTC_HC_SHIFT) +#define CSR_GINTC_PIP_SHIFT 8 +#define CSR_GINTC_PIP_WIDTH 8 +#define CSR_GINTC_PIP (0xffULL << CSR_GINTC_PIP_SHIFT) +#define CSR_GINTC_VIP_SHIFT 0 +#define CSR_GINTC_VIP_WIDTH 8 +#define CSR_GINTC_VIP (0xff) + +#define LOONGARCH_CSR_GCNTC 0x53 /* 64 guest timer offset */ + +/* LLBCTL */ +#define LOONGARCH_CSR_LLBCTL 0x60 /* 32 csr number to be changed */ +#define CSR_LLBCTL_ROLLB_SHIFT 0 +#define CSR_LLBCTL_ROLLB (1ULL << CSR_LLBCTL_ROLLB_SHIFT) +#define CSR_LLBCTL_WCLLB_SHIFT 1 +#define CSR_LLBCTL_WCLLB (1ULL << CSR_LLBCTL_WCLLB_SHIFT) +#define CSR_LLBCTL_KLO_SHIFT 2 +#define CSR_LLBCTL_KLO (1ULL << CSR_LLBCTL_KLO_SHIFT) + +/* implement dependent */ +#define LOONGARCH_CSR_IMPCTL1 0x80 /* 32 loongarch config */ +#define CSR_MISPEC_SHIFT 20 +#define CSR_MISPEC_WIDTH 8 +#define CSR_MISPEC (0xffULL << CSR_MISPEC_SHIFT) +#define CSR_SSEN_SHIFT 18 +#define CSR_SSEN (1ULL << CSR_SSEN_SHIFT) +#define CSR_SCRAND_SHIFT 17 +#define CSR_SCRAND (1ULL << CSR_SCRAND_SHIFT) +#define CSR_LLEXCL_SHIFT 16 +#define CSR_LLEXCL (1ULL << CSR_LLEXCL_SHIFT) +#define CSR_DISVC_SHIFT 15 +#define CSR_DISVC (1ULL << CSR_DISVC_SHIFT) +#define CSR_VCLRU_SHIFT 14 +#define CSR_VCLRU (1ULL << CSR_VCLRU_SHIFT) +#define CSR_DCLRU_SHIFT 13 +#define CSR_DCLRU (1ULL << CSR_DCLRU_SHIFT) +#define CSR_FASTLDQ_SHIFT 12 +#define CSR_FASTLDQ (1ULL << CSR_FASTLDQ_SHIFT) +#define CSR_USERCAC_SHIFT 11 +#define CSR_USERCAC (1ULL << CSR_USERCAC_SHIFT) +#define CSR_ANTI_MISPEC_SHIFT 10 +#define CSR_ANTI_MISPEC (1ULL << CSR_ANTI_MISPEC_SHIFT) 
+#define CSR_ANTI_FLUSHSFB_SHIFT 9 +#define CSR_ANTI_FLUSHSFB (1ULL << CSR_ANTI_FLUSHSFB_SHIFT) +#define CSR_STFILL_SHIFT 8 +#define CSR_STFILL (1ULL << CSR_STFILL_SHIFT) +#define CSR_LIFEP_SHIFT 7 +#define CSR_LIFEP (1ULL << CSR_LIFEP_SHIFT) +#define CSR_LLSYNC_SHIFT 6 +#define CSR_LLSYNC (1ULL << CSR_LLSYNC_SHIFT) +#define CSR_BRBTDIS_SHIFT 5 +#define CSR_BRBTDIS (1ULL << CSR_BRBTDIS_SHIFT) +#define CSR_RASDIS_SHIFT 4 +#define CSR_RASDIS (1ULL << CSR_RASDIS_SHIFT) +#define CSR_STPRE_SHIFT 2 +#define CSR_STPRE_WIDTH 2 +#define CSR_STPRE (3ULL << CSR_STPRE_SHIFT) +#define CSR_INSTPRE_SHIFT 1 +#define CSR_INSTPRE (1ULL << CSR_INSTPRE_SHIFT) +#define CSR_DATAPRE_SHIFT 0 +#define CSR_DATAPRE (1ULL << CSR_DATAPRE_SHIFT) + +#define LOONGARCH_CSR_IMPCTL2 0x81 /* 32 Flush */ +#define CSR_IMPCTL2_MTLB_SHIFT 0 +#define CSR_IMPCTL2_MTLB (1ULL << CSR_IMPCTL2_MTLB_SHIFT) +#define CSR_IMPCTL2_STLB_SHIFT 1 +#define CSR_IMPCTL2_STLB (1ULL << CSR_IMPCTL2_STLB_SHIFT) +#define CSR_IMPCTL2_DTLB_SHIFT 2 +#define CSR_IMPCTL2_DTLB (1ULL << CSR_IMPCTL2_DTLB_SHIFT) +#define CSR_IMPCTL2_ITLB_SHIFT 3 +#define CSR_IMPCTL2_ITLB (1ULL << CSR_IMPCTL2_ITLB_SHIFT) +#define CSR_IMPCTL2_BTAC_SHIFT 4 +#define CSR_IMPCTL2_BTAC (1ULL << CSR_IMPCTL2_BTAC_SHIFT) + +#define LOONGARCH_FLUSH_VTLB 1 +#define LOONGARCH_FLUSH_FTLB (1 << 1) +#define LOONGARCH_FLUSH_DTLB (1 << 2) +#define LOONGARCH_FLUSH_ITLB (1 << 3) +#define LOONGARCH_FLUSH_BTAC (1 << 4) + +#define LOONGARCH_CSR_GNMI 0x82 + +/* TLB Refill Only */ +#define LOONGARCH_CSR_TLBRENT 0x88 /* 64 TLB refill exception address */ +#define LOONGARCH_CSR_TLBRBADV 0x89 /* 64 TLB refill badvaddr */ +#define LOONGARCH_CSR_TLBRERA 0x8a /* 64 TLB refill ERA */ +#define LOONGARCH_CSR_TLBRSAVE 0x8b /* 64 KScratch for TLB refill */ +#define LOONGARCH_CSR_TLBRELO0 0x8c /* 64 TLB refill entrylo0 */ +#define LOONGARCH_CSR_TLBRELO1 0x8d /* 64 TLB refill entrylo1 */ +#define LOONGARCH_CSR_TLBREHI 0x8e /* 64 TLB refill entryhi */ +#define LOONGARCH_CSR_TLBRPRMD 0x8f /* 64 TLB refill mode info */ + +/* error related */ +#define LOONGARCH_CSR_ERRCTL 0x90 /* 32 ERRCTL */ +#define LOONGARCH_CSR_ERRINFO 0x91 +#define LOONGARCH_CSR_ERRINFO1 0x92 +#define LOONGARCH_CSR_ERRENT 0x93 /* 64 error exception base */ +#define LOONGARCH_CSR_ERRERA 0x94 /* 64 error exception PC */ +#define LOONGARCH_CSR_ERRSAVE 0x95 /* 64 KScratch for error exception */ + +#define LOONGARCH_CSR_CTAG 0x98 /* 64 TagLo + TagHi */ + +/* direct map windows */ +#define LOONGARCH_CSR_DMWIN0 0x180 /* 64 direct map win0: MEM & IF */ +#define LOONGARCH_CSR_DMWIN1 0x181 /* 64 direct map win1: MEM & IF */ +#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */ +#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */ +#define CSR_DMW_PLV0 0x1 +#define CSR_DMW_PLV1 0x2 +#define CSR_DMW_PLV2 0x4 +#define CSR_DMW_PLV3 0x8 +#define CSR_DMW_BASE_SH 48 +#define dmwin_va2pa(va) (va & (((unsigned long)1 << CSR_DMW_BASE_SH) - 1)) + +/* performance counter */ +#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */ +#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */ +#define LOONGARCH_CSR_PERFCTRL1 0x202 /* 32 perf event 1 config */ +#define LOONGARCH_CSR_PERFCNTR1 0x203 /* 64 perf event 1 count value */ +#define LOONGARCH_CSR_PERFCTRL2 0x204 /* 32 perf event 2 config */ +#define LOONGARCH_CSR_PERFCNTR2 0x205 /* 64 perf event 2 count value */ +#define LOONGARCH_CSR_PERFCTRL3 0x206 /* 32 perf event 3 config */ +#define LOONGARCH_CSR_PERFCNTR3 0x207 /* 64 perf event 3 count value */ +#define 
CSR_PERFCTRL_PLV0 (1ULL << 16) +#define CSR_PERFCTRL_PLV1 (1ULL << 17) +#define CSR_PERFCTRL_PLV2 (1ULL << 18) +#define CSR_PERFCTRL_PLV3 (1ULL << 19) +#define CSR_PERFCTRL_IE (1ULL << 20) +#define CSR_PERFCTRL_EVENT 0x3ff + +/* debug */ +#define LOONGARCH_CSR_MWPC 0x300 /* data breakpoint config */ +#define LOONGARCH_CSR_MWPS 0x301 /* data breakpoint status */ + +#define LOONGARCH_CSR_DB0ADDR 0x310 /* data breakpoint 0 address */ +#define LOONGARCH_CSR_DB0MASK 0x311 /* data breakpoint 0 mask */ +#define LOONGARCH_CSR_DB0CTL 0x312 /* data breakpoint 0 control */ +#define LOONGARCH_CSR_DB0ASID 0x313 /* data breakpoint 0 asid */ + +#define LOONGARCH_CSR_DB1ADDR 0x318 /* data breakpoint 1 address */ +#define LOONGARCH_CSR_DB1MASK 0x319 /* data breakpoint 1 mask */ +#define LOONGARCH_CSR_DB1CTL 0x31a /* data breakpoint 1 control */ +#define LOONGARCH_CSR_DB1ASID 0x31b /* data breakpoint 1 asid */ + +#define LOONGARCH_CSR_DB2ADDR 0x320 /* data breakpoint 2 address */ +#define LOONGARCH_CSR_DB2MASK 0x321 /* data breakpoint 2 mask */ +#define LOONGARCH_CSR_DB2CTL 0x322 /* data breakpoint 2 control */ +#define LOONGARCH_CSR_DB2ASID 0x323 /* data breakpoint 2 asid */ + +#define LOONGARCH_CSR_DB3ADDR 0x328 /* data breakpoint 3 address */ +#define LOONGARCH_CSR_DB3MASK 0x329 /* data breakpoint 3 mask */ +#define LOONGARCH_CSR_DB3CTL 0x32a /* data breakpoint 3 control */ +#define LOONGARCH_CSR_DB3ASID 0x32b /* data breakpoint 3 asid */ + +#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */ +#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */ + +#define LOONGARCH_CSR_IB0ADDR 0x390 /* inst breakpoint 0 address */ +#define LOONGARCH_CSR_IB0MASK 0x391 /* inst breakpoint 0 mask */ +#define LOONGARCH_CSR_IB0CTL 0x392 /* inst breakpoint 0 control */ +#define LOONGARCH_CSR_IB0ASID 0x393 /* inst breakpoint 0 asid */ +#define LOONGARCH_CSR_IB1ADDR 0x398 /* inst breakpoint 1 address */ +#define LOONGARCH_CSR_IB1MASK 0x399 /* inst breakpoint 1 mask */ +#define LOONGARCH_CSR_IB1CTL 0x39a /* inst breakpoint 1 control */ +#define LOONGARCH_CSR_IB1ASID 0x39b /* inst breakpoint 1 asid */ + +#define LOONGARCH_CSR_IB2ADDR 0x3a0 /* inst breakpoint 2 address */ +#define LOONGARCH_CSR_IB2MASK 0x3a1 /* inst breakpoint 2 mask */ +#define LOONGARCH_CSR_IB2CTL 0x3a2 /* inst breakpoint 2 control */ +#define LOONGARCH_CSR_IB2ASID 0x3a3 /* inst breakpoint 2 asid */ + +#define LOONGARCH_CSR_IB3ADDR 0x3a8 /* inst breakpoint 3 address */ +#define LOONGARCH_CSR_IB3MASK 0x3a9 /* inst breakpoint 3 mask */ +#define LOONGARCH_CSR_IB3CTL 0x3aa /* inst breakpoint 3 control */ +#define LOONGARCH_CSR_IB3ASID 0x3ab /* inst breakpoint 3 asid */ + +#define LOONGARCH_CSR_IB4ADDR 0x3b0 /* inst breakpoint 4 address */ +#define LOONGARCH_CSR_IB4MASK 0x3b1 /* inst breakpoint 4 mask */ +#define LOONGARCH_CSR_IB4CTL 0x3b2 /* inst breakpoint 4 control */ +#define LOONGARCH_CSR_IB4ASID 0x3b3 /* inst breakpoint 4 asid */ + +#define LOONGARCH_CSR_IB5ADDR 0x3b8 /* inst breakpoint 5 address */ +#define LOONGARCH_CSR_IB5MASK 0x3b9 /* inst breakpoint 5 mask */ +#define LOONGARCH_CSR_IB5CTL 0x3ba /* inst breakpoint 5 control */ +#define LOONGARCH_CSR_IB5ASID 0x3bb /* inst breakpoint 5 asid */ + +#define LOONGARCH_CSR_IB6ADDR 0x3c0 /* inst breakpoint 6 address */ +#define LOONGARCH_CSR_IB6MASK 0x3c1 /* inst breakpoint 6 mask */ +#define LOONGARCH_CSR_IB6CTL 0x3c2 /* inst breakpoint 6 control */ +#define LOONGARCH_CSR_IB6ASID 0x3c3 /* inst breakpoint 6 asid */ + +#define LOONGARCH_CSR_IB7ADDR 0x3c8 /* inst breakpoint 7 address 
*/ +#define LOONGARCH_CSR_IB7MASK 0x3c9 /* inst breakpoint 7 mask */ +#define LOONGARCH_CSR_IB7CTL 0x3ca /* inst breakpoint 7 control */ +#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */ + +#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */ +#define CSR_DEBUG_DM 0 +#define CSR_DEBUG_DMVER 1 +#define CSR_DEBUG_DINT 8 +#define CSR_DEBUG_DBP 9 +#define CSR_DEBUG_DIB 10 +#define CSR_DEBUG_DDB 11 + +#define LOONGARCH_CSR_DERA 0x501 /* debug era */ +#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */ + +#define LOONGARCH_CSR_PRID 0xc0 /* 32 LOONGARCH CP0 PRID */ + +#define LOONGARCH_CPUCFG0 0x0 +#define CPUCFG0_3A5000_PRID 0x0014c010 + +#define LOONGARCH_CPUCFG1 0x1 +#define CPUCFG1_ISGR32 BIT(0) +#define CPUCFG1_ISGR64 BIT(1) +#define CPUCFG1_PAGING BIT(2) +#define CPUCFG1_IOCSR BIT(3) +#define CPUCFG1_PABITS (47 << 4) +#define CPUCFG1_VABITS (47 << 12) +#define CPUCFG1_UAL BIT(20) +#define CPUCFG1_RI BIT(21) +#define CPUCFG1_XI BIT(22) +#define CPUCFG1_RPLV BIT(23) +#define CPUCFG1_HUGEPG BIT(24) +#define CPUCFG1_IOCSRBRD BIT(25) +#define CPUCFG1_MSGINT BIT(26) + +#define LOONGARCH_CPUCFG2 0x2 +#define CPUCFG2_FP BIT(0) +#define CPUCFG2_FPSP BIT(1) +#define CPUCFG2_FPDP BIT(2) +#define CPUCFG2_FPVERS (0 << 3) +#define CPUCFG2_LSX BIT(6) +#define CPUCFG2_LASX BIT(7) +#define CPUCFG2_COMPLEX BIT(8) +#define CPUCFG2_CRYPTO BIT(9) +#define CPUCFG2_LVZP BIT(10) +#define CPUCFG2_LVZVER (0 << 11) +#define CPUCFG2_LLFTP BIT(14) +#define CPUCFG2_LLFTPREV (1 << 15) +#define CPUCFG2_X86BT BIT(18) +#define CPUCFG2_ARMBT BIT(19) +#define CPUCFG2_MIPSBT BIT(20) +#define CPUCFG2_LSPW BIT(21) +#define CPUCFG2_LAM BIT(22) + +#define LOONGARCH_CPUCFG3 0x3 +#define CPUCFG3_CCDMA BIT(0) +#define CPUCFG3_SFB BIT(1) +#define CPUCFG3_UCACC BIT(2) +#define CPUCFG3_LLEXC BIT(3) +#define CPUCFG3_SCDLY BIT(4) +#define CPUCFG3_LLDBAR BIT(5) +#define CPUCFG3_ITLBT BIT(6) +#define CPUCFG3_ICACHET BIT(7) +#define CPUCFG3_SPW_LVL (4 << 8) +#define CPUCFG3_SPW_HG_HF BIT(11) +#define CPUCFG3_RVA BIT(12) +#define CPUCFG3_RVAMAX (7 << 13) + +#define LOONGARCH_CPUCFG4 0x4 +#define CCFREQ_100M 100000000 /* 100M */ + +#define LOONGARCH_CPUCFG5 0x5 +#define CPUCFG5_CCMUL 1 +#define CPUCFG5_CCDIV (1 << 16) + +#define LOONGARCH_CPUCFG6 0x6 +#define CPUCFG6_PMP BIT(0) +#define CPUCFG6_PAMVER (1 << 1) +#define CPUCFG6_PMNUM (3 << 4) +#define CPUCFG6_PMBITS (63 << 8) +#define CPUCFG6_UPM BIT(14) + +#define LOONGARCH_CPUCFG16 0x10 +#define CPUCFG16_L1_IUPRE BIT(0) +#define CPUCFG16_L1_UNIFY BIT(1) +#define CPUCFG16_L1_DPRE BIT(2) +#define CPUCFG16_L2_IUPRE BIT(3) +#define CPUCFG16_L2_IUUNIFY BIT(4) +#define CPUCFG16_L2_IUPRIV BIT(5) +#define CPUCFG16_L2_IUINCL BIT(6) +#define CPUCFG16_L2_DPRE BIT(7) +#define CPUCFG16_L2_DPRIV BIT(8) +#define CPUCFG16_L2_DINCL BIT(9) +#define CPUCFG16_L3_IUPRE BIT(10) +#define CPUCFG16_L3_IUUNIFY BIT(11) +#define CPUCFG16_L3_IUPRIV BIT(12) +#define CPUCFG16_L3_IUINCL BIT(13) +#define CPUCFG16_L3_DPRE BIT(14) +#define CPUCFG16_L3_DPRIV BIT(15) +#define CPUCFG16_L3_DINCL BIT(16) + +#define LOONGARCH_CPUCFG17 0x11 +#define CPUCFG17_L1I_WAYS_M (3 << 0) +#define CPUCFG17_L1I_SETS_M (8 << 16) +#define CPUCFG17_L1I_SIZE_M (6 << 24) + +#define LOONGARCH_CPUCFG18 0x12 +#define CPUCFG18_L1D_WAYS_M (3 << 0) +#define CPUCFG18_L1D_SETS_M (8 << 16) +#define CPUCFG18_L1D_SIZE_M (6 << 24) + +#define LOONGARCH_CPUCFG19 0x13 +#define CPUCFG19_L2_WAYS_M (0xf << 0) +#define CPUCFG19_L2_SETS_M (8 << 16) +#define CPUCFG19_L2_SIZE_M (6 << 24) + +#define LOONGARCH_CPUCFG20 0x14 +#define CPUCFG20_L3_WAYS_M (0xf 
<< 0) +#define CPUCFG20_L3_SETS_M (0xe << 16) +#define CPUCFG20_L3_SIZE_M (0x6 << 24) + +#define LOONGARCH_PAGE_HUGE 0x40 +#define LOONGARCH_HUGE_GLOBAL 0x1000 +#define LOONGARCH_HUGE_GLOBAL_SH 12 + +/* + * All CSR register + * + * default value in target/loongarch/cpu.c + * reset function in target/loongarch/translate.c:cpu_state_reset() + * + * This macro will be used only twice. + * > In target/loongarch/cpu.h:CPULOONGARCHState + * > In target/loongarch/internal.h:loongarch_def_t + * + * helper_function to rd/wr: + * > declare in target/loongarch/helper.h + * > realize in target/loongarch/op_helper.c + * + * during translate: + * > gen_csr_rdl() + * > gen_csr_wrl() + * > gen_csr_rdq() + * > gen_csr_wrq() + */ +#define CPU_LOONGARCH_CSR \ + uint64_t CSR_CRMD; \ + uint64_t CSR_PRMD; \ + uint64_t CSR_EUEN; \ + uint64_t CSR_MISC; \ + uint64_t CSR_ECFG; \ + uint64_t CSR_ESTAT; \ + uint64_t CSR_ERA; \ + uint64_t CSR_BADV; \ + uint64_t CSR_BADI; \ + uint64_t CSR_EEPN; \ + uint64_t CSR_TLBIDX; \ + uint64_t CSR_TLBEHI; \ + uint64_t CSR_TLBELO0; \ + uint64_t CSR_TLBELO1; \ + uint64_t CSR_TLBWIRED; \ + uint64_t CSR_GTLBC; \ + uint64_t CSR_TRGP; \ + uint64_t CSR_ASID; \ + uint64_t CSR_PGDL; \ + uint64_t CSR_PGDH; \ + uint64_t CSR_PGD; \ + uint64_t CSR_PWCTL0; \ + uint64_t CSR_PWCTL1; \ + uint64_t CSR_STLBPGSIZE; \ + uint64_t CSR_RVACFG; \ + uint64_t CSR_CPUID; \ + uint64_t CSR_PRCFG1; \ + uint64_t CSR_PRCFG2; \ + uint64_t CSR_PRCFG3; \ + uint64_t CSR_KS0; \ + uint64_t CSR_KS1; \ + uint64_t CSR_KS2; \ + uint64_t CSR_KS3; \ + uint64_t CSR_KS4; \ + uint64_t CSR_KS5; \ + uint64_t CSR_KS6; \ + uint64_t CSR_KS7; \ + uint64_t CSR_KS8; \ + uint64_t CSR_TMID; \ + uint64_t CSR_TCFG; \ + uint64_t CSR_TVAL; \ + uint64_t CSR_CNTC; \ + uint64_t CSR_TINTCLR; \ + uint64_t CSR_GSTAT; \ + uint64_t CSR_GCFG; \ + uint64_t CSR_GINTC; \ + uint64_t CSR_GCNTC; \ + uint64_t CSR_LLBCTL; \ + uint64_t CSR_IMPCTL1; \ + uint64_t CSR_IMPCTL2; \ + uint64_t CSR_GNMI; \ + uint64_t CSR_TLBRENT; \ + uint64_t CSR_TLBRBADV; \ + uint64_t CSR_TLBRERA; \ + uint64_t CSR_TLBRSAVE; \ + uint64_t CSR_TLBRELO0; \ + uint64_t CSR_TLBRELO1; \ + uint64_t CSR_TLBREHI; \ + uint64_t CSR_TLBRPRMD; \ + uint64_t CSR_ERRCTL; \ + uint64_t CSR_ERRINFO; \ + uint64_t CSR_ERRINFO1; \ + uint64_t CSR_ERRENT; \ + uint64_t CSR_ERRERA; \ + uint64_t CSR_ERRSAVE; \ + uint64_t CSR_CTAG; \ + uint64_t CSR_DMWIN0; \ + uint64_t CSR_DMWIN1; \ + uint64_t CSR_DMWIN2; \ + uint64_t CSR_DMWIN3; \ + uint64_t CSR_PERFCTRL0; \ + uint64_t CSR_PERFCNTR0; \ + uint64_t CSR_PERFCTRL1; \ + uint64_t CSR_PERFCNTR1; \ + uint64_t CSR_PERFCTRL2; \ + uint64_t CSR_PERFCNTR2; \ + uint64_t CSR_PERFCTRL3; \ + uint64_t CSR_PERFCNTR3; \ + uint64_t CSR_MWPC; \ + uint64_t CSR_MWPS; \ + uint64_t CSR_DB0ADDR; \ + uint64_t CSR_DB0MASK; \ + uint64_t CSR_DB0CTL; \ + uint64_t CSR_DB0ASID; \ + uint64_t CSR_DB1ADDR; \ + uint64_t CSR_DB1MASK; \ + uint64_t CSR_DB1CTL; \ + uint64_t CSR_DB1ASID; \ + uint64_t CSR_DB2ADDR; \ + uint64_t CSR_DB2MASK; \ + uint64_t CSR_DB2CTL; \ + uint64_t CSR_DB2ASID; \ + uint64_t CSR_DB3ADDR; \ + uint64_t CSR_DB3MASK; \ + uint64_t CSR_DB3CTL; \ + uint64_t CSR_DB3ASID; \ + uint64_t CSR_FWPC; \ + uint64_t CSR_FWPS; \ + uint64_t CSR_IB0ADDR; \ + uint64_t CSR_IB0MASK; \ + uint64_t CSR_IB0CTL; \ + uint64_t CSR_IB0ASID; \ + uint64_t CSR_IB1ADDR; \ + uint64_t CSR_IB1MASK; \ + uint64_t CSR_IB1CTL; \ + uint64_t CSR_IB1ASID; \ + uint64_t CSR_IB2ADDR; \ + uint64_t CSR_IB2MASK; \ + uint64_t CSR_IB2CTL; \ + uint64_t CSR_IB2ASID; \ + uint64_t CSR_IB3ADDR; \ + uint64_t CSR_IB3MASK; \ + uint64_t 
CSR_IB3CTL; \
+    uint64_t CSR_IB3ASID; \
+    uint64_t CSR_IB4ADDR; \
+    uint64_t CSR_IB4MASK; \
+    uint64_t CSR_IB4CTL; \
+    uint64_t CSR_IB4ASID; \
+    uint64_t CSR_IB5ADDR; \
+    uint64_t CSR_IB5MASK; \
+    uint64_t CSR_IB5CTL; \
+    uint64_t CSR_IB5ASID; \
+    uint64_t CSR_IB6ADDR; \
+    uint64_t CSR_IB6MASK; \
+    uint64_t CSR_IB6CTL; \
+    uint64_t CSR_IB6ASID; \
+    uint64_t CSR_IB7ADDR; \
+    uint64_t CSR_IB7MASK; \
+    uint64_t CSR_IB7CTL; \
+    uint64_t CSR_IB7ASID; \
+    uint64_t CSR_DEBUG; \
+    uint64_t CSR_DERA; \
+    uint64_t CSR_DESAVE;
+
+#define LOONGARCH_CSR_32(_R, _S) \
+    (KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
+
+#define LOONGARCH_CSR_64(_R, _S) \
+    (KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
+
+#define KVM_IOC_CSRID(id) LOONGARCH_CSR_64(id, 0)
+
+#endif
diff --git a/target/loongarch64/cpu-param.h b/target/loongarch64/cpu-param.h
new file mode 100644
index 0000000000000000000000000000000000000000..b5acb6b91e6eaadef13249e3da886b0bed5b0515
--- /dev/null
+++ b/target/loongarch64/cpu-param.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ *
+ */
+
+#ifndef CPU_PARAM_H
+#define CPU_PARAM_H
+
+/* If we want to use host float regs... */
+/* #define USE_HOST_FLOAT_REGS */
+
+/* Real pages are variable size... */
+#define TARGET_PAGE_BITS 14
+
+#define LOONGARCH_TLB_MAX 2112
+
+#define TARGET_LONG_BITS 64
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
+#define TARGET_VIRT_ADDR_SPACE_BITS 48
+
+/*
+ * bit definitions for insn_flags (ISAs/ASEs flags)
+ * ------------------------------------------------
+ */
+#define ISA_LARCH32 0x00000001ULL
+#define ISA_LARCH64 0x00000002ULL
+#define INSN_LOONGARCH 0x00010000ULL
+
+#define CPU_LARCH32 (ISA_LARCH32)
+#define CPU_LARCH64 (ISA_LARCH32 | ISA_LARCH64)
+
+#define NB_MMU_MODES 4
+
+#endif /* CPU_PARAM_H */
diff --git a/target/loongarch64/cpu-qom.h b/target/loongarch64/cpu-qom.h
new file mode 100644
index 0000000000000000000000000000000000000000..43541c34e5cd2df457124e3ce6655a75ad4bc0b1
--- /dev/null
+++ b/target/loongarch64/cpu-qom.h
@@ -0,0 +1,54 @@
+/*
+ * QEMU LOONGARCH CPU
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ * + */ + +#ifndef QEMU_LOONGARCH_CPU_QOM_H +#define QEMU_LOONGARCH_CPU_QOM_H + +#include "hw/core/cpu.h" + +#define TYPE_LOONGARCH_CPU "loongarch-cpu" + +#define LOONGARCH_CPU_CLASS(klass) \ + OBJECT_CLASS_CHECK(LOONGARCHCPUClass, (klass), TYPE_LOONGARCH_CPU) +#define LOONGARCH_CPU(obj) \ + OBJECT_CHECK(LOONGARCHCPU, (obj), TYPE_LOONGARCH_CPU) +#define LOONGARCH_CPU_GET_CLASS(obj) \ + OBJECT_GET_CLASS(LOONGARCHCPUClass, (obj), TYPE_LOONGARCH_CPU) + +/** + * LOONGARCHCPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * A LOONGARCH CPU model. + */ +typedef struct LOONGARCHCPUClass { + /*< private >*/ + CPUClass parent_class; + /*< public >*/ + + DeviceRealize parent_realize; + DeviceUnrealize parent_unrealize; + DeviceReset parent_reset; + const struct loongarch_def_t *cpu_def; +} LOONGARCHCPUClass; + +typedef struct LOONGARCHCPU LOONGARCHCPU; + +#endif diff --git a/target/loongarch64/cpu.c b/target/loongarch64/cpu.c new file mode 100644 index 0000000000000000000000000000000000000000..ce04d8064f6a0fad0ff9596ae0692b4f0fb92f76 --- /dev/null +++ b/target/loongarch64/cpu.c @@ -0,0 +1,575 @@ +/* + * QEMU LOONGARCH CPU + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qapi/visitor.h" +#include "cpu.h" +#include "internal.h" +#include "kvm_larch.h" +#include "qemu-common.h" +#include "hw/qdev-properties.h" +#include "sysemu/kvm.h" +#include "exec/exec-all.h" +#include "sysemu/arch_init.h" +#include "cpu-csr.h" +#include "qemu/qemu-print.h" +#include "qapi/qapi-commands-machine-target.h" +#ifdef CONFIG_TCG +#include "hw/core/tcg-cpu-ops.h" +#endif /* CONFIG_TCG */ + +#define LOONGARCH_CONFIG1 \ + ((0x8 << CSR_CONF1_KSNUM_SHIFT) | (0x2f << CSR_CONF1_TMRBITS_SHIFT) | \ + (0x7 << CSR_CONF1_VSMAX_SHIFT)) + +#define LOONGARCH_CONFIG3 \ + ((0x2 << CSR_CONF3_TLBORG_SHIFT) | (0x3f << CSR_CONF3_MTLBSIZE_SHIFT) | \ + (0x7 << CSR_CONF3_STLBWAYS_SHIFT) | (0x8 << CSR_CONF3_STLBIDX_SHIFT)) + +/* LOONGARCH CPU definitions */ +const loongarch_def_t loongarch_defs[] = { + { + .name = "Loongson-3A5000", + + /* for LoongISA CSR */ + .CSR_PRCFG1 = LOONGARCH_CONFIG1, + .CSR_PRCFG2 = 0x3ffff000, + .CSR_PRCFG3 = LOONGARCH_CONFIG3, + .CSR_CRMD = (0 << CSR_CRMD_PLV_SHIFT) | (0 << CSR_CRMD_IE_SHIFT) | + (1 << CSR_CRMD_DA_SHIFT) | (0 << CSR_CRMD_PG_SHIFT) | + (1 << CSR_CRMD_DACF_SHIFT) | (1 << CSR_CRMD_DACM_SHIFT), + .CSR_ECFG = 0x7 << 16, + .CSR_STLBPGSIZE = 0xe, + .CSR_RVACFG = 0x0, + .CSR_ASID = 0xa0000, + .FCSR0 = 0x0, + .FCSR0_rw_bitmask = 0x1f1f03df, + .PABITS = 48, + .insn_flags = CPU_LARCH64 | INSN_LOONGARCH, + .mmu_type = MMU_TYPE_LS3A5K, + }, + { + .name = "host", + + /* for LoongISA CSR */ + .CSR_PRCFG1 = LOONGARCH_CONFIG1, + .CSR_PRCFG2 = 0x3ffff000, + .CSR_PRCFG3 = LOONGARCH_CONFIG3, + .CSR_CRMD = (0 << CSR_CRMD_PLV_SHIFT) | (0 << CSR_CRMD_IE_SHIFT) | + (1 << CSR_CRMD_DA_SHIFT) | (0 << CSR_CRMD_PG_SHIFT) | + (1 << CSR_CRMD_DACF_SHIFT) | (1 << CSR_CRMD_DACM_SHIFT), + .CSR_ECFG = 0x7 << 16, + .CSR_STLBPGSIZE = 0xe, + .CSR_RVACFG = 0x0, + .FCSR0 = 0x0, + .FCSR0_rw_bitmask = 0x1f1f03df, + .PABITS = 48, + .insn_flags = CPU_LARCH64 | INSN_LOONGARCH, + .mmu_type = MMU_TYPE_LS3A5K, + }, +}; +const int loongarch_defs_number = ARRAY_SIZE(loongarch_defs); + +void loongarch_cpu_list(void) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(loongarch_defs); i++) { + qemu_printf("LOONGARCH '%s'\n", loongarch_defs[i].name); + } +} + +CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp) +{ + CpuDefinitionInfoList *cpu_list = NULL; + const loongarch_def_t *def; + int i; + + for (i = 0; i < ARRAY_SIZE(loongarch_defs); i++) { + CpuDefinitionInfoList *entry; + CpuDefinitionInfo *info; + + def = &loongarch_defs[i]; + info = g_malloc0(sizeof(*info)); + info->name = g_strdup(def->name); + + entry = g_malloc0(sizeof(*entry)); + entry->value = info; + entry->next = cpu_list; + cpu_list = entry; + } + + return cpu_list; +} + +static void loongarch_cpu_set_pc(CPUState *cs, vaddr value) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + + env->active_tc.PC = value & ~(target_ulong)1; +} + +static bool loongarch_cpu_has_work(CPUState *cs) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + bool has_work = false; + + /* + * It is implementation dependent if non-enabled + * interrupts wake-up the CPU, however most of the implementations only + * check for interrupts that can be taken. 
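+     * On LoongArch, "can be taken" amounts to the interrupt being both
+     * pending in CSR_ESTAT.IS and unmasked in CSR_ECFG.LIE, which is the
+     * condition cpu_loongarch_hw_interrupts_pending() is expected to test.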
+     */
+    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+        cpu_loongarch_hw_interrupts_pending(env)) {
+        has_work = true;
+    }
+
+    return has_work;
+}
+
+const char *const regnames[] = {
+    "r0", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
+    "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "x0",
+    "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8",
+};
+
+const char *const fregnames[] = {
+    "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+    "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+    "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+    "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+};
+
+static void fpu_dump_state(CPULOONGARCHState *env, FILE *f,
+                           fprintf_function fpu_fprintf, int flags)
+{
+    int i;
+    int is_fpu64 = 1;
+
+#define printfpr(fp)                                                        \
+    do {                                                                    \
+        if (is_fpu64)                                                       \
+            fpu_fprintf(                                                    \
+                f, "w:%08x d:%016" PRIx64 " fd:%13g fs:%13g psu: %13g\n",   \
+                (fp)->w[FP_ENDIAN_IDX], (fp)->d, (double)(fp)->fd,          \
+                (double)(fp)->fs[FP_ENDIAN_IDX],                            \
+                (double)(fp)->fs[!FP_ENDIAN_IDX]);                          \
+        else {                                                              \
+            fpr_t tmp;                                                      \
+            tmp.w[FP_ENDIAN_IDX] = (fp)->w[FP_ENDIAN_IDX];                  \
+            tmp.w[!FP_ENDIAN_IDX] = ((fp) + 1)->w[FP_ENDIAN_IDX];           \
+            fpu_fprintf(f,                                                  \
+                        "w:%08x d:%016" PRIx64 " fd:%13g fs:%13g psu:%13g\n", \
+                        tmp.w[FP_ENDIAN_IDX], tmp.d, (double)tmp.fd,        \
+                        (double)tmp.fs[FP_ENDIAN_IDX],                      \
+                        (double)tmp.fs[!FP_ENDIAN_IDX]);                    \
+        }                                                                   \
+    } while (0)
+
+    fpu_fprintf(f, "FCSR0 0x%08x SR.FR %d fp_status 0x%02x\n",
+                env->active_fpu.fcsr0, is_fpu64,
+                get_float_exception_flags(&env->active_fpu.fp_status));
+    for (i = 0; i < 32; (is_fpu64) ? i++ : (i += 2)) {
+        fpu_fprintf(f, "%3s: ", fregnames[i]);
+        printfpr(&env->active_fpu.fpr[i]);
+    }
+
+#undef printfpr
+}
+
+void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int i;
+
+    qemu_fprintf(f, "pc:\t %lx\n", env->active_tc.PC);
+    for (i = 0; i < 32; i++) {
+        if ((i & 3) == 0) {
+            qemu_fprintf(f, "GPR%02d:", i);
+        }
+        qemu_fprintf(f, " %s " TARGET_FMT_lx, regnames[i],
+                     env->active_tc.gpr[i]);
+        if ((i & 3) == 3) {
+            qemu_fprintf(f, "\n");
+        }
+    }
+    qemu_fprintf(f, "EUEN 0x%lx\n", env->CSR_EUEN);
+    qemu_fprintf(f, "ESTAT 0x%lx\n", env->CSR_ESTAT);
+    qemu_fprintf(f, "ERA 0x%lx\n", env->CSR_ERA);
+    qemu_fprintf(f, "CRMD 0x%lx\n", env->CSR_CRMD);
+    qemu_fprintf(f, "PRMD 0x%lx\n", env->CSR_PRMD);
+    qemu_fprintf(f, "BadVAddr 0x%lx\n", env->CSR_BADV);
+    qemu_fprintf(f, "TLB refill ERA 0x%lx\n", env->CSR_TLBRERA);
+    qemu_fprintf(f, "TLB refill BadV 0x%lx\n", env->CSR_TLBRBADV);
+    qemu_fprintf(f, "EEPN 0x%lx\n", env->CSR_EEPN);
+    qemu_fprintf(f, "BadInstr 0x%lx\n", env->CSR_BADI);
+    qemu_fprintf(f, "PRCFG1 0x%lx\nPRCFG2 0x%lx\nPRCFG3 0x%lx\n",
+                 env->CSR_PRCFG1, env->CSR_PRCFG2, env->CSR_PRCFG3);
+    if ((flags & CPU_DUMP_FPU) && (env->hflags & LARCH_HFLAG_FPU)) {
+        fpu_dump_state(env, f, qemu_fprintf, flags);
+    }
+}
+
+void cpu_state_reset(CPULOONGARCHState *env)
+{
+    LOONGARCHCPU *cpu = loongarch_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    /* Reset registers to their default values */
+    env->CSR_PRCFG1 = env->cpu_model->CSR_PRCFG1;
+    env->CSR_PRCFG2 = env->cpu_model->CSR_PRCFG2;
+    env->CSR_PRCFG3 = env->cpu_model->CSR_PRCFG3;
+    env->CSR_CRMD = env->cpu_model->CSR_CRMD;
+    env->CSR_ECFG = env->cpu_model->CSR_ECFG;
+    env->CSR_STLBPGSIZE = env->cpu_model->CSR_STLBPGSIZE;
+    env->CSR_RVACFG = env->cpu_model->CSR_RVACFG;
+    env->CSR_ASID = env->cpu_model->CSR_ASID;
+
+    env->current_tc = 0;
+    env->active_fpu.fcsr0_rw_bitmask =
env->cpu_model->FCSR0_rw_bitmask; + env->active_fpu.fcsr0 = env->cpu_model->FCSR0; + env->insn_flags = env->cpu_model->insn_flags; + +#if !defined(CONFIG_USER_ONLY) + env->CSR_ERA = env->active_tc.PC; + env->active_tc.PC = env->exception_base; +#ifdef CONFIG_TCG + env->tlb->tlb_in_use = env->tlb->nb_tlb; +#endif + env->CSR_TLBWIRED = 0; + env->CSR_TMID = cs->cpu_index; + env->CSR_CPUID = (cs->cpu_index & 0x1ff); + env->CSR_EEPN |= (uint64_t)0x80000000; + env->CSR_TLBRENT |= (uint64_t)0x80000000; +#endif + + /* Count register increments in debug mode, EJTAG version 1 */ + env->CSR_DEBUG = (1 << CSR_DEBUG_DINT) | (0x1 << CSR_DEBUG_DMVER); + + compute_hflags(env); + restore_fp_status(env); + cs->exception_index = EXCP_NONE; +} + +/* CPUClass::reset() */ +static void loongarch_cpu_reset(DeviceState *dev) +{ + CPUState *s = CPU(dev); + LOONGARCHCPU *cpu = LOONGARCH_CPU(s); + LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(cpu); + CPULOONGARCHState *env = &cpu->env; + + mcc->parent_reset(dev); + + memset(env, 0, offsetof(CPULOONGARCHState, end_reset_fields)); + + cpu_state_reset(env); + +#ifndef CONFIG_USER_ONLY + if (kvm_enabled()) { + kvm_loongarch_reset_vcpu(cpu); + } +#endif +} + +static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info) +{ + info->print_insn = print_insn_loongarch; +} + +static void fpu_init(CPULOONGARCHState *env, const loongarch_def_t *def) +{ + memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu)); +} + +void cpu_loongarch_realize_env(CPULOONGARCHState *env) +{ + env->exception_base = 0x1C000000; + +#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY) + mmu_init(env, env->cpu_model); +#endif + fpu_init(env, env->cpu_model); +} + +static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp) +{ + CPUState *cs = CPU(dev); + LOONGARCHCPU *cpu = LOONGARCH_CPU(dev); + LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(dev); + Error *local_err = NULL; + + cpu_exec_realizefn(cs, &local_err); + if (local_err != NULL) { + error_propagate(errp, local_err); + return; + } + + cpu_loongarch_realize_env(&cpu->env); + + loongarch_cpu_register_gdb_regs_for_features(cs); + + cpu_reset(cs); + qemu_init_vcpu(cs); + + mcc->parent_realize(dev, errp); + cpu->hotplugged = 1; +} + +static void loongarch_cpu_unrealizefn(DeviceState *dev) +{ + LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(dev); + +#ifndef CONFIG_USER_ONLY + cpu_remove_sync(CPU(dev)); +#endif + + mcc->parent_unrealize(dev); +} + +static void loongarch_cpu_initfn(Object *obj) +{ + CPUState *cs = CPU(obj); + LOONGARCHCPU *cpu = LOONGARCH_CPU(obj); + CPULOONGARCHState *env = &cpu->env; + LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(obj); + cpu_set_cpustate_pointers(cpu); + cs->env_ptr = env; + env->cpu_model = mcc->cpu_def; + cs->halted = 1; + cpu->dtb_compatible = "loongarch,Loongson-3A5000"; +} + +static char *loongarch_cpu_type_name(const char *cpu_model) +{ + return g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model); +} + +static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model) +{ + ObjectClass *oc; + char *typename; + + typename = loongarch_cpu_type_name(cpu_model); + oc = object_class_by_name(typename); + g_free(typename); + return oc; +} + +static int64_t loongarch_cpu_get_arch_id(CPUState *cs) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + + return cpu->id; +} + +static Property loongarch_cpu_properties[] = { + DEFINE_PROP_INT32("core-id", LOONGARCHCPU, core_id, -1), + DEFINE_PROP_INT32("id", LOONGARCHCPU, id, UNASSIGNED_CPU_ID), + DEFINE_PROP_INT32("node-id", 
LOONGARCHCPU, node_id, + CPU_UNSET_NUMA_NODE_ID), + + DEFINE_PROP_END_OF_LIST() +}; + +#ifdef CONFIG_TCG +static void loongarch_cpu_synchronize_from_tb(CPUState *cs, + const TranslationBlock *tb) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + + env->active_tc.PC = tb->pc; + env->hflags &= ~LARCH_HFLAG_BMASK; + env->hflags |= tb->flags & LARCH_HFLAG_BMASK; +} + +static const struct TCGCPUOps loongarch_tcg_ops = { + .initialize = loongarch_tcg_init, + .synchronize_from_tb = loongarch_cpu_synchronize_from_tb, + + .tlb_fill = loongarch_cpu_tlb_fill, + .cpu_exec_interrupt = loongarch_cpu_exec_interrupt, + .do_interrupt = loongarch_cpu_do_interrupt, + +#ifndef CONFIG_USER_ONLY + .do_unaligned_access = loongarch_cpu_do_unaligned_access, +#endif /* !CONFIG_USER_ONLY */ +}; +#endif /* CONFIG_TCG */ + +#if !defined(CONFIG_USER_ONLY) +static int get_physical_address(CPULOONGARCHState *env, hwaddr *physical, + int *prot, target_ulong real_address, int rw, + int access_type, int mmu_idx) +{ + int user_mode = mmu_idx == LARCH_HFLAG_UM; + int kernel_mode = !user_mode; + unsigned plv, base_c, base_v, tmp; + + /* effective address (modified for KVM T&E kernel segments) */ + target_ulong address = real_address; + + /* Check PG */ + if (!(env->CSR_CRMD & CSR_CRMD_PG)) { + /* DA mode */ + *physical = address & 0xffffffffffffUL; + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + + plv = kernel_mode | (user_mode << 3); + base_v = address >> CSR_DMW_BASE_SH; + /* Check direct map window 0 */ + base_c = env->CSR_DMWIN0 >> CSR_DMW_BASE_SH; + if ((plv & env->CSR_DMWIN0) && (base_c == base_v)) { + *physical = dmwin_va2pa(address); + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + /* Check direct map window 1 */ + base_c = env->CSR_DMWIN1 >> CSR_DMW_BASE_SH; + if ((plv & env->CSR_DMWIN1) && (base_c == base_v)) { + *physical = dmwin_va2pa(address); + *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + return TLBRET_MATCH; + } + /* Check valid extension */ + tmp = address >> 47; + if (!(tmp == 0 || tmp == 0x1ffff)) { + return TLBRET_BADADDR; + } + /* mapped address */ + return env->tlb->map_address(env, physical, prot, real_address, rw, + access_type); +} + +hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + hwaddr phys_addr; + int prot; + + if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT, + cpu_mmu_index(env, false)) != 0) { + return -1; + } + return phys_addr; +} +#endif + +#ifndef CONFIG_USER_ONLY +#include "hw/core/sysemu-cpu-ops.h" + +static const struct SysemuCPUOps loongarch_sysemu_ops = { + .write_elf64_note = loongarch_cpu_write_elf64_note, + .get_phys_page_debug = loongarch_cpu_get_phys_page_debug, + .legacy_vmsd = &vmstate_loongarch_cpu, +}; +#endif + +static gchar *loongarch_gdb_arch_name(CPUState *cs) +{ + return g_strdup("loongarch64"); +} + +static void loongarch_cpu_class_init(ObjectClass *c, void *data) +{ + LOONGARCHCPUClass *mcc = LOONGARCH_CPU_CLASS(c); + CPUClass *cc = CPU_CLASS(c); + DeviceClass *dc = DEVICE_CLASS(c); + + device_class_set_props(dc, loongarch_cpu_properties); + device_class_set_parent_realize(dc, loongarch_cpu_realizefn, + &mcc->parent_realize); + + device_class_set_parent_unrealize(dc, loongarch_cpu_unrealizefn, + &mcc->parent_unrealize); + + device_class_set_parent_reset(dc, loongarch_cpu_reset, &mcc->parent_reset); + cc->get_arch_id = loongarch_cpu_get_arch_id; + + 
cc->class_by_name = loongarch_cpu_class_by_name; + cc->has_work = loongarch_cpu_has_work; + cc->dump_state = loongarch_cpu_dump_state; + cc->set_pc = loongarch_cpu_set_pc; + cc->gdb_read_register = loongarch_cpu_gdb_read_register; + cc->gdb_write_register = loongarch_cpu_gdb_write_register; + cc->disas_set_info = loongarch_cpu_disas_set_info; + cc->gdb_arch_name = loongarch_gdb_arch_name; +#ifndef CONFIG_USER_ONLY + cc->sysemu_ops = &loongarch_sysemu_ops; +#endif /* !CONFIG_USER_ONLY */ + + cc->gdb_num_core_regs = 35; + cc->gdb_core_xml_file = "loongarch-base64.xml"; + cc->gdb_stop_before_watchpoint = true; + + dc->user_creatable = true; +#ifdef CONFIG_TCG + cc->tcg_ops = &loongarch_tcg_ops; +#endif /* CONFIG_TCG */ +} + +static const TypeInfo loongarch_cpu_type_info = { + .name = TYPE_LOONGARCH_CPU, + .parent = TYPE_CPU, + .instance_size = sizeof(LOONGARCHCPU), + .instance_init = loongarch_cpu_initfn, + .abstract = true, + .class_size = sizeof(LOONGARCHCPUClass), + .class_init = loongarch_cpu_class_init, +}; + +static void loongarch_cpu_cpudef_class_init(ObjectClass *oc, void *data) +{ + LOONGARCHCPUClass *mcc = LOONGARCH_CPU_CLASS(oc); + mcc->cpu_def = data; +} + +static void loongarch_register_cpudef_type(const struct loongarch_def_t *def) +{ + char *typename = loongarch_cpu_type_name(def->name); + TypeInfo ti = { + .name = typename, + .parent = TYPE_LOONGARCH_CPU, + .class_init = loongarch_cpu_cpudef_class_init, + .class_data = (void *)def, + }; + + type_register(&ti); + g_free(typename); +} + +static void loongarch_cpu_register_types(void) +{ + int i; + + type_register_static(&loongarch_cpu_type_info); + for (i = 0; i < loongarch_defs_number; i++) { + loongarch_register_cpudef_type(&loongarch_defs[i]); + } +} + +type_init(loongarch_cpu_register_types) diff --git a/target/loongarch64/cpu.h b/target/loongarch64/cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..8a29a507b1bfa44830a63821b20b271d3bb43bc0 --- /dev/null +++ b/target/loongarch64/cpu.h @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef LOONGARCH_CPU_H +#define LOONGARCH_CPU_H + +#define CPUArchState struct CPULOONGARCHState + +#include "qemu-common.h" +#include "cpu-qom.h" +#include "larch-defs.h" +#include "exec/cpu-defs.h" +#include "fpu/softfloat.h" +#include "sysemu/sysemu.h" +#include "cpu-csr.h" + +#define TCG_GUEST_DEFAULT_MO (0) + +struct CPULOONGARCHState; +typedef LOONGARCHCPU ArchCPU; +typedef struct CPULOONGARCHTLBContext CPULOONGARCHTLBContext; + +#define LASX_REG_WIDTH (256) +typedef union lasx_reg_t lasx_reg_t; +union lasx_reg_t +{ + int64_t val64[LASX_REG_WIDTH / 64]; +}; + +typedef union fpr_t fpr_t; +union fpr_t +{ + float64 fd; /* ieee double precision */ + float32 fs[2]; /* ieee single precision */ + uint64_t d; /* binary double fixed-point */ + uint32_t w[2]; /* binary single fixed-point */ + /* FPU/LASX register mapping is not tested on big-endian hosts. 
*/
+    lasx_reg_t lasx; /* vector data */
+};
+/*
+ * define FP_ENDIAN_IDX to access the same location
+ * in the fpr_t union regardless of the host endianness
+ */
+#if defined(HOST_WORDS_BIGENDIAN)
+#define FP_ENDIAN_IDX 1
+#else
+#define FP_ENDIAN_IDX 0
+#endif
+
+typedef struct CPULOONGARCHFPUContext {
+    /* Floating point registers */
+    fpr_t fpr[32];
+    float_status fp_status;
+
+    bool cf[8];
+    /*
+     * fcsr0
+     * 31:29 |28:24 |23:21 |20:16 |15:10 |9:8 |7 |6 |5 |4:0
+     * Cause Flags RM DAE TM Enables
+     */
+    uint32_t fcsr0;
+    uint32_t fcsr0_rw_bitmask;
+    uint32_t vcsr16;
+    uint64_t ftop;
+} CPULOONGARCHFPUContext;
+
+/* fp control and status register definition */
+#define FCSR0_M1 0xdf /* DAE, TM and Enables */
+#define FCSR0_M2 0x1f1f0000 /* Cause and Flags */
+#define FCSR0_M3 0x300 /* Round Mode */
+#define FCSR0_RM 8 /* Round Mode bit num on fcsr0 */
+#define GET_FP_CAUSE(reg) (((reg) >> 24) & 0x1f)
+#define GET_FP_ENABLE(reg) (((reg) >> 0) & 0x1f)
+#define GET_FP_FLAGS(reg) (((reg) >> 16) & 0x1f)
+#define SET_FP_CAUSE(reg, v)                                  \
+    do {                                                      \
+        (reg) = ((reg) & ~(0x1f << 24)) | ((v & 0x1f) << 24); \
+    } while (0)
+#define SET_FP_ENABLE(reg, v)                                 \
+    do {                                                      \
+        (reg) = ((reg) & ~(0x1f << 0)) | ((v & 0x1f) << 0);   \
+    } while (0)
+#define SET_FP_FLAGS(reg, v)                                  \
+    do {                                                      \
+        (reg) = ((reg) & ~(0x1f << 16)) | ((v & 0x1f) << 16); \
+    } while (0)
+#define UPDATE_FP_FLAGS(reg, v)      \
+    do {                             \
+        (reg) |= ((v & 0x1f) << 16); \
+    } while (0)
+#define FP_INEXACT 1
+#define FP_UNDERFLOW 2
+#define FP_OVERFLOW 4
+#define FP_DIV0 8
+#define FP_INVALID 16
+
+#define TARGET_INSN_START_EXTRA_WORDS 2
+
+typedef struct loongarch_def_t loongarch_def_t;
+
+#define LOONGARCH_FPU_MAX 1
+#define LOONGARCH_KSCRATCH_NUM 8
+
+typedef struct TCState TCState;
+struct TCState {
+    target_ulong gpr[32];
+    target_ulong PC;
+};
+
+#define N_IRQS 14
+#define IRQ_TIMER 11
+#define IRQ_IPI 12
+#define IRQ_UART 2
+
+typedef struct CPULOONGARCHState CPULOONGARCHState;
+struct CPULOONGARCHState {
+    TCState active_tc;
+    CPULOONGARCHFPUContext active_fpu;
+
+    uint32_t current_tc;
+    uint64_t scr[4];
+    uint32_t PABITS;
+
+    /* LoongISA CSR register */
+    CPU_LOONGARCH_CSR
+    uint64_t lladdr;
+    target_ulong llval;
+    uint64_t llval_wp;
+    uint32_t llnewval_wp;
+
+    CPULOONGARCHFPUContext fpus[LOONGARCH_FPU_MAX];
+    /* QEMU */
+    int error_code;
+#define EXCP_TLB_NOMATCH 0x1
+#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
+    uint32_t hflags; /* CPU State */
+    /* TMASK defines different execution modes */
+#define LARCH_HFLAG_TMASK 0x5F5807FF
+    /*
+     * The KSU flags must be the lowest bits in hflags, and their
+     * encoding must match the CSR privilege-level encoding, so that
+     * the bits can be used directly as the value of mmu_idx.
+     */
+#define LARCH_HFLAG_KSU 0x00003 /* kernel/user mode mask */
+#define LARCH_HFLAG_UM 0x00003 /* user mode flag */
+#define LARCH_HFLAG_KM 0x00000 /* kernel mode flag */
+#define LARCH_HFLAG_64 0x00008 /* 64-bit instructions enabled */
+#define LARCH_HFLAG_FPU 0x00020 /* FPU enabled */
+#define LARCH_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */
+    /*
+     * If translation is interrupted at a branch, record what type of
+     * branch it is so that we can resume translation properly. It might
+     * be possible to reduce this from three bits to two.
+ */ +#define LARCH_HFLAG_BMASK 0x03800 +#define LARCH_HFLAG_B 0x00800 /* Unconditional branch */ +#define LARCH_HFLAG_BC 0x01000 /* Conditional branch */ +#define LARCH_HFLAG_BR 0x02000 /* branch to register (can't link TB) */ +#define LARCH_HFLAG_LSX 0x1000000 +#define LARCH_HFLAG_LASX 0x2000000 +#define LARCH_HFLAG_LBT 0x40000000 + target_ulong btarget; /* Jump / branch target */ + target_ulong bcond; /* Branch condition (if needed) */ + + uint64_t insn_flags; /* Supported instruction set */ + int cpu_cfg[64]; + + /* Fields up to this point are cleared by a CPU reset */ + struct { + } end_reset_fields; + + /* Fields from here on are preserved across CPU reset. */ +#if !defined(CONFIG_USER_ONLY) + CPULOONGARCHTLBContext *tlb; +#endif + + const loongarch_def_t *cpu_model; + void *irq[N_IRQS]; + QEMUTimer *timer; /* Internal timer */ + MemoryRegion *itc_tag; /* ITC Configuration Tags */ + target_ulong exception_base; /* ExceptionBase input to the core */ + struct { + uint64_t guest_addr; + } st; + struct { + /* scratch registers */ + unsigned long scr0; + unsigned long scr1; + unsigned long scr2; + unsigned long scr3; + /* loongarch eflag */ + unsigned long eflag; + } lbt; +}; + +/* + * CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish + * that ID hasn't been set yet + */ +#define UNASSIGNED_CPU_ID 0xFFFFFFFF + +/** + * LOONGARCHCPU: + * @env: #CPULOONGARCHState + * + * A LOONGARCH CPU. + */ +struct LOONGARCHCPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + CPUNegativeOffsetState neg; + CPULOONGARCHState env; + int32_t id; + int hotplugged; + uint8_t online_vcpus; + uint8_t is_migrate; + uint64_t counter_value; + uint32_t cpu_freq; + uint32_t count_ctl; + uint64_t pending_exceptions; + uint64_t pending_exceptions_clr; + uint64_t core_ext_ioisr[4]; + VMChangeStateEntry *cpuStateEntry; + int32_t node_id; /* NUMA node this CPU belongs to */ + int32_t core_id; + struct kvm_msrs *kvm_csr_buf; + /* 'compatible' string for this CPU for Linux device trees */ + const char *dtb_compatible; +}; + +static inline LOONGARCHCPU *loongarch_env_get_cpu(CPULOONGARCHState *env) +{ + return container_of(env, LOONGARCHCPU, env); +} + +#define ENV_GET_CPU(e) CPU(loongarch_env_get_cpu(e)) + +#define ENV_OFFSET offsetof(LOONGARCHCPU, env) + +void loongarch_cpu_list(void); + +#define cpu_signal_handler cpu_loongarch_signal_handler +#define cpu_list loongarch_cpu_list + +/* + * MMU modes definitions. We carefully match the indices with our + * hflags layout. + */ +#define MMU_MODE0_SUFFIX _kernel +#define MMU_MODE1_SUFFIX _super +#define MMU_MODE2_SUFFIX _user +#define MMU_MODE3_SUFFIX _error +#define MMU_USER_IDX 3 + +static inline int hflags_mmu_index(uint32_t hflags) +{ + return hflags & LARCH_HFLAG_KSU; +} + +static inline int cpu_mmu_index(CPULOONGARCHState *env, bool ifetch) +{ + return hflags_mmu_index(env->hflags); +} + +#include "exec/cpu-all.h" + +/* + * Memory access type : + * may be needed for precise access rights control and precise exceptions. 
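+ * The low bit selects user vs. supervisor, bit 1 flags a store, and the
+ * high bits record the instruction class; e.g. a floating-point store
+ * issued from user mode is (ACCESS_FLOAT | ACCESS_STORE | ACCESS_USER).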
+ */ +enum { + /* 1 bit to define user level / supervisor access */ + ACCESS_USER = 0x00, + ACCESS_SUPER = 0x01, + /* 1 bit to indicate direction */ + ACCESS_STORE = 0x02, + /* Type of instruction that generated the access */ + ACCESS_CODE = 0x10, /* Code fetch access */ + ACCESS_INT = 0x20, /* Integer load/store access */ + ACCESS_FLOAT = 0x30, /* floating point load/store access */ +}; + +/* Exceptions */ +enum { + EXCP_NONE = -1, + EXCP_RESET = 0, + EXCP_SRESET, + EXCP_DINT, + EXCP_NMI, + EXCP_EXT_INTERRUPT, + EXCP_AdEL, + EXCP_AdES, + EXCP_TLBF, + EXCP_IBE, + EXCP_SYSCALL, + EXCP_BREAK, + EXCP_FPDIS, + EXCP_LSXDIS, + EXCP_LASXDIS, + EXCP_RI, + EXCP_OVERFLOW, + EXCP_TRAP, + EXCP_FPE, + EXCP_LTLBL, + EXCP_TLBL, + EXCP_TLBS, + EXCP_DBE, + EXCP_TLBXI, + EXCP_TLBRI, + EXCP_TLBPE, + EXCP_BTDIS, + + EXCP_LAST = EXCP_BTDIS, +}; + +/* + * This is an internally generated WAKE request line. + * It is driven by the CPU itself. Raised when the MT + * block wants to wake a VPE from an inactive state and + * cleared when VPE goes from active to inactive. + */ +#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0 + +int cpu_loongarch_signal_handler(int host_signum, void *pinfo, void *puc); + +#define LOONGARCH_CPU_TYPE_SUFFIX "-" TYPE_LOONGARCH_CPU +#define LOONGARCH_CPU_TYPE_NAME(model) model LOONGARCH_CPU_TYPE_SUFFIX +#define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU + +/* helper.c */ +target_ulong exception_resume_pc(CPULOONGARCHState *env); + +/* gdbstub.c */ +void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs); +void mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def); + +static inline void cpu_get_tb_cpu_state(CPULOONGARCHState *env, + target_ulong *pc, + target_ulong *cs_base, uint32_t *flags) +{ + *pc = env->active_tc.PC; + *cs_base = 0; + *flags = env->hflags & (LARCH_HFLAG_TMASK | LARCH_HFLAG_BMASK); +} + +static inline bool cpu_refill_state(CPULOONGARCHState *env) +{ + return env->CSR_TLBRERA & 0x1; +} + +extern const char *const regnames[]; +extern const char *const fregnames[]; +#endif /* LOONGARCH_CPU_H */ diff --git a/target/loongarch64/csr_helper.c b/target/loongarch64/csr_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..093e7e54d80141ac5bf3f570f8aee5180a785bd6 --- /dev/null +++ b/target/loongarch64/csr_helper.c @@ -0,0 +1,697 @@ +/* + * loongarch tlb emulation helpers for qemu. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "sysemu/kvm.h" +#include "hw/irq.h" +#include "cpu-csr.h" +#include "instmap.h" + +#ifndef CONFIG_USER_ONLY +target_ulong helper_csr_rdq(CPULOONGARCHState *env, uint64_t csr) +{ + int64_t v; + +#define CASE_CSR_RDQ(csr) \ + case LOONGARCH_CSR_##csr: { \ + v = env->CSR_##csr; \ + break; \ + }; + + switch (csr) { + CASE_CSR_RDQ(CRMD) + CASE_CSR_RDQ(PRMD) + CASE_CSR_RDQ(EUEN) + CASE_CSR_RDQ(MISC) + CASE_CSR_RDQ(ECFG) + CASE_CSR_RDQ(ESTAT) + CASE_CSR_RDQ(ERA) + CASE_CSR_RDQ(BADV) + CASE_CSR_RDQ(BADI) + CASE_CSR_RDQ(EEPN) + CASE_CSR_RDQ(TLBIDX) + CASE_CSR_RDQ(TLBEHI) + CASE_CSR_RDQ(TLBELO0) + CASE_CSR_RDQ(TLBELO1) + CASE_CSR_RDQ(TLBWIRED) + CASE_CSR_RDQ(GTLBC) + CASE_CSR_RDQ(TRGP) + CASE_CSR_RDQ(ASID) + CASE_CSR_RDQ(PGDL) + CASE_CSR_RDQ(PGDH) + CASE_CSR_RDQ(PGD) + CASE_CSR_RDQ(PWCTL0) + CASE_CSR_RDQ(PWCTL1) + CASE_CSR_RDQ(STLBPGSIZE) + CASE_CSR_RDQ(RVACFG) + CASE_CSR_RDQ(CPUID) + CASE_CSR_RDQ(PRCFG1) + CASE_CSR_RDQ(PRCFG2) + CASE_CSR_RDQ(PRCFG3) + CASE_CSR_RDQ(KS0) + CASE_CSR_RDQ(KS1) + CASE_CSR_RDQ(KS2) + CASE_CSR_RDQ(KS3) + CASE_CSR_RDQ(KS4) + CASE_CSR_RDQ(KS5) + CASE_CSR_RDQ(KS6) + CASE_CSR_RDQ(KS7) + CASE_CSR_RDQ(KS8) + CASE_CSR_RDQ(TMID) + CASE_CSR_RDQ(TCFG) + case LOONGARCH_CSR_TVAL: + v = cpu_loongarch_get_stable_timer_ticks(env); + break; + CASE_CSR_RDQ(CNTC) + CASE_CSR_RDQ(TINTCLR) + CASE_CSR_RDQ(GSTAT) + CASE_CSR_RDQ(GCFG) + CASE_CSR_RDQ(GINTC) + CASE_CSR_RDQ(GCNTC) + CASE_CSR_RDQ(LLBCTL) + CASE_CSR_RDQ(IMPCTL1) + CASE_CSR_RDQ(IMPCTL2) + CASE_CSR_RDQ(GNMI) + CASE_CSR_RDQ(TLBRENT) + CASE_CSR_RDQ(TLBRBADV) + CASE_CSR_RDQ(TLBRERA) + CASE_CSR_RDQ(TLBRSAVE) + CASE_CSR_RDQ(TLBRELO0) + CASE_CSR_RDQ(TLBRELO1) + CASE_CSR_RDQ(TLBREHI) + CASE_CSR_RDQ(TLBRPRMD) + CASE_CSR_RDQ(ERRCTL) + CASE_CSR_RDQ(ERRINFO) + CASE_CSR_RDQ(ERRINFO1) + CASE_CSR_RDQ(ERRENT) + CASE_CSR_RDQ(ERRERA) + CASE_CSR_RDQ(ERRSAVE) + CASE_CSR_RDQ(CTAG) + CASE_CSR_RDQ(DMWIN0) + CASE_CSR_RDQ(DMWIN1) + CASE_CSR_RDQ(DMWIN2) + CASE_CSR_RDQ(DMWIN3) + CASE_CSR_RDQ(PERFCTRL0) + CASE_CSR_RDQ(PERFCNTR0) + CASE_CSR_RDQ(PERFCTRL1) + CASE_CSR_RDQ(PERFCNTR1) + CASE_CSR_RDQ(PERFCTRL2) + CASE_CSR_RDQ(PERFCNTR2) + CASE_CSR_RDQ(PERFCTRL3) + CASE_CSR_RDQ(PERFCNTR3) + /* debug */ + CASE_CSR_RDQ(MWPC) + CASE_CSR_RDQ(MWPS) + CASE_CSR_RDQ(DB0ADDR) + CASE_CSR_RDQ(DB0MASK) + CASE_CSR_RDQ(DB0CTL) + CASE_CSR_RDQ(DB0ASID) + CASE_CSR_RDQ(DB1ADDR) + CASE_CSR_RDQ(DB1MASK) + CASE_CSR_RDQ(DB1CTL) + CASE_CSR_RDQ(DB1ASID) + CASE_CSR_RDQ(DB2ADDR) + CASE_CSR_RDQ(DB2MASK) + CASE_CSR_RDQ(DB2CTL) + CASE_CSR_RDQ(DB2ASID) + CASE_CSR_RDQ(DB3ADDR) + CASE_CSR_RDQ(DB3MASK) + CASE_CSR_RDQ(DB3CTL) + CASE_CSR_RDQ(DB3ASID) + CASE_CSR_RDQ(FWPC) + CASE_CSR_RDQ(FWPS) + CASE_CSR_RDQ(IB0ADDR) + CASE_CSR_RDQ(IB0MASK) + CASE_CSR_RDQ(IB0CTL) + CASE_CSR_RDQ(IB0ASID) + CASE_CSR_RDQ(IB1ADDR) + CASE_CSR_RDQ(IB1MASK) + CASE_CSR_RDQ(IB1CTL) + CASE_CSR_RDQ(IB1ASID) + CASE_CSR_RDQ(IB2ADDR) + CASE_CSR_RDQ(IB2MASK) + CASE_CSR_RDQ(IB2CTL) + CASE_CSR_RDQ(IB2ASID) + CASE_CSR_RDQ(IB3ADDR) + CASE_CSR_RDQ(IB3MASK) + CASE_CSR_RDQ(IB3CTL) + CASE_CSR_RDQ(IB3ASID) + CASE_CSR_RDQ(IB4ADDR) + CASE_CSR_RDQ(IB4MASK) + CASE_CSR_RDQ(IB4CTL) + CASE_CSR_RDQ(IB4ASID) + CASE_CSR_RDQ(IB5ADDR) + CASE_CSR_RDQ(IB5MASK) + CASE_CSR_RDQ(IB5CTL) + CASE_CSR_RDQ(IB5ASID) + CASE_CSR_RDQ(IB6ADDR) + CASE_CSR_RDQ(IB6MASK) + CASE_CSR_RDQ(IB6CTL) + CASE_CSR_RDQ(IB6ASID) + CASE_CSR_RDQ(IB7ADDR) + 
CASE_CSR_RDQ(IB7MASK) + CASE_CSR_RDQ(IB7CTL) + CASE_CSR_RDQ(IB7ASID) + CASE_CSR_RDQ(DEBUG) + CASE_CSR_RDQ(DERA) + CASE_CSR_RDQ(DESAVE) + default : + assert(0); + } + +#undef CASE_CSR_RDQ + compute_hflags(env); + return v; +} + +target_ulong helper_csr_wrq(CPULOONGARCHState *env, target_ulong val, + uint64_t csr) +{ + int64_t old_v, v; + old_v = -1; + v = val; + +#define CASE_CSR_WRQ(csr) \ + case LOONGARCH_CSR_##csr: { \ + old_v = env->CSR_##csr; \ + env->CSR_##csr = v; \ + break; \ + }; + + switch (csr) { + CASE_CSR_WRQ(CRMD) + CASE_CSR_WRQ(PRMD) + CASE_CSR_WRQ(EUEN) + CASE_CSR_WRQ(MISC) + CASE_CSR_WRQ(ECFG) + CASE_CSR_WRQ(ESTAT) + CASE_CSR_WRQ(ERA) + CASE_CSR_WRQ(BADV) + CASE_CSR_WRQ(BADI) + CASE_CSR_WRQ(EEPN) + CASE_CSR_WRQ(TLBIDX) + CASE_CSR_WRQ(TLBEHI) + CASE_CSR_WRQ(TLBELO0) + CASE_CSR_WRQ(TLBELO1) + CASE_CSR_WRQ(TLBWIRED) + CASE_CSR_WRQ(GTLBC) + CASE_CSR_WRQ(TRGP) + CASE_CSR_WRQ(ASID) + CASE_CSR_WRQ(PGDL) + CASE_CSR_WRQ(PGDH) + CASE_CSR_WRQ(PGD) + CASE_CSR_WRQ(PWCTL0) + CASE_CSR_WRQ(PWCTL1) + CASE_CSR_WRQ(STLBPGSIZE) + CASE_CSR_WRQ(RVACFG) + CASE_CSR_WRQ(CPUID) + CASE_CSR_WRQ(PRCFG1) + CASE_CSR_WRQ(PRCFG2) + CASE_CSR_WRQ(PRCFG3) + CASE_CSR_WRQ(KS0) + CASE_CSR_WRQ(KS1) + CASE_CSR_WRQ(KS2) + CASE_CSR_WRQ(KS3) + CASE_CSR_WRQ(KS4) + CASE_CSR_WRQ(KS5) + CASE_CSR_WRQ(KS6) + CASE_CSR_WRQ(KS7) + CASE_CSR_WRQ(KS8) + CASE_CSR_WRQ(TMID) + case LOONGARCH_CSR_TCFG: + old_v = env->CSR_TCFG; + cpu_loongarch_store_stable_timer_config(env, v); + break; + CASE_CSR_WRQ(TVAL) + CASE_CSR_WRQ(CNTC) + case LOONGARCH_CSR_TINTCLR: + old_v = 0; + qemu_irq_lower(env->irq[IRQ_TIMER]); + break; + CASE_CSR_WRQ(GSTAT) + CASE_CSR_WRQ(GCFG) + CASE_CSR_WRQ(GINTC) + CASE_CSR_WRQ(GCNTC) + CASE_CSR_WRQ(LLBCTL) + CASE_CSR_WRQ(IMPCTL1) + case LOONGARCH_CSR_IMPCTL2: + if (v & CSR_IMPCTL2_MTLB) { + ls3a5k_flush_vtlb(env); + } + if (v & CSR_IMPCTL2_STLB) { + ls3a5k_flush_ftlb(env); + } + break; + CASE_CSR_WRQ(GNMI) + CASE_CSR_WRQ(TLBRENT) + CASE_CSR_WRQ(TLBRBADV) + CASE_CSR_WRQ(TLBRERA) + CASE_CSR_WRQ(TLBRSAVE) + CASE_CSR_WRQ(TLBRELO0) + CASE_CSR_WRQ(TLBRELO1) + CASE_CSR_WRQ(TLBREHI) + CASE_CSR_WRQ(TLBRPRMD) + CASE_CSR_WRQ(ERRCTL) + CASE_CSR_WRQ(ERRINFO) + CASE_CSR_WRQ(ERRINFO1) + CASE_CSR_WRQ(ERRENT) + CASE_CSR_WRQ(ERRERA) + CASE_CSR_WRQ(ERRSAVE) + CASE_CSR_WRQ(CTAG) + CASE_CSR_WRQ(DMWIN0) + CASE_CSR_WRQ(DMWIN1) + CASE_CSR_WRQ(DMWIN2) + CASE_CSR_WRQ(DMWIN3) + CASE_CSR_WRQ(PERFCTRL0) + CASE_CSR_WRQ(PERFCNTR0) + CASE_CSR_WRQ(PERFCTRL1) + CASE_CSR_WRQ(PERFCNTR1) + CASE_CSR_WRQ(PERFCTRL2) + CASE_CSR_WRQ(PERFCNTR2) + CASE_CSR_WRQ(PERFCTRL3) + CASE_CSR_WRQ(PERFCNTR3) + /* debug */ + CASE_CSR_WRQ(MWPC) + CASE_CSR_WRQ(MWPS) + CASE_CSR_WRQ(DB0ADDR) + CASE_CSR_WRQ(DB0MASK) + CASE_CSR_WRQ(DB0CTL) + CASE_CSR_WRQ(DB0ASID) + CASE_CSR_WRQ(DB1ADDR) + CASE_CSR_WRQ(DB1MASK) + CASE_CSR_WRQ(DB1CTL) + CASE_CSR_WRQ(DB1ASID) + CASE_CSR_WRQ(DB2ADDR) + CASE_CSR_WRQ(DB2MASK) + CASE_CSR_WRQ(DB2CTL) + CASE_CSR_WRQ(DB2ASID) + CASE_CSR_WRQ(DB3ADDR) + CASE_CSR_WRQ(DB3MASK) + CASE_CSR_WRQ(DB3CTL) + CASE_CSR_WRQ(DB3ASID) + CASE_CSR_WRQ(FWPC) + CASE_CSR_WRQ(FWPS) + CASE_CSR_WRQ(IB0ADDR) + CASE_CSR_WRQ(IB0MASK) + CASE_CSR_WRQ(IB0CTL) + CASE_CSR_WRQ(IB0ASID) + CASE_CSR_WRQ(IB1ADDR) + CASE_CSR_WRQ(IB1MASK) + CASE_CSR_WRQ(IB1CTL) + CASE_CSR_WRQ(IB1ASID) + CASE_CSR_WRQ(IB2ADDR) + CASE_CSR_WRQ(IB2MASK) + CASE_CSR_WRQ(IB2CTL) + CASE_CSR_WRQ(IB2ASID) + CASE_CSR_WRQ(IB3ADDR) + CASE_CSR_WRQ(IB3MASK) + CASE_CSR_WRQ(IB3CTL) + CASE_CSR_WRQ(IB3ASID) + CASE_CSR_WRQ(IB4ADDR) + CASE_CSR_WRQ(IB4MASK) + CASE_CSR_WRQ(IB4CTL) + CASE_CSR_WRQ(IB4ASID) + CASE_CSR_WRQ(IB5ADDR) + 
CASE_CSR_WRQ(IB5MASK) + CASE_CSR_WRQ(IB5CTL) + CASE_CSR_WRQ(IB5ASID) + CASE_CSR_WRQ(IB6ADDR) + CASE_CSR_WRQ(IB6MASK) + CASE_CSR_WRQ(IB6CTL) + CASE_CSR_WRQ(IB6ASID) + CASE_CSR_WRQ(IB7ADDR) + CASE_CSR_WRQ(IB7MASK) + CASE_CSR_WRQ(IB7CTL) + CASE_CSR_WRQ(IB7ASID) + CASE_CSR_WRQ(DEBUG) + CASE_CSR_WRQ(DERA) + CASE_CSR_WRQ(DESAVE) + default : + assert(0); + } + + if (csr == LOONGARCH_CSR_ASID) { + if (old_v != v) { + tlb_flush(CPU(loongarch_env_get_cpu(env))); + } + } + +#undef CASE_CSR_WRQ + compute_hflags(env); + return old_v; +} + +target_ulong helper_csr_xchgq(CPULOONGARCHState *env, target_ulong val, + target_ulong mask, uint64_t csr) +{ + target_ulong v, tmp; + v = val & mask; + +#define CASE_CSR_XCHGQ(csr) \ + case LOONGARCH_CSR_##csr: { \ + val = env->CSR_##csr; \ + env->CSR_##csr = (env->CSR_##csr) & (~mask); \ + env->CSR_##csr = (env->CSR_##csr) | v; \ + break; \ + }; + + switch (csr) { + CASE_CSR_XCHGQ(CRMD) + CASE_CSR_XCHGQ(PRMD) + CASE_CSR_XCHGQ(EUEN) + CASE_CSR_XCHGQ(MISC) + CASE_CSR_XCHGQ(ECFG) + case LOONGARCH_CSR_ESTAT: + val = env->CSR_ESTAT; + qatomic_and(&env->CSR_ESTAT, ~mask); + qatomic_or(&env->CSR_ESTAT, v); + break; + CASE_CSR_XCHGQ(ERA) + CASE_CSR_XCHGQ(BADV) + CASE_CSR_XCHGQ(BADI) + CASE_CSR_XCHGQ(EEPN) + CASE_CSR_XCHGQ(TLBIDX) + CASE_CSR_XCHGQ(TLBEHI) + CASE_CSR_XCHGQ(TLBELO0) + CASE_CSR_XCHGQ(TLBELO1) + CASE_CSR_XCHGQ(TLBWIRED) + CASE_CSR_XCHGQ(GTLBC) + CASE_CSR_XCHGQ(TRGP) + CASE_CSR_XCHGQ(ASID) + CASE_CSR_XCHGQ(PGDL) + CASE_CSR_XCHGQ(PGDH) + CASE_CSR_XCHGQ(PGD) + CASE_CSR_XCHGQ(PWCTL0) + CASE_CSR_XCHGQ(PWCTL1) + CASE_CSR_XCHGQ(STLBPGSIZE) + CASE_CSR_XCHGQ(RVACFG) + CASE_CSR_XCHGQ(CPUID) + CASE_CSR_XCHGQ(PRCFG1) + CASE_CSR_XCHGQ(PRCFG2) + CASE_CSR_XCHGQ(PRCFG3) + CASE_CSR_XCHGQ(KS0) + CASE_CSR_XCHGQ(KS1) + CASE_CSR_XCHGQ(KS2) + CASE_CSR_XCHGQ(KS3) + CASE_CSR_XCHGQ(KS4) + CASE_CSR_XCHGQ(KS5) + CASE_CSR_XCHGQ(KS6) + CASE_CSR_XCHGQ(KS7) + CASE_CSR_XCHGQ(KS8) + CASE_CSR_XCHGQ(TMID) + case LOONGARCH_CSR_TCFG: + val = env->CSR_TCFG; + tmp = val & ~mask; + tmp |= v; + cpu_loongarch_store_stable_timer_config(env, tmp); + break; + CASE_CSR_XCHGQ(TVAL) + CASE_CSR_XCHGQ(CNTC) + CASE_CSR_XCHGQ(TINTCLR) + CASE_CSR_XCHGQ(GSTAT) + CASE_CSR_XCHGQ(GCFG) + CASE_CSR_XCHGQ(GINTC) + CASE_CSR_XCHGQ(GCNTC) + CASE_CSR_XCHGQ(LLBCTL) + CASE_CSR_XCHGQ(IMPCTL1) + CASE_CSR_XCHGQ(IMPCTL2) + CASE_CSR_XCHGQ(GNMI) + CASE_CSR_XCHGQ(TLBRENT) + CASE_CSR_XCHGQ(TLBRBADV) + CASE_CSR_XCHGQ(TLBRERA) + CASE_CSR_XCHGQ(TLBRSAVE) + CASE_CSR_XCHGQ(TLBRELO0) + CASE_CSR_XCHGQ(TLBRELO1) + CASE_CSR_XCHGQ(TLBREHI) + CASE_CSR_XCHGQ(TLBRPRMD) + CASE_CSR_XCHGQ(ERRCTL) + CASE_CSR_XCHGQ(ERRINFO) + CASE_CSR_XCHGQ(ERRINFO1) + CASE_CSR_XCHGQ(ERRENT) + CASE_CSR_XCHGQ(ERRERA) + CASE_CSR_XCHGQ(ERRSAVE) + CASE_CSR_XCHGQ(CTAG) + CASE_CSR_XCHGQ(DMWIN0) + CASE_CSR_XCHGQ(DMWIN1) + CASE_CSR_XCHGQ(DMWIN2) + CASE_CSR_XCHGQ(DMWIN3) + CASE_CSR_XCHGQ(PERFCTRL0) + CASE_CSR_XCHGQ(PERFCNTR0) + CASE_CSR_XCHGQ(PERFCTRL1) + CASE_CSR_XCHGQ(PERFCNTR1) + CASE_CSR_XCHGQ(PERFCTRL2) + CASE_CSR_XCHGQ(PERFCNTR2) + CASE_CSR_XCHGQ(PERFCTRL3) + CASE_CSR_XCHGQ(PERFCNTR3) + /* debug */ + CASE_CSR_XCHGQ(MWPC) + CASE_CSR_XCHGQ(MWPS) + CASE_CSR_XCHGQ(DB0ADDR) + CASE_CSR_XCHGQ(DB0MASK) + CASE_CSR_XCHGQ(DB0CTL) + CASE_CSR_XCHGQ(DB0ASID) + CASE_CSR_XCHGQ(DB1ADDR) + CASE_CSR_XCHGQ(DB1MASK) + CASE_CSR_XCHGQ(DB1CTL) + CASE_CSR_XCHGQ(DB1ASID) + CASE_CSR_XCHGQ(DB2ADDR) + CASE_CSR_XCHGQ(DB2MASK) + CASE_CSR_XCHGQ(DB2CTL) + CASE_CSR_XCHGQ(DB2ASID) + CASE_CSR_XCHGQ(DB3ADDR) + CASE_CSR_XCHGQ(DB3MASK) + CASE_CSR_XCHGQ(DB3CTL) + CASE_CSR_XCHGQ(DB3ASID) + CASE_CSR_XCHGQ(FWPC) 
+ CASE_CSR_XCHGQ(FWPS) + CASE_CSR_XCHGQ(IB0ADDR) + CASE_CSR_XCHGQ(IB0MASK) + CASE_CSR_XCHGQ(IB0CTL) + CASE_CSR_XCHGQ(IB0ASID) + CASE_CSR_XCHGQ(IB1ADDR) + CASE_CSR_XCHGQ(IB1MASK) + CASE_CSR_XCHGQ(IB1CTL) + CASE_CSR_XCHGQ(IB1ASID) + CASE_CSR_XCHGQ(IB2ADDR) + CASE_CSR_XCHGQ(IB2MASK) + CASE_CSR_XCHGQ(IB2CTL) + CASE_CSR_XCHGQ(IB2ASID) + CASE_CSR_XCHGQ(IB3ADDR) + CASE_CSR_XCHGQ(IB3MASK) + CASE_CSR_XCHGQ(IB3CTL) + CASE_CSR_XCHGQ(IB3ASID) + CASE_CSR_XCHGQ(IB4ADDR) + CASE_CSR_XCHGQ(IB4MASK) + CASE_CSR_XCHGQ(IB4CTL) + CASE_CSR_XCHGQ(IB4ASID) + CASE_CSR_XCHGQ(IB5ADDR) + CASE_CSR_XCHGQ(IB5MASK) + CASE_CSR_XCHGQ(IB5CTL) + CASE_CSR_XCHGQ(IB5ASID) + CASE_CSR_XCHGQ(IB6ADDR) + CASE_CSR_XCHGQ(IB6MASK) + CASE_CSR_XCHGQ(IB6CTL) + CASE_CSR_XCHGQ(IB6ASID) + CASE_CSR_XCHGQ(IB7ADDR) + CASE_CSR_XCHGQ(IB7MASK) + CASE_CSR_XCHGQ(IB7CTL) + CASE_CSR_XCHGQ(IB7ASID) + CASE_CSR_XCHGQ(DEBUG) + CASE_CSR_XCHGQ(DERA) + CASE_CSR_XCHGQ(DESAVE) + default : + assert(0); + } + +#undef CASE_CSR_XCHGQ + compute_hflags(env); + return val; +} + +static target_ulong confbus_addr(CPULOONGARCHState *env, int cpuid, + target_ulong csr_addr) +{ + target_ulong addr; + target_ulong node_addr; + int cores_per_node = ((0x60018 >> 3) & 0xff) + 1; + + switch (cores_per_node) { + case 4: + assert(cpuid < 64); + node_addr = ((target_ulong)(cpuid & 0x3c) << 42); + break; + case 8: + assert(cpuid < 128); + node_addr = ((target_ulong)(cpuid & 0x78) << 41) + + ((target_ulong)(cpuid & 0x4) << 14); + break; + case 16: + assert(cpuid < 256); + node_addr = ((target_ulong)(cpuid & 0xf0) << 40) + + ((target_ulong)(cpuid & 0xc) << 14); + break; + default: + assert(0); + break; + } + + /* + * per core address + *0x10xx => ipi + * 0x18xx => extioi isr + */ + if (((csr_addr & 0xff00) == 0x1000)) { + addr = (csr_addr & 0xff) + (target_ulong)(cpuid << 8); + addr = 0x800000001f000000UL + addr; + return addr; + } else if ((csr_addr & 0xff00) == 0x1800) { + addr = (csr_addr & 0xff) + ((target_ulong)(cpuid << 8)); + addr = 0x800000001f020000UL + addr; + return addr; + } else if ((csr_addr & 0xff00) >= 0x1400 && (csr_addr & 0xff00) < 0x1d00) { + addr = 0x800000001f010000UL + ((csr_addr & 0xfff) - 0x400); + return addr; + } else if (csr_addr == 0x408) { + addr = csr_addr; + } else { + addr = csr_addr + node_addr; + } + + addr = 0x800000001fe00000UL + addr; + return addr; +} + +void helper_iocsr(CPULOONGARCHState *env, target_ulong r_addr, + target_ulong r_val, uint32_t op) +{ + target_ulong addr; + target_ulong val = env->active_tc.gpr[r_val]; + int mask; + + addr = confbus_addr(env, CPU(loongarch_env_get_cpu(env))->cpu_index, + env->active_tc.gpr[r_addr]); + + switch (env->active_tc.gpr[r_addr]) { + /* IPI send */ + case 0x1040: + if (op != OPC_LARCH_ST_W) { + return; + } + op = OPC_LARCH_ST_W; + break; + + /* Mail send */ + case 0x1048: + if (op != OPC_LARCH_ST_D) { + return; + } + op = OPC_LARCH_ST_D; + break; + + /* ANY send */ + case 0x1158: + if (op != OPC_LARCH_ST_D) { + return; + } + addr = confbus_addr(env, (val >> 16) & 0x3ff, val & 0xffff); + mask = (val >> 27) & 0xf; + val = (val >> 32); + switch (mask) { + case 0: + op = OPC_LARCH_ST_W; + break; + case 0x7: + op = OPC_LARCH_ST_B; + addr += 3; + val >>= 24; + break; + case 0xb: + op = OPC_LARCH_ST_B; + addr += 2; + val >>= 16; + break; + case 0xd: + op = OPC_LARCH_ST_B; + addr += 1; + val >>= 8; + break; + case 0xe: + op = OPC_LARCH_ST_B; + break; + case 0xc: + op = OPC_LARCH_ST_H; + break; + case 0x3: + op = OPC_LARCH_ST_H; + addr += 2; + val >>= 16; + break; + default: + qemu_log("Unsupported any_send 
mask0x%x\n", mask); + break; + } + break; + + default: + break; + } + + switch (op) { + case OPC_LARCH_LD_D: + env->active_tc.gpr[r_val] = cpu_ldq_data_ra(env, addr, GETPC()); + break; + case OPC_LARCH_LD_W: + env->active_tc.gpr[r_val] = cpu_ldl_data_ra(env, addr, GETPC()); + break; + case OPC_LARCH_LD_H: + assert(0); + break; + case OPC_LARCH_LD_B: + assert(0); + break; + case OPC_LARCH_ST_D: + cpu_stq_data_ra(env, addr, val, GETPC()); + break; + case OPC_LARCH_ST_W: + cpu_stl_data_ra(env, addr, val, GETPC()); + break; + case OPC_LARCH_ST_H: + cpu_stb_data_ra(env, addr, val, GETPC()); + break; + case OPC_LARCH_ST_B: + cpu_stb_data_ra(env, addr, val, GETPC()); + break; + default: + qemu_log("Unknown op 0x%x", op); + assert(0); + } +} +#endif + +target_ulong helper_cpucfg(CPULOONGARCHState *env, target_ulong rj) +{ + return 0; +} diff --git a/target/loongarch64/fpu.c b/target/loongarch64/fpu.c new file mode 100644 index 0000000000000000000000000000000000000000..f063c8bae02328cd1505efa178bac8ab5b939686 --- /dev/null +++ b/target/loongarch64/fpu.c @@ -0,0 +1,25 @@ +/* + * loongarch float point emulation helpers for qemu. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "fpu/softfloat.h" + +/* convert loongarch rounding mode in fcsr0 to IEEE library */ +unsigned int ieee_rm[] = { float_round_nearest_even, float_round_to_zero, + float_round_up, float_round_down }; diff --git a/target/loongarch64/fpu_helper.c b/target/loongarch64/fpu_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..033bf0de842a7ccb508c585f52e489471231f465 --- /dev/null +++ b/target/loongarch64/fpu_helper.c @@ -0,0 +1,891 @@ +/* + * loongarch float point emulation helpers for qemu. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "fpu/softfloat.h" + +#define FP_TO_INT32_OVERFLOW 0x7fffffff +#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL + +#define FLOAT_CLASS_SIGNALING_NAN 0x001 +#define FLOAT_CLASS_QUIET_NAN 0x002 +#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004 +#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008 +#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010 +#define FLOAT_CLASS_NEGATIVE_ZERO 0x020 +#define FLOAT_CLASS_POSITIVE_INFINITY 0x040 +#define FLOAT_CLASS_POSITIVE_NORMAL 0x080 +#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100 +#define FLOAT_CLASS_POSITIVE_ZERO 0x200 + +target_ulong helper_movfcsr2gr(CPULOONGARCHState *env, uint32_t reg) +{ + target_ulong r = 0; + + switch (reg) { + case 0: + r = (uint32_t)env->active_fpu.fcsr0; + break; + case 1: + r = (env->active_fpu.fcsr0 & FCSR0_M1); + break; + case 2: + r = (env->active_fpu.fcsr0 & FCSR0_M2); + break; + case 3: + r = (env->active_fpu.fcsr0 & FCSR0_M3); + break; + case 16: + r = (uint32_t)env->active_fpu.vcsr16; + break; + default: + printf("%s: warning, fcsr '%d' not supported\n", __func__, reg); + assert(0); + break; + } + + return r; +} + +void helper_movgr2fcsr(CPULOONGARCHState *env, target_ulong arg1, + uint32_t fcsr, uint32_t rj) +{ + switch (fcsr) { + case 0: + env->active_fpu.fcsr0 = arg1; + break; + case 1: + env->active_fpu.fcsr0 = + (arg1 & FCSR0_M1) | (env->active_fpu.fcsr0 & ~FCSR0_M1); + break; + case 2: + env->active_fpu.fcsr0 = + (arg1 & FCSR0_M2) | (env->active_fpu.fcsr0 & ~FCSR0_M2); + break; + case 3: + env->active_fpu.fcsr0 = + (arg1 & FCSR0_M3) | (env->active_fpu.fcsr0 & ~FCSR0_M3); + break; + case 16: + env->active_fpu.vcsr16 = arg1; + break; + default: + printf("%s: warning, fcsr '%d' not supported\n", __func__, fcsr); + assert(0); + break; + } + restore_fp_status(env); + set_float_exception_flags(0, &env->active_fpu.fp_status); +} + +void helper_movreg2cf(CPULOONGARCHState *env, uint32_t cd, target_ulong src) +{ + env->active_fpu.cf[cd & 0x7] = src & 0x1; +} + +void helper_movreg2cf_i32(CPULOONGARCHState *env, uint32_t cd, uint32_t src) +{ + env->active_fpu.cf[cd & 0x7] = src & 0x1; +} + +void helper_movreg2cf_i64(CPULOONGARCHState *env, uint32_t cd, uint64_t src) +{ + env->active_fpu.cf[cd & 0x7] = src & 0x1; +} + +target_ulong helper_movcf2reg(CPULOONGARCHState *env, uint32_t cj) +{ + return (target_ulong)env->active_fpu.cf[cj & 0x7]; +} + +int ieee_ex_to_loongarch(int xcpt) +{ + int ret = 0; + if (xcpt) { + if (xcpt & float_flag_invalid) { + ret |= FP_INVALID; + } + if (xcpt & float_flag_overflow) { + ret |= FP_OVERFLOW; + } + if (xcpt & float_flag_underflow) { + ret |= FP_UNDERFLOW; + } + if (xcpt & float_flag_divbyzero) { + ret |= FP_DIV0; + } + if (xcpt & float_flag_inexact) { + ret |= FP_INEXACT; + } + } + return ret; +} + +static inline void update_fcsr0(CPULOONGARCHState *env, uintptr_t pc) +{ + int tmp = ieee_ex_to_loongarch( + get_float_exception_flags(&env->active_fpu.fp_status)); + + SET_FP_CAUSE(env->active_fpu.fcsr0, tmp); + if (tmp) { + set_float_exception_flags(0, &env->active_fpu.fp_status); + + if (GET_FP_ENABLE(env->active_fpu.fcsr0) & tmp) { + do_raise_exception(env, EXCP_FPE, pc); + } else { + UPDATE_FP_FLAGS(env->active_fpu.fcsr0, tmp); + } + } +} + +/* unary operations, modifying fp status */ +uint64_t helper_float_sqrt_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status); + update_fcsr0(env, 
GETPC()); + return fdt0; +} + +uint32_t helper_float_sqrt_s(CPULOONGARCHState *env, uint32_t fst0) +{ + fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fst0; +} + +uint64_t helper_float_cvtd_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint64_t fdt2; + + fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvtd_w(CPULOONGARCHState *env, uint32_t wt0) +{ + uint64_t fdt2; + + fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvtd_l(CPULOONGARCHState *env, uint64_t dt0) +{ + uint64_t fdt2; + + fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fdt2; +} + +uint64_t helper_float_cvt_l_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint64_t helper_float_cvt_l_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint32_t helper_float_cvts_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint32_t fst2; + + fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvts_w(CPULOONGARCHState *env, uint32_t wt0) +{ + uint32_t fst2; + + fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvts_l(CPULOONGARCHState *env, uint64_t dt0) +{ + uint32_t fst2; + + fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fst2; +} + +uint32_t helper_float_cvt_w_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint32_t helper_float_cvt_w_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint64_t helper_float_round_l_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint64_t helper_float_round_l_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if 
(get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint32_t helper_float_round_w_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint32_t helper_float_round_w_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_nearest_even, + &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint64_t helper_float_trunc_l_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint64_t helper_float_trunc_l_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint64_t dt2; + + dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint32_t helper_float_trunc_w_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint32_t wt2; + + wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint32_t helper_float_trunc_w_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint64_t helper_float_ceil_l_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint64_t helper_float_ceil_l_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint32_t helper_float_ceil_w_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint32_t wt2; + + 
set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint32_t helper_float_ceil_w_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint64_t helper_float_floor_l_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint64_t helper_float_floor_l_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint64_t dt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + dt2 = FP_TO_INT64_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return dt2; +} + +uint32_t helper_float_floor_w_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +uint32_t helper_float_floor_w_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint32_t wt2; + + set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status); + wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status); + restore_rounding_mode(env); + if (get_float_exception_flags(&env->active_fpu.fp_status) & + (float_flag_invalid | float_flag_overflow)) { + wt2 = FP_TO_INT32_OVERFLOW; + } + update_fcsr0(env, GETPC()); + return wt2; +} + +/* unary operations, not modifying fp status */ +#define FLOAT_UNOP(name) \ + uint64_t helper_float_##name##_d(uint64_t fdt0) \ + { \ + return float64_##name(fdt0); \ + } \ + uint32_t helper_float_##name##_s(uint32_t fst0) \ + { \ + return float32_##name(fst0); \ + } + +FLOAT_UNOP(abs) +FLOAT_UNOP(chs) +#undef FLOAT_UNOP + +uint64_t helper_float_recip_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_recip_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fst2; +} + +uint64_t helper_float_rsqrt_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint64_t fdt2; + + fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status); + fdt2 = float64_div(float64_one, fdt2, 
&env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fdt2; +} + +uint32_t helper_float_rsqrt_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint32_t fst2; + + fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status); + fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fst2; +} + +uint32_t helper_float_rint_s(CPULOONGARCHState *env, uint32_t fs) +{ + uint32_t fdret; + + fdret = float32_round_to_int(fs, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fdret; +} + +uint64_t helper_float_rint_d(CPULOONGARCHState *env, uint64_t fs) +{ + uint64_t fdret; + + fdret = float64_round_to_int(fs, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return fdret; +} + +#define FLOAT_CLASS(name, bits) \ + uint##bits##_t float_##name(uint##bits##_t arg, float_status *status) \ + { \ + if (float##bits##_is_signaling_nan(arg, status)) { \ + return FLOAT_CLASS_SIGNALING_NAN; \ + } else if (float##bits##_is_quiet_nan(arg, status)) { \ + return FLOAT_CLASS_QUIET_NAN; \ + } else if (float##bits##_is_neg(arg)) { \ + if (float##bits##_is_infinity(arg)) { \ + return FLOAT_CLASS_NEGATIVE_INFINITY; \ + } else if (float##bits##_is_zero(arg)) { \ + return FLOAT_CLASS_NEGATIVE_ZERO; \ + } else if (float##bits##_is_zero_or_denormal(arg)) { \ + return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \ + } else { \ + return FLOAT_CLASS_NEGATIVE_NORMAL; \ + } \ + } else { \ + if (float##bits##_is_infinity(arg)) { \ + return FLOAT_CLASS_POSITIVE_INFINITY; \ + } else if (float##bits##_is_zero(arg)) { \ + return FLOAT_CLASS_POSITIVE_ZERO; \ + } else if (float##bits##_is_zero_or_denormal(arg)) { \ + return FLOAT_CLASS_POSITIVE_SUBNORMAL; \ + } else { \ + return FLOAT_CLASS_POSITIVE_NORMAL; \ + } \ + } \ + } \ + \ + uint##bits##_t helper_float_##name(CPULOONGARCHState *env, \ + uint##bits##_t arg) \ + { \ + return float_##name(arg, &env->active_fpu.fp_status); \ + } + +FLOAT_CLASS(class_s, 32) +FLOAT_CLASS(class_d, 64) +#undef FLOAT_CLASS + +/* binary operations */ +#define FLOAT_BINOP(name) \ + uint64_t helper_float_##name##_d(CPULOONGARCHState *env, uint64_t fdt0, \ + uint64_t fdt1) \ + { \ + uint64_t dt2; \ + \ + dt2 = float64_##name(fdt0, fdt1, &env->active_fpu.fp_status); \ + update_fcsr0(env, GETPC()); \ + return dt2; \ + } \ + \ + uint32_t helper_float_##name##_s(CPULOONGARCHState *env, uint32_t fst0, \ + uint32_t fst1) \ + { \ + uint32_t wt2; \ + \ + wt2 = float32_##name(fst0, fst1, &env->active_fpu.fp_status); \ + update_fcsr0(env, GETPC()); \ + return wt2; \ + } + +FLOAT_BINOP(add) +FLOAT_BINOP(sub) +FLOAT_BINOP(mul) +FLOAT_BINOP(div) +#undef FLOAT_BINOP + +uint64_t helper_float_exp2_d(CPULOONGARCHState *env, uint64_t fdt0, + uint64_t fdt1) +{ + uint64_t dt2; + int64_t n = (int64_t)fdt1; + + dt2 = float64_scalbn(fdt0, n > 0x1000 ? 0x1000 : n < -0x1000 ? -0x1000 : n, + &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return dt2; +} + +uint32_t helper_float_exp2_s(CPULOONGARCHState *env, uint32_t fst0, + uint32_t fst1) +{ + uint32_t wt2; + int32_t n = (int32_t)fst1; + + wt2 = float32_scalbn(fst0, n > 0x200 ? 0x200 : n < -0x200 ? 
-0x200 : n, + &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return wt2; +} + +#define FLOAT_MINMAX(name, bits, minmaxfunc) \ + uint##bits##_t helper_float_##name(CPULOONGARCHState *env, \ + uint##bits##_t fs, uint##bits##_t ft) \ + { \ + uint##bits##_t fdret; \ + \ + fdret = \ + float##bits##_##minmaxfunc(fs, ft, &env->active_fpu.fp_status); \ + update_fcsr0(env, GETPC()); \ + return fdret; \ + } + +FLOAT_MINMAX(max_s, 32, maxnum) +FLOAT_MINMAX(max_d, 64, maxnum) +FLOAT_MINMAX(maxa_s, 32, maxnummag) +FLOAT_MINMAX(maxa_d, 64, maxnummag) + +FLOAT_MINMAX(min_s, 32, minnum) +FLOAT_MINMAX(min_d, 64, minnum) +FLOAT_MINMAX(mina_s, 32, minnummag) +FLOAT_MINMAX(mina_d, 64, minnummag) +#undef FLOAT_MINMAX + +#define FLOAT_FMADDSUB(name, bits, muladd_arg) \ + uint##bits##_t helper_float_##name(CPULOONGARCHState *env, \ + uint##bits##_t fs, uint##bits##_t ft, \ + uint##bits##_t fd) \ + { \ + uint##bits##_t fdret; \ + \ + fdret = float##bits##_muladd(fs, ft, fd, muladd_arg, \ + &env->active_fpu.fp_status); \ + update_fcsr0(env, GETPC()); \ + return fdret; \ + } + +FLOAT_FMADDSUB(maddf_s, 32, 0) +FLOAT_FMADDSUB(maddf_d, 64, 0) +FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_c) +FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_c) +FLOAT_FMADDSUB(nmaddf_s, 32, float_muladd_negate_result) +FLOAT_FMADDSUB(nmaddf_d, 64, float_muladd_negate_result) +FLOAT_FMADDSUB(nmsubf_s, 32, + float_muladd_negate_result | float_muladd_negate_c) +FLOAT_FMADDSUB(nmsubf_d, 64, + float_muladd_negate_result | float_muladd_negate_c) +#undef FLOAT_FMADDSUB + +/* compare operations */ +#define FOP_CONDN_D(op, cond) \ + uint64_t helper_cmp_d_##op(CPULOONGARCHState *env, uint64_t fdt0, \ + uint64_t fdt1) \ + { \ + uint64_t c; \ + c = cond; \ + update_fcsr0(env, GETPC()); \ + if (c) { \ + return -1; \ + } else { \ + return 0; \ + } \ + } + +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered_quiet() is still called. + */ +FOP_CONDN_D(af, + (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), + 0)) +FOP_CONDN_D(un, + (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))) +FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(ueq, + (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(ult, + (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(ule, + (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float64_unordered() is still called. 
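+ * The call is kept purely for its side effect: it raises the IEEE
+ * invalid flag for NaN operands, which update_fcsr0() then reports
+ * through fcsr0.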
+ */ +FOP_CONDN_D(saf, + (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0)) +FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))) +FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(une, + (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_le(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) +FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) || + float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))) + +#define FOP_CONDN_S(op, cond) \ + uint32_t helper_cmp_s_##op(CPULOONGARCHState *env, uint32_t fst0, \ + uint32_t fst1) \ + { \ + uint64_t c; \ + c = cond; \ + update_fcsr0(env, GETPC()); \ + if (c) { \ + return -1; \ + } else { \ + return 0; \ + } \ + } + +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered_quiet() is still called. + */ +FOP_CONDN_S(af, + (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), + 0)) +FOP_CONDN_S(un, + (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))) +FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(ueq, + (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || + float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(ult, + (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || + float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(ule, + (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || + float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) +/* + * NOTE: the comma operator will make "cond" to eval to false, + * but float32_unordered() is still called. 
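+ * As for the double-precision comparisons above, only the resulting
+ * exception flags matter here.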
+ */ +FOP_CONDN_S(saf, + (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0)) +FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status))) +FOP_CONDN_S(seq, (float32_eq(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || + float32_eq(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(slt, (float32_lt(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || + float32_lt(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sle, (float32_le(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || + float32_le(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, &env->active_fpu.fp_status) || + float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(une, + (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || + float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) || + float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) || + float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sor, (float32_le(fst1, fst0, &env->active_fpu.fp_status) || + float32_le(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || + float32_lt(fst1, fst0, &env->active_fpu.fp_status) || + float32_lt(fst0, fst1, &env->active_fpu.fp_status))) +FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status) || + float32_lt(fst0, fst1, &env->active_fpu.fp_status))) + +uint32_t helper_float_logb_s(CPULOONGARCHState *env, uint32_t fst0) +{ + uint32_t wt2; + + wt2 = float32_log2(fst0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return wt2; +} + +uint64_t helper_float_logb_d(CPULOONGARCHState *env, uint64_t fdt0) +{ + uint64_t dt2; + + dt2 = float64_log2(fdt0, &env->active_fpu.fp_status); + update_fcsr0(env, GETPC()); + return dt2; +} + +target_ulong helper_fsel(CPULOONGARCHState *env, target_ulong fj, + target_ulong fk, uint32_t ca) +{ + if (env->active_fpu.cf[ca & 0x7]) { + return fk; + } else { + return fj; + } +} diff --git a/target/loongarch64/fpu_helper.h b/target/loongarch64/fpu_helper.h new file mode 100644 index 0000000000000000000000000000000000000000..9efa7e30ca2572c4c1cfd04d1e92f6fc2456831e --- /dev/null +++ b/target/loongarch64/fpu_helper.h @@ -0,0 +1,127 @@ +/* + * loongarch internal definitions and helpers + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#ifndef LOONGARCH_FPU_H +#define LOONGARCH_FPU_H + +#include "cpu-csr.h" + +extern const struct loongarch_def_t loongarch_defs[]; +extern const int loongarch_defs_number; + +enum CPULSXDataFormat { DF_BYTE = 0, DF_HALF, DF_WORD, DF_DOUBLE, DF_QUAD }; + +void loongarch_cpu_do_interrupt(CPUState *cpu); +bool loongarch_cpu_exec_interrupt(CPUState *cpu, int int_req); +void loongarch_cpu_do_unaligned_access(CPUState *cpu, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; + +#if !defined(CONFIG_USER_ONLY) + +typedef struct r4k_tlb_t r4k_tlb_t; +struct r4k_tlb_t { + target_ulong VPN; + uint32_t PageMask; + uint16_t ASID; + unsigned int G:1; + unsigned int C0:3; + unsigned int C1:3; + unsigned int V0:1; + unsigned int V1:1; + unsigned int D0:1; + unsigned int D1:1; + unsigned int XI0:1; + unsigned int XI1:1; + unsigned int RI0:1; + unsigned int RI1:1; + unsigned int EHINV:1; + uint64_t PPN[2]; +}; + +int no_mmu_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +int fixed_mmu_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +int r4k_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); + +/* loongarch 3a5000 tlb helper function : lisa csr */ +int ls3a5k_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot, + target_ulong address, int rw, int access_type); +void ls3a5k_helper_tlbwr(CPULOONGARCHState *env); +void ls3a5k_helper_tlbfill(CPULOONGARCHState *env); +void ls3a5k_helper_tlbsrch(CPULOONGARCHState *env); +void ls3a5k_helper_tlbrd(CPULOONGARCHState *env); +void ls3a5k_helper_tlbclr(CPULOONGARCHState *env); +void ls3a5k_helper_tlbflush(CPULOONGARCHState *env); +void ls3a5k_invalidate_tlb(CPULOONGARCHState *env, int idx); +void ls3a5k_helper_invtlb(CPULOONGARCHState *env, target_ulong addr, + target_ulong info, int op); +void ls3a5k_flush_vtlb(CPULOONGARCHState *env); +void ls3a5k_flush_ftlb(CPULOONGARCHState *env); +hwaddr cpu_loongarch_translate_address(CPULOONGARCHState *env, + target_ulong address, int rw); +#endif + +#define cpu_signal_handler cpu_loongarch_signal_handler + +static inline bool cpu_loongarch_hw_interrupts_enabled(CPULOONGARCHState *env) +{ + bool ret = 0; + + ret = env->CSR_CRMD & (1 << CSR_CRMD_IE_SHIFT); + + return ret; +} + +void loongarch_tcg_init(void); + +/* helper.c */ +bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, bool probe, + uintptr_t retaddr); + +/* op_helper.c */ +uint32_t float_class_s(uint32_t arg, float_status *fst); +uint64_t float_class_d(uint64_t arg, float_status *fst); + +int ieee_ex_to_loongarch(int xcpt); +void update_pagemask(CPULOONGARCHState *env, target_ulong arg1, + int32_t *pagemask); + +void cpu_loongarch_tlb_flush(CPULOONGARCHState *env); +void sync_c0_status(CPULOONGARCHState *env, CPULOONGARCHState *cpu, int tc); + +void QEMU_NORETURN do_raise_exception_err(CPULOONGARCHState *env, + uint32_t exception, int error_code, + uintptr_t pc); +int loongarch_read_qxfer(CPUState *cs, const char *annex, uint8_t *read_buf, + unsigned long offset, unsigned long len); +int loongarch_write_qxfer(CPUState *cs, const char *annex, + const uint8_t *write_buf, unsigned long offset, + unsigned long len); + +static inline void QEMU_NORETURN do_raise_exception(CPULOONGARCHState *env, + uint32_t exception, + uintptr_t pc) +{ + do_raise_exception_err(env, 
exception, 0, pc); +} +#endif diff --git a/target/loongarch64/gdbstub.c b/target/loongarch64/gdbstub.c new file mode 100644 index 0000000000000000000000000000000000000000..5ee91dc930959584848d923ccf95b5a6dbec1c7c --- /dev/null +++ b/target/loongarch64/gdbstub.c @@ -0,0 +1,164 @@ +/* + * LOONGARCH gdb server stub + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "internal.h" +#include "exec/gdbstub.h" +#ifdef CONFIG_TCG +#include "exec/helper-proto.h" +#endif + +uint64_t read_fcc(CPULOONGARCHState *env) +{ + uint64_t ret = 0; + + for (int i = 0; i < 8; ++i) { + ret |= (uint64_t)env->active_fpu.cf[i] << (i * 8); + } + + return ret; +} + +void write_fcc(CPULOONGARCHState *env, uint64_t val) +{ + for (int i = 0; i < 8; ++i) { + env->active_fpu.cf[i] = (val >> (i * 8)) & 1; + } +} + +int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + int size = 0; + + if (0 <= n && n < 32) { + return gdb_get_regl(mem_buf, env->active_tc.gpr[n]); + } + + switch (n) { + case 32: + size = gdb_get_regl(mem_buf, 0); + break; + case 33: + size = gdb_get_regl(mem_buf, env->active_tc.PC); + break; + case 34: + size = gdb_get_regl(mem_buf, env->CSR_BADV); + break; + default: + break; + } + + return size; +} + +int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + target_ulong tmp = ldtul_p(mem_buf); + int size = 0; + + if (0 <= n && n < 32) { + return env->active_tc.gpr[n] = tmp, sizeof(target_ulong); + } + + size = sizeof(target_ulong); + + switch (n) { + case 33: + env->active_tc.PC = tmp; + break; + case 32: + case 34: + default: + size = 0; + break; + } + + return size; +} + +static int loongarch_gdb_get_fpu(CPULOONGARCHState *env, GByteArray *mem_buf, + int n) +{ + if (0 <= n && n < 32) { + return gdb_get_reg64(mem_buf, env->active_fpu.fpr[n].d); + } else if (n == 32) { + uint64_t val = read_fcc(env); + return gdb_get_reg64(mem_buf, val); + } else if (n == 33) { + return gdb_get_reg32(mem_buf, env->active_fpu.fcsr0); + } + return 0; +} + +static int loongarch_gdb_set_fpu(CPULOONGARCHState *env, uint8_t *mem_buf, + int n) +{ + int length = 0; + + if (0 <= n && n < 32) { + env->active_fpu.fpr[n].d = ldq_p(mem_buf); + length = 8; + } else if (n == 32) { + uint64_t val = ldq_p(mem_buf); + write_fcc(env, val); + length = 8; + } else if (n == 33) { + env->active_fpu.fcsr0 = ldl_p(mem_buf); + length = 4; + } + return length; +} + +void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs) +{ + gdb_register_coprocessor(cs, loongarch_gdb_get_fpu, loongarch_gdb_set_fpu, + 34, "loongarch-fpu.xml", 0); +} + +#ifdef CONFIG_TCG +int loongarch_read_qxfer(CPUState *cs, const char *annex, uint8_t *read_buf, + unsigned long offset, unsigned long len) +{ + 
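+    /*
+     * qXfer read of the "cpucfg" annex: the reply is an array of 32-bit
+     * cpucfg words supplied by helper_cpucfg(); misaligned offsets or
+     * lengths are rejected by returning 0.
+     */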
if (strncmp(annex, "cpucfg", sizeof("cpucfg") - 1) == 0) { + if (offset % 4 != 0 || len % 4 != 0) { + return 0; + } + + size_t i; + for (i = offset; i < offset + len; i += 4) + ((uint32_t *)read_buf)[(i - offset) / 4] = + helper_cpucfg(&(LOONGARCH_CPU(cs)->env), i / 4); + return 32 * 4; + } + return 0; +} + +int loongarch_write_qxfer(CPUState *cs, const char *annex, + const uint8_t *write_buf, unsigned long offset, + unsigned long len) +{ + return 0; +} +#endif diff --git a/target/loongarch64/helper.c b/target/loongarch64/helper.c new file mode 100644 index 0000000000000000000000000000000000000000..ec25803c1cd93dda490da02befd5a32e7fca5fdf --- /dev/null +++ b/target/loongarch64/helper.c @@ -0,0 +1,726 @@ +/* + * LOONGARCH emulation helpers for qemu. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "exec/log.h" +#include "hw/loongarch/cpudevs.h" + +#if !defined(CONFIG_USER_ONLY) + +static int ls3a5k_map_address_tlb_entry(CPULOONGARCHState *env, + hwaddr *physical, int *prot, + target_ulong address, int rw, + int access_type, ls3a5k_tlb_t *tlb) +{ + uint64_t mask = tlb->PageMask; + int n = !!(address & mask & ~(mask >> 1)); + uint32_t plv = env->CSR_CRMD & CSR_CRMD_PLV; + + /* Check access rights */ + if (!(n ? tlb->V1 : tlb->V0)) { + return TLBRET_INVALID; + } + + if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) { + return TLBRET_XI; + } + + if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) { + return TLBRET_RI; + } + + if (plv > (n ? tlb->PLV1 : tlb->PLV0)) { + return TLBRET_PE; + } + + if (rw != MMU_DATA_STORE || (n ? tlb->WE1 : tlb->WE0)) { + /* + * PPN address + * 4 KB: [47:13] [12;0] + * 16 KB: [47:15] [14:0] + */ + if (n) { + *physical = tlb->PPN1 | (address & (mask >> 1)); + } else { + *physical = tlb->PPN0 | (address & (mask >> 1)); + } + *prot = PAGE_READ; + if (n ? tlb->WE1 : tlb->WE0) { + *prot |= PAGE_WRITE; + } + if (!(n ? 
+            *prot |= PAGE_EXEC;
+        }
+        return TLBRET_MATCH;
+    }
+
+    return TLBRET_DIRTY;
+}
+
+/* LoongArch 3A5K-style MMU emulation */
+int ls3a5k_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
+                       target_ulong address, int rw, int access_type)
+{
+    uint16_t asid = env->CSR_ASID & 0x3ff;
+    int i;
+    ls3a5k_tlb_t *tlb;
+
+    int ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
+    int vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;
+
+    int ftlb_idx;
+
+    uint64_t mask;
+    uint64_t vpn; /* address to map */
+    uint64_t tag; /* address in TLB entry */
+
+    /* search VTLB */
+    for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) {
+        tlb = &env->tlb->mmu.ls3a5k.tlb[i];
+        mask = tlb->PageMask;
+
+        vpn = address & 0xffffffffe000 & ~mask;
+        tag = tlb->VPN & ~mask;
+
+        if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag &&
+            tlb->EHINV != 1) {
+            return ls3a5k_map_address_tlb_entry(env, physical, prot, address,
+                                                rw, access_type, tlb);
+        }
+    }
+
+    if (ftlb_size == 0) {
+        return TLBRET_NOMATCH;
+    }
+
+    /* search FTLB */
+    mask = env->tlb->mmu.ls3a5k.ftlb_mask;
+    vpn = address & 0xffffffffe000 & ~mask;
+
+    ftlb_idx = (address & 0xffffffffc000) >> 15; /* 16 KB */
+    ftlb_idx = ftlb_idx & 0xff;                  /* [0,255] */
+
+    for (i = 0; i < 8; ++i) {
+        /*
+         * ---------- set  0    1    2   ...   7
+         * ftlb_idx -----------------------------------
+         *     0    |      0    1    2   ...   7
+         *     1    |      8    9   10   ...  15
+         *     2    |     16   17   18   ...  23
+         *    ...   |
+         *    255   |   2040 2041 2042  ... 2047
+         */
+        tlb = &env->tlb->mmu.ls3a5k.tlb[ftlb_idx * 8 + i];
+        tag = tlb->VPN & ~mask;
+
+        if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag &&
+            tlb->EHINV != 1) {
+            return ls3a5k_map_address_tlb_entry(env, physical, prot, address,
+                                                rw, access_type, tlb);
+        }
+    }
+
+    return TLBRET_NOMATCH;
+}
+
+static int get_physical_address(CPULOONGARCHState *env, hwaddr *physical,
+                                int *prot, target_ulong real_address, int rw,
+                                int access_type, int mmu_idx)
+{
+    int user_mode = mmu_idx == LARCH_HFLAG_UM;
+    int kernel_mode = !user_mode;
+    unsigned plv, base_c, base_v, tmp;
+
+    /* effective address */
+    target_ulong address = real_address;
+
+    /* Check PG */
+    if (!(env->CSR_CRMD & CSR_CRMD_PG)) {
+        /* DA mode */
+        *physical = address & 0xffffffffffffUL;
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TLBRET_MATCH;
+    }
+
+    plv = kernel_mode | (user_mode << 3);
+    base_v = address >> CSR_DMW_BASE_SH;
+    /* Check direct map window 0 */
+    base_c = env->CSR_DMWIN0 >> CSR_DMW_BASE_SH;
+    if ((plv & env->CSR_DMWIN0) && (base_c == base_v)) {
+        *physical = dmwin_va2pa(address);
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TLBRET_MATCH;
+    }
+    /* Check direct map window 1 */
+    base_c = env->CSR_DMWIN1 >> CSR_DMW_BASE_SH;
+    if ((plv & env->CSR_DMWIN1) && (base_c == base_v)) {
+        *physical = dmwin_va2pa(address);
+        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        return TLBRET_MATCH;
+    }
+    /* Check valid extension */
+    tmp = address >> 47;
+    if (!(tmp == 0 || tmp == 0x1ffff)) {
+        return TLBRET_BADADDR;
+    }
+    /* mapped address */
+    return env->tlb->map_address(env, physical, prot, real_address, rw,
+                                 access_type);
+}
+
+void cpu_loongarch_tlb_flush(CPULOONGARCHState *env)
+{
+    LOONGARCHCPU *cpu = loongarch_env_get_cpu(env);
+
+    /*
+     * Flush QEMU's TLB and discard all shadowed entries.
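+     * QEMU caches guest translations in its own per-vCPU soft TLB, so
+     * any change to the guest-visible TLB state must invalidate those
+     * cached entries as well.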
+     */
+    tlb_flush(CPU(cpu));
+    env->tlb->tlb_in_use = env->tlb->nb_tlb;
+}
+#endif
+
+static void raise_mmu_exception(CPULOONGARCHState *env, target_ulong address,
+                                int rw, int tlb_error)
+{
+    CPUState *cs = CPU(loongarch_env_get_cpu(env));
+    int exception = 0, error_code = 0;
+
+    if (rw == MMU_INST_FETCH) {
+        error_code |= EXCP_INST_NOTAVAIL;
+    }
+
+    switch (tlb_error) {
+    default:
+    case TLBRET_BADADDR:
+        /* Reference to kernel address from user mode or supervisor mode */
+        /* Reference to supervisor address from user mode */
+        if (rw == MMU_DATA_STORE) {
+            exception = EXCP_AdES;
+        } else {
+            exception = EXCP_AdEL;
+        }
+        break;
+    case TLBRET_NOMATCH:
+        /* No TLB match for a mapped address */
+        if (rw == MMU_DATA_STORE) {
+            exception = EXCP_TLBS;
+        } else {
+            exception = EXCP_TLBL;
+        }
+        error_code |= EXCP_TLB_NOMATCH;
+        break;
+    case TLBRET_INVALID:
+        /* TLB match with no valid bit */
+        if (rw == MMU_DATA_STORE) {
+            exception = EXCP_TLBS;
+        } else {
+            exception = EXCP_TLBL;
+        }
+        break;
+    case TLBRET_DIRTY:
+        /* TLB match but 'D' bit is cleared */
+        exception = EXCP_LTLBL;
+        break;
+    case TLBRET_XI:
+        /* Execute-Inhibit Exception */
+        exception = EXCP_TLBXI;
+        break;
+    case TLBRET_RI:
+        /* Read-Inhibit Exception */
+        exception = EXCP_TLBRI;
+        break;
+    case TLBRET_PE:
+        /* Privileged Exception */
+        exception = EXCP_TLBPE;
+        break;
+    }
+
+    if (env->insn_flags & INSN_LOONGARCH) {
+        if (tlb_error == TLBRET_NOMATCH) {
+            env->CSR_TLBRBADV = address;
+            env->CSR_TLBREHI = address & (TARGET_PAGE_MASK << 1);
+            cs->exception_index = exception;
+            env->error_code = error_code;
+            return;
+        }
+    }
+
+    /* Raise exception */
+    env->CSR_BADV = address;
+    cs->exception_index = exception;
+    env->error_code = error_code;
+
+    if (env->insn_flags & INSN_LOONGARCH) {
+        env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
+    }
+}
+
+bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                            MMUAccessType access_type, int mmu_idx, bool probe,
+                            uintptr_t retaddr)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+#if !defined(CONFIG_USER_ONLY)
+    hwaddr physical;
+    int prot;
+    int loongarch_access_type;
+#endif
+    int ret = TLBRET_BADADDR;
+
+    qemu_log_mask(CPU_LOG_MMU,
+                  "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " mmu_idx %d\n",
+                  __func__, env->active_tc.PC, address, mmu_idx);
+
+    /* data access */
+#if !defined(CONFIG_USER_ONLY)
+    /* XXX: put correct access by using cpu_restore_state() correctly */
+    loongarch_access_type = ACCESS_INT;
+    ret = get_physical_address(env, &physical, &prot, address, access_type,
+                               loongarch_access_type, mmu_idx);
+    switch (ret) {
+    case TLBRET_MATCH:
+        qemu_log_mask(CPU_LOG_MMU,
+                      "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
+                      " prot %d asid %ld pc 0x%lx\n",
+                      __func__, address, physical, prot, env->CSR_ASID,
+                      env->active_tc.PC);
+        break;
+    default:
+        qemu_log_mask(CPU_LOG_MMU,
+                      "%s address=%" VADDR_PRIx " ret %d asid %ld pc 0x%lx\n",
+                      __func__, address, ret, env->CSR_ASID,
+                      env->active_tc.PC);
+        break;
+    }
+    if (ret == TLBRET_MATCH) {
+        tlb_set_page(cs, address & TARGET_PAGE_MASK,
+                     physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, mmu_idx,
+                     TARGET_PAGE_SIZE);
+        return true;
+    }
+    if (probe) {
+        return false;
+    }
+#endif
+
+    raise_mmu_exception(env, address, access_type, ret);
+    do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
+}
+
+#if !defined(CONFIG_USER_ONLY)
+hwaddr cpu_loongarch_translate_address(CPULOONGARCHState *env,
+                                       target_ulong address, int rw)
+{
+    hwaddr physical;
+    int prot;
+    int access_type;
+    int ret = 0;
+
+    /* data access */
+    access_type = ACCESS_INT;
+    ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
+                               cpu_mmu_index(env, false));
+    if (ret != TLBRET_MATCH) {
+        raise_mmu_exception(env, address, rw, ret);
+        return -1LL;
+    } else {
+        return physical;
+    }
+}
+
+static const char *const excp_names[EXCP_LAST + 1] = {
+    [EXCP_RESET] = "reset",
+    [EXCP_SRESET] = "soft reset",
+    [EXCP_NMI] = "non-maskable interrupt",
+    [EXCP_EXT_INTERRUPT] = "interrupt",
+    [EXCP_AdEL] = "address error load",
+    [EXCP_AdES] = "address error store",
+    [EXCP_TLBF] = "TLB refill",
+    [EXCP_IBE] = "instruction bus error",
+    [EXCP_SYSCALL] = "syscall",
+    [EXCP_BREAK] = "break",
+    [EXCP_FPDIS] = "float unit unusable",
+    [EXCP_LSXDIS] = "vector128 unusable",
+    [EXCP_LASXDIS] = "vector256 unusable",
+    [EXCP_RI] = "reserved instruction",
+    [EXCP_OVERFLOW] = "arithmetic overflow",
+    [EXCP_TRAP] = "trap",
+    [EXCP_FPE] = "floating point",
+    [EXCP_LTLBL] = "TLB modify",
+    [EXCP_TLBL] = "TLB load",
+    [EXCP_TLBS] = "TLB store",
+    [EXCP_DBE] = "data bus error",
+    [EXCP_TLBXI] = "TLB execute-inhibit",
+    [EXCP_TLBRI] = "TLB read-inhibit",
+    [EXCP_TLBPE] = "TLB privileged error",
+};
+#endif
+
+target_ulong exception_resume_pc(CPULOONGARCHState *env)
+{
+    target_ulong bad_pc;
+
+    bad_pc = env->active_tc.PC;
+    if (env->hflags & LARCH_HFLAG_BMASK) {
+        /*
+         * If the exception was raised from a delay slot, come back to
+         * the jump.
+         */
+        bad_pc -= 4;
+    }
+
+    return bad_pc;
+}
+
+#if !defined(CONFIG_USER_ONLY)
+static void set_hflags_for_handler(CPULOONGARCHState *env)
+{
+    /* Exception handlers are entered in 32-bit mode. */
+}
+
+static inline void set_badinstr_registers(CPULOONGARCHState *env)
+{
+    if ((env->insn_flags & INSN_LOONGARCH)) {
+        env->CSR_BADI = cpu_ldl_code(env, env->active_tc.PC);
+        return;
+    }
+}
+#endif
+
+static inline unsigned int get_vint_size(CPULOONGARCHState *env)
+{
+    unsigned int size = 0;
+
+    switch ((env->CSR_ECFG >> 16) & 0x7) {
+    case 0:
+        break;
+    case 1:
+        size = 2 * 4; /* #Insts * inst_size */
+        break;
+    case 2:
+        size = 4 * 4;
+        break;
+    case 3:
+        size = 8 * 4;
+        break;
+    case 4:
+        size = 16 * 4;
+        break;
+    case 5:
+        size = 32 * 4;
+        break;
+    case 6:
+        size = 64 * 4;
+        break;
+    case 7:
+        size = 128 * 4;
+        break;
+    default:
+        printf("%s: unexpected value\n", __func__);
+        assert(0);
+    }
+
+    return size;
+}
+
+#define is_refill(cs, env)                        \
+    (((cs->exception_index == EXCP_TLBL) ||       \
+      (cs->exception_index == EXCP_TLBS)) &&      \
+     (env->error_code & EXCP_TLB_NOMATCH))
+
+void loongarch_cpu_do_interrupt(CPUState *cs)
+{
+#if !defined(CONFIG_USER_ONLY)
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    bool update_badinstr = false;
+    int cause = -1;
+    const char *name;
+
+    if (qemu_loglevel_mask(CPU_LOG_INT) &&
+        cs->exception_index != EXCP_EXT_INTERRUPT) {
+        if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
+            name = "unknown";
+        } else {
+            name = excp_names[cs->exception_index];
+        }
+
+        qemu_log("%s enter: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
+                 " TLBRERA 0x%016lx"
+                 " %s exception\n",
+                 __func__, env->active_tc.PC, env->CSR_ERA, env->CSR_TLBRERA,
+                 name);
+    }
+
+    switch (cs->exception_index) {
+    case EXCP_RESET:
+        cpu_reset(CPU(cpu));
+        break;
+    case EXCP_NMI:
+        env->CSR_ERRERA = exception_resume_pc(env);
+        env->hflags &= ~LARCH_HFLAG_BMASK;
+        env->hflags |= LARCH_HFLAG_64;
+        env->hflags &= ~LARCH_HFLAG_AWRAP;
+        env->hflags &= ~(LARCH_HFLAG_KSU);
+        env->active_tc.PC = env->exception_base;
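+        /*
+         * NMI enters at the exception base in 64-bit kernel mode; the
+         * hflags updates above clear the KSU bits and any branch state.
+         */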
set_hflags_for_handler(env); + break; + case EXCP_EXT_INTERRUPT: + cause = 0; + goto set_ERA; + case EXCP_LTLBL: + cause = 1; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + goto set_ERA; + case EXCP_TLBL: + cause = 2; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + goto set_ERA; + case EXCP_TLBS: + cause = 3; + update_badinstr = 1; + goto set_ERA; + case EXCP_AdEL: + cause = 4; + update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL); + goto set_ERA; + case EXCP_AdES: + cause = 5; + update_badinstr = 1; + goto set_ERA; + case EXCP_IBE: + cause = 6; + goto set_ERA; + case EXCP_DBE: + cause = 7; + goto set_ERA; + case EXCP_SYSCALL: + cause = 8; + update_badinstr = 1; + goto set_ERA; + case EXCP_BREAK: + cause = 9; + update_badinstr = 1; + goto set_ERA; + case EXCP_RI: + cause = 10; + update_badinstr = 1; + goto set_ERA; + case EXCP_FPDIS: + case EXCP_LSXDIS: + case EXCP_LASXDIS: + cause = 11; + update_badinstr = 1; + goto set_ERA; + case EXCP_OVERFLOW: + cause = 12; + update_badinstr = 1; + goto set_ERA; + case EXCP_TRAP: + cause = 13; + update_badinstr = 1; + goto set_ERA; + case EXCP_FPE: + cause = 15; + update_badinstr = 1; + goto set_ERA; + case EXCP_TLBRI: + cause = 19; + update_badinstr = 1; + goto set_ERA; + case EXCP_TLBXI: + case EXCP_TLBPE: + cause = 20; + goto set_ERA; + set_ERA: + if (is_refill(cs, env)) { + env->CSR_TLBRERA = exception_resume_pc(env); + env->CSR_TLBRERA |= 1; + } else { + env->CSR_ERA = exception_resume_pc(env); + } + + if (update_badinstr) { + set_badinstr_registers(env); + } + env->hflags &= ~(LARCH_HFLAG_KSU); + + env->hflags &= ~LARCH_HFLAG_BMASK; + if (env->insn_flags & INSN_LOONGARCH) { + /* save PLV and IE */ + if (is_refill(cs, env)) { + env->CSR_TLBRPRMD &= (~0x7); + env->CSR_TLBRPRMD |= (env->CSR_CRMD & 0x7); + } else { + env->CSR_PRMD &= (~0x7); + env->CSR_PRMD |= (env->CSR_CRMD & 0x7); + } + + env->CSR_CRMD &= ~(0x7); + + switch (cs->exception_index) { + case EXCP_EXT_INTERRUPT: + break; + case EXCP_TLBL: + if (env->error_code & EXCP_INST_NOTAVAIL) { + cause = EXCCODE_TLBI; + } else { + cause = EXCCODE_TLBL; + } + break; + case EXCP_TLBS: + cause = EXCCODE_TLBS; + break; + case EXCP_LTLBL: + cause = EXCCODE_MOD; + break; + case EXCP_TLBRI: + cause = EXCCODE_TLBRI; + break; + case EXCP_TLBXI: + cause = EXCCODE_TLBXI; + break; + case EXCP_TLBPE: + cause = EXCCODE_TLBPE; + break; + case EXCP_AdEL: + case EXCP_AdES: + case EXCP_IBE: + case EXCP_DBE: + cause = EXCCODE_ADE; + break; + case EXCP_SYSCALL: + cause = EXCCODE_SYS; + break; + case EXCP_BREAK: + cause = EXCCODE_BP; + break; + case EXCP_RI: + cause = EXCCODE_RI; + break; + case EXCP_FPDIS: + cause = EXCCODE_FPDIS; + break; + case EXCP_LSXDIS: + cause = EXCCODE_LSXDIS; + break; + case EXCP_LASXDIS: + cause = EXCCODE_LASXDIS; + break; + case EXCP_FPE: + cause = EXCCODE_FPE; + break; + default: + printf("Error: exception(%d) '%s' has not been supported\n", + cs->exception_index, excp_names[cs->exception_index]); + abort(); + } + + uint32_t vec_size = get_vint_size(env); + env->active_tc.PC = env->CSR_EEPN; + env->active_tc.PC += cause * vec_size; + if (is_refill(cs, env)) { + /* TLB Refill */ + env->active_tc.PC = env->CSR_TLBRENT; + break; /* Do not modify excode */ + } + if (cs->exception_index == EXCP_EXT_INTERRUPT) { + /* Interrupt */ + uint32_t vector = 0; + uint32_t pending = env->CSR_ESTAT & CSR_ESTAT_IPMASK; + pending &= env->CSR_ECFG & CSR_ECFG_IPMASK; + + /* Find the highest-priority interrupt. 
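+                 * Higher-numbered lines take priority: the loop below
+                 * leaves the index of the most significant pending bit
+                 * in 'vector'.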
+                 */
+                while (pending >>= 1) {
+                    vector++;
+                }
+                env->active_tc.PC =
+                    env->CSR_EEPN + (EXCODE_IP + vector) * vec_size;
+                if (qemu_loglevel_mask(CPU_LOG_INT)) {
+                    qemu_log("%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
+                             " cause %d\n"
+                             " A " TARGET_FMT_lx " D " TARGET_FMT_lx
+                             " vector = %d ExC %08lx ExS %08lx\n",
+                             __func__, env->active_tc.PC, env->CSR_ERA, cause,
+                             env->CSR_BADV, env->CSR_DERA, vector,
+                             env->CSR_ECFG, env->CSR_ESTAT);
+                }
+            }
+            /* Excode */
+            env->CSR_ESTAT = (env->CSR_ESTAT & ~(0x1f << CSR_ESTAT_EXC_SH)) |
+                             (cause << CSR_ESTAT_EXC_SH);
+        }
+        set_hflags_for_handler(env);
+        break;
+    default:
+        abort();
+    }
+    if (qemu_loglevel_mask(CPU_LOG_INT) &&
+        cs->exception_index != EXCP_EXT_INTERRUPT) {
+        qemu_log("%s: PC " TARGET_FMT_lx " ERA 0x%08lx"
+                 " cause %d%s\n"
+                 " ESTAT %08lx EXCFG 0x%08lx BADVA 0x%08lx BADI 0x%08lx"
+                 " SYS_NUM %lu cpu %d asid 0x%lx\n",
+                 __func__, env->active_tc.PC,
+                 is_refill(cs, env) ? env->CSR_TLBRERA : env->CSR_ERA, cause,
+                 is_refill(cs, env) ? "(refill)" : "", env->CSR_ESTAT,
+                 env->CSR_ECFG,
+                 is_refill(cs, env) ? env->CSR_TLBRBADV : env->CSR_BADV,
+                 env->CSR_BADI, env->active_tc.gpr[11], cs->cpu_index,
+                 env->CSR_ASID);
+    }
+#endif
+    cs->exception_index = EXCP_NONE;
+}
+
+bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
+{
+    if (interrupt_request & CPU_INTERRUPT_HARD) {
+        LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+        CPULOONGARCHState *env = &cpu->env;
+
+        if (cpu_loongarch_hw_interrupts_enabled(env) &&
+            cpu_loongarch_hw_interrupts_pending(env)) {
+            /* Raise it */
+            cs->exception_index = EXCP_EXT_INTERRUPT;
+            env->error_code = 0;
+            loongarch_cpu_do_interrupt(cs);
+            return true;
+        }
+    }
+    return false;
+}
+
+void QEMU_NORETURN do_raise_exception_err(CPULOONGARCHState *env,
+                                          uint32_t exception, int error_code,
+                                          uintptr_t pc)
+{
+    CPUState *cs = CPU(loongarch_env_get_cpu(env));
+
+    qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n", __func__, exception, error_code);
+    cs->exception_index = exception;
+    env->error_code = error_code;
+
+    cpu_loop_exit_restore(cs, pc);
+}
diff --git a/target/loongarch64/helper.h b/target/loongarch64/helper.h
new file mode 100644
index 0000000000000000000000000000000000000000..868b16da1e3982e0546f11d54a8ce4f6f12536b3
--- /dev/null
+++ b/target/loongarch64/helper.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ * + */ + +DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int) +DEF_HELPER_2(raise_exception, noreturn, env, i32) +DEF_HELPER_1(raise_exception_debug, noreturn, env) + +DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl) +DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl) + +DEF_HELPER_3(crc32, tl, tl, tl, i32) +DEF_HELPER_3(crc32c, tl, tl, tl, i32) + +#ifndef CONFIG_USER_ONLY +/* LoongISA CSR register */ +DEF_HELPER_2(csr_rdq, tl, env, i64) +DEF_HELPER_3(csr_wrq, tl, env, tl, i64) +DEF_HELPER_4(csr_xchgq, tl, env, tl, tl, i64) + +#endif /* !CONFIG_USER_ONLY */ + +/* CP1 functions */ +DEF_HELPER_2(movfcsr2gr, tl, env, i32) +DEF_HELPER_4(movgr2fcsr, void, env, tl, i32, i32) + +DEF_HELPER_2(float_cvtd_s, i64, env, i32) +DEF_HELPER_2(float_cvtd_w, i64, env, i32) +DEF_HELPER_2(float_cvtd_l, i64, env, i64) +DEF_HELPER_2(float_cvts_d, i32, env, i64) +DEF_HELPER_2(float_cvts_w, i32, env, i32) +DEF_HELPER_2(float_cvts_l, i32, env, i64) + +DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32) +DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64) + +#define FOP_PROTO(op) \ + DEF_HELPER_4(float_##op##_s, i32, env, i32, i32, i32) \ + DEF_HELPER_4(float_##op##_d, i64, env, i64, i64, i64) +FOP_PROTO(maddf) +FOP_PROTO(msubf) +FOP_PROTO(nmaddf) +FOP_PROTO(nmsubf) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ + DEF_HELPER_3(float_##op##_s, i32, env, i32, i32) \ + DEF_HELPER_3(float_##op##_d, i64, env, i64, i64) +FOP_PROTO(max) +FOP_PROTO(maxa) +FOP_PROTO(min) +FOP_PROTO(mina) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ + DEF_HELPER_2(float_##op##_l_s, i64, env, i32) \ + DEF_HELPER_2(float_##op##_l_d, i64, env, i64) \ + DEF_HELPER_2(float_##op##_w_s, i32, env, i32) \ + DEF_HELPER_2(float_##op##_w_d, i32, env, i64) +FOP_PROTO(cvt) +FOP_PROTO(round) +FOP_PROTO(trunc) +FOP_PROTO(ceil) +FOP_PROTO(floor) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ + DEF_HELPER_2(float_##op##_s, i32, env, i32) \ + DEF_HELPER_2(float_##op##_d, i64, env, i64) +FOP_PROTO(sqrt) +FOP_PROTO(rsqrt) +FOP_PROTO(recip) +FOP_PROTO(rint) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ + DEF_HELPER_1(float_##op##_s, i32, i32) \ + DEF_HELPER_1(float_##op##_d, i64, i64) +FOP_PROTO(abs) +FOP_PROTO(chs) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ + DEF_HELPER_3(float_##op##_s, i32, env, i32, i32) \ + DEF_HELPER_3(float_##op##_d, i64, env, i64, i64) +FOP_PROTO(add) +FOP_PROTO(sub) +FOP_PROTO(mul) +FOP_PROTO(div) +#undef FOP_PROTO + +#define FOP_PROTO(op) \ + DEF_HELPER_3(cmp_d_##op, i64, env, i64, i64) \ + DEF_HELPER_3(cmp_s_##op, i32, env, i32, i32) +FOP_PROTO(af) +FOP_PROTO(un) +FOP_PROTO(eq) +FOP_PROTO(ueq) +FOP_PROTO(lt) +FOP_PROTO(ult) +FOP_PROTO(le) +FOP_PROTO(ule) +FOP_PROTO(saf) +FOP_PROTO(sun) +FOP_PROTO(seq) +FOP_PROTO(sueq) +FOP_PROTO(slt) +FOP_PROTO(sult) +FOP_PROTO(sle) +FOP_PROTO(sule) +FOP_PROTO(or) +FOP_PROTO(une) +FOP_PROTO(ne) +FOP_PROTO(sor) +FOP_PROTO(sune) +FOP_PROTO(sne) +#undef FOP_PROTO + +/* Special functions */ +#ifndef CONFIG_USER_ONLY +DEF_HELPER_1(tlbwr, void, env) +DEF_HELPER_1(tlbfill, void, env) +DEF_HELPER_1(tlbsrch, void, env) +DEF_HELPER_1(tlbrd, void, env) +DEF_HELPER_1(tlbclr, void, env) +DEF_HELPER_1(tlbflush, void, env) +DEF_HELPER_4(invtlb, void, env, tl, tl, tl) +DEF_HELPER_1(ertn, void, env) +DEF_HELPER_5(lddir, void, env, tl, tl, tl, i32) +DEF_HELPER_4(ldpte, void, env, tl, tl, i32) +DEF_HELPER_3(drdtime, void, env, tl, tl) +DEF_HELPER_1(read_pgd, tl, env) +#endif /* !CONFIG_USER_ONLY */ +DEF_HELPER_2(cpucfg, tl, env, tl) +DEF_HELPER_1(idle, void, env) + 
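+/* Additional floating-point and condition-flag move helpers */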
+DEF_HELPER_3(float_exp2_s, i32, env, i32, i32) +DEF_HELPER_3(float_exp2_d, i64, env, i64, i64) +DEF_HELPER_2(float_logb_s, i32, env, i32) +DEF_HELPER_2(float_logb_d, i64, env, i64) +DEF_HELPER_3(movreg2cf, void, env, i32, tl) +DEF_HELPER_2(movcf2reg, tl, env, i32) +DEF_HELPER_3(movreg2cf_i32, void, env, i32, i32) +DEF_HELPER_3(movreg2cf_i64, void, env, i32, i64) + +DEF_HELPER_2(cto_w, tl, env, tl) +DEF_HELPER_2(ctz_w, tl, env, tl) +DEF_HELPER_2(cto_d, tl, env, tl) +DEF_HELPER_2(ctz_d, tl, env, tl) +DEF_HELPER_2(bitrev_w, tl, env, tl) +DEF_HELPER_2(bitrev_d, tl, env, tl) + +DEF_HELPER_2(load_scr, i64, env, i32) +DEF_HELPER_3(store_scr, void, env, i32, i64) + +DEF_HELPER_3(asrtle_d, void, env, tl, tl) +DEF_HELPER_3(asrtgt_d, void, env, tl, tl) + +DEF_HELPER_4(fsel, i64, env, i64, i64, i32) + +#ifndef CONFIG_USER_ONLY +DEF_HELPER_4(iocsr, void, env, tl, tl, i32) +#endif +DEF_HELPER_3(memtrace_addr, void, env, tl, i32) +DEF_HELPER_2(memtrace_val, void, env, tl) diff --git a/target/loongarch64/insn.decode b/target/loongarch64/insn.decode new file mode 100644 index 0000000000000000000000000000000000000000..2f82441ea7b96a23e8c52a45ab9677e296761808 --- /dev/null +++ b/target/loongarch64/insn.decode @@ -0,0 +1,532 @@ +# +# loongarch ISA decode for 64-bit prefixed insns +# +# Copyright (c) 2023 Loongarch Technology +# +# This program is free software; you can redistribute it and/or modify it +# under the terms and conditions of the GNU General Public License, +# version 2 or later, as published by the Free Software Foundation. +# +# This program is distributed in the hope it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for +# more details. +# +# You should have received a copy of the GNU General Public License along with +# this program. If not, see . 
+# + +# Fields +%sd 0:2 +%rj 5:5 +%rd 0:5 +%sj 5:2 +%ptr 5:3 +%rk 10:5 +%sa2 15:2 +%sa3 15:3 +%si5 10:s5 +%code 0:15 +%cond 10:4 +%cond2 0:4 +%ui5 10:5 +%ui6 10:6 +%ui3 10:3 +%ui4 10:4 +%op 5:5 +%ui8 10:8 +%msbw 16:5 +%lsbw 10:5 +%msbd 16:6 +%lsbd 10:6 +%fd 0:5 +%fj 5:5 +%fk 10:5 +%fcsrd 0:5 +%fcsrs 5:5 +%cd 0:3 +%cj 5:3 +%si12 10:s12 +%ui12 10:12 +%csr 10:14 +%cop 0:5 +%level 10:8 +%seq 10:8 +%whint 0:15 +%addr 10:5 +%info 5:5 +%invop 0:5 +%fa 15:5 +%vd 0:5 +%vj 5:5 +%vk 10:5 +%va 15:5 +%xd 0:5 +%xj 5:5 +%xk 10:5 +%xa 15:5 +%fcond 15:5 +%ca 15:3 +%vui5 15:5 +%si16 10:s16 +%si20 5:s20 +%si14 10:s14 +%hint 0:5 +%si9 10:s9 +%si10 10:s10 +%si11 10:s11 +%si8 10:s8 +%idx1 18:1 +%idx2 18:2 +%idx3 18:3 +%idx4 18:4 +%idx 18:5 +%offs21 0:s5 10:16 +%offs16 10:s16 +%offs 0:s10 10:16 +%mode 5:5 +%ui2 10:2 +%ui1 10:1 +%ui7 10:7 +%i13 5:13 + +# Argument sets +&fmt_sdrj sd rj +&fmt_rdsj rd sj +&fmt_rdrj rd rj +&fmt_empty +&fmt_rjrk rj rk +&fmt_rdrjrksa2 rd rj rk sa2 +&fmt_rdrjrksa3 rd rj rk sa3 +&fmt_rdrjrk rd rj rk +&fmt_code code +&fmt_rdrjui5 rd rj ui5 +&fmt_rdrjui6 rd rj ui6 +&fmt_rdrjmsbwlsbw rd rj msbw lsbw +&fmt_rdrjmsbdlsbd rd rj msbd lsbd +&fmt_fdfjfk fd fj fk +&fmt_fdfj fd fj +&fmt_fdrj fd rj +&fmt_rdfj rd fj +&fmt_fcsrdrj fcsrd rj +&fmt_rdfcsrs rd fcsrs +&fmt_cdfj cd fj +&fmt_fdcj fd cj +&fmt_cdrj cd rj +&fmt_rdcj rd cj +&fmt_rdrjsi12 rd rj si12 +&fmt_rdrjui12 rd rj ui12 +&fmt_rdrjcsr rd rj csr +&fmt_coprjsi12 cop rj si12 +&fmt_rdrjlevel rd rj level +&fmt_rjseq rj seq +&fmt_whint whint +&fmt_invtlb addr info invop +&fmt_fdfjfkfa fd fj fk fa +&fmt_cdfjfkfcond cd fj fk fcond +&fmt_fdfjfkca fd fj fk ca +&fmt_rdrjsi16 rd rj si16 +&fmt_rdsi20 rd si20 +&fmt_rdrjsi14 rd rj si14 +&fmt_hintrjsi12 hint rj si12 +&fmt_fdrjsi12 fd rj si12 +&fmt_fdrjrk fd rj rk +&fmt_rjoffs21 rj offs21 +&fmt_cjoffs21 cj offs21 +&fmt_rdrjoffs16 rd rj offs16 +&fmt_offs offs +&fmt_rjrdoffs16 rj rd offs16 + +# Formats +@fmt_sdrj .... ........ ..... ..... ..... ... .. &fmt_sdrj %sd %rj +@fmt_rdsj .... ........ ..... ..... ... .. ..... &fmt_rdsj %rd %sj +@fmt_rdrj .... ........ ..... ..... ..... ..... &fmt_rdrj %rd %rj +@fmt_empty .... ........ ..... ..... ..... ..... &fmt_empty +@fmt_rjrk .... ........ ..... ..... ..... ..... &fmt_rjrk %rj %rk +@fmt_rdrjrksa2 .... ........ ... .. ..... ..... ..... &fmt_rdrjrksa2 %rd %rj %rk %sa2 +@fmt_rdrjrksa3 .... ........ .. ... ..... ..... ..... &fmt_rdrjrksa3 %rd %rj %rk %sa3 +@fmt_rdrjrk .... ........ ..... ..... ..... ..... &fmt_rdrjrk %rd %rj %rk +@fmt_code .... ........ ..... ............... &fmt_code %code +@fmt_rdrjui5 .... ........ ..... ..... ..... ..... &fmt_rdrjui5 %rd %rj %ui5 +@fmt_rdrjui6 .... ........ .... ...... ..... ..... &fmt_rdrjui6 %rd %rj %ui6 +@fmt_rdrjmsbwlsbw .... ....... ..... . ..... ..... ..... &fmt_rdrjmsbwlsbw %rd %rj %msbw %lsbw +@fmt_rdrjmsbdlsbd .... ...... ...... ...... ..... ..... &fmt_rdrjmsbdlsbd %rd %rj %msbd %lsbd +@fmt_fdfjfk .... ........ ..... ..... ..... ..... &fmt_fdfjfk %fd %fj %fk +@fmt_fdfj .... ........ ..... ..... ..... ..... &fmt_fdfj %fd %fj +@fmt_fdrj .... ........ ..... ..... ..... ..... &fmt_fdrj %fd %rj +@fmt_rdfj .... ........ ..... ..... ..... ..... &fmt_rdfj %rd %fj +@fmt_fcsrdrj .... ........ ..... ..... ..... ..... &fmt_fcsrdrj %fcsrd %rj +@fmt_rdfcsrs .... ........ ..... ..... ..... ..... &fmt_rdfcsrs %rd %fcsrs +@fmt_cdfj .... ........ ..... ..... ..... .. ... &fmt_cdfj %cd %fj +@fmt_fdcj .... ........ ..... ..... .. ... ..... &fmt_fdcj %fd %cj +@fmt_cdrj .... ........ ..... ..... ..... .. ... &fmt_cdrj %cd %rj +@fmt_rdcj .... 
........ ..... ..... .. ... ..... &fmt_rdcj %rd %cj
+@fmt_rdrjsi12 .... ...... ............ ..... ..... &fmt_rdrjsi12 %rd %rj %si12
+@fmt_rdrjui12 .... ...... ............ ..... ..... &fmt_rdrjui12 %rd %rj %ui12
+@fmt_rdrjcsr .... .... .............. ..... ..... &fmt_rdrjcsr %rd %rj %csr
+@fmt_coprjsi12 .... ...... ............ ..... ..... &fmt_coprjsi12 %cop %rj %si12
+@fmt_rdrjlevel .... ........ .. ........ ..... ..... &fmt_rdrjlevel %rd %rj %level
+@fmt_rjseq .... ........ .. ........ ..... ..... &fmt_rjseq %rj %seq
+@fmt_whint .... ........ ..... ............... &fmt_whint %whint
+@fmt_invtlb ...... ...... ..... ..... ..... ..... &fmt_invtlb %addr %info %invop
+@fmt_fdfjfkfa .... ........ ..... ..... ..... ..... &fmt_fdfjfkfa %fd %fj %fk %fa
+@fmt_cdfjfkfcond .... ........ ..... ..... ..... .. ... &fmt_cdfjfkfcond %cd %fj %fk %fcond
+@fmt_fdfjfkca .... ........ .. ... ..... ..... ..... &fmt_fdfjfkca %fd %fj %fk %ca
+@fmt_rdrjsi16 .... .. ................ ..... ..... &fmt_rdrjsi16 %rd %rj %si16
+@fmt_rdsi20 .... ... .................... ..... &fmt_rdsi20 %rd %si20
+@fmt_rdrjsi14 .... .... .............. ..... ..... &fmt_rdrjsi14 %rd %rj %si14
+@fmt_hintrjsi12 .... ...... ............ ..... ..... &fmt_hintrjsi12 %hint %rj %si12
+@fmt_fdrjsi12 .... ...... ............ ..... ..... &fmt_fdrjsi12 %fd %rj %si12
+@fmt_fdrjrk .... ........ ..... ..... ..... ..... &fmt_fdrjrk %fd %rj %rk
+@fmt_rjoffs21 .... .. ................ ..... ..... &fmt_rjoffs21 %rj %offs21
+@fmt_cjoffs21 .... .. ................ .. ... ..... &fmt_cjoffs21 %cj %offs21
+@fmt_rdrjoffs16 .... .. ................ ..... ..... &fmt_rdrjoffs16 %rd %rj %offs16
+@fmt_offs .... .. .......................... &fmt_offs %offs
+@fmt_rjrdoffs16 .... .. ................ ..... ..... &fmt_rjrdoffs16 %rj %rd %offs16
+
+# Instructions
+
+# Fixed point arithmetic Instructions
+gr2scr 0000 00000000 00000 00010 ..... 000 .. @fmt_sdrj
+scr2gr 0000 00000000 00000 00011 000 .. ..... @fmt_rdsj
+clo_w 0000 00000000 00000 00100 ..... ..... @fmt_rdrj
+clz_w 0000 00000000 00000 00101 ..... ..... @fmt_rdrj
+cto_w 0000 00000000 00000 00110 ..... ..... @fmt_rdrj
+ctz_w 0000 00000000 00000 00111 ..... ..... @fmt_rdrj
+clo_d 0000 00000000 00000 01000 ..... ..... @fmt_rdrj
+clz_d 0000 00000000 00000 01001 ..... ..... @fmt_rdrj
+cto_d 0000 00000000 00000 01010 ..... ..... @fmt_rdrj
+ctz_d 0000 00000000 00000 01011 ..... ..... @fmt_rdrj
+revb_2h 0000 00000000 00000 01100 ..... ..... @fmt_rdrj
+revb_4h 0000 00000000 00000 01101 ..... ..... @fmt_rdrj
+revb_2w 0000 00000000 00000 01110 ..... ..... @fmt_rdrj
+revb_d 0000 00000000 00000 01111 ..... ..... @fmt_rdrj
+revh_2w 0000 00000000 00000 10000 ..... ..... @fmt_rdrj
+revh_d 0000 00000000 00000 10001 ..... ..... @fmt_rdrj
+bitrev_4b 0000 00000000 00000 10010 ..... ..... @fmt_rdrj
+bitrev_8b 0000 00000000 00000 10011 ..... ..... @fmt_rdrj
+bitrev_w 0000 00000000 00000 10100 ..... ..... @fmt_rdrj
+bitrev_d 0000 00000000 00000 10101 ..... ..... @fmt_rdrj
+ext_w_h 0000 00000000 00000 10110 ..... ..... @fmt_rdrj
+ext_w_b 0000 00000000 00000 10111 ..... ..... @fmt_rdrj
+rdtime_d 0000 00000000 00000 11010 ..... ..... @fmt_rdrj
+cpucfg 0000 00000000 00000 11011 ..... ..... @fmt_rdrj
+asrtle_d 0000 00000000 00010 ..... ..... 00000 @fmt_rjrk
+asrtgt_d 0000 00000000 00011 ..... ..... 00000 @fmt_rjrk
+alsl_w 0000 00000000 010 .. ..... ..... ..... @fmt_rdrjrksa2
+alsl_wu 0000 00000000 011 .. ..... ..... ..... @fmt_rdrjrksa2
+bytepick_w 0000 00000000 100 .. ..... ..... ..... @fmt_rdrjrksa2
+bytepick_d 0000 00000000 11 ...
..... ..... ..... @fmt_rdrjrksa3 +add_w 0000 00000001 00000 ..... ..... ..... @fmt_rdrjrk +add_d 0000 00000001 00001 ..... ..... ..... @fmt_rdrjrk +sub_w 0000 00000001 00010 ..... ..... ..... @fmt_rdrjrk +sub_d 0000 00000001 00011 ..... ..... ..... @fmt_rdrjrk +slt 0000 00000001 00100 ..... ..... ..... @fmt_rdrjrk +sltu 0000 00000001 00101 ..... ..... ..... @fmt_rdrjrk +maskeqz 0000 00000001 00110 ..... ..... ..... @fmt_rdrjrk +masknez 0000 00000001 00111 ..... ..... ..... @fmt_rdrjrk +nor 0000 00000001 01000 ..... ..... ..... @fmt_rdrjrk +and 0000 00000001 01001 ..... ..... ..... @fmt_rdrjrk +or 0000 00000001 01010 ..... ..... ..... @fmt_rdrjrk +xor 0000 00000001 01011 ..... ..... ..... @fmt_rdrjrk +orn 0000 00000001 01100 ..... ..... ..... @fmt_rdrjrk +andn 0000 00000001 01101 ..... ..... ..... @fmt_rdrjrk +sll_w 0000 00000001 01110 ..... ..... ..... @fmt_rdrjrk +srl_w 0000 00000001 01111 ..... ..... ..... @fmt_rdrjrk +sra_w 0000 00000001 10000 ..... ..... ..... @fmt_rdrjrk +sll_d 0000 00000001 10001 ..... ..... ..... @fmt_rdrjrk +srl_d 0000 00000001 10010 ..... ..... ..... @fmt_rdrjrk +sra_d 0000 00000001 10011 ..... ..... ..... @fmt_rdrjrk +rotr_w 0000 00000001 10110 ..... ..... ..... @fmt_rdrjrk +rotr_d 0000 00000001 10111 ..... ..... ..... @fmt_rdrjrk +mul_w 0000 00000001 11000 ..... ..... ..... @fmt_rdrjrk +mulh_w 0000 00000001 11001 ..... ..... ..... @fmt_rdrjrk +mulh_wu 0000 00000001 11010 ..... ..... ..... @fmt_rdrjrk +mul_d 0000 00000001 11011 ..... ..... ..... @fmt_rdrjrk +mulh_d 0000 00000001 11100 ..... ..... ..... @fmt_rdrjrk +mulh_du 0000 00000001 11101 ..... ..... ..... @fmt_rdrjrk +mulw_d_w 0000 00000001 11110 ..... ..... ..... @fmt_rdrjrk +mulw_d_wu 0000 00000001 11111 ..... ..... ..... @fmt_rdrjrk +div_w 0000 00000010 00000 ..... ..... ..... @fmt_rdrjrk +mod_w 0000 00000010 00001 ..... ..... ..... @fmt_rdrjrk +div_wu 0000 00000010 00010 ..... ..... ..... @fmt_rdrjrk +mod_wu 0000 00000010 00011 ..... ..... ..... @fmt_rdrjrk +div_d 0000 00000010 00100 ..... ..... ..... @fmt_rdrjrk +mod_d 0000 00000010 00101 ..... ..... ..... @fmt_rdrjrk +div_du 0000 00000010 00110 ..... ..... ..... @fmt_rdrjrk +mod_du 0000 00000010 00111 ..... ..... ..... @fmt_rdrjrk +crc_w_b_w 0000 00000010 01000 ..... ..... ..... @fmt_rdrjrk +crc_w_h_w 0000 00000010 01001 ..... ..... ..... @fmt_rdrjrk +crc_w_w_w 0000 00000010 01010 ..... ..... ..... @fmt_rdrjrk +crc_w_d_w 0000 00000010 01011 ..... ..... ..... @fmt_rdrjrk +crcc_w_b_w 0000 00000010 01100 ..... ..... ..... @fmt_rdrjrk +crcc_w_h_w 0000 00000010 01101 ..... ..... ..... @fmt_rdrjrk +crcc_w_w_w 0000 00000010 01110 ..... ..... ..... @fmt_rdrjrk +crcc_w_d_w 0000 00000010 01111 ..... ..... ..... @fmt_rdrjrk +break 0000 00000010 10100 ............... @fmt_code +dbcl 0000 00000010 10101 ............... @fmt_code +syscall 0000 00000010 10110 ............... @fmt_code +alsl_d 0000 00000010 110 .. ..... ..... ..... @fmt_rdrjrksa2 +slli_w 0000 00000100 00001 ..... ..... ..... @fmt_rdrjui5 +slli_d 0000 00000100 0001 ...... ..... ..... @fmt_rdrjui6 +srli_w 0000 00000100 01001 ..... ..... ..... @fmt_rdrjui5 +srli_d 0000 00000100 0101 ...... ..... ..... @fmt_rdrjui6 +srai_w 0000 00000100 10001 ..... ..... ..... @fmt_rdrjui5 +srai_d 0000 00000100 1001 ...... ..... ..... @fmt_rdrjui6 +rotri_w 0000 00000100 11001 ..... ..... ..... @fmt_rdrjui5 +rotri_d 0000 00000100 1101 ...... ..... ..... @fmt_rdrjui6 +bstrins_w 0000 0000011 ..... 0 ..... ..... ..... @fmt_rdrjmsbwlsbw +bstrpick_w 0000 0000011 ..... 1 ..... ..... ..... 
@fmt_rdrjmsbwlsbw +bstrins_d 0000 000010 ...... ...... ..... ..... @fmt_rdrjmsbdlsbd +bstrpick_d 0000 000011 ...... ...... ..... ..... @fmt_rdrjmsbdlsbd + +# float Instructions +fadd_s 0000 00010000 00001 ..... ..... ..... @fmt_fdfjfk +fadd_d 0000 00010000 00010 ..... ..... ..... @fmt_fdfjfk +fsub_s 0000 00010000 00101 ..... ..... ..... @fmt_fdfjfk +fsub_d 0000 00010000 00110 ..... ..... ..... @fmt_fdfjfk +fmul_s 0000 00010000 01001 ..... ..... ..... @fmt_fdfjfk +fmul_d 0000 00010000 01010 ..... ..... ..... @fmt_fdfjfk +fdiv_s 0000 00010000 01101 ..... ..... ..... @fmt_fdfjfk +fdiv_d 0000 00010000 01110 ..... ..... ..... @fmt_fdfjfk +fmax_s 0000 00010000 10001 ..... ..... ..... @fmt_fdfjfk +fmax_d 0000 00010000 10010 ..... ..... ..... @fmt_fdfjfk +fmin_s 0000 00010000 10101 ..... ..... ..... @fmt_fdfjfk +fmin_d 0000 00010000 10110 ..... ..... ..... @fmt_fdfjfk +fmaxa_s 0000 00010000 11001 ..... ..... ..... @fmt_fdfjfk +fmaxa_d 0000 00010000 11010 ..... ..... ..... @fmt_fdfjfk +fmina_s 0000 00010000 11101 ..... ..... ..... @fmt_fdfjfk +fmina_d 0000 00010000 11110 ..... ..... ..... @fmt_fdfjfk +fscaleb_s 0000 00010001 00001 ..... ..... ..... @fmt_fdfjfk +fscaleb_d 0000 00010001 00010 ..... ..... ..... @fmt_fdfjfk +fcopysign_s 0000 00010001 00101 ..... ..... ..... @fmt_fdfjfk +fcopysign_d 0000 00010001 00110 ..... ..... ..... @fmt_fdfjfk +fabs_s 0000 00010001 01000 00001 ..... ..... @fmt_fdfj +fabs_d 0000 00010001 01000 00010 ..... ..... @fmt_fdfj +fneg_s 0000 00010001 01000 00101 ..... ..... @fmt_fdfj +fneg_d 0000 00010001 01000 00110 ..... ..... @fmt_fdfj +flogb_s 0000 00010001 01000 01001 ..... ..... @fmt_fdfj +flogb_d 0000 00010001 01000 01010 ..... ..... @fmt_fdfj +fclass_s 0000 00010001 01000 01101 ..... ..... @fmt_fdfj +fclass_d 0000 00010001 01000 01110 ..... ..... @fmt_fdfj +fsqrt_s 0000 00010001 01000 10001 ..... ..... @fmt_fdfj +fsqrt_d 0000 00010001 01000 10010 ..... ..... @fmt_fdfj +frecip_s 0000 00010001 01000 10101 ..... ..... @fmt_fdfj +frecip_d 0000 00010001 01000 10110 ..... ..... @fmt_fdfj +frsqrt_s 0000 00010001 01000 11001 ..... ..... @fmt_fdfj +frsqrt_d 0000 00010001 01000 11010 ..... ..... @fmt_fdfj +fmov_s 0000 00010001 01001 00101 ..... ..... @fmt_fdfj +fmov_d 0000 00010001 01001 00110 ..... ..... @fmt_fdfj +movgr2fr_w 0000 00010001 01001 01001 ..... ..... @fmt_fdrj +movgr2fr_d 0000 00010001 01001 01010 ..... ..... @fmt_fdrj +movgr2frh_w 0000 00010001 01001 01011 ..... ..... @fmt_fdrj +movfr2gr_s 0000 00010001 01001 01101 ..... ..... @fmt_rdfj +movfr2gr_d 0000 00010001 01001 01110 ..... ..... @fmt_rdfj +movfrh2gr_s 0000 00010001 01001 01111 ..... ..... @fmt_rdfj +movgr2fcsr 0000 00010001 01001 10000 ..... ..... @fmt_fcsrdrj +movfcsr2gr 0000 00010001 01001 10010 ..... ..... @fmt_rdfcsrs +movfr2cf 0000 00010001 01001 10100 ..... 00 ... @fmt_cdfj +movcf2fr 0000 00010001 01001 10101 00 ... ..... @fmt_fdcj +movgr2cf 0000 00010001 01001 10110 ..... 00 ... @fmt_cdrj +movcf2gr 0000 00010001 01001 10111 00 ... ..... @fmt_rdcj +fcvt_s_d 0000 00010001 10010 00110 ..... ..... @fmt_fdfj +fcvt_d_s 0000 00010001 10010 01001 ..... ..... @fmt_fdfj +ftintrm_w_s 0000 00010001 10100 00001 ..... ..... @fmt_fdfj +ftintrm_w_d 0000 00010001 10100 00010 ..... ..... @fmt_fdfj +ftintrm_l_s 0000 00010001 10100 01001 ..... ..... @fmt_fdfj +ftintrm_l_d 0000 00010001 10100 01010 ..... ..... @fmt_fdfj +ftintrp_w_s 0000 00010001 10100 10001 ..... ..... @fmt_fdfj +ftintrp_w_d 0000 00010001 10100 10010 ..... ..... @fmt_fdfj +ftintrp_l_s 0000 00010001 10100 11001 ..... ..... 
@fmt_fdfj
+ftintrp_l_d 0000 00010001 10100 11010 ..... ..... @fmt_fdfj
+ftintrz_w_s 0000 00010001 10101 00001 ..... ..... @fmt_fdfj
+ftintrz_w_d 0000 00010001 10101 00010 ..... ..... @fmt_fdfj
+ftintrz_l_s 0000 00010001 10101 01001 ..... ..... @fmt_fdfj
+ftintrz_l_d 0000 00010001 10101 01010 ..... ..... @fmt_fdfj
+ftintrne_w_s 0000 00010001 10101 10001 ..... ..... @fmt_fdfj
+ftintrne_w_d 0000 00010001 10101 10010 ..... ..... @fmt_fdfj
+ftintrne_l_s 0000 00010001 10101 11001 ..... ..... @fmt_fdfj
+ftintrne_l_d 0000 00010001 10101 11010 ..... ..... @fmt_fdfj
+ftint_w_s 0000 00010001 10110 00001 ..... ..... @fmt_fdfj
+ftint_w_d 0000 00010001 10110 00010 ..... ..... @fmt_fdfj
+ftint_l_s 0000 00010001 10110 01001 ..... ..... @fmt_fdfj
+ftint_l_d 0000 00010001 10110 01010 ..... ..... @fmt_fdfj
+ffint_s_w 0000 00010001 11010 00100 ..... ..... @fmt_fdfj
+ffint_s_l 0000 00010001 11010 00110 ..... ..... @fmt_fdfj
+ffint_d_w 0000 00010001 11010 01000 ..... ..... @fmt_fdfj
+ffint_d_l 0000 00010001 11010 01010 ..... ..... @fmt_fdfj
+frint_s 0000 00010001 11100 10001 ..... ..... @fmt_fdfj
+frint_d 0000 00010001 11100 10010 ..... ..... @fmt_fdfj
+
+# 12 bit immediate Instructions
+slti 0000 001000 ............ ..... ..... @fmt_rdrjsi12
+sltui 0000 001001 ............ ..... ..... @fmt_rdrjsi12
+addi_w 0000 001010 ............ ..... ..... @fmt_rdrjsi12
+addi_d 0000 001011 ............ ..... ..... @fmt_rdrjsi12
+lu52i_d 0000 001100 ............ ..... ..... @fmt_rdrjsi12
+andi 0000 001101 ............ ..... ..... @fmt_rdrjui12
+ori 0000 001110 ............ ..... ..... @fmt_rdrjui12
+xori 0000 001111 ............ ..... ..... @fmt_rdrjui12
+
+# core Instructions
+csrxchg 0000 0100 .............. ..... ..... @fmt_rdrjcsr
+cacop 0000 011000 ............ ..... ..... @fmt_coprjsi12
+lddir 0000 01100100 00 ........ ..... ..... @fmt_rdrjlevel
+ldpte 0000 01100100 01 ........ ..... 00000 @fmt_rjseq
+iocsrrd_b 0000 01100100 10000 00000 ..... ..... @fmt_rdrj
+iocsrrd_h 0000 01100100 10000 00001 ..... ..... @fmt_rdrj
+iocsrrd_w 0000 01100100 10000 00010 ..... ..... @fmt_rdrj
+iocsrrd_d 0000 01100100 10000 00011 ..... ..... @fmt_rdrj
+iocsrwr_b 0000 01100100 10000 00100 ..... ..... @fmt_rdrj
+iocsrwr_h 0000 01100100 10000 00101 ..... ..... @fmt_rdrj
+iocsrwr_w 0000 01100100 10000 00110 ..... ..... @fmt_rdrj
+iocsrwr_d 0000 01100100 10000 00111 ..... ..... @fmt_rdrj
+tlbclr 0000 01100100 10000 01000 00000 00000 @fmt_empty
+tlbflush 0000 01100100 10000 01001 00000 00000 @fmt_empty
+tlbsrch 0000 01100100 10000 01010 00000 00000 @fmt_empty
+tlbrd 0000 01100100 10000 01011 00000 00000 @fmt_empty
+tlbwr 0000 01100100 10000 01100 00000 00000 @fmt_empty
+tlbfill 0000 01100100 10000 01101 00000 00000 @fmt_empty
+ertn 0000 01100100 10000 01110 00000 00000 @fmt_empty
+idle 0000 01100100 10001 ............... @fmt_whint
+invtlb 0000 01100100 10011 ..... ..... ..... @fmt_invtlb
+
+# four operand Instructions
+fmadd_s 0000 10000001 ..... ..... ..... ..... @fmt_fdfjfkfa
+fmadd_d 0000 10000010 ..... ..... ..... ..... @fmt_fdfjfkfa
+fmsub_s 0000 10000101 ..... ..... ..... ..... @fmt_fdfjfkfa
+fmsub_d 0000 10000110 ..... ..... ..... ..... @fmt_fdfjfkfa
+fnmadd_s 0000 10001001 ..... ..... ..... ..... @fmt_fdfjfkfa
+fnmadd_d 0000 10001010 ..... ..... ..... ..... @fmt_fdfjfkfa
+fnmsub_s 0000 10001101 ..... ..... ..... ..... @fmt_fdfjfkfa
+fnmsub_d 0000 10001110 ..... ..... ..... ..... @fmt_fdfjfkfa
+fcmp_cond_s 0000 11000001 ..... ..... ..... 00 ... @fmt_cdfjfkfcond
+fcmp_cond_d 0000 11000010 ..... ..... ..... 00 ... @fmt_cdfjfkfcond
+fsel 0000 11010000 00 ... ..... ..... ..... @fmt_fdfjfkca
+
+# long immediate Instructions
+addu16i_d 0001 00 ................ ..... ..... @fmt_rdrjsi16
+lu12i_w 0001 010 .................... ..... @fmt_rdsi20
+lu32i_d 0001 011 .................... ..... @fmt_rdsi20
+pcaddi 0001 100 .................... ..... @fmt_rdsi20
+pcalau12i 0001 101 .................... ..... @fmt_rdsi20
+pcaddu12i 0001 110 .................... ..... @fmt_rdsi20
+pcaddu18i 0001 111 .................... ..... @fmt_rdsi20
+
+# load/store Instructions
+ll_w 0010 0000 .............. ..... ..... @fmt_rdrjsi14
+sc_w 0010 0001 .............. ..... ..... @fmt_rdrjsi14
+ll_d 0010 0010 .............. ..... ..... @fmt_rdrjsi14
+sc_d 0010 0011 .............. ..... ..... @fmt_rdrjsi14
+ldptr_w 0010 0100 .............. ..... ..... @fmt_rdrjsi14
+stptr_w 0010 0101 .............. ..... ..... @fmt_rdrjsi14
+ldptr_d 0010 0110 .............. ..... ..... @fmt_rdrjsi14
+stptr_d 0010 0111 .............. ..... ..... @fmt_rdrjsi14
+ld_b 0010 100000 ............ ..... ..... @fmt_rdrjsi12
+ld_h 0010 100001 ............ ..... ..... @fmt_rdrjsi12
+ld_w 0010 100010 ............ ..... ..... @fmt_rdrjsi12
+ld_d 0010 100011 ............ ..... ..... @fmt_rdrjsi12
+st_b 0010 100100 ............ ..... ..... @fmt_rdrjsi12
+st_h 0010 100101 ............ ..... ..... @fmt_rdrjsi12
+st_w 0010 100110 ............ ..... ..... @fmt_rdrjsi12
+st_d 0010 100111 ............ ..... ..... @fmt_rdrjsi12
+ld_bu 0010 101000 ............ ..... ..... @fmt_rdrjsi12
+ld_hu 0010 101001 ............ ..... ..... @fmt_rdrjsi12
+ld_wu 0010 101010 ............ ..... ..... @fmt_rdrjsi12
+preld 0010 101011 ............ ..... ..... @fmt_hintrjsi12
+fld_s 0010 101100 ............ ..... ..... @fmt_fdrjsi12
+fst_s 0010 101101 ............ ..... ..... @fmt_fdrjsi12
+fld_d 0010 101110 ............ ..... ..... @fmt_fdrjsi12
+fst_d 0010 101111 ............ ..... ..... @fmt_fdrjsi12
+ldx_b 0011 10000000 00000 ..... ..... ..... @fmt_rdrjrk
+ldx_h 0011 10000000 01000 ..... ..... ..... @fmt_rdrjrk
+ldx_w 0011 10000000 10000 ..... ..... ..... @fmt_rdrjrk
+ldx_d 0011 10000000 11000 ..... ..... ..... @fmt_rdrjrk
+stx_b 0011 10000001 00000 ..... ..... ..... @fmt_rdrjrk
+stx_h 0011 10000001 01000 ..... ..... ..... @fmt_rdrjrk
+stx_w 0011 10000001 10000 ..... ..... ..... @fmt_rdrjrk
+stx_d 0011 10000001 11000 ..... ..... ..... @fmt_rdrjrk
+ldx_bu 0011 10000010 00000 ..... ..... ..... @fmt_rdrjrk
+ldx_hu 0011 10000010 01000 ..... ..... ..... @fmt_rdrjrk
+ldx_wu 0011 10000010 10000 ..... ..... ..... @fmt_rdrjrk
+fldx_s 0011 10000011 00000 ..... ..... ..... @fmt_fdrjrk
+fldx_d 0011 10000011 01000 ..... ..... ..... @fmt_fdrjrk
+fstx_s 0011 10000011 10000 ..... ..... ..... @fmt_fdrjrk
+fstx_d 0011 10000011 11000 ..... ..... ..... @fmt_fdrjrk
+amswap_w 0011 10000110 00000 ..... ..... ..... @fmt_rdrjrk
+amswap_d 0011 10000110 00001 ..... ..... ..... @fmt_rdrjrk
+amadd_w 0011 10000110 00010 ..... ..... ..... @fmt_rdrjrk
+amadd_d 0011 10000110 00011 ..... ..... ..... @fmt_rdrjrk
+amand_w 0011 10000110 00100 ..... ..... ..... @fmt_rdrjrk
+amand_d 0011 10000110 00101 ..... ..... ..... @fmt_rdrjrk
+amor_w 0011 10000110 00110 ..... ..... ..... @fmt_rdrjrk
+amor_d 0011 10000110 00111 ..... ..... ..... @fmt_rdrjrk
+amxor_w 0011 10000110 01000 ..... ..... ..... @fmt_rdrjrk
+amxor_d 0011 10000110 01001 ..... ..... ..... @fmt_rdrjrk
+ammax_w 0011 10000110 01010 ..... ..... ..... @fmt_rdrjrk
+ammax_d 0011 10000110 01011 ..... ..... ..... @fmt_rdrjrk
+ammin_w 0011 10000110 01100 ..... ..... .....
@fmt_rdrjrk +ammin_d 0011 10000110 01101 ..... ..... ..... @fmt_rdrjrk +ammax_wu 0011 10000110 01110 ..... ..... ..... @fmt_rdrjrk +ammax_du 0011 10000110 01111 ..... ..... ..... @fmt_rdrjrk +ammin_wu 0011 10000110 10000 ..... ..... ..... @fmt_rdrjrk +ammin_du 0011 10000110 10001 ..... ..... ..... @fmt_rdrjrk +amswap_db_w 0011 10000110 10010 ..... ..... ..... @fmt_rdrjrk +amswap_db_d 0011 10000110 10011 ..... ..... ..... @fmt_rdrjrk +amadd_db_w 0011 10000110 10100 ..... ..... ..... @fmt_rdrjrk +amadd_db_d 0011 10000110 10101 ..... ..... ..... @fmt_rdrjrk +amand_db_w 0011 10000110 10110 ..... ..... ..... @fmt_rdrjrk +amand_db_d 0011 10000110 10111 ..... ..... ..... @fmt_rdrjrk +amor_db_w 0011 10000110 11000 ..... ..... ..... @fmt_rdrjrk +amor_db_d 0011 10000110 11001 ..... ..... ..... @fmt_rdrjrk +amxor_db_w 0011 10000110 11010 ..... ..... ..... @fmt_rdrjrk +amxor_db_d 0011 10000110 11011 ..... ..... ..... @fmt_rdrjrk +ammax_db_w 0011 10000110 11100 ..... ..... ..... @fmt_rdrjrk +ammax_db_d 0011 10000110 11101 ..... ..... ..... @fmt_rdrjrk +ammin_db_w 0011 10000110 11110 ..... ..... ..... @fmt_rdrjrk +ammin_db_d 0011 10000110 11111 ..... ..... ..... @fmt_rdrjrk +ammax_db_wu 0011 10000111 00000 ..... ..... ..... @fmt_rdrjrk +ammax_db_du 0011 10000111 00001 ..... ..... ..... @fmt_rdrjrk +ammin_db_wu 0011 10000111 00010 ..... ..... ..... @fmt_rdrjrk +ammin_db_du 0011 10000111 00011 ..... ..... ..... @fmt_rdrjrk +dbar 0011 10000111 00100 ............... @fmt_whint +ibar 0011 10000111 00101 ............... @fmt_whint +fldgt_s 0011 10000111 01000 ..... ..... ..... @fmt_fdrjrk +fldgt_d 0011 10000111 01001 ..... ..... ..... @fmt_fdrjrk +fldle_s 0011 10000111 01010 ..... ..... ..... @fmt_fdrjrk +fldle_d 0011 10000111 01011 ..... ..... ..... @fmt_fdrjrk +fstgt_s 0011 10000111 01100 ..... ..... ..... @fmt_fdrjrk +fstgt_d 0011 10000111 01101 ..... ..... ..... @fmt_fdrjrk +fstle_s 0011 10000111 01110 ..... ..... ..... @fmt_fdrjrk +fstle_d 0011 10000111 01111 ..... ..... ..... @fmt_fdrjrk +ldgt_b 0011 10000111 10000 ..... ..... ..... @fmt_rdrjrk +ldgt_h 0011 10000111 10001 ..... ..... ..... @fmt_rdrjrk +ldgt_w 0011 10000111 10010 ..... ..... ..... @fmt_rdrjrk +ldgt_d 0011 10000111 10011 ..... ..... ..... @fmt_rdrjrk +ldle_b 0011 10000111 10100 ..... ..... ..... @fmt_rdrjrk +ldle_h 0011 10000111 10101 ..... ..... ..... @fmt_rdrjrk +ldle_w 0011 10000111 10110 ..... ..... ..... @fmt_rdrjrk +ldle_d 0011 10000111 10111 ..... ..... ..... @fmt_rdrjrk +stgt_b 0011 10000111 11000 ..... ..... ..... @fmt_rdrjrk +stgt_h 0011 10000111 11001 ..... ..... ..... @fmt_rdrjrk +stgt_w 0011 10000111 11010 ..... ..... ..... @fmt_rdrjrk +stgt_d 0011 10000111 11011 ..... ..... ..... @fmt_rdrjrk +stle_b 0011 10000111 11100 ..... ..... ..... @fmt_rdrjrk +stle_h 0011 10000111 11101 ..... ..... ..... @fmt_rdrjrk +stle_w 0011 10000111 11110 ..... ..... ..... @fmt_rdrjrk +stle_d 0011 10000111 11111 ..... ..... ..... @fmt_rdrjrk + +# jump Instructions +beqz 0100 00 ................ ..... ..... @fmt_rjoffs21 +bnez 0100 01 ................ ..... ..... @fmt_rjoffs21 +bceqz 0100 10 ................ 00 ... ..... @fmt_cjoffs21 +bcnez 0100 10 ................ 01 ... ..... @fmt_cjoffs21 +jirl 0100 11 ................ ..... ..... @fmt_rdrjoffs16 +b 0101 00 .......................... @fmt_offs +bl 0101 01 .......................... @fmt_offs +beq 0101 10 ................ ..... ..... @fmt_rjrdoffs16 +bne 0101 11 ................ ..... ..... @fmt_rjrdoffs16 +blt 0110 00 ................ ..... ..... @fmt_rjrdoffs16 +bge 0110 01 ................ 
..... ..... @fmt_rjrdoffs16 +bltu 0110 10 ................ ..... ..... @fmt_rjrdoffs16 +bgeu 0110 11 ................ ..... ..... @fmt_rjrdoffs16 diff --git a/target/loongarch64/instmap.h b/target/loongarch64/instmap.h new file mode 100644 index 0000000000000000000000000000000000000000..5fbb8b5d29921ad9909994798427580c208b7ab8 --- /dev/null +++ b/target/loongarch64/instmap.h @@ -0,0 +1,217 @@ +/* + * Loongarch emulation for qemu: instruction opcode + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef TARGET_LARCH_INSTMAP_H +#define TARGET_LARCH_INSTMAP_H + +enum { + /* fix opcodes */ + OPC_LARCH_CLO_W = (0x000004 << 10), + OPC_LARCH_CLZ_W = (0x000005 << 10), + OPC_LARCH_CLO_D = (0x000008 << 10), + OPC_LARCH_CLZ_D = (0x000009 << 10), + OPC_LARCH_REVB_2H = (0x00000C << 10), + OPC_LARCH_REVB_4H = (0x00000D << 10), + OPC_LARCH_REVH_D = (0x000011 << 10), + OPC_LARCH_BREV_4B = (0x000012 << 10), + OPC_LARCH_BREV_8B = (0x000013 << 10), + OPC_LARCH_EXT_WH = (0x000016 << 10), + OPC_LARCH_EXT_WB = (0x000017 << 10), + + OPC_LARCH_ADD_W = (0x00020 << 15), + OPC_LARCH_ADD_D = (0x00021 << 15), + OPC_LARCH_SUB_W = (0x00022 << 15), + OPC_LARCH_SUB_D = (0x00023 << 15), + OPC_LARCH_SLT = (0x00024 << 15), + OPC_LARCH_SLTU = (0x00025 << 15), + OPC_LARCH_MASKEQZ = (0x00026 << 15), + OPC_LARCH_MASKNEZ = (0x00027 << 15), + OPC_LARCH_NOR = (0x00028 << 15), + OPC_LARCH_AND = (0x00029 << 15), + OPC_LARCH_OR = (0x0002A << 15), + OPC_LARCH_XOR = (0x0002B << 15), + OPC_LARCH_SLL_W = (0x0002E << 15), + OPC_LARCH_SRL_W = (0x0002F << 15), + OPC_LARCH_SRA_W = (0x00030 << 15), + OPC_LARCH_SLL_D = (0x00031 << 15), + OPC_LARCH_SRL_D = (0x00032 << 15), + OPC_LARCH_SRA_D = (0x00033 << 15), + OPC_LARCH_ROTR_W = (0x00036 << 15), + OPC_LARCH_ROTR_D = (0x00037 << 15), + OPC_LARCH_MUL_W = (0x00038 << 15), + OPC_LARCH_MULH_W = (0x00039 << 15), + OPC_LARCH_MULH_WU = (0x0003A << 15), + OPC_LARCH_MUL_D = (0x0003B << 15), + OPC_LARCH_MULH_D = (0x0003C << 15), + OPC_LARCH_MULH_DU = (0x0003D << 15), + OPC_LARCH_DIV_W = (0x00040 << 15), + OPC_LARCH_MOD_W = (0x00041 << 15), + OPC_LARCH_DIV_WU = (0x00042 << 15), + OPC_LARCH_MOD_WU = (0x00043 << 15), + OPC_LARCH_DIV_D = (0x00044 << 15), + OPC_LARCH_MOD_D = (0x00045 << 15), + OPC_LARCH_DIV_DU = (0x00046 << 15), + OPC_LARCH_MOD_DU = (0x00047 << 15), + OPC_LARCH_SRLI_W = (0x00089 << 15), + OPC_LARCH_SRAI_W = (0x00091 << 15), + OPC_LARCH_ROTRI_W = (0x00099 << 15), + + OPC_LARCH_ALSL_W = (0x0002 << 17), + OPC_LARCH_ALSL_D = (0x0016 << 17), + + OPC_LARCH_TRINS_W = (0x003 << 21) | (0x0 << 15), + OPC_LARCH_TRPICK_W = (0x003 << 21) | (0x1 << 15), +}; + +enum { + /* float opcodes */ + OPC_LARCH_FABS_S = (0x004501 << 10), + OPC_LARCH_FABS_D = (0x004502 << 10), + OPC_LARCH_FNEG_S = (0x004505 << 10), + OPC_LARCH_FNEG_D = (0x004506 << 10), + OPC_LARCH_FCLASS_S = (0x00450D << 10), + OPC_LARCH_FCLASS_D = (0x00450E << 10), + OPC_LARCH_FSQRT_S = (0x004511 << 10), + OPC_LARCH_FSQRT_D = (0x004512 << 10), + OPC_LARCH_FRECIP_S 
= (0x004515 << 10), + OPC_LARCH_FRECIP_D = (0x004516 << 10), + OPC_LARCH_FRSQRT_S = (0x004519 << 10), + OPC_LARCH_FRSQRT_D = (0x00451A << 10), + OPC_LARCH_FMOV_S = (0x004525 << 10), + OPC_LARCH_FMOV_D = (0x004526 << 10), + OPC_LARCH_GR2FR_W = (0x004529 << 10), + OPC_LARCH_GR2FR_D = (0x00452A << 10), + OPC_LARCH_GR2FRH_W = (0x00452B << 10), + OPC_LARCH_FR2GR_S = (0x00452D << 10), + OPC_LARCH_FR2GR_D = (0x00452E << 10), + OPC_LARCH_FRH2GR_S = (0x00452F << 10), + + OPC_LARCH_FCVT_S_D = (0x004646 << 10), + OPC_LARCH_FCVT_D_S = (0x004649 << 10), + OPC_LARCH_FTINTRM_W_S = (0x004681 << 10), + OPC_LARCH_FTINTRM_W_D = (0x004682 << 10), + OPC_LARCH_FTINTRM_L_S = (0x004689 << 10), + OPC_LARCH_FTINTRM_L_D = (0x00468A << 10), + OPC_LARCH_FTINTRP_W_S = (0x004691 << 10), + OPC_LARCH_FTINTRP_W_D = (0x004692 << 10), + OPC_LARCH_FTINTRP_L_S = (0x004699 << 10), + OPC_LARCH_FTINTRP_L_D = (0x00469A << 10), + OPC_LARCH_FTINTRZ_W_S = (0x0046A1 << 10), + OPC_LARCH_FTINTRZ_W_D = (0x0046A2 << 10), + OPC_LARCH_FTINTRZ_L_S = (0x0046A9 << 10), + OPC_LARCH_FTINTRZ_L_D = (0x0046AA << 10), + OPC_LARCH_FTINTRNE_W_S = (0x0046B1 << 10), + OPC_LARCH_FTINTRNE_W_D = (0x0046B2 << 10), + OPC_LARCH_FTINTRNE_L_S = (0x0046B9 << 10), + OPC_LARCH_FTINTRNE_L_D = (0x0046BA << 10), + OPC_LARCH_FTINT_W_S = (0x0046C1 << 10), + OPC_LARCH_FTINT_W_D = (0x0046C2 << 10), + OPC_LARCH_FTINT_L_S = (0x0046C9 << 10), + OPC_LARCH_FTINT_L_D = (0x0046CA << 10), + OPC_LARCH_FFINT_S_W = (0x004744 << 10), + OPC_LARCH_FFINT_S_L = (0x004746 << 10), + OPC_LARCH_FFINT_D_W = (0x004748 << 10), + OPC_LARCH_FFINT_D_L = (0x00474A << 10), + OPC_LARCH_FRINT_S = (0x004791 << 10), + OPC_LARCH_FRINT_D = (0x004792 << 10), + + OPC_LARCH_FADD_S = (0x00201 << 15), + OPC_LARCH_FADD_D = (0x00202 << 15), + OPC_LARCH_FSUB_S = (0x00205 << 15), + OPC_LARCH_FSUB_D = (0x00206 << 15), + OPC_LARCH_FMUL_S = (0x00209 << 15), + OPC_LARCH_FMUL_D = (0x0020A << 15), + OPC_LARCH_FDIV_S = (0x0020D << 15), + OPC_LARCH_FDIV_D = (0x0020E << 15), + OPC_LARCH_FMAX_S = (0x00211 << 15), + OPC_LARCH_FMAX_D = (0x00212 << 15), + OPC_LARCH_FMIN_S = (0x00215 << 15), + OPC_LARCH_FMIN_D = (0x00216 << 15), + OPC_LARCH_FMAXA_S = (0x00219 << 15), + OPC_LARCH_FMAXA_D = (0x0021A << 15), + OPC_LARCH_FMINA_S = (0x0021D << 15), + OPC_LARCH_FMINA_D = (0x0021E << 15), +}; + +enum { + /* 12 bit immediate opcodes */ + OPC_LARCH_SLTI = (0x008 << 22), + OPC_LARCH_SLTIU = (0x009 << 22), + OPC_LARCH_ADDI_W = (0x00A << 22), + OPC_LARCH_ADDI_D = (0x00B << 22), + OPC_LARCH_ANDI = (0x00D << 22), + OPC_LARCH_ORI = (0x00E << 22), + OPC_LARCH_XORI = (0x00F << 22), +}; + +enum { + /* load/store opcodes */ + OPC_LARCH_FLDX_S = (0x07060 << 15), + OPC_LARCH_FLDX_D = (0x07068 << 15), + OPC_LARCH_FSTX_S = (0x07070 << 15), + OPC_LARCH_FSTX_D = (0x07078 << 15), + OPC_LARCH_FLDGT_S = (0x070E8 << 15), + OPC_LARCH_FLDGT_D = (0x070E9 << 15), + OPC_LARCH_FLDLE_S = (0x070EA << 15), + OPC_LARCH_FLDLE_D = (0x070EB << 15), + OPC_LARCH_FSTGT_S = (0x070EC << 15), + OPC_LARCH_FSTGT_D = (0x070ED << 15), + OPC_LARCH_FSTLE_S = (0x070EE << 15), + OPC_LARCH_FSTLE_D = (0x070EF << 15), + + OPC_LARCH_LD_B = (0x0A0 << 22), + OPC_LARCH_LD_H = (0x0A1 << 22), + OPC_LARCH_LD_W = (0x0A2 << 22), + OPC_LARCH_LD_D = (0x0A3 << 22), + OPC_LARCH_ST_B = (0x0A4 << 22), + OPC_LARCH_ST_H = (0x0A5 << 22), + OPC_LARCH_ST_W = (0x0A6 << 22), + OPC_LARCH_ST_D = (0x0A7 << 22), + OPC_LARCH_LD_BU = (0x0A8 << 22), + OPC_LARCH_LD_HU = (0x0A9 << 22), + OPC_LARCH_LD_WU = (0x0AA << 22), + OPC_LARCH_FLD_S = (0x0AC << 22), + OPC_LARCH_FST_S = (0x0AD << 22), + OPC_LARCH_FLD_D = (0x0AE 
<< 22),
+    OPC_LARCH_FST_D = (0x0AF << 22),
+
+    OPC_LARCH_LL_W = (0x20 << 24),
+    OPC_LARCH_SC_W = (0x21 << 24),
+    OPC_LARCH_LL_D = (0x22 << 24),
+    OPC_LARCH_SC_D = (0x23 << 24),
+    OPC_LARCH_LDPTR_W = (0x24 << 24),
+    OPC_LARCH_STPTR_W = (0x25 << 24),
+    OPC_LARCH_LDPTR_D = (0x26 << 24),
+    OPC_LARCH_STPTR_D = (0x27 << 24),
+};
+
+enum {
+    /* jump opcodes */
+    OPC_LARCH_BEQZ = (0x10 << 26),
+    OPC_LARCH_BNEZ = (0x11 << 26),
+    OPC_LARCH_B = (0x14 << 26),
+    OPC_LARCH_BEQ = (0x16 << 26),
+    OPC_LARCH_BNE = (0x17 << 26),
+    OPC_LARCH_BLT = (0x18 << 26),
+    OPC_LARCH_BGE = (0x19 << 26),
+    OPC_LARCH_BLTU = (0x1A << 26),
+    OPC_LARCH_BGEU = (0x1B << 26),
+};
+
+#endif
diff --git a/target/loongarch64/internal.h b/target/loongarch64/internal.h
new file mode 100644
index 0000000000000000000000000000000000000000..a51b7e6f564eb54cf8913326e7d4960b7a65bdc5
--- /dev/null
+++ b/target/loongarch64/internal.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see .
+ *
+ */
+
+#ifndef LOONGARCH_INTERNAL_H
+#define LOONGARCH_INTERNAL_H
+
+#include "cpu-csr.h"
+
+/* Supported MMU types. */
+enum loongarch_mmu_types {
+    MMU_TYPE_NONE,
+    MMU_TYPE_LS3A5K, /* LISA CSR */
+};
+
+struct loongarch_def_t {
+    const char *name;
+    int32_t CSR_PRid;
+    int32_t FCSR0;
+    int32_t FCSR0_rw_bitmask;
+    int32_t PABITS;
+    CPU_LOONGARCH_CSR
+    uint64_t insn_flags;
+    enum loongarch_mmu_types mmu_type;
+    int cpu_cfg[64];
+};
+
+/* loongarch 3a5000 TLB entry */
+struct ls3a5k_tlb_t {
+    target_ulong VPN;
+    uint64_t PageMask;    /* CSR_TLBIDX[29:24] */
+    uint32_t PageSize;
+    uint16_t ASID;
+    unsigned int G:1;     /* CSR_TLBLO[6] */
+
+    unsigned int C0:3;    /* CSR_TLBLO[5:4] */
+    unsigned int C1:3;
+
+    unsigned int V0:1;    /* CSR_TLBLO[0] */
+    unsigned int V1:1;
+
+    unsigned int WE0:1;   /* CSR_TLBLO[1] */
+    unsigned int WE1:1;
+
+    unsigned int XI0:1;   /* CSR_TLBLO[62] */
+    unsigned int XI1:1;
+
+    unsigned int RI0:1;   /* CSR_TLBLO[61] */
+    unsigned int RI1:1;
+
+    unsigned int EHINV:1; /* CSR_TLBIDX[31] */
+
+    unsigned int PLV0:2;  /* CSR_TLBLO[3:2] */
+    unsigned int PLV1:2;
+
+    unsigned int RPLV0:1;
+    unsigned int RPLV1:1; /* CSR_TLBLO[63] */
+
+    uint64_t PPN0; /* CSR_TLBLO[47:12] */
+    uint64_t PPN1; /* CSR_TLBLO[47:12] */
+};
+typedef struct ls3a5k_tlb_t ls3a5k_tlb_t;
+
+struct CPULOONGARCHTLBContext {
+    uint32_t nb_tlb;
+    uint32_t tlb_in_use;
+    int (*map_address)(struct CPULOONGARCHState *env, hwaddr *physical,
+                       int *prot, target_ulong address, int rw,
+                       int access_type);
+    void (*helper_tlbwr)(struct CPULOONGARCHState *env);
+    void (*helper_tlbfill)(struct CPULOONGARCHState *env);
+    void (*helper_tlbsrch)(struct CPULOONGARCHState *env);
+    void (*helper_tlbrd)(struct CPULOONGARCHState *env);
+    void (*helper_tlbclr)(struct CPULOONGARCHState *env);
+    void (*helper_tlbflush)(struct CPULOONGARCHState *env);
+    void (*helper_invtlb)(struct CPULOONGARCHState *env, target_ulong addr,
+                          target_ulong info, int op);
+    union
+    {
+        struct {
+            uint64_t ftlb_mask;
+            uint32_t ftlb_size; /* at most: 8 * 256 = 2048 */
+            uint32_t vtlb_size; /* at most: 64 */
+            ls3a5k_tlb_t tlb[2048 + 64]; /* at most: 2048 FTLB + 64 VTLB */
+        } ls3a5k;
+    } mmu;
+};
+
+enum {
+    TLBRET_PE = -7,
+    TLBRET_XI = -6,
+    TLBRET_RI = -5,
+    TLBRET_DIRTY = -4,
+    TLBRET_INVALID = -3,
+    TLBRET_NOMATCH = -2,
+    TLBRET_BADADDR = -1,
+    TLBRET_MATCH = 0
+};
+
+extern unsigned int ieee_rm[];
+
+static inline void restore_rounding_mode(CPULOONGARCHState *env)
+{
+    set_float_rounding_mode(ieee_rm[(env->active_fpu.fcsr0 >> FCSR0_RM) & 0x3],
+                            &env->active_fpu.fp_status);
+}
+
+static inline void restore_flush_mode(CPULOONGARCHState *env)
+{
+    set_flush_to_zero(0, &env->active_fpu.fp_status);
+}
+
+static inline void restore_fp_status(CPULOONGARCHState *env)
+{
+    restore_rounding_mode(env);
+    restore_flush_mode(env);
+}
+
+static inline void compute_hflags(CPULOONGARCHState *env)
+{
+    env->hflags &= ~(LARCH_HFLAG_64 | LARCH_HFLAG_FPU | LARCH_HFLAG_KSU |
+                     LARCH_HFLAG_AWRAP | LARCH_HFLAG_LSX | LARCH_HFLAG_LASX);
+
+    env->hflags |= (env->CSR_CRMD & CSR_CRMD_PLV);
+    env->hflags |= LARCH_HFLAG_64;
+
+    if (env->CSR_EUEN & CSR_EUEN_FPEN) {
+        env->hflags |= LARCH_HFLAG_FPU;
+    }
+    if (env->CSR_EUEN & CSR_EUEN_LSXEN) {
+        env->hflags |= LARCH_HFLAG_LSX;
+    }
+    if (env->CSR_EUEN & CSR_EUEN_LASXEN) {
+        env->hflags |= LARCH_HFLAG_LASX;
+    }
+    if (env->CSR_EUEN & CSR_EUEN_LBTEN) {
+        env->hflags |= LARCH_HFLAG_LBT;
+    }
+}
+
+/* Check whether there is a pending interrupt that is not masked out */
+static inline bool cpu_loongarch_hw_interrupts_pending(CPULOONGARCHState *env)
+{
+    int32_t pending;
+    int32_t status;
+    bool r;
+
+    pending = env->CSR_ESTAT & CSR_ESTAT_IPMASK;
+    status = env->CSR_ECFG & CSR_ECFG_IPMASK;
+
+    /*
+     * Whether configured for compatibility or VInt (Vectored Interrupt)
+     * mode, the pending bits are individual interrupt lines and the
+     * status bits are their individual masks.
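+     *
+     * Worked example (illustrative bit values, not taken from a real
+     * dump): if CSR_ESTAT has bit 2 set (interrupt line 2 pending) and
+     * CSR_ECFG also has bit 2 set (line 2 unmasked), then
+     * (pending & status) != 0 and the interrupt can be delivered;
+     * clearing bit 2 of CSR_ECFG masks the line and the result is 0.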
+     */
+    r = (pending & status) != 0;
+
+    return r;
+}
+
+/* stabletimer.c */
+uint32_t cpu_loongarch_get_random_ls3a5k_tlb(uint32_t low, uint32_t high);
+uint64_t cpu_loongarch_get_stable_counter(CPULOONGARCHState *env);
+uint64_t cpu_loongarch_get_stable_timer_ticks(CPULOONGARCHState *env);
+void cpu_loongarch_store_stable_timer_config(CPULOONGARCHState *env,
+                                             uint64_t value);
+int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
+                                   int cpuid, void *opaque);
+
+void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
+
+/* TODO QOM'ify CPU reset and remove */
+void cpu_state_reset(CPULOONGARCHState *s);
+void cpu_loongarch_realize_env(CPULOONGARCHState *env);
+
+uint64_t read_fcc(CPULOONGARCHState *env);
+void write_fcc(CPULOONGARCHState *env, uint64_t val);
+
+int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n);
+int loongarch_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+
+#ifdef CONFIG_TCG
+#include "fpu_helper.h"
+#endif
+
+#ifndef CONFIG_USER_ONLY
+extern const struct VMStateDescription vmstate_loongarch_cpu;
+hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
+#endif
+
+#endif
diff --git a/target/loongarch64/kvm.c b/target/loongarch64/kvm.c
new file mode 100644
index 0000000000000000000000000000000000000000..0a4dc86421ca0411451ffa5f65488cb1a82f9c87
--- /dev/null
+++ b/target/loongarch64/kvm.c
@@ -0,0 +1,1404 @@
+/*
+ * KVM/LOONGARCH: LOONGARCH specific KVM APIs
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "cpu.h"
+#include "internal.h"
+#include "qemu/error-report.h"
+#include "qemu/timer.h"
+#include "qemu/main-loop.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "sysemu/runstate.h"
+#include "sysemu/cpus.h"
+#include "kvm_larch.h"
+#include "exec/memattrs.h"
+#include "exec/gdbstub.h"
+
+#define DEBUG_KVM 0
+/*
+ * A 16384-byte buffer can hold the 8-byte kvm_msrs header, plus
+ * 2047 8-byte kvm_csr_entry structs
+ */
+#define CSR_BUF_SIZE 16384
+
+#define DPRINTF(fmt, ...)                        \
+    do {                                         \
+        if (DEBUG_KVM) {                         \
+            fprintf(stderr, fmt, ##__VA_ARGS__); \
+        }                                        \
+    } while (0)
+
+/*
+ * Define loongarch kvm version.
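+ * kvm_arch_init() compares it against the lvz version reported by the
+ * kernel through KVM_CAP_LOONGARCH_VZ and warns on a mismatch.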
+ * Bump the version number whenever the qemu/kvm interface changes.
+ */
+#define KVM_LOONGARCH_VERSION 1
+
+static struct {
+    target_ulong addr;
+    int len;
+    int type;
+} inst_breakpoint[8], data_breakpoint[8];
+
+int nb_data_breakpoint = 0, nb_inst_breakpoint = 0;
+static int kvm_loongarch_version_cap;
+
+/*
+ * Hardware breakpoint control register
+ * 4:1 plv0-plv3 enable
+ * 6:5 config virtualization mode
+ * 9:8 load store
+ */
+static const int type_code[] = { [GDB_BREAKPOINT_HW] = 0x5e,
+                                 [GDB_WATCHPOINT_READ] = (0x5e | 1 << 8),
+                                 [GDB_WATCHPOINT_WRITE] = (0x5e | 1 << 9),
+                                 [GDB_WATCHPOINT_ACCESS] =
+                                     (0x5e | 1 << 8 | 1 << 9) };
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+    KVM_CAP_LAST_INFO
+};
+
+static void kvm_loongarch_update_state(void *opaque, bool running,
+                                       RunState state);
+static inline int kvm_larch_putq(CPUState *cs, uint64_t reg_id,
+                                 uint64_t *addr);
+
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
+{
+    return cs->cpu_index;
+}
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+    /* LOONGARCH has 128 signals, so the sigmask is 128 / 8 = 16 bytes */
+    kvm_set_sigmask_len(s, 16);
+
+    kvm_loongarch_version_cap = kvm_check_extension(s, KVM_CAP_LOONGARCH_VZ);
+
+    if (kvm_loongarch_version_cap != KVM_LOONGARCH_VERSION) {
+        warn_report("QEMU/KVM version mismatch, qemu_la_version: lvz-%d, "
+                    "kvm_la_version: lvz-%d",
+                    KVM_LOONGARCH_VERSION, kvm_loongarch_version_cap);
+    }
+    return 0;
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{
+    return 0;
+}
+
+static void kvm_csr_set_addr(uint64_t **addr, uint32_t index, uint64_t *p)
+{
+    addr[index] = p;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    uint64_t **addr;
+    CPULOONGARCHState *env = &cpu->env;
+    int ret = 0;
+
+    kvm_vcpu_enable_cap(cs, KVM_CAP_LOONGARCH_FPU, 0, 0);
+    kvm_vcpu_enable_cap(cs, KVM_CAP_LOONGARCH_LSX, 0, 0);
+
+    cpu->cpuStateEntry =
+        qemu_add_vm_change_state_handler(kvm_loongarch_update_state, cs);
+    /*
+     * kvm_csr_buf is allocated as two CSR_BUF_SIZE halves: the first half
+     * is the struct kvm_msrs command buffer handed to KVM_GET_MSRS and
+     * KVM_SET_MSRS, and the second half is a table mapping CSR index ->
+     * pointer into env, filled in below and used by
+     * kvm_loongarch_get_csr_registers() to scatter the values returned by
+     * the kernel back into CPULOONGARCHState.
+     */
+    cpu->kvm_csr_buf = g_malloc0(CSR_BUF_SIZE + CSR_BUF_SIZE);
+
+    addr = (void *)cpu->kvm_csr_buf + CSR_BUF_SIZE;
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_CRMD, &env->CSR_CRMD);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_PRMD, &env->CSR_PRMD);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_EUEN, &env->CSR_EUEN);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_MISC, &env->CSR_MISC);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_ECFG, &env->CSR_ECFG);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_ESTAT, &env->CSR_ESTAT);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_ERA, &env->CSR_ERA);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_BADV, &env->CSR_BADV);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_BADI, &env->CSR_BADI);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_EEPN, &env->CSR_EEPN);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBIDX, &env->CSR_TLBIDX);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBEHI, &env->CSR_TLBEHI);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBELO0, &env->CSR_TLBELO0);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBELO1, &env->CSR_TLBELO1);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_GTLBC, &env->CSR_GTLBC);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_TRGP, &env->CSR_TRGP);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_ASID, &env->CSR_ASID);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_PGDL, &env->CSR_PGDL);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_PGDH, &env->CSR_PGDH);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_PGD, &env->CSR_PGD);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_PWCTL0, &env->CSR_PWCTL0);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_PWCTL1, &env->CSR_PWCTL1);
+    kvm_csr_set_addr(addr, LOONGARCH_CSR_STLBPGSIZE, &env->CSR_STLBPGSIZE);
+    kvm_csr_set_addr(addr,
LOONGARCH_CSR_RVACFG, &env->CSR_RVACFG); + kvm_csr_set_addr(addr, LOONGARCH_CSR_CPUID, &env->CSR_CPUID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG1, &env->CSR_PRCFG1); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG2, &env->CSR_PRCFG2); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG3, &env->CSR_PRCFG3); + kvm_csr_set_addr(addr, LOONGARCH_CSR_KS0, &env->CSR_KS0); + kvm_csr_set_addr(addr, LOONGARCH_CSR_KS1, &env->CSR_KS1); + kvm_csr_set_addr(addr, LOONGARCH_CSR_KS2, &env->CSR_KS2); + kvm_csr_set_addr(addr, LOONGARCH_CSR_KS3, &env->CSR_KS3); + kvm_csr_set_addr(addr, LOONGARCH_CSR_KS4, &env->CSR_KS4); + kvm_csr_set_addr(addr, LOONGARCH_CSR_KS5, &env->CSR_KS5); + kvm_csr_set_addr(addr, LOONGARCH_CSR_KS6, &env->CSR_KS6); + kvm_csr_set_addr(addr, LOONGARCH_CSR_KS7, &env->CSR_KS7); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TMID, &env->CSR_TMID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_CNTC, &env->CSR_CNTC); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TINTCLR, &env->CSR_TINTCLR); + + kvm_csr_set_addr(addr, LOONGARCH_CSR_GSTAT, &env->CSR_GSTAT); + kvm_csr_set_addr(addr, LOONGARCH_CSR_GCFG, &env->CSR_GCFG); + kvm_csr_set_addr(addr, LOONGARCH_CSR_GINTC, &env->CSR_GINTC); + kvm_csr_set_addr(addr, LOONGARCH_CSR_GCNTC, &env->CSR_GCNTC); + kvm_csr_set_addr(addr, LOONGARCH_CSR_LLBCTL, &env->CSR_LLBCTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IMPCTL1, &env->CSR_IMPCTL1); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IMPCTL2, &env->CSR_IMPCTL2); + kvm_csr_set_addr(addr, LOONGARCH_CSR_GNMI, &env->CSR_GNMI); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRENT, &env->CSR_TLBRENT); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRBADV, &env->CSR_TLBRBADV); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRERA, &env->CSR_TLBRERA); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRSAVE, &env->CSR_TLBRSAVE); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRELO0, &env->CSR_TLBRELO0); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRELO1, &env->CSR_TLBRELO1); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBREHI, &env->CSR_TLBREHI); + kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRPRMD, &env->CSR_TLBRPRMD); + kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRCTL, &env->CSR_ERRCTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRINFO, &env->CSR_ERRINFO); + kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRINFO1, &env->CSR_ERRINFO1); + kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRENT, &env->CSR_ERRENT); + kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRERA, &env->CSR_ERRERA); + kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRSAVE, &env->CSR_ERRSAVE); + kvm_csr_set_addr(addr, LOONGARCH_CSR_CTAG, &env->CSR_CTAG); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN0, &env->CSR_DMWIN0); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN1, &env->CSR_DMWIN1); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN2, &env->CSR_DMWIN2); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN3, &env->CSR_DMWIN3); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL0, &env->CSR_PERFCTRL0); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR0, &env->CSR_PERFCNTR0); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL1, &env->CSR_PERFCTRL1); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR1, &env->CSR_PERFCNTR1); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL2, &env->CSR_PERFCTRL2); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR2, &env->CSR_PERFCNTR2); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL3, &env->CSR_PERFCTRL3); + kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR3, &env->CSR_PERFCNTR3); + + /* debug */ + kvm_csr_set_addr(addr, LOONGARCH_CSR_MWPC, &env->CSR_MWPC); + kvm_csr_set_addr(addr, LOONGARCH_CSR_MWPS, &env->CSR_MWPS); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0ADDR, 
&env->CSR_DB0ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0MASK, &env->CSR_DB0MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0CTL, &env->CSR_DB0CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0ASID, &env->CSR_DB0ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1ADDR, &env->CSR_DB1ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1MASK, &env->CSR_DB1MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1CTL, &env->CSR_DB1CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1ASID, &env->CSR_DB1ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2ADDR, &env->CSR_DB2ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2MASK, &env->CSR_DB2MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2CTL, &env->CSR_DB2CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2ASID, &env->CSR_DB2ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3ADDR, &env->CSR_DB3ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3MASK, &env->CSR_DB3MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3CTL, &env->CSR_DB3CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3ASID, &env->CSR_DB3ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_FWPC, &env->CSR_FWPC); + kvm_csr_set_addr(addr, LOONGARCH_CSR_FWPS, &env->CSR_FWPS); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0ADDR, &env->CSR_IB0ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0MASK, &env->CSR_IB0MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0CTL, &env->CSR_IB0CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0ASID, &env->CSR_IB0ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1ADDR, &env->CSR_IB1ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1MASK, &env->CSR_IB1MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1CTL, &env->CSR_IB1CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1ASID, &env->CSR_IB1ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2ADDR, &env->CSR_IB2ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2MASK, &env->CSR_IB2MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2CTL, &env->CSR_IB2CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2ASID, &env->CSR_IB2ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3ADDR, &env->CSR_IB3ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3MASK, &env->CSR_IB3MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3CTL, &env->CSR_IB3CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3ASID, &env->CSR_IB3ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4ADDR, &env->CSR_IB4ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4MASK, &env->CSR_IB4MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4CTL, &env->CSR_IB4CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4ASID, &env->CSR_IB4ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5ADDR, &env->CSR_IB5ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5MASK, &env->CSR_IB5MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5CTL, &env->CSR_IB5CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5ASID, &env->CSR_IB5ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6ADDR, &env->CSR_IB6ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6MASK, &env->CSR_IB6MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6CTL, &env->CSR_IB6CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6ASID, &env->CSR_IB6ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7ADDR, &env->CSR_IB7ADDR); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7MASK, &env->CSR_IB7MASK); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7CTL, &env->CSR_IB7CTL); + kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7ASID, &env->CSR_IB7ASID); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DEBUG, &env->CSR_DEBUG); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DERA, &env->CSR_DERA); + kvm_csr_set_addr(addr, LOONGARCH_CSR_DESAVE, &env->CSR_DESAVE); + + DPRINTF("%s\n", __func__); + return ret; 
+} + +int kvm_arch_destroy_vcpu(CPUState *cs) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + + g_free(cpu->kvm_csr_buf); + cpu->kvm_csr_buf = NULL; + return 0; +} + +static void kvm_csr_buf_reset(LOONGARCHCPU *cpu) +{ + memset(cpu->kvm_csr_buf, 0, CSR_BUF_SIZE); +} + +static void kvm_csr_entry_add(LOONGARCHCPU *cpu, uint32_t index, + uint64_t value) +{ + struct kvm_msrs *msrs = cpu->kvm_csr_buf; + void *limit = ((void *)msrs) + CSR_BUF_SIZE; + struct kvm_csr_entry *entry = &msrs->entries[msrs->ncsrs]; + + assert((void *)(entry + 1) <= limit); + + entry->index = index; + entry->reserved = 0; + entry->data = value; + msrs->ncsrs++; +} + +void kvm_loongarch_reset_vcpu(LOONGARCHCPU *cpu) +{ + int ret = 0; + uint64_t reset = 1; + + if (CPU(cpu)->kvm_fd > 0) { + ret = kvm_larch_putq(CPU(cpu), KVM_REG_LOONGARCH_VCPU_RESET, &reset); + if (ret < 0) { + error_report("%s reset vcpu failed:%d", __func__, ret); + } + } + + DPRINTF("%s\n", __func__); +} + +void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) +{ + int n; + if (kvm_sw_breakpoints_active(cpu)) { + dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; + } + if (nb_data_breakpoint > 0) { + dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; + for (n = 0; n < nb_data_breakpoint; n++) { + dbg->arch.data_breakpoint[n].addr = data_breakpoint[n].addr; + dbg->arch.data_breakpoint[n].mask = 0; + dbg->arch.data_breakpoint[n].asid = 0; + dbg->arch.data_breakpoint[n].ctrl = + type_code[data_breakpoint[n].type]; + } + dbg->arch.data_bp_nums = nb_data_breakpoint; + } else { + dbg->arch.data_bp_nums = 0; + } + if (nb_inst_breakpoint > 0) { + dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; + for (n = 0; n < nb_inst_breakpoint; n++) { + dbg->arch.inst_breakpoint[n].addr = inst_breakpoint[n].addr; + dbg->arch.inst_breakpoint[n].mask = 0; + dbg->arch.inst_breakpoint[n].asid = 0; + dbg->arch.inst_breakpoint[n].ctrl = + type_code[inst_breakpoint[n].type]; + } + dbg->arch.inst_bp_nums = nb_inst_breakpoint; + } else { + dbg->arch.inst_bp_nums = 0; + } +} + +static const unsigned int brk_insn = 0x002b8005; + +int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + DPRINTF("%s\n", __func__); + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { + error_report("%s failed", __func__); + return -EINVAL; + } + return 0; +} + +int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + static uint32_t brk; + + DPRINTF("%s\n", __func__); + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || + brk != brk_insn || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { + error_report("%s failed", __func__); + return -EINVAL; + } + return 0; +} + +static int find_hw_breakpoint(uint64_t addr, int len, int type) +{ + int n; + switch (type) { + case GDB_BREAKPOINT_HW: + if (nb_inst_breakpoint == 0) { + return -1; + } + for (n = 0; n < nb_inst_breakpoint; n++) { + if (inst_breakpoint[n].addr == addr && + inst_breakpoint[n].type == type) { + return n; + } + } + break; + case GDB_WATCHPOINT_WRITE: + case GDB_WATCHPOINT_READ: + case GDB_WATCHPOINT_ACCESS: + if (nb_data_breakpoint == 0) { + return -1; + } + for (n = 0; n < nb_data_breakpoint; n++) { + if (data_breakpoint[n].addr == addr && + data_breakpoint[n].type == type && + data_breakpoint[n].len == len) { + return n; + } + } + break; + default: + return -1; + } + return -1; +} + +int 
kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len,
+                              int type)
+{
+    switch (type) {
+    case GDB_BREAKPOINT_HW:
+        len = 1;
+        if (nb_inst_breakpoint == 8) {
+            return -ENOBUFS;
+        }
+        if (find_hw_breakpoint(addr, len, type) >= 0) {
+            return -EEXIST;
+        }
+        inst_breakpoint[nb_inst_breakpoint].addr = addr;
+        inst_breakpoint[nb_inst_breakpoint].len = len;
+        inst_breakpoint[nb_inst_breakpoint].type = type;
+        nb_inst_breakpoint++;
+        break;
+    case GDB_WATCHPOINT_WRITE:
+    case GDB_WATCHPOINT_READ:
+    case GDB_WATCHPOINT_ACCESS:
+        switch (len) {
+        case 1:
+        case 2:
+        case 4:
+        case 8:
+            if (addr & (len - 1)) {
+                return -EINVAL;
+            }
+            if (nb_data_breakpoint == 8) {
+                return -ENOBUFS;
+            }
+            if (find_hw_breakpoint(addr, len, type) >= 0) {
+                return -EEXIST;
+            }
+            data_breakpoint[nb_data_breakpoint].addr = addr;
+            data_breakpoint[nb_data_breakpoint].len = len;
+            data_breakpoint[nb_data_breakpoint].type = type;
+            nb_data_breakpoint++;
+            break;
+        default:
+            return -EINVAL;
+        }
+        break;
+    default:
+        return -ENOSYS;
+    }
+    return 0;
+}
+
+int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len,
+                                  int type)
+{
+    int n;
+    n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
+    if (n < 0) {
+        printf("error: hw breakpoint to remove was not found\n");
+        return -ENOENT;
+    }
+    switch (type) {
+    case GDB_BREAKPOINT_HW:
+        nb_inst_breakpoint--;
+        inst_breakpoint[n] = inst_breakpoint[nb_inst_breakpoint];
+        break;
+    case GDB_WATCHPOINT_WRITE:
+    case GDB_WATCHPOINT_READ:
+    case GDB_WATCHPOINT_ACCESS:
+        nb_data_breakpoint--;
+        data_breakpoint[n] = data_breakpoint[nb_data_breakpoint];
+        break;
+    default:
+        return -1;
+    }
+    return 0;
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+    DPRINTF("%s\n", __func__);
+    nb_data_breakpoint = 0;
+    nb_inst_breakpoint = 0;
+}
+
+static inline int cpu_loongarch_io_interrupts_pending(LOONGARCHCPU *cpu)
+{
+    CPULOONGARCHState *env = &cpu->env;
+
+    return env->CSR_ESTAT & (0x1 << 2);
+}
+
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    int r;
+    struct kvm_loongarch_interrupt intr;
+
+    qemu_mutex_lock_iothread();
+
+    if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
+        cpu_loongarch_io_interrupts_pending(cpu)) {
+        intr.cpu = -1;
+        intr.irq = 2;
+        r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
+        if (r < 0) {
+            error_report("%s: cpu %d: failed to inject IRQ %x", __func__,
+                         cs->cpu_index, intr.irq);
+        }
+    }
+
+    qemu_mutex_unlock_iothread();
+}
+
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
+{
+    return MEMTXATTRS_UNSPECIFIED;
+}
+
+int kvm_arch_process_async_events(CPUState *cs)
+{
+    return cs->halted;
+}
+
+static CPUWatchpoint hw_watchpoint;
+
+static bool kvm_loongarch_handle_debug(CPUState *cs, struct kvm_run *run)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int i;
+    bool ret = false;
+    kvm_cpu_synchronize_state(cs);
+    if (cs->singlestep_enabled) {
+        return true;
+    }
+    if (kvm_find_sw_breakpoint(cs, env->active_tc.PC)) {
+        return true;
+    }
+    /* hw breakpoint */
+    if (run->debug.arch.exception == EXCCODE_WATCH) {
+        for (i = 0; i < 8; i++) {
+            if (run->debug.arch.fwps & (1 << i)) {
+                ret = true;
+                break;
+            }
+        }
+        for (i = 0; i < 8; i++) {
+            if (run->debug.arch.mwps & (1 << i)) {
+                cs->watchpoint_hit = &hw_watchpoint;
+                hw_watchpoint.vaddr = data_breakpoint[i].addr;
+                switch (data_breakpoint[i].type) {
+                case GDB_WATCHPOINT_READ:
+                    ret = true;
+                    hw_watchpoint.flags = BP_MEM_READ;
+                    break;
+                case GDB_WATCHPOINT_WRITE:
+                    ret = true;
hw_watchpoint.flags = BP_MEM_WRITE; + break; + case GDB_WATCHPOINT_ACCESS: + ret = true; + hw_watchpoint.flags = BP_MEM_ACCESS; + break; + } + } + } + run->debug.arch.exception = 0; + run->debug.arch.fwps = 0; + run->debug.arch.mwps = 0; + } + return ret; +} + +int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) +{ + int ret; + + DPRINTF("%s\n", __func__); + switch (run->exit_reason) { + case KVM_EXIT_HYPERCALL: + DPRINTF("handle LOONGARCH hypercall\n"); + ret = 0; + run->hypercall.ret = ret; + break; + + case KVM_EXIT_DEBUG: + ret = 0; + if (kvm_loongarch_handle_debug(cs, run)) { + ret = EXCP_DEBUG; + } + break; + default: + error_report("%s: unknown exit reason %d", __func__, run->exit_reason); + ret = -1; + break; + } + + return ret; +} + +bool kvm_arch_stop_on_emulation_error(CPUState *cs) +{ + DPRINTF("%s\n", __func__); + return true; +} + +void kvm_arch_init_irq_routing(KVMState *s) +{ +} + +int kvm_loongarch_set_interrupt(LOONGARCHCPU *cpu, int irq, int level) +{ + CPUState *cs = CPU(cpu); + struct kvm_loongarch_interrupt intr; + + if (!kvm_enabled()) { + return 0; + } + + intr.cpu = -1; + + if (level) { + intr.irq = irq; + } else { + intr.irq = -irq; + } + + kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr); + + return 0; +} + +int kvm_loongarch_set_ipi_interrupt(LOONGARCHCPU *cpu, int irq, int level) +{ + CPUState *cs = current_cpu; + CPUState *dest_cs = CPU(cpu); + struct kvm_loongarch_interrupt intr; + + if (!kvm_enabled()) { + return 0; + } + + intr.cpu = dest_cs->cpu_index; + + if (level) { + intr.irq = irq; + } else { + intr.irq = -irq; + } + + DPRINTF("%s: IRQ: %d\n", __func__, intr.irq); + if (!current_cpu) { + cs = dest_cs; + } + kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr); + + return 0; +} + +static inline int kvm_loongarch_put_one_reg(CPUState *cs, uint64_t reg_id, + int32_t *addr) +{ + struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr }; + + return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg); +} + +static inline int kvm_loongarch_put_one_ureg(CPUState *cs, uint64_t reg_id, + uint32_t *addr) +{ + struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr }; + + return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg); +} + +static inline int kvm_loongarch_put_one_ulreg(CPUState *cs, uint64_t reg_id, + target_ulong *addr) +{ + uint64_t val64 = *addr; + struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)&val64 }; + + return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg); +} + +static inline int kvm_loongarch_put_one_reg64(CPUState *cs, int64_t reg_id, + int64_t *addr) +{ + struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr }; + + return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg); +} + +static inline int kvm_larch_putq(CPUState *cs, uint64_t reg_id, uint64_t *addr) +{ + struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr }; + + return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg); +} + +static inline int kvm_loongarch_get_one_reg(CPUState *cs, uint64_t reg_id, + int32_t *addr) +{ + struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr }; + + return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg); +} + +static inline int kvm_loongarch_get_one_ureg(CPUState *cs, uint64_t reg_id, + uint32_t *addr) +{ + struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr }; + + return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg); +} + +static inline int kvm_loongarch_get_one_ulreg(CPUState *cs, uint64_t reg_id, + target_ulong *addr) +{ + int ret; + uint64_t val64 = 0; + struct kvm_one_reg csrreg = { .id = 
reg_id, .addr = (uintptr_t)&val64 };
+
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
+    if (ret >= 0) {
+        *addr = val64;
+    }
+    return ret;
+}
+
+static inline int kvm_loongarch_get_one_reg64(CPUState *cs, int64_t reg_id,
+                                              int64_t *addr)
+{
+    struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
+
+    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
+}
+
+static inline int kvm_larch_getq(CPUState *cs, uint64_t reg_id, uint64_t *addr)
+{
+    struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
+
+    return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
+}
+
+static inline int kvm_loongarch_change_one_reg(CPUState *cs, uint64_t reg_id,
+                                               int32_t *addr, int32_t mask)
+{
+    int err;
+    int32_t tmp, change;
+
+    err = kvm_loongarch_get_one_reg(cs, reg_id, &tmp);
+    if (err < 0) {
+        return err;
+    }
+
+    /* only change bits in mask */
+    change = (*addr ^ tmp) & mask;
+    if (!change) {
+        return 0;
+    }
+
+    tmp = tmp ^ change;
+    return kvm_loongarch_put_one_reg(cs, reg_id, &tmp);
+}
+
+static inline int kvm_loongarch_change_one_reg64(CPUState *cs, uint64_t reg_id,
+                                                 int64_t *addr, int64_t mask)
+{
+    int err;
+    int64_t tmp, change;
+
+    err = kvm_loongarch_get_one_reg64(cs, reg_id, &tmp);
+    if (err < 0) {
+        DPRINTF("%s: Failed to get reg (%d)\n", __func__, err);
+        return err;
+    }
+
+    /* only change bits in mask */
+    change = (*addr ^ tmp) & mask;
+    if (!change) {
+        return 0;
+    }
+
+    tmp = tmp ^ change;
+    return kvm_loongarch_put_one_reg64(cs, reg_id, &tmp);
+}
+/*
+ * Handle the VM clock being started or stopped
+ */
+static void kvm_loongarch_update_state(void *opaque, bool running,
+                                       RunState state)
+{
+    CPUState *cs = opaque;
+    int ret;
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+
+    /*
+     * If state is already dirty (synced to QEMU) then the KVM timer state is
+     * already saved and can be restored when it is synced back to KVM.
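+     *
+     * In practice: on the running -> stopped transition the current value
+     * of the guest's stable counter is read out below through
+     * KVM_REG_LOONGARCH_COUNTER, and when the VM starts running again the
+     * saved value is written back, so the counter does not advance while
+     * the VM is paused or being live-migrated.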
+ */ + if (!running) { + ret = + kvm_larch_getq(cs, KVM_REG_LOONGARCH_COUNTER, &cpu->counter_value); + if (ret < 0) { + printf("%s: Failed to get counter_value (%d)\n", __func__, ret); + } + + } else { + ret = kvm_larch_putq(cs, KVM_REG_LOONGARCH_COUNTER, + &(LOONGARCH_CPU(cs))->counter_value); + if (ret < 0) { + printf("%s: Failed to put counter_value (%d)\n", __func__, ret); + } + } +} + +static int kvm_loongarch_put_fpu_registers(CPUState *cs, int level) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + int err, ret = 0; + unsigned int i; + struct kvm_fpu fpu; + + fpu.fcsr = env->active_fpu.fcsr0; + for (i = 0; i < 32; i++) { + memcpy(&fpu.fpr[i], &env->active_fpu.fpr[i], + sizeof(struct kvm_fpureg)); + } + for (i = 0; i < 8; i++) { + ((char *)&fpu.fcc)[i] = env->active_fpu.cf[i]; + } + fpu.vcsr = env->active_fpu.vcsr16; + + err = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu); + if (err < 0) { + DPRINTF("%s: Failed to get FPU (%d)\n", __func__, err); + ret = err; + } + + return ret; +} + +static int kvm_loongarch_get_fpu_registers(CPUState *cs) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + int err, ret = 0; + unsigned int i; + struct kvm_fpu fpu; + + err = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu); + if (err < 0) { + DPRINTF("%s: Failed to get FPU (%d)\n", __func__, err); + ret = err; + } else { + env->active_fpu.fcsr0 = fpu.fcsr; + for (i = 0; i < 32; i++) { + memcpy(&env->active_fpu.fpr[i], &fpu.fpr[i], + sizeof(struct kvm_fpureg)); + } + for (i = 0; i < 8; i++) { + env->active_fpu.cf[i] = ((char *)&fpu.fcc)[i]; + } + env->active_fpu.vcsr16 = fpu.vcsr; + } + + return ret; +} + +#define KVM_PUT_ONE_UREG64(cs, regidx, addr) \ + ({ \ + int err; \ + uint64_t csrid = 0; \ + csrid = (KVM_IOC_CSRID(regidx)); \ + err = kvm_larch_putq(cs, csrid, addr); \ + if (err < 0) { \ + DPRINTF("%s: Failed to put regidx 0x%x err:%d\n", __func__, \ + regidx, err); \ + } \ + err; \ + }) + +static int kvm_loongarch_put_csr_registers(CPUState *cs, int level) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + int ret = 0; + + (void)level; + + kvm_csr_buf_reset(cpu); + + kvm_csr_entry_add(cpu, LOONGARCH_CSR_CRMD, env->CSR_CRMD); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRMD, env->CSR_PRMD); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_EUEN, env->CSR_EUEN); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_MISC, env->CSR_MISC); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ECFG, env->CSR_ECFG); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ESTAT, env->CSR_ESTAT); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERA, env->CSR_ERA); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADV, env->CSR_BADV); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADI, env->CSR_BADI); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_EEPN, env->CSR_EEPN); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBIDX, env->CSR_TLBIDX); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBEHI, env->CSR_TLBEHI); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO0, env->CSR_TLBELO0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO1, env->CSR_TLBELO1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GTLBC, env->CSR_GTLBC); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TRGP, env->CSR_TRGP); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ASID, env->CSR_ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDL, env->CSR_PGDL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDH, env->CSR_PGDH); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGD, env->CSR_PGD); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL0, env->CSR_PWCTL0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL1, env->CSR_PWCTL1); + 
kvm_csr_entry_add(cpu, LOONGARCH_CSR_STLBPGSIZE, env->CSR_STLBPGSIZE); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_RVACFG, env->CSR_RVACFG); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_CPUID, env->CSR_CPUID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG1, env->CSR_PRCFG1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG2, env->CSR_PRCFG2); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG3, env->CSR_PRCFG3); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS0, env->CSR_KS0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS1, env->CSR_KS1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS2, env->CSR_KS2); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS3, env->CSR_KS3); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS4, env->CSR_KS4); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS5, env->CSR_KS5); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS6, env->CSR_KS6); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS7, env->CSR_KS7); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TMID, env->CSR_TMID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_CNTC, env->CSR_CNTC); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TINTCLR, env->CSR_TINTCLR); + + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GSTAT, env->CSR_GSTAT); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCFG, env->CSR_GCFG); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GINTC, env->CSR_GINTC); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCNTC, env->CSR_GCNTC); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_LLBCTL, env->CSR_LLBCTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL1, env->CSR_IMPCTL1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL2, env->CSR_IMPCTL2); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GNMI, env->CSR_GNMI); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRENT, env->CSR_TLBRENT); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRBADV, env->CSR_TLBRBADV); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRERA, env->CSR_TLBRERA); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRSAVE, env->CSR_TLBRSAVE); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO0, env->CSR_TLBRELO0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO1, env->CSR_TLBRELO1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBREHI, env->CSR_TLBREHI); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRPRMD, env->CSR_TLBRPRMD); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRCTL, env->CSR_ERRCTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO, env->CSR_ERRINFO); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO1, env->CSR_ERRINFO1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRENT, env->CSR_ERRENT); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRERA, env->CSR_ERRERA); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRSAVE, env->CSR_ERRSAVE); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_CTAG, env->CSR_CTAG); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN0, env->CSR_DMWIN0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN1, env->CSR_DMWIN1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN2, env->CSR_DMWIN2); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN3, env->CSR_DMWIN3); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL0, env->CSR_PERFCTRL0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR0, env->CSR_PERFCNTR0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL1, env->CSR_PERFCTRL1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR1, env->CSR_PERFCNTR1); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL2, env->CSR_PERFCTRL2); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR2, env->CSR_PERFCNTR2); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL3, env->CSR_PERFCTRL3); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR3, env->CSR_PERFCNTR3); + + /* debug */ + kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPC, env->CSR_MWPC); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPS, env->CSR_MWPS); + 
kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ADDR, env->CSR_DB0ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0MASK, env->CSR_DB0MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0CTL, env->CSR_DB0CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ASID, env->CSR_DB0ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ADDR, env->CSR_DB1ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1MASK, env->CSR_DB1MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1CTL, env->CSR_DB1CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ASID, env->CSR_DB1ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ADDR, env->CSR_DB2ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2MASK, env->CSR_DB2MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2CTL, env->CSR_DB2CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ASID, env->CSR_DB2ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ADDR, env->CSR_DB3ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3MASK, env->CSR_DB3MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3CTL, env->CSR_DB3CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ASID, env->CSR_DB3ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPC, env->CSR_FWPC); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPS, env->CSR_FWPS); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ADDR, env->CSR_IB0ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0MASK, env->CSR_IB0MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0CTL, env->CSR_IB0CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ASID, env->CSR_IB0ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ADDR, env->CSR_IB1ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1MASK, env->CSR_IB1MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1CTL, env->CSR_IB1CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ASID, env->CSR_IB1ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ADDR, env->CSR_IB2ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2MASK, env->CSR_IB2MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2CTL, env->CSR_IB2CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ASID, env->CSR_IB2ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ADDR, env->CSR_IB3ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3MASK, env->CSR_IB3MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3CTL, env->CSR_IB3CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ASID, env->CSR_IB3ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ADDR, env->CSR_IB4ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4MASK, env->CSR_IB4MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4CTL, env->CSR_IB4CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ASID, env->CSR_IB4ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ADDR, env->CSR_IB5ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5MASK, env->CSR_IB5MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5CTL, env->CSR_IB5CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ASID, env->CSR_IB5ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ADDR, env->CSR_IB6ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6MASK, env->CSR_IB6MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6CTL, env->CSR_IB6CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ASID, env->CSR_IB6ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ADDR, env->CSR_IB7ADDR); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7MASK, env->CSR_IB7MASK); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7CTL, env->CSR_IB7CTL); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ASID, env->CSR_IB7ASID); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DEBUG, env->CSR_DEBUG); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DERA, env->CSR_DERA); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DESAVE, env->CSR_DESAVE); + + ret = kvm_vcpu_ioctl(cs, KVM_SET_MSRS, 
cpu->kvm_csr_buf); + if (ret < cpu->kvm_csr_buf->ncsrs) { + struct kvm_csr_entry *e = &cpu->kvm_csr_buf->entries[ret]; + printf("error: failed to set CSR 0x%" PRIx32 " to 0x%" PRIx64 "\n", + (uint32_t)e->index, (uint64_t)e->data); + } + + /* + * timer cfg must be put at last since it is used to enable + * guest timer + */ + ret |= KVM_PUT_ONE_UREG64(cs, LOONGARCH_CSR_TVAL, &env->CSR_TVAL); + ret |= KVM_PUT_ONE_UREG64(cs, LOONGARCH_CSR_TCFG, &env->CSR_TCFG); + return ret; +} + +#define KVM_GET_ONE_UREG64(cs, regidx, addr) \ + ({ \ + int err; \ + uint64_t csrid = 0; \ + csrid = (KVM_IOC_CSRID(regidx)); \ + err = kvm_larch_getq(cs, csrid, addr); \ + if (err < 0) { \ + DPRINTF("%s: Failed to put regidx 0x%x err:%d\n", __func__, \ + regidx, err); \ + } \ + err; \ + }) + +static int kvm_loongarch_get_csr_registers(CPUState *cs) +{ + LOONGARCHCPU *cpu = LOONGARCH_CPU(cs); + CPULOONGARCHState *env = &cpu->env; + int ret = 0, i; + struct kvm_csr_entry *csrs = cpu->kvm_csr_buf->entries; + uint64_t **addr; + + kvm_csr_buf_reset(cpu); + addr = (void *)cpu->kvm_csr_buf + CSR_BUF_SIZE; + + kvm_csr_entry_add(cpu, LOONGARCH_CSR_CRMD, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRMD, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_EUEN, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_MISC, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ECFG, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ESTAT, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERA, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADV, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADI, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_EEPN, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBIDX, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBEHI, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO0, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GTLBC, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TRGP, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDH, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGD, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL0, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_STLBPGSIZE, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_RVACFG, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_CPUID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG2, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG3, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS0, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS2, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS3, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS4, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS5, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS6, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS7, 0); + + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TMID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_CNTC, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TINTCLR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GSTAT, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCFG, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GINTC, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCNTC, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_LLBCTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL2, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_GNMI, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRENT, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRBADV, 0); + 
kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRERA, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRSAVE, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO0, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBREHI, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRPRMD, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRCTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRENT, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRERA, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRSAVE, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_CTAG, 0); + + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN0, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN2, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN3, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL0, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR0, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR1, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL2, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR2, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL3, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR3, 0); + + /* debug */ + kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPC, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPS, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPC, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPS, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4CTL, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ASID, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ADDR, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5MASK, 0); + kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5CTL, 0); + 
kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ADDR, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7MASK, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7CTL, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ASID, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DEBUG, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DERA, 0);
+    kvm_csr_entry_add(cpu, LOONGARCH_CSR_DESAVE, 0);
+
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, cpu->kvm_csr_buf);
+    if (ret < cpu->kvm_csr_buf->ncsrs) {
+        struct kvm_csr_entry *e = &cpu->kvm_csr_buf->entries[ret];
+        printf("error: failed to get CSR 0x%" PRIx32 "\n", (uint32_t)e->index);
+    }
+
+    for (i = 0; i < ret; i++) {
+        uint32_t index = csrs[i].index;
+        if (addr[index]) {
+            *addr[index] = csrs[i].data;
+        } else {
+            printf("Failed to get addr for CSR 0x%" PRIx32 "\n", index);
+        }
+    }
+
+    ret |= KVM_GET_ONE_UREG64(cs, LOONGARCH_CSR_TVAL, &env->CSR_TVAL);
+    ret |= KVM_GET_ONE_UREG64(cs, LOONGARCH_CSR_TCFG, &env->CSR_TCFG);
+    return ret;
+}
+
+int kvm_loongarch_put_pvtime(LOONGARCHCPU *cpu)
+{
+    CPULOONGARCHState *env = &cpu->env;
+    int err;
+    struct kvm_device_attr attr = {
+        .group = KVM_LARCH_VCPU_PVTIME_CTRL,
+        .attr = KVM_LARCH_VCPU_PVTIME_IPA,
+        .addr = (uint64_t)&env->st.guest_addr,
+    };
+
+    err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, &attr);
+    if (err != 0) {
+        /* It's OK if KVM does not have this attribute */
+        return 0;
+    }
+
+    err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, &attr);
+    if (err != 0) {
+        error_report("PVTIME IPA: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
+        return err;
+    }
+
+    return 0;
+}
+
+int kvm_loongarch_get_pvtime(LOONGARCHCPU *cpu)
+{
+    CPULOONGARCHState *env = &cpu->env;
+    int err;
+    struct kvm_device_attr attr = {
+        .group = KVM_LARCH_VCPU_PVTIME_CTRL,
+        .attr = KVM_LARCH_VCPU_PVTIME_IPA,
+        .addr = (uint64_t)&env->st.guest_addr,
+    };
+
+    err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, &attr);
+    if (err != 0) {
+        /* It's OK if KVM does not have this attribute */
+        return 0;
+    }
+
+    err = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEVICE_ATTR, &attr);
+    if (err != 0) {
+        error_report("PVTIME IPA: KVM_GET_DEVICE_ATTR: %s", strerror(-err));
+        return err;
+    }
+
+    return 0;
+}
+
+
+static int kvm_loongarch_put_lbt_registers(CPUState *cs)
+{
+    int ret = 0;
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR0, &env->lbt.scr0);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR1, &env->lbt.scr1);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR2, &env->lbt.scr2);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_SCR3, &env->lbt.scr3);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_FLAGS, &env->lbt.eflag);
+    ret |= kvm_larch_putq(cs, KVM_REG_LBT_FTOP, &env->active_fpu.ftop);
+
+    return ret;
+}
+
+static int kvm_loongarch_get_lbt_registers(CPUState *cs)
+{
+    int ret = 0;
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR0, &env->lbt.scr0);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR1, &env->lbt.scr1);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR2, &env->lbt.scr2);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_SCR3, &env->lbt.scr3);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_FLAGS, &env->lbt.eflag);
+    ret |= kvm_larch_getq(cs, KVM_REG_LBT_FTOP, &env->active_fpu.ftop);
+
+    return ret;
+}
+
+int
kvm_arch_put_registers(CPUState *cs, int level)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    struct kvm_regs regs;
+    int ret;
+    int i;
+
+    /* Set the registers based on QEMU's view of things */
+    for (i = 0; i < 32; i++) {
+        regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
+    }
+
+    regs.pc = (int64_t)(target_long)env->active_tc.PC;
+
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    ret = kvm_loongarch_put_csr_registers(cs, level);
+    if (ret < 0) {
+        return ret;
+    }
+
+    ret = kvm_loongarch_put_fpu_registers(cs, level);
+    if (ret < 0) {
+        return ret;
+    }
+
+    kvm_loongarch_put_lbt_registers(cs);
+    return ret;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+    LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
+    CPULOONGARCHState *env = &cpu->env;
+    int ret = 0;
+    struct kvm_regs regs;
+    int i;
+
+    /* Get the current register set as KVM sees it */
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+
+    if (ret < 0) {
+        return ret;
+    }
+
+    for (i = 0; i < 32; i++) {
+        env->active_tc.gpr[i] = regs.gpr[i];
+    }
+
+    env->active_tc.PC = regs.pc;
+
+    kvm_loongarch_get_csr_registers(cs);
+    kvm_loongarch_get_fpu_registers(cs);
+    kvm_loongarch_get_lbt_registers(cs);
+
+    return ret;
+}
+
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+                             uint64_t address, uint32_t data, PCIDevice *dev)
+{
+    return 0;
+}
+
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+                                int vector, PCIDevice *dev)
+{
+    return 0;
+}
+
+bool kvm_arch_cpu_check_are_resettable(void)
+{
+    return true;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+    return 0;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+    abort();
+}
+void kvm_arch_accel_class_init(ObjectClass *oc)
+{
+}
diff --git a/target/loongarch64/kvm_larch.h b/target/loongarch64/kvm_larch.h
new file mode 100644
index 0000000000000000000000000000000000000000..637dec81062acc17206c775fe0ed8a15680cdc61
--- /dev/null
+++ b/target/loongarch64/kvm_larch.h
@@ -0,0 +1,49 @@
+/*
+ * KVM/LOONGARCH: LOONGARCH specific KVM APIs
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef KVM_LOONGARCH_H
+#define KVM_LOONGARCH_H
+
+/**
+ * kvm_loongarch_reset_vcpu:
+ * @cpu: LOONGARCHCPU
+ *
+ * Called at reset time to set kernel registers to their initial values.
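+ *
+ * In this implementation the reset is requested by writing 1 to the
+ * KVM_REG_LOONGARCH_VCPU_RESET pseudo-register with KVM_SET_ONE_REG;
+ * see kvm_loongarch_reset_vcpu() in kvm.c.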
+ */
+void kvm_loongarch_reset_vcpu(LOONGARCHCPU *cpu);
+
+int kvm_loongarch_set_interrupt(LOONGARCHCPU *cpu, int irq, int level);
+int kvm_loongarch_set_ipi_interrupt(LOONGARCHCPU *cpu, int irq, int level);
+
+int kvm_loongarch_put_pvtime(LOONGARCHCPU *cpu);
+int kvm_loongarch_get_pvtime(LOONGARCHCPU *cpu);
+
+#ifndef KVM_INTERRUPT_SET
+#define KVM_INTERRUPT_SET -1
+#endif
+
+#ifndef KVM_INTERRUPT_UNSET
+#define KVM_INTERRUPT_UNSET -2
+#endif
+
+#ifndef KVM_INTERRUPT_SET_LEVEL
+#define KVM_INTERRUPT_SET_LEVEL -3
+#endif
+
+#endif /* KVM_LOONGARCH_H */
diff --git a/target/loongarch64/larch-defs.h b/target/loongarch64/larch-defs.h
new file mode 100644
index 0000000000000000000000000000000000000000..e22a0dc652b0758612f47f7186b3e96f6089c592
--- /dev/null
+++ b/target/loongarch64/larch-defs.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ */
+
+#ifndef QEMU_LOONGARCH_DEFS_H
+#define QEMU_LOONGARCH_DEFS_H
+
+/* If we want to use host float regs... */
+/* #define USE_HOST_FLOAT_REGS */
+
+/* Real pages are variable size... */
+#define TARGET_PAGE_BITS 14
+#define LOONGARCH_TLB_MAX 2112
+#define TARGET_LONG_BITS 64
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
+#define TARGET_VIRT_ADDR_SPACE_BITS 48
+
+/*
+ * bit definitions for insn_flags (ISAs/ASEs flags)
+ * ------------------------------------------------
+ */
+#define ISA_LARCH32 0x00000001ULL
+#define ISA_LARCH64 0x00000002ULL
+#define INSN_LOONGARCH 0x00010000ULL
+
+#define CPU_LARCH32 (ISA_LARCH32)
+#define CPU_LARCH64 (ISA_LARCH32 | ISA_LARCH64)
+
+#endif /* QEMU_LOONGARCH_DEFS_H */
diff --git a/target/loongarch64/machine.c b/target/loongarch64/machine.c
new file mode 100644
index 0000000000000000000000000000000000000000..d91c858383e3062305db92c06c5ae67a9c913d19
--- /dev/null
+++ b/target/loongarch64/machine.c
@@ -0,0 +1,423 @@
+/*
+ * Loongarch 3A5000 machine emulation
+ *
+ * Copyright (c) 2023 Loongarch Technology
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2 or later, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ * + */ + +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "internal.h" +#include "hw/hw.h" +#include "kvm_larch.h" +#include "migration/cpu.h" +#include "linux/kvm.h" +#include "sysemu/kvm.h" +#include "qemu/error-report.h" + +static int cpu_post_load(void *opaque, int version_id) +{ + LOONGARCHCPU *cpu = opaque; + CPULOONGARCHState *env = &cpu->env; + int r = 0; + + if (!kvm_enabled()) { + return 0; + } + +#ifdef CONFIG_KVM + struct kvm_loongarch_vcpu_state vcpu_state; + int i; + + vcpu_state.online_vcpus = cpu->online_vcpus; + vcpu_state.is_migrate = cpu->is_migrate; + vcpu_state.cpu_freq = cpu->cpu_freq; + vcpu_state.count_ctl = cpu->count_ctl; + vcpu_state.pending_exceptions = cpu->pending_exceptions; + vcpu_state.pending_exceptions_clr = cpu->pending_exceptions_clr; + for (i = 0; i < 4; i++) { + vcpu_state.core_ext_ioisr[i] = cpu->core_ext_ioisr[i]; + } + r = kvm_vcpu_ioctl(CPU(cpu), KVM_LARCH_SET_VCPU_STATE, &vcpu_state); + if (r) { + error_report("set vcpu state failed %d", r); + } + + kvm_loongarch_put_pvtime(cpu); +#endif + + restore_fp_status(env); + compute_hflags(env); + + return r; +} + +static int cpu_pre_save(void *opaque) +{ +#ifdef CONFIG_KVM + LOONGARCHCPU *cpu = opaque; + struct kvm_loongarch_vcpu_state vcpu_state; + int i, r = 0; + if (!kvm_enabled()) { + return 0; + } + + r = kvm_vcpu_ioctl(CPU(cpu), KVM_LARCH_GET_VCPU_STATE, &vcpu_state); + if (r < 0) { + error_report("get vcpu state failed %d", r); + return r; + } + + cpu->online_vcpus = vcpu_state.online_vcpus; + cpu->is_migrate = vcpu_state.is_migrate; + cpu->cpu_freq = vcpu_state.cpu_freq; + cpu->count_ctl = vcpu_state.count_ctl; + cpu->pending_exceptions = vcpu_state.pending_exceptions; + cpu->pending_exceptions_clr = vcpu_state.pending_exceptions_clr; + for (i = 0; i < 4; i++) { + cpu->core_ext_ioisr[i] = vcpu_state.core_ext_ioisr[i]; + } + + kvm_loongarch_get_pvtime(cpu); +#endif + return 0; +} + +/* FPU state */ + +static int get_fpr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field) +{ + fpr_t *v = pv; + qemu_get_be64s(f, &v->d); + return 0; +} + +static int put_fpr(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + fpr_t *v = pv; + qemu_put_be64s(f, &v->d); + return 0; +} + +const VMStateInfo vmstate_info_fpr = { + .name = "fpr", + .get = get_fpr, + .put = put_fpr, +}; + +#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t) + +#define VMSTATE_FPR_ARRAY(_f, _s, _n) VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0) + +static VMStateField vmstate_fpu_fields[] = { + VMSTATE_FPR_ARRAY(fpr, CPULOONGARCHFPUContext, 32), + VMSTATE_UINT32(fcsr0, CPULOONGARCHFPUContext), VMSTATE_END_OF_LIST() +}; + +const VMStateDescription vmstate_fpu = { .name = "cpu/fpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = vmstate_fpu_fields }; + +const VMStateDescription vmstate_inactive_fpu = { .name = "cpu/inactive_fpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = + vmstate_fpu_fields }; + +/* TC state */ + +static VMStateField vmstate_tc_fields[] = { + VMSTATE_UINTTL_ARRAY(gpr, TCState, 32), VMSTATE_UINTTL(PC, TCState), + VMSTATE_END_OF_LIST() +}; + +const VMStateDescription vmstate_tc = { .name = "cpu/tc", + .version_id = 1, + .minimum_version_id = 1, + .fields = vmstate_tc_fields }; + +const VMStateDescription vmstate_inactive_tc = { .name = "cpu/inactive_tc", + .version_id = 1, + .minimum_version_id = 1, + .fields = vmstate_tc_fields }; + +/* TLB state */ + +static int get_tlb(QEMUFile 
*f, void *pv, size_t size, + const VMStateField *field) +{ + ls3a5k_tlb_t *v = pv; + uint32_t flags; + + qemu_get_betls(f, &v->VPN); + qemu_get_be64s(f, &v->PageMask); + qemu_get_be32s(f, &v->PageSize); + qemu_get_be16s(f, &v->ASID); + qemu_get_be32s(f, &flags); + v->RPLV1 = (flags >> 21) & 1; + v->RPLV0 = (flags >> 20) & 1; + v->PLV1 = (flags >> 18) & 3; + v->PLV0 = (flags >> 16) & 3; + v->EHINV = (flags >> 15) & 1; + v->RI1 = (flags >> 14) & 1; + v->RI0 = (flags >> 13) & 1; + v->XI1 = (flags >> 12) & 1; + v->XI0 = (flags >> 11) & 1; + v->WE1 = (flags >> 10) & 1; + v->WE0 = (flags >> 9) & 1; + v->V1 = (flags >> 8) & 1; + v->V0 = (flags >> 7) & 1; + v->C1 = (flags >> 4) & 7; + v->C0 = (flags >> 1) & 7; + v->G = (flags >> 0) & 1; + qemu_get_be64s(f, &v->PPN0); + qemu_get_be64s(f, &v->PPN1); + + return 0; +} + +static int put_tlb(QEMUFile *f, void *pv, size_t size, + const VMStateField *field, JSONWriter *vmdesc) +{ + ls3a5k_tlb_t *v = pv; + + uint16_t asid = v->ASID; + uint32_t flags = + ((v->RPLV1 << 21) | (v->RPLV0 << 20) | (v->PLV1 << 18) | + (v->PLV0 << 16) | (v->EHINV << 15) | (v->RI1 << 14) | (v->RI0 << 13) | + (v->XI1 << 12) | (v->XI0 << 11) | (v->WE1 << 10) | (v->WE0 << 9) | + (v->V1 << 8) | (v->V0 << 7) | (v->C1 << 4) | (v->C0 << 1) | + (v->G << 0)); + + qemu_put_betls(f, &v->VPN); + qemu_put_be64s(f, &v->PageMask); + qemu_put_be32s(f, &v->PageSize); + qemu_put_be16s(f, &asid); + qemu_put_be32s(f, &flags); + qemu_put_be64s(f, &v->PPN0); + qemu_put_be64s(f, &v->PPN1); + + return 0; +} + +const VMStateInfo vmstate_info_tlb = { + .name = "tlb_entry", + .get = get_tlb, + .put = put_tlb, +}; + +#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \ + VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, ls3a5k_tlb_t) + +#define VMSTATE_TLB_ARRAY(_f, _s, _n) VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0) + +const VMStateDescription vmstate_tlb = { + .name = "cpu/tlb", + .version_id = 2, + .minimum_version_id = 2, + .fields = + (VMStateField[]){ VMSTATE_UINT32(nb_tlb, CPULOONGARCHTLBContext), + VMSTATE_UINT32(tlb_in_use, CPULOONGARCHTLBContext), + VMSTATE_TLB_ARRAY(mmu.ls3a5k.tlb, + CPULOONGARCHTLBContext, + LOONGARCH_TLB_MAX), + VMSTATE_END_OF_LIST() } +}; + +/* LOONGARCH CPU state */ + +const VMStateDescription vmstate_loongarch_cpu = { + .name = "cpu", + .version_id = 15, + .minimum_version_id = 15, + .post_load = cpu_post_load, + .pre_save = cpu_pre_save, + .fields = + (VMStateField[]){ + /* Active TC */ + VMSTATE_STRUCT(env.active_tc, LOONGARCHCPU, 1, vmstate_tc, + TCState), + + /* Active FPU */ + VMSTATE_STRUCT(env.active_fpu, LOONGARCHCPU, 1, vmstate_fpu, + CPULOONGARCHFPUContext), + + /* TLB */ + VMSTATE_STRUCT_POINTER(env.tlb, LOONGARCHCPU, vmstate_tlb, + CPULOONGARCHTLBContext), + /* CPU metastate */ + VMSTATE_UINT32(env.current_tc, LOONGARCHCPU), + VMSTATE_INT32(env.error_code, LOONGARCHCPU), + VMSTATE_UINTTL(env.btarget, LOONGARCHCPU), + VMSTATE_UINTTL(env.bcond, LOONGARCHCPU), + + VMSTATE_UINT64(env.lladdr, LOONGARCHCPU), + + /* PV time */ + VMSTATE_UINT64(env.st.guest_addr, LOONGARCHCPU), + + /* Remaining CSR registers */ + VMSTATE_UINT64(env.CSR_CRMD, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PRMD, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_EUEN, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_MISC, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ECFG, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ESTAT, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ERA, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_BADV, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_BADI, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_EEPN, LOONGARCHCPU), + 
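/*
 * Review note (annotation, not part of the patch): the field order in this
 * list is the wire format. With version_id == minimum_version_id == 15,
 * only streams of exactly version 15 are accepted, so inserting or
 * reordering fields requires bumping both values and breaks migration
 * against binaries that used the old layout.
 */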
VMSTATE_UINT64(env.CSR_TLBIDX, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBEHI, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBELO0, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBELO1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBWIRED, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_GTLBC, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TRGP, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PGDL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PGDH, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PGD, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PWCTL0, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PWCTL1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_STLBPGSIZE, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_RVACFG, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_CPUID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PRCFG1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PRCFG2, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PRCFG3, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_KS0, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_KS1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_KS2, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_KS3, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_KS4, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_KS5, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_KS6, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_KS7, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TMID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TCFG, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TVAL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_CNTC, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TINTCLR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_GSTAT, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_GCFG, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_GINTC, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_GCNTC, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_LLBCTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IMPCTL1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IMPCTL2, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_GNMI, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBRENT, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBRBADV, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBRERA, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBRSAVE, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBRELO0, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBRELO1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBREHI, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_TLBRPRMD, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ERRCTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ERRINFO, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ERRINFO1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ERRENT, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ERRERA, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_ERRSAVE, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_CTAG, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DMWIN0, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DMWIN1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DMWIN2, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DMWIN3, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PERFCTRL0, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PERFCNTR0, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PERFCTRL1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PERFCNTR1, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PERFCTRL2, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PERFCNTR2, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PERFCTRL3, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_PERFCNTR3, LOONGARCHCPU), + /* debug */ + VMSTATE_UINT64(env.CSR_MWPC, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_MWPS, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB0ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB0MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB0CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB0ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB1ADDR, 
LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB1MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB1CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB1ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB2ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB2MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB2CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB2ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB3ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB3MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB3CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DB3ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_FWPC, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_FWPS, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB0ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB0MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB0CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB0ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB1ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB1MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB1CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB1ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB2ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB2MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB2CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB2ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB3ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB3MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB3CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB3ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB4ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB4MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB4CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB4ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB5ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB5MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB5CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB5ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB6ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB6MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB6CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB6ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB7ADDR, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB7MASK, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB7CTL, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_IB7ASID, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DEBUG, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DERA, LOONGARCHCPU), + VMSTATE_UINT64(env.CSR_DESAVE, LOONGARCHCPU), + + VMSTATE_STRUCT_ARRAY(env.fpus, LOONGARCHCPU, LOONGARCH_FPU_MAX, 1, + vmstate_inactive_fpu, CPULOONGARCHFPUContext), + VMSTATE_UINT8(online_vcpus, LOONGARCHCPU), + VMSTATE_UINT8(is_migrate, LOONGARCHCPU), + VMSTATE_UINT64(counter_value, LOONGARCHCPU), + VMSTATE_UINT32(cpu_freq, LOONGARCHCPU), + VMSTATE_UINT32(count_ctl, LOONGARCHCPU), + VMSTATE_UINT64(pending_exceptions, LOONGARCHCPU), + VMSTATE_UINT64(pending_exceptions_clr, LOONGARCHCPU), + VMSTATE_UINT64_ARRAY(core_ext_ioisr, LOONGARCHCPU, 4), + + VMSTATE_END_OF_LIST() }, +}; diff --git a/target/loongarch64/meson.build b/target/loongarch64/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..6badf4484e610792e9930d995f9a245abc991622 --- /dev/null +++ b/target/loongarch64/meson.build @@ -0,0 +1,35 @@ +loongarch_user_ss = ss.source_set() +loongarch_softmmu_ss = ss.source_set() +loongarch_ss = ss.source_set() +loongarch_ss.add(files( + 'cpu.c', + 'fpu.c', + 'gdbstub.c', +)) + +gen = [ + decodetree.process('insn.decode', extra_args: [ '--decode', 'decode_insn', + '--insnwidth', '32' ]) +] + +loongarch_ss.add(gen) +loongarch_ss.add(when: 'CONFIG_TCG', if_true: files( + 'helper.c', + 'translate.c', + 'op_helper.c', 
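# Review note (annotation, not part of the patch): this files() list is
# compiled only for TCG builds; non-TCG (KVM-only) configurations build just
# cpu.c, fpu.c, gdbstub.c and the generated decoder, plus the softmmu and
# KVM sources added further down.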
+ 'fpu_helper.c', + 'tlb_helper.c', + 'csr_helper.c', +)) + +loongarch_softmmu_ss.add(when: 'CONFIG_SOFTMMU', if_true: files( + 'machine.c', + 'stabletimer.c', + 'arch_dump.c', +)) + +loongarch_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) + +target_arch += {'loongarch64': loongarch_ss} +target_softmmu_arch += {'loongarch64': loongarch_softmmu_ss} +target_user_arch += {'loongarch64': loongarch_user_ss} diff --git a/target/loongarch64/op_helper.c b/target/loongarch64/op_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..7257e59479aac25d4e6d74c05bc0d5fe649731eb --- /dev/null +++ b/target/loongarch64/op_helper.c @@ -0,0 +1,485 @@ +/* + * LOONGARCH emulation helpers for qemu. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "sysemu/kvm.h" +#include "qemu/crc32c.h" +#include +#include "hw/irq.h" +#include "hw/core/cpu.h" +#include "instmap.h" + +/* Exceptions processing helpers */ + +void helper_raise_exception_err(CPULOONGARCHState *env, uint32_t exception, + int error_code) +{ + do_raise_exception_err(env, exception, error_code, 0); +} + +void helper_raise_exception(CPULOONGARCHState *env, uint32_t exception) +{ + do_raise_exception(env, exception, GETPC()); +} + +void helper_raise_exception_debug(CPULOONGARCHState *env) +{ + do_raise_exception(env, EXCP_DEBUG, 0); +} + +static void raise_exception(CPULOONGARCHState *env, uint32_t exception) +{ + do_raise_exception(env, exception, 0); +} + +#if defined(CONFIG_USER_ONLY) +#define HELPER_LD(name, insn, type) \ + static inline type do_##name(CPULOONGARCHState *env, target_ulong addr, \ + int mem_idx, uintptr_t retaddr) \ + { \ + return (type)cpu_##insn##_data_ra(env, addr, retaddr); \ + } +#else + +#define HF_SMAP_SHIFT 23 /* CR4.SMAP */ +#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) +#define MMU_KNOSMAP_IDX 2 +#define HF_CPL_SHIFT 0 +#define HF_CPL_MASK (3 << HF_CPL_SHIFT) +#define AC_MASK 0x00040000 +#define MMU_KSMAP_IDX 0 +static inline int cpu_mmu_index_kernel(CPULOONGARCHState *env) +{ + return !(env->hflags & HF_SMAP_MASK) + ? MMU_KNOSMAP_IDX + : ((env->hflags & HF_CPL_MASK) < 3 && (env->hflags & AC_MASK)) + ? 
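/*
 * Review note (annotation, not part of the patch): cpu_mmu_index_kernel()
 * here, together with the HF_SMAP/HF_CPL/AC_MASK constants (the comment
 * even says "CR4.SMAP"), appears to be carried over from target/i386. Note
 * also that the HELPER_LD/HELPER_ST expansions in this file have empty
 * bodies in the softmmu case, so the value-returning do_* accessors would
 * be undefined behaviour if ever instantiated; they look vestigial.
 */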
MMU_KNOSMAP_IDX + : MMU_KSMAP_IDX; +} + +#define cpu_ldl_kernel_ra(e, p, r) \ + cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r) + +#define HELPER_LD(name, insn, type) \ + static inline type do_##name(CPULOONGARCHState *env, target_ulong addr, \ + int mem_idx, uintptr_t retaddr) \ + { \ + } +#endif + +#if defined(CONFIG_USER_ONLY) +#define HELPER_ST(name, insn, type) \ + static inline void do_##name(CPULOONGARCHState *env, target_ulong addr, \ + type val, int mem_idx, uintptr_t retaddr) \ + { \ + } +#else +#define HELPER_ST(name, insn, type) \ + static inline void do_##name(CPULOONGARCHState *env, target_ulong addr, \ + type val, int mem_idx, uintptr_t retaddr) \ + { \ + } +#endif + +static inline target_ulong bitswap(target_ulong v) +{ + v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) | + ((v & (target_ulong)0x5555555555555555ULL) << 1); + v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) | + ((v & (target_ulong)0x3333333333333333ULL) << 2); + v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) | + ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4); + return v; +} + +target_ulong helper_dbitswap(target_ulong rt) +{ + return bitswap(rt); +} + +target_ulong helper_bitswap(target_ulong rt) +{ + return (int32_t)bitswap(rt); +} + +/* these crc32 functions are based on target/arm/helper-a64.c */ +target_ulong helper_crc32(target_ulong val, target_ulong m, uint32_t sz) +{ + uint8_t buf[8]; + target_ulong mask = ((sz * 8) == 64) ? -1ULL : ((1ULL << (sz * 8)) - 1); + + m &= mask; + stq_le_p(buf, m); + return (int32_t)(crc32(val ^ 0xffffffff, buf, sz) ^ 0xffffffff); +} + +target_ulong helper_crc32c(target_ulong val, target_ulong m, uint32_t sz) +{ + uint8_t buf[8]; + target_ulong mask = ((sz * 8) == 64) ? -1ULL : ((1ULL << (sz * 8)) - 1); + m &= mask; + stq_le_p(buf, m); + return (int32_t)(crc32c(val, buf, sz) ^ 0xffffffff); +} + +#ifndef CONFIG_USER_ONLY + +#define HELPER_LD_ATOMIC(name, insn, almask) \ + target_ulong helper_##name(CPULOONGARCHState *env, target_ulong arg, \ + int mem_idx) \ + { \ + } +#endif + +#ifndef CONFIG_USER_ONLY +void helper_drdtime(CPULOONGARCHState *env, target_ulong rd, target_ulong rs) +{ + env->active_tc.gpr[rd] = cpu_loongarch_get_stable_counter(env); + env->active_tc.gpr[rs] = env->CSR_TMID; +} +#endif + +#ifndef CONFIG_USER_ONLY +static void debug_pre_ertn(CPULOONGARCHState *env) +{ + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + qemu_log("ERTN: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx, + env->active_tc.PC, env->CSR_ERA); + qemu_log("\n"); + } +} + +static void debug_post_ertn(CPULOONGARCHState *env) +{ + if (qemu_loglevel_mask(CPU_LOG_EXEC)) { + qemu_log("ERTN: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx, + env->active_tc.PC, env->CSR_ERA); + } +} + +static void set_pc(CPULOONGARCHState *env, target_ulong error_pc) +{ + env->active_tc.PC = error_pc & ~(target_ulong)1; +} + +static inline void exception_return(CPULOONGARCHState *env) +{ + debug_pre_ertn(env); + + if (cpu_refill_state(env)) { + env->CSR_CRMD &= (~0x7); + env->CSR_CRMD |= (env->CSR_TLBRPRMD & 0x7); + /* Clear Refill flag and set pc */ + env->CSR_TLBRERA &= (~0x1); + set_pc(env, env->CSR_TLBRERA); + if (qemu_loglevel_mask(CPU_LOG_INT)) { + qemu_log("%s: TLBRERA 0x%lx\n", __func__, env->CSR_TLBRERA); + } + } else { + env->CSR_CRMD &= (~0x7); + env->CSR_CRMD |= (env->CSR_PRMD & 0x7); + /* Clear Refill flag and set pc*/ + set_pc(env, env->CSR_ERA); + if (qemu_loglevel_mask(CPU_LOG_INT)) { + qemu_log("%s: ERA 0x%lx\n", __func__, env->CSR_ERA); + } + } + + compute_hflags(env); + debug_post_ertn(env); +} 
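/*
 * Review note (annotation, not part of the patch): exception_return() above
 * restores the privilege and interrupt state on ERTN by copying the low
 * three CRMD bits (PLV[1:0], IE) from CSR.TLBRPRMD for a TLB-refill return
 * (TLBRERA[0] set) or from CSR.PRMD otherwise, then resuming at the
 * matching ERA. A minimal standalone restatement of the CRMD update
 * (ertn_restore_crmd is an illustrative name, not a function in this
 * patch):
 */
static inline uint64_t ertn_restore_crmd(uint64_t crmd, uint64_t saved_prmd)
{
    /* Replace CRMD[2:0] (PLV, IE) with the copy saved at exception entry. */
    return (crmd & ~(uint64_t)0x7) | (saved_prmd & 0x7);
}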
+ +void helper_ertn(CPULOONGARCHState *env) +{ + exception_return(env); + env->lladdr = 1; +} + +#endif /* !CONFIG_USER_ONLY */ + +void helper_idle(CPULOONGARCHState *env) +{ + CPUState *cs = CPU(loongarch_env_get_cpu(env)); + + cs->halted = 1; + cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE); + /* + * Last instruction in the block, PC was updated before + * - no need to recover PC and icount + */ + raise_exception(env, EXCP_HLT); +} + +#if !defined(CONFIG_USER_ONLY) + +void loongarch_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) +{ + while (1) { + } +} + +#endif /* !CONFIG_USER_ONLY */ + +void helper_store_scr(CPULOONGARCHState *env, uint32_t n, target_ulong val) +{ + env->scr[n & 0x3] = val; +} + +target_ulong helper_load_scr(CPULOONGARCHState *env, uint32_t n) +{ + return env->scr[n & 0x3]; +} + +/* loongarch assert op */ +void helper_asrtle_d(CPULOONGARCHState *env, target_ulong rs, target_ulong rt) +{ + if (rs > rt) { + do_raise_exception(env, EXCP_AdEL, GETPC()); + } +} + +void helper_asrtgt_d(CPULOONGARCHState *env, target_ulong rs, target_ulong rt) +{ + if (rs <= rt) { + do_raise_exception(env, EXCP_AdEL, GETPC()); + } +} + +target_ulong helper_cto_w(CPULOONGARCHState *env, target_ulong a0) +{ + uint32_t v = (uint32_t)a0; + int temp = 0; + + while ((v & 0x1) == 1) { + temp++; + v = v >> 1; + } + + return (target_ulong)temp; +} + +target_ulong helper_ctz_w(CPULOONGARCHState *env, target_ulong a0) +{ + uint32_t v = (uint32_t)a0; + + if (v == 0) { + return 32; + } + + int temp = 0; + while ((v & 0x1) == 0) { + temp++; + v = v >> 1; + } + + return (target_ulong)temp; +} + +target_ulong helper_cto_d(CPULOONGARCHState *env, target_ulong a0) +{ + uint64_t v = a0; + int temp = 0; + + while ((v & 0x1) == 1) { + temp++; + v = v >> 1; + } + + return (target_ulong)temp; +} + +target_ulong helper_ctz_d(CPULOONGARCHState *env, target_ulong a0) +{ + uint64_t v = a0; + + if (v == 0) { + return 64; + } + + int temp = 0; + while ((v & 0x1) == 0) { + temp++; + v = v >> 1; + } + + return (target_ulong)temp; +} + +target_ulong helper_bitrev_w(CPULOONGARCHState *env, target_ulong a0) +{ + int32_t v = (int32_t)a0; + const int SIZE = 32; + uint8_t bytes[SIZE]; + + int i; + for (i = 0; i < SIZE; i++) { + bytes[i] = v & 0x1; + v = v >> 1; + } + /* v == 0 */ + for (i = 0; i < SIZE; i++) { + v = v | ((uint32_t)bytes[i] << (SIZE - 1 - i)); + } + + return (target_ulong)(int32_t)v; +} + +target_ulong helper_bitrev_d(CPULOONGARCHState *env, target_ulong a0) +{ + uint64_t v = a0; + const int SIZE = 64; + uint8_t bytes[SIZE]; + + int i; + for (i = 0; i < SIZE; i++) { + bytes[i] = v & 0x1; + v = v >> 1; + } + /* v == 0 */ + for (i = 0; i < SIZE; i++) { + v = v | ((uint64_t)bytes[i] << (SIZE - 1 - i)); + } + + return (target_ulong)v; +} + +void helper_memtrace_addr(CPULOONGARCHState *env, target_ulong address, + uint32_t op) +{ + qemu_log("[cpu %d asid 0x%lx pc 0x%lx] addr 0x%lx op", + CPU(loongarch_env_get_cpu(env))->cpu_index, env->CSR_ASID, + env->active_tc.PC, address); + switch (op) { + case OPC_LARCH_LDPTR_D: + qemu_log("OPC_LARCH_LDPTR_D"); + break; + case OPC_LARCH_LD_D: + qemu_log("OPC_LARCH_LD_D"); + break; + case OPC_LARCH_LDPTR_W: + qemu_log("OPC_LARCH_LDPTR_W"); + break; + case OPC_LARCH_LD_W: + qemu_log("OPC_LARCH_LD_W"); + break; + case OPC_LARCH_LD_H: + qemu_log("OPC_LARCH_LD_H"); + break; + case OPC_LARCH_LD_B: + qemu_log("OPC_LARCH_LD_B"); + break; + case OPC_LARCH_LD_WU: + qemu_log("OPC_LARCH_LD_WU"); + break; + case OPC_LARCH_LD_HU: + 
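/*
 * Review note (annotation, not part of the patch):
 * loongarch_cpu_do_unaligned_access() above spins in 'while (1) {}' rather
 * than reporting the fault; a complete implementation would presumably
 * record the faulting address in CSR.BADV and raise an address-error
 * exception instead of hanging the vCPU thread.
 */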
qemu_log("OPC_LARCH_LD_HU"); + break; + case OPC_LARCH_LD_BU: + qemu_log("OPC_LARCH_LD_BU"); + break; + case OPC_LARCH_STPTR_D: + qemu_log("OPC_LARCH_STPTR_D"); + break; + case OPC_LARCH_ST_D: + qemu_log("OPC_LARCH_ST_D"); + break; + case OPC_LARCH_STPTR_W: + qemu_log("OPC_LARCH_STPTR_W"); + break; + case OPC_LARCH_ST_W: + qemu_log("OPC_LARCH_ST_W"); + break; + case OPC_LARCH_ST_H: + qemu_log("OPC_LARCH_ST_H"); + break; + case OPC_LARCH_ST_B: + qemu_log("OPC_LARCH_ST_B"); + break; + case OPC_LARCH_FLD_S: + qemu_log("OPC_LARCH_FLD_S"); + break; + case OPC_LARCH_FLD_D: + qemu_log("OPC_LARCH_FLD_D"); + break; + case OPC_LARCH_FST_S: + qemu_log("OPC_LARCH_FST_S"); + break; + case OPC_LARCH_FST_D: + qemu_log("OPC_LARCH_FST_D"); + break; + case OPC_LARCH_FLDX_S: + qemu_log("OPC_LARCH_FLDX_S"); + break; + case OPC_LARCH_FLDGT_S: + qemu_log("OPC_LARCH_FLDGT_S"); + break; + case OPC_LARCH_FLDLE_S: + qemu_log("OPC_LARCH_FLDLE_S"); + break; + case OPC_LARCH_FSTX_S: + qemu_log("OPC_LARCH_FSTX_S"); + break; + case OPC_LARCH_FSTGT_S: + qemu_log("OPC_LARCH_FSTGT_S"); + break; + case OPC_LARCH_FSTLE_S: + qemu_log("OPC_LARCH_FSTLE_S"); + break; + case OPC_LARCH_FLDX_D: + qemu_log("OPC_LARCH_FLDX_D"); + break; + case OPC_LARCH_FLDGT_D: + qemu_log("OPC_LARCH_FLDGT_D"); + break; + case OPC_LARCH_FLDLE_D: + qemu_log("OPC_LARCH_FLDLE_D"); + break; + case OPC_LARCH_FSTX_D: + qemu_log("OPC_LARCH_FSTX_D"); + break; + case OPC_LARCH_FSTGT_D: + qemu_log("OPC_LARCH_FSTGT_D"); + break; + case OPC_LARCH_FSTLE_D: + qemu_log("OPC_LARCH_FSTLE_D"); + break; + case OPC_LARCH_LL_W: + qemu_log("OPC_LARCH_LL_W"); + break; + case OPC_LARCH_LL_D: + qemu_log("OPC_LARCH_LL_D"); + break; + default: + qemu_log("0x%x", op); + } +} + +void helper_memtrace_val(CPULOONGARCHState *env, target_ulong val) +{ + qemu_log("val 0x%lx\n", val); +} diff --git a/target/loongarch64/stabletimer.c b/target/loongarch64/stabletimer.c new file mode 100644 index 0000000000000000000000000000000000000000..4f4ccc5d894b206a10035f1a14b8128c4272ac6b --- /dev/null +++ b/target/loongarch64/stabletimer.c @@ -0,0 +1,117 @@ +/* + * QEMU LOONGARCH timer support + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#include "qemu/osdep.h" +#include "hw/loongarch/cpudevs.h" +#include "qemu/timer.h" +#include "sysemu/kvm.h" +#include "internal.h" +#include "hw/irq.h" + +#ifdef DEBUG_TIMER +#define debug_timer(fmt, args...) \ + printf("%s(%d)-%s -> " #fmt "\n", __FILE__, __LINE__, __func__, ##args); +#else +#define debug_timer(fmt, args...) 
+#endif + +#define TIMER_PERIOD 10 /* 10 ns period for 100 Mhz frequency */ +#define STABLETIMER_TICK_MASK 0xfffffffffffcUL +#define STABLETIMER_ENABLE 0x1UL +#define STABLETIMER_PERIOD 0x2UL + +/* return random value in [low, high] */ +uint32_t cpu_loongarch_get_random_ls3a5k_tlb(uint32_t low, uint32_t high) +{ + static uint32_t seed = 5; + static uint32_t prev_idx; + uint32_t idx; + uint32_t nb_rand_tlb = high - low + 1; + + do { + seed = 1103515245 * seed + 12345; + idx = (seed >> 16) % nb_rand_tlb + low; + } while (idx == prev_idx); + prev_idx = idx; + + return idx; +} + +/* LOONGARCH timer */ +uint64_t cpu_loongarch_get_stable_counter(CPULOONGARCHState *env) +{ + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / TIMER_PERIOD; +} + +uint64_t cpu_loongarch_get_stable_timer_ticks(CPULOONGARCHState *env) +{ + uint64_t now, expire; + + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + expire = timer_expire_time_ns(env->timer); + + return (expire - now) / TIMER_PERIOD; +} + +void cpu_loongarch_store_stable_timer_config(CPULOONGARCHState *env, + uint64_t value) +{ + uint64_t now, next; + + env->CSR_TCFG = value; + if (value & STABLETIMER_ENABLE) { + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + next = now + (value & STABLETIMER_TICK_MASK) * TIMER_PERIOD; + timer_mod(env->timer, next); + } + debug_timer("0x%lx 0x%lx now 0x%lx, next 0x%lx", value, env->CSR_TCFG, now, + next); +} + +static void loongarch_stable_timer_cb(void *opaque) +{ + CPULOONGARCHState *env; + uint64_t now, next; + + env = opaque; + debug_timer(); + if (env->CSR_TCFG & STABLETIMER_PERIOD) { + now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); + next = now + (env->CSR_TCFG & STABLETIMER_TICK_MASK) * TIMER_PERIOD; + timer_mod(env->timer, next); + } else { + env->CSR_TCFG &= ~STABLETIMER_ENABLE; + } + + qemu_irq_raise(env->irq[IRQ_TIMER]); +} + +void cpu_loongarch_clock_init(LOONGARCHCPU *cpu) +{ + CPULOONGARCHState *env = &cpu->env; + + /* + * If we're in KVM mode, don't create the periodic timer, that is handled + * in kernel. + */ + if (!kvm_enabled()) { + env->timer = + timer_new_ns(QEMU_CLOCK_VIRTUAL, &loongarch_stable_timer_cb, env); + } +} diff --git a/target/loongarch64/tlb_helper.c b/target/loongarch64/tlb_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..b6e924fbec2ecd4fe02cd3da34bda8498764c911 --- /dev/null +++ b/target/loongarch64/tlb_helper.c @@ -0,0 +1,641 @@ +/* + * loongarch tlb emulation helpers for qemu. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#include "qemu/osdep.h" +#include "qemu/main-loop.h" +#include "cpu.h" +#include "internal.h" +#include "qemu/host-utils.h" +#include "exec/helper-proto.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" + +#ifndef CONFIG_USER_ONLY + +#define HELPER_LD(name, insn, type) \ + static inline type do_##name(CPULOONGARCHState *env, target_ulong addr, \ + int mem_idx, uintptr_t retaddr) \ + { \ + } + +void helper_lddir(CPULOONGARCHState *env, target_ulong base, target_ulong rt, + target_ulong level, uint32_t mem_idx) +{ +} + +void helper_ldpte(CPULOONGARCHState *env, target_ulong base, target_ulong odd, + uint32_t mem_idx) +{ +} + +target_ulong helper_read_pgd(CPULOONGARCHState *env) +{ + uint64_t badv; + + assert(env->CSR_TLBRERA & 0x1); + + if (env->CSR_TLBRERA & 0x1) { + badv = env->CSR_TLBRBADV; + } else { + badv = env->CSR_BADV; + } + + if ((badv >> 63) & 0x1) { + return env->CSR_PGDH; + } else { + return env->CSR_PGDL; + } +} + +/* TLB management */ +static uint64_t ls3a5k_pagesize_to_mask(int pagesize) +{ + /* 4KB - 1GB */ + if (pagesize < 12 && pagesize > 30) { + printf("[ERROR] unsupported page size %d\n", pagesize); + exit(-1); + } + + return (1 << (pagesize + 1)) - 1; +} + +static void ls3a5k_fill_tlb_entry(CPULOONGARCHState *env, ls3a5k_tlb_t *tlb, + int is_ftlb) +{ + uint64_t page_mask; /* 0000...00001111...1111 */ + uint32_t page_size; + uint64_t entryhi; + uint64_t lo0, lo1; + + if (env->CSR_TLBRERA & 0x1) { + page_size = env->CSR_TLBREHI & 0x3f; + entryhi = env->CSR_TLBREHI; + lo0 = env->CSR_TLBRELO0; + lo1 = env->CSR_TLBRELO1; + } else { + page_size = (env->CSR_TLBIDX >> CSR_TLBIDX_PS_SHIFT) & 0x3f; + entryhi = env->CSR_TLBEHI; + lo0 = env->CSR_TLBELO0; + lo1 = env->CSR_TLBELO1; + } + + if (page_size == 0) { + printf("Warning: page_size is 0\n"); + } + + /* + * 15-12 11-8 7-4 3-0 + * 4KB: 0001 1111 1111 1111 // double 4KB mask [12:0] + * 16KB: 0111 1111 1111 1111 // double 16KB mask [14:0] + */ + if (is_ftlb) { + page_mask = env->tlb->mmu.ls3a5k.ftlb_mask; + } else { + page_mask = ls3a5k_pagesize_to_mask(page_size); + } + + tlb->VPN = entryhi & 0xffffffffe000 & ~page_mask; + + tlb->ASID = env->CSR_ASID & 0x3ff; /* CSR_ASID[9:0] */ + tlb->EHINV = 0; + tlb->G = (lo0 >> CSR_TLBLO0_GLOBAL_SHIFT) & /* CSR_TLBLO[6] */ + (lo1 >> CSR_TLBLO1_GLOBAL_SHIFT) & 1; + + tlb->PageMask = page_mask; + tlb->PageSize = page_size; + + tlb->V0 = (lo0 >> CSR_TLBLO0_V_SHIFT) & 0x1; /* [0] */ + tlb->WE0 = (lo0 >> CSR_TLBLO0_WE_SHIFT) & 0x1; /* [1] */ + tlb->PLV0 = (lo0 >> CSR_TLBLO0_PLV_SHIFT) & 0x3; /* [3:2] */ + tlb->C0 = (lo0 >> CSR_TLBLO0_CCA_SHIFT) & 0x3; /* [5:4] */ + tlb->PPN0 = (lo0 & 0xfffffffff000 & ~(page_mask >> 1)); + tlb->RI0 = (lo0 >> CSR_TLBLO0_RI_SHIFT) & 0x1; /* [61] */ + tlb->XI0 = (lo0 >> CSR_TLBLO0_XI_SHIFT) & 0x1; /* [62] */ + tlb->RPLV0 = (lo0 >> CSR_TLBLO0_RPLV_SHIFT) & 0x1; /* [63] */ + + tlb->V1 = (lo1 >> CSR_TLBLO1_V_SHIFT) & 0x1; /* [0] */ + tlb->WE1 = (lo1 >> CSR_TLBLO1_WE_SHIFT) & 0x1; /* [1] */ + tlb->PLV1 = (lo1 >> CSR_TLBLO1_PLV_SHIFT) & 0x3; /* [3:2] */ + tlb->C1 = (lo1 >> CSR_TLBLO1_CCA_SHIFT) & 0x3; /* [5:4] */ + tlb->PPN1 = (lo1 & 0xfffffffff000 & ~(page_mask >> 1)); + tlb->RI1 = (lo1 >> CSR_TLBLO1_RI_SHIFT) & 0x1; /* [61] */ + tlb->XI1 = (lo1 >> CSR_TLBLO1_XI_SHIFT) & 0x1; /* [62] */ + tlb->RPLV1 = (lo1 >> CSR_TLBLO1_RPLV_SHIFT) & 0x1; /* [63] */ +} + +static void ls3a5k_fill_tlb(CPULOONGARCHState *env, int idx, bool tlbwr) +{ + ls3a5k_tlb_t *tlb; + + tlb = &env->tlb->mmu.ls3a5k.tlb[idx]; + if (tlbwr) { + if ((env->CSR_TLBIDX >> 
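/*
 * Review note (annotation, not part of the patch):
 * ls3a5k_pagesize_to_mask() above looks buggy on two counts: the range
 * check 'pagesize < 12 && pagesize > 30' can never be true (the operator
 * should be '||'), and '1 << (pagesize + 1)' is a 32-bit shift that
 * overflows for the 1GB case (pagesize == 30). A corrected body, under
 * those assumptions:
 *
 *     if (pagesize < 12 || pagesize > 30) {
 *         printf("[ERROR] unsupported page size %d\n", pagesize);
 *         exit(-1);
 *     }
 *     return (1ULL << (pagesize + 1)) - 1;
 */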
CSR_TLBIDX_EHINV_SHIFT) & 0x1) { + tlb->EHINV = 1; + return; + } + } + + if (idx < 2048) { + ls3a5k_fill_tlb_entry(env, tlb, 1); + } else { + ls3a5k_fill_tlb_entry(env, tlb, 0); + } +} + +void ls3a5k_flush_vtlb(CPULOONGARCHState *env) +{ + uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size; + uint32_t vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size; + int i; + + ls3a5k_tlb_t *tlb; + + for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) { + tlb = &env->tlb->mmu.ls3a5k.tlb[i]; + tlb->EHINV = 1; + } + + cpu_loongarch_tlb_flush(env); +} + +void ls3a5k_flush_ftlb(CPULOONGARCHState *env) +{ + uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size; + int i; + + ls3a5k_tlb_t *tlb; + + for (i = 0; i < ftlb_size; ++i) { + tlb = &env->tlb->mmu.ls3a5k.tlb[i]; + tlb->EHINV = 1; + } + + cpu_loongarch_tlb_flush(env); +} + +void ls3a5k_helper_tlbclr(CPULOONGARCHState *env) +{ + int i; + uint16_t asid; + int vsize, fsize, index; + int start = 0, end = -1; + + asid = env->CSR_ASID & 0x3ff; + vsize = env->tlb->mmu.ls3a5k.vtlb_size; + fsize = env->tlb->mmu.ls3a5k.ftlb_size; + index = env->CSR_TLBIDX & CSR_TLBIDX_IDX; + + if (index < fsize) { + /* FTLB. One line per operation */ + int set = index % 256; + start = set * 8; + end = start + 7; + } else if (index < (fsize + vsize)) { + /* VTLB. All entries */ + start = fsize; + end = fsize + vsize - 1; + } else { + /* Ignore */ + } + + for (i = start; i <= end; i++) { + ls3a5k_tlb_t *tlb; + tlb = &env->tlb->mmu.ls3a5k.tlb[i]; + if (!tlb->G && tlb->ASID == asid) { + tlb->EHINV = 1; + } + } + + cpu_loongarch_tlb_flush(env); +} + +void ls3a5k_helper_tlbflush(CPULOONGARCHState *env) +{ + int i; + int vsize, fsize, index; + int start = 0, end = -1; + + vsize = env->tlb->mmu.ls3a5k.vtlb_size; + fsize = env->tlb->mmu.ls3a5k.ftlb_size; + index = env->CSR_TLBIDX & CSR_TLBIDX_IDX; + + if (index < fsize) { + /* FTLB. One line per operation */ + int set = index % 256; + start = set * 8; + end = start + 7; + } else if (index < (fsize + vsize)) { + /* VTLB. 
All entries */ + start = fsize; + end = fsize + vsize - 1; + } else { + /* Ignore */ + } + + for (i = start; i <= end; i++) { + env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1; + } + + cpu_loongarch_tlb_flush(env); +} + +void ls3a5k_helper_invtlb(CPULOONGARCHState *env, target_ulong addr, + target_ulong info, int op) +{ + uint32_t asid = info & 0x3ff; + int i; + + switch (op) { + case 0: + case 1: + for (i = 0; i < env->tlb->nb_tlb; i++) { + env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1; + } + break; + case 4: { + int i; + for (i = 0; i < env->tlb->nb_tlb; i++) { + struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i]; + + if (!tlb->G && tlb->ASID == asid) { + tlb->EHINV = 1; + } + } + break; + } + + case 5: { + int i; + for (i = 0; i < env->tlb->nb_tlb; i++) { + struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i]; + uint64_t vpn = addr & 0xffffffffe000 & ~tlb->PageMask; + + if (!tlb->G && tlb->ASID == asid && vpn == tlb->VPN) { + tlb->EHINV = 1; + } + } + break; + } + case 6: { + int i; + for (i = 0; i < env->tlb->nb_tlb; i++) { + struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i]; + uint64_t vpn = addr & 0xffffffffe000 & ~tlb->PageMask; + + if ((tlb->G || tlb->ASID == asid) && vpn == tlb->VPN) { + tlb->EHINV = 1; + } + } + break; + } + default: + helper_raise_exception(env, EXCP_RI); + } + + cpu_loongarch_tlb_flush(env); +} + +static void ls3a5k_invalidate_tlb_entry(CPULOONGARCHState *env, + ls3a5k_tlb_t *tlb) +{ + LOONGARCHCPU *cpu = loongarch_env_get_cpu(env); + CPUState *cs = CPU(cpu); + target_ulong addr; + target_ulong end; + target_ulong mask; + + mask = tlb->PageMask; /* 000...000111...111 */ + + if (tlb->V0) { + addr = tlb->VPN & ~mask; /* xxx...xxx[0]000..0000 */ + end = addr | (mask >> 1); /* xxx...xxx[0]111..1111 */ + while (addr < end) { + tlb_flush_page(cs, addr); + addr += TARGET_PAGE_SIZE; + } + } + + if (tlb->V1) { + /* xxx...xxx[1]000..0000 */ + addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1); + end = addr | mask; /* xxx...xxx[1]111..1111 */ + while (addr - 1 < end) { + tlb_flush_page(cs, addr); + addr += TARGET_PAGE_SIZE; + } + } +} + +void ls3a5k_invalidate_tlb(CPULOONGARCHState *env, int idx) +{ + ls3a5k_tlb_t *tlb; + int asid = env->CSR_ASID & 0x3ff; + tlb = &env->tlb->mmu.ls3a5k.tlb[idx]; + if (tlb->G == 0 && tlb->ASID != asid) { + return; + } + ls3a5k_invalidate_tlb_entry(env, tlb); +} + +void ls3a5k_helper_tlbwr(CPULOONGARCHState *env) +{ + int idx = env->CSR_TLBIDX & CSR_TLBIDX_IDX; /* [11:0] */ + + /* Convert idx if in FTLB */ + if (idx < env->tlb->mmu.ls3a5k.ftlb_size) { + /* + * 0 3 6 0 1 2 + * 1 4 7 => 3 4 5 + * 2 5 8 6 7 8 + */ + int set = idx % 256; + int way = idx / 256; + idx = set * 8 + way; + } + ls3a5k_invalidate_tlb(env, idx); + ls3a5k_fill_tlb(env, idx, true); +} + +void ls3a5k_helper_tlbfill(CPULOONGARCHState *env) +{ + uint64_t mask; + uint64_t address; + int idx; + int set, ftlb_idx; + + uint64_t entryhi; + uint32_t pagesize; + + if (env->CSR_TLBRERA & 0x1) { + entryhi = env->CSR_TLBREHI & ~0x3f; + pagesize = env->CSR_TLBREHI & 0x3f; + } else { + entryhi = env->CSR_TLBEHI; + pagesize = (env->CSR_TLBIDX >> CSR_TLBIDX_PS_SHIFT) & 0x3f; + } + + uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size; + uint32_t vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size; + + mask = ls3a5k_pagesize_to_mask(pagesize); + + if (mask == env->tlb->mmu.ls3a5k.ftlb_mask && + env->tlb->mmu.ls3a5k.ftlb_size > 0) { + /* only write into FTLB */ + address = entryhi & 0xffffffffe000; /* [47:13] */ + + /* choose one set ramdomly */ + set = cpu_loongarch_get_random_ls3a5k_tlb(0, 7); + + /* index 
in one set */ + ftlb_idx = (address >> 15) & 0xff; /* [0,255] */ + + /* final idx */ + idx = ftlb_idx * 8 + set; /* max is 7 + 8 * 255 = 2047 */ + } else { + /* only write into VTLB */ + int wired_nr = env->CSR_TLBWIRED & 0x3f; + idx = cpu_loongarch_get_random_ls3a5k_tlb(ftlb_size + wired_nr, + ftlb_size + vtlb_size - 1); + } + + ls3a5k_invalidate_tlb(env, idx); + ls3a5k_fill_tlb(env, idx, false); +} + +void ls3a5k_helper_tlbsrch(CPULOONGARCHState *env) +{ + uint64_t mask; + uint64_t vpn; + uint64_t tag; + uint16_t asid; + + int ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size; + int vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size; + int i; + int ftlb_idx; /* [0,255] 2^8 0xff */ + + ls3a5k_tlb_t *tlb; + + asid = env->CSR_ASID & 0x3ff; + + /* search VTLB */ + for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) { + tlb = &env->tlb->mmu.ls3a5k.tlb[i]; + mask = tlb->PageMask; + + vpn = env->CSR_TLBEHI & 0xffffffffe000 & ~mask; + tag = tlb->VPN & ~mask; + + if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag && + tlb->EHINV != 1) { + env->CSR_TLBIDX = + (i & 0xfff) | ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT); + goto _MATCH_OUT_; + } + } + + if (ftlb_size == 0) { + goto _NO_MATCH_OUT_; + } + + /* search FTLB */ + mask = env->tlb->mmu.ls3a5k.ftlb_mask; + vpn = env->CSR_TLBEHI & 0xffffffffe000 & ~mask; + + ftlb_idx = (env->CSR_TLBEHI & 0xffffffffe000) >> 15; /* 16 KB */ + ftlb_idx = ftlb_idx & 0xff; /* [0,255] */ + + for (i = 0; i < 8; ++i) { + tlb = &env->tlb->mmu.ls3a5k.tlb[ftlb_idx * 8 + i]; + tag = tlb->VPN & ~mask; + + if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag && + tlb->EHINV != 1) { + env->CSR_TLBIDX = ((i * 256 + ftlb_idx) & 0xfff) | + ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT); + goto _MATCH_OUT_; + } + } + +_NO_MATCH_OUT_: + env->CSR_TLBIDX = 1 << CSR_TLBIDX_EHINV_SHIFT; +_MATCH_OUT_: + return; +} + +void ls3a5k_helper_tlbrd(CPULOONGARCHState *env) +{ + ls3a5k_tlb_t *tlb; + int idx; + uint16_t asid; + + idx = env->CSR_TLBIDX & CSR_TLBIDX_IDX; + if (idx < env->tlb->mmu.ls3a5k.ftlb_size) { + int set = idx % 256; + int way = idx / 256; + idx = set * 8 + way; + } + + tlb = &env->tlb->mmu.ls3a5k.tlb[idx]; + + asid = env->CSR_ASID & 0x3ff; + + if (asid != tlb->ASID) { + cpu_loongarch_tlb_flush(env); + } + + if (tlb->EHINV) { + /* invalid TLB entry */ + env->CSR_TLBIDX = 1 << CSR_TLBIDX_EHINV_SHIFT; + env->CSR_TLBEHI = 0; + env->CSR_TLBELO0 = 0; + env->CSR_TLBELO1 = 0; + } else { + /* valid TLB entry */ + env->CSR_TLBIDX = (env->CSR_TLBIDX & 0xfff) | + ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT); + env->CSR_TLBEHI = tlb->VPN; + env->CSR_TLBELO0 = (tlb->V0 << CSR_TLBLO0_V_SHIFT) | + (tlb->WE0 << CSR_TLBLO0_WE_SHIFT) | + (tlb->PLV0 << CSR_TLBLO0_PLV_SHIFT) | + (tlb->C0 << CSR_TLBLO0_CCA_SHIFT) | + (tlb->G << CSR_TLBLO0_GLOBAL_SHIFT) | + (tlb->PPN0 & 0xfffffffff000) | + ((uint64_t)tlb->RI0 << CSR_TLBLO0_RI_SHIFT) | + ((uint64_t)tlb->XI0 << CSR_TLBLO0_XI_SHIFT) | + ((uint64_t)tlb->RPLV0 << CSR_TLBLO0_RPLV_SHIFT); + env->CSR_TLBELO1 = (tlb->V1 << CSR_TLBLO1_V_SHIFT) | + (tlb->WE1 << CSR_TLBLO1_WE_SHIFT) | + (tlb->PLV1 << CSR_TLBLO1_PLV_SHIFT) | + (tlb->C1 << CSR_TLBLO1_CCA_SHIFT) | + (tlb->G << CSR_TLBLO0_GLOBAL_SHIFT) | + (tlb->PPN1 & 0xfffffffff000) | + ((uint64_t)tlb->RI1 << CSR_TLBLO1_RI_SHIFT) | + ((uint64_t)tlb->XI1 << CSR_TLBLO1_XI_SHIFT) | + ((uint64_t)tlb->RPLV1 << CSR_TLBLO1_RPLV_SHIFT); + env->CSR_ASID = + (tlb->ASID << CSR_ASID_ASID_SHIFT) | (env->CSR_ASID & 0xff0000); + } +} + +void helper_tlbwr(CPULOONGARCHState *env) +{ + env->tlb->helper_tlbwr(env); +} + +void 
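/*
 * Review note (annotation, not part of the patch): the ls3a5k FTLB is 8-way
 * set associative with 256 sets (2048 entries). CSR.TLBIDX numbers entries
 * way-major while the internal tlb[] array is set-major, so the helpers in
 * this file convert with:
 *
 *     set = idx % 256;  way = idx / 256;  idx = set * 8 + way;
 *
 * ls3a5k_helper_tlbfill() instead derives the set from VA bits [22:15]
 * (16KB double pages), ftlb_idx = (address >> 15) & 0xff, and stores into
 * slot ftlb_idx * 8 + way with the way chosen pseudo-randomly in [0, 7].
 */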
helper_tlbfill(CPULOONGARCHState *env) +{ + env->tlb->helper_tlbfill(env); +} + +void helper_tlbsrch(CPULOONGARCHState *env) +{ + env->tlb->helper_tlbsrch(env); +} + +void helper_tlbrd(CPULOONGARCHState *env) +{ + env->tlb->helper_tlbrd(env); +} + +void helper_tlbclr(CPULOONGARCHState *env) +{ + env->tlb->helper_tlbclr(env); +} + +void helper_tlbflush(CPULOONGARCHState *env) +{ + env->tlb->helper_tlbflush(env); +} + +void helper_invtlb(CPULOONGARCHState *env, target_ulong addr, + target_ulong info, target_ulong op) +{ + env->tlb->helper_invtlb(env, addr, info, op); +} + +static void ls3a5k_mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def) +{ + /* number of VTLB */ + env->tlb->nb_tlb = 64; + env->tlb->mmu.ls3a5k.vtlb_size = 64; + + /* number of FTLB */ + env->tlb->nb_tlb += 2048; + env->tlb->mmu.ls3a5k.ftlb_size = 2048; + env->tlb->mmu.ls3a5k.ftlb_mask = (1 << 15) - 1; /* 16 KB */ + /* + * page_size | ftlb_mask | party field + * ---------------------------------------------------------------- + * 4 KB = 12 | ( 1 << 13 ) - 1 = [12:0] | [12] + * 16 KB = 14 | ( 1 << 15 ) - 1 = [14:0] | [14] + * 64 KB = 16 | ( 1 << 17 ) - 1 = [16:0] | [16] + * 256 KB = 18 | ( 1 << 19 ) - 1 = [18:0] | [18] + * 1 MB = 20 | ( 1 << 21 ) - 1 = [20:0] | [20] + * 4 MB = 22 | ( 1 << 23 ) - 1 = [22:0] | [22] + * 16 MB = 24 | ( 1 << 25 ) - 1 = [24:0] | [24] + * 64 MB = 26 | ( 1 << 27 ) - 1 = [26:0] | [26] + * 256 MB = 28 | ( 1 << 29 ) - 1 = [28:0] | [28] + * 1 GB = 30 | ( 1 << 31 ) - 1 = [30:0] | [30] + * ---------------------------------------------------------------- + * take party field index as @n. eg. For 16 KB, n = 14 + * ---------------------------------------------------------------- + * tlb->VPN = TLBEHI & 0xffffffffe000[47:13] & ~mask = [47:n+1] + * tlb->PPN = TLBLO0 & 0xffffffffe000[47:13] & ~mask = [47:n+1] + * tlb->PPN = TLBLO1 & 0xffffffffe000[47:13] & ~mask = [47:n+1] + * ---------------------------------------------------------------- + * On mapping : + * > vpn = address & 0xffffffffe000[47:13] & ~mask = [47:n+1] + * > tag = tlb->VPN & ~mask = [47:n+1] + * ---------------------------------------------------------------- + * physical address = [47:n+1] | [n:0] + * physical address = tlb->PPN0 | (address & mask) + * physical address = tlb->PPN1 | (address & mask) + */ + + int i; + for (i = 0; i < env->tlb->nb_tlb; i++) { + env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1; + } + + /* TLB's helper functions */ + env->tlb->map_address = &ls3a5k_map_address; + env->tlb->helper_tlbwr = ls3a5k_helper_tlbwr; + env->tlb->helper_tlbfill = ls3a5k_helper_tlbfill; + env->tlb->helper_tlbsrch = ls3a5k_helper_tlbsrch; + env->tlb->helper_tlbrd = ls3a5k_helper_tlbrd; + env->tlb->helper_tlbclr = ls3a5k_helper_tlbclr; + env->tlb->helper_tlbflush = ls3a5k_helper_tlbflush; + env->tlb->helper_invtlb = ls3a5k_helper_invtlb; +} + +void mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def) +{ + env->tlb = g_malloc0(sizeof(CPULOONGARCHTLBContext)); + + switch (def->mmu_type) { + case MMU_TYPE_LS3A5K: + ls3a5k_mmu_init(env, def); + break; + default: + cpu_abort(CPU(loongarch_env_get_cpu(env)), "MMU type not supported\n"); + } +} +#endif /* !CONFIG_USER_ONLY */ diff --git a/target/loongarch64/trans.inc.c b/target/loongarch64/trans.inc.c new file mode 100644 index 0000000000000000000000000000000000000000..07bb0bb6e06045edd9ce65f067f7c9f58a504bf6 --- /dev/null +++ b/target/loongarch64/trans.inc.c @@ -0,0 +1,3482 @@ +/* + * LOONGARCH emulation for QEMU - main translation routines Extension + * + * Copyright (c) 2023 Loongarch 
Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +static bool trans_syscall(DisasContext *ctx, arg_syscall *a) +{ + generate_exception_end(ctx, EXCP_SYSCALL); + return true; +} + +static bool trans_break(DisasContext *ctx, arg_break *a) +{ + generate_exception_end(ctx, EXCP_BREAK); + return true; +} + +static bool trans_dbcl(DisasContext *ctx, arg_dbcl *a) +{ + /* + * dbcl instruction is not support in tcg + */ + generate_exception_end(ctx, EXCP_RI); + return true; +} + +static bool trans_addi_w(DisasContext *ctx, arg_addi_w *a) +{ + gen_arith_imm(ctx, OPC_LARCH_ADDI_W, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_addi_d(DisasContext *ctx, arg_addi_d *a) +{ + gen_arith_imm(ctx, OPC_LARCH_ADDI_D, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_slli_d(DisasContext *ctx, arg_slli_d *a) +{ + if (a->rd == 0) { + /* Nop */ + return true; + } + + TCGv t0 = tcg_temp_new(); + + gen_load_gpr(t0, a->rj); + tcg_gen_shli_tl(cpu_gpr[a->rd], t0, a->ui6); + + tcg_temp_free(t0); + return true; +} + +static bool trans_andi(DisasContext *ctx, arg_andi *a) +{ + gen_logic_imm(ctx, OPC_LARCH_ANDI, a->rd, a->rj, a->ui12); + return true; +} + +static bool trans_srli_d(DisasContext *ctx, arg_srli_d *a) +{ + TCGv t0 = tcg_temp_new(); + + gen_load_gpr(t0, a->rj); + tcg_gen_shri_tl(cpu_gpr[a->rd], t0, a->ui6); + + tcg_temp_free(t0); + return true; +} + +static bool trans_slli_w(DisasContext *ctx, arg_slli_w *a) +{ + if (a->rd == 0) { + /* Nop */ + return true; + } + + TCGv t0 = tcg_temp_new(); + + gen_load_gpr(t0, a->rj); + tcg_gen_shli_tl(t0, t0, a->ui5); + tcg_gen_ext32s_tl(cpu_gpr[a->rd], t0); + + tcg_temp_free(t0); + return true; +} + +static bool trans_addu16i_d(DisasContext *ctx, arg_addu16i_d *a) +{ + if (a->rj != 0) { + tcg_gen_addi_tl(cpu_gpr[a->rd], cpu_gpr[a->rj], a->si16 << 16); + } else { + tcg_gen_movi_tl(cpu_gpr[a->rd], a->si16 << 16); + } + return true; +} + +static bool trans_lu12i_w(DisasContext *ctx, arg_lu12i_w *a) +{ + tcg_gen_movi_tl(cpu_gpr[a->rd], a->si20 << 12); + return true; +} + +static bool trans_lu32i_d(DisasContext *ctx, arg_lu32i_d *a) +{ + TCGv_i64 t0, t1; + t0 = tcg_temp_new_i64(); + t1 = tcg_temp_new_i64(); + + tcg_gen_movi_tl(t0, a->si20); + tcg_gen_concat_tl_i64(t1, cpu_gpr[a->rd], t0); + gen_store_gpr(t1, a->rd); + + tcg_temp_free(t0); + tcg_temp_free(t1); + return true; +} + +static bool trans_pcaddi(DisasContext *ctx, arg_pcaddi *a) +{ + target_ulong pc = ctx->base.pc_next; + target_ulong addr = pc + (a->si20 << 2); + tcg_gen_movi_tl(cpu_gpr[a->rd], addr); + return true; +} + +static bool trans_pcalau12i(DisasContext *ctx, arg_pcalau12i *a) +{ + target_ulong pc = ctx->base.pc_next; + target_ulong addr = (pc + (a->si20 << 12)) & ~0xfff; + tcg_gen_movi_tl(cpu_gpr[a->rd], addr); + return true; +} + +static bool trans_pcaddu12i(DisasContext *ctx, arg_pcaddu12i *a) +{ + target_ulong pc = ctx->base.pc_next; + target_ulong addr = pc + (a->si20 << 12); + tcg_gen_movi_tl(cpu_gpr[a->rd], addr); + return 
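/*
 * Review note (annotation, not part of the patch): the PC-relative family
 * here differs only in immediate scaling and low-bit masking:
 *
 *     pcaddi      rd = PC + (si20 << 2)
 *     pcalau12i   rd = (PC + (si20 << 12)) & ~0xfff
 *     pcaddu12i   rd = PC + (si20 << 12)
 *     pcaddu18i   rd = PC + (si20 << 18)
 *
 * The target is computed at translation time from pc_next, so each folds to
 * a move-immediate in the generated code.
 */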
true; +} + +static bool trans_pcaddu18i(DisasContext *ctx, arg_pcaddu18i *a) +{ + target_ulong pc = ctx->base.pc_next; + target_ulong addr = pc + ((target_ulong)(a->si20) << 18); + tcg_gen_movi_tl(cpu_gpr[a->rd], addr); + return true; +} + +static bool trans_slti(DisasContext *ctx, arg_slti *a) +{ + gen_slt_imm(ctx, OPC_LARCH_SLTI, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_sltui(DisasContext *ctx, arg_sltui *a) +{ + gen_slt_imm(ctx, OPC_LARCH_SLTIU, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_lu52i_d(DisasContext *ctx, arg_lu52i_d *a) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + gen_load_gpr(t1, a->rj); + + tcg_gen_movi_tl(t0, a->si12); + tcg_gen_shli_tl(t0, t0, 52); + tcg_gen_andi_tl(t1, t1, 0xfffffffffffffU); + tcg_gen_or_tl(cpu_gpr[a->rd], t0, t1); + + tcg_temp_free(t0); + tcg_temp_free(t1); + return true; +} + +static bool trans_ori(DisasContext *ctx, arg_ori *a) +{ + gen_logic_imm(ctx, OPC_LARCH_ORI, a->rd, a->rj, a->ui12); + return true; +} + +static bool trans_xori(DisasContext *ctx, arg_xori *a) +{ + gen_logic_imm(ctx, OPC_LARCH_XORI, a->rd, a->rj, a->ui12); + return true; +} + +static bool trans_bstrins_d(DisasContext *ctx, arg_bstrins_d *a) +{ + int lsb = a->lsbd; + int msb = a->msbd; + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + if (lsb > msb) { + return false; + } + + gen_load_gpr(t1, a->rj); + gen_load_gpr(t0, a->rd); + tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1); + gen_store_gpr(t0, a->rd); + + tcg_temp_free(t0); + tcg_temp_free(t1); + return true; +} + +static bool trans_bstrpick_d(DisasContext *ctx, arg_bstrpick_d *a) +{ + int lsb = a->lsbd; + int msb = a->msbd; + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + if (lsb > msb) { + return false; + } + + gen_load_gpr(t1, a->rj); + gen_load_gpr(t0, a->rd); + tcg_gen_extract_tl(t0, t1, lsb, msb - lsb + 1); + gen_store_gpr(t0, a->rd); + + tcg_temp_free(t0); + tcg_temp_free(t1); + return true; +} + +static bool trans_bstrins_w(DisasContext *ctx, arg_bstrins_w *a) +{ + gen_bitops(ctx, OPC_LARCH_TRINS_W, a->rd, a->rj, a->lsbw, a->msbw); + return true; +} + +static bool trans_bstrpick_w(DisasContext *ctx, arg_bstrpick_w *a) +{ + if (a->lsbw > a->msbw) { + return false; + } + gen_bitops(ctx, OPC_LARCH_TRPICK_W, a->rd, a->rj, a->lsbw, + a->msbw - a->lsbw); + return true; +} + +static bool trans_ldptr_w(DisasContext *ctx, arg_ldptr_w *a) +{ + gen_ld(ctx, OPC_LARCH_LDPTR_W, a->rd, a->rj, a->si14 << 2); + return true; +} + +static bool trans_stptr_w(DisasContext *ctx, arg_stptr_w *a) +{ + gen_st(ctx, OPC_LARCH_STPTR_W, a->rd, a->rj, a->si14 << 2); + return true; +} + +static bool trans_ldptr_d(DisasContext *ctx, arg_ldptr_d *a) +{ + gen_ld(ctx, OPC_LARCH_LDPTR_D, a->rd, a->rj, a->si14 << 2); + return true; +} + +static bool trans_stptr_d(DisasContext *ctx, arg_stptr_d *a) +{ + gen_st(ctx, OPC_LARCH_STPTR_D, a->rd, a->rj, a->si14 << 2); + return true; +} + +static bool trans_ld_b(DisasContext *ctx, arg_ld_b *a) +{ + gen_ld(ctx, OPC_LARCH_LD_B, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_ld_h(DisasContext *ctx, arg_ld_h *a) +{ + gen_ld(ctx, OPC_LARCH_LD_H, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_ld_w(DisasContext *ctx, arg_ld_w *a) +{ + gen_ld(ctx, OPC_LARCH_LD_W, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_ld_d(DisasContext *ctx, arg_ld_d *a) +{ + gen_ld(ctx, OPC_LARCH_LD_D, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_st_b(DisasContext *ctx, arg_st_b *a) +{ + gen_st(ctx, 
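/*
 * Review note (annotation, not part of the patch): ldptr/stptr above, and
 * ll/sc below, scale their signed 14-bit immediate by 4 (si14 << 2), giving
 * word-aligned byte offsets in [-32768, +32764].
 */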
OPC_LARCH_ST_B, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_st_h(DisasContext *ctx, arg_st_h *a) +{ + gen_st(ctx, OPC_LARCH_ST_H, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_st_w(DisasContext *ctx, arg_st_w *a) +{ + gen_st(ctx, OPC_LARCH_ST_W, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_st_d(DisasContext *ctx, arg_st_d *a) +{ + gen_st(ctx, OPC_LARCH_ST_D, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_ld_bu(DisasContext *ctx, arg_ld_bu *a) +{ + gen_ld(ctx, OPC_LARCH_LD_BU, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_ld_hu(DisasContext *ctx, arg_ld_hu *a) +{ + gen_ld(ctx, OPC_LARCH_LD_HU, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_ld_wu(DisasContext *ctx, arg_ld_wu *a) +{ + gen_ld(ctx, OPC_LARCH_LD_WU, a->rd, a->rj, a->si12); + return true; +} + +static bool trans_preld(DisasContext *ctx, arg_preld *a) +{ + /* Treat as NOP. */ + return true; +} + +static bool trans_ll_w(DisasContext *ctx, arg_ll_w *a) +{ + gen_ld(ctx, OPC_LARCH_LL_W, a->rd, a->rj, a->si14 << 2); + return true; +} + +static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a) +{ + gen_st_cond(ctx, a->rd, a->rj, a->si14 << 2, MO_TESL, false); + return true; +} + +static bool trans_ll_d(DisasContext *ctx, arg_ll_d *a) +{ + gen_ld(ctx, OPC_LARCH_LL_D, a->rd, a->rj, a->si14 << 2); + return true; +} + +static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a) +{ + gen_st_cond(ctx, a->rd, a->rj, a->si14 << 2, MO_TEQ, false); + return true; +} + +static bool trans_fld_s(DisasContext *ctx, arg_fld_s *a) +{ + gen_fp_ldst(ctx, OPC_LARCH_FLD_S, a->fd, a->rj, a->si12); + return true; +} + +static bool trans_fst_s(DisasContext *ctx, arg_fst_s *a) +{ + gen_fp_ldst(ctx, OPC_LARCH_FST_S, a->fd, a->rj, a->si12); + return true; +} + +static bool trans_fld_d(DisasContext *ctx, arg_fld_d *a) +{ + gen_fp_ldst(ctx, OPC_LARCH_FLD_D, a->fd, a->rj, a->si12); + return true; +} + +static bool trans_fst_d(DisasContext *ctx, arg_fst_d *a) +{ + gen_fp_ldst(ctx, OPC_LARCH_FST_D, a->fd, a->rj, a->si12); + return true; +} + +static bool trans_ldx_b(DisasContext *ctx, arg_ldx_b *a) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + int mem_idx = ctx->mem_idx; + + gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]); + tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_SB); + gen_store_gpr(t1, a->rd); + tcg_temp_free(t0); + tcg_temp_free(t1); + return true; +} + +static bool trans_ldx_h(DisasContext *ctx, arg_ldx_h *a) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + int mem_idx = ctx->mem_idx; + + gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]); + tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TESW | ctx->default_tcg_memop_mask); + gen_store_gpr(t1, a->rd); + tcg_temp_free(t0); + tcg_temp_free(t1); + return true; +} + +static bool trans_ldx_w(DisasContext *ctx, arg_ldx_w *a) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + int mem_idx = ctx->mem_idx; + + gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]); + tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TESL | ctx->default_tcg_memop_mask); + gen_store_gpr(t1, a->rd); + tcg_temp_free(t0); + return true; +} + +static bool trans_ldx_d(DisasContext *ctx, arg_ldx_d *a) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + int mem_idx = ctx->mem_idx; + + gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]); + tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEQ | ctx->default_tcg_memop_mask); + gen_store_gpr(t1, a->rd); + tcg_temp_free(t1); + return true; +} + +static bool 
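/*
 * Review note (annotation, not part of the patch): trans_ldx_w() above
 * frees only t0 and trans_ldx_d() frees only t1, leaking the other TCG
 * temporary in each case; both should end with tcg_temp_free(t0) and
 * tcg_temp_free(t1), as the neighbouring ldx/stx helpers do.
 */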
trans_stx_b(DisasContext *ctx, arg_stx_b *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    int mem_idx = ctx->mem_idx;
+
+    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+    gen_load_gpr(t1, a->rd);
+    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_stx_h(DisasContext *ctx, arg_stx_h *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    int mem_idx = ctx->mem_idx;
+
+    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+    gen_load_gpr(t1, a->rd);
+    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW | ctx->default_tcg_memop_mask);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_stx_w(DisasContext *ctx, arg_stx_w *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    int mem_idx = ctx->mem_idx;
+
+    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+    gen_load_gpr(t1, a->rd);
+    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL | ctx->default_tcg_memop_mask);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_stx_d(DisasContext *ctx, arg_stx_d *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    int mem_idx = ctx->mem_idx;
+
+    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+    gen_load_gpr(t1, a->rd);
+    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ | ctx->default_tcg_memop_mask);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_ldx_bu(DisasContext *ctx, arg_ldx_bu *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    int mem_idx = ctx->mem_idx;
+
+    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
+    gen_store_gpr(t1, a->rd);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_ldx_hu(DisasContext *ctx, arg_ldx_hu *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    int mem_idx = ctx->mem_idx;
+
+    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEUW | ctx->default_tcg_memop_mask);
+    gen_store_gpr(t1, a->rd);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_ldx_wu(DisasContext *ctx, arg_ldx_wu *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv t1 = tcg_temp_new();
+    int mem_idx = ctx->mem_idx;
+
+    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
+    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEUL | ctx->default_tcg_memop_mask);
+    gen_store_gpr(t1, a->rd);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_fldx_s(DisasContext *ctx, arg_fldx_s *a)
+{
+    gen_flt3_ldst(ctx, OPC_LARCH_FLDX_S, a->fd, 0, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fldx_d(DisasContext *ctx, arg_fldx_d *a)
+{
+    gen_flt3_ldst(ctx, OPC_LARCH_FLDX_D, a->fd, 0, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fstx_s(DisasContext *ctx, arg_fstx_s *a)
+{
+    gen_flt3_ldst(ctx, OPC_LARCH_FSTX_S, 0, a->fd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fstx_d(DisasContext *ctx, arg_fstx_d *a)
+{
+    gen_flt3_ldst(ctx, OPC_LARCH_FSTX_D, 0, a->fd, a->rj, a->rk);
+    return true;
+}
+
+#define TRANS_AM_W(name, op)                                              \
+    static bool trans_##name(DisasContext *ctx, arg_##name *a)            \
+    {                                                                     \
+        if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) {     \
+            qemu_log_mask(LOG_GUEST_ERROR,                                \
+                          "%s: rd must differ from rj and rk\n",          \
+                          __func__);                                      \
+            return false;                                                 \
+        }                                                                 \
+        int mem_idx = ctx->mem_idx;                                       \
+        TCGv addr = tcg_temp_new();                                       \
+        TCGv val = tcg_temp_new();                                        \
+        TCGv ret = tcg_temp_new();                                        \
+                                                                          \
+        gen_load_gpr(addr, a->rj);                                        \
+        gen_load_gpr(val, a->rk);                                         \
+        tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx,                 \
+                                 MO_TESL | ctx->default_tcg_memop_mask);  \
+        gen_store_gpr(ret, a->rd);                                        \
+                                                                          \
+        tcg_temp_free(addr);                                              \
+        tcg_temp_free(val);                                               \
+        tcg_temp_free(ret);                                               \
+        return true;                                                      \
+    }
+
+#define TRANS_AM_D(name, op)                                              \
+    static bool trans_##name(DisasContext *ctx, arg_##name *a)            \
+    {                                                                     \
+        if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) {     \
+            qemu_log_mask(LOG_GUEST_ERROR,                                \
+                          "%s: rd must differ from rj and rk\n",          \
+                          __func__);                                      \
+            return false;                                                 \
+        }                                                                 \
+        int mem_idx = ctx->mem_idx;                                       \
+        TCGv addr = tcg_temp_new();                                       \
+        TCGv val = tcg_temp_new();                                        \
+        TCGv ret = tcg_temp_new();                                        \
+                                                                          \
+        gen_load_gpr(addr, a->rj);                                        \
+        gen_load_gpr(val, a->rk);                                         \
+        tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx,                 \
+                                 MO_TEQ | ctx->default_tcg_memop_mask);   \
+        gen_store_gpr(ret, a->rd);                                        \
+                                                                          \
+        tcg_temp_free(addr);                                              \
+        tcg_temp_free(val);                                               \
+        tcg_temp_free(ret);                                               \
+        return true;                                                      \
+    }
+
+#define TRANS_AM(name, op)   \
+    TRANS_AM_W(name##_w, op) \
+    TRANS_AM_D(name##_d, op)
+
+TRANS_AM(amswap, xchg)           /* trans_amswap_w, trans_amswap_d */
+TRANS_AM(amadd, fetch_add)       /* trans_amadd_w, trans_amadd_d */
+TRANS_AM(amand, fetch_and)       /* trans_amand_w, trans_amand_d */
+TRANS_AM(amor, fetch_or)         /* trans_amor_w, trans_amor_d */
+TRANS_AM(amxor, fetch_xor)       /* trans_amxor_w, trans_amxor_d */
+TRANS_AM(ammax, fetch_smax)      /* trans_ammax_w, trans_ammax_d */
+TRANS_AM(ammin, fetch_smin)      /* trans_ammin_w, trans_ammin_d */
+TRANS_AM_W(ammax_wu, fetch_umax) /* trans_ammax_wu */
+TRANS_AM_D(ammax_du, fetch_umax) /* trans_ammax_du */
+TRANS_AM_W(ammin_wu, fetch_umin) /* trans_ammin_wu */
+TRANS_AM_D(ammin_du, fetch_umin) /* trans_ammin_du */
+#undef TRANS_AM
+#undef TRANS_AM_W
+#undef TRANS_AM_D
+
+#define TRANS_AM_DB_W(name, op)                                           \
+    static bool trans_##name(DisasContext *ctx, arg_##name *a)            \
+    {                                                                     \
+        if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) {     \
+            qemu_log_mask(LOG_GUEST_ERROR,                                \
+                          "%s: rd must differ from rj and rk\n",          \
+                          __func__);                                      \
+            return false;                                                 \
+        }                                                                 \
+        int mem_idx = ctx->mem_idx;                                       \
+        TCGv addr = tcg_temp_new();                                       \
+        TCGv val = tcg_temp_new();                                        \
+        TCGv ret = tcg_temp_new();                                        \
+                                                                          \
+        gen_sync(0x10);                                                   \
+        gen_load_gpr(addr, a->rj);                                        \
+        gen_load_gpr(val, a->rk);                                         \
+        tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx,                 \
+                                 MO_TESL | ctx->default_tcg_memop_mask);  \
+        gen_store_gpr(ret, a->rd);                                        \
+                                                                          \
+        tcg_temp_free(addr);                                              \
+        tcg_temp_free(val);                                               \
+        tcg_temp_free(ret);                                               \
+        return true;                                                      \
+    }
+
+#define TRANS_AM_DB_D(name, op)                                           \
+    static bool trans_##name(DisasContext *ctx, arg_##name *a)            \
+    {                                                                     \
+        if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) {     \
+            qemu_log_mask(LOG_GUEST_ERROR,                                \
+                          "%s: rd must differ from rj and rk\n",          \
+                          __func__);                                      \
+            return false;                                                 \
+        }                                                                 \
+        int mem_idx = ctx->mem_idx;                                       \
+        TCGv addr = tcg_temp_new();                                       \
+        TCGv val = tcg_temp_new();                                        \
+        TCGv ret = tcg_temp_new();                                        \
+                                                                          \
+        gen_sync(0x10);                                                   \
+        gen_load_gpr(addr, a->rj);                                        \
+        gen_load_gpr(val, a->rk);                                         \
+        tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx,                 \
+                                 MO_TEQ | ctx->default_tcg_memop_mask);   \
+        gen_store_gpr(ret, a->rd);                                        \
+                                                                          \
+        tcg_temp_free(addr);                                              \
+        tcg_temp_free(val);                                               \
+        tcg_temp_free(ret);                                               \
+        return true;                                                      \
+    }
+
+#define TRANS_AM_DB(name, op)      \
+    TRANS_AM_DB_W(name##_db_w, op) \
+    TRANS_AM_DB_D(name##_db_d, op)
+
+TRANS_AM_DB(amswap, xchg)     /* trans_amswap_db_w, trans_amswap_db_d */
+TRANS_AM_DB(amadd, fetch_add) /* trans_amadd_db_w, trans_amadd_db_d */
+TRANS_AM_DB(amand, fetch_and) /* trans_amand_db_w, trans_amand_db_d */
+TRANS_AM_DB(amor, fetch_or)   /* trans_amor_db_w, trans_amor_db_d */
+TRANS_AM_DB(amxor, fetch_xor) /* trans_amxor_db_w, trans_amxor_db_d */
+TRANS_AM_DB(ammax, fetch_smax) /* trans_ammax_db_w, trans_ammax_db_d */
+TRANS_AM_DB(ammin, fetch_smin) /* trans_ammin_db_w, trans_ammin_db_d */
+TRANS_AM_DB_W(ammax_db_wu, fetch_umax) /* trans_ammax_db_wu */
+TRANS_AM_DB_D(ammax_db_du, fetch_umax) /* trans_ammax_db_du */
+TRANS_AM_DB_W(ammin_db_wu, fetch_umin) /* trans_ammin_db_wu */
+TRANS_AM_DB_D(ammin_db_du, fetch_umin) /* trans_ammin_db_du */
+#undef TRANS_AM_DB
+#undef TRANS_AM_DB_W
+#undef TRANS_AM_DB_D
+
+static bool trans_dbar(DisasContext *ctx, arg_dbar *a)
+{
+    gen_sync(a->whint);
+    return true;
+}
+
+static bool trans_ibar(DisasContext *ctx, arg_ibar *a)
+{
+    /*
+     * IBAR is a no-op in QEMU: translated code always stays coherent
+     * with guest memory, so we only need to end the translation block.
+     */
+    ctx->base.is_jmp = DISAS_STOP;
+    return true;
+}
+
+#define ASRTGT                                \
+    do {                                      \
+        TCGv t1 = tcg_temp_new();             \
+        TCGv t2 = tcg_temp_new();             \
+        gen_load_gpr(t1, a->rj);              \
+        gen_load_gpr(t2, a->rk);              \
+        gen_helper_asrtgt_d(cpu_env, t1, t2); \
+        tcg_temp_free(t1);                    \
+        tcg_temp_free(t2);                    \
+    } while (0)
+
+#define ASRTLE                                \
+    do {                                      \
+        TCGv t1 = tcg_temp_new();             \
+        TCGv t2 = tcg_temp_new();             \
+        gen_load_gpr(t1, a->rj);              \
+        gen_load_gpr(t2, a->rk);              \
+        gen_helper_asrtle_d(cpu_env, t1, t2); \
+        tcg_temp_free(t1);                    \
+        tcg_temp_free(t2);                    \
+    } while (0)
+
+static bool trans_fldgt_s(DisasContext *ctx, arg_fldgt_s *a)
+{
+    ASRTGT;
+    gen_flt3_ldst(ctx, OPC_LARCH_FLDGT_S, a->fd, 0, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fldgt_d(DisasContext *ctx, arg_fldgt_d *a)
+{
+    ASRTGT;
+    gen_flt3_ldst(ctx, OPC_LARCH_FLDGT_D, a->fd, 0, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fldle_s(DisasContext *ctx, arg_fldle_s *a)
+{
+    ASRTLE;
+    gen_flt3_ldst(ctx, OPC_LARCH_FLDLE_S, a->fd, 0, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fldle_d(DisasContext *ctx, arg_fldle_d *a)
+{
+    ASRTLE;
+    gen_flt3_ldst(ctx, OPC_LARCH_FLDLE_D, a->fd, 0, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fstgt_s(DisasContext *ctx, arg_fstgt_s *a)
+{
+    ASRTGT;
+    gen_flt3_ldst(ctx, OPC_LARCH_FSTGT_S, 0, a->fd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fstgt_d(DisasContext *ctx, arg_fstgt_d *a)
+{
+    ASRTGT;
+    gen_flt3_ldst(ctx, OPC_LARCH_FSTGT_D, 0, a->fd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fstle_s(DisasContext *ctx, arg_fstle_s *a)
+{
+    ASRTLE;
+    gen_flt3_ldst(ctx, OPC_LARCH_FSTLE_S, 0, a->fd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_fstle_d(DisasContext *ctx, arg_fstle_d *a)
+{
+    ASRTLE;
+    gen_flt3_ldst(ctx, OPC_LARCH_FSTLE_D, 0, a->fd, a->rj, a->rk);
+    return true;
+}
+
+#define DECL_ARG(name)   \
+    arg_##name arg = {   \
+        .rd = a->rd,     \
+        .rj = a->rj,     \
+        .rk = a->rk,     \
+    };
+
+static bool trans_ldgt_b(DisasContext *ctx, arg_ldgt_b *a)
+{
+    ASRTGT;
+    DECL_ARG(ldx_b)
+    trans_ldx_b(ctx, &arg);
+    return true;
+}
+
+static bool trans_ldgt_h(DisasContext *ctx, arg_ldgt_h *a)
+{
+    ASRTGT;
+    DECL_ARG(ldx_h)
+    trans_ldx_h(ctx, &arg);
+    return true;
+}
+
+static bool trans_ldgt_w(DisasContext *ctx, arg_ldgt_w *a)
+{
+    ASRTGT;
+    DECL_ARG(ldx_w)
+    trans_ldx_w(ctx, &arg);
+    return true;
+}
+
+static bool trans_ldgt_d(DisasContext *ctx, arg_ldgt_d *a)
+{
+    ASRTGT;
+    DECL_ARG(ldx_d)
+    trans_ldx_d(ctx, &arg);
+    return true;
+}
+
+static bool trans_ldle_b(DisasContext *ctx, arg_ldle_b *a)
+{
+    ASRTLE;
+    DECL_ARG(ldx_b)
+    trans_ldx_b(ctx, &arg);
+    return true;
+}
+
+static bool trans_ldle_h(DisasContext *ctx, arg_ldle_h *a)
+{
+    ASRTLE;
+    DECL_ARG(ldx_h)
+    trans_ldx_h(ctx, &arg);
+    return true;
+}
+
+static bool trans_ldle_w(DisasContext *ctx, arg_ldle_w *a)
+{
+    ASRTLE;
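+    /*
+     * Note on the whole LDGT/LDLE/STGT/STLE group: the ASRTGT/ASRTLE
+     * helpers raise a bound-check exception when the rj/rk comparison
+     * fails; otherwise the access is simply re-dispatched as a normal
+     * indexed ldx/stx using the DECL_ARG-built argument set.
+     */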
+    DECL_ARG(ldx_w)
+    trans_ldx_w(ctx, &arg);
+    return true;
+}
+
+static bool trans_ldle_d(DisasContext *ctx, arg_ldle_d *a)
+{
+    ASRTLE;
+    DECL_ARG(ldx_d)
+    trans_ldx_d(ctx, &arg);
+    return true;
+}
+
+static bool trans_stgt_b(DisasContext *ctx, arg_stgt_b *a)
+{
+    ASRTGT;
+    DECL_ARG(stx_b)
+    trans_stx_b(ctx, &arg);
+    return true;
+}
+
+static bool trans_stgt_h(DisasContext *ctx, arg_stgt_h *a)
+{
+    ASRTGT;
+    DECL_ARG(stx_h)
+    trans_stx_h(ctx, &arg);
+    return true;
+}
+
+static bool trans_stgt_w(DisasContext *ctx, arg_stgt_w *a)
+{
+    ASRTGT;
+    DECL_ARG(stx_w)
+    trans_stx_w(ctx, &arg);
+    return true;
+}
+
+static bool trans_stgt_d(DisasContext *ctx, arg_stgt_d *a)
+{
+    ASRTGT;
+    DECL_ARG(stx_d)
+    trans_stx_d(ctx, &arg);
+    return true;
+}
+
+static bool trans_stle_b(DisasContext *ctx, arg_stle_b *a)
+{
+    ASRTLE;
+    DECL_ARG(stx_b)
+    trans_stx_b(ctx, &arg);
+    return true;
+}
+
+static bool trans_stle_h(DisasContext *ctx, arg_stle_h *a)
+{
+    ASRTLE;
+    DECL_ARG(stx_h)
+    trans_stx_h(ctx, &arg);
+    return true;
+}
+
+static bool trans_stle_w(DisasContext *ctx, arg_stle_w *a)
+{
+    ASRTLE;
+    DECL_ARG(stx_w)
+    trans_stx_w(ctx, &arg);
+    return true;
+}
+
+static bool trans_stle_d(DisasContext *ctx, arg_stle_d *a)
+{
+    ASRTLE;
+    DECL_ARG(stx_d)
+    trans_stx_d(ctx, &arg);
+    return true;
+}
+
+#undef ASRTGT
+#undef ASRTLE
+#undef DECL_ARG
+
+static bool trans_beqz(DisasContext *ctx, arg_beqz *a)
+{
+    gen_compute_branch(ctx, OPC_LARCH_BEQZ, 4, a->rj, 0, a->offs21 << 2);
+    return true;
+}
+
+static bool trans_bnez(DisasContext *ctx, arg_bnez *a)
+{
+    gen_compute_branch(ctx, OPC_LARCH_BNEZ, 4, a->rj, 0, a->offs21 << 2);
+    return true;
+}
+
+static bool trans_bceqz(DisasContext *ctx, arg_bceqz *a)
+{
+    TCGv_i32 cj = tcg_const_i32(a->cj);
+    TCGv v0 = tcg_temp_new();
+    TCGv v1 = tcg_const_tl(0);
+
+    gen_helper_movcf2reg(v0, cpu_env, cj);
+    tcg_gen_setcond_tl(TCG_COND_EQ, bcond, v0, v1);
+    ctx->hflags |= LARCH_HFLAG_BC;
+    ctx->btarget = ctx->base.pc_next + (a->offs21 << 2);
+
+    tcg_temp_free_i32(cj);
+    tcg_temp_free(v0);
+    tcg_temp_free(v1);
+    return true;
+}
+
+static bool trans_bcnez(DisasContext *ctx, arg_bcnez *a)
+{
+    TCGv_i32 cj = tcg_const_i32(a->cj);
+    TCGv v0 = tcg_temp_new();
+    TCGv v1 = tcg_const_tl(0);
+
+    gen_helper_movcf2reg(v0, cpu_env, cj);
+    tcg_gen_setcond_tl(TCG_COND_NE, bcond, v0, v1);
+    ctx->hflags |= LARCH_HFLAG_BC;
+    ctx->btarget = ctx->base.pc_next + (a->offs21 << 2);
+
+    tcg_temp_free_i32(cj);
+    tcg_temp_free(v0);
+    tcg_temp_free(v1);
+    return true;
+}
+
+static bool trans_b(DisasContext *ctx, arg_b *a)
+{
+    gen_compute_branch(ctx, OPC_LARCH_B, 4, 0, 0, a->offs << 2);
+    return true;
+}
+
+static bool trans_bl(DisasContext *ctx, arg_bl *a)
+{
+    ctx->btarget = ctx->base.pc_next + (a->offs << 2);
+    tcg_gen_movi_tl(cpu_gpr[1], ctx->base.pc_next + 4);
+    ctx->hflags |= LARCH_HFLAG_B;
+    gen_branch(ctx, 4);
+    return true;
+}
+
+static bool trans_blt(DisasContext *ctx, arg_blt *a)
+{
+    gen_compute_branch(ctx, OPC_LARCH_BLT, 4, a->rj, a->rd, a->offs16 << 2);
+    return true;
+}
+
+static bool trans_bge(DisasContext *ctx, arg_bge *a)
+{
+    gen_compute_branch(ctx, OPC_LARCH_BGE, 4, a->rj, a->rd, a->offs16 << 2);
+    return true;
+}
+
+static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
+{
+    gen_compute_branch(ctx, OPC_LARCH_BLTU, 4, a->rj, a->rd, a->offs16 << 2);
+    return true;
+}
+
+static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
+{
+    gen_compute_branch(ctx, OPC_LARCH_BGEU, 4, a->rj, a->rd, a->offs16 << 2);
+    return true;
+}
+
+static bool trans_beq(DisasContext *ctx, arg_beq *a)
+{
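+    /*
+     * The branch encodings here carry signed instruction-word offsets:
+     * LoongArch instructions are fixed 4-byte units, so offs16/offs21/
+     * offs26 are shifted left by 2 to form the byte displacement, and
+     * the "4" passed to the branch helpers is the instruction size.
+     */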
+    gen_compute_branch(ctx, OPC_LARCH_BEQ, 4, a->rj, a->rd, a->offs16 << 2);
+    return true;
+}
+
+static bool trans_bne(DisasContext *ctx, arg_bne *a)
+{
+    gen_compute_branch(ctx, OPC_LARCH_BNE, 4, a->rj, a->rd, a->offs16 << 2);
+    return true;
+}
+
+static bool trans_jirl(DisasContext *ctx, arg_jirl *a)
+{
+    gen_base_offset_addr(ctx, btarget, a->rj, a->offs16 << 2);
+    if (a->rd != 0) {
+        tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->base.pc_next + 4);
+    }
+    ctx->hflags |= LARCH_HFLAG_BR;
+    gen_branch(ctx, 4);
+
+    return true;
+}
+
+#define TRANS_F4FR(name, fmt, op, bits)                              \
+    static bool trans_##name##_##fmt(DisasContext *ctx,              \
+                                     arg_##name##_##fmt *a)          \
+    {                                                                \
+        check_cp1_enabled(ctx);                                      \
+        TCGv_i##bits fp0 = tcg_temp_new_i##bits();                   \
+        TCGv_i##bits fp1 = tcg_temp_new_i##bits();                   \
+        TCGv_i##bits fp2 = tcg_temp_new_i##bits();                   \
+        TCGv_i##bits fp3 = tcg_temp_new_i##bits();                   \
+        gen_load_fpr##bits(ctx, fp0, a->fj);                         \
+        gen_load_fpr##bits(ctx, fp1, a->fk);                         \
+        gen_load_fpr##bits(ctx, fp2, a->fa);                         \
+        gen_helper_float_##op##_##fmt(fp3, cpu_env, fp0, fp1, fp2);  \
+        gen_store_fpr##bits(ctx, fp3, a->fd);                        \
+        tcg_temp_free_i##bits(fp3);                                  \
+        tcg_temp_free_i##bits(fp2);                                  \
+        tcg_temp_free_i##bits(fp1);                                  \
+        tcg_temp_free_i##bits(fp0);                                  \
+        return true;                                                 \
+    }
+
+TRANS_F4FR(fmadd, s, maddf, 32)   /* trans_fmadd_s */
+TRANS_F4FR(fmadd, d, maddf, 64)   /* trans_fmadd_d */
+TRANS_F4FR(fmsub, s, msubf, 32)   /* trans_fmsub_s */
+TRANS_F4FR(fmsub, d, msubf, 64)   /* trans_fmsub_d */
+TRANS_F4FR(fnmadd, s, nmaddf, 32) /* trans_fnmadd_s */
+TRANS_F4FR(fnmadd, d, nmaddf, 64) /* trans_fnmadd_d */
+TRANS_F4FR(fnmsub, s, nmsubf, 32) /* trans_fnmsub_s */
+TRANS_F4FR(fnmsub, d, nmsubf, 64) /* trans_fnmsub_d */
+#undef TRANS_F4FR
+
+static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FADD_S, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FADD_D, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FSUB_S, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FSUB_D, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMUL_S, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMUL_D, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FDIV_S, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FDIV_D, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMAX_S, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMAX_D, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMIN_S, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMIN_D, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmaxa_s(DisasContext *ctx, arg_fmaxa_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMAXA_S, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmaxa_d(DisasContext *ctx, arg_fmaxa_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMAXA_D, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmina_s(DisasContext *ctx, arg_fmina_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMINA_S, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmina_d(DisasContext *ctx, arg_fmina_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMINA_D, a->fk, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fscaleb_s(DisasContext *ctx, arg_fscaleb_s *a)
+{
+    TCGv_i32 fp0 = tcg_temp_new_i32();
+    TCGv_i32 fp1 = tcg_temp_new_i32();
+
+    check_cp1_enabled(ctx);
+    gen_load_fpr32(ctx, fp0, a->fj);
+    gen_load_fpr32(ctx, fp1, a->fk);
+    gen_helper_float_exp2_s(fp0, cpu_env, fp0, fp1);
+    tcg_temp_free_i32(fp1);
+    gen_store_fpr32(ctx, fp0, a->fd);
+    tcg_temp_free_i32(fp0);
+    return true;
+}
+
+static bool trans_fscaleb_d(DisasContext *ctx, arg_fscaleb_d *a)
+{
+    TCGv_i64 fp0 = tcg_temp_new_i64();
+    TCGv_i64 fp1 = tcg_temp_new_i64();
+
+    check_cp1_enabled(ctx);
+    gen_load_fpr64(ctx, fp0, a->fj);
+    gen_load_fpr64(ctx, fp1, a->fk);
+    gen_helper_float_exp2_d(fp0, cpu_env, fp0, fp1);
+    tcg_temp_free_i64(fp1);
+    gen_store_fpr64(ctx, fp0, a->fd);
+    tcg_temp_free_i64(fp0);
+    return true;
+}
+
+static bool trans_fcopysign_s(DisasContext *ctx, arg_fcopysign_s *a)
+{
+    TCGv_i32 fp0 = tcg_temp_new_i32();
+    TCGv_i32 fp1 = tcg_temp_new_i32();
+    TCGv_i32 fp2 = tcg_temp_new_i32();
+
+    check_cp1_enabled(ctx);
+    gen_load_fpr32(ctx, fp0, a->fj);
+    gen_load_fpr32(ctx, fp1, a->fk);
+    tcg_gen_deposit_i32(fp2, fp1, fp0, 0, 31);
+    gen_store_fpr32(ctx, fp2, a->fd);
+
+    tcg_temp_free_i32(fp2);
+    tcg_temp_free_i32(fp1);
+    tcg_temp_free_i32(fp0);
+    return true;
+}
+
+static bool trans_fcopysign_d(DisasContext *ctx, arg_fcopysign_d *a)
+{
+    TCGv_i64 fp0 = tcg_temp_new_i64();
+    TCGv_i64 fp1 = tcg_temp_new_i64();
+    TCGv_i64 fp2 = tcg_temp_new_i64();
+
+    check_cp1_enabled(ctx);
+    gen_load_fpr64(ctx, fp0, a->fj);
+    gen_load_fpr64(ctx, fp1, a->fk);
+    tcg_gen_deposit_i64(fp2, fp1, fp0, 0, 63);
+    gen_store_fpr64(ctx, fp2, a->fd);
+
+    tcg_temp_free_i64(fp2);
+    tcg_temp_free_i64(fp1);
+    tcg_temp_free_i64(fp0);
+    return true;
+}
+
+static bool trans_fabs_s(DisasContext *ctx, arg_fabs_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FABS_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fabs_d(DisasContext *ctx, arg_fabs_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FABS_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fneg_s(DisasContext *ctx, arg_fneg_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FNEG_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fneg_d(DisasContext *ctx, arg_fneg_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FNEG_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_flogb_s(DisasContext *ctx, arg_flogb_s *a)
+{
+    TCGv_i32 fp0 = tcg_temp_new_i32();
+    TCGv_i32 fp1 = tcg_temp_new_i32();
+
+    check_cp1_enabled(ctx);
+    gen_load_fpr32(ctx, fp0, a->fj);
+    gen_helper_float_logb_s(fp1, cpu_env, fp0);
+    gen_store_fpr32(ctx, fp1, a->fd);
+
+    tcg_temp_free_i32(fp0);
+    tcg_temp_free_i32(fp1);
+    return true;
+}
+
+static bool trans_flogb_d(DisasContext *ctx, arg_flogb_d *a)
+{
+    TCGv_i64 fp0 = tcg_temp_new_i64();
+    TCGv_i64 fp1 = tcg_temp_new_i64();
+
+    check_cp1_enabled(ctx);
+    gen_load_fpr64(ctx, fp0, a->fj);
+    gen_helper_float_logb_d(fp1, cpu_env, fp0);
+    gen_store_fpr64(ctx, fp1, a->fd);
+
+    tcg_temp_free_i64(fp0);
+    tcg_temp_free_i64(fp1);
+    return true;
+}
+
+static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FCLASS_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FCLASS_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FSQRT_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FSQRT_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_frecip_s(DisasContext *ctx, arg_frecip_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FRECIP_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_frecip_d(DisasContext *ctx, arg_frecip_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FRECIP_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_frsqrt_s(DisasContext *ctx, arg_frsqrt_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FRSQRT_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_frsqrt_d(DisasContext *ctx, arg_frsqrt_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FRSQRT_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmov_s(DisasContext *ctx, arg_fmov_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMOV_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fmov_d(DisasContext *ctx, arg_fmov_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FMOV_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_movgr2fr_w(DisasContext *ctx, arg_movgr2fr_w *a)
+{
+    gen_cp1(ctx, OPC_LARCH_GR2FR_W, a->rj, a->fd);
+    return true;
+}
+
+static bool trans_movgr2fr_d(DisasContext *ctx, arg_movgr2fr_d *a)
+{
+    gen_cp1(ctx, OPC_LARCH_GR2FR_D, a->rj, a->fd);
+    return true;
+}
+
+static bool trans_movgr2frh_w(DisasContext *ctx, arg_movgr2frh_w *a)
+{
+    gen_cp1(ctx, OPC_LARCH_GR2FRH_W, a->rj, a->fd);
+    return true;
+}
+
+static bool trans_movfr2gr_s(DisasContext *ctx, arg_movfr2gr_s *a)
+{
+    gen_cp1(ctx, OPC_LARCH_FR2GR_S, a->rd, a->fj);
+    return true;
+}
+
+static bool trans_movfr2gr_d(DisasContext *ctx, arg_movfr2gr_d *a)
+{
+    gen_cp1(ctx, OPC_LARCH_FR2GR_D, a->rd, a->fj);
+    return true;
+}
+
+static bool trans_movfrh2gr_s(DisasContext *ctx, arg_movfrh2gr_s *a)
+{
+    gen_cp1(ctx, OPC_LARCH_FRH2GR_S, a->rd, a->fj);
+    return true;
+}
+
+static bool trans_movgr2fcsr(DisasContext *ctx, arg_movgr2fcsr *a)
+{
+    TCGv t0 = tcg_temp_new();
+
+    check_cp1_enabled(ctx);
+    gen_load_gpr(t0, a->rj);
+    save_cpu_state(ctx, 0);
+    {
+        TCGv_i32 fs_tmp = tcg_const_i32(a->fcsrd);
+        gen_helper_0e2i(movgr2fcsr, t0, fs_tmp, a->rj);
+        tcg_temp_free_i32(fs_tmp);
+    }
+    /* Stop translation as we may have changed hflags */
+    ctx->base.is_jmp = DISAS_STOP;
+
+    tcg_temp_free(t0);
+    return true;
+}
+
+static bool trans_movfcsr2gr(DisasContext *ctx, arg_movfcsr2gr *a)
+{
+    TCGv t0 = tcg_temp_new();
+
+    check_cp1_enabled(ctx);
+    gen_helper_1e0i(movfcsr2gr, t0, a->fcsrs);
+    gen_store_gpr(t0, a->rd);
+    tcg_temp_free(t0);
+    return true;
+}
+
+static bool trans_movfr2cf(DisasContext *ctx, arg_movfr2cf *a)
+{
+    TCGv_i64 fp0 = tcg_temp_new_i64();
+    TCGv_i32 cd = tcg_const_i32(a->cd);
+
+    check_cp1_enabled(ctx);
+    gen_load_fpr64(ctx, fp0, a->fj);
+    gen_helper_movreg2cf(cpu_env, cd, fp0);
+
+    tcg_temp_free_i64(fp0);
+    tcg_temp_free_i32(cd);
+    return true;
+}
+
+static bool trans_movcf2fr(DisasContext *ctx, arg_movcf2fr *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv_i32 cj = tcg_const_i32(a->cj);
+
+    check_cp1_enabled(ctx);
+    gen_helper_movcf2reg(t0, cpu_env, cj);
+    gen_store_fpr64(ctx, t0, a->fd);
+
+    tcg_temp_free(t0);
+    tcg_temp_free_i32(cj);
+    return true;
+}
+
+static bool trans_movgr2cf(DisasContext *ctx, arg_movgr2cf *a)
+{
+    TCGv t0 = tcg_temp_new();
+    TCGv_i32 cd = tcg_const_i32(a->cd);
+
+    check_cp1_enabled(ctx);
+    gen_load_gpr(t0, a->rj);
+    gen_helper_movreg2cf(cpu_env, cd, t0);
+
+    tcg_temp_free(t0);
+    tcg_temp_free_i32(cd);
+    return true;
+}
+
+static bool trans_movcf2gr(DisasContext *ctx, arg_movcf2gr *a)
+{
+    TCGv_i32 cj = tcg_const_i32(a->cj);
+
+    check_cp1_enabled(ctx);
+    gen_helper_movcf2reg(cpu_gpr[a->rd], cpu_env, cj);
+
+    tcg_temp_free_i32(cj);
+    return true;
+}
+
+static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FCVT_S_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FCVT_D_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrm_w_s(DisasContext *ctx, arg_ftintrm_w_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRM_W_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrm_w_d(DisasContext *ctx, arg_ftintrm_w_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRM_W_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrm_l_s(DisasContext *ctx, arg_ftintrm_l_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRM_L_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrm_l_d(DisasContext *ctx, arg_ftintrm_l_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRM_L_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrp_w_s(DisasContext *ctx, arg_ftintrp_w_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRP_W_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrp_w_d(DisasContext *ctx, arg_ftintrp_w_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRP_W_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrp_l_s(DisasContext *ctx, arg_ftintrp_l_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRP_L_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrp_l_d(DisasContext *ctx, arg_ftintrp_l_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRP_L_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrz_w_s(DisasContext *ctx, arg_ftintrz_w_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRZ_W_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrz_w_d(DisasContext *ctx, arg_ftintrz_w_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRZ_W_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrz_l_s(DisasContext *ctx, arg_ftintrz_l_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRZ_L_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrz_l_d(DisasContext *ctx, arg_ftintrz_l_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRZ_L_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrne_w_s(DisasContext *ctx, arg_ftintrne_w_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRNE_W_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrne_w_d(DisasContext *ctx, arg_ftintrne_w_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRNE_W_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrne_l_s(DisasContext *ctx, arg_ftintrne_l_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRNE_L_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftintrne_l_d(DisasContext *ctx, arg_ftintrne_l_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINTRNE_L_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftint_w_s(DisasContext *ctx, arg_ftint_w_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINT_W_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftint_w_d(DisasContext *ctx, arg_ftint_w_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINT_W_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftint_l_s(DisasContext *ctx, arg_ftint_l_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINT_L_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ftint_l_d(DisasContext *ctx, arg_ftint_l_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FTINT_L_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ffint_s_w(DisasContext *ctx, arg_ffint_s_w *a)
+{
+    gen_farith(ctx, OPC_LARCH_FFINT_S_W, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ffint_s_l(DisasContext *ctx, arg_ffint_s_l *a)
+{
+    gen_farith(ctx, OPC_LARCH_FFINT_S_L, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ffint_d_w(DisasContext *ctx, arg_ffint_d_w *a)
+{
+    gen_farith(ctx, OPC_LARCH_FFINT_D_W, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_ffint_d_l(DisasContext *ctx, arg_ffint_d_l *a)
+{
+    gen_farith(ctx, OPC_LARCH_FFINT_D_L, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_frint_s(DisasContext *ctx, arg_frint_s *a)
+{
+    gen_farith(ctx, OPC_LARCH_FRINT_S, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_frint_d(DisasContext *ctx, arg_frint_d *a)
+{
+    gen_farith(ctx, OPC_LARCH_FRINT_D, 0, a->fj, a->fd, 0);
+    return true;
+}
+
+static bool trans_alsl_w(DisasContext *ctx, arg_alsl_w *a)
+{
+    gen_lsa(ctx, OPC_LARCH_ALSL_W, a->rd, a->rj, a->rk, a->sa2);
+    return true;
+}
+
+/* A plain-C reference model of this op follows trans_and() below. */
+static bool trans_alsl_wu(DisasContext *ctx, arg_alsl_wu *a)
+{
+    TCGv t0, t1;
+    t0 = tcg_temp_new();
+    t1 = tcg_temp_new();
+    gen_load_gpr(t0, a->rj);
+    gen_load_gpr(t1, a->rk);
+    tcg_gen_shli_tl(t0, t0, a->sa2 + 1);
+    tcg_gen_add_tl(t0, t0, t1);
+    tcg_gen_ext32u_tl(cpu_gpr[a->rd], t0);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+
+    return true;
+}
+
+static bool trans_alsl_d(DisasContext *ctx, arg_alsl_d *a)
+{
+    check_larch_64(ctx);
+    gen_lsa(ctx, OPC_LARCH_ALSL_D, a->rd, a->rj, a->rk, a->sa2);
+    return true;
+}
+
+static bool trans_bytepick_w(DisasContext *ctx, arg_bytepick_w *a)
+{
+    gen_align(ctx, 32, a->rd, a->rj, a->rk, a->sa2);
+    return true;
+}
+
+static bool trans_bytepick_d(DisasContext *ctx, arg_bytepick_d *a)
+{
+    check_larch_64(ctx);
+    gen_align(ctx, 64, a->rd, a->rj, a->rk, a->sa3);
+    return true;
+}
+
+static bool trans_add_w(DisasContext *ctx, arg_add_w *a)
+{
+    gen_arith(ctx, OPC_LARCH_ADD_W, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_sub_w(DisasContext *ctx, arg_sub_w *a)
+{
+    gen_arith(ctx, OPC_LARCH_SUB_W, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_add_d(DisasContext *ctx, arg_add_d *a)
+{
+    gen_arith(ctx, OPC_LARCH_ADD_D, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_sub_d(DisasContext *ctx, arg_sub_d *a)
+{
+    check_larch_64(ctx);
+    gen_arith(ctx, OPC_LARCH_SUB_D, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_slt(DisasContext *ctx, arg_slt *a)
+{
+    gen_slt(ctx, OPC_LARCH_SLT, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
+{
+    gen_slt(ctx, OPC_LARCH_SLTU, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_maskeqz(DisasContext *ctx, arg_maskeqz *a)
+{
+    gen_cond_move(ctx, OPC_LARCH_MASKEQZ, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_masknez(DisasContext *ctx, arg_masknez *a)
+{
+    gen_cond_move(ctx, OPC_LARCH_MASKNEZ, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_nor(DisasContext *ctx, arg_nor *a)
+{
+    gen_logic(ctx, OPC_LARCH_NOR, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_and(DisasContext *ctx, arg_and *a)
+{
+    gen_logic(ctx, OPC_LARCH_AND, a->rd, a->rj, a->rk);
+    return true;
+}
+
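+/*
+ * Reference model for the ALSL.WU translation above (an illustrative
+ * sketch only; alsl_wu_ref is a hypothetical name, not a helper defined
+ * in this file).  The shli/add/ext32u TCG sequence computes:
+ *
+ *     static uint64_t alsl_wu_ref(uint64_t rj, uint64_t rk, unsigned sa2)
+ *     {
+ *         return (uint32_t)((rj << (sa2 + 1)) + rk);
+ *     }
+ *
+ * i.e. the 2-bit sa2 field encodes a shift of sa2 + 1, and the low
+ * 32 bits of the sum are zero-extended into the 64-bit destination.
+ */
+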
+static bool trans_or(DisasContext *ctx, arg_or *a)
+{
+    gen_logic(ctx, OPC_LARCH_OR, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_xor(DisasContext *ctx, arg_xor *a)
+{
+    gen_logic(ctx, OPC_LARCH_XOR, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_orn(DisasContext *ctx, arg_orn *a)
+{
+    TCGv t0, t1;
+    t0 = tcg_temp_new();
+    t1 = tcg_temp_new();
+    gen_load_gpr(t0, a->rk);
+    gen_load_gpr(t1, a->rj);
+    tcg_gen_not_tl(t0, t0);
+    tcg_gen_or_tl(t0, t1, t0);
+    gen_store_gpr(t0, a->rd);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_andn(DisasContext *ctx, arg_andn *a)
+{
+    TCGv t0, t1;
+    t0 = tcg_temp_new();
+    t1 = tcg_temp_new();
+    gen_load_gpr(t0, a->rk);
+    gen_load_gpr(t1, a->rj);
+    tcg_gen_not_tl(t0, t0);
+    tcg_gen_and_tl(t0, t1, t0);
+    gen_store_gpr(t0, a->rd);
+    tcg_temp_free(t0);
+    tcg_temp_free(t1);
+    return true;
+}
+
+static bool trans_sll_w(DisasContext *ctx, arg_sll_w *a)
+{
+    gen_shift(ctx, OPC_LARCH_SLL_W, a->rd, a->rk, a->rj);
+    return true;
+}
+
+static bool trans_srl_w(DisasContext *ctx, arg_srl_w *a)
+{
+    gen_shift(ctx, OPC_LARCH_SRL_W, a->rd, a->rk, a->rj);
+    return true;
+}
+
+static bool trans_sra_w(DisasContext *ctx, arg_sra_w *a)
+{
+    gen_shift(ctx, OPC_LARCH_SRA_W, a->rd, a->rk, a->rj);
+    return true;
+}
+
+static bool trans_sll_d(DisasContext *ctx, arg_sll_d *a)
+{
+    check_larch_64(ctx);
+    gen_shift(ctx, OPC_LARCH_SLL_D, a->rd, a->rk, a->rj);
+    return true;
+}
+
+static bool trans_srl_d(DisasContext *ctx, arg_srl_d *a)
+{
+    check_larch_64(ctx);
+    gen_shift(ctx, OPC_LARCH_SRL_D, a->rd, a->rk, a->rj);
+    return true;
+}
+
+static bool trans_sra_d(DisasContext *ctx, arg_sra_d *a)
+{
+    check_larch_64(ctx);
+    gen_shift(ctx, OPC_LARCH_SRA_D, a->rd, a->rk, a->rj);
+    return true;
+}
+
+static bool trans_rotr_w(DisasContext *ctx, arg_rotr_w *a)
+{
+    gen_shift(ctx, OPC_LARCH_ROTR_W, a->rd, a->rk, a->rj);
+    return true;
+}
+
+static bool trans_rotr_d(DisasContext *ctx, arg_rotr_d *a)
+{
+    check_larch_64(ctx);
+    gen_shift(ctx, OPC_LARCH_ROTR_D, a->rd, a->rk, a->rj);
+    return true;
+}
+
+static bool trans_crc_w_b_w(DisasContext *ctx, arg_crc_w_b_w *a)
+{
+    gen_crc32(ctx, a->rd, a->rj, a->rk, 1, 0);
+    return true;
+}
+
+static bool trans_crc_w_h_w(DisasContext *ctx, arg_crc_w_h_w *a)
+{
+    gen_crc32(ctx, a->rd, a->rj, a->rk, 2, 0);
+    return true;
+}
+
+static bool trans_crc_w_w_w(DisasContext *ctx, arg_crc_w_w_w *a)
+{
+    gen_crc32(ctx, a->rd, a->rj, a->rk, 4, 0);
+    return true;
+}
+
+static bool trans_crc_w_d_w(DisasContext *ctx, arg_crc_w_d_w *a)
+{
+    gen_crc32(ctx, a->rd, a->rj, a->rk, 8, 0);
+    return true;
+}
+
+static bool trans_crcc_w_b_w(DisasContext *ctx, arg_crcc_w_b_w *a)
+{
+    gen_crc32(ctx, a->rd, a->rj, a->rk, 1, 1);
+    return true;
+}
+
+static bool trans_crcc_w_h_w(DisasContext *ctx, arg_crcc_w_h_w *a)
+{
+    gen_crc32(ctx, a->rd, a->rj, a->rk, 2, 1);
+    return true;
+}
+
+static bool trans_crcc_w_w_w(DisasContext *ctx, arg_crcc_w_w_w *a)
+{
+    gen_crc32(ctx, a->rd, a->rj, a->rk, 4, 1);
+    return true;
+}
+
+static bool trans_crcc_w_d_w(DisasContext *ctx, arg_crcc_w_d_w *a)
+{
+    gen_crc32(ctx, a->rd, a->rj, a->rk, 8, 1);
+    return true;
+}
+
+static bool trans_mul_w(DisasContext *ctx, arg_mul_w *a)
+{
+    gen_r6_muldiv(ctx, OPC_LARCH_MUL_W, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_mulh_w(DisasContext *ctx, arg_mulh_w *a)
+{
+    gen_r6_muldiv(ctx, OPC_LARCH_MULH_W, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_mulh_wu(DisasContext *ctx, arg_mulh_wu *a)
+{
+    gen_r6_muldiv(ctx, OPC_LARCH_MULH_WU, a->rd, a->rj, a->rk);
+    return true;
+}
+
+static bool trans_mul_d(DisasContext *ctx, arg_mul_d
*a) +{ + check_larch_64(ctx); + gen_r6_muldiv(ctx, OPC_LARCH_MUL_D, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_mulh_d(DisasContext *ctx, arg_mulh_d *a) +{ + check_larch_64(ctx); + gen_r6_muldiv(ctx, OPC_LARCH_MULH_D, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_mulh_du(DisasContext *ctx, arg_mulh_du *a) +{ + check_larch_64(ctx); + gen_r6_muldiv(ctx, OPC_LARCH_MULH_DU, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_mulw_d_w(DisasContext *ctx, arg_mulw_d_w *a) +{ + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + gen_load_gpr(t0, a->rj); + gen_load_gpr(t1, a->rk); + tcg_gen_ext32s_i64(t0, t0); + tcg_gen_ext32s_i64(t1, t1); + tcg_gen_mul_i64(t2, t0, t1); + gen_store_gpr(t2, a->rd); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + return true; +} + +static bool trans_mulw_d_wu(DisasContext *ctx, arg_mulw_d_wu *a) +{ + TCGv_i64 t0 = tcg_temp_new_i64(); + TCGv_i64 t1 = tcg_temp_new_i64(); + TCGv_i64 t2 = tcg_temp_new_i64(); + gen_load_gpr(t0, a->rj); + gen_load_gpr(t1, a->rk); + tcg_gen_ext32u_i64(t0, t0); + tcg_gen_ext32u_i64(t1, t1); + tcg_gen_mul_i64(t2, t0, t1); + gen_store_gpr(t2, a->rd); + tcg_temp_free_i64(t0); + tcg_temp_free_i64(t1); + tcg_temp_free_i64(t2); + return true; +} + +static bool trans_div_w(DisasContext *ctx, arg_div_w *a) +{ + gen_r6_muldiv(ctx, OPC_LARCH_DIV_W, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_mod_w(DisasContext *ctx, arg_mod_w *a) +{ + gen_r6_muldiv(ctx, OPC_LARCH_MOD_W, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_div_wu(DisasContext *ctx, arg_div_wu *a) +{ + gen_r6_muldiv(ctx, OPC_LARCH_DIV_WU, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_mod_wu(DisasContext *ctx, arg_mod_wu *a) +{ + gen_r6_muldiv(ctx, OPC_LARCH_MOD_WU, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_div_d(DisasContext *ctx, arg_div_d *a) +{ + check_larch_64(ctx); + gen_r6_muldiv(ctx, OPC_LARCH_DIV_D, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_mod_d(DisasContext *ctx, arg_mod_d *a) +{ + check_larch_64(ctx); + gen_r6_muldiv(ctx, OPC_LARCH_MOD_D, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_div_du(DisasContext *ctx, arg_div_du *a) +{ + check_larch_64(ctx); + gen_r6_muldiv(ctx, OPC_LARCH_DIV_DU, a->rd, a->rj, a->rk); + return true; +} + +static bool trans_mod_du(DisasContext *ctx, arg_mod_du *a) +{ + check_larch_64(ctx); + gen_r6_muldiv(ctx, OPC_LARCH_MOD_DU, a->rd, a->rj, a->rk); + return true; +} + +/* do not update CP0.BadVaddr */ +static bool trans_asrtle_d(DisasContext *ctx, arg_asrtle_d *a) +{ + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + gen_load_gpr(t1, a->rj); + gen_load_gpr(t2, a->rk); + gen_helper_asrtle_d(cpu_env, t1, t2); + tcg_temp_free(t1); + tcg_temp_free(t2); + return true; +} + +/* do not update CP0.BadVaddr */ +static bool trans_asrtgt_d(DisasContext *ctx, arg_asrtgt_d *a) +{ + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_temp_new(); + gen_load_gpr(t1, a->rj); + gen_load_gpr(t2, a->rk); + gen_helper_asrtgt_d(cpu_env, t1, t2); + tcg_temp_free(t1); + tcg_temp_free(t2); + return true; +} + +#ifdef CONFIG_USER_ONLY +static bool trans_gr2scr(DisasContext *ctx, arg_gr2scr *a) +{ + return false; +} + +static bool trans_scr2gr(DisasContext *ctx, arg_scr2gr *a) +{ + return false; +} +#else +static bool trans_gr2scr(DisasContext *ctx, arg_gr2scr *a) +{ + TCGv_i32 sd = tcg_const_i32(a->sd); + TCGv val = tcg_temp_new(); + check_lbt_enabled(ctx); + 
gen_load_gpr(val, a->rj); + gen_helper_store_scr(cpu_env, sd, val); + tcg_temp_free_i32(sd); + tcg_temp_free(val); + return true; +} + +static bool trans_scr2gr(DisasContext *ctx, arg_scr2gr *a) +{ + if (a->rd == 0) { + /* Nop */ + return true; + } + + TCGv_i32 tsj = tcg_const_i32(a->sj); + check_lbt_enabled(ctx); + gen_helper_load_scr(cpu_gpr[a->rd], cpu_env, tsj); + tcg_temp_free_i32(tsj); + return true; +} +#endif + +static bool trans_clo_w(DisasContext *ctx, arg_clo_w *a) +{ + gen_cl(ctx, OPC_LARCH_CLO_W, a->rd, a->rj); + return true; +} + +static bool trans_clz_w(DisasContext *ctx, arg_clz_w *a) +{ + gen_cl(ctx, OPC_LARCH_CLZ_W, a->rd, a->rj); + return true; +} + +static bool trans_cto_w(DisasContext *ctx, arg_cto_w *a) +{ + TCGv t0 = tcg_temp_new(); + + gen_load_gpr(t0, a->rj); + gen_helper_cto_w(cpu_gpr[a->rd], cpu_env, t0); + + tcg_temp_free(t0); + return true; +} + +static bool trans_ctz_w(DisasContext *ctx, arg_ctz_w *a) +{ + TCGv t0 = tcg_temp_new(); + + gen_load_gpr(t0, a->rj); + gen_helper_ctz_w(cpu_gpr[a->rd], cpu_env, t0); + + tcg_temp_free(t0); + return true; +} + +static bool trans_clo_d(DisasContext *ctx, arg_clo_d *a) +{ + check_larch_64(ctx); + gen_cl(ctx, OPC_LARCH_CLO_D, a->rd, a->rj); + return true; +} + +static bool trans_clz_d(DisasContext *ctx, arg_clz_d *a) +{ + check_larch_64(ctx); + gen_cl(ctx, OPC_LARCH_CLZ_D, a->rd, a->rj); + return true; +} + +static bool trans_cto_d(DisasContext *ctx, arg_cto_d *a) +{ + TCGv t0 = tcg_temp_new(); + + gen_load_gpr(t0, a->rj); + gen_helper_cto_d(cpu_gpr[a->rd], cpu_env, t0); + + tcg_temp_free(t0); + return true; +} + +static bool trans_ctz_d(DisasContext *ctx, arg_ctz_d *a) +{ + TCGv t0 = tcg_temp_new(); + + gen_load_gpr(t0, a->rj); + gen_helper_ctz_d(cpu_gpr[a->rd], cpu_env, t0); + + tcg_temp_free(t0); + return true; +} + +static bool trans_revb_2h(DisasContext *ctx, arg_revb_2h *a) +{ + gen_bshfl(ctx, OPC_LARCH_REVB_2H, a->rj, a->rd); + return true; +} + +static bool trans_revb_4h(DisasContext *ctx, arg_revb_4h *a) +{ + check_larch_64(ctx); + gen_bshfl(ctx, OPC_LARCH_REVB_4H, a->rj, a->rd); + return true; +} + +static bool trans_revb_2w(DisasContext *ctx, arg_revb_2w *a) +{ + handle_rev32(ctx, a->rj, a->rd); + return true; +} + +static bool trans_revb_d(DisasContext *ctx, arg_revb_d *a) +{ + handle_rev64(ctx, a->rj, a->rd); + return true; +} + +static bool trans_revh_2w(DisasContext *ctx, arg_revh_2w *a) +{ + handle_rev16(ctx, a->rj, a->rd); + return true; +} + +static bool trans_revh_d(DisasContext *ctx, arg_revh_d *a) +{ + check_larch_64(ctx); + gen_bshfl(ctx, OPC_LARCH_REVH_D, a->rj, a->rd); + return true; +} + +static bool trans_bitrev_4b(DisasContext *ctx, arg_bitrev_4b *a) +{ + gen_bitswap(ctx, OPC_LARCH_BREV_4B, a->rd, a->rj); + return true; +} + +static bool trans_bitrev_8b(DisasContext *ctx, arg_bitrev_8b *a) +{ + check_larch_64(ctx); + gen_bitswap(ctx, OPC_LARCH_BREV_8B, a->rd, a->rj); + return true; +} + +static bool trans_bitrev_w(DisasContext *ctx, arg_bitrev_w *a) +{ + TCGv t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rj); + gen_helper_bitrev_w(cpu_gpr[a->rd], cpu_env, t0); + tcg_temp_free(t0); + return true; +} + +static bool trans_bitrev_d(DisasContext *ctx, arg_bitrev_d *a) +{ + TCGv t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rj); + gen_helper_bitrev_d(cpu_gpr[a->rd], cpu_env, t0); + tcg_temp_free(t0); + return true; +} + +static bool trans_ext_w_h(DisasContext *ctx, arg_ext_w_h *a) +{ + gen_bshfl(ctx, OPC_LARCH_EXT_WH, a->rj, a->rd); + return true; +} + +static bool trans_ext_w_b(DisasContext *ctx, 
arg_ext_w_b *a) +{ + gen_bshfl(ctx, OPC_LARCH_EXT_WB, a->rj, a->rd); + return true; +} + +static bool trans_srli_w(DisasContext *ctx, arg_srli_w *a) +{ + gen_shift_imm(ctx, OPC_LARCH_SRLI_W, a->rd, a->rj, a->ui5); + return true; +} + +static bool trans_srai_w(DisasContext *ctx, arg_srai_w *a) +{ + gen_shift_imm(ctx, OPC_LARCH_SRAI_W, a->rd, a->rj, a->ui5); + return true; +} + +static bool trans_srai_d(DisasContext *ctx, arg_srai_d *a) +{ + TCGv t0; + check_larch_64(ctx); + t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rj); + tcg_gen_sari_tl(cpu_gpr[a->rd], t0, a->ui6); + tcg_temp_free(t0); + return true; +} + +static bool trans_rotri_w(DisasContext *ctx, arg_rotri_w *a) +{ + gen_shift_imm(ctx, OPC_LARCH_ROTRI_W, a->rd, a->rj, a->ui5); + return true; +} + +static bool trans_rotri_d(DisasContext *ctx, arg_rotri_d *a) +{ + TCGv t0; + check_larch_64(ctx); + t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rj); + tcg_gen_rotri_tl(cpu_gpr[a->rd], t0, a->ui6); + tcg_temp_free(t0); + return true; +} + +static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a) +{ + check_cp1_enabled(ctx); + gen_fcmp_s(ctx, a->fcond, a->fk, a->fj, a->cd); + return true; +} + +static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a) +{ + check_cp1_enabled(ctx); + gen_fcmp_d(ctx, a->fcond, a->fk, a->fj, a->cd); + return true; +} + +static bool trans_fsel(DisasContext *ctx, arg_fsel *a) +{ + TCGv_i64 fj = tcg_temp_new_i64(); + TCGv_i64 fk = tcg_temp_new_i64(); + TCGv_i64 fd = tcg_temp_new_i64(); + TCGv_i32 ca = tcg_const_i32(a->ca); + check_cp1_enabled(ctx); + gen_load_fpr64(ctx, fj, a->fj); + gen_load_fpr64(ctx, fk, a->fk); + gen_helper_fsel(fd, cpu_env, fj, fk, ca); + gen_store_fpr64(ctx, fd, a->fd); + tcg_temp_free_i64(fj); + tcg_temp_free_i64(fk); + tcg_temp_free_i64(fd); + tcg_temp_free_i32(ca); + return true; +} + +#include "cpu-csr.h" + +#ifdef CONFIG_USER_ONLY + +static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a) +{ + return false; +} + +#else + +#define GEN_CSRRQ_CASE(name) \ + do { \ + case LOONGARCH_CSR_##name: \ + gen_csr_rdq(ctx, cpu_gpr[rd], LOONGARCH_CSR_##name); \ + } while (0) + +static bool trans_csrrd(DisasContext *ctx, unsigned rd, unsigned csr) +{ + switch (csr) { + GEN_CSRRQ_CASE(CRMD); + break; + GEN_CSRRQ_CASE(PRMD); + break; + GEN_CSRRQ_CASE(EUEN); + break; + GEN_CSRRQ_CASE(MISC); + break; + GEN_CSRRQ_CASE(ECFG); + break; + GEN_CSRRQ_CASE(ESTAT); + break; + GEN_CSRRQ_CASE(ERA); + break; + GEN_CSRRQ_CASE(BADV); + break; + GEN_CSRRQ_CASE(BADI); + break; + GEN_CSRRQ_CASE(EEPN); + break; + GEN_CSRRQ_CASE(TLBIDX); + break; + GEN_CSRRQ_CASE(TLBEHI); + break; + GEN_CSRRQ_CASE(TLBELO0); + break; + GEN_CSRRQ_CASE(TLBELO1); + break; + GEN_CSRRQ_CASE(TLBWIRED); + break; + GEN_CSRRQ_CASE(GTLBC); + break; + GEN_CSRRQ_CASE(TRGP); + break; + GEN_CSRRQ_CASE(ASID); + break; + GEN_CSRRQ_CASE(PGDL); + break; + GEN_CSRRQ_CASE(PGDH); + break; + case LOONGARCH_CSR_PGD: + gen_helper_read_pgd(cpu_gpr[rd], cpu_env); + break; + GEN_CSRRQ_CASE(PWCTL0); + break; + GEN_CSRRQ_CASE(PWCTL1); + break; + GEN_CSRRQ_CASE(STLBPGSIZE); + break; + GEN_CSRRQ_CASE(RVACFG); + break; + GEN_CSRRQ_CASE(CPUID); + break; + GEN_CSRRQ_CASE(PRCFG1); + break; + GEN_CSRRQ_CASE(PRCFG2); + break; + GEN_CSRRQ_CASE(PRCFG3); + break; + GEN_CSRRQ_CASE(KS0); + break; + GEN_CSRRQ_CASE(KS1); + break; + GEN_CSRRQ_CASE(KS2); + break; + GEN_CSRRQ_CASE(KS3); + break; + GEN_CSRRQ_CASE(KS4); + break; + GEN_CSRRQ_CASE(KS5); + break; + GEN_CSRRQ_CASE(KS6); + break; + GEN_CSRRQ_CASE(KS7); + break; + GEN_CSRRQ_CASE(KS8); + break; + 
GEN_CSRRQ_CASE(TMID); + break; + GEN_CSRRQ_CASE(TCFG); + break; + GEN_CSRRQ_CASE(TVAL); + break; + GEN_CSRRQ_CASE(CNTC); + break; + GEN_CSRRQ_CASE(TINTCLR); + break; + GEN_CSRRQ_CASE(GSTAT); + break; + GEN_CSRRQ_CASE(GCFG); + break; + GEN_CSRRQ_CASE(GINTC); + break; + GEN_CSRRQ_CASE(GCNTC); + break; + GEN_CSRRQ_CASE(LLBCTL); + break; + GEN_CSRRQ_CASE(IMPCTL1); + break; + GEN_CSRRQ_CASE(IMPCTL2); + break; + GEN_CSRRQ_CASE(GNMI); + break; + GEN_CSRRQ_CASE(TLBRENT); + break; + GEN_CSRRQ_CASE(TLBRBADV); + break; + GEN_CSRRQ_CASE(TLBRERA); + break; + GEN_CSRRQ_CASE(TLBRSAVE); + break; + GEN_CSRRQ_CASE(TLBRELO0); + break; + GEN_CSRRQ_CASE(TLBRELO1); + break; + GEN_CSRRQ_CASE(TLBREHI); + break; + GEN_CSRRQ_CASE(TLBRPRMD); + break; + GEN_CSRRQ_CASE(ERRCTL); + break; + GEN_CSRRQ_CASE(ERRINFO); + break; + GEN_CSRRQ_CASE(ERRINFO1); + break; + GEN_CSRRQ_CASE(ERRENT); + break; + GEN_CSRRQ_CASE(ERRERA); + break; + GEN_CSRRQ_CASE(ERRSAVE); + break; + GEN_CSRRQ_CASE(CTAG); + break; + GEN_CSRRQ_CASE(DMWIN0); + break; + GEN_CSRRQ_CASE(DMWIN1); + break; + GEN_CSRRQ_CASE(DMWIN2); + break; + GEN_CSRRQ_CASE(DMWIN3); + break; + GEN_CSRRQ_CASE(PERFCTRL0); + break; + GEN_CSRRQ_CASE(PERFCNTR0); + break; + GEN_CSRRQ_CASE(PERFCTRL1); + break; + GEN_CSRRQ_CASE(PERFCNTR1); + break; + GEN_CSRRQ_CASE(PERFCTRL2); + break; + GEN_CSRRQ_CASE(PERFCNTR2); + break; + GEN_CSRRQ_CASE(PERFCTRL3); + break; + GEN_CSRRQ_CASE(PERFCNTR3); + break; + /* debug */ + GEN_CSRRQ_CASE(MWPC); + break; + GEN_CSRRQ_CASE(MWPS); + break; + GEN_CSRRQ_CASE(DB0ADDR); + break; + GEN_CSRRQ_CASE(DB0MASK); + break; + GEN_CSRRQ_CASE(DB0CTL); + break; + GEN_CSRRQ_CASE(DB0ASID); + break; + GEN_CSRRQ_CASE(DB1ADDR); + break; + GEN_CSRRQ_CASE(DB1MASK); + break; + GEN_CSRRQ_CASE(DB1CTL); + break; + GEN_CSRRQ_CASE(DB1ASID); + break; + GEN_CSRRQ_CASE(DB2ADDR); + break; + GEN_CSRRQ_CASE(DB2MASK); + break; + GEN_CSRRQ_CASE(DB2CTL); + break; + GEN_CSRRQ_CASE(DB2ASID); + break; + GEN_CSRRQ_CASE(DB3ADDR); + break; + GEN_CSRRQ_CASE(DB3MASK); + break; + GEN_CSRRQ_CASE(DB3CTL); + break; + GEN_CSRRQ_CASE(DB3ASID); + break; + GEN_CSRRQ_CASE(FWPC); + break; + GEN_CSRRQ_CASE(FWPS); + break; + GEN_CSRRQ_CASE(IB0ADDR); + break; + GEN_CSRRQ_CASE(IB0MASK); + break; + GEN_CSRRQ_CASE(IB0CTL); + break; + GEN_CSRRQ_CASE(IB0ASID); + break; + GEN_CSRRQ_CASE(IB1ADDR); + break; + GEN_CSRRQ_CASE(IB1MASK); + break; + GEN_CSRRQ_CASE(IB1CTL); + break; + GEN_CSRRQ_CASE(IB1ASID); + break; + GEN_CSRRQ_CASE(IB2ADDR); + break; + GEN_CSRRQ_CASE(IB2MASK); + break; + GEN_CSRRQ_CASE(IB2CTL); + break; + GEN_CSRRQ_CASE(IB2ASID); + break; + GEN_CSRRQ_CASE(IB3ADDR); + break; + GEN_CSRRQ_CASE(IB3MASK); + break; + GEN_CSRRQ_CASE(IB3CTL); + break; + GEN_CSRRQ_CASE(IB3ASID); + break; + GEN_CSRRQ_CASE(IB4ADDR); + break; + GEN_CSRRQ_CASE(IB4MASK); + break; + GEN_CSRRQ_CASE(IB4CTL); + break; + GEN_CSRRQ_CASE(IB4ASID); + break; + GEN_CSRRQ_CASE(IB5ADDR); + break; + GEN_CSRRQ_CASE(IB5MASK); + break; + GEN_CSRRQ_CASE(IB5CTL); + break; + GEN_CSRRQ_CASE(IB5ASID); + break; + GEN_CSRRQ_CASE(IB6ADDR); + break; + GEN_CSRRQ_CASE(IB6MASK); + break; + GEN_CSRRQ_CASE(IB6CTL); + break; + GEN_CSRRQ_CASE(IB6ASID); + break; + GEN_CSRRQ_CASE(IB7ADDR); + break; + GEN_CSRRQ_CASE(IB7MASK); + break; + GEN_CSRRQ_CASE(IB7CTL); + break; + GEN_CSRRQ_CASE(IB7ASID); + break; + GEN_CSRRQ_CASE(DEBUG); + break; + GEN_CSRRQ_CASE(DERA); + break; + GEN_CSRRQ_CASE(DESAVE); + break; + default: + return false; + } + +#undef GEN_CSRRQ_CASE + + return true; +} + +#define GEN_CSRWQ_CASE(name) \ + do { \ + case LOONGARCH_CSR_##name: \ + 
gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_##name); \ + } while (0) + +static bool trans_csrwr(DisasContext *ctx, unsigned rd, unsigned csr) +{ + + switch (csr) { + case LOONGARCH_CSR_CRMD: + save_cpu_state(ctx, 1); + gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_CRMD); + gen_save_pc(ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + break; + GEN_CSRWQ_CASE(PRMD); + break; + case LOONGARCH_CSR_EUEN: + gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_EUEN); + /* Stop translation */ + gen_save_pc(ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + break; + GEN_CSRWQ_CASE(MISC); + break; + GEN_CSRWQ_CASE(ECFG); + break; + GEN_CSRWQ_CASE(ESTAT); + break; + GEN_CSRWQ_CASE(ERA); + break; + GEN_CSRWQ_CASE(BADV); + break; + GEN_CSRWQ_CASE(BADI); + break; + GEN_CSRWQ_CASE(EEPN); + break; + GEN_CSRWQ_CASE(TLBIDX); + break; + GEN_CSRWQ_CASE(TLBEHI); + break; + GEN_CSRWQ_CASE(TLBELO0); + break; + GEN_CSRWQ_CASE(TLBELO1); + break; + GEN_CSRWQ_CASE(TLBWIRED); + break; + GEN_CSRWQ_CASE(GTLBC); + break; + GEN_CSRWQ_CASE(TRGP); + break; + GEN_CSRWQ_CASE(ASID); + break; + GEN_CSRWQ_CASE(PGDL); + break; + GEN_CSRWQ_CASE(PGDH); + break; + GEN_CSRWQ_CASE(PGD); + break; + GEN_CSRWQ_CASE(PWCTL0); + break; + GEN_CSRWQ_CASE(PWCTL1); + break; + GEN_CSRWQ_CASE(STLBPGSIZE); + break; + GEN_CSRWQ_CASE(RVACFG); + break; + GEN_CSRWQ_CASE(CPUID); + break; + GEN_CSRWQ_CASE(PRCFG1); + break; + GEN_CSRWQ_CASE(PRCFG2); + break; + GEN_CSRWQ_CASE(PRCFG3); + break; + GEN_CSRWQ_CASE(KS0); + break; + GEN_CSRWQ_CASE(KS1); + break; + GEN_CSRWQ_CASE(KS2); + break; + GEN_CSRWQ_CASE(KS3); + break; + GEN_CSRWQ_CASE(KS4); + break; + GEN_CSRWQ_CASE(KS5); + break; + GEN_CSRWQ_CASE(KS6); + break; + GEN_CSRWQ_CASE(KS7); + break; + GEN_CSRWQ_CASE(KS8); + break; + GEN_CSRWQ_CASE(TMID); + break; + GEN_CSRWQ_CASE(TCFG); + break; + GEN_CSRWQ_CASE(TVAL); + break; + GEN_CSRWQ_CASE(CNTC); + break; + GEN_CSRWQ_CASE(TINTCLR); + break; + GEN_CSRWQ_CASE(GSTAT); + break; + GEN_CSRWQ_CASE(GCFG); + break; + GEN_CSRWQ_CASE(GINTC); + break; + GEN_CSRWQ_CASE(GCNTC); + break; + GEN_CSRWQ_CASE(LLBCTL); + break; + GEN_CSRWQ_CASE(IMPCTL1); + break; + GEN_CSRWQ_CASE(IMPCTL2); + break; + GEN_CSRWQ_CASE(GNMI); + break; + GEN_CSRWQ_CASE(TLBRENT); + break; + GEN_CSRWQ_CASE(TLBRBADV); + break; + GEN_CSRWQ_CASE(TLBRERA); + break; + GEN_CSRWQ_CASE(TLBRSAVE); + break; + GEN_CSRWQ_CASE(TLBRELO0); + break; + GEN_CSRWQ_CASE(TLBRELO1); + break; + GEN_CSRWQ_CASE(TLBREHI); + break; + GEN_CSRWQ_CASE(TLBRPRMD); + break; + GEN_CSRWQ_CASE(ERRCTL); + break; + GEN_CSRWQ_CASE(ERRINFO); + break; + GEN_CSRWQ_CASE(ERRINFO1); + break; + GEN_CSRWQ_CASE(ERRENT); + break; + GEN_CSRWQ_CASE(ERRERA); + break; + GEN_CSRWQ_CASE(ERRSAVE); + break; + GEN_CSRWQ_CASE(CTAG); + break; + GEN_CSRWQ_CASE(DMWIN0); + break; + GEN_CSRWQ_CASE(DMWIN1); + break; + GEN_CSRWQ_CASE(DMWIN2); + break; + GEN_CSRWQ_CASE(DMWIN3); + break; + GEN_CSRWQ_CASE(PERFCTRL0); + break; + GEN_CSRWQ_CASE(PERFCNTR0); + break; + GEN_CSRWQ_CASE(PERFCTRL1); + break; + GEN_CSRWQ_CASE(PERFCNTR1); + break; + GEN_CSRWQ_CASE(PERFCTRL2); + break; + GEN_CSRWQ_CASE(PERFCNTR2); + break; + GEN_CSRWQ_CASE(PERFCTRL3); + break; + GEN_CSRWQ_CASE(PERFCNTR3); + break; + /* debug */ + GEN_CSRWQ_CASE(MWPC); + break; + GEN_CSRWQ_CASE(MWPS); + break; + GEN_CSRWQ_CASE(DB0ADDR); + break; + GEN_CSRWQ_CASE(DB0MASK); + break; + GEN_CSRWQ_CASE(DB0CTL); + break; + GEN_CSRWQ_CASE(DB0ASID); + break; + GEN_CSRWQ_CASE(DB1ADDR); + break; + GEN_CSRWQ_CASE(DB1MASK); + break; + GEN_CSRWQ_CASE(DB1CTL); + break; + GEN_CSRWQ_CASE(DB1ASID); + break; + 
GEN_CSRWQ_CASE(DB2ADDR); + break; + GEN_CSRWQ_CASE(DB2MASK); + break; + GEN_CSRWQ_CASE(DB2CTL); + break; + GEN_CSRWQ_CASE(DB2ASID); + break; + GEN_CSRWQ_CASE(DB3ADDR); + break; + GEN_CSRWQ_CASE(DB3MASK); + break; + GEN_CSRWQ_CASE(DB3CTL); + break; + GEN_CSRWQ_CASE(DB3ASID); + break; + GEN_CSRWQ_CASE(FWPC); + break; + GEN_CSRWQ_CASE(FWPS); + break; + GEN_CSRWQ_CASE(IB0ADDR); + break; + GEN_CSRWQ_CASE(IB0MASK); + break; + GEN_CSRWQ_CASE(IB0CTL); + break; + GEN_CSRWQ_CASE(IB0ASID); + break; + GEN_CSRWQ_CASE(IB1ADDR); + break; + GEN_CSRWQ_CASE(IB1MASK); + break; + GEN_CSRWQ_CASE(IB1CTL); + break; + GEN_CSRWQ_CASE(IB1ASID); + break; + GEN_CSRWQ_CASE(IB2ADDR); + break; + GEN_CSRWQ_CASE(IB2MASK); + break; + GEN_CSRWQ_CASE(IB2CTL); + break; + GEN_CSRWQ_CASE(IB2ASID); + break; + GEN_CSRWQ_CASE(IB3ADDR); + break; + GEN_CSRWQ_CASE(IB3MASK); + break; + GEN_CSRWQ_CASE(IB3CTL); + break; + GEN_CSRWQ_CASE(IB3ASID); + break; + GEN_CSRWQ_CASE(IB4ADDR); + break; + GEN_CSRWQ_CASE(IB4MASK); + break; + GEN_CSRWQ_CASE(IB4CTL); + break; + GEN_CSRWQ_CASE(IB4ASID); + break; + GEN_CSRWQ_CASE(IB5ADDR); + break; + GEN_CSRWQ_CASE(IB5MASK); + break; + GEN_CSRWQ_CASE(IB5CTL); + break; + GEN_CSRWQ_CASE(IB5ASID); + break; + GEN_CSRWQ_CASE(IB6ADDR); + break; + GEN_CSRWQ_CASE(IB6MASK); + break; + GEN_CSRWQ_CASE(IB6CTL); + break; + GEN_CSRWQ_CASE(IB6ASID); + break; + GEN_CSRWQ_CASE(IB7ADDR); + break; + GEN_CSRWQ_CASE(IB7MASK); + break; + GEN_CSRWQ_CASE(IB7CTL); + break; + GEN_CSRWQ_CASE(IB7ASID); + break; + GEN_CSRWQ_CASE(DEBUG); + break; + GEN_CSRWQ_CASE(DERA); + break; + GEN_CSRWQ_CASE(DESAVE); + break; + default: + return false; + } + +#undef GEN_CSRWQ_CASE + + return true; +} + +#define GEN_CSRXQ_CASE(name) \ + do { \ + case LOONGARCH_CSR_##name: \ + if (rd == 0) { \ + gen_csr_xchgq(ctx, zero, cpu_gpr[rj], LOONGARCH_CSR_##name); \ + } else { \ + gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], \ + LOONGARCH_CSR_##name); \ + } \ + } while (0) + +static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a) +{ + unsigned rd, rj, csr; + TCGv zero = tcg_const_tl(0); + rd = a->rd; + rj = a->rj; + csr = a->csr; + + if (rj == 0) { + return trans_csrrd(ctx, rd, csr); + } else if (rj == 1) { + return trans_csrwr(ctx, rd, csr); + } + + switch (csr) { + case LOONGARCH_CSR_CRMD: + save_cpu_state(ctx, 1); + if (rd == 0) { + gen_csr_xchgq(ctx, zero, cpu_gpr[rj], LOONGARCH_CSR_CRMD); + } else { + gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], LOONGARCH_CSR_CRMD); + } + gen_save_pc(ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + break; + + GEN_CSRXQ_CASE(PRMD); + break; + case LOONGARCH_CSR_EUEN: + if (rd == 0) { + gen_csr_xchgq(ctx, zero, cpu_gpr[rj], LOONGARCH_CSR_EUEN); + } else { + gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], LOONGARCH_CSR_EUEN); + } + /* Stop translation */ + gen_save_pc(ctx->base.pc_next + 4); + ctx->base.is_jmp = DISAS_EXIT; + break; + GEN_CSRXQ_CASE(MISC); + break; + GEN_CSRXQ_CASE(ECFG); + break; + GEN_CSRXQ_CASE(ESTAT); + break; + GEN_CSRXQ_CASE(ERA); + break; + GEN_CSRXQ_CASE(BADV); + break; + GEN_CSRXQ_CASE(BADI); + break; + GEN_CSRXQ_CASE(EEPN); + break; + GEN_CSRXQ_CASE(TLBIDX); + break; + GEN_CSRXQ_CASE(TLBEHI); + break; + GEN_CSRXQ_CASE(TLBELO0); + break; + GEN_CSRXQ_CASE(TLBELO1); + break; + GEN_CSRXQ_CASE(TLBWIRED); + break; + GEN_CSRXQ_CASE(GTLBC); + break; + GEN_CSRXQ_CASE(TRGP); + break; + GEN_CSRXQ_CASE(ASID); + break; + GEN_CSRXQ_CASE(PGDL); + break; + GEN_CSRXQ_CASE(PGDH); + break; + GEN_CSRXQ_CASE(PGD); + break; + GEN_CSRXQ_CASE(PWCTL0); + break; + GEN_CSRXQ_CASE(PWCTL1); + break; + 
GEN_CSRXQ_CASE(STLBPGSIZE); + break; + GEN_CSRXQ_CASE(RVACFG); + break; + GEN_CSRXQ_CASE(CPUID); + break; + GEN_CSRXQ_CASE(PRCFG1); + break; + GEN_CSRXQ_CASE(PRCFG2); + break; + GEN_CSRXQ_CASE(PRCFG3); + break; + GEN_CSRXQ_CASE(KS0); + break; + GEN_CSRXQ_CASE(KS1); + break; + GEN_CSRXQ_CASE(KS2); + break; + GEN_CSRXQ_CASE(KS3); + break; + GEN_CSRXQ_CASE(KS4); + break; + GEN_CSRXQ_CASE(KS5); + break; + GEN_CSRXQ_CASE(KS6); + break; + GEN_CSRXQ_CASE(KS7); + break; + GEN_CSRXQ_CASE(KS8); + break; + GEN_CSRXQ_CASE(TMID); + break; + GEN_CSRXQ_CASE(TCFG); + break; + GEN_CSRXQ_CASE(TVAL); + break; + GEN_CSRXQ_CASE(CNTC); + break; + GEN_CSRXQ_CASE(TINTCLR); + break; + GEN_CSRXQ_CASE(GSTAT); + break; + GEN_CSRXQ_CASE(GCFG); + break; + GEN_CSRXQ_CASE(GINTC); + break; + GEN_CSRXQ_CASE(GCNTC); + break; + GEN_CSRXQ_CASE(LLBCTL); + break; + GEN_CSRXQ_CASE(IMPCTL1); + break; + GEN_CSRXQ_CASE(IMPCTL2); + break; + GEN_CSRXQ_CASE(GNMI); + break; + GEN_CSRXQ_CASE(TLBRENT); + break; + GEN_CSRXQ_CASE(TLBRBADV); + break; + GEN_CSRXQ_CASE(TLBRERA); + break; + GEN_CSRXQ_CASE(TLBRSAVE); + break; + GEN_CSRXQ_CASE(TLBRELO0); + break; + GEN_CSRXQ_CASE(TLBRELO1); + break; + GEN_CSRXQ_CASE(TLBREHI); + break; + GEN_CSRXQ_CASE(TLBRPRMD); + break; + GEN_CSRXQ_CASE(ERRCTL); + break; + GEN_CSRXQ_CASE(ERRINFO); + break; + GEN_CSRXQ_CASE(ERRINFO1); + break; + GEN_CSRXQ_CASE(ERRENT); + break; + GEN_CSRXQ_CASE(ERRERA); + break; + GEN_CSRXQ_CASE(ERRSAVE); + break; + GEN_CSRXQ_CASE(CTAG); + break; + GEN_CSRXQ_CASE(DMWIN0); + break; + GEN_CSRXQ_CASE(DMWIN1); + break; + GEN_CSRXQ_CASE(DMWIN2); + break; + GEN_CSRXQ_CASE(DMWIN3); + break; + GEN_CSRXQ_CASE(PERFCTRL0); + break; + GEN_CSRXQ_CASE(PERFCNTR0); + break; + GEN_CSRXQ_CASE(PERFCTRL1); + break; + GEN_CSRXQ_CASE(PERFCNTR1); + break; + GEN_CSRXQ_CASE(PERFCTRL2); + break; + GEN_CSRXQ_CASE(PERFCNTR2); + break; + GEN_CSRXQ_CASE(PERFCTRL3); + break; + GEN_CSRXQ_CASE(PERFCNTR3); + break; + /* debug */ + GEN_CSRXQ_CASE(MWPC); + break; + GEN_CSRXQ_CASE(MWPS); + break; + GEN_CSRXQ_CASE(DB0ADDR); + break; + GEN_CSRXQ_CASE(DB0MASK); + break; + GEN_CSRXQ_CASE(DB0CTL); + break; + GEN_CSRXQ_CASE(DB0ASID); + break; + GEN_CSRXQ_CASE(DB1ADDR); + break; + GEN_CSRXQ_CASE(DB1MASK); + break; + GEN_CSRXQ_CASE(DB1CTL); + break; + GEN_CSRXQ_CASE(DB1ASID); + break; + GEN_CSRXQ_CASE(DB2ADDR); + break; + GEN_CSRXQ_CASE(DB2MASK); + break; + GEN_CSRXQ_CASE(DB2CTL); + break; + GEN_CSRXQ_CASE(DB2ASID); + break; + GEN_CSRXQ_CASE(DB3ADDR); + break; + GEN_CSRXQ_CASE(DB3MASK); + break; + GEN_CSRXQ_CASE(DB3CTL); + break; + GEN_CSRXQ_CASE(DB3ASID); + break; + GEN_CSRXQ_CASE(FWPC); + break; + GEN_CSRXQ_CASE(FWPS); + break; + GEN_CSRXQ_CASE(IB0ADDR); + break; + GEN_CSRXQ_CASE(IB0MASK); + break; + GEN_CSRXQ_CASE(IB0CTL); + break; + GEN_CSRXQ_CASE(IB0ASID); + break; + GEN_CSRXQ_CASE(IB1ADDR); + break; + GEN_CSRXQ_CASE(IB1MASK); + break; + GEN_CSRXQ_CASE(IB1CTL); + break; + GEN_CSRXQ_CASE(IB1ASID); + break; + GEN_CSRXQ_CASE(IB2ADDR); + break; + GEN_CSRXQ_CASE(IB2MASK); + break; + GEN_CSRXQ_CASE(IB2CTL); + break; + GEN_CSRXQ_CASE(IB2ASID); + break; + GEN_CSRXQ_CASE(IB3ADDR); + break; + GEN_CSRXQ_CASE(IB3MASK); + break; + GEN_CSRXQ_CASE(IB3CTL); + break; + GEN_CSRXQ_CASE(IB3ASID); + break; + GEN_CSRXQ_CASE(IB4ADDR); + break; + GEN_CSRXQ_CASE(IB4MASK); + break; + GEN_CSRXQ_CASE(IB4CTL); + break; + GEN_CSRXQ_CASE(IB4ASID); + break; + GEN_CSRXQ_CASE(IB5ADDR); + break; + GEN_CSRXQ_CASE(IB5MASK); + break; + GEN_CSRXQ_CASE(IB5CTL); + break; + GEN_CSRXQ_CASE(IB5ASID); + break; + GEN_CSRXQ_CASE(IB6ADDR); + break; + 
GEN_CSRXQ_CASE(IB6MASK); + break; + GEN_CSRXQ_CASE(IB6CTL); + break; + GEN_CSRXQ_CASE(IB6ASID); + break; + GEN_CSRXQ_CASE(IB7ADDR); + break; + GEN_CSRXQ_CASE(IB7MASK); + break; + GEN_CSRXQ_CASE(IB7CTL); + break; + GEN_CSRXQ_CASE(IB7ASID); + break; + GEN_CSRXQ_CASE(DEBUG); + break; + GEN_CSRXQ_CASE(DERA); + break; + GEN_CSRXQ_CASE(DESAVE); + break; + default: + return false; + } + +#undef GEN_CSRXQ_CASE + tcg_temp_free(zero); + return true; +} + +#endif + +static bool trans_cacop(DisasContext *ctx, arg_cacop *a) +{ + /* Treat as NOP. */ + return true; +} + +#ifdef CONFIG_USER_ONLY + +static bool trans_ldpte(DisasContext *ctx, arg_ldpte *a) +{ + return false; +} + +static bool trans_lddir(DisasContext *ctx, arg_lddir *a) +{ + return false; +} + +static bool trans_iocsrrd_b(DisasContext *ctx, arg_iocsrrd_b *a) +{ + return false; +} + +static bool trans_iocsrrd_h(DisasContext *ctx, arg_iocsrrd_h *a) +{ + return false; +} + +static bool trans_iocsrrd_w(DisasContext *ctx, arg_iocsrrd_w *a) +{ + return false; +} + +static bool trans_iocsrrd_d(DisasContext *ctx, arg_iocsrrd_d *a) +{ + return false; +} + +static bool trans_iocsrwr_b(DisasContext *ctx, arg_iocsrwr_b *a) +{ + return false; +} + +static bool trans_iocsrwr_h(DisasContext *ctx, arg_iocsrwr_h *a) +{ + return false; +} + +static bool trans_iocsrwr_w(DisasContext *ctx, arg_iocsrwr_w *a) +{ + return false; +} + +static bool trans_iocsrwr_d(DisasContext *ctx, arg_iocsrwr_d *a) +{ + return false; +} +#else + +static bool trans_ldpte(DisasContext *ctx, arg_ldpte *a) +{ + TCGv t0, t1; + TCGv_i32 t2; + t0 = tcg_const_tl(a->rj); + t1 = tcg_const_tl(a->seq); + t2 = tcg_const_i32(ctx->mem_idx); + gen_helper_ldpte(cpu_env, t0, t1, t2); + + return true; +} + +static bool trans_lddir(DisasContext *ctx, arg_lddir *a) +{ + TCGv t0, t1, t2; + TCGv_i32 t3; + t0 = tcg_const_tl(a->rj); + t1 = tcg_const_tl(a->rd); + t2 = tcg_const_tl(a->level); + t3 = tcg_const_i32(ctx->mem_idx); + gen_helper_lddir(cpu_env, t0, t1, t2, t3); + + return true; +} + +static bool trans_iocsrrd_b(DisasContext *ctx, arg_iocsrrd_b *a) +{ + return false; +} + +static bool trans_iocsrrd_h(DisasContext *ctx, arg_iocsrrd_h *a) +{ + return false; +} + +static bool trans_iocsrrd_w(DisasContext *ctx, arg_iocsrrd_w *a) +{ + TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_LD_W); + TCGv t0, t1; + t0 = tcg_const_tl(a->rj); + t1 = tcg_const_tl(a->rd); + gen_helper_iocsr(cpu_env, t0, t1, iocsr_op); + return true; +} + +static bool trans_iocsrrd_d(DisasContext *ctx, arg_iocsrrd_d *a) +{ + TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_LD_D); + TCGv t0, t1; + t0 = tcg_const_tl(a->rj); + t1 = tcg_const_tl(a->rd); + gen_helper_iocsr(cpu_env, t0, t1, iocsr_op); + return true; +} + +static bool trans_iocsrwr_b(DisasContext *ctx, arg_iocsrwr_b *a) +{ + TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_B); + TCGv t0, t1; + t0 = tcg_const_tl(a->rj); + t1 = tcg_const_tl(a->rd); + gen_helper_iocsr(cpu_env, t0, t1, iocsr_op); + return true; +} + +static bool trans_iocsrwr_h(DisasContext *ctx, arg_iocsrwr_h *a) +{ + return false; +} + +static bool trans_iocsrwr_w(DisasContext *ctx, arg_iocsrwr_w *a) +{ + TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_W); + TCGv t0, t1; + t0 = tcg_const_tl(a->rj); + t1 = tcg_const_tl(a->rd); + gen_helper_iocsr(cpu_env, t0, t1, iocsr_op); + return true; +} + +static bool trans_iocsrwr_d(DisasContext *ctx, arg_iocsrwr_d *a) +{ + TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_D); + TCGv t0, t1; + t0 = tcg_const_tl(a->rj); + t1 = tcg_const_tl(a->rd); + gen_helper_iocsr(cpu_env, t0, t1, 
iocsr_op); + return true; +} +#endif /* !CONFIG_USER_ONLY */ + +#ifdef CONFIG_USER_ONLY + +#define GEN_FALSE_TRANS(name) \ + static bool trans_##name(DisasContext *ctx, arg_##name *a) \ + { \ + return false; \ + } + +GEN_FALSE_TRANS(tlbclr) +GEN_FALSE_TRANS(invtlb) +GEN_FALSE_TRANS(tlbflush) +GEN_FALSE_TRANS(tlbsrch) +GEN_FALSE_TRANS(tlbrd) +GEN_FALSE_TRANS(tlbwr) +GEN_FALSE_TRANS(tlbfill) +GEN_FALSE_TRANS(ertn) + +#else + +static bool trans_tlbclr(DisasContext *ctx, arg_tlbclr *a) +{ + gen_helper_tlbclr(cpu_env); + return true; +} + +static bool trans_tlbflush(DisasContext *ctx, arg_tlbflush *a) +{ + gen_helper_tlbflush(cpu_env); + return true; +} + +static bool trans_invtlb(DisasContext *ctx, arg_invtlb *a) +{ + TCGv addr = tcg_temp_new(); + TCGv info = tcg_temp_new(); + TCGv op = tcg_const_tl(a->invop); + + gen_load_gpr(addr, a->addr); + gen_load_gpr(info, a->info); + gen_helper_invtlb(cpu_env, addr, info, op); + + tcg_temp_free(addr); + tcg_temp_free(info); + tcg_temp_free(op); + return true; +} + +static bool trans_tlbsrch(DisasContext *ctx, arg_tlbsrch *a) +{ + gen_helper_tlbsrch(cpu_env); + return true; +} + +static bool trans_tlbrd(DisasContext *ctx, arg_tlbrd *a) +{ + gen_helper_tlbrd(cpu_env); + return true; +} + +static bool trans_tlbwr(DisasContext *ctx, arg_tlbwr *a) +{ + gen_helper_tlbwr(cpu_env); + return true; +} + +static bool trans_tlbfill(DisasContext *ctx, arg_tlbfill *a) +{ + gen_helper_tlbfill(cpu_env); + return true; +} + +static bool trans_ertn(DisasContext *ctx, arg_ertn *a) +{ + gen_helper_ertn(cpu_env); + ctx->base.is_jmp = DISAS_EXIT; + return true; +} + +#endif /* CONFIG_USER_ONLY */ + +static bool trans_idle(DisasContext *ctx, arg_idle *a) +{ + ctx->base.pc_next += 4; + save_cpu_state(ctx, 1); + ctx->base.pc_next -= 4; + gen_helper_idle(cpu_env); + ctx->base.is_jmp = DISAS_NORETURN; + return true; +} + +#ifdef CONFIG_USER_ONLY + +static bool trans_rdtime_d(DisasContext *ctx, arg_rdtime_d *a) +{ + /* Nop */ + return true; +} + +#else + +static bool trans_rdtime_d(DisasContext *ctx, arg_rdtime_d *a) +{ + TCGv t0, t1; + t0 = tcg_const_tl(a->rd); + t1 = tcg_const_tl(a->rj); + gen_helper_drdtime(cpu_env, t0, t1); + tcg_temp_free(t0); + tcg_temp_free(t1); + return true; +} + +#endif + +static bool trans_cpucfg(DisasContext *ctx, arg_cpucfg *a) +{ + TCGv t0 = tcg_temp_new(); + gen_load_gpr(t0, a->rj); + gen_helper_cpucfg(cpu_gpr[a->rd], cpu_env, t0); + tcg_temp_free(t0); + return true; +} diff --git a/target/loongarch64/translate.c b/target/loongarch64/translate.c new file mode 100644 index 0000000000000000000000000000000000000000..2c65e4826a48b58eb2b0f821ff1dc2f0403fc983 --- /dev/null +++ b/target/loongarch64/translate.c @@ -0,0 +1,2705 @@ +/* + * LOONGARCH emulation for QEMU - main translation routines + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see <http://www.gnu.org/licenses/>. 
+ * + */ + +#include "qemu/osdep.h" +#include "cpu.h" +#include "internal.h" +#include "disas/disas.h" +#include "exec/exec-all.h" +#include "tcg/tcg-op.h" +#include "exec/cpu_ldst.h" +#include "hw/loongarch/cpudevs.h" + +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" +#include "semihosting/semihost.h" + +#include "trace-tcg.h" +#include "exec/translator.h" +#include "exec/log.h" + +#include "instmap.h" + +#define LARCH_DEBUG_DISAS 0 + +/* Values for the fmt field in FP instructions */ +enum { + /* 0 - 15 are reserved */ + FMT_S = 16, /* single fp */ + FMT_D = 17, /* double fp */ +}; + +/* global register indices */ +static TCGv cpu_gpr[32], cpu_PC; +static TCGv btarget, bcond; +static TCGv cpu_lladdr, cpu_llval; +static TCGv_i32 hflags; +static TCGv_i32 fpu_fcsr0; +static TCGv_i64 fpu_f64[32]; + +#include "exec/gen-icount.h" + +#define gen_helper_0e0i(name, arg) \ + do { \ + TCGv_i32 helper_tmp = tcg_const_i32(arg); \ + gen_helper_##name(cpu_env, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +#define gen_helper_0e1i(name, arg1, arg2) \ + do { \ + TCGv_i32 helper_tmp = tcg_const_i32(arg2); \ + gen_helper_##name(cpu_env, arg1, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +#define gen_helper_1e0i(name, ret, arg1) \ + do { \ + TCGv_i32 helper_tmp = tcg_const_i32(arg1); \ + gen_helper_##name(ret, cpu_env, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +#define gen_helper_1e1i(name, ret, arg1, arg2) \ + do { \ + TCGv_i32 helper_tmp = tcg_const_i32(arg2); \ + gen_helper_##name(ret, cpu_env, arg1, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +#define gen_helper_0e2i(name, arg1, arg2, arg3) \ + do { \ + TCGv_i32 helper_tmp = tcg_const_i32(arg3); \ + gen_helper_##name(cpu_env, arg1, arg2, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +#define gen_helper_1e2i(name, ret, arg1, arg2, arg3) \ + do { \ + TCGv_i32 helper_tmp = tcg_const_i32(arg3); \ + gen_helper_##name(ret, cpu_env, arg1, arg2, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +#define gen_helper_0e3i(name, arg1, arg2, arg3, arg4) \ + do { \ + TCGv_i32 helper_tmp = tcg_const_i32(arg4); \ + gen_helper_##name(cpu_env, arg1, arg2, arg3, helper_tmp); \ + tcg_temp_free_i32(helper_tmp); \ + } while (0) + +typedef struct DisasContext { + DisasContextBase base; + target_ulong saved_pc; + target_ulong page_start; + uint32_t opcode; + uint64_t insn_flags; + /* Routine used to access memory */ + int mem_idx; + MemOp default_tcg_memop_mask; + uint32_t hflags, saved_hflags; + target_ulong btarget; +} DisasContext; + +#define DISAS_STOP DISAS_TARGET_0 +#define DISAS_EXIT DISAS_TARGET_1 + +#define LOG_DISAS(...) \ + do { \ + if (LARCH_DEBUG_DISAS) { \ + qemu_log_mask(CPU_LOG_TB_IN_ASM, ##__VA_ARGS__); \ + } \ + } while (0) + +#define LARCH_INVAL(op) \ + do { \ + if (LARCH_DEBUG_DISAS) { \ + qemu_log_mask(CPU_LOG_TB_IN_ASM, \ + TARGET_FMT_lx ": %08x Invalid %s %03x %03x %03x\n", \ + ctx->base.pc_next, ctx->opcode, op, \ + ctx->opcode >> 26, ctx->opcode & 0x3F, \ + ((ctx->opcode >> 16) & 0x1F)); \ + } \ + } while (0) + +/* General purpose registers moves. */ +static inline void gen_load_gpr(TCGv t, int reg) +{ + if (reg == 0) { + tcg_gen_movi_tl(t, 0); + } else { + tcg_gen_mov_tl(t, cpu_gpr[reg]); + } +} + +static inline void gen_store_gpr(TCGv t, int reg) +{ + if (reg != 0) { + tcg_gen_mov_tl(cpu_gpr[reg], t); + } +} + +/* Moves to/from shadow registers. 
*/ +/* Tests */ +static inline void gen_save_pc(target_ulong pc) +{ + tcg_gen_movi_tl(cpu_PC, pc); +} + +static inline void save_cpu_state(DisasContext *ctx, int do_save_pc) +{ + LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags); + if (do_save_pc && ctx->base.pc_next != ctx->saved_pc) { + gen_save_pc(ctx->base.pc_next); + ctx->saved_pc = ctx->base.pc_next; + } + if (ctx->hflags != ctx->saved_hflags) { + tcg_gen_movi_i32(hflags, ctx->hflags); + ctx->saved_hflags = ctx->hflags; + switch (ctx->hflags & LARCH_HFLAG_BMASK) { + case LARCH_HFLAG_BR: + break; + case LARCH_HFLAG_BC: + case LARCH_HFLAG_B: + tcg_gen_movi_tl(btarget, ctx->btarget); + break; + } + } +} + +static inline void restore_cpu_state(CPULOONGARCHState *env, DisasContext *ctx) +{ + ctx->saved_hflags = ctx->hflags; + switch (ctx->hflags & LARCH_HFLAG_BMASK) { + case LARCH_HFLAG_BR: + break; + case LARCH_HFLAG_BC: + case LARCH_HFLAG_B: + ctx->btarget = env->btarget; + break; + } +} + +static inline void generate_exception_err(DisasContext *ctx, int excp, int err) +{ + TCGv_i32 texcp = tcg_const_i32(excp); + TCGv_i32 terr = tcg_const_i32(err); + save_cpu_state(ctx, 1); + gen_helper_raise_exception_err(cpu_env, texcp, terr); + tcg_temp_free_i32(terr); + tcg_temp_free_i32(texcp); + ctx->base.is_jmp = DISAS_NORETURN; +} + +static inline void generate_exception_end(DisasContext *ctx, int excp) +{ + generate_exception_err(ctx, excp, 0); +} + +/* Floating point register moves. */ +static void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg) +{ + tcg_gen_extrl_i64_i32(t, fpu_f64[reg]); +} + +static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg) +{ + TCGv_i64 t64; + t64 = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(t64, t); + tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32); + tcg_temp_free_i64(t64); +} + +static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) +{ + tcg_gen_extrh_i64_i32(t, fpu_f64[reg]); +} + +static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg) +{ + TCGv_i64 t64 = tcg_temp_new_i64(); + tcg_gen_extu_i32_i64(t64, t); + tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32); + tcg_temp_free_i64(t64); +} + +static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) +{ + tcg_gen_mov_i64(t, fpu_f64[reg]); +} + +static void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg) +{ + tcg_gen_mov_i64(fpu_f64[reg], t); +} + +static inline int get_fp_bit(int cc) +{ + if (cc) { + return 24 + cc; + } else { + return 23; + } +} + +/* Addresses computation */ +static inline void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0, + TCGv arg1) +{ + tcg_gen_add_tl(ret, arg0, arg1); + + if (ctx->hflags & LARCH_HFLAG_AWRAP) { + tcg_gen_ext32s_i64(ret, ret); + } +} + +static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base, + target_long ofs) +{ + tcg_gen_addi_tl(ret, base, ofs); + + if (ctx->hflags & LARCH_HFLAG_AWRAP) { + tcg_gen_ext32s_i64(ret, ret); + } +} + +/* Sign-extract the low 32-bits to a target_long. */ +static inline void gen_move_low32(TCGv ret, TCGv_i64 arg) +{ + tcg_gen_ext32s_i64(ret, arg); +} + +/* Sign-extract the high 32-bits to a target_long. 
*/ +static inline void gen_move_high32(TCGv ret, TCGv_i64 arg) +{ + tcg_gen_sari_i64(ret, arg, 32); +} + +static inline void check_cp1_enabled(DisasContext *ctx) +{ +#ifndef CONFIG_USER_ONLY + if (unlikely(!(ctx->hflags & LARCH_HFLAG_FPU))) { + generate_exception_err(ctx, EXCP_FPDIS, 1); + } +#endif +} + +static inline void check_lsx_enabled(DisasContext *ctx) +{ +#ifndef CONFIG_USER_ONLY + if (unlikely(!(ctx->hflags & LARCH_HFLAG_LSX))) { + generate_exception_err(ctx, EXCP_LSXDIS, 1); + } +#endif +} + +static inline void check_lasx_enabled(DisasContext *ctx) +{ +#ifndef CONFIG_USER_ONLY + if (unlikely(!(ctx->hflags & LARCH_HFLAG_LASX))) { + generate_exception_err(ctx, EXCP_LASXDIS, 1); + } +#endif +} + +static inline void check_lbt_enabled(DisasContext *ctx) +{ +#ifndef CONFIG_USER_ONLY + if (unlikely(!(ctx->hflags & LARCH_HFLAG_LBT))) { + generate_exception_err(ctx, EXCP_BTDIS, 1); + } +#endif +} + +/* + * This code generates a "reserved instruction" exception if the + * CPU does not support the instruction set corresponding to flags. + */ +static inline void check_insn(DisasContext *ctx, uint64_t flags) +{ + if (unlikely(!(ctx->insn_flags & flags))) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * This code generates a "reserved instruction" exception if the + * CPU has corresponding flag set which indicates that the instruction + * has been removed. + */ +static inline void check_insn_opc_removed(DisasContext *ctx, uint64_t flags) +{ + if (unlikely(ctx->insn_flags & flags)) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * The Linux kernel traps certain reserved instruction exceptions to + * emulate the corresponding instructions. QEMU is the kernel in user + * mode, so those traps are emulated by accepting the instructions. + * + * A reserved instruction exception is generated for flagged CPUs if + * QEMU runs in system mode. + */ +static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags) +{ +#ifndef CONFIG_USER_ONLY + check_insn_opc_removed(ctx, flags); +#endif +} + +/* + * This code generates a "reserved instruction" exception if 64-bit + * instructions are not enabled. + */ +static inline void check_larch_64(DisasContext *ctx) +{ + if (unlikely(!(ctx->hflags & LARCH_HFLAG_64))) { + generate_exception_end(ctx, EXCP_RI); + } +} + +/* + * Define small wrappers for gen_load_fpr* so that we have a uniform + * calling interface for 32 and 64-bit FPRs. No sense in changing + * all callers for gen_load_fpr32 when we need the CTX parameter for + * this one use. 
+ */ +#define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y) +#define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y) +#define FCOP_CONDNS(fmt, ifmt, bits, STORE) \ + static inline void gen_fcmp_##fmt(DisasContext *ctx, int n, int ft, \ + int fs, int cd) \ + { \ + TCGv_i##bits fp0 = tcg_temp_new_i##bits(); \ + TCGv_i##bits fp1 = tcg_temp_new_i##bits(); \ + TCGv_i32 fcc = tcg_const_i32(cd); \ + check_cp1_enabled(ctx); \ + gen_ldcmp_fpr##bits(ctx, fp0, fs); \ + gen_ldcmp_fpr##bits(ctx, fp1, ft); \ + switch (n) { \ + case 0: \ + gen_helper_cmp_##fmt##_af(fp0, cpu_env, fp0, fp1); \ + break; \ + case 1: \ + gen_helper_cmp_##fmt##_saf(fp0, cpu_env, fp0, fp1); \ + break; \ + case 2: \ + gen_helper_cmp_##fmt##_lt(fp0, cpu_env, fp0, fp1); \ + break; \ + case 3: \ + gen_helper_cmp_##fmt##_slt(fp0, cpu_env, fp0, fp1); \ + break; \ + case 4: \ + gen_helper_cmp_##fmt##_eq(fp0, cpu_env, fp0, fp1); \ + break; \ + case 5: \ + gen_helper_cmp_##fmt##_seq(fp0, cpu_env, fp0, fp1); \ + break; \ + case 6: \ + gen_helper_cmp_##fmt##_le(fp0, cpu_env, fp0, fp1); \ + break; \ + case 7: \ + gen_helper_cmp_##fmt##_sle(fp0, cpu_env, fp0, fp1); \ + break; \ + case 8: \ + gen_helper_cmp_##fmt##_un(fp0, cpu_env, fp0, fp1); \ + break; \ + case 9: \ + gen_helper_cmp_##fmt##_sun(fp0, cpu_env, fp0, fp1); \ + break; \ + case 10: \ + gen_helper_cmp_##fmt##_ult(fp0, cpu_env, fp0, fp1); \ + break; \ + case 11: \ + gen_helper_cmp_##fmt##_sult(fp0, cpu_env, fp0, fp1); \ + break; \ + case 12: \ + gen_helper_cmp_##fmt##_ueq(fp0, cpu_env, fp0, fp1); \ + break; \ + case 13: \ + gen_helper_cmp_##fmt##_sueq(fp0, cpu_env, fp0, fp1); \ + break; \ + case 14: \ + gen_helper_cmp_##fmt##_ule(fp0, cpu_env, fp0, fp1); \ + break; \ + case 15: \ + gen_helper_cmp_##fmt##_sule(fp0, cpu_env, fp0, fp1); \ + break; \ + case 16: \ + gen_helper_cmp_##fmt##_ne(fp0, cpu_env, fp0, fp1); \ + break; \ + case 17: \ + gen_helper_cmp_##fmt##_sne(fp0, cpu_env, fp0, fp1); \ + break; \ + case 20: \ + gen_helper_cmp_##fmt##_or(fp0, cpu_env, fp0, fp1); \ + break; \ + case 21: \ + gen_helper_cmp_##fmt##_sor(fp0, cpu_env, fp0, fp1); \ + break; \ + case 24: \ + gen_helper_cmp_##fmt##_une(fp0, cpu_env, fp0, fp1); \ + break; \ + case 25: \ + gen_helper_cmp_##fmt##_sune(fp0, cpu_env, fp0, fp1); \ + break; \ + default: \ + abort(); \ + } \ + STORE; \ + tcg_temp_free_i##bits(fp0); \ + tcg_temp_free_i##bits(fp1); \ + tcg_temp_free_i32(fcc); \ + } + +FCOP_CONDNS(d, FMT_D, 64, gen_helper_movreg2cf_i64(cpu_env, fcc, fp0)) +FCOP_CONDNS(s, FMT_S, 32, gen_helper_movreg2cf_i32(cpu_env, fcc, fp0)) +#undef FCOP_CONDNS +#undef gen_ldcmp_fpr32 +#undef gen_ldcmp_fpr64 + +/* load/store instructions. 
*/ +#ifdef CONFIG_USER_ONLY +#define OP_LD_ATOMIC(insn, fname) \ + static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ + DisasContext *ctx) \ + { \ + TCGv t0 = tcg_temp_new(); \ + tcg_gen_mov_tl(t0, arg1); \ + tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx); \ + tcg_gen_st_tl(t0, cpu_env, offsetof(CPULOONGARCHState, lladdr)); \ + tcg_gen_st_tl(ret, cpu_env, offsetof(CPULOONGARCHState, llval)); \ + tcg_temp_free(t0); \ + } +#else +#define OP_LD_ATOMIC(insn, fname) \ + static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx, \ + DisasContext *ctx) \ + { \ + gen_helper_1e1i(insn, ret, arg1, mem_idx); \ + } +#endif + +static void gen_base_offset_addr(DisasContext *ctx, TCGv addr, int base, + int offset) +{ + if (base == 0) { + tcg_gen_movi_tl(addr, offset); + } else if (offset == 0) { + gen_load_gpr(addr, base); + } else { + tcg_gen_movi_tl(addr, offset); + gen_op_addr_add(ctx, addr, cpu_gpr[base], addr); + } +} + +/* Load */ +static void gen_ld(DisasContext *ctx, uint32_t opc, int rt, int base, + int offset) +{ + TCGv t0; + int mem_idx = ctx->mem_idx; + + t0 = tcg_temp_new(); + gen_base_offset_addr(ctx, t0, base, offset); + + switch (opc) { + case OPC_LARCH_LD_WU: + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, + MO_TEUL | ctx->default_tcg_memop_mask); + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_LDPTR_D: + case OPC_LARCH_LD_D: + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, + MO_TEQ | ctx->default_tcg_memop_mask); + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_LL_D: + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_LDPTR_W: + case OPC_LARCH_LD_W: + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, + MO_TESL | ctx->default_tcg_memop_mask); + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_LD_H: + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, + MO_TESW | ctx->default_tcg_memop_mask); + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_LD_HU: + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, + MO_TEUW | ctx->default_tcg_memop_mask); + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_LD_B: + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_SB); + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_LD_BU: + tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_UB); + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_LL_W: + gen_store_gpr(t0, rt); + break; + } + + tcg_temp_free(t0); +} + +/* Store */ +static void gen_st(DisasContext *ctx, uint32_t opc, int rt, int base, + int offset) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + int mem_idx = ctx->mem_idx; + + gen_base_offset_addr(ctx, t0, base, offset); + gen_load_gpr(t1, rt); + + switch (opc) { + case OPC_LARCH_STPTR_D: + case OPC_LARCH_ST_D: + tcg_gen_qemu_st_tl(t1, t0, mem_idx, + MO_TEQ | ctx->default_tcg_memop_mask); + break; + case OPC_LARCH_STPTR_W: + case OPC_LARCH_ST_W: + tcg_gen_qemu_st_tl(t1, t0, mem_idx, + MO_TEUL | ctx->default_tcg_memop_mask); + break; + case OPC_LARCH_ST_H: + tcg_gen_qemu_st_tl(t1, t0, mem_idx, + MO_TEUW | ctx->default_tcg_memop_mask); + break; + case OPC_LARCH_ST_B: + tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8); + break; + } + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +/* Store conditional */ +static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset, + MemOp tcg_mo, bool eva) +{ + TCGv addr, t0, val; + TCGLabel *l1 = gen_new_label(); + TCGLabel *done = gen_new_label(); + + t0 = tcg_temp_new(); + addr = tcg_temp_new(); + /* compare the address against that of the preceding LL */ + gen_base_offset_addr(ctx, addr, base, offset); + tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1); + tcg_temp_free(addr); + tcg_gen_movi_tl(t0, 0); + 
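/*
 * The SC path below emulates the LL/SC pair with a compare-and-swap
 * against the value captured by the last LL. A standalone C11 sketch of
 * the same strategy (names are illustrative, not QEMU's):
 *
 *     #include <assert.h>
 *     #include <stdatomic.h>
 *     #include <stdint.h>
 *
 *     static _Atomic uint64_t mem;
 *     static uint64_t llval;          // snapshot taken by LL
 *
 *     static uint64_t ll(void) { return llval = atomic_load(&mem); }
 *
 *     static int sc(uint64_t newval)  // 1 on success, 0 on failure
 *     {
 *         uint64_t expected = llval;
 *         return atomic_compare_exchange_strong(&mem, &expected, newval);
 *     }
 *
 *     int main(void)
 *     {
 *         atomic_store(&mem, 7);
 *         ll();
 *         assert(sc(9) && atomic_load(&mem) == 9);  // undisturbed: succeeds
 *         ll();
 *         atomic_store(&mem, 11);                   // intervening store
 *         assert(!sc(13));                          // SC reports failure
 *         return 0;
 *     }
 */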
gen_store_gpr(t0, rt); + tcg_gen_br(done); + + gen_set_label(l1); + /* generate cmpxchg */ + val = tcg_temp_new(); + gen_load_gpr(val, rt); + tcg_gen_atomic_cmpxchg_tl(t0, cpu_lladdr, cpu_llval, val, + eva ? LARCH_HFLAG_UM : ctx->mem_idx, tcg_mo); + tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_llval); + gen_store_gpr(t0, rt); + tcg_temp_free(val); + + gen_set_label(done); + tcg_temp_free(t0); +} + +/* Load and store */ +static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, TCGv t0) +{ + /* + * Don't do NOP if destination is zero: we must perform the actual + * memory access. + */ + switch (opc) { + case OPC_LARCH_FLD_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx, + MO_TESL | ctx->default_tcg_memop_mask); + gen_store_fpr32(ctx, fp0, ft); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FST_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + gen_load_fpr32(ctx, fp0, ft); + tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, + MO_TEUL | ctx->default_tcg_memop_mask); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FLD_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, + MO_TEQ | ctx->default_tcg_memop_mask); + gen_store_fpr64(ctx, fp0, ft); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FST_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp0, ft); + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, + MO_TEQ | ctx->default_tcg_memop_mask); + tcg_temp_free_i64(fp0); + } break; + default: + LARCH_INVAL("flt_ldst"); + generate_exception_end(ctx, EXCP_RI); + break; + } +} + +static void gen_fp_ldst(DisasContext *ctx, uint32_t op, int rt, int rs, + int16_t imm) +{ + TCGv t0 = tcg_temp_new(); + + check_cp1_enabled(ctx); + gen_base_offset_addr(ctx, t0, rs, imm); + gen_flt_ldst(ctx, op, rt, t0); + tcg_temp_free(t0); +} + +/* Arithmetic with immediate operand */ +static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, + int imm) +{ + target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ + + if (rt == 0) { + /* + * If no destination, treat it as a NOP. + * For addi, we must generate the overflow exception when needed. + */ + return; + } + switch (opc) { + case OPC_LARCH_ADDI_W: + if (rs != 0) { + tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm); + tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]); + } else { + tcg_gen_movi_tl(cpu_gpr[rt], uimm); + } + break; + case OPC_LARCH_ADDI_D: + if (rs != 0) { + tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm); + } else { + tcg_gen_movi_tl(cpu_gpr[rt], uimm); + } + break; + } +} + +/* Logic with immediate operand */ +static void gen_logic_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, + int16_t imm) +{ + target_ulong uimm; + + if (rt == 0) { + /* If no destination, treat it as a NOP. 
*/ + return; + } + uimm = (uint16_t)imm; + switch (opc) { + case OPC_LARCH_ANDI: + if (likely(rs != 0)) { + tcg_gen_andi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm); + } else { + tcg_gen_movi_tl(cpu_gpr[rt], 0); + } + break; + case OPC_LARCH_ORI: + if (rs != 0) { + tcg_gen_ori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm); + } else { + tcg_gen_movi_tl(cpu_gpr[rt], uimm); + } + break; + case OPC_LARCH_XORI: + if (likely(rs != 0)) { + tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm); + } else { + tcg_gen_movi_tl(cpu_gpr[rt], uimm); + } + break; + default: + break; + } +} + +/* Set on less than with immediate operand */ +static void gen_slt_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, + int16_t imm) +{ + target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */ + TCGv t0; + + if (rt == 0) { + /* If no destination, treat it as a NOP. */ + return; + } + t0 = tcg_temp_new(); + gen_load_gpr(t0, rs); + switch (opc) { + case OPC_LARCH_SLTI: + tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr[rt], t0, uimm); + break; + case OPC_LARCH_SLTIU: + tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm); + break; + } + tcg_temp_free(t0); +} + +/* Shifts with immediate operand */ +static void gen_shift_imm(DisasContext *ctx, uint32_t opc, int rt, int rs, + int16_t imm) +{ + target_ulong uimm = ((uint16_t)imm) & 0x1f; + TCGv t0; + + if (rt == 0) { + /* If no destination, treat it as a NOP. */ + return; + } + + t0 = tcg_temp_new(); + gen_load_gpr(t0, rs); + switch (opc) { + case OPC_LARCH_SRAI_W: + tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm); + break; + case OPC_LARCH_SRLI_W: + if (uimm != 0) { + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm); + } else { + tcg_gen_ext32s_tl(cpu_gpr[rt], t0); + } + break; + case OPC_LARCH_ROTRI_W: + if (uimm != 0) { + TCGv_i32 t1 = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(t1, t0); + tcg_gen_rotri_i32(t1, t1, uimm); + tcg_gen_ext_i32_tl(cpu_gpr[rt], t1); + tcg_temp_free_i32(t1); + } else { + tcg_gen_ext32s_tl(cpu_gpr[rt], t0); + } + break; + } + tcg_temp_free(t0); +} + +/* Arithmetic */ +static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) +{ + if (rd == 0) { + /* + * If no destination, treat it as a NOP. + * For add & sub, we must generate the + * overflow exception when needed. 
+ */ + return; + } + + switch (opc) { + case OPC_LARCH_ADD_W: + if (rs != 0 && rt != 0) { + tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(cpu_gpr[rd], 0); + } + break; + case OPC_LARCH_SUB_W: + if (rs != 0 && rt != 0) { + tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } else if (rs == 0 && rt != 0) { + tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]); + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(cpu_gpr[rd], 0); + } + break; + case OPC_LARCH_ADD_D: + if (rs != 0 && rt != 0) { + tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(cpu_gpr[rd], 0); + } + break; + case OPC_LARCH_SUB_D: + if (rs != 0 && rt != 0) { + tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(cpu_gpr[rd], 0); + } + break; + } +} + +/* Conditional move */ +static void gen_cond_move(DisasContext *ctx, uint32_t opc, int rd, int rs, + int rt) +{ + TCGv t0, t1, t2; + + if (rd == 0) { + /* If no destination, treat it as a NOP. */ + return; + } + + t0 = tcg_temp_new(); + gen_load_gpr(t0, rt); + t1 = tcg_const_tl(0); + t2 = tcg_temp_new(); + gen_load_gpr(t2, rs); + switch (opc) { + case OPC_LARCH_MASKEQZ: + tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rd], t0, t1, t2, t1); + break; + case OPC_LARCH_MASKNEZ: + tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, t1); + break; + } + tcg_temp_free(t2); + tcg_temp_free(t1); + tcg_temp_free(t0); +} + +/* Logic */ +static void gen_logic(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) +{ + if (rd == 0) { + /* If no destination, treat it as a NOP. 
*/ + return; + } + + switch (opc) { + case OPC_LARCH_AND: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_and_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); + } else { + tcg_gen_movi_tl(cpu_gpr[rd], 0); + } + break; + case OPC_LARCH_NOR: + if (rs != 0 && rt != 0) { + tcg_gen_nor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(cpu_gpr[rd], ~((target_ulong)0)); + } + break; + case OPC_LARCH_OR: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_or_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(cpu_gpr[rd], 0); + } + break; + case OPC_LARCH_XOR: + if (likely(rs != 0 && rt != 0)) { + tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]); + } else if (rs == 0 && rt != 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]); + } else if (rs != 0 && rt == 0) { + tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]); + } else { + tcg_gen_movi_tl(cpu_gpr[rd], 0); + } + break; + } +} + +/* Set on lower than */ +static void gen_slt(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) +{ + TCGv t0, t1; + + if (rd == 0) { + /* If no destination, treat it as a NOP. */ + return; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + switch (opc) { + case OPC_LARCH_SLT: + tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr[rd], t0, t1); + break; + case OPC_LARCH_SLTU: + tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1); + break; + } + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +/* Shifts */ +static void gen_shift(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt) +{ + TCGv t0, t1; + + if (rd == 0) { + /* + * If no destination, treat it as a NOP. + * For add & sub, we must generate the + * overflow exception when needed. 
+ */ + return; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + switch (opc) { + case OPC_LARCH_SLL_W: + tcg_gen_andi_tl(t0, t0, 0x1f); + tcg_gen_shl_tl(t0, t1, t0); + tcg_gen_ext32s_tl(cpu_gpr[rd], t0); + break; + case OPC_LARCH_SRA_W: + tcg_gen_andi_tl(t0, t0, 0x1f); + tcg_gen_sar_tl(cpu_gpr[rd], t1, t0); + break; + case OPC_LARCH_SRL_W: + tcg_gen_ext32u_tl(t1, t1); + tcg_gen_andi_tl(t0, t0, 0x1f); + tcg_gen_shr_tl(t0, t1, t0); + tcg_gen_ext32s_tl(cpu_gpr[rd], t0); + break; + case OPC_LARCH_ROTR_W: { + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(t2, t0); + tcg_gen_trunc_tl_i32(t3, t1); + tcg_gen_andi_i32(t2, t2, 0x1f); + tcg_gen_rotr_i32(t2, t3, t2); + tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t3); + } break; + case OPC_LARCH_SLL_D: + tcg_gen_andi_tl(t0, t0, 0x3f); + tcg_gen_shl_tl(cpu_gpr[rd], t1, t0); + break; + case OPC_LARCH_SRA_D: + tcg_gen_andi_tl(t0, t0, 0x3f); + tcg_gen_sar_tl(cpu_gpr[rd], t1, t0); + break; + case OPC_LARCH_SRL_D: + tcg_gen_andi_tl(t0, t0, 0x3f); + tcg_gen_shr_tl(cpu_gpr[rd], t1, t0); + break; + case OPC_LARCH_ROTR_D: + tcg_gen_andi_tl(t0, t0, 0x3f); + tcg_gen_rotr_tl(cpu_gpr[rd], t1, t0); + break; + } + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +static inline void gen_r6_ld(target_long addr, int reg, int memidx, + MemOp memop) +{ + TCGv t0 = tcg_const_tl(addr); + tcg_gen_qemu_ld_tl(t0, t0, memidx, memop); + gen_store_gpr(t0, reg); + tcg_temp_free(t0); +} + +static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt) +{ + TCGv t0, t1; + + if (rd == 0) { + /* Treat as NOP. */ + return; + } + + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + + switch (opc) { + case OPC_LARCH_DIV_W: { + TCGv t2 = tcg_temp_new(); + TCGv t3 = tcg_temp_new(); + tcg_gen_ext32s_tl(t0, t0); + tcg_gen_ext32s_tl(t1, t1); + tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(t2, t2, t3); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(t2, t2, t3); + tcg_gen_movi_tl(t3, 0); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + tcg_temp_free(t3); + tcg_temp_free(t2); + } break; + case OPC_LARCH_MOD_W: { + TCGv t2 = tcg_temp_new(); + TCGv t3 = tcg_temp_new(); + tcg_gen_ext32s_tl(t0, t0); + tcg_gen_ext32s_tl(t1, t1); + tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1); + tcg_gen_and_tl(t2, t2, t3); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(t2, t2, t3); + tcg_gen_movi_tl(t3, 0); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + tcg_temp_free(t3); + tcg_temp_free(t2); + } break; + case OPC_LARCH_DIV_WU: { + TCGv t2 = tcg_const_tl(0); + TCGv t3 = tcg_const_tl(1); + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_ext32u_tl(t1, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_tl(cpu_gpr[rd], t0, t1); + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + tcg_temp_free(t3); + tcg_temp_free(t2); + } break; + case OPC_LARCH_MOD_WU: { + TCGv t2 = tcg_const_tl(0); + TCGv t3 = tcg_const_tl(1); + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_ext32u_tl(t1, t1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_remu_tl(cpu_gpr[rd], t0, t1); + 
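/*
 * The movcond sequences in the signed DIV/MOD cases above rewrite the
 * divisor before the host division so that neither INT_MIN / -1 nor a
 * zero divisor can fault; both hazards force the divisor to 1. The same
 * guard as a runnable sketch:
 *
 *     #include <assert.h>
 *     #include <stdint.h>
 *
 *     static int32_t div_w(int32_t a, int32_t b)
 *     {
 *         // substitute a harmless divisor when the real one would trap
 *         if ((a == INT32_MIN && b == -1) || b == 0) {
 *             b = 1;
 *         }
 *         return a / b;
 *     }
 *
 *     int main(void)
 *     {
 *         assert(div_w(INT32_MIN, -1) == INT32_MIN);  // no SIGFPE
 *         assert(div_w(10, 0) == 10);                 // no SIGFPE
 *         assert(div_w(10, 2) == 5);
 *         return 0;
 *     }
 */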
tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + tcg_temp_free(t3); + tcg_temp_free(t2); + } break; + case OPC_LARCH_MUL_W: { + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t2, t0); + tcg_gen_trunc_tl_i32(t3, t1); + tcg_gen_mul_i32(t2, t2, t3); + tcg_gen_ext_i32_tl(cpu_gpr[rd], t2); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t3); + } break; + case OPC_LARCH_MULH_W: { + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t2, t0); + tcg_gen_trunc_tl_i32(t3, t1); + tcg_gen_muls2_i32(t2, t3, t2, t3); + tcg_gen_ext_i32_tl(cpu_gpr[rd], t3); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t3); + } break; + case OPC_LARCH_MULH_WU: { + TCGv_i32 t2 = tcg_temp_new_i32(); + TCGv_i32 t3 = tcg_temp_new_i32(); + tcg_gen_trunc_tl_i32(t2, t0); + tcg_gen_trunc_tl_i32(t3, t1); + tcg_gen_mulu2_i32(t2, t3, t2, t3); + tcg_gen_ext_i32_tl(cpu_gpr[rd], t3); + tcg_temp_free_i32(t2); + tcg_temp_free_i32(t3); + } break; + case OPC_LARCH_DIV_D: { + TCGv t2 = tcg_temp_new(); + TCGv t3 = tcg_temp_new(); + tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL); + tcg_gen_and_tl(t2, t2, t3); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(t2, t2, t3); + tcg_gen_movi_tl(t3, 0); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_div_tl(cpu_gpr[rd], t0, t1); + tcg_temp_free(t3); + tcg_temp_free(t2); + } break; + case OPC_LARCH_MOD_D: { + TCGv t2 = tcg_temp_new(); + TCGv t3 = tcg_temp_new(); + tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL); + tcg_gen_and_tl(t2, t2, t3); + tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0); + tcg_gen_or_tl(t2, t2, t3); + tcg_gen_movi_tl(t3, 0); + tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1); + tcg_gen_rem_tl(cpu_gpr[rd], t0, t1); + tcg_temp_free(t3); + tcg_temp_free(t2); + } break; + case OPC_LARCH_DIV_DU: { + TCGv t2 = tcg_const_tl(0); + TCGv t3 = tcg_const_tl(1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_divu_i64(cpu_gpr[rd], t0, t1); + tcg_temp_free(t3); + tcg_temp_free(t2); + } break; + case OPC_LARCH_MOD_DU: { + TCGv t2 = tcg_const_tl(0); + TCGv t3 = tcg_const_tl(1); + tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1); + tcg_gen_remu_i64(cpu_gpr[rd], t0, t1); + tcg_temp_free(t3); + tcg_temp_free(t2); + } break; + case OPC_LARCH_MUL_D: + tcg_gen_mul_i64(cpu_gpr[rd], t0, t1); + break; + case OPC_LARCH_MULH_D: { + TCGv t2 = tcg_temp_new(); + tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1); + tcg_temp_free(t2); + } break; + case OPC_LARCH_MULH_DU: { + TCGv t2 = tcg_temp_new(); + tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1); + tcg_temp_free(t2); + } break; + default: + LARCH_INVAL("r6 mul/div"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } +out: + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +static void gen_cl(DisasContext *ctx, uint32_t opc, int rd, int rs) +{ + TCGv t0; + + if (rd == 0) { + /* Treat as NOP. 
*/ + return; + } + t0 = cpu_gpr[rd]; + gen_load_gpr(t0, rs); + + switch (opc) { + case OPC_LARCH_CLO_W: + case OPC_LARCH_CLO_D: + tcg_gen_not_tl(t0, t0); + break; + } + + switch (opc) { + case OPC_LARCH_CLO_W: + case OPC_LARCH_CLZ_W: + tcg_gen_ext32u_tl(t0, t0); + tcg_gen_clzi_tl(t0, t0, TARGET_LONG_BITS); + tcg_gen_subi_tl(t0, t0, TARGET_LONG_BITS - 32); + break; + case OPC_LARCH_CLO_D: + case OPC_LARCH_CLZ_D: + tcg_gen_clzi_i64(t0, t0, 64); + break; + } +} + +static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest) +{ + if (unlikely(ctx->base.singlestep_enabled)) { + return false; + } + +#ifndef CONFIG_USER_ONLY + return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK); +#else + return true; +#endif +} + +static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest) +{ + if (use_goto_tb(ctx, dest)) { + tcg_gen_goto_tb(n); + gen_save_pc(dest); + tcg_gen_exit_tb(ctx->base.tb, n); + } else { + gen_save_pc(dest); + if (ctx->base.singlestep_enabled) { + save_cpu_state(ctx, 0); + gen_helper_raise_exception_debug(cpu_env); + } + tcg_gen_lookup_and_goto_ptr(); + } +} + +/* Branches */ +static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int insn_bytes, + int rs, int rt, int32_t offset) +{ + target_ulong btgt = -1; + int bcond_compute = 0; + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + if (ctx->hflags & LARCH_HFLAG_BMASK) { +#ifdef LARCH_DEBUG_DISAS + LOG_DISAS("Branch at PC 0x" TARGET_FMT_lx "\n", ctx->base.pc_next); +#endif + generate_exception_end(ctx, EXCP_RI); + goto out; + } + + /* Load needed operands */ + switch (opc) { + case OPC_LARCH_BLT: + case OPC_LARCH_BGE: + case OPC_LARCH_BLTU: + case OPC_LARCH_BGEU: + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + bcond_compute = 1; + btgt = ctx->base.pc_next + offset; + break; + case OPC_LARCH_BEQZ: + case OPC_LARCH_B: + case OPC_LARCH_BEQ: + case OPC_LARCH_BNEZ: + case OPC_LARCH_BNE: + /* Compare two registers */ + if (rs != rt) { + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + bcond_compute = 1; + } + btgt = ctx->base.pc_next + offset; + break; + default: + LARCH_INVAL("branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + if (bcond_compute == 0) { + /* No condition to be computed */ + switch (opc) { + case OPC_LARCH_BEQZ: /* rx == rx */ + case OPC_LARCH_B: + case OPC_LARCH_BEQ: + /* Always take */ + ctx->hflags |= LARCH_HFLAG_B; + break; + case OPC_LARCH_BNEZ: + case OPC_LARCH_BNE: + /* Treat as NOP. 
*/ + goto out; + default: + LARCH_INVAL("branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + } else { + switch (opc) { + case OPC_LARCH_BLT: + tcg_gen_setcond_tl(TCG_COND_LT, bcond, t0, t1); + goto not_likely; + case OPC_LARCH_BGE: + tcg_gen_setcond_tl(TCG_COND_GE, bcond, t0, t1); + goto not_likely; + case OPC_LARCH_BLTU: + tcg_gen_setcond_tl(TCG_COND_LTU, bcond, t0, t1); + goto not_likely; + case OPC_LARCH_BGEU: + tcg_gen_setcond_tl(TCG_COND_GEU, bcond, t0, t1); + goto not_likely; + case OPC_LARCH_BEQZ: + case OPC_LARCH_B: + case OPC_LARCH_BEQ: + tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1); + goto not_likely; + case OPC_LARCH_BNEZ: + case OPC_LARCH_BNE: + tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1); + goto not_likely; + not_likely: + ctx->hflags |= LARCH_HFLAG_BC; + break; + default: + LARCH_INVAL("conditional branch/jump"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + } + + ctx->btarget = btgt; + +out: + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +/* special3 bitfield operations */ +static void gen_bitops(DisasContext *ctx, uint32_t opc, int rt, int rs, + int lsb, int msb) +{ + TCGv t0 = tcg_temp_new(); + TCGv t1 = tcg_temp_new(); + + gen_load_gpr(t1, rs); + switch (opc) { + case OPC_LARCH_TRPICK_W: + if (lsb + msb > 31) { + goto fail; + } + if (msb != 31) { + tcg_gen_extract_tl(t0, t1, lsb, msb + 1); + } else { + /* + * The two checks together imply that lsb == 0, + * so this is a simple sign-extension. + */ + tcg_gen_ext32s_tl(t0, t1); + } + break; + case OPC_LARCH_TRINS_W: + if (lsb > msb) { + goto fail; + } + gen_load_gpr(t0, rt); + tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1); + tcg_gen_ext32s_tl(t0, t0); + break; + default: + fail: + LARCH_INVAL("bitops"); + generate_exception_end(ctx, EXCP_RI); + tcg_temp_free(t0); + tcg_temp_free(t1); + return; + } + gen_store_gpr(t0, rt); + tcg_temp_free(t0); + tcg_temp_free(t1); +} + +static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd) +{ + TCGv t0; + + if (rd == 0) { + /* If no destination, treat it as a NOP. 
*/ + return; + } + + t0 = tcg_temp_new(); + gen_load_gpr(t0, rt); + switch (op2) { + case OPC_LARCH_REVB_2H: { + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_const_tl(0x00FF00FF); + + tcg_gen_shri_tl(t1, t0, 8); + tcg_gen_and_tl(t1, t1, t2); + tcg_gen_and_tl(t0, t0, t2); + tcg_gen_shli_tl(t0, t0, 8); + tcg_gen_or_tl(t0, t0, t1); + tcg_temp_free(t2); + tcg_temp_free(t1); + tcg_gen_ext32s_tl(cpu_gpr[rd], t0); + } break; + case OPC_LARCH_EXT_WB: + tcg_gen_ext8s_tl(cpu_gpr[rd], t0); + break; + case OPC_LARCH_EXT_WH: + tcg_gen_ext16s_tl(cpu_gpr[rd], t0); + break; + case OPC_LARCH_REVB_4H: { + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_const_tl(0x00FF00FF00FF00FFULL); + + tcg_gen_shri_tl(t1, t0, 8); + tcg_gen_and_tl(t1, t1, t2); + tcg_gen_and_tl(t0, t0, t2); + tcg_gen_shli_tl(t0, t0, 8); + tcg_gen_or_tl(cpu_gpr[rd], t0, t1); + tcg_temp_free(t2); + tcg_temp_free(t1); + } break; + case OPC_LARCH_REVH_D: { + TCGv t1 = tcg_temp_new(); + TCGv t2 = tcg_const_tl(0x0000FFFF0000FFFFULL); + + tcg_gen_shri_tl(t1, t0, 16); + tcg_gen_and_tl(t1, t1, t2); + tcg_gen_and_tl(t0, t0, t2); + tcg_gen_shli_tl(t0, t0, 16); + tcg_gen_or_tl(t0, t0, t1); + tcg_gen_shri_tl(t1, t0, 32); + tcg_gen_shli_tl(t0, t0, 32); + tcg_gen_or_tl(cpu_gpr[rd], t0, t1); + tcg_temp_free(t2); + tcg_temp_free(t1); + } break; + default: + LARCH_INVAL("bsfhl"); + generate_exception_end(ctx, EXCP_RI); + tcg_temp_free(t0); + return; + } + tcg_temp_free(t0); +} + +/* REV with sf==1, opcode==3 ("REV64") */ +static void handle_rev64(DisasContext *ctx, unsigned int rn, unsigned int rd) +{ + tcg_gen_bswap64_i64(cpu_gpr[rd], cpu_gpr[rn]); +} + +/* + * REV with sf==0, opcode==2 + * REV32 (sf==1, opcode==2) + */ +static void handle_rev32(DisasContext *ctx, unsigned int rn, unsigned int rd) +{ + TCGv_i64 tcg_rd = tcg_temp_new_i64(); + gen_load_gpr(tcg_rd, rd); + + TCGv_i64 tcg_tmp = tcg_temp_new_i64(); + TCGv_i64 tcg_rn = tcg_temp_new_i64(); + gen_load_gpr(tcg_rn, rn); + + /* bswap32_i64 requires zero high word */ + tcg_gen_ext32u_i64(tcg_tmp, tcg_rn); + tcg_gen_bswap32_i64(tcg_rd, tcg_tmp, TCG_BSWAP_OZ); + tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32); + tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_OZ); + tcg_gen_concat32_i64(cpu_gpr[rd], tcg_rd, tcg_tmp); + + tcg_temp_free_i64(tcg_tmp); + tcg_temp_free_i64(tcg_rd); + tcg_temp_free_i64(tcg_rn); +} + +/* REV16 */ +static void handle_rev16(DisasContext *ctx, unsigned int rn, unsigned int rd) +{ + TCGv_i64 tcg_rd = tcg_temp_new_i64(); + TCGv_i64 tcg_rn = tcg_temp_new_i64(); + gen_load_gpr(tcg_rd, rd); + gen_load_gpr(tcg_rn, rn); + TCGv_i64 tcg_tmp = tcg_temp_new_i64(); + TCGv_i64 mask = tcg_const_i64(0x0000ffff0000ffffull); + + tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16); + tcg_gen_and_i64(tcg_rd, tcg_rn, mask); + tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask); + tcg_gen_shli_i64(tcg_rd, tcg_rd, 16); + tcg_gen_or_i64(cpu_gpr[rd], tcg_rd, tcg_tmp); + + tcg_temp_free_i64(mask); + tcg_temp_free_i64(tcg_tmp); + tcg_temp_free_i64(tcg_rd); + tcg_temp_free_i64(tcg_rn); +} + +static void gen_lsa(DisasContext *ctx, int opc, int rd, int rs, int rt, + int imm2) +{ + TCGv t0; + TCGv t1; + if (rd == 0) { + /* Treat as NOP. 
*/ + return; + } + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + gen_load_gpr(t0, rs); + gen_load_gpr(t1, rt); + tcg_gen_shli_tl(t0, t0, imm2 + 1); + tcg_gen_add_tl(cpu_gpr[rd], t0, t1); + if (opc == OPC_LARCH_ALSL_W) { + tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]); + } + + tcg_temp_free(t1); + tcg_temp_free(t0); + + return; +} + +static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs, + int rt, int bits) +{ + TCGv t0; + if (rd == 0) { + /* Treat as NOP. */ + return; + } + t0 = tcg_temp_new(); + if (bits == 0 || bits == wordsz) { + if (bits == 0) { + gen_load_gpr(t0, rt); + } else { + gen_load_gpr(t0, rs); + } + switch (wordsz) { + case 32: + tcg_gen_ext32s_tl(cpu_gpr[rd], t0); + break; + case 64: + tcg_gen_mov_tl(cpu_gpr[rd], t0); + break; + } + } else { + TCGv t1 = tcg_temp_new(); + gen_load_gpr(t0, rt); + gen_load_gpr(t1, rs); + switch (wordsz) { + case 32: { + TCGv_i64 t2 = tcg_temp_new_i64(); + tcg_gen_concat_tl_i64(t2, t1, t0); + tcg_gen_shri_i64(t2, t2, 32 - bits); + gen_move_low32(cpu_gpr[rd], t2); + tcg_temp_free_i64(t2); + } break; + case 64: + tcg_gen_shli_tl(t0, t0, bits); + tcg_gen_shri_tl(t1, t1, 64 - bits); + tcg_gen_or_tl(cpu_gpr[rd], t1, t0); + break; + } + tcg_temp_free(t1); + } + + tcg_temp_free(t0); +} + +static void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt, + int bp) +{ + gen_align_bits(ctx, wordsz, rd, rs, rt, bp * 8); +} + +static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt) +{ + TCGv t0; + if (rd == 0) { + /* Treat as NOP. */ + return; + } + t0 = tcg_temp_new(); + gen_load_gpr(t0, rt); + switch (opc) { + case OPC_LARCH_BREV_4B: + gen_helper_bitswap(cpu_gpr[rd], t0); + break; + case OPC_LARCH_BREV_8B: + gen_helper_dbitswap(cpu_gpr[rd], t0); + break; + } + tcg_temp_free(t0); +} + +static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs) +{ + TCGv t0 = tcg_temp_new(); + check_cp1_enabled(ctx); + + switch (opc) { + case OPC_LARCH_FR2GR_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + tcg_gen_ext_i32_tl(t0, fp0); + tcg_temp_free_i32(fp0); + } + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_GR2FR_W: + gen_load_gpr(t0, rt); + { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(fp0, t0); + gen_store_fpr32(ctx, fp0, fs); + tcg_temp_free_i32(fp0); + } + break; + case OPC_LARCH_FR2GR_D: + gen_load_fpr64(ctx, t0, fs); + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_GR2FR_D: + gen_load_gpr(t0, rt); + gen_store_fpr64(ctx, t0, fs); + break; + case OPC_LARCH_FRH2GR_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32h(ctx, fp0, fs); + tcg_gen_ext_i32_tl(t0, fp0); + tcg_temp_free_i32(fp0); + } + gen_store_gpr(t0, rt); + break; + case OPC_LARCH_GR2FRH_W: + gen_load_gpr(t0, rt); + { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + tcg_gen_trunc_tl_i32(fp0, t0); + gen_store_fpr32h(ctx, fp0, fs); + tcg_temp_free_i32(fp0); + } + break; + default: + LARCH_INVAL("cp1 move"); + generate_exception_end(ctx, EXCP_RI); + goto out; + } + +out: + tcg_temp_free(t0); +} + +static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, int cc, + int tf) +{ + int cond; + TCGv_i32 t0 = tcg_temp_new_i32(); + TCGLabel *l1 = gen_new_label(); + TCGLabel *l2 = gen_new_label(); + + if (tf) { + cond = TCG_COND_EQ; + } else { + cond = TCG_COND_NE; + } + + tcg_gen_andi_i32(t0, fpu_fcsr0, 1 << get_fp_bit(cc)); + tcg_gen_brcondi_i32(cond, t0, 0, l1); + gen_load_fpr32(ctx, t0, fs); + gen_store_fpr32(ctx, t0, fd); + gen_set_label(l1); + + tcg_gen_andi_i32(t0, fpu_fcsr0, 1 << get_fp_bit(cc + 1)); 
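/*
 * gen_movcf_ps tests one FCSR condition bit per 32-bit half and copies
 * that half of fs into fd only when the test matches tf. The scalar
 * decision logic, sketched in plain C:
 *
 *     #include <assert.h>
 *     #include <stdint.h>
 *
 *     static uint32_t movcf(uint32_t fcsr, int bit, int tf,
 *                           uint32_t fd, uint32_t fs)
 *     {
 *         int cc = (fcsr >> bit) & 1;
 *         return (cc == tf) ? fs : fd;   // copy on match, else keep fd
 *     }
 *
 *     int main(void)
 *     {
 *         assert(movcf(1u << 23, 23, 1, 0xdead, 0xbeef) == 0xbeef);
 *         assert(movcf(0, 23, 1, 0xdead, 0xbeef) == 0xdead);
 *         return 0;
 *     }
 */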
+ tcg_gen_brcondi_i32(cond, t0, 0, l2); + gen_load_fpr32h(ctx, t0, fs); + gen_store_fpr32h(ctx, t0, fd); + tcg_temp_free_i32(t0); + gen_set_label(l2); +} + +static void gen_farith(DisasContext *ctx, uint32_t opc, int ft, int fs, int fd, + int cc) +{ + check_cp1_enabled(ctx); + switch (opc) { + case OPC_LARCH_FADD_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + TCGv_i32 fp1 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_add_s(fp0, cpu_env, fp0, fp1); + tcg_temp_free_i32(fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FSUB_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + TCGv_i32 fp1 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1); + tcg_temp_free_i32(fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FMUL_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + TCGv_i32 fp1 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1); + tcg_temp_free_i32(fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FDIV_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + TCGv_i32 fp1 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_div_s(fp0, cpu_env, fp0, fp1); + tcg_temp_free_i32(fp1); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FSQRT_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_sqrt_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FABS_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_abs_s(fp0, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FMOV_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FNEG_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_chs_s(fp0, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FTINTRNE_L_S: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_round_l_s(fp64, cpu_env, fp32); + tcg_temp_free_i32(fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(fp64); + } break; + case OPC_LARCH_FTINTRZ_L_S: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_trunc_l_s(fp64, cpu_env, fp32); + tcg_temp_free_i32(fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(fp64); + } break; + case OPC_LARCH_FTINTRP_L_S: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_ceil_l_s(fp64, cpu_env, fp32); + tcg_temp_free_i32(fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(fp64); + } break; + case OPC_LARCH_FTINTRM_L_S: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_floor_l_s(fp64, cpu_env, fp32); + tcg_temp_free_i32(fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(fp64); + } break; + case OPC_LARCH_FTINTRNE_W_S: { + TCGv_i32 
fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_round_w_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FTINTRZ_W_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_trunc_w_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FTINTRP_W_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_ceil_w_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FTINTRM_W_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_floor_w_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FRECIP_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_recip_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FRSQRT_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_rsqrt_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FRINT_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_rint_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FCLASS_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_class_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FMIN_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + TCGv_i32 fp1 = tcg_temp_new_i32(); + TCGv_i32 fp2 = tcg_temp_new_i32(); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_min_s(fp2, cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(fp2); + tcg_temp_free_i32(fp1); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FMINA_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + TCGv_i32 fp1 = tcg_temp_new_i32(); + TCGv_i32 fp2 = tcg_temp_new_i32(); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp2, fd); + tcg_temp_free_i32(fp2); + tcg_temp_free_i32(fp1); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FMAX_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + TCGv_i32 fp1 = tcg_temp_new_i32(); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_max_s(fp1, cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp1, fd); + tcg_temp_free_i32(fp1); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FMAXA_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + TCGv_i32 fp1 = tcg_temp_new_i32(); + gen_load_fpr32(ctx, fp0, fs); + gen_load_fpr32(ctx, fp1, ft); + gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1); + gen_store_fpr32(ctx, fp1, fd); + tcg_temp_free_i32(fp1); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FCVT_D_S: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_cvtd_s(fp64, cpu_env, fp32); + tcg_temp_free_i32(fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(fp64); + } break; + case OPC_LARCH_FTINT_W_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_cvt_w_s(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + 
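/*
 * The FMINA/FMAXA helpers used above select by magnitude rather than by
 * signed value. Assuming the usual semantics (smaller/larger absolute
 * value wins, signed min/max breaks ties; NaN handling omitted), a
 * scalar sketch:
 *
 *     #include <assert.h>
 *     #include <math.h>
 *
 *     static float fmina_s(float a, float b)
 *     {
 *         if (fabsf(a) < fabsf(b)) {
 *             return a;
 *         }
 *         if (fabsf(b) < fabsf(a)) {
 *             return b;
 *         }
 *         return fminf(a, b);            // equal magnitude: signed minimum
 *     }
 *
 *     int main(void)
 *     {
 *         assert(fmina_s(-1.0f, 4.0f) == -1.0f);  // smaller magnitude wins
 *         assert(fmina_s(-2.0f, 2.0f) == -2.0f);  // tie: signed minimum
 *         return 0;
 *     }
 */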
tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FTINT_L_S: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_cvt_l_s(fp64, cpu_env, fp32); + tcg_temp_free_i32(fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(fp64); + } break; + case OPC_LARCH_FADD_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + TCGv_i64 fp1 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_add_d(fp0, cpu_env, fp0, fp1); + tcg_temp_free_i64(fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FSUB_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + TCGv_i64 fp1 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1); + tcg_temp_free_i64(fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FMUL_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + TCGv_i64 fp1 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1); + tcg_temp_free_i64(fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FDIV_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + TCGv_i64 fp1 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_div_d(fp0, cpu_env, fp0, fp1); + tcg_temp_free_i64(fp1); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FSQRT_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_sqrt_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FABS_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_abs_d(fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FMOV_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FNEG_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_chs_d(fp0, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FTINTRNE_L_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_round_l_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FTINTRZ_L_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_trunc_l_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FTINTRP_L_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_ceil_l_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FTINTRM_L_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_floor_l_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FTINTRNE_W_D: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_round_w_d(fp32, cpu_env, fp64); + tcg_temp_free_i64(fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(fp32); + } break; + 
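/*
 * The FTINTR{NE,Z,P,M} variants differ only in rounding direction:
 * nearest-even, toward zero, toward +inf, toward -inf. The same four
 * behaviours reproduced with host fenv rounding modes (a sketch of the
 * semantics, not of how the softfloat helpers are implemented):
 *
 *     #include <assert.h>
 *     #include <fenv.h>
 *     #include <math.h>
 *
 *     int main(void)
 *     {
 *         fesetround(FE_TONEAREST);  assert(rint(2.5) == 2.0);   // RNE
 *         fesetround(FE_TOWARDZERO); assert(rint(-2.5) == -2.0); // RZ
 *         fesetround(FE_UPWARD);     assert(rint(2.5) == 3.0);   // RP
 *         fesetround(FE_DOWNWARD);   assert(rint(-2.5) == -3.0); // RM
 *         fesetround(FE_TONEAREST);
 *         return 0;
 *     }
 */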
case OPC_LARCH_FTINTRZ_W_D: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_trunc_w_d(fp32, cpu_env, fp64); + tcg_temp_free_i64(fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(fp32); + } break; + case OPC_LARCH_FTINTRP_W_D: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_ceil_w_d(fp32, cpu_env, fp64); + tcg_temp_free_i64(fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(fp32); + } break; + case OPC_LARCH_FTINTRM_W_D: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_floor_w_d(fp32, cpu_env, fp64); + tcg_temp_free_i64(fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(fp32); + } break; + case OPC_LARCH_FRECIP_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_recip_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FRSQRT_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rsqrt_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FRINT_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_rint_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FCLASS_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_class_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FMIN_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + TCGv_i64 fp1 = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_min_d(fp1, cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(fp1); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FMINA_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + TCGv_i64 fp1 = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(fp1); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FMAX_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + TCGv_i64 fp1 = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_max_d(fp1, cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(fp1); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FMAXA_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + TCGv_i64 fp1 = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp0, fs); + gen_load_fpr64(ctx, fp1, ft); + gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1); + gen_store_fpr64(ctx, fp1, fd); + tcg_temp_free_i64(fp1); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FCVT_S_D: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_cvts_d(fp32, cpu_env, fp64); + tcg_temp_free_i64(fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(fp32); + } break; + case OPC_LARCH_FTINT_W_D: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_cvt_w_d(fp32, cpu_env, fp64); + tcg_temp_free_i64(fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(fp32); + } 
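/*
 * FCVT.S.D and the *_W_D conversions above all narrow: a 64-bit source
 * temp feeds a helper that yields a 32-bit result. A short reminder of
 * why the narrowing is inherently lossy, in host C:
 *
 *     #include <assert.h>
 *
 *     int main(void)
 *     {
 *         double d = 0.1;          // not exactly representable
 *         float f = (float)d;      // rounds to a 24-bit significand
 *         assert((double)f != d);  // the dropped bits cannot come back
 *         return 0;
 *     }
 */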
break; + case OPC_LARCH_FTINT_L_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvt_l_d(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FFINT_S_W: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + gen_load_fpr32(ctx, fp0, fs); + gen_helper_float_cvts_w(fp0, cpu_env, fp0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FFINT_D_W: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr32(ctx, fp32, fs); + gen_helper_float_cvtd_w(fp64, cpu_env, fp32); + tcg_temp_free_i32(fp32); + gen_store_fpr64(ctx, fp64, fd); + tcg_temp_free_i64(fp64); + } break; + case OPC_LARCH_FFINT_S_L: { + TCGv_i32 fp32 = tcg_temp_new_i32(); + TCGv_i64 fp64 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp64, fs); + gen_helper_float_cvts_l(fp32, cpu_env, fp64); + tcg_temp_free_i64(fp64); + gen_store_fpr32(ctx, fp32, fd); + tcg_temp_free_i32(fp32); + } break; + case OPC_LARCH_FFINT_D_L: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + + gen_load_fpr64(ctx, fp0, fs); + gen_helper_float_cvtd_l(fp0, cpu_env, fp0); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + default: + LARCH_INVAL("farith"); + generate_exception_end(ctx, EXCP_RI); + return; + } +} + +/* Coprocessor 3 (FPU) */ +static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, int fd, int fs, + int base, int index) +{ + TCGv t0 = tcg_temp_new(); + + check_cp1_enabled(ctx); + if (base == 0) { + gen_load_gpr(t0, index); + } else if (index == 0) { + gen_load_gpr(t0, base); + } else { + gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[index]); + } + + /* + * Don't do NOP if destination is zero: we must perform the actual + * memory access. + */ + switch (opc) { + case OPC_LARCH_FLDX_S: + case OPC_LARCH_FLDGT_S: + case OPC_LARCH_FLDLE_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + + tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL); + tcg_gen_trunc_tl_i32(fp0, t0); + gen_store_fpr32(ctx, fp0, fd); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FLDX_D: + case OPC_LARCH_FLDGT_D: + case OPC_LARCH_FLDLE_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ); + gen_store_fpr64(ctx, fp0, fd); + tcg_temp_free_i64(fp0); + } break; + case OPC_LARCH_FSTX_S: + case OPC_LARCH_FSTGT_S: + case OPC_LARCH_FSTLE_S: { + TCGv_i32 fp0 = tcg_temp_new_i32(); + gen_load_fpr32(ctx, fp0, fs); + tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL); + tcg_temp_free_i32(fp0); + } break; + case OPC_LARCH_FSTX_D: + case OPC_LARCH_FSTGT_D: + case OPC_LARCH_FSTLE_D: { + TCGv_i64 fp0 = tcg_temp_new_i64(); + gen_load_fpr64(ctx, fp0, fs); + tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ); + tcg_temp_free_i64(fp0); + } break; + } + tcg_temp_free(t0); +} + +static inline void clear_branch_hflags(DisasContext *ctx) +{ + ctx->hflags &= ~LARCH_HFLAG_BMASK; + if (ctx->base.is_jmp == DISAS_NEXT) { + save_cpu_state(ctx, 0); + } else { + /* + * It is not safe to save ctx->hflags as hflags may be changed + * in execution time. + */ + tcg_gen_andi_i32(hflags, hflags, ~LARCH_HFLAG_BMASK); + } +} + +static void gen_branch(DisasContext *ctx, int insn_bytes) +{ + if (ctx->hflags & LARCH_HFLAG_BMASK) { + int proc_hflags = ctx->hflags & LARCH_HFLAG_BMASK; + /* Branches completion */ + clear_branch_hflags(ctx); + ctx->base.is_jmp = DISAS_NORETURN; + /* FIXME: Need to clear can_do_io. 
*/ + switch (proc_hflags & LARCH_HFLAG_BMASK) { + case LARCH_HFLAG_B: + /* unconditional branch */ + gen_goto_tb(ctx, 0, ctx->btarget); + break; + case LARCH_HFLAG_BC: + /* Conditional branch */ + { + TCGLabel *l1 = gen_new_label(); + + tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1); + gen_goto_tb(ctx, 1, ctx->base.pc_next + insn_bytes); + gen_set_label(l1); + gen_goto_tb(ctx, 0, ctx->btarget); + } + break; + case LARCH_HFLAG_BR: + /* unconditional branch to register */ + tcg_gen_mov_tl(cpu_PC, btarget); + if (ctx->base.singlestep_enabled) { + save_cpu_state(ctx, 0); + gen_helper_raise_exception_debug(cpu_env); + } + tcg_gen_lookup_and_goto_ptr(); + break; + default: + fprintf(stderr, "unknown branch 0x%x\n", proc_hflags); + abort(); + } + } +} + +/* Signed immediate */ +#define SIMM(op, start, width) \ + ((int32_t)(((op >> start) & ((~0U) >> (32 - width))) << (32 - width)) >> \ + (32 - width)) +/* Zero-extended immediate */ +#define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32 - width))) + +static void gen_sync(int stype) +{ + TCGBar tcg_mo = TCG_BAR_SC; + + switch (stype) { + case 0x4: /* SYNC_WMB */ + tcg_mo |= TCG_MO_ST_ST; + break; + case 0x10: /* SYNC_MB */ + tcg_mo |= TCG_MO_ALL; + break; + case 0x11: /* SYNC_ACQUIRE */ + tcg_mo |= TCG_MO_LD_LD | TCG_MO_LD_ST; + break; + case 0x12: /* SYNC_RELEASE */ + tcg_mo |= TCG_MO_ST_ST | TCG_MO_LD_ST; + break; + case 0x13: /* SYNC_RMB */ + tcg_mo |= TCG_MO_LD_LD; + break; + default: + tcg_mo |= TCG_MO_ALL; + break; + } + + tcg_gen_mb(tcg_mo); +} + +static void gen_crc32(DisasContext *ctx, int rd, int rs, int rt, int sz, + int crc32c) +{ + TCGv t0; + TCGv t1; + TCGv_i32 tsz = tcg_const_i32(1 << sz); + if (rd == 0) { + /* Treat as NOP. */ + return; + } + t0 = tcg_temp_new(); + t1 = tcg_temp_new(); + + gen_load_gpr(t0, rt); + gen_load_gpr(t1, rs); + + if (crc32c) { + gen_helper_crc32c(cpu_gpr[rd], t0, t1, tsz); + } else { + gen_helper_crc32(cpu_gpr[rd], t0, t1, tsz); + } + + tcg_temp_free(t0); + tcg_temp_free(t1); + tcg_temp_free_i32(tsz); +} + +#include "cpu-csr.h" + +#ifndef CONFIG_USER_ONLY + +/* + * 64-bit CSR read + * + * @arg : GPR to store the value of CSR register + * @csr : CSR register number + */ +static void gen_csr_rdq(DisasContext *ctx, TCGv rd, int64_t a1) +{ + TCGv_i64 csr = tcg_const_i64(a1); + gen_helper_csr_rdq(rd, cpu_env, csr); +} + +/* + * 64-bit CSR write + * + * @arg : GPR that stores the new value of CSR register + * @csr : CSR register number + */ +static void gen_csr_wrq(DisasContext *ctx, TCGv val, int64_t a1) +{ + TCGv_i64 csr = tcg_const_i64(a1); + gen_helper_csr_wrq(val, cpu_env, val, csr); +} + +/* + * 64-bit CSR exchange + * + * @arg : GPR that stores the new value of CSR register + * @csr : CSR register number + */ +static void gen_csr_xchgq(DisasContext *ctx, TCGv val, TCGv mask, int64_t a1) +{ + TCGv_i64 csr = tcg_const_i64(a1); + gen_helper_csr_xchgq(val, cpu_env, val, mask, csr); +} +#endif /* !CONFIG_USER_ONLY */ + +static void loongarch_tr_init_disas_context(DisasContextBase *dcbase, + CPUState *cs) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + CPULOONGARCHState *env = cs->env_ptr; + + ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK; + ctx->saved_pc = -1; + ctx->insn_flags = env->insn_flags; + ctx->btarget = 0; + /* Restore state from the tb context. */ + ctx->hflags = + (uint32_t)ctx->base.tb->flags; /* FIXME: maybe use 64 bits? 
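The SIMM() macro defined above recovers a signed bitfield by shifting the field up to bit 31 and arithmetic-shifting it back down. A minimal standalone check of that trick, assuming the usual arithmetic right shift of negative int32_t values (implementation-defined in C, but what every supported compiler does):

#include <assert.h>
#include <stdint.h>

/* Same definition as the SIMM macro in the translator above. */
#define SIMM(op, start, width) \
    ((int32_t)(((op >> start) & ((~0U) >> (32 - width))) << (32 - width)) >> \
     (32 - width))

int main(void)
{
    /* A 12-bit field of 0xFFF starting at bit 10 sign-extends to -1. */
    uint32_t insn = 0xFFFu << 10;
    assert(SIMM(insn, 10, 12) == -1);

    /* 0x7FF has bit 11 (the field's sign bit) clear, so it stays positive. */
    insn = 0x7FFu << 10;
    assert(SIMM(insn, 10, 12) == 0x7FF);
    return 0;
}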
*/
+    restore_cpu_state(env, ctx);
+#ifdef CONFIG_USER_ONLY
+    ctx->mem_idx = LARCH_HFLAG_UM;
+#else
+    ctx->mem_idx = hflags_mmu_index(ctx->hflags);
+#endif
+    ctx->default_tcg_memop_mask = MO_ALIGN;
+
+    LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
+              ctx->hflags);
+}
+
+static void loongarch_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
+{
+}
+
+static void loongarch_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    tcg_gen_insn_start(ctx->base.pc_next, ctx->hflags & LARCH_HFLAG_BMASK,
+                       ctx->btarget);
+}
+
+/* 128- and 256-bit LSX vector instructions are not supported yet */
+static bool decode_vector_lsx(uint32_t opcode)
+{
+    uint32_t value = (opcode & 0xff000000);
+
+    if ((opcode & 0xf0000000) == 0x70000000) {
+        return true;
+    } else if ((opcode & 0xfff00000) == 0x38400000) {
+        return true;
+    } else {
+        switch (value) {
+        case 0x09000000:
+        case 0x0a000000:
+        case 0x0e000000:
+        case 0x0f000000:
+        case 0x2c000000:
+        case 0x30000000:
+        case 0x31000000:
+        case 0x32000000:
+        case 0x33000000:
+            return true;
+        }
+    }
+    return false;
+}
+
+static bool decode_insn(DisasContext *ctx, uint32_t insn);
+#include "decode-insn.c.inc"
+#include "trans.inc.c"
+
+static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
+{
+    CPULOONGARCHState *env = cs->env_ptr;
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+    int insn_bytes = 4;
+
+    ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
+
+    if (!decode_insn(ctx, ctx->opcode)) {
+        if (decode_vector_lsx(ctx->opcode)) {
+            generate_exception_end(ctx, EXCP_RI);
+        } else {
+            fprintf(stderr, "Error: unknown opcode. 0x%lx: 0x%x\n",
+                    ctx->base.pc_next, ctx->opcode);
+            generate_exception_end(ctx, EXCP_RI);
+        }
+    }
+
+    if (ctx->hflags & LARCH_HFLAG_BMASK) {
+        gen_branch(ctx, insn_bytes);
+    }
+    ctx->base.pc_next += insn_bytes;
+}
+
+static void loongarch_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
+{
+    DisasContext *ctx = container_of(dcbase, DisasContext, base);
+
+    if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) {
+        save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT);
+        gen_helper_raise_exception_debug(cpu_env);
+    } else {
+        switch (ctx->base.is_jmp) {
+        case DISAS_STOP:
+            gen_save_pc(ctx->base.pc_next);
+            tcg_gen_lookup_and_goto_ptr();
+            break;
+        case DISAS_NEXT:
+        case DISAS_TOO_MANY:
+            save_cpu_state(ctx, 0);
+            gen_goto_tb(ctx, 0, ctx->base.pc_next);
+            break;
+        case DISAS_EXIT:
+            tcg_gen_exit_tb(NULL, 0);
+            break;
+        case DISAS_NORETURN:
+            break;
+        default:
+            g_assert_not_reached();
+        }
+    }
+}
+
+static void loongarch_tr_disas_log(const DisasContextBase *dcbase,
+                                   CPUState *cs)
+{
+    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
+    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
+}
+
+static const TranslatorOps loongarch_tr_ops = {
+    .init_disas_context = loongarch_tr_init_disas_context,
+    .tb_start = loongarch_tr_tb_start,
+    .insn_start = loongarch_tr_insn_start,
+    .translate_insn = loongarch_tr_translate_insn,
+    .tb_stop = loongarch_tr_tb_stop,
+    .disas_log = loongarch_tr_disas_log,
+};
+
+void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb,
+                           int max_insns)
+{
+    DisasContext ctx;
+
+    translator_loop(&loongarch_tr_ops, &ctx.base, cs, tb, max_insns);
+}
+
+void loongarch_tcg_init(void)
+{
+    int i;
+
+    for (i = 0; i < 32; i++)
+        cpu_gpr[i] = tcg_global_mem_new(
+            cpu_env, offsetof(CPULOONGARCHState, active_tc.gpr[i]),
+            regnames[i]);
+
+    for (i = 0; i < 32; i++) {
+
int off = offsetof(CPULOONGARCHState, active_fpu.fpr[i].d); + fpu_f64[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]); + } + + cpu_PC = tcg_global_mem_new( + cpu_env, offsetof(CPULOONGARCHState, active_tc.PC), "PC"); + bcond = tcg_global_mem_new(cpu_env, offsetof(CPULOONGARCHState, bcond), + "bcond"); + btarget = tcg_global_mem_new(cpu_env, offsetof(CPULOONGARCHState, btarget), + "btarget"); + hflags = tcg_global_mem_new_i32( + cpu_env, offsetof(CPULOONGARCHState, hflags), "hflags"); + fpu_fcsr0 = tcg_global_mem_new_i32( + cpu_env, offsetof(CPULOONGARCHState, active_fpu.fcsr0), "fcsr0"); + cpu_lladdr = tcg_global_mem_new( + cpu_env, offsetof(CPULOONGARCHState, lladdr), "lladdr"); + cpu_llval = tcg_global_mem_new(cpu_env, offsetof(CPULOONGARCHState, llval), + "llval"); +} + +void restore_state_to_opc(CPULOONGARCHState *env, TranslationBlock *tb, + target_ulong *data) +{ + env->active_tc.PC = data[0]; + env->hflags &= ~LARCH_HFLAG_BMASK; + env->hflags |= data[1]; + switch (env->hflags & LARCH_HFLAG_BMASK) { + case LARCH_HFLAG_BR: + break; + case LARCH_HFLAG_BC: + case LARCH_HFLAG_B: + env->btarget = data[2]; + break; + } +} diff --git a/target/m68k/translate.c b/target/m68k/translate.c index af43c8eab8e6199cfa69ecc9f34f5dfafa838ccc..edd9f9954ac19c0ded3d0a36d1af89564d289792 100644 --- a/target/m68k/translate.c +++ b/target/m68k/translate.c @@ -2269,9 +2269,9 @@ static void gen_set_sr_im(DisasContext *s, uint16_t val, int ccr_only) tcg_gen_movi_i32(QREG_CC_N, val & CCF_N ? -1 : 0); tcg_gen_movi_i32(QREG_CC_X, val & CCF_X ? 1 : 0); } else { - TCGv sr = tcg_const_i32(val); - gen_helper_set_sr(cpu_env, sr); - tcg_temp_free(sr); + /* Must writeback before changing security state. */ + do_writebacks(s); + gen_helper_set_sr(cpu_env, tcg_constant_i32(val)); } set_cc_op(s, CC_OP_FLAGS); } @@ -2281,6 +2281,8 @@ static void gen_set_sr(DisasContext *s, TCGv val, int ccr_only) if (ccr_only) { gen_helper_set_ccr(cpu_env, val); } else { + /* Must writeback before changing security state. */ + do_writebacks(s); gen_helper_set_sr(cpu_env, val); } set_cc_op(s, CC_OP_FLAGS); @@ -2357,6 +2359,7 @@ DISAS_INSN(arith_im) tcg_gen_or_i32(dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); + gen_exit_tb(s); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); @@ -2366,6 +2369,7 @@ DISAS_INSN(arith_im) tcg_gen_and_i32(dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); + gen_exit_tb(s); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); @@ -2389,6 +2393,7 @@ DISAS_INSN(arith_im) tcg_gen_xor_i32(dest, src1, im); if (with_SR) { gen_set_sr(s, dest, opsize == OS_BYTE); + gen_exit_tb(s); } else { DEST_EA(env, insn, opsize, dest, &addr); gen_logic_cc(s, dest, opsize); @@ -2809,19 +2814,39 @@ DISAS_INSN(illegal) gen_exception(s, s->base.pc_next, EXCP_ILLEGAL); } -/* ??? This should be atomic. 
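The TAS rework below replaces the plain load/or/store sequence with tcg_gen_atomic_fetch_or_tl(), so setting bit 7 and reading the old byte happen as one atomic operation. The same idea at the host level, sketched with C11 atomics (illustrative, not QEMU code); note the condition codes must come from the byte's value before the store:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Read the old byte and set bit 7 in a single atomic step,
 * then derive the m68k-style N/Z flags from the old value. */
static void tas_like(_Atomic uint8_t *p, bool *n, bool *z)
{
    uint8_t old = atomic_fetch_or(p, 0x80);

    *n = (int8_t)old < 0;   /* N: sign bit of the old byte */
    *z = old == 0;          /* Z: old byte was zero */
}

int main(void)
{
    _Atomic uint8_t lock = 0;
    bool n, z;

    tas_like(&lock, &n, &z);   /* first caller: Z set, lock now taken */
    printf("first:  N=%d Z=%d byte=0x%02x\n", n, z, (unsigned)lock);
    tas_like(&lock, &n, &z);   /* second caller observes bit 7 already set */
    printf("second: N=%d Z=%d\n", n, z);
    return 0;
}

The data-register-direct case (mode == 0) keeps a non-atomic path, since a vCPU's own registers cannot be raced against by other vCPUs.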
*/
 DISAS_INSN(tas)
 {
-    TCGv dest;
-    TCGv src1;
-    TCGv addr;
+    int mode = extract32(insn, 3, 3);
+    int reg0 = REG(insn, 0);
 
-    dest = tcg_temp_new();
-    SRC_EA(env, src1, OS_BYTE, 1, &addr);
-    gen_logic_cc(s, src1, OS_BYTE);
-    tcg_gen_ori_i32(dest, src1, 0x80);
-    DEST_EA(env, insn, OS_BYTE, dest, &addr);
-    tcg_temp_free(dest);
+    if (mode == 0) {
+        /* data register direct */
+        TCGv dest = cpu_dregs[reg0];
+        gen_logic_cc(s, dest, OS_BYTE);
+        tcg_gen_ori_tl(dest, dest, 0x80);
+    } else {
+        TCGv src1, addr;
+
+        addr = gen_lea_mode(env, s, mode, reg0, OS_BYTE);
+        if (IS_NULL_QREG(addr)) {
+            gen_addr_fault(s);
+            return;
+        }
+        src1 = tcg_temp_new();
+        tcg_gen_atomic_fetch_or_tl(src1, addr, tcg_constant_tl(0x80),
+                                   IS_USER(s), MO_SB);
+        gen_logic_cc(s, src1, OS_BYTE);
+        tcg_temp_free(src1);
+
+        switch (mode) {
+        case 3: /* Indirect postincrement. */
+            tcg_gen_addi_i32(AREG(insn, 0), addr, 1);
+            break;
+        case 4: /* Indirect predecrement. */
+            tcg_gen_mov_i32(AREG(insn, 0), addr);
+            break;
+        }
+    }
 }
 
 DISAS_INSN(mull)
@@ -4592,6 +4617,7 @@ DISAS_INSN(strldsr)
     }
     gen_push(s, gen_get_sr(s));
     gen_set_sr_im(s, ext, 0);
+    gen_exit_tb(s);
 }
 
 DISAS_INSN(move_from_sr)
@@ -5809,8 +5835,10 @@ DISAS_INSN(from_mext)
 DISAS_INSN(macsr_to_ccr)
 {
     TCGv tmp = tcg_temp_new();
-    tcg_gen_andi_i32(tmp, QREG_MACSR, 0xf);
-    gen_helper_set_sr(cpu_env, tmp);
+
+    /* Note that X and C are always cleared. */
+    tcg_gen_andi_i32(tmp, QREG_MACSR, CCF_N | CCF_Z | CCF_V);
+    gen_helper_set_ccr(cpu_env, tmp);
     tcg_temp_free(tmp);
     set_cc_op(s, CC_OP_FLAGS);
 }
diff --git a/target/meson.build b/target/meson.build
index 2f6940255e6583e344ec0892fd8c4b55ee75766d..a824a390f9cc37361290176cc9728f59b46a935d 100644
--- a/target/meson.build
+++ b/target/meson.build
@@ -5,6 +5,7 @@ subdir('cris')
 subdir('hexagon')
 subdir('hppa')
 subdir('i386')
+subdir('loongarch64')
 subdir('m68k')
 subdir('microblaze')
 subdir('mips')
@@ -16,5 +17,6 @@ subdir('rx')
 subdir('s390x')
 subdir('sh4')
 subdir('sparc')
+subdir('sw64')
 subdir('tricore')
 subdir('xtensa')
diff --git a/target/mips/kvm.c b/target/mips/kvm.c
index 086debd9f013737f9a0e889afc712adb4c3f8f74..f80ac72dd1855ea9fbcb518313f3eb925b83ccc7 100644
--- a/target/mips/kvm.c
+++ b/target/mips/kvm.c
@@ -1295,3 +1295,7 @@ bool kvm_arch_cpu_check_are_resettable(void)
 {
     return true;
 }
+
+void kvm_arch_accel_class_init(ObjectClass *oc)
+{
+}
diff --git a/target/ppc/cpu-models.c b/target/ppc/cpu-models.c
index 4baa111713b0c5fbeb4d3c5866ca893a6906b16f..e6cce0f8db626703ac8ffe96685770a8dbb523f6 100644
--- a/target/ppc/cpu-models.c
+++ b/target/ppc/cpu-models.c
@@ -670,13 +670,13 @@
                 "PowerPC 7410 v1.3 (G4)")
     POWERPC_DEF("7410_v1.4", CPU_POWERPC_7410_v14, 7410,
                 "PowerPC 7410 v1.4 (G4)")
-    POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7400,
+    POWERPC_DEF("7448_v1.0", CPU_POWERPC_7448_v10, 7445,
                 "PowerPC 7448 v1.0 (G4)")
-    POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7400,
+    POWERPC_DEF("7448_v1.1", CPU_POWERPC_7448_v11, 7445,
                 "PowerPC 7448 v1.1 (G4)")
-    POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7400,
+    POWERPC_DEF("7448_v2.0", CPU_POWERPC_7448_v20, 7445,
                 "PowerPC 7448 v2.0 (G4)")
-    POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7400,
+    POWERPC_DEF("7448_v2.1", CPU_POWERPC_7448_v21, 7445,
                 "PowerPC 7448 v2.1 (G4)")
     POWERPC_DEF("7450_v1.0", CPU_POWERPC_7450_v10, 7450,
                 "PowerPC 7450 v1.0 (G4)")
@@ -963,6 +963,6 @@ PowerPCCPUAlias ppc_cpu_aliases[] = {
 #endif
     { "ppc32", "604" },
     { "ppc", "604" },
-    { "default", "604" },
+
     { NULL, NULL }
 };
diff --git a/target/ppc/cpu-models.h b/target/ppc/cpu-models.h
index 095259275941dbcf6657b94af8635d1e778c0460..75ea085bd5735e44945867773dacb6ac01046030 100644
--- a/target/ppc/cpu-models.h
+++ b/target/ppc/cpu-models.h
@@ -63,7 +63,7 @@ enum {
     /* PowerPC 405 cores */
     CPU_POWERPC_405D2 = 0x20010000,
     CPU_POWERPC_405D4 = 0x41810000,
-    /* PowerPC 405 microcontrolers */
+    /* PowerPC 405 microcontrollers */
     /* XXX: missing 0x200108a0 */
     CPU_POWERPC_405CRa = 0x40110041,
     CPU_POWERPC_405CRb = 0x401100C5,
@@ -93,7 +93,7 @@ enum {
 #define CPU_POWERPC_440 CPU_POWERPC_440GXf
     /* PowerPC 440 cores */
     CPU_POWERPC_440_XILINX = 0x7ff21910,
-    /* PowerPC 440 microcontrolers */
+    /* PowerPC 440 microcontrollers */
     CPU_POWERPC_440EPa = 0x42221850,
     CPU_POWERPC_440EPb = 0x422218D3,
     CPU_POWERPC_440GPb = 0x40120440,
diff --git a/target/ppc/cpu.h b/target/ppc/cpu.h
index e946da5f3a8c197aaabbbfda18355674b5efd0c2..a2b84f086aec5e8697ac286f8380246ed075e55a 100644
--- a/target/ppc/cpu.h
+++ b/target/ppc/cpu.h
@@ -24,6 +24,7 @@
 #include "exec/cpu-defs.h"
 #include "cpu-qom.h"
 #include "qom/object.h"
+#include "hw/registerfields.h"
 
 #define TCG_GUEST_DEFAULT_MO 0
 
@@ -230,7 +231,7 @@ typedef union _ppc_vsr_t {
 #ifdef CONFIG_INT128
     __uint128_t u128;
 #endif
-    Int128 s128; 
+    Int128 s128;
 } ppc_vsr_t;
 
 typedef ppc_vsr_t ppc_avr_t;
@@ -343,9 +344,11 @@ typedef struct ppc_v3_pate_t {
 #define MSR_RI 1 /* Recoverable interrupt 1 */
 #define MSR_LE 0 /* Little-endian mode 1 hflags */
 
+FIELD(MSR, PR, MSR_PR, 1)
+
 /* PMU bits */
 #define MMCR0_FC PPC_BIT(32) /* Freeze Counters */
-#define MMCR0_PMAO PPC_BIT(56) /* Perf Monitor Alert Ocurred */
+#define MMCR0_PMAO PPC_BIT(56) /* Perf Monitor Alert Occurred */
 #define MMCR0_PMAE PPC_BIT(37) /* Perf Monitor Alert Enable */
 #define MMCR0_EBE PPC_BIT(43) /* Perf Monitor EBB Enable */
 #define MMCR0_FCECE PPC_BIT(38) /* FC on Enabled Cond or Event */
@@ -443,7 +446,6 @@ typedef struct ppc_v3_pate_t {
 #define msr_ce ((env->msr >> MSR_CE) & 1)
 #define msr_ile ((env->msr >> MSR_ILE) & 1)
 #define msr_ee ((env->msr >> MSR_EE) & 1)
-#define msr_pr ((env->msr >> MSR_PR) & 1)
 #define msr_fp ((env->msr >> MSR_FP) & 1)
 #define msr_me ((env->msr >> MSR_ME) & 1)
 #define msr_fe0 ((env->msr >> MSR_FE0) & 1)
@@ -1446,10 +1448,6 @@ typedef PowerPCCPU ArchCPU;
 #define XER_CMP 8
 #define XER_BC 0
 #define xer_so (env->so)
-#define xer_ov (env->ov)
-#define xer_ca (env->ca)
-#define xer_ov32 (env->ov)
-#define xer_ca32 (env->ca)
 #define xer_cmp ((env->xer >> XER_CMP) & 0xFF)
 #define xer_bc ((env->xer >> XER_BC) & 0x7F)
 
@@ -1621,6 +1619,8 @@ typedef PowerPCCPU ArchCPU;
 #define SPR_BOOKE_GIVOR14 (0x1BD)
 #define SPR_TIR (0x1BE)
 #define SPR_PTCR (0x1D0)
+#define SPR_HASHKEYR (0x1D4)
+#define SPR_HASHPKEYR (0x1D5)
 #define SPR_BOOKE_SPEFSCR (0x200)
 #define SPR_Exxx_BBEAR (0x201)
 #define SPR_Exxx_BBTAR (0x202)
diff --git a/target/ppc/cpu_init.c b/target/ppc/cpu_init.c
index 6695985e9b56a0ad188086f03f8a2c3d3ce284f8..211759508fca46f362d2276da73d011ebe65895b 100644
--- a/target/ppc/cpu_init.c
+++ b/target/ppc/cpu_init.c
@@ -2098,6 +2098,33 @@ static void register_8xx_sprs(CPUPPCState *env)
                  0x00000000);
 }
 
+static void register_power10_hash_sprs(CPUPPCState *env)
+{
+    /*
+     * It is the OS's responsibility to generate a random value for the
+     * registers in each process' context, so initialize them to 0 here.
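In the linux-user branch below, each 64-bit key is assembled from two 32-bit draws because GLib's g_rand_int() yields a guint32 per call. The same seeding pattern in isolation:

#include <glib.h>

int main(void)
{
    GRand *rand = g_rand_new();

    /* g_rand_int() returns 32 bits; combine two draws for a 64-bit key. */
    guint64 key = ((guint64)g_rand_int(rand) << 32) | g_rand_int(rand);

    g_rand_free(rand);
    g_print("key = 0x%016" G_GINT64_MODIFIER "x\n", key);
    return 0;
}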
+ */ + uint64_t hashkeyr_initial_value = 0, hashpkeyr_initial_value = 0; +#if defined(CONFIG_USER_ONLY) + /* in linux-user, setup the hash register with a random value */ + GRand *rand = g_rand_new(); + hashkeyr_initial_value = + ((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand); + hashpkeyr_initial_value = + ((uint64_t)g_rand_int(rand) << 32) | (uint64_t)g_rand_int(rand); + g_rand_free(rand); +#endif + spr_register(env, SPR_HASHKEYR, "HASHKEYR", + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + hashkeyr_initial_value); + spr_register_hv(env, SPR_HASHPKEYR, "HASHPKEYR", + SPR_NOACCESS, SPR_NOACCESS, + SPR_NOACCESS, SPR_NOACCESS, + &spr_read_generic, &spr_write_generic, + hashpkeyr_initial_value); +} + /* * AMR => SPR 29 (Power 2.04) * CTRL => SPR 136 (Power 2.04) @@ -3018,7 +3045,15 @@ POWERPC_FAMILY(403GCX)(ObjectClass *oc, void *data) pcc->flags = POWERPC_FLAG_CE | POWERPC_FLAG_PX | POWERPC_FLAG_BUS_CLK; } - +static void init_tlbs_emb(CPUPPCState *env) +{ +#if !defined(CONFIG_USER_ONLY) + env->nb_tlb = 64; + env->nb_ways = 1; + env->id_tlbs = 0; + env->tlb_type = TLB_EMB; +#endif +} static void init_proc_405(CPUPPCState *env) { /* Time base */ @@ -3036,13 +3071,7 @@ static void init_proc_405(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + init_tlbs_emb(env); init_excp_4xx_softmmu(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -3135,13 +3164,7 @@ static void init_proc_440EP(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + init_tlbs_emb(env); init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -3259,13 +3282,7 @@ static void init_proc_440GP(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + init_tlbs_emb(env); init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -3443,13 +3460,7 @@ static void init_proc_440x5(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); - /* Memory management */ -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + init_tlbs_emb(env); init_excp_BookE(env); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -3877,12 +3888,8 @@ static void init_proc_e200(CPUPPCState *env) SPR_NOACCESS, SPR_NOACCESS, &spr_read_generic, &spr_write_generic, 0x00000000); -#if !defined(CONFIG_USER_ONLY) - env->nb_tlb = 64; - env->nb_ways = 1; - env->id_tlbs = 0; - env->tlb_type = TLB_EMB; -#endif + + init_tlbs_emb(env); init_excp_e200(env, 0xFFFF0000UL); env->dcache_line_size = 32; env->icache_line_size = 32; @@ -7023,7 +7030,7 @@ static void register_970_lpar_sprs(CPUPPCState *env) static void register_power5p_lpar_sprs(CPUPPCState *env) { #if !defined(CONFIG_USER_ONLY) - /* Logical partitionning */ + /* Logical partitioning */ spr_register_kvm_hv(env, SPR_LPCR, "LPCR", SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, SPR_NOACCESS, @@ -7961,7 +7968,7 @@ static 
bool cpu_has_work_POWER9(CPUState *cs) if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && (env->spr[SPR_LPCR] & LPCR_EEE)) { bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); - if (heic == 0 || !msr_hv || msr_pr) { + if (!heic || !msr_hv || FIELD_EX64(env->msr, MSR, PR)) { return true; } } @@ -8127,6 +8134,7 @@ static void init_proc_POWER10(CPUPPCState *env) register_power8_book4_sprs(env); register_power8_rpr_sprs(env); register_power9_mmu_sprs(env); + register_power10_hash_sprs(env); /* FIXME: Filter fields properly based on privilege level */ spr_register_kvm_hv(env, SPR_PSSCR, "PSSCR", NULL, NULL, NULL, NULL, @@ -8170,7 +8178,7 @@ static bool cpu_has_work_POWER10(CPUState *cs) if ((env->pending_interrupts & (1u << PPC_INTERRUPT_EXT)) && (env->spr[SPR_LPCR] & LPCR_EEE)) { bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); - if (heic == 0 || !msr_hv || msr_pr) { + if (!heic || !msr_hv || FIELD_EX64(env->msr, MSR, PR)) { return true; } } diff --git a/target/ppc/dfp_helper.c b/target/ppc/dfp_helper.c index 0d01ac3de0b700a1caf94861ba02e477ba554808..0398b3a50e958c5cd2d14fb41191b0bb8c2ba8a0 100644 --- a/target/ppc/dfp_helper.c +++ b/target/ppc/dfp_helper.c @@ -1144,6 +1144,26 @@ static inline uint8_t dfp_get_bcd_digit_128(ppc_vsr_t *t, unsigned n) return t->VsrD((n & 0x10) ? 0 : 1) >> ((n << 2) & 63) & 15; } +static inline void dfp_invalid_op_vxcvi_64(struct PPC_DFP *dfp) +{ + /* TODO: fpscr is incorrectly not being saved to env */ + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE); + if ((dfp->env->fpscr & FP_VE) == 0) { + dfp->vt.VsrD(1) = 0x7c00000000000000; /* QNaN */ + } +} + + +static inline void dfp_invalid_op_vxcvi_128(struct PPC_DFP *dfp) +{ + /* TODO: fpscr is incorrectly not being saved to env */ + dfp_set_FPSCR_flag(dfp, FP_VX | FP_VXCVI, FPSCR_VE); + if ((dfp->env->fpscr & FP_VE) == 0) { + dfp->vt.VsrD(0) = 0x7c00000000000000; /* QNaN */ + dfp->vt.VsrD(1) = 0x0; + } +} + #define DFP_HELPER_ENBCD(op, size) \ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ uint32_t s) \ @@ -1170,7 +1190,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ sgn = 0; \ break; \ default: \ - dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ + dfp_invalid_op_vxcvi_##size(&dfp); \ + set_dfp##size(t, &dfp.vt); \ return; \ } \ } \ @@ -1180,7 +1201,8 @@ void helper_##op(CPUPPCState *env, ppc_fprp_t *t, ppc_fprp_t *b, \ digits[(size) / 4 - n] = dfp_get_bcd_digit_##size(&dfp.vb, \ offset++); \ if (digits[(size) / 4 - n] > 10) { \ - dfp_set_FPSCR_flag(&dfp, FP_VX | FP_VXCVI, FPSCR_VE); \ + dfp_invalid_op_vxcvi_##size(&dfp); \ + set_dfp##size(t, &dfp.vt); \ return; \ } else { \ nonzero |= (digits[(size) / 4 - n] > 0); \ diff --git a/target/ppc/excp_helper.c b/target/ppc/excp_helper.c index 17607adbe411dfdce78dc79efc691db6832709f9..3576e87b2806b0e491571d7d7c75c37f6422b6ac 100644 --- a/target/ppc/excp_helper.c +++ b/target/ppc/excp_helper.c @@ -312,7 +312,7 @@ static inline void powerpc_excp(PowerPCCPU *cpu, int excp_model, int excp) /* * new interrupt handler msr preserves existing HV and ME unless - * explicitly overriden + * explicitly overridden */ new_msr = env->msr & (((target_ulong)1 << MSR_ME) | MSR_HVB); @@ -976,7 +976,8 @@ static void ppc_hw_interrupt(CPUPPCState *env) bool lpes0 = !!(env->spr[SPR_LPCR] & LPCR_LPES0); bool heic = !!(env->spr[SPR_LPCR] & LPCR_HEIC); /* HEIC blocks delivery to the hypervisor */ - if ((async_deliver && !(heic && msr_hv && !msr_pr)) || + if ((async_deliver && !(heic && msr_hv && + !FIELD_EX64(env->msr, MSR, PR))) || 
(env->has_hv_mode && msr_hv == 0 && !lpes0)) { powerpc_excp(cpu, env->excp_model, POWERPC_EXCP_EXTERNAL); return; diff --git a/target/ppc/fpu_helper.c b/target/ppc/fpu_helper.c index c4896cecc80ce46a646eb65ae710e585bcf4744c..4e34cc2a3f45b2a9ac68286fea1fb98f6e8a19ba 100644 --- a/target/ppc/fpu_helper.c +++ b/target/ppc/fpu_helper.c @@ -2086,7 +2086,7 @@ VSX_TSQRT(xvtsqrtsp, 4, float32, VsrW(i), -126, 23) void helper_##op(CPUPPCState *env, ppc_vsr_t *xt, \ ppc_vsr_t *xa, ppc_vsr_t *b, ppc_vsr_t *c) \ { \ - ppc_vsr_t t = *xt; \ + ppc_vsr_t t = { }; \ int i; \ \ helper_reset_fpstatus(env); \ diff --git a/target/ppc/helper_regs.c b/target/ppc/helper_regs.c index 99562edd574151a7359762c37d515bccf3a6fc86..e97d25e9ab3c5b43b647557d551a44846abfbf39 100644 --- a/target/ppc/helper_regs.c +++ b/target/ppc/helper_regs.c @@ -288,7 +288,7 @@ void check_tlb_flush(CPUPPCState *env, bool global) if (global && (env->tlb_need_flush & TLB_NEED_GLOBAL_FLUSH)) { env->tlb_need_flush &= ~TLB_NEED_GLOBAL_FLUSH; env->tlb_need_flush &= ~TLB_NEED_LOCAL_FLUSH; - tlb_flush_all_cpus_synced(cs); + tlb_flush_all_cpus(cs); return; } diff --git a/target/ppc/int_helper.c b/target/ppc/int_helper.c index 9bc327bcba5acc4af871717295157b2d77fae61e..b577b43f4c275e6e9a45c7a4716682c35f0448c9 100644 --- a/target/ppc/int_helper.c +++ b/target/ppc/int_helper.c @@ -36,9 +36,9 @@ static inline void helper_update_ov_legacy(CPUPPCState *env, int ov) { if (unlikely(ov)) { - env->so = env->ov = 1; + env->so = env->ov = env->ov32 = 1; } else { - env->ov = 0; + env->ov = env->ov32 = 0; } } diff --git a/target/ppc/kvm.c b/target/ppc/kvm.c index dc93b99189ea242fa8c05c95f91dc08fed999c9b..403567b6eebea533491d2b70b1f264e8322782e8 100644 --- a/target/ppc/kvm.c +++ b/target/ppc/kvm.c @@ -1323,7 +1323,7 @@ int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level) return 0; } - if (!kvm_enabled() || !cap_interrupt_unset) { + if (!cap_interrupt_unset) { return 0; } @@ -1895,7 +1895,7 @@ static int kvmppc_find_cpu_dt(char *buf, int buf_len) return 0; } -static uint64_t kvmppc_read_int_dt(const char *filename) +static uint64_t kvmppc_read_int_dt(const char *filename, Error **errp) { union { uint32_t v32; @@ -1906,7 +1906,8 @@ static uint64_t kvmppc_read_int_dt(const char *filename) f = fopen(filename, "rb"); if (!f) { - return -1; + error_setg_errno(errp, errno, "error opening %s", filename); + return 0; } len = fread(&u, 1, sizeof(u), f); @@ -1924,33 +1925,48 @@ static uint64_t kvmppc_read_int_dt(const char *filename) /* * Read a CPU node property from the host device tree that's a single - * integer (32-bit or 64-bit). Returns 0 if anything goes wrong - * (can't find or open the property, or doesn't understand the format) + * integer (32-bit or 64-bit). Returns 0 and set errp if anything goes + * wrong (can't find or open the property, or doesn't understand the + * format) */ -static uint64_t kvmppc_read_int_cpu_dt(const char *propname) +static uint64_t kvmppc_read_int_cpu_dt(const char *propname, Error **errp) { - char buf[PATH_MAX], *tmp; - uint64_t val; + g_autofree char *tmp = NULL; + char buf[PATH_MAX]; if (kvmppc_find_cpu_dt(buf, sizeof(buf))) { - return -1; + error_setg(errp, "Failed to read CPU property %s", propname); + return 0; } tmp = g_strdup_printf("%s/%s", buf, propname); - val = kvmppc_read_int_dt(tmp); - g_free(tmp); - return val; + return kvmppc_read_int_dt(tmp, errp); } +/* + * Read the clock-frequency from the DT. On error (e.g. + * 'clock-frequency' is not present in the DT) will + * report an error and exit(1). 
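The kvmppc_read_int_dt()/kvmppc_read_int_cpu_dt() changes above follow the common errp shape: return a neutral value, hand the failure to the caller through an out-parameter, and let each call site decide whether it is fatal. A plain-C sketch of that shape (stand-in names, not QEMU's real Error API):

#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* On failure: set *err and return 0, mirroring the errp convention. */
static uint64_t read_u64_file(const char *path, const char **err)
{
    FILE *f = fopen(path, "rb");
    uint64_t v = 0;

    if (!f) {
        *err = strerror(errno);
        return 0;
    }
    if (fread(&v, sizeof(v), 1, f) != 1) {
        *err = "short read";
        v = 0;
    }
    fclose(f);
    return v;
}

int main(void)
{
    const char *err = NULL;
    uint64_t v = read_u64_file("/nonexistent", &err);

    if (err) {
        /* mirrors error_report_err() followed by exit(1) in the caller */
        fprintf(stderr, "fatal: %s\n", err);
        return 1;
    }
    printf("%" PRIu64 "\n", v);
    return 0;
}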
+ */ uint64_t kvmppc_get_clockfreq(void) { - return kvmppc_read_int_cpu_dt("clock-frequency"); + Error *local_err = NULL; + int ret; + + ret = kvmppc_read_int_cpu_dt("clock-frequency", &local_err); + + if (local_err) { + error_report_err(local_err); + exit(1); + } + + return ret; } static int kvmppc_get_dec_bits(void) { - int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits"); + int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits", NULL); if (nr_bits > 0) { return nr_bits; @@ -2335,8 +2351,8 @@ static void alter_insns(uint64_t *word, uint64_t flags, bool on) static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data) { PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc); - uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size"); - uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size"); + uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size", NULL); + uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size", NULL); /* Now fix up the class with information we can query from the host */ pcc->pvr = mfpvr(); @@ -2959,3 +2975,7 @@ bool kvm_arch_cpu_check_are_resettable(void) { return true; } + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/ppc/mem_helper.c b/target/ppc/mem_helper.c index 39945d9ea585e4c9db6967b63afac6bc4f492728..ceb4aa41d79260c5f08f2298595b83e71e46f493 100644 --- a/target/ppc/mem_helper.c +++ b/target/ppc/mem_helper.c @@ -613,10 +613,11 @@ void helper_tbegin(CPUPPCState *env) (1ULL << TEXASR_FAILURE_PERSISTENT) | (1ULL << TEXASR_NESTING_OVERFLOW) | (msr_hv << TEXASR_PRIVILEGE_HV) | - (msr_pr << TEXASR_PRIVILEGE_PR) | + (FIELD_EX64(env->msr, MSR, PR) << TEXASR_PRIVILEGE_PR) | (1ULL << TEXASR_FAILURE_SUMMARY) | (1ULL << TEXASR_TFIAR_EXACT); - env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | msr_pr; + env->spr[SPR_TFIAR] = env->nip | (msr_hv << 1) | + FIELD_EX64(env->msr, MSR, PR); env->spr[SPR_TFHAR] = env->nip + 4; env->crf[0] = 0xB; /* 0b1010 = transaction failure */ } diff --git a/target/ppc/mmu-radix64.c b/target/ppc/mmu-radix64.c index 5b0e62e676dc75d310d67bc58c9965d02e5cfd37..3f016730cdab24eeaf9854324e0d2a4e21d1c552 100644 --- a/target/ppc/mmu-radix64.c +++ b/target/ppc/mmu-radix64.c @@ -171,12 +171,13 @@ static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type, } /* Determine permissions allowed by Encoded Access Authority */ - if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) { + if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && + FIELD_EX64(env->msr, MSR, PR)) { *prot = 0; } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) || partition_scoped) { *prot = ppc_radix64_get_prot_eaa(pte); - } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */ + } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */ *prot = ppc_radix64_get_prot_eaa(pte); *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */ } diff --git a/target/ppc/mmu_common.c b/target/ppc/mmu_common.c index 754509e556c0fa6f85e7305b1e343bff63010fa0..fb1059bcf2c98508a028b13ee27ef21814dfe73e 100644 --- a/target/ppc/mmu_common.c +++ b/target/ppc/mmu_common.c @@ -292,8 +292,8 @@ static inline void bat_size_prot(CPUPPCState *env, target_ulong *blp, bl = (*BATu & 0x00001FFC) << 15; valid = 0; prot = 0; - if (((msr_pr == 0) && (*BATu & 0x00000002)) || - ((msr_pr != 0) && (*BATu & 0x00000001))) { + if ((!FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000002)) || + (FIELD_EX64(env->msr, MSR, PR) && (*BATu & 0x00000001))) { valid = 1; pp = *BATl & 0x00000003; if (pp != 0) { @@ -386,16 +386,17 @@ 
static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, PowerPCCPU *cpu = env_archcpu(env); hwaddr hash; target_ulong vsid; - int ds, pr, target_page_bits; + int ds, target_page_bits; + bool pr; int ret; target_ulong sr, pgidx; - pr = msr_pr; + pr = FIELD_EX64(env->msr, MSR, PR); ctx->eaddr = eaddr; sr = env->sr[eaddr >> 28]; - ctx->key = (((sr & 0x20000000) && (pr != 0)) || - ((sr & 0x40000000) && (pr == 0))) ? 1 : 0; + ctx->key = (((sr & 0x20000000) && pr) || + ((sr & 0x40000000) && !pr)) ? 1 : 0; ds = sr & 0x80000000 ? 1 : 0; ctx->nx = sr & 0x10000000 ? 1 : 0; vsid = sr & 0x00FFFFFF; @@ -404,8 +405,9 @@ static int get_segment_6xx_tlb(CPUPPCState *env, mmu_ctx_t *ctx, "Check segment v=" TARGET_FMT_lx " %d " TARGET_FMT_lx " nip=" TARGET_FMT_lx " lr=" TARGET_FMT_lx " ir=%d dr=%d pr=%d %d t=%d\n", - eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, (int)msr_ir, - (int)msr_dr, pr != 0 ? 1 : 0, access_type == MMU_DATA_STORE, type); + eaddr, (int)(eaddr >> 28), sr, env->nip, env->lr, + (int)msr_ir, (int)msr_dr, pr ? 1 : 0, + access_type == MMU_DATA_STORE, type); pgidx = (eaddr & ~SEGMENT_MASK_256M) >> target_page_bits; hash = vsid ^ pgidx; ctx->ptem = (vsid << 7) | (pgidx >> 10); @@ -566,7 +568,7 @@ static int mmu40x_get_physical_address(CPUPPCState *env, mmu_ctx_t *ctx, ret = -1; raddr = (hwaddr)-1ULL; - pr = msr_pr; + pr = FIELD_EX64(env->msr, MSR, PR); for (i = 0; i < env->nb_tlb; i++) { tlb = &env->tlb.tlbe[i]; if (ppcemb_tlb_check(env, tlb, &raddr, address, @@ -651,7 +653,7 @@ static int mmubooke_check_tlb(CPUPPCState *env, ppcemb_tlb_t *tlb, found_tlb: - if (msr_pr != 0) { + if (FIELD_EX64(env->msr, MSR, PR)) { prot2 = tlb->prot & 0xF; } else { prot2 = (tlb->prot >> 4) & 0xF; @@ -799,7 +801,7 @@ static bool mmubooke206_get_as(CPUPPCState *env, return true; } else { *as_out = msr_ds; - *pr_out = msr_pr; + *pr_out = FIELD_EX64(env->msr, MSR, PR); return false; } } diff --git a/target/ppc/power8-pmu-regs.c.inc b/target/ppc/power8-pmu-regs.c.inc index 73918512382710794da89a32f202b30fc223f280..c58874752b878c0d6fa22da6515ea7430ba1fbd4 100644 --- a/target/ppc/power8-pmu-regs.c.inc +++ b/target/ppc/power8-pmu-regs.c.inc @@ -16,7 +16,7 @@ * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the * PMCs) has problem state read access. * - * Read acccess is granted for all PMCC values but 0b01, where a + * Read access is granted for all PMCC values but 0b01, where a * Facility Unavailable Interrupt will occur. */ static bool spr_groupA_read_allowed(DisasContext *ctx) @@ -33,7 +33,7 @@ static bool spr_groupA_read_allowed(DisasContext *ctx) * Checks whether the Group A SPR (MMCR0, MMCR2, MMCRA, and the * PMCs) has problem state write access. * - * Write acccess is granted for PMCC values 0b10 and 0b11. Userspace + * Write access is granted for PMCC values 0b10 and 0b11. Userspace * writing with PMCC 0b00 will generate a Hypervisor Emulation * Assistance Interrupt. Userspace writing with PMCC 0b01 will * generate a Facility Unavailable Interrupt. diff --git a/target/ppc/translate.c b/target/ppc/translate.c index 9960df6e18397ae273251306b35e19d18940eaac..a03bafadbc84d572448522b59934dcdd78766f10 100644 --- a/target/ppc/translate.c +++ b/target/ppc/translate.c @@ -7290,6 +7290,14 @@ static inline void get_fpr(TCGv_i64 dst, int regno) static inline void set_fpr(int regno, TCGv_i64 src) { tcg_gen_st_i64(src, cpu_env, fpr_offset(regno)); + /* + * Before PowerISA v3.1 the result of doubleword 1 of the VSR + * corresponding to the target FPR was undefined. 
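The msr_pr accesses in these hunks are converted to the registerfields API enabled by the FIELD(MSR, PR, MSR_PR, 1) declaration added to cpu.h earlier in this series. A simplified stand-in for what the FIELD_EX64() extraction amounts to (the real macros live in hw/registerfields.h; MSR_PR is bit 14 in QEMU's PowerPC numbering):

#include <assert.h>
#include <stdint.h>

/* Simplified stand-ins for what FIELD(MSR, PR, MSR_PR, 1) generates. */
#define MSR_PR 14
#define R_MSR_PR_SHIFT MSR_PR
#define R_MSR_PR_LENGTH 1
#define FIELD_EX64(v, reg, fld) \
    (((v) >> R_##reg##_##fld##_SHIFT) & \
     ((1ULL << R_##reg##_##fld##_LENGTH) - 1))

int main(void)
{
    uint64_t msr = 1ULL << MSR_PR;          /* problem state (user mode) set */

    assert(FIELD_EX64(msr, MSR, PR) == 1);
    assert(FIELD_EX64((uint64_t)0, MSR, PR) == 0);
    return 0;
}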
However, + * most (if not all) real hardware were setting the result to 0. + * Starting at ISA v3.1, the result for doubleword 1 is now defined + * to be 0. + */ + tcg_gen_st_i64(tcg_constant_i64(0), cpu_env, vsr64_offset(regno, false)); } static inline void get_avr64(TCGv_i64 dst, int regno, bool high) @@ -8372,8 +8380,6 @@ static bool decode_legacy(PowerPCCPU *cpu, DisasContext *ctx, uint32_t insn) opc_handler_t **table, *handler; uint32_t inval; - ctx->opcode = insn; - LOG_DISAS("translate opcode %08x (%02x %02x %02x %02x) (%s)\n", insn, opc1(insn), opc2(insn), opc3(insn), opc4(insn), ctx->le_mode ? "little" : "big"); @@ -8502,6 +8508,7 @@ static void ppc_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs) ctx->base.pc_next = pc += 4; if (!is_prefix_insn(ctx, insn)) { + ctx->opcode = insn; ok = (decode_insn32(ctx, insn) || decode_legacy(cpu, ctx, insn)); } else if ((pc & 63) == 0) { diff --git a/target/ppc/translate/vmx-impl.c.inc b/target/ppc/translate/vmx-impl.c.inc index 8eb8d3a06720b3f5e4a2e5b35539ae1f6dbb7034..f56f061d188e2b11f957a0f14fcda74f7314c72f 100644 --- a/target/ppc/translate/vmx-impl.c.inc +++ b/target/ppc/translate/vmx-impl.c.inc @@ -127,7 +127,7 @@ static void gen_stve##name(DisasContext *ctx) \ } GEN_VR_LDX(lvx, 0x07, 0x03); -/* As we don't emulate the cache, lvxl is stricly equivalent to lvx */ +/* As we don't emulate the cache, lvxl is strictly equivalent to lvx */ GEN_VR_LDX(lvxl, 0x07, 0x0B); GEN_VR_LVE(bx, 0x07, 0x00, 1); @@ -135,7 +135,7 @@ GEN_VR_LVE(hx, 0x07, 0x01, 2); GEN_VR_LVE(wx, 0x07, 0x02, 4); GEN_VR_STX(svx, 0x07, 0x07); -/* As we don't emulate the cache, stvxl is stricly equivalent to stvx */ +/* As we don't emulate the cache, stvxl is strictly equivalent to stvx */ GEN_VR_STX(svxl, 0x07, 0x0F); GEN_VR_STVE(bx, 0x07, 0x04, 1); diff --git a/target/riscv/op_helper.c b/target/riscv/op_helper.c index ee7c24efe770631db7d882e852ec44e126bda97a..58d992e98a298e75d4a6a5c3fb4eefde9e1ee4ef 100644 --- a/target/riscv/op_helper.c +++ b/target/riscv/op_helper.c @@ -146,7 +146,8 @@ target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb) uint64_t mstatus = env->mstatus; target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP); - if (!pmp_get_num_rules(env) && (prev_priv != PRV_M)) { + if (riscv_feature(env, RISCV_FEATURE_PMP) && + !pmp_get_num_rules(env) && (prev_priv != PRV_M)) { riscv_raise_exception(env, RISCV_EXCP_ILLEGAL_INST, GETPC()); } diff --git a/target/rx/translate.c b/target/rx/translate.c index 5db8f79a82e443cb6bdaa977eb98fe072df5f6c6..aacf59d5cb3615e10a037ffb332ce660cd2eb65d 100644 --- a/target/rx/translate.c +++ b/target/rx/translate.c @@ -82,7 +82,8 @@ static uint32_t decode_load_bytes(DisasContext *ctx, uint32_t insn, static uint32_t li(DisasContext *ctx, int sz) { - int32_t tmp, addr; + target_ulong addr; + uint32_t tmp; CPURXState *env = ctx->env; addr = ctx->base.pc_next; diff --git a/target/s390x/cpu_models.c b/target/s390x/cpu_models.c index 11e06cc51fab915e70e588372457c53e448591a6..65908c6d368fa6489bff74240702faa29f951ba4 100644 --- a/target/s390x/cpu_models.c +++ b/target/s390x/cpu_models.c @@ -334,18 +334,31 @@ const S390CPUDef *s390_find_cpu_def(uint16_t type, uint8_t gen, uint8_t ec_ga, static void s390_print_cpu_model_list_entry(gpointer data, gpointer user_data) { const S390CPUClass *scc = S390_CPU_CLASS((ObjectClass *)data); + CPUClass *cc = CPU_CLASS(scc); char *name = g_strdup(object_class_get_name((ObjectClass *)data)); - const char *details = ""; + g_autoptr(GString) details = g_string_new(""); if (scc->is_static) { - 
details = "(static, migration-safe)"; - } else if (scc->is_migration_safe) { - details = "(migration-safe)"; + g_string_append(details, "static, "); + } + if (scc->is_migration_safe) { + g_string_append(details, "migration-safe, "); + } + if (cc->deprecation_note) { + g_string_append(details, "deprecated, "); + } + if (details->len) { + /* cull trailing ', ' */ + g_string_truncate(details, details->len - 2); } /* strip off the -s390x-cpu */ g_strrstr(name, "-" TYPE_S390_CPU)[0] = 0; - qemu_printf("s390 %-15s %-35s %s\n", name, scc->desc, details); + if (details->len) { + qemu_printf("s390 %-15s %-35s (%s)\n", name, scc->desc, details->str); + } else { + qemu_printf("s390 %-15s %-35s\n", name, scc->desc); + } g_free(name); } diff --git a/target/s390x/cpu_models.h b/target/s390x/cpu_models.h index 74d1f87e4fd5fca9791abc4bf9e36b3341e5fb63..fb1adc8b210ba803f8ce23cd21f9ec88efc24b15 100644 --- a/target/s390x/cpu_models.h +++ b/target/s390x/cpu_models.h @@ -24,13 +24,13 @@ struct S390CPUDef { uint8_t gen; /* hw generation identification */ uint16_t type; /* cpu type identification */ uint8_t ec_ga; /* EC GA version (on which also the BC is based) */ - uint8_t mha_pow; /* Maximum Host Adress Power, mha = 2^pow-1 */ + uint8_t mha_pow; /* maximum host address power, mha = 2^pow-1 */ uint32_t hmfai; /* hypervisor-managed facilities */ /* base/min features, must never be changed between QEMU versions */ S390FeatBitmap base_feat; /* used to init base_feat from generated data */ S390FeatInit base_init; - /* deafault features, QEMU version specific */ + /* default features, QEMU version specific */ S390FeatBitmap default_feat; /* used to init default_feat from generated data */ S390FeatInit default_init; diff --git a/target/s390x/ioinst.c b/target/s390x/ioinst.c index bdae5090bc8c3c03e7f84ac8be6fd8712b983b5f..e6347d1801cd04c0cc51b86b59083b88875e96b7 100644 --- a/target/s390x/ioinst.c +++ b/target/s390x/ioinst.c @@ -285,7 +285,7 @@ void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb, /* * As operand exceptions have a lower priority than access exceptions, * we check whether the memory area is writeable (injecting the - * access execption if it is not) first. + * access exception if it is not) first. 
*/ if (!s390_cpu_virt_mem_check_write(cpu, addr, ar, sizeof(schib))) { s390_program_interrupt(env, PGM_OPERAND, ra); diff --git a/target/s390x/kvm/kvm.c b/target/s390x/kvm/kvm.c index 5b1fdb55c47747f1145fe72af88391b0d92515b8..671d0f179c7524f68773b9501091de6698a352f5 100644 --- a/target/s390x/kvm/kvm.c +++ b/target/s390x/kvm/kvm.c @@ -2562,3 +2562,7 @@ bool kvm_arch_cpu_check_are_resettable(void) { return true; } + +void kvm_arch_accel_class_init(ObjectClass *oc) +{ +} diff --git a/target/s390x/tcg/excp_helper.c b/target/s390x/tcg/excp_helper.c index 4e7648f301b3a415fccc649c98b73c97eb5b3fbb..6a4f7585b891e5a249350a675988d3f9738f0f78 100644 --- a/target/s390x/tcg/excp_helper.c +++ b/target/s390x/tcg/excp_helper.c @@ -551,7 +551,7 @@ try_deliver: /* don't trigger a cpu_loop_exit(), use an interrupt instead */ cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT); } else if (cs->halted) { - /* unhalt if we had a WAIT PSW somehwere in our injection chain */ + /* unhalt if we had a WAIT PSW somewhere in our injection chain */ s390_cpu_unhalt(cpu); } } diff --git a/target/s390x/tcg/fpu_helper.c b/target/s390x/tcg/fpu_helper.c index 40672054052ad19d5ded05671bd87f7d61a2b474..be80b2373c3618d76f1f2f9027935f807bf8ddb3 100644 --- a/target/s390x/tcg/fpu_helper.c +++ b/target/s390x/tcg/fpu_helper.c @@ -89,7 +89,7 @@ static void handle_exceptions(CPUS390XState *env, bool XxC, uintptr_t retaddr) /* * invalid/divbyzero cannot coexist with other conditions. * overflow/underflow however can coexist with inexact, we have to - * handle it separatly. + * handle it separately. */ if (s390_exc & ~S390_IEEE_MASK_INEXACT) { if (s390_exc & ~S390_IEEE_MASK_INEXACT & env->fpc >> 24) { diff --git a/target/s390x/tcg/insn-data.def b/target/s390x/tcg/insn-data.def index 3e5594210c883211da9916000ca9a7fd8b9c1dd6..149f2577be4418f979aba6c13f336cf9a88725ae 100644 --- a/target/s390x/tcg/insn-data.def +++ b/target/s390x/tcg/insn-data.def @@ -463,7 +463,7 @@ C(0xe39f, LAT, RXY_a, LAT, 0, m2_32u, r1, 0, lat, 0) C(0xe385, LGAT, RXY_a, LAT, 0, a2, r1, 0, lgat, 0) /* LOAD AND ZERO RIGHTMOST BYTE */ - C(0xe3eb, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0) + C(0xe33b, LZRF, RXY_a, LZRB, 0, m2_32u, new, r1_32, lzrb, 0) C(0xe32a, LZRG, RXY_a, LZRB, 0, m2_64, r1, 0, lzrb, 0) /* LOAD LOGICAL AND ZERO RIGHTMOST BYTE */ C(0xe33a, LLZRGF, RXY_a, LZRB, 0, m2_32u, r1, 0, lzrb, 0) diff --git a/target/s390x/tcg/misc_helper.c b/target/s390x/tcg/misc_helper.c index aab9c47747eefd9dd96ff19ce247a3f7d1653e8a..7a975aaf94695ad99f475b30a27050337949b8a3 100644 --- a/target/s390x/tcg/misc_helper.c +++ b/target/s390x/tcg/misc_helper.c @@ -326,7 +326,7 @@ uint32_t HELPER(stsi)(CPUS390XState *env, uint64_t a0, uint64_t r0, uint64_t r1) /* same as machine type number in STORE CPU ID, but in EBCDIC */ snprintf(type, ARRAY_SIZE(type), "%X", cpu->model->def->type); ebcdic_put(sysib.sysib_111.type, type, 4); - /* model number (not stored in STORE CPU ID for z/Architecure) */ + /* model number (not stored in STORE CPU ID for z/Architecture) */ ebcdic_put(sysib.sysib_111.model, "QEMU ", 16); ebcdic_put(sysib.sysib_111.sequence, "QEMU ", 16); ebcdic_put(sysib.sysib_111.plant, "QEMU", 4); diff --git a/target/s390x/tcg/translate.c b/target/s390x/tcg/translate.c index dcc249a197cea6c872435b8e9fc447af5305789e..62fbc90d5e43fc128ae87a2ddb152f87a4fab8ac 100644 --- a/target/s390x/tcg/translate.c +++ b/target/s390x/tcg/translate.c @@ -434,7 +434,7 @@ static void gen_program_exception(DisasContext *s, int code) { TCGv_i32 tmp; - /* Remember what pgm exeption this was. 
*/ + /* Remember what pgm exception this was. */ tmp = tcg_const_i32(code); tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUS390XState, int_pgm_code)); tcg_temp_free_i32(tmp); @@ -490,7 +490,7 @@ static TCGv_i64 get_address(DisasContext *s, int x2, int b2, int d2) /* * Note that d2 is limited to 20 bits, signed. If we crop negative - * displacements early we create larger immedate addends. + * displacements early we create larger immediate addends. */ if (b2 && x2) { tcg_gen_add_i64(tmp, regs[b2], regs[x2]); diff --git a/target/s390x/tcg/translate_vx.c.inc b/target/s390x/tcg/translate_vx.c.inc index 28bf5a23b68b73e8978b3ecd3ba66a975321a792..d1fe4df1b5bc11e2760155d045d7ccfb8143e86e 100644 --- a/target/s390x/tcg/translate_vx.c.inc +++ b/target/s390x/tcg/translate_vx.c.inc @@ -797,7 +797,7 @@ static DisasJumpType op_vpk(DisasContext *s, DisasOps *o) } break; case 0x94: - /* If sources and destination dont't overlap -> fast path */ + /* If sources and destination don't overlap -> fast path */ if (v1 != v2 && v1 != v3) { const uint8_t src_es = get_field(s, m4); const uint8_t dst_es = src_es - 1; @@ -1793,7 +1793,7 @@ static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) l2 = tcg_temp_new_i64(); h2 = tcg_temp_new_i64(); - /* Multipy both even elements from v2 and v3 */ + /* Multiply both even elements from v2 and v3 */ read_vec_element_i64(l1, get_field(s, v2), 0, ES_64); read_vec_element_i64(h1, get_field(s, v3), 0, ES_64); tcg_gen_mulu2_i64(l1, h1, l1, h1); @@ -1802,7 +1802,7 @@ static DisasJumpType op_vmsl(DisasContext *s, DisasOps *o) tcg_gen_add2_i64(l1, h1, l1, h1, l1, h1); } - /* Multipy both odd elements from v2 and v3 */ + /* Multiply both odd elements from v2 and v3 */ read_vec_element_i64(l2, get_field(s, v2), 1, ES_64); read_vec_element_i64(h2, get_field(s, v3), 1, ES_64); tcg_gen_mulu2_i64(l2, h2, l2, h2); diff --git a/target/s390x/tcg/vec_fpu_helper.c b/target/s390x/tcg/vec_fpu_helper.c index 1a779934715f483d03e6efcc177d647fddc6fd39..d1249706f9a7f394d3cd20a962a16a74b6195567 100644 --- a/target/s390x/tcg/vec_fpu_helper.c +++ b/target/s390x/tcg/vec_fpu_helper.c @@ -794,7 +794,7 @@ static S390MinMaxRes vfmin_res(uint16_t dcmask_a, uint16_t dcmask_b, default: g_assert_not_reached(); } - } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { + } else if (unlikely((dcmask_a & DCMASK_ZERO) && (dcmask_b & DCMASK_ZERO))) { switch (type) { case S390_MINMAX_TYPE_JAVA: return neg_a ? 
S390_MINMAX_RES_A : S390_MINMAX_RES_B; @@ -844,7 +844,7 @@ static S390MinMaxRes vfmax_res(uint16_t dcmask_a, uint16_t dcmask_b, default: g_assert_not_reached(); } - } else if (unlikely(dcmask_a & dcmask_b & DCMASK_ZERO)) { + } else if (unlikely((dcmask_a & DCMASK_ZERO) && (dcmask_b & DCMASK_ZERO))) { const bool neg_a = dcmask_a & DCMASK_NEGATIVE; switch (type) { diff --git a/target/sw64/Kconfig b/target/sw64/Kconfig new file mode 100644 index 0000000000000000000000000000000000000000..ad50b9677e553c84e275134f40383e375956b6f1 --- /dev/null +++ b/target/sw64/Kconfig @@ -0,0 +1,2 @@ +config SW64 + bool diff --git a/target/sw64/Makefile.objs b/target/sw64/Makefile.objs new file mode 100644 index 0000000000000000000000000000000000000000..c702eaa26db53fc0bd58eca052f874bd179a77bc --- /dev/null +++ b/target/sw64/Makefile.objs @@ -0,0 +1,5 @@ +obj-$(CONFIG_SOFTMMU) += machine.o +obj-y += cpu.o translate.o profile.o helper.o +obj-y += int_helper.o float_helper.o simd_helper.o helper.o exception.o +obj-$(CONFIG_KVM) += kvm.o +obj-y += gdbstub.o diff --git a/target/sw64/cpu-param.h b/target/sw64/cpu-param.h new file mode 100644 index 0000000000000000000000000000000000000000..464cfb3dc1b707508ee521ae61bc03a172917026 --- /dev/null +++ b/target/sw64/cpu-param.h @@ -0,0 +1,18 @@ +/* + * SW64 cpu parameters for qemu. + * + * Copyright (c) 2018 Lin Hainan + */ + +#ifndef SW64_CPU_PARAM_H +#define SW64_CPU_PARAM_H 1 + +#define TARGET_LONG_BITS 64 +#define TARGET_PAGE_BITS 13 + +#define TARGET_VIRT_ADDR_SPACE_BITS 64 +#define TARGET_PHYS_ADDR_SPACE_BITS 48 + +#define NB_MMU_MODES 4 + +#endif diff --git a/target/sw64/cpu-qom.h b/target/sw64/cpu-qom.h new file mode 100644 index 0000000000000000000000000000000000000000..b093c2bec81d1a001228fb51d7ff4e625fa72e44 --- /dev/null +++ b/target/sw64/cpu-qom.h @@ -0,0 +1,47 @@ +/* + * QEMU SW64 CPU + * + * Copyright (c) 2018 Lin Hainan + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ +#ifndef QEMU_SW64_CPU_QOM +#define QEMU_SW64_CPU_QOM + +#include "hw/core/cpu.h" + +#define TYPE_SW64_CPU "sw64-cpu" + +#define SW64_CPU_CLASS(kclass) \ + OBJECT_CLASS_CHECK(SW64CPUClass, (kclass), TYPE_SW64_CPU) +#define SW64_CPU(obj) \ + OBJECT_CHECK(SW64CPU, (obj), TYPE_SW64_CPU) +#define SW64_CPU_GET_CLASS(obj) \ + OBJECT_GET_CLASS(SW64CPUClass, (obj), TYPE_SW64_CPU) + +/** + * SW64CPUClass: + * @parent_realize: The parent class' realize handler. + * @parent_reset: The parent class' reset handler. + * + * An SW64 CPU model. 
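The parent_realize/parent_reset fields cached in this class follow QEMU's usual override-and-chain pattern: class init saves the parent's handler before installing its own, and the override calls the saved pointer when its extra work is done. The shape of it, stripped of the QOM machinery (illustrative stand-in types, not the SW64 code itself):

#include <stdio.h>

typedef void (*ResetFn)(void);

typedef struct {
    ResetFn reset;          /* current (overriding) handler */
    ResetFn parent_reset;   /* saved parent handler */
} CpuClass;

static CpuClass cc;

static void base_reset(void)
{
    puts("base reset");
}

/* Chains to the saved parent, as sw64_cpu_reset() calls scc->parent_reset(). */
static void child_reset(void)
{
    puts("child-specific reset work");
    cc.parent_reset();
}

int main(void)
{
    /* what device_class_set_parent_reset() records behind the scenes */
    cc.parent_reset = base_reset;
    cc.reset = child_reset;

    cc.reset();
    return 0;
}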
+ */ +typedef struct SW64CPUClass { + /* private */ + CPUClass parent_class; + /* public */ + DeviceRealize parent_realize; + DeviceReset parent_reset; +} SW64CPUClass; + +typedef struct SW64CPU SW64CPU; +#endif diff --git a/target/sw64/cpu.c b/target/sw64/cpu.c new file mode 100644 index 0000000000000000000000000000000000000000..8987361346ca9cf082529b03f7f57883991ac262 --- /dev/null +++ b/target/sw64/cpu.c @@ -0,0 +1,326 @@ +/* + * QEMU SW64 CPU + * + * Copyright (c) 2018 Lin Hainan + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + */ + +#include "qemu/osdep.h" +#include "qapi/error.h" +#include "qemu/qemu-print.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "sysemu/kvm.h" +#include "disas/dis-asm.h" +#include "kvm_sw64.h" +#include "sysemu/reset.h" +#include "hw/qdev-properties.h" + +static void sw64_cpu_set_pc(CPUState *cs, vaddr value) +{ + SW64CPU *cpu = SW64_CPU(cs); + + cpu->env.pc = value; +} + +static void sw64_cpu_dump_state(CPUState *cs, FILE *f, int flags) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + int i; + + static const char ireg_names[31][4] = { + "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", + "s2", "s3", "s4", "s5", "fp", "a0", "a1", "a2", "a3", "a4", "a5", + "t8", "t9", "t10", "t11", "ra", "t12", "at", "gp", "sp"}; + static const char freg_names[128][4] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", + "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", + "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", + "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", + "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", + "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", + "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", + "f26", "f27", "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", + "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", + "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"}; + qemu_fprintf(f, "PC=%016" PRIx64 " SP=%016" PRIx64 "\n", env->pc, + env->ir[IDX_SP]); + for (i = 0; i < 31; i++) { + qemu_fprintf(f, "%s=%016" PRIx64, ireg_names[i], env->ir[i]); + if ((i % 4) == 3) { + qemu_fprintf(f, "\n"); + } else { + qemu_fprintf(f, " "); + } + } + qemu_fprintf(f, "\n"); +#ifndef CONFIG_USER_ONLY + static const char sreg_names[10][4] = {"p1", "p2", "p4", "p5", "p6", + "p7", "p20", "p21", "p22", "p23"}; + for (i = 0; i < 10; i++) { + qemu_fprintf(f, "%s=%016" PRIx64, sreg_names[i], env->sr[i]); + if ((i % 4) == 3) { + qemu_fprintf(f, "\n"); + } else { + qemu_fprintf(f, " "); + } + } + qemu_fprintf(f, "\n"); +#endif + for (i = 0; i < 32; i++) { + qemu_fprintf(f, "%s=%016" PRIx64, freg_names[i + 96], env->fr[i + 96]); + qemu_fprintf(f, " %016" PRIx64, env->fr[i + 64]); + qemu_fprintf(f, " %016" 
PRIx64, env->fr[i + 32]);
+        qemu_fprintf(f, " %016" PRIx64, env->fr[i]);
+        qemu_fprintf(f, "\n");
+    }
+    qemu_fprintf(f, "\n");
+}
+
+#ifndef CONFIG_USER_ONLY
+static void sw64_machine_cpu_reset(void *opaque)
+{
+    SW64CPU *cpu = opaque;
+
+    cpu_reset(CPU(cpu));
+}
+#endif
+
+static void sw64_cpu_realizefn(DeviceState *dev, Error **errp)
+{
+    CPUState *cs = CPU(dev);
+    SW64CPUClass *scc = SW64_CPU_GET_CLASS(dev);
+    Error *local_err = NULL;
+
+    cpu_exec_realizefn(cs, &local_err);
+    if (local_err != NULL) {
+        error_propagate(errp, local_err);
+        return;
+    }
+#ifndef CONFIG_USER_ONLY
+    qemu_register_reset(sw64_machine_cpu_reset, cs);
+#endif
+
+    qemu_init_vcpu(cs);
+
+    scc->parent_realize(dev, errp);
+}
+
+static void sw64_cpu_disas_set_info(CPUState *cs, disassemble_info *info)
+{
+    info->mach = bfd_mach_sw_64_core3;
+    info->print_insn = print_insn_sw_64;
+}
+
+#include "fpu/softfloat.h"
+
+static void core3_init(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+    CPUSW64State *env = cs->env_ptr;
+#ifdef CONFIG_USER_ONLY
+    env->fpcr = 0x680e800000000000;
+#endif
+    set_feature(env, SW64_FEATURE_CORE3);
+}
+
+static ObjectClass *sw64_cpu_class_by_name(const char *cpu_model)
+{
+    ObjectClass *oc;
+    char *typename;
+    char **cpuname;
+
+    cpuname = g_strsplit(cpu_model, ",", 1);
+    typename = g_strdup_printf(SW64_CPU_TYPE_NAME("%s"), cpu_model);
+
+    oc = object_class_by_name(typename);
+    g_strfreev(cpuname);
+    g_free(typename);
+
+    if (oc && object_class_dynamic_cast(oc, TYPE_SW64_CPU) &&
+        !object_class_is_abstract(oc)) {
+        return oc;
+    }
+    return NULL;
+}
+
+bool sw64_cpu_has_work(CPUState *cs)
+{
+    /*
+     * If the CPU has gone to sleep (halt), it may be woken up by a
+     * hard interrupt, timer, II, mail or mchk interrupt.
+     */
+    return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER |
+                                    CPU_INTERRUPT_II0 | CPU_INTERRUPT_MCHK);
+}
+
+static void sw64_cpu_initfn(Object *obj)
+{
+    CPUState *cs = CPU(obj);
+    SW64CPU *cpu = SW64_CPU(obj);
+    CPUSW64State *env = &cpu->env;
+
+    cpu_set_cpustate_pointers(cpu);
+
+    cs->env_ptr = env;
+#ifndef CONFIG_USER_ONLY
+    env->flags = ENV_FLAG_HM_MODE;
+#else
+    env->flags = ENV_FLAG_PS_USER;
+#endif
+    tlb_flush(cs);
+}
+
+#ifndef CONFIG_USER_ONLY
+static void sw64_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr addr,
+                                           unsigned size, MMUAccessType access_type,
+                                           int mmu_idx, MemTxAttrs attrs,
+                                           MemTxResult response, uintptr_t retaddr)
+{
+#ifdef DEBUG_TRANS
+    /* The debug printout below needs env, which the original code omitted. */
+    CPUSW64State *env = &SW64_CPU(cs)->env;
+
+    if (retaddr) {
+        cpu_restore_state(cs, retaddr, true);
+    }
+    fprintf(stderr, "PC = %lx, Wrong IO addr. 
Hwaddr = %lx, vaddr = %lx, access_type = %d\n", + env->pc, physaddr, addr, access_type); +#endif +} +#endif + +static void sw64_cpu_reset(DeviceState *dev) +{ + CPUState *s = CPU(dev); + SW64CPU *cpu = SW64_CPU(s); + SW64CPUClass *scc = SW64_CPU_GET_CLASS(cpu); + + scc->parent_reset(dev); + +#ifndef CONFIG_USER_ONLY + if (kvm_enabled()) { + kvm_sw64_reset_vcpu(cpu); + } +#endif +} + +static Property sw64_cpu_properties[] = { +#ifdef CONFIG_USER_ONLY + /* apic_id = 0 by default for *-user, see commit 9886e834 */ + DEFINE_PROP_UINT32("cid", SW64CPU, cid, 0), +#else + DEFINE_PROP_UINT32("cid", SW64CPU, cid, 0xFFFFFFFF), +#endif + DEFINE_PROP_END_OF_LIST() +}; + +#ifndef CONFIG_USER_ONLY +#include "hw/core/sysemu-cpu-ops.h" + +static const struct SysemuCPUOps sw64_sysemu_ops = { + .get_phys_page_debug = sw64_cpu_get_phys_page_debug, +}; +#endif + +#include "hw/core/tcg-cpu-ops.h" + +static const struct TCGCPUOps sw64_tcg_ops = { + .initialize = sw64_translate_init, + +#ifndef CONFIG_USER_ONLY + .tlb_fill = sw64_cpu_tlb_fill, + .do_unaligned_access = sw64_cpu_do_unaligned_access, + .cpu_exec_interrupt = sw64_cpu_exec_interrupt, + .do_transaction_failed = sw64_cpu_do_transaction_failed, + .do_interrupt = sw64_cpu_do_interrupt, +#endif /* !CONFIG_USER_ONLY */ +}; + +static void sw64_cpu_class_init(ObjectClass *oc, void *data) +{ + DeviceClass *dc = DEVICE_CLASS(oc); + CPUClass *cc = CPU_CLASS(oc); + SW64CPUClass *scc = SW64_CPU_CLASS(oc); + + device_class_set_parent_realize(dc, sw64_cpu_realizefn, &scc->parent_realize); + device_class_set_parent_reset(dc, sw64_cpu_reset, &scc->parent_reset); + device_class_set_props(dc, sw64_cpu_properties); + + cc->class_by_name = sw64_cpu_class_by_name; +#ifndef CONFIG_USER_ONLY + dc->vmsd = &vmstate_sw64_cpu; + cc->sysemu_ops = &sw64_sysemu_ops; +#endif + cc->has_work = sw64_cpu_has_work; + cc->set_pc = sw64_cpu_set_pc; + cc->disas_set_info = sw64_cpu_disas_set_info; + cc->dump_state = sw64_cpu_dump_state; + + cc->gdb_read_register = sw64_cpu_gdb_read_register; + cc->gdb_write_register = sw64_cpu_gdb_write_register; + cc->gdb_num_core_regs = 67; + cc->gdb_core_xml_file = "sw64-core.xml"; + + cc->tcg_ops = &sw64_tcg_ops; +} + +static const SW64CPUInfo sw64_cpus[] = +{ + { + .name = "core3", + .initfn = core3_init, + }, + { + .name = NULL + }, +}; + +static void cpu_register(const SW64CPUInfo *info) +{ + TypeInfo type_info = { + .parent = TYPE_SW64_CPU, + .instance_size = sizeof(SW64CPU), + .instance_init = info->initfn, + .class_size = sizeof(SW64CPUClass), + .class_init = info->class_init, + }; + + type_info.name = g_strdup_printf("%s-" TYPE_SW64_CPU, info->name); + type_register(&type_info); + g_free((void*)type_info.name); +} + +static const TypeInfo sw64_cpu_type_info = { + .name = TYPE_SW64_CPU, + .parent = TYPE_CPU, + .instance_size = sizeof(SW64CPU), + .instance_init = sw64_cpu_initfn, + .abstract = true, + .class_size = sizeof(SW64CPUClass), + .class_init = sw64_cpu_class_init, +}; + +static void sw64_cpu_register_types(void) +{ + const SW64CPUInfo *info = sw64_cpus; + + type_register_static(&sw64_cpu_type_info); + + while (info->name) { + cpu_register(info); + info++; + } +} + +type_init(sw64_cpu_register_types) diff --git a/target/sw64/cpu.h b/target/sw64/cpu.h new file mode 100644 index 0000000000000000000000000000000000000000..4e14891e849ee7cf59410f72a10e14d2db8e5bc8 --- /dev/null +++ b/target/sw64/cpu.h @@ -0,0 +1,417 @@ +/* + * SW64 emulation cpu definitions for qemu. 
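The registration code above derives each concrete QOM typename from the model name at runtime. A minimal standalone sketch of that mapping (not part of the patch; it only mirrors the string handling in cpu_register() and SW64_CPU_TYPE_NAME):

```c
#include <stdio.h>

#define TYPE_SW64_CPU "sw64-cpu"
#define SW64_CPU_TYPE_NAME(name) (name "-" TYPE_SW64_CPU)

int main(void)
{
    char buf[64];

    /* cpu_register(): model "core3" -> QOM type "core3-sw64-cpu" */
    snprintf(buf, sizeof(buf), "%s-" TYPE_SW64_CPU, "core3");
    printf("%s\n", buf);

    /* sw64_cpu_class_by_name() rebuilds the same name to look it up: */
    printf("%s\n", SW64_CPU_TYPE_NAME("core3"));
    return 0;
}
```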
+ * + * Copyright (c) 2018 Lin Hainan + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + */ +#ifndef SW64_CPU_H +#define SW64_CPU_H + +#include "cpu-qom.h" +#include "fpu/softfloat.h" +#include "profile.h" + +/* QEMU addressing/paging config */ +#define TARGET_PAGE_BITS 13 +#define TARGET_LONG_BITS 64 +#define TARGET_LEVEL_BITS 10 +//#define ALIGNED_ONLY + +#include "exec/cpu-defs.h" + +/* FIXME: LOCKFIX */ +#define SW64_FIXLOCK 1 + +/* swcore processors have a weak memory model */ +#define TCG_GUEST_DEFAULT_MO (0) + +#define SOFTMMU 1 + +#ifndef CONFIG_USER_ONLY +#define MMU_MODE0_SUFFIX _phys +#define MMU_MODE3_SUFFIX _user +#define MMU_MODE2_SUFFIX _kernel +#endif +#define MMU_PHYS_IDX 0 +#define MMU_KERNEL_IDX 2 +#define MMU_USER_IDX 3 + +/* FIXME:Bits 4 and 5 are the mmu mode. The VMS hmcode uses all 4 modes; + The Unix hmcode only uses bit 4. */ +#define PS_USER_MODE 8u + +#define ENV_FLAG_HM_SHIFT 0 +#define ENV_FLAG_PS_SHIFT 8 +#define ENV_FLAG_FEN_SHIFT 24 + +#define ENV_FLAG_HM_MODE (1u << ENV_FLAG_HM_SHIFT) +#define ENV_FLAG_PS_USER (PS_USER_MODE << ENV_FLAG_PS_SHIFT) +#define ENV_FLAG_FEN (1u << ENV_FLAG_FEN_SHIFT) + +#define MCU_CLOCK 25000000 + +#define init_pc 0xffffffff80011100 + +typedef struct CPUSW64State CPUSW64State; +typedef CPUSW64State CPUArchState; +typedef SW64CPU ArchCPU; + +struct CPUSW64State { + uint64_t ir[32]; + uint64_t fr[128]; + uint64_t pc; + bool is_slave; + + uint64_t csr[0x100]; + uint64_t fpcr; + uint64_t fpcr_exc_enable; + uint8_t fpcr_round_mode; + uint8_t fpcr_flush_to_zero; + + float_status fp_status; + + uint64_t hm_entry; + +#if !defined(CONFIG_USER_ONLY) + uint64_t sr[10]; /* shadow regs 1,2,4-7,20-23 */ +#endif + + uint32_t flags; + uint64_t error_code; + uint64_t unique; + uint64_t lock_addr; + uint64_t lock_valid; + uint64_t lock_flag; + uint64_t lock_success; +#ifdef SW64_FIXLOCK + uint64_t lock_value; +#endif + + uint64_t trap_arg0; + uint64_t trap_arg1; + uint64_t trap_arg2; + + uint64_t features; + uint64_t insn_count[537]; + + /* reserve for slave */ + uint64_t ca[4]; + uint64_t scala_gpr[64]; + uint64_t vec_gpr[224]; + uint64_t fpcr_base; + uint64_t fpcr_ext; + uint64_t pendding_flag; + uint64_t pendding_status; + uint64_t synr_pendding_status; + uint64_t sync_pendding_status; + uint8_t vlenma_idxa; + uint8_t stable; +}; +#define SW64_FEATURE_CORE3 0x2 + +static inline void set_feature(CPUSW64State *env, int feature) +{ + env->features |= feature; +} + +/** + * SW64CPU: + * @env: #CPUSW64State + * + * An SW64 CPU + */ +struct SW64CPU { + /*< private >*/ + CPUState parent_obj; + /*< public >*/ + CPUNegativeOffsetState neg; + CPUSW64State env; + + uint64_t k_regs[158]; + uint64_t k_vcb[48]; + QEMUTimer *alarm_timer; + target_ulong irq; + uint32_t cid; +}; + +enum { + IDX_V0 = 0, + IDX_T0 = 1, + IDX_T1 = 2, + IDX_T2 = 3, + IDX_T3 = 4, + IDX_T4 = 5, + IDX_T5 = 6, + IDX_T6 = 7, + IDX_T7 = 8, + IDX_S0 = 9, + IDX_S1 = 10, + IDX_S2 = 11, + IDX_S3 = 12, + IDX_S4 = 13, + IDX_S5 = 14, + IDX_S6 = 15, + IDX_FP = IDX_S6, + IDX_A0 = 16, + IDX_A1 = 17, + IDX_A2 = 18, + IDX_A3 = 
19, + IDX_A4 = 20, + IDX_A5 = 21, + IDX_T8 = 22, + IDX_T9 = 23, + IDX_T10 = 24, + IDX_T11 = 25, + IDX_RA = 26, + IDX_T12 = 27, + IDX_PV = IDX_T12, + IDX_AT = 28, + IDX_GP = 29, + IDX_SP = 30, + IDX_ZERO = 31, +}; + +enum { + MM_K_TNV = 0x0, + MM_K_ACV = 0x1, + MM_K_FOR = 0x2, + MM_K_FOE = 0x3, + MM_K_FOW = 0x4 +}; + +enum { + PTE_VALID = 0x0001, + PTE_FOR = 0x0002, /* used for page protection (fault on read) */ + PTE_FOW = 0x0004, /* used for page protection (fault on write) */ + PTE_FOE = 0x0008, + PTE_KS = 0x0010, + PTE_PSE = 0x0040, + PTE_GH = 0x0060, + PTE_HRE = 0x0100, + PTE_VRE = 0x0200, + PTE_KRE = 0x0400, + PTE_URE = 0x0800, + PTE_HWE = 0x1000, + PTE_VWE = 0x2000, + PTE_KWE = 0x4000, + PTE_UWE = 0x8000 +}; + +static inline int cpu_mmu_index(CPUSW64State *env, bool ifetch) +{ + int ret = env->flags & ENV_FLAG_PS_USER ? MMU_USER_IDX : MMU_KERNEL_IDX; + if (env->flags & ENV_FLAG_HM_MODE) { + ret = MMU_PHYS_IDX; + } + return ret; +} + +static inline SW64CPU *sw64_env_get_cpu(CPUSW64State *env) +{ + return container_of(env, SW64CPU, env); +} + +#define ENV_GET_CPU(e) CPU(sw64_env_get_cpu(e)) +#define ENV_OFFSET offsetof(SW64CPU, env) + +#define cpu_init(cpu_model) cpu_generic_init(TYPE_SW64_CPU, cpu_model) + +#define SW64_CPU_TYPE_SUFFIX "-" TYPE_SW64_CPU +#define SW64_CPU_TYPE_NAME(name) (name SW64_CPU_TYPE_SUFFIX) +int cpu_sw64_signal_handler(int host_signum, void *pinfo, void *puc); +int sw64_cpu_gdb_read_register(CPUState *cs, uint8_t *buf, int reg); +int sw64_cpu_gdb_write_register(CPUState *cs, uint8_t *buf, int reg); +bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr); +uint64_t sw64_ldl_phys(CPUState *cs, hwaddr addr); +hwaddr sw64_cpu_get_phys_page_debug(CPUState *cs, vaddr addr); +void sw64_stl_phys(CPUState *cs, hwaddr addr, uint64_t val); +uint64_t sw64_ldw_phys(CPUState *cs, hwaddr addr); +void sw64_stw_phys(CPUState *cs, hwaddr addr, uint64_t val); +uint64_t cpu_sw64_load_fpcr(CPUSW64State *env); +#ifndef CONFIG_USER_ONLY +void sw64_cpu_do_interrupt(CPUState *cs); +bool sw64_cpu_exec_interrupt(CPUState *cpu, int int_req); +#endif +void cpu_sw64_store_fpcr(CPUSW64State *env, uint64_t val); +void sw64_cpu_do_unaligned_access(CPUState *cs, vaddr addr, + MMUAccessType access_type, int mmu_idx, + uintptr_t retaddr) QEMU_NORETURN; +bool sw64_cpu_has_work(CPUState *cs); +extern struct VMStateDescription vmstate_sw64_cpu; + +/* SW64-specific interrupt pending bits */ +#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0 +#define CPU_INTERRUPT_II0 CPU_INTERRUPT_TGT_EXT_1 +#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2 +#define CPU_INTERRUPT_PCIE CPU_INTERRUPT_TGT_EXT_3 +#define CPU_INTERRUPT_WAKEUP CPU_INTERRUPT_TGT_EXT_3 +#define CPU_INTERRUPT_SLAVE CPU_INTERRUPT_TGT_EXT_4 + +#define cpu_signal_handler cpu_sw64_signal_handler +#define CPU_RESOLVING_TYPE TYPE_SW64_CPU + +#define SWCSR(x, y) x = y +enum { + SWCSR(ITB_TAG, 0x0), + SWCSR(ITB_PTE, 0x1), + SWCSR(ITB_IA, 0x2), + SWCSR(ITB_IV, 0x3), + SWCSR(ITB_IVP, 0x4), + SWCSR(ITB_IU, 0x5), + SWCSR(ITB_IS, 0x6), + SWCSR(EXC_SUM, 0xd), + SWCSR(EXC_PC, 0xe), + SWCSR(DS_STAT, 0x48), + SWCSR(CID, 0xc4), + SWCSR(TID, 0xc7), + + SWCSR(DTB_TAG, 0x40), + SWCSR(DTB_PTE, 0x41), + SWCSR(DTB_IA, 0x42), + SWCSR(DTB_IV, 0x43), + SWCSR(DTB_IVP, 0x44), + SWCSR(DTB_IU, 0x45), + SWCSR(DTB_IS, 0x46), + SWCSR(II_REQ, 0x82), + + SWCSR(PTBR, 0x8), + SWCSR(PRI_BASE, 0x10), + SWCSR(TIMER_CTL, 0x2a), + SWCSR(TIMER_TH, 0x2b), + SWCSR(INT_STAT, 0x30), + SWCSR(INT_CLR, 0x31), 
+ SWCSR(IER, 0x32), + SWCSR(INT_PCI_INT, 0x33), + SWCSR(DVA, 0x4e), + SWCSR(SOFT_CID, 0xc9), + SWCSR(SHTCLOCK, 0xca), +}; + +#include "exec/cpu-all.h" +static inline void cpu_get_tb_cpu_state(CPUSW64State *env, target_ulong *pc, + target_ulong *cs_base, uint32_t *pflags) +{ + *pc = env->pc; + *cs_base = 0; + *pflags = env->flags; +} + +void sw64_translate_init(void); + +enum { + EXCP_NONE, + EXCP_HALT, + EXCP_II0, + EXCP_OPCDEC, + EXCP_CALL_SYS, + EXCP_ARITH, + EXCP_UNALIGN, +#ifdef SOFTMMU + EXCP_MMFAULT, +#else + EXCP_DTBD, + EXCP_DTBS_U, + EXCP_DTBS_K, + EXCP_ITB_U, + EXCP_ITB_K, +#endif + EXCP_CLK_INTERRUPT, + EXCP_DEV_INTERRUPT, + EXCP_SLAVE, +}; + +#define CSR_SHIFT_AND_MASK(name, func, shift, bits) \ + name##_##func##_S = shift, \ + name##_##func##_V = bits, \ + name##_##func##_M = (1UL << bits) - 1 + +#define FPCR_MASK(name) ((uint64_t)FPCR_##name##_M << FPCR_##name##_S) +/* FPCR */ +enum { + CSR_SHIFT_AND_MASK(FPCR, EXC_CTL, 0, 2), + CSR_SHIFT_AND_MASK(FPCR, EXC_CTL_WEN, 2, 1), + CSR_SHIFT_AND_MASK(FPCR, RSV0, 3, 1), + CSR_SHIFT_AND_MASK(FPCR, INV3, 4, 1), + CSR_SHIFT_AND_MASK(FPCR, ZERO0, 5, 1), + CSR_SHIFT_AND_MASK(FPCR, OVF3, 6, 1), + CSR_SHIFT_AND_MASK(FPCR, UNF3, 7, 1), + CSR_SHIFT_AND_MASK(FPCR, INE3, 8, 1), + CSR_SHIFT_AND_MASK(FPCR, ZERO1, 9, 1), + CSR_SHIFT_AND_MASK(FPCR, RSV1, 10, 10), + CSR_SHIFT_AND_MASK(FPCR, INV2, 20, 1), + CSR_SHIFT_AND_MASK(FPCR, ZERO2, 21, 1), + CSR_SHIFT_AND_MASK(FPCR, OVF2, 22, 1), + CSR_SHIFT_AND_MASK(FPCR, UNF2, 23, 1), + CSR_SHIFT_AND_MASK(FPCR, INE2, 24, 1), + CSR_SHIFT_AND_MASK(FPCR, ZERO3, 25, 1), + CSR_SHIFT_AND_MASK(FPCR, RSV2, 26, 10), + CSR_SHIFT_AND_MASK(FPCR, INV1, 36, 1), + CSR_SHIFT_AND_MASK(FPCR, ZERO4, 37, 1), + CSR_SHIFT_AND_MASK(FPCR, OVF1, 38, 1), + CSR_SHIFT_AND_MASK(FPCR, UNF1, 39, 1), + CSR_SHIFT_AND_MASK(FPCR, INE1, 40, 1), + CSR_SHIFT_AND_MASK(FPCR, ZERO5, 41, 1), + CSR_SHIFT_AND_MASK(FPCR, RSV3, 42, 6), + CSR_SHIFT_AND_MASK(FPCR, DNZ, 48, 1), + CSR_SHIFT_AND_MASK(FPCR, INVD, 49, 1), + CSR_SHIFT_AND_MASK(FPCR, DZED, 50, 1), + CSR_SHIFT_AND_MASK(FPCR, OVFD, 51, 1), + CSR_SHIFT_AND_MASK(FPCR, INV0, 52, 1), + CSR_SHIFT_AND_MASK(FPCR, DZE0, 53, 1), + CSR_SHIFT_AND_MASK(FPCR, OVF0, 54, 1), + CSR_SHIFT_AND_MASK(FPCR, UNF0, 55, 1), + CSR_SHIFT_AND_MASK(FPCR, INE0, 56, 1), + CSR_SHIFT_AND_MASK(FPCR, OVI0, 57, 1), + CSR_SHIFT_AND_MASK(FPCR, DYN, 58, 2), + CSR_SHIFT_AND_MASK(FPCR, UNDZ, 60, 1), + CSR_SHIFT_AND_MASK(FPCR, UNFD, 61, 1), + CSR_SHIFT_AND_MASK(FPCR, INED, 62, 1), + CSR_SHIFT_AND_MASK(FPCR, SUM, 63, 1), +}; + +/* Arithmetic exception (entArith) constants. 
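The CSR_SHIFT_AND_MASK entries above only define shift/width/mask triples; FPCR_MASK() then positions the mask inside the 64-bit register. A standalone sketch using the DYN (dynamic rounding mode) field as the example:

```c
#include <assert.h>
#include <stdint.h>

/* What CSR_SHIFT_AND_MASK(FPCR, DYN, 58, 2) expands to: */
enum {
    FPCR_DYN_S = 58,              /* shift */
    FPCR_DYN_V = 2,               /* field width in bits */
    FPCR_DYN_M = (1UL << 2) - 1,  /* low mask, 0x3 */
};

#define FPCR_MASK(name) ((uint64_t)FPCR_##name##_M << FPCR_##name##_S)

int main(void)
{
    uint64_t fpcr = 2ULL << FPCR_DYN_S;  /* DYN = 2: round to nearest even */

    assert(FPCR_MASK(DYN) == 0x0c00000000000000ULL);
    assert(((fpcr & FPCR_MASK(DYN)) >> FPCR_DYN_S) == 2);
    return 0;
}
```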
 */
+#define EXC_M_SWC 1   /* Software completion */
+#define EXC_M_INV 2   /* Invalid operation */
+#define EXC_M_DZE 4   /* Division by zero */
+#define EXC_M_OVF 8   /* Overflow */
+#define EXC_M_UNF 16  /* Underflow */
+#define EXC_M_INE 32  /* Inexact result */
+#define EXC_M_IOV 64  /* Integer overflow */
+#define EXC_M_DNO 128 /* Denormal operation */
+
+void QEMU_NORETURN dynamic_excp(CPUSW64State *env, uintptr_t retaddr, int excp,
+                                int error);
+void QEMU_NORETURN arith_excp(CPUSW64State *env, uintptr_t retaddr, int exc,
+                              uint64_t mask);
+
+#define DEBUG_ARCH
+#ifdef DEBUG_ARCH
+#define arch_assert(x) \
+    do { \
+        g_assert(x); /* fprintf(stderr, "+6b %d\n", __LINE__); */ \
+    } while (0)
+#else
+#define arch_assert(x)
+#endif
+
+typedef struct SW64CPUInfo {
+    const char *name;
+    void (*initfn)(Object *obj);
+    void (*class_init)(ObjectClass *oc, void *data);
+} SW64CPUInfo;
+#define test_feature(env, x) (env->features & (x))
+
+/* Slave */
+#endif
diff --git a/target/sw64/exception.c b/target/sw64/exception.c
new file mode 100644
index 0000000000000000000000000000000000000000..a2df1cd32980efacfd274cd60ab8347e373d163d
--- /dev/null
+++ b/target/sw64/exception.c
@@ -0,0 +1,76 @@
+#include "qemu/osdep.h"
+#include "qemu/timer.h"
+
+#include "cpu.h"
+#include "exec/exec-all.h"
+#include "fpu/softfloat.h"
+#include "exec/helper-proto.h"
+#include "hw/core/cpu.h"
+
+#ifndef CONFIG_USER_ONLY
+void QEMU_NORETURN sw64_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
+                                                MMUAccessType access_type,
+                                                int mmu_idx, uintptr_t retaddr)
+{
+    SW64CPU *cpu = SW64_CPU(cs);
+    CPUSW64State *env = &cpu->env;
+    uint32_t insn = 0;
+
+    if (retaddr) {
+        cpu_restore_state(cs, retaddr, true);
+    }
+
+    fprintf(stderr, "Error %s addr = %lx\n", __func__, addr);
+    env->csr[DVA] = addr;
+
+    env->csr[EXC_SUM] = ((insn >> 21) & 31) << 8; /* dest regno */
+    env->csr[DS_STAT] = (insn >> 26) << 4;        /* opcode */
+    cs->exception_index = EXCP_UNALIGN;
+    env->error_code = 0;
+    cpu_loop_exit(cs);
+}
+
+#endif
+
+/* This should only be called from translate, via gen_excp.
+   We expect that ENV->PC has already been updated. */
+void QEMU_NORETURN helper_excp(CPUSW64State *env, int excp, int error)
+{
+    SW64CPU *cpu = sw64_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    cs->exception_index = excp;
+    env->error_code = error;
+    cpu_loop_exit(cs);
+}
+
+/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
+void QEMU_NORETURN dynamic_excp(CPUSW64State *env, uintptr_t retaddr, int excp,
+                                int error)
+{
+    SW64CPU *cpu = sw64_env_get_cpu(env);
+    CPUState *cs = CPU(cpu);
+
+    cs->exception_index = excp;
+    env->error_code = error;
+    if (retaddr) {
+        /* FIXME: do not chain to another TB; resume at the next insn. */
+        cpu_restore_state(cs, retaddr, true);
+        /* Floating-point exceptions (our only users) point to the next PC. 
*/ + env->pc += 4; + } + cpu_loop_exit(cs); +} + +void QEMU_NORETURN arith_excp(CPUSW64State *env, uintptr_t retaddr, int exc, + uint64_t mask) +{ + env->csr[EXC_SUM] = exc; + dynamic_excp(env, retaddr, EXCP_ARITH, 0); +} + + +void helper_trace_mem(CPUSW64State *env, uint64_t addr, uint64_t val) +{ + /* printf("pc = %lx: Access mem addr =%lx, val = %lx\n", env->pc, addr,val); */ +} diff --git a/target/sw64/float_helper.c b/target/sw64/float_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..c8e0845afc9579bb55ec8f0cd4f9798e81650cfd --- /dev/null +++ b/target/sw64/float_helper.c @@ -0,0 +1,845 @@ +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "fpu/softfloat.h" + +static inline uint32_t extractFloat16Frac(float16 a) +{ + return float16_val(a) & 0x3ff; +} + +/*---------------------------------------------------------------------------- +| Returns the exponent bits of the half-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline int extractFloat16Exp(float16 a) +{ + return (float16_val(a) >> 10) & 0x1f; +} + +/*---------------------------------------------------------------------------- +| Returns the sign bit of the single-precision floating-point value `a'. +*----------------------------------------------------------------------------*/ + +static inline uint8_t extractFloat16Sign(float16 a) +{ + return float16_val(a) >> 15; +} + +#define FP_STATUS (env->fp_status) + +#define CONVERT_BIT(X, SRC, DST) \ + (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X)&SRC) * (DST / SRC)) + +static uint64_t soft_to_errcode_exc(CPUSW64State *env) +{ + uint8_t exc = get_float_exception_flags(&FP_STATUS); + + if (unlikely(exc)) { + set_float_exception_flags(0, &FP_STATUS); + } + return exc; +} + +static inline uint64_t float32_to_s_int(uint32_t fi) +{ + uint32_t frac = fi & 0x7fffff; + uint32_t sign = (fi >> 31) & 1; + uint32_t exp_msb = (fi >> 30) & 1; + uint32_t exp_low = (fi >> 23) & 0x7f; + uint32_t exp; + + exp = (exp_msb << 10) | exp_low; + if (exp_msb) { + if (exp_low == 0x7f) { + exp = 0x7ff; + } + } else { + if (exp_low != 0x00) { + exp |= 0x380; + } + } + + return (((uint64_t)sign << 63) | ((uint64_t)exp << 52) | + ((uint64_t)frac << 29)); +} + +static inline uint64_t float32_to_s(float32 fa) +{ + CPU_FloatU a; + a.f = fa; + return float32_to_s_int(a.l); +} +static inline uint32_t s_to_float32_int(uint64_t a) +{ + return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff); +} + +static inline float32 s_to_float32(uint64_t a) +{ + CPU_FloatU r; + r.l = s_to_float32_int(a); + return r.f; +} + +uint32_t helper_s_to_memory(uint64_t a) +{ + return s_to_float32(a); +} + +uint64_t helper_memory_to_s(uint32_t a) +{ + return float32_to_s(a); +} + +uint64_t helper_fcvtls(CPUSW64State *env, uint64_t a) +{ + float32 fr = int64_to_float32(a, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); + return float32_to_s(fr); +} + +uint64_t helper_fcvtld(CPUSW64State *env, uint64_t a) +{ + float64 fr = int64_to_float64(a, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); + return (uint64_t)fr; +} + +static uint64_t do_fcvtdl(CPUSW64State *env, uint64_t a, uint64_t roundmode) +{ + uint64_t frac, ret = 0; + uint32_t exp, sign, exc = 0; + int shift; + + sign = (a >> 63); + exp = (uint32_t)(a >> 52) & 0x7ff; + frac = a & 0xfffffffffffffull; + + if (exp == 0) { + if (unlikely(frac != 0) && !env->fp_status.flush_inputs_to_zero) { + goto 
do_underflow; + } + } else if (exp == 0x7ff) { + exc = float_flag_invalid; + } else { + /* Restore implicit bit. */ + frac |= 0x10000000000000ull; + + shift = exp - 1023 - 52; + if (shift >= 0) { + /* In this case the number is so large that we must shift + the fraction left. There is no rounding to do. */ + if (shift < 64) { + ret = frac << shift; + } + /* Check for overflow. Note the special case of -0x1p63. */ + if (shift >= 11 && a != 0xC3E0000000000000ull) { + exc = float_flag_inexact; + } + } else { + uint64_t round; + + /* In this case the number is smaller than the fraction as + represented by the 52 bit number. Here we must think + about rounding the result. Handle this by shifting the + fractional part of the number into the high bits of ROUND. + This will let us efficiently handle round-to-nearest. */ + shift = -shift; + if (shift < 63) { + ret = frac >> shift; + round = frac << (64 - shift); + } else { + /* The exponent is so small we shift out everything. + Leave a sticky bit for proper rounding below. */ + do_underflow: + round = 1; + } + + if (round) { + exc = float_flag_inexact; + switch (roundmode) { + case float_round_nearest_even: + if (round == (1ull << 63)) { + /* Fraction is exactly 0.5; round to even. */ + ret += (ret & 1); + } else if (round > (1ull << 63)) { + ret += 1; + } + break; + case float_round_to_zero: + break; + case float_round_up: + ret += 1 - sign; + break; + case float_round_down: + ret += sign; + break; + } + } + } + if (sign) { + ret = -ret; + } + } + env->error_code = exc; + + return ret; +} + +/* TODO: */ +uint64_t helper_fris(CPUSW64State *env, uint64_t a, uint64_t roundmode) +{ + uint64_t ir; + float32 fr; + + if (roundmode == 5) + roundmode = env->fpcr_round_mode; + ir = do_fcvtdl(env, a, roundmode); + fr = int64_to_float32(ir, &FP_STATUS); + return float32_to_s(fr); +} + +/* TODO: */ +uint64_t helper_frid(CPUSW64State *env, uint64_t a, uint64_t roundmode) +{ + if (roundmode == 5) + roundmode = env->fpcr_round_mode; + return int64_to_float64(do_fcvtdl(env, a, roundmode), &FP_STATUS); +} + +uint64_t helper_fcvtdl(CPUSW64State *env, uint64_t a, uint64_t roundmode) +{ + return do_fcvtdl(env, a, roundmode); +} + +uint64_t helper_fcvtdl_dyn(CPUSW64State *env, uint64_t a) +{ + uint64_t roundmode = (uint64_t)(env->fpcr_round_mode); + return do_fcvtdl(env, a, roundmode); +} + +uint64_t helper_fcvtsd(CPUSW64State *env, uint64_t a) +{ + float32 fa; + float64 fr; + + fa = s_to_float32(a); + fr = float32_to_float64(fa, &FP_STATUS); + + return fr; +} + +uint64_t helper_fcvtds(CPUSW64State *env, uint64_t a) +{ + float32 fa; + + fa = float64_to_float32((float64)a, &FP_STATUS); + + return float32_to_s(fa); +} + +uint64_t helper_fcvtwl(CPUSW64State *env, uint64_t a) +{ + int32_t ret; + ret = (a >> 29) & 0x3fffffff; + ret |= ((a >> 62) & 0x3) << 30; + return (uint64_t)(int64_t)ret; //int32_t to int64_t as Sign-Extend +} + +uint64_t helper_fcvtlw(CPUSW64State *env, uint64_t a) +{ + uint64_t ret; + ret = (a & 0x3fffffff) << 29; + ret |= ((a >> 30) & 0x3) << 62; + return ret; +} + +uint64_t helper_fadds(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float32 fa, fb, fr; + + fa = s_to_float32(a); + fb = s_to_float32(b); +#if 1 + fr = float32_add(fa, fb, &FP_STATUS); + + env->error_code = soft_to_errcode_exc(env); +#else + *(float*)&fr = *(float*)&fb + *(float*)&fa; +#endif + return float32_to_s(fr); +} + +/* Input handing without software completion. Trap for all + non-finite numbers. 
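helper_fcvtwl()/helper_fcvtlw() above implement the Alpha-style packing of a 32-bit integer into an FP register: the low 30 bits sit at bit 29 and the top two bits at bit 62. A standalone round-trip check of that layout (helper logic mirrored, not reused):

```c
#include <assert.h>
#include <stdint.h>

static uint64_t lw_pack(uint64_t a)   /* mirrors helper_fcvtlw */
{
    uint64_t ret = (a & 0x3fffffff) << 29;
    ret |= ((a >> 30) & 0x3) << 62;
    return ret;
}

static uint64_t lw_unpack(uint64_t a) /* mirrors helper_fcvtwl */
{
    uint32_t lo = (a >> 29) & 0x3fffffff;
    lo |= (uint32_t)((a >> 62) & 0x3) << 30;
    return (uint64_t)(int64_t)(int32_t)lo; /* sign-extend, as the helper does */
}

int main(void)
{
    assert(lw_unpack(lw_pack(0x12345678u)) == 0x12345678u);
    assert(lw_unpack(lw_pack(0x80000000u)) == 0xffffffff80000000ull);
    return 0;
}
```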
*/ +uint64_t helper_faddd(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb, fr; + + fa = (float64)a; + fb = (float64)b; +#if 1 + fr = float64_add(fa, fb, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(double*)&fr = *(double*)&fb + *(double*)&fa; +#endif + return (uint64_t)fr; +} + +uint64_t helper_fsubs(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float32 fa, fb, fr; + + fa = s_to_float32(a); + fb = s_to_float32(b); +#if 1 + fr = float32_sub(fa, fb, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(float*)&fr = *(float*)&fa - *(float*)&fb; +#endif + return float32_to_s(fr); +} + +uint64_t helper_fsubd(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb, fr; + + fa = (float64)a; + fb = (float64)b; +#if 1 + fr = float64_sub(fa, fb, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(double*)&fr = *(double*)&fa - *(double*)&fb; +#endif + return (uint64_t)fr; +} + +uint64_t helper_fmuls(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float32 fa, fb, fr; + + fa = s_to_float32(a); + fb = s_to_float32(b); +#if 1 + fr = float32_mul(fa, fb, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(float*)&fr = *(float*)&fa * *(float*)&fb; +#endif + return float32_to_s(fr); +} + +uint64_t helper_fmuld(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb, fr; + + fa = (float64)a; + fb = (float64)b; +#if 1 + fr = float64_mul(fa, fb, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(double*)&fr = *(double*)&fa * *(double*)&fb; +#endif + return (uint64_t)fr; +} + +uint64_t helper_fdivs(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float32 fa, fb, fr; + + fa = s_to_float32(a); + fb = s_to_float32(b); +#if 1 + fr = float32_div(fa, fb, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(float*)&fr = *(float*)&fa / *(float*)&fb; +#endif + return float32_to_s(fr); +} + +uint64_t helper_fdivd(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb, fr; + + fa = (float64)a; + fb = (float64)b; +#if 1 + fr = float64_div(fa, fb, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(double*)&fr = *(double*)&fa / *(double*)&fb; +#endif + + return (uint64_t)fr; +} + +uint64_t helper_frecs(CPUSW64State *env, uint64_t a) +{ + float32 fa, fb, fr; + + fa = s_to_float32(a); + fb = int64_to_float32(1, &FP_STATUS); +#if 1 + fr = float32_div(fb, fa, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(float*)&fr = *(float*)&fb / *(float*)&fa; +#endif + return float32_to_s(fr); +} + +uint64_t helper_frecd(CPUSW64State *env, uint64_t a) +{ + float64 fa, fb, fr; + + fa = (float64)a; + fb = int64_to_float64(1, &FP_STATUS); +#if 1 + fr = float64_div(fb, fa, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else + *(double*)&fr = *(double*)&fb / *(double*)&fa; +#endif + + return (uint64_t)fr; +} + +uint64_t helper_fsqrts(CPUSW64State *env, uint64_t b) +{ + float32 fb, fr; +#if 1 + fb = s_to_float32(b); + fr = float32_sqrt(fb, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else +#include + *(double*)&fr = sqrt(*(double*)&b); +#endif + + return float32_to_s(fr); +} + +uint64_t helper_fsqrt(CPUSW64State *env, uint64_t b) +{ + float64 fr; + +#if 1 + fr = float64_sqrt(b, &FP_STATUS); + env->error_code = soft_to_errcode_exc(env); +#else +#include + *(double*)&fr = sqrt(*(double*)&b); +#endif + + return (uint64_t)fr; +} + + +uint64_t helper_fmas(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) +{ + float32 fa, fb, fc, 
fr; + fa = s_to_float32(a); + fb = s_to_float32(b); + fc = s_to_float32(c); + + fr = float32_muladd(fa, fb, fc, 0, &FP_STATUS); + + return float32_to_s(fr); +} + +uint64_t helper_fmad(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) +{ + float64 fr; + + fr = float64_muladd(a, b, c, 0, &FP_STATUS); + + return fr; +} + + +uint64_t helper_fmss(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) +{ + float32 fa, fb, fc, fr; + fa = s_to_float32(a); + fb = s_to_float32(b); + fc = s_to_float32(c); + + fr = float32_muladd(fa, fb, fc, float_muladd_negate_c, &FP_STATUS); + + return float32_to_s(fr); +} + +uint64_t helper_fmsd(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) +{ + float64 fr; + + fr = float64_muladd(a, b, c, float_muladd_negate_c, &FP_STATUS); + + return fr; +} + + +uint64_t helper_fnmas(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) +{ + float32 fa, fb, fc, fr; + fa = s_to_float32(a); + fb = s_to_float32(b); + fc = s_to_float32(c); + int flag = float_muladd_negate_product; + + fr = float32_muladd(fa, fb, fc, flag, &FP_STATUS); + + return float32_to_s(fr); +} + +uint64_t helper_fnmad(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) +{ + float64 fr; + int flag = float_muladd_negate_product; + + fr = float64_muladd(a, b, c, flag, &FP_STATUS); + + return fr; +} + +uint64_t helper_fnmss(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) +{ + float32 fa, fb, fc, fr; + fa = s_to_float32(a); + fb = s_to_float32(b); + fc = s_to_float32(c); + int flag = float_muladd_negate_product | float_muladd_negate_c; + + fr = float32_muladd(fa, fb, fc, flag, &FP_STATUS); + + return float32_to_s(fr); +} + +uint64_t helper_fnmsd(CPUSW64State *env, uint64_t a, uint64_t b, uint64_t c) +{ + float64 fr; + int flag = float_muladd_negate_product | float_muladd_negate_c; + + fr = float64_muladd(a, b, c, flag, &FP_STATUS); + + return fr; +} +uint64_t helper_load_fpcr(CPUSW64State *env) +{ + return cpu_sw64_load_fpcr(env); +} + +static void update_fpcr_status_mask(CPUSW64State *env) +{ + uint64_t t = 0; + + /* Don't mask the inv excp: + * EXC_CTL1 = 1 + * EXC_CTL1 = 0, input denormal, DNZ=0 + * EXC_CTL1 = 0, no input denormal or DNZ=1, INVD = 0 + */ + if ((env->fpcr & FPCR_MASK(EXC_CTL) & 0x2)) { + if (env->fpcr & FPCR_MASK(EXC_CTL) & 0x1) { + t |= (EXC_M_INE | EXC_M_UNF | EXC_M_IOV); + } else { + t |= EXC_M_INE; + } + } else { + /* INV and DNO mask */ + if (env->fpcr & FPCR_MASK(DNZ)) t |= EXC_M_DNO; + if (env->fpcr & FPCR_MASK(INVD)) t |= EXC_M_INV; + if (env->fpcr & FPCR_MASK(OVFD)) t |= EXC_M_OVF; + if (env->fpcr & FPCR_MASK(UNFD)) { + t |= EXC_M_UNF; + } + if (env->fpcr & FPCR_MASK(DZED)) t |= EXC_M_DZE; + if (env->fpcr & FPCR_MASK(INED)) t |= EXC_M_INE; + } + + env->fpcr_exc_enable = t; +} + +void helper_store_fpcr(CPUSW64State *env, uint64_t val) +{ + uint64_t fpcr = val; + uint8_t ret; + + switch ((fpcr & FPCR_MASK(DYN)) >> FPCR_DYN_S) { + case 0x0: + ret = float_round_to_zero; + break; + case 0x1: + ret = float_round_down; + break; + case 0x2: + ret = float_round_nearest_even; + break; + case 0x3: + ret = float_round_up; + break; + default: + ret = float_round_nearest_even; + break; + } + + env->fpcr_round_mode = ret; + + env->fp_status.float_rounding_mode = ret; + + env->fpcr_flush_to_zero = + (fpcr & FPCR_MASK(UNFD)) && (fpcr & FPCR_MASK(UNDZ)); + env->fp_status.flush_to_zero = env->fpcr_flush_to_zero; + + /* FIXME: Now the DNZ flag does not work int C3A. */ + //set_flush_inputs_to_zero((val & FPCR_MASK(DNZ)) != 0? 
1 : 0, &FP_STATUS); + + val &= ~0x3UL; + val |= env->fpcr & 0x3UL; + env->fpcr = val; + update_fpcr_status_mask(env); +} + +void helper_setfpcrx(CPUSW64State *env, uint64_t val) +{ + if (env->fpcr & FPCR_MASK(EXC_CTL_WEN)) { + env->fpcr &= ~3UL; + env->fpcr |= val & 0x3; + update_fpcr_status_mask(env); + } +} +#ifndef CONFIG_USER_ONLY +static uint32_t soft_to_exc_type(uint64_t exc) +{ + uint32_t ret = 0; + + if (unlikely(exc)) { + ret |= CONVERT_BIT(exc, float_flag_invalid, EXC_M_INV); + ret |= CONVERT_BIT(exc, float_flag_divbyzero, EXC_M_DZE); + ret |= CONVERT_BIT(exc, float_flag_overflow, EXC_M_OVF); + ret |= CONVERT_BIT(exc, float_flag_underflow, EXC_M_UNF); + ret |= CONVERT_BIT(exc, float_flag_inexact, EXC_M_INE); + } + + return ret; +} +static void fp_exc_raise1(CPUSW64State *env, uintptr_t retaddr, uint64_t exc, + uint32_t regno) +{ + if (!likely(exc)) + return; + arith_excp(env, retaddr, exc, 1ull << regno); +} + +void helper_fp_exc_raise(CPUSW64State *env, uint32_t regno) +{ + uint64_t exc = env->error_code; + uint32_t exc_type = soft_to_exc_type(exc); + + if (exc_type) { + exc_type &= ~(env->fpcr_exc_enable); + if (exc_type) fp_exc_raise1(env, GETPC(), exc_type | EXC_M_SWC, regno); + } +} +#endif + +void helper_ieee_input(CPUSW64State *env, uint64_t val) +{ +#ifndef CONFIG_USER_ONLY + uint32_t exp = (uint32_t)(val >> 52) & 0x7ff; + + if (exp == 0x7ff) { + /* Infinity or NaN. */ + uint32_t exc_type = EXC_M_INV; + + if (exc_type) { + exc_type &= ~(env->fpcr_exc_enable); + if (exc_type) + fp_exc_raise1(env, GETPC(), exc_type | EXC_M_SWC, 32); + } + } +#endif +} + +void helper_ieee_input_s(CPUSW64State *env, uint64_t val) +{ + if (unlikely(2 * val - 1 < 0x1fffffffffffffull) && + !env->fp_status.flush_inputs_to_zero) { + } +} + +static inline float64 t_to_float64(uint64_t a) +{ + /* Memory format is the same as float64 */ + CPU_DoubleU r; + r.ll = a; + return r.d; +} + +uint64_t helper_fcmpun(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb; + uint64_t ret = 0; + + fa = t_to_float64(a); + fb = t_to_float64(b); + + if (float64_unordered_quiet(fa, fb, &FP_STATUS)) { + ret = 0x4000000000000000ULL; + } + env->error_code = soft_to_errcode_exc(env); + + return ret; +} + +uint64_t helper_fcmpeq(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb; + uint64_t ret = 0; + + fa = t_to_float64(a); + fb = t_to_float64(b); + + if (float64_eq_quiet(fa, fb, &FP_STATUS)) { + ret = 0x4000000000000000ULL; + } + env->error_code = soft_to_errcode_exc(env); + + return ret; +} + +uint64_t helper_fcmple(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb; + uint64_t ret = 0; + + fa = t_to_float64(a); + fb = t_to_float64(b); + + if (float64_le_quiet(fa, fb, &FP_STATUS)) { + ret = 0x4000000000000000ULL; + } + env->error_code = soft_to_errcode_exc(env); + + return ret; +} + +uint64_t helper_fcmplt(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb; + uint64_t ret = 0; + + fa = t_to_float64(a); + fb = t_to_float64(b); + + if (float64_lt_quiet(fa, fb, &FP_STATUS)) { + ret = 0x4000000000000000ULL; + } + env->error_code = soft_to_errcode_exc(env); + + return ret; +} + +uint64_t helper_fcmpge(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb; + uint64_t ret = 0; + + fa = t_to_float64(a); + fb = t_to_float64(b); + + if (float64_le_quiet(fb, fa, &FP_STATUS)) { + ret = 0x4000000000000000ULL; + } + env->error_code = soft_to_errcode_exc(env); + + return ret; +} + +uint64_t helper_fcmpgt(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb; + uint64_t 
ret = 0; + + fa = t_to_float64(a); + fb = t_to_float64(b); + + if (float64_lt_quiet(fb, fa, &FP_STATUS)) { + ret = 0x4000000000000000ULL; + } + env->error_code = soft_to_errcode_exc(env); + + return ret; +} + +uint64_t helper_fcmpge_s(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb; + uint64_t ret = 0; + + /* Make sure va and vb is s float. */ + fa = float32_to_float64(s_to_float32(a), &FP_STATUS); + fb = float32_to_float64(s_to_float32(b), &FP_STATUS); + + if (float64_le_quiet(fb, fa, &FP_STATUS)) { + ret = 0x4000000000000000ULL; + } + env->error_code = soft_to_errcode_exc(env); + + return ret; +} + +uint64_t helper_fcmple_s(CPUSW64State *env, uint64_t a, uint64_t b) +{ + float64 fa, fb; + uint64_t ret = 0; + + /* Make sure va and vb is s float. */ + fa = float32_to_float64(s_to_float32(a), &FP_STATUS); + fb = float32_to_float64(s_to_float32(b), &FP_STATUS); + + if (float64_le_quiet(fa, fb, &FP_STATUS)) { + ret = 0x4000000000000000ULL; + } + env->error_code = soft_to_errcode_exc(env); + + return ret; +} + +void helper_vfcvtsh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc, + uint64_t rd) +{ + uint64_t temp = 0; + int i; + for (i = 0; i < 4; i++) { + temp |= (uint64_t)float32_to_float16(s_to_float32(env->fr[ra + i * 32]), + 1, &FP_STATUS) + << (i * 16); + } + for (i = 0; i < 4; i++) { + if (i == (vc & 0x3)) { + env->fr[rd + i * 32] = temp; + } else { + env->fr[rd + i * 32] = env->fr[rb + i * 32]; + } + } +} + +void helper_vfcvths(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc, + uint64_t rd) +{ + uint64_t temp; + int i; + + temp = env->fr[ra + 32 * (vc & 0x3)]; + for (i = 0; i < 4; i++) { + env->fr[rd + i * 32] = float32_to_s( + float16_to_float32((temp >> (i * 16)) & 0xffffUL, 1, &FP_STATUS)); + } +} diff --git a/target/sw64/gdbstub.c b/target/sw64/gdbstub.c new file mode 100644 index 0000000000000000000000000000000000000000..da4d39d215d0a89d40f2b4a377381fc007fd7cb9 --- /dev/null +++ b/target/sw64/gdbstub.c @@ -0,0 +1,56 @@ +/* + * SW64 gdb server stub + * + * Copyright (c) 2023 Lu Feifei + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . 
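The vector helpers above index env->fr[] as reg + 32 * lane: the 128-slot array holds four 64-bit lanes for each of the 32 architectural FP registers (which is also why the freg_names table in cpu.c repeats "f0".."f31" four times). A standalone sketch of that addressing:

```c
#include <assert.h>
#include <stdint.h>

#define NUM_FREGS 32   /* architectural FP registers */
#define NUM_LANES 4    /* 64-bit elements per vector register */

static uint64_t fr[NUM_FREGS * NUM_LANES];   /* mirrors env->fr[128] */

/* Element i of vector register reg, as in env->fr[reg + i * 32]. */
static uint64_t *lane(unsigned reg, unsigned i)
{
    assert(reg < NUM_FREGS && i < NUM_LANES);
    return &fr[reg + i * 32];
}

int main(void)
{
    for (unsigned i = 0; i < NUM_LANES; i++) {
        *lane(10, i) = 0x1111111111111111ULL * (i + 1);
    }
    assert(*lane(10, 3) == 0x4444444444444444ULL);
    return 0;
}
```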
+ */ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "exec/gdbstub.h" + +int sw64_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + + if (n < 31) { + return gdb_get_regl(mem_buf, env->ir[n]); + } else if (n == 31) { + return gdb_get_regl(mem_buf, 0); + } else if (n == 64) { + return gdb_get_regl(mem_buf, env->pc); + } + return 0; +} + +int sw64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + + if (n < 31) { + env->ir[n] = ldtul_p(mem_buf); + return sizeof(target_ulong); + } else if (n == 31) { + /* discard writes to r31 */ + return sizeof(target_ulong); + } else if (n == 64) { + env->pc = ldtul_p(mem_buf); + return sizeof(target_ulong); + } + + return 0; +} diff --git a/target/sw64/helper.c b/target/sw64/helper.c new file mode 100644 index 0000000000000000000000000000000000000000..e317c08f0a5492a218ae8fa9b19917298003bb79 --- /dev/null +++ b/target/sw64/helper.c @@ -0,0 +1,461 @@ +/* + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see . + */ + +#include "qemu/osdep.h" +#include "qemu/timer.h" + +#include "cpu.h" +#include "exec/exec-all.h" +#include "fpu/softfloat.h" +#include "exec/helper-proto.h" +#include "hw/core/cpu.h" +#include "exec/memattrs.h" + +#ifndef CONFIG_USER_ONLY +static target_ulong ldq_phys_clear(CPUState *cs, target_ulong phys) +{ + return ldq_phys(cs->as, phys & ~(3UL)); +} + +static int get_sw64_physical_address(CPUSW64State *env, target_ulong addr, + int prot_need, int mmu_idx, target_ulong *pphys, + int *pprot) +{ + CPUState *cs = CPU(sw64_env_get_cpu(env)); + target_ulong phys = 0; + int prot = 0; + int ret = MM_K_ACV; + target_ulong L1pte, L2pte, L3pte, L4pte; + target_ulong pt = 0, index = 0, pte_pfn_s = 0; + + if (((addr >> 28) & 0xffffffff8) == 0xffffffff8) { + phys = (~(0xffffffff80000000)) & addr; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + ret = -1; + goto exit; + } else if (((addr >> 32) & 0xfffff000) == 0xfffff000) { + goto do_pgmiss; + } else if (((addr >> 52) & 0xfff) == 0xfff) { + phys = (~(0xfff0000000000000)) & addr; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + ret = -1; + goto exit; + } +do_pgmiss: + pte_pfn_s = 28; + pt = env->csr[PTBR]; + index = (addr >> (TARGET_PAGE_BITS + 3 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1); + L1pte = ldq_phys_clear(cs, pt + index * 8); + if ((L1pte & PTE_VALID) == 0) { + ret = MM_K_TNV; + goto exit; + } + if (((L1pte >> 1) & 1) && prot_need == 0) { + ret = MM_K_FOR; + goto exit; + } + if (((L1pte >> 2) & 1) && prot_need == 1) { + ret = MM_K_FOW; + goto exit; + } + pt = L1pte >> pte_pfn_s << TARGET_PAGE_BITS; + + index = (addr >> (TARGET_PAGE_BITS + 2 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1); + L2pte = ldq_phys_clear(cs, pt + index * 8); + + if ((L2pte & PTE_VALID) == 0) { + ret = MM_K_TNV; + goto exit; + } + if (((L2pte >> 
1) & 1) && prot_need == 0) { + ret = MM_K_FOR; + goto exit; + } + if (((L2pte >> 2) & 1) && prot_need == 1) { + ret = MM_K_FOW; + goto exit; + } + + pt = L2pte >> pte_pfn_s << TARGET_PAGE_BITS; + + index = (addr >> (TARGET_PAGE_BITS + 1 * TARGET_LEVEL_BITS)) & ((1 << TARGET_LEVEL_BITS)-1); + L3pte = ldq_phys_clear(cs, pt + index * 8); + + if ((L3pte & PTE_VALID) == 0) { + ret = MM_K_TNV; + goto exit; + } + if (((L3pte >> 1) & 1) && prot_need == 0) { + ret = MM_K_FOR; + goto exit; + } + if (((L3pte >> 2) & 1) && prot_need == 1) { + ret = MM_K_FOW; + goto exit; + } + + pt = L3pte >> pte_pfn_s << TARGET_PAGE_BITS; + + index = (addr >> TARGET_PAGE_BITS) & ((1 << TARGET_LEVEL_BITS)-1); + L4pte = ldq_phys_clear(cs, pt + index * 8); + if ((L4pte & PTE_VALID) == 0) { + ret = MM_K_TNV; + goto exit; + } +#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4 +#error page bits out of date +#endif + + /* Check access violations. */ + if ((L4pte & PTE_FOR) == 0) { + prot |= PAGE_READ | PAGE_EXEC; + } + if ((L4pte & PTE_FOW) == 0) { + prot |= PAGE_WRITE; + } + + /* Check fault-on-operation violations. */ + prot &= ~(L4pte >> 1); + + phys = (L4pte >> pte_pfn_s << TARGET_PAGE_BITS); + + if (unlikely((prot & prot_need) == 0)) { + ret = (prot_need & PAGE_EXEC + ? MM_K_FOE + : prot_need & PAGE_WRITE + ? MM_K_FOW + : prot_need & PAGE_READ ? MM_K_FOR : -1); + goto exit; + } + + ret = -1; +exit: + *pphys = phys; + *pprot = prot; + return ret; +} + +bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size, + MMUAccessType access_type, int mmu_idx, + bool probe, uintptr_t retaddr) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + target_ulong phys; + int prot, fail; + + if (mmu_idx == MMU_PHYS_IDX) { + phys = address; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + fail = 0; + if ((address >> 52) & 1) goto do_pgmiss; + goto done; + } + +do_pgmiss: + fail = get_sw64_physical_address(env, address, 1 << access_type, mmu_idx, &phys, &prot); + if (unlikely(fail >= 0)) { + if (probe) { + return false; + } + cs->exception_index = EXCP_MMFAULT; + if (access_type == 2) { + env->csr[DS_STAT] = fail; + env->csr[DVA] = address & ~(3UL); + } else { + env->csr[DS_STAT] = fail | (((unsigned long)access_type + 1) << 3); + env->csr[DVA] = address; + } + env->error_code = access_type; + cpu_loop_exit_restore(cs, retaddr); + } +done: + tlb_set_page(cs, address & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, prot, + mmu_idx, TARGET_PAGE_SIZE); + return true; +} + +hwaddr sw64_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + target_ulong phys; + int prot, fail; + int mmu_index = cpu_mmu_index(env, 0); + if (mmu_index == MMU_PHYS_IDX) { + phys = addr; + prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC; + fail = -1; + if ((addr >> 52) & 1) goto do_pgmiss; + goto done; + } +do_pgmiss: + fail = get_sw64_physical_address(&cpu->env, addr, 1, mmu_index, &phys, &prot); +done: + return (fail >= 0 ? 
-1 : phys); +} + +#define a0(func) (((func & 0xFF) >> 6) & 0x1) +#define a1(func) ((((func & 0xFF) >> 6) & 0x2) >> 1) + +#define t(func) ((a0(func) ^ a1(func)) & 0x1) +#define b0(func) (t(func) | a0(func)) +#define b1(func) ((~t(func) & 1) | a1(func)) + +#define START_SYS_CALL_ADDR(func) \ + (b1(func) << 14) | (b0(func) << 13) | ((func & 0x3F) << 7) + +void sw64_cpu_do_interrupt(CPUState *cs) +{ + int i = cs->exception_index; + + cs->exception_index = -1; + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + switch (i) { + case EXCP_OPCDEC: + cpu_abort(cs, "ILLEGAL INSN"); + break; + case EXCP_CALL_SYS: + i = START_SYS_CALL_ADDR(env->error_code); + if (i <= 0x3F) { + i += 0x4000; + } else if (i >= 0x40 && i <= 0x7F) { + i += 0x2000; + } else if (i >= 0x80 && i <= 0x8F) { + i += 0x6000; + } + break; + case EXCP_ARITH: + env->error_code = -1; + env->csr[EXC_PC] = env->pc - 4; + env->csr[EXC_SUM] = 1; + i = 0xB80; + break; + case EXCP_UNALIGN: + i = 0xB00; + env->csr[EXC_PC] = env->pc - 4; + break; + case EXCP_CLK_INTERRUPT: + case EXCP_DEV_INTERRUPT: + i = 0xE80; + break; + case EXCP_MMFAULT: + i = 0x980; + env->csr[EXC_PC] = env->pc; + break; + case EXCP_II0: + env->csr[EXC_PC] = env->pc; + i = 0xE00; + break; + default: + break; + } + env->pc = env->hm_entry + i; + env->flags = ENV_FLAG_HM_MODE; +} + +bool sw64_cpu_exec_interrupt(CPUState *cs, int interrupt_request) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + int idx = -1; + /* We never take interrupts while in PALmode. */ + if (env->flags & ENV_FLAG_HM_MODE) + return false; + + if (interrupt_request & CPU_INTERRUPT_II0) { + idx = EXCP_II0; + env->csr[INT_STAT] |= 1UL << 6; + if ((env->csr[IER] & env->csr[INT_STAT]) == 0) + return false; + cs->interrupt_request &= ~CPU_INTERRUPT_II0; + goto done; + } + + if (interrupt_request & CPU_INTERRUPT_TIMER) { + idx = EXCP_CLK_INTERRUPT; + env->csr[INT_STAT] |= 1UL << 4; + if ((env->csr[IER] & env->csr[INT_STAT]) == 0) + return false; + cs->interrupt_request &= ~CPU_INTERRUPT_TIMER; + goto done; + } + + if (interrupt_request & CPU_INTERRUPT_HARD) { + idx = EXCP_DEV_INTERRUPT; + env->csr[INT_STAT] |= 1UL << 12; + if ((env->csr[IER] & env->csr[INT_STAT]) == 0) + return false; + cs->interrupt_request &= ~CPU_INTERRUPT_HARD; + goto done; + } + + if (interrupt_request & CPU_INTERRUPT_PCIE) { + idx = EXCP_DEV_INTERRUPT; + env->csr[INT_STAT] |= 1UL << 1; + env->csr[INT_PCI_INT] = 0x10; + if ((env->csr[IER] & env->csr[INT_STAT]) == 0) + return false; + cs->interrupt_request &= ~CPU_INTERRUPT_PCIE; + goto done; + } + +done: + if (idx >= 0) { + cs->exception_index = idx; + env->error_code = 0; + env->csr[EXC_PC] = env->pc; + sw64_cpu_do_interrupt(cs); + return true; + } + + return false; +} +#endif + +static void update_fpcr_status_mask(CPUSW64State* env) { + uint64_t t = 0; + + /* Don't mask the inv excp: + * EXC_CTL1 = 1 + * EXC_CTL1 = 0, input denormal, DNZ=0 + * EXC_CTL1 = 0, no input denormal or DNZ=1, INVD = 0 + */ + if ((env->fpcr & FPCR_MASK(EXC_CTL) & 0x2)) { + if (env->fpcr & FPCR_MASK(EXC_CTL) & 0x1) { + t |= (EXC_M_INE | EXC_M_UNF | EXC_M_IOV); + } else { + t |= EXC_M_INE; + } + } else { + /* INV and DNO mask */ + if (env->fpcr & FPCR_MASK(DNZ)) t |= EXC_M_DNO; + if (env->fpcr & FPCR_MASK(INVD)) t |= EXC_M_INV; + if (env->fpcr & FPCR_MASK(OVFD)) t |= EXC_M_OVF; + if (env->fpcr & FPCR_MASK(UNFD)) { + t |= EXC_M_UNF; + } + if (env->fpcr & FPCR_MASK(DZED)) t |= EXC_M_DZE; + if (env->fpcr & FPCR_MASK(INED)) t |= EXC_M_INE; + } + + env->fpcr_exc_enable = t; +} + 
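update_fpcr_status_mask() above collects the FPCR trap-disable bits into fpcr_exc_enable, which helper_fp_exc_raise() later uses to drop suppressed exception bits. A standalone sketch of that filtering step (constants copied from cpu.h):

```c
#include <assert.h>
#include <stdint.h>

#define EXC_M_INV 2   /* Invalid operation */
#define EXC_M_DZE 4   /* Division by zero */
#define EXC_M_INE 32  /* Inexact result */

int main(void)
{
    /* As if FPCR had DZED and INED set (those traps disabled). */
    uint64_t fpcr_exc_enable = EXC_M_DZE | EXC_M_INE;

    /* softfloat reported invalid + divide-by-zero for one instruction: */
    uint32_t exc_type = EXC_M_INV | EXC_M_DZE;

    exc_type &= ~fpcr_exc_enable;   /* the filtering in helper_fp_exc_raise */
    assert(exc_type == EXC_M_INV);  /* only the still-enabled trap remains */
    return 0;
}
```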
+void cpu_sw64_store_fpcr(CPUSW64State* env, uint64_t val) { + uint64_t fpcr = val; + uint8_t ret; + + switch ((fpcr & FPCR_MASK(DYN)) >> FPCR_DYN_S) { + case 0x0: + ret = float_round_to_zero; + break; + case 0x1: + ret = float_round_down; + break; + case 0x2: + ret = float_round_nearest_even; + break; + case 0x3: + ret = float_round_up; + break; + default: + ret = float_round_nearest_even; + break; + } + + env->fpcr_round_mode = ret; + env->fp_status.float_rounding_mode = ret; + + env->fpcr_flush_to_zero = + (fpcr & FPCR_MASK(UNFD)) && (fpcr & FPCR_MASK(UNDZ)); + env->fp_status.flush_to_zero = env->fpcr_flush_to_zero; + + val &= ~0x3UL; + val |= env->fpcr & 0x3UL; + env->fpcr = val; + update_fpcr_status_mask(env); +} + +uint64_t helper_read_csr(CPUSW64State *env, uint64_t index) +{ + if (index == PRI_BASE) + env->csr[index] = 0x10000; + if (index == SHTCLOCK) + env->csr[index] = qemu_clock_get_ns(QEMU_CLOCK_HOST) / 40; + return env->csr[index]; +} + +uint64_t helper_rtc(void) +{ +#ifndef CONFIG_USER_ONLY + return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) * CPUFREQ_SCALE; +#else + return 0; +#endif +} + +void helper_write_csr(CPUSW64State *env, uint64_t index, uint64_t va) +{ + env->csr[index] = va; +#ifndef CONFIG_USER_ONLY + CPUState *cs = &(sw64_env_get_cpu(env)->parent_obj); + SW64CPU *cpu = SW64_CPU(cs); + if ((index == DTB_IA) || (index == DTB_IV) || (index == DTB_IVP) || + (index == DTB_IU) || (index == DTB_IS) || (index == ITB_IA) || + (index == ITB_IV) || (index == ITB_IVP) || (index == ITB_IU) || + (index == ITB_IS) || (index == PTBR)) { + tlb_flush(cs); + } + if (index == INT_CLR) { + env->csr[INT_STAT] &= ~va; + } + if ((index == TIMER_CTL) && (va == 1)) { + timer_mod(cpu->alarm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + env->csr[TIMER_TH]); + } + + if (index == TIMER_CTL && env->csr[index] == 1) { + timer_mod(cpu->alarm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000000 / 250); + } +#endif +} + +uint64_t cpu_sw64_load_fpcr(CPUSW64State *env) +{ + return (uint64_t)env->fpcr; +} + +void helper_tb_flush(CPUSW64State *env) +{ + tb_flush(CPU(sw64_env_get_cpu(env))); +} + +void helper_cpustate_update(CPUSW64State *env, uint64_t pc) +{ + switch (pc & 0x3) { + case 0x00: + env->flags = ENV_FLAG_HM_MODE; + break; + case 0x01: + env->flags &= ~(ENV_FLAG_PS_USER | ENV_FLAG_HM_MODE); + break; + case 0x02: + env->flags &= ~(ENV_FLAG_PS_USER | ENV_FLAG_HM_MODE); + break; + case 0x03: + env->flags = ENV_FLAG_PS_USER; + } +} diff --git a/target/sw64/helper.h b/target/sw64/helper.h new file mode 100644 index 0000000000000000000000000000000000000000..7cafa563c2ee678422a79440789a879c6b301b9e --- /dev/null +++ b/target/sw64/helper.h @@ -0,0 +1,127 @@ + +DEF_HELPER_FLAGS_2(zap, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(zapnot, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_2(cmpgeb, TCG_CALL_NO_RWG_SE, i64, i64, i64) +DEF_HELPER_FLAGS_1(s_to_memory, TCG_CALL_NO_RWG_SE, i32, i64) +DEF_HELPER_FLAGS_1(memory_to_s, TCG_CALL_NO_RWG_SE, i64, i32) +DEF_HELPER_FLAGS_2(fcvtls, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvtld, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_3(fcvtdl, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_2(fcvtdl_dyn, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_3(fris, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(frid, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_2(fcvtsd, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvtds, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fcvtwl, TCG_CALL_NO_RWG, 
i64, env, i64) +DEF_HELPER_FLAGS_2(fcvtlw, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_5(vfcvtsh, 0, void, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_5(vfcvths, 0, void, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(faddd, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fsubs, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fsubd, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmuls, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fmuld, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdivs, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fdivd, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_2(frecs, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(frecd, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fsqrts, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_2(fsqrt, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_4(fmas, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fmad, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fmss, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fmsd, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmas, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmad, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmss, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(fnmsd, TCG_CALL_NO_RWG, i64, env, i64, i64, i64) +DEF_HELPER_FLAGS_0(rtc, TCG_CALL_NO_RWG, i64) +DEF_HELPER_FLAGS_1(load_fpcr, 0, i64, env) +DEF_HELPER_FLAGS_2(store_fpcr, 0, void, env, i64) +DEF_HELPER_FLAGS_2(setfpcrx, 0, void, env, i64) +DEF_HELPER_FLAGS_2(ieee_input, 0, void, env, i64) +DEF_HELPER_FLAGS_2(ieee_input_s, 0, void, env, i64) +DEF_HELPER_FLAGS_2(read_csr, TCG_CALL_NO_RWG, i64, env, i64) +DEF_HELPER_FLAGS_3(write_csr, 0, void, env, i64, i64) +DEF_HELPER_FLAGS_2(cpustate_update, 0, void, env, i64) +DEF_HELPER_FLAGS_3(trace_mem, 0, void, env, i64, i64) +DEF_HELPER_FLAGS_3(fcmpun, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fcmpeq, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fcmple, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fcmplt, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fcmpge, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fcmpgt, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fcmpge_s, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(fcmple_s, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_4(srlow, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(sllow, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vlogzz, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vconw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vcond, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vshfw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_2(ctlzow, 0, i64, env, i64) +DEF_HELPER_FLAGS_4(vucaddw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucaddwi, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucsubw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucsubwi, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucaddh, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucaddhi, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucsubh, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucsubhi, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucaddb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucaddbi, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucsubb, 0, void, 
env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vucsubbi, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_3(vstw, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(vsts, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_3(vstd, TCG_CALL_NO_RWG, i64, env, i64, i64) +DEF_HELPER_FLAGS_2(v_print, 0, void, env, i64) +DEF_HELPER_FLAGS_1(tb_flush, 0, void, env) +DEF_HELPER_FLAGS_4(vmaxb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vminb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vmaxh, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vminh, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vmaxw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vminw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(sraow, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vsm4r, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vsm4key, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vsm3msw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vcmpueqb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vcmpugtb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vcmpueqbi, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vcmpugtbi, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vumaxb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vuminb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vumaxh, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vuminh, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vumaxw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vuminw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_5(vinsb, 0, void, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_5(vinsh, 0, void, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_4(vinsectlh, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vinsectlw, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_4(vinsectlb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_5(vshfq, 0, void, env, i64, i64, i64, i64) +DEF_HELPER_FLAGS_4(vshfqb, 0, void, env, i64, i64, i64) +DEF_HELPER_FLAGS_5(vsm3r, 0, void, env, i64, i64, i64, i64) + +#ifndef CONFIG_USER_ONLY +DEF_HELPER_FLAGS_2(fp_exc_raise, 0, void, env, i32) +DEF_HELPER_FLAGS_2(pri_ldw, 0, i64, env, i64) +DEF_HELPER_FLAGS_3(pri_stw, 0, void, env, i64, i64) +DEF_HELPER_FLAGS_2(pri_ldl, 0, i64, env, i64) +DEF_HELPER_FLAGS_3(pri_stl, 0, void, env, i64, i64) +#endif + +DEF_HELPER_3(excp, noreturn, env, int, int) +//DEF_HELPER_FLAGS_3(faddh, TCG_CALL_NO_RWG, i64, env, i64, i64) +//DEF_HELPER_FLAGS_3(fsubh, TCG_CALL_NO_RWG, i64, env, i64, i64) +//DEF_HELPER_FLAGS_3(fmulh, TCG_CALL_NO_RWG, i64, env, i64, i64) +#ifndef CONFIG_USER_ONLY +/* Scale factor for core3 cpu freq, ie number of ns per tick. */ +#define CPUFREQ_SCALE 3 +#endif + +/* SLAVE FLOAT HELPER. 
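Each DEF_HELPER_FLAGS_* line above is expanded by QEMU's helper-proto machinery into an ordinary C prototype plus TCG call metadata. A simplified standalone sketch of the expansion; the dh_ctype_* stand-ins are illustrative only, and the real macros in exec/helper-proto.h and exec/helper-gen.h additionally generate the TCG call wrappers and record side-effect flags such as TCG_CALL_NO_RWG for the optimizer:

```c
#include <stdint.h>

typedef struct CPUSW64State CPUSW64State;

/* Simplified stand-ins for QEMU's dh_ctype_* type mapping. */
#define dh_ctype_i64 uint64_t
#define dh_ctype_env CPUSW64State *

#define DEF_HELPER_FLAGS_3(name, flags, ret, t1, t2, t3) \
    dh_ctype_##ret helper_##name(dh_ctype_##t1, dh_ctype_##t2, dh_ctype_##t3);

/* The fadds entry from the list above becomes an ordinary prototype, i.e.
 * uint64_t helper_fadds(CPUSW64State *, uint64_t, uint64_t); */
DEF_HELPER_FLAGS_3(fadds, TCG_CALL_NO_RWG, i64, env, i64, i64)

int main(void)
{
    return 0;
}
```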
*/ diff --git a/target/sw64/int_helper.c b/target/sw64/int_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..131182585a4b13030dc0156aca32cb0d86c95215 --- /dev/null +++ b/target/sw64/int_helper.c @@ -0,0 +1,118 @@ +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" +#include "qemu/host-utils.h" +#include "exec/memattrs.h" + +uint64_t helper_zapnot(uint64_t val, uint64_t mskb) +{ + uint64_t mask; + + mask = -(mskb & 0x01) & 0x00000000000000ffull; + mask |= -(mskb & 0x02) & 0x000000000000ff00ull; + mask |= -(mskb & 0x04) & 0x0000000000ff0000ull; + mask |= -(mskb & 0x08) & 0x00000000ff000000ull; + mask |= -(mskb & 0x10) & 0x000000ff00000000ull; + mask |= -(mskb & 0x20) & 0x0000ff0000000000ull; + mask |= -(mskb & 0x40) & 0x00ff000000000000ull; + mask |= -(mskb & 0x80) & 0xff00000000000000ull; + + return val & mask; +} + +uint64_t helper_zap(uint64_t val, uint64_t mask) +{ + return helper_zapnot(val, ~mask); +} + +uint64_t helper_cmpgeb(uint64_t va, uint64_t vb) +{ + int i; + uint64_t ret = 0; + uint64_t tmp; + for (i = 0; i < 64; i += 8) { + tmp = ((va >> i) & 0xff) + (~(vb >> i) & 0xff) + 1; + ret |= (tmp >> 8) << (i / 8); + } + return ret; +} + +#ifndef CONFIG_USER_ONLY +static inline MemTxAttrs cpu_get_mem_attrs(CPUSW64State *env) +{ + return ((MemTxAttrs) { .secure = 1 }); +} + +static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) +{ + return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); +} + +uint64_t sw64_ldw_phys(CPUState *cs, hwaddr addr) +{ + SW64CPU *cpu = SW64_CPU(cs); + int32_t ret; + CPUSW64State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + + ret = (int32_t)address_space_ldl(as, addr, attrs, NULL); + + return (uint64_t)(int64_t)ret; +} + +void sw64_stw_phys(CPUState *cs, hwaddr addr, uint64_t val) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + + address_space_stl(as, addr, (uint32_t)val, attrs, NULL); +} + +uint64_t sw64_ldl_phys(CPUState *cs, hwaddr addr) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + + return address_space_ldq(as, addr, attrs, NULL); +} + +void sw64_stl_phys(CPUState *cs, hwaddr addr, uint64_t val) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + MemTxAttrs attrs = cpu_get_mem_attrs(env); + AddressSpace *as = cpu_addressspace(cs, attrs); + + address_space_stq(as, addr, val, attrs, NULL); +} + +uint64_t helper_pri_ldw(CPUSW64State *env, uint64_t hwaddr) +{ + CPUState *cs = CPU(sw64_env_get_cpu(env)); + return sw64_ldw_phys(cs, hwaddr); +} + +void helper_pri_stw(CPUSW64State *env, uint64_t val, uint64_t hwaddr) +{ + CPUState *cs = CPU(sw64_env_get_cpu(env)); + sw64_stw_phys(cs, hwaddr, val); +} + +uint64_t helper_pri_ldl(CPUSW64State *env, uint64_t hwaddr) +{ + CPUState *cs = CPU(sw64_env_get_cpu(env)); + return sw64_ldl_phys(cs, hwaddr); +} + +void helper_pri_stl(CPUSW64State *env, uint64_t val, uint64_t hwaddr) +{ + CPUState *cs = CPU(sw64_env_get_cpu(env)); + sw64_stl_phys(cs, hwaddr, val); +} +#endif diff --git a/target/sw64/kvm.c b/target/sw64/kvm.c new file mode 100644 index 0000000000000000000000000000000000000000..c38db7cabe0e2957b75c3643385a7f1df246b7b0 --- /dev/null +++ b/target/sw64/kvm.c @@ -0,0 +1,353 @@ +/* + * SW64 
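helper_cmpgeb() above relies on the classic carry-out trick: byte_a + (~byte_b & 0xff) + 1 computes byte_a - byte_b in nine bits, so bit 8 of the sum is set exactly when byte_a >= byte_b (unsigned). A standalone check of that per-byte comparison:

```c
#include <assert.h>
#include <stdint.h>

static uint64_t cmpgeb(uint64_t va, uint64_t vb)   /* mirrors the helper */
{
    uint64_t ret = 0;
    for (int i = 0; i < 64; i += 8) {
        uint64_t tmp = ((va >> i) & 0xff) + (~(vb >> i) & 0xff) + 1;
        ret |= (tmp >> 8) << (i / 8);              /* carry out = a >= b */
    }
    return ret;
}

int main(void)
{
    uint64_t va = 0x0000000000001020ULL;   /* bytes (LSB first): 20 10 00.. */
    uint64_t vb = 0x0000000000001110ULL;   /* bytes (LSB first): 10 11 00.. */

    /* bit0: 0x20 >= 0x10 -> 1; bit1: 0x10 >= 0x11 -> 0; bits 2-7: 0 >= 0 */
    assert(cmpgeb(va, vb) == 0xfd);
    return 0;
}
```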
implementation of KVM hooks
+ *
+ * Copyright (c) 2018 Lin Hainan
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+
+#include <linux/kvm.h>
+
+#include "qemu-common.h"
+#include "qemu/timer.h"
+#include "qemu/error-report.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
+#include "kvm_sw64.h"
+#include "cpu.h"
+#include "exec/memattrs.h"
+#include "exec/address-spaces.h"
+#include "hw/boards.h"
+#include "qemu/log.h"
+
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
+    KVM_CAP_LAST_INFO
+};
+/* 0x50000 jumps to the bootloader while 0x2f00000 jumps to the BIOS. */
+int kvm_sw64_vcpu_init(CPUState *cs)
+{
+    struct kvm_regs *regs;
+    SW64CPU *cpu = SW64_CPU(cs);
+
+    regs = (struct kvm_regs *)cpu->k_regs;
+    regs->pc = init_pc;
+    return kvm_vcpu_ioctl(cs, KVM_SET_REGS, regs);
+}
+
+static void kvm_sw64_host_cpu_class_init(ObjectClass *oc, void *data)
+{
+}
+
+static void kvm_sw64_host_cpu_initfn(Object *obj)
+{
+}
+
+static const TypeInfo host_sw64_cpu_type_info = {
+    .name = TYPE_SW64_HOST_CPU,
+    .parent = TYPE_SW64_CPU,
+    .instance_init = kvm_sw64_host_cpu_initfn,
+    .class_init = kvm_sw64_host_cpu_class_init,
+    .class_size = sizeof(SW64HostCPUClass),
+};
+
+int kvm_arch_init(MachineState *ms, KVMState *s)
+{
+    kvm_async_interrupts_allowed = true;
+
+    type_register_static(&host_sw64_cpu_type_info);
+
+    return 0;
+}
+
+/* 0x50000 jumps to the bootloader while 0x2f00000 jumps to the BIOS. */
+void kvm_sw64_reset_vcpu(SW64CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+    struct kvm_regs *regs;
+    int ret;
+    struct vcpucb *vcb;
+
+    regs = (struct kvm_regs *)cpu->k_regs;
+    regs->pc = init_pc;
+
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, regs);
+    if (ret < 0) {
+        fprintf(stderr, "kvm_sw64_reset_vcpu: KVM_SET_REGS failed: %s\n",
+                strerror(-ret));
+        abort();
+    }
+
+    vcb = (struct vcpucb *)cpu->k_vcb;
+    vcb->vcpu_irq_disabled = 1;
+
+    ret = kvm_vcpu_ioctl(cs, KVM_SW64_VCPU_INIT, NULL);
+    if (ret < 0) {
+        fprintf(stderr, "kvm_sw64_reset_vcpu: KVM_SW64_VCPU_INIT failed: %s\n",
+                strerror(-ret));
+        abort();
+    }
+}
+
+unsigned long kvm_arch_vcpu_id(CPUState *cpu)
+{
+    return cpu->cpu_index;
+}
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+    return kvm_sw64_vcpu_init(cs);
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+    return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+    int ret, i;
+    SW64CPU *cpu = SW64_CPU(cs);
+    CPUSW64State *env = &cpu->env;
+
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &cpu->k_regs);
+    if (ret < 0)
+        return ret;
+
+    ret = kvm_vcpu_ioctl(cs, KVM_SW64_GET_VCB, &cpu->k_vcb);
+    if (ret < 0)
+        return ret;
+
+    for (i = 0; i < 16; i++)
+        env->ir[i] = cpu->k_regs[i];
+
+    env->ir[16] = cpu->k_regs[155];
+    env->ir[17] = cpu->k_regs[156];
+    env->ir[18] = cpu->k_regs[157];
+
+    for (i = 19; i < 29; i++)
+        env->ir[i] = cpu->k_regs[i-3];
+
+    env->ir[29] = cpu->k_regs[154];
+
+    if (cpu->k_regs[152] >> 3)
+        env->ir[30] = cpu->k_vcb[3]; /* usp */
+    else
+        env->ir[30] = cpu->k_vcb[2]; /* ksp */
+
+    env->pc = cpu->k_regs[153];
+
+    return 0;
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+    int ret;
+    SW64CPU *cpu = SW64_CPU(cs);
+    struct vcpucb *vcb;
+
+    if (level == KVM_PUT_RUNTIME_STATE) {
+        int i;
+        CPUSW64State *env = &cpu->env;
+
+        for (i = 0; i < 16; i++)
+            cpu->k_regs[i] = env->ir[i];
+
+        for (i = 19; i < 29; i++)
+            cpu->k_regs[i-3] = env->ir[i];
+
+        cpu->k_regs[155] = env->ir[16];
+        cpu->k_regs[156] = env->ir[17];
+        cpu->k_regs[157] = env->ir[18];
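+
+        /*
+         * Mirror of kvm_arch_get_registers() above: ir[16..18] live at
+         * k_regs[155..157], ir[29] at k_regs[154] and the PC at k_regs[153];
+         * bit 3 of k_regs[152] selects whether ir[30] maps to the user
+         * (usp, k_vcb[3]) or kernel (ksp, k_vcb[2]) stack pointer.
+         */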
cpu->k_regs[154] = env->ir[29]; + + if (cpu->k_regs[152] >> 3) + cpu->k_vcb[3] = env->ir[30]; /* usp */ + else + cpu->k_vcb[2] = env->ir[30]; /* ksp */ + + cpu->k_regs[153] = env->pc; + } + + ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &cpu->k_regs); + if (ret < 0) + return ret; + vcb = (struct vcpucb *)cpu->k_vcb; + vcb->whami = kvm_arch_vcpu_id(cs); + fprintf(stderr,"vcpu %ld init.\n", vcb->whami); + + if (level == KVM_PUT_RESET_STATE) + vcb->pcbb = 0; + + return kvm_vcpu_ioctl(cs, KVM_SW64_SET_VCB, &cpu->k_vcb); +} + +static const uint32_t brk_insn = 0x00000080; + +int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) { + return -EINVAL; + } + + return 0; +} + +int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) +{ + static uint32_t brk; + + if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) || + brk != brk_insn || + cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) { + return -EINVAL; + } + + return 0; +} + +int kvm_arch_insert_hw_breakpoint(target_ulong addr, + target_ulong len, int type) +{ + qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__); + return -EINVAL; +} + +int kvm_arch_remove_hw_breakpoint(target_ulong addr, + target_ulong len, int type) +{ + qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__); + return -EINVAL; +} + +void kvm_arch_remove_all_hw_breakpoints(void) +{ + qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__); +} + +int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, + int vector, PCIDevice *dev) +{ + return -1; +} + +int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, + uint64_t address, uint32_t data, PCIDevice *dev) +{ + return 0; +} + +void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run) +{ +} + +MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run) +{ + return MEMTXATTRS_UNSPECIFIED; +} + +bool kvm_sw64_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit) +{ + SW64CPU *cpu = SW64_CPU(cs); + CPUSW64State *env = &cpu->env; + + /* Ensure PC is synchronised */ + kvm_cpu_synchronize_state(cs); + + if (cs->singlestep_enabled) { + return true; + } else if (kvm_find_sw_breakpoint(cs, debug_exit->epc)) { + return true; + } else { + error_report("%s: unhandled debug exit (%"PRIx64", %"PRIx64")", + __func__, env->pc, debug_exit->epc); + } + + return false; +} + +int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) +{ + int ret = 0; + + switch (run->exit_reason) { + case KVM_EXIT_DEBUG: + if (kvm_sw64_handle_debug(cs, &run->debug.arch)) { + ret = EXCP_DEBUG; + } /* otherwise return to guest */ + break; + default: + qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n", + __func__, run->exit_reason); + break; + } + return ret; +} + +bool kvm_arch_stop_on_emulation_error(CPUState *cs) +{ + return true; +} + +int kvm_arch_process_async_events(CPUState *cs) +{ + return 0; +} + +void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg) +{ +} + +void kvm_arch_init_irq_routing(KVMState *s) +{ + /* We know at this point that we're using the in-kernel + * irqchip, so we can use irqfds, and on x86 we know + * we can use msi via irqfd and GSI routing. 
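+     *
+     * Concretely, kvm_msi_via_irqfd_allowed lets callers bind MSI vectors
+     * to irqfds with kvm_irqchip_add_irqfd_notifier_gsi(), and
+     * kvm_gsi_routing_allowed enables kvm_irqchip_add_msi_route() /
+     * kvm_irqchip_update_msi_route() to edit the kernel's GSI routing table.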
+     */
+    kvm_msi_via_irqfd_allowed = true;
+    kvm_gsi_routing_allowed = true;
+}
+
+int kvm_arch_irqchip_create(KVMState *s)
+{
+    return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+    return -1;
+}
+
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+    return -1;
+}
+
+void kvm_sw64_register_slave(SW64CPU *cpu)
+{
+    CPUState *cs = CPU(cpu);
+
+    kvm_vcpu_ioctl(cs, KVM_SW64_USE_SLAVE, NULL);
+}
+
+bool kvm_arch_cpu_check_are_resettable(void)
+{
+    return true;
+}
+
+void kvm_arch_accel_class_init(ObjectClass *oc)
+{
+}
diff --git a/target/sw64/kvm_sw64.h b/target/sw64/kvm_sw64.h
new file mode 100644
index 0000000000000000000000000000000000000000..81dd760008c73283dce68b2e4b9ea830c5169346
--- /dev/null
+++ b/target/sw64/kvm_sw64.h
@@ -0,0 +1,56 @@
+/*
+ * QEMU KVM support -- SW64 specific functions.
+ *
+ * Copyright (c) 2018 Lin Hainan
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef QEMU_KVM_SW64_H
+#define QEMU_KVM_SW64_H
+
+#include "sysemu/kvm.h"
+#include "exec/memory.h"
+#include "qemu/error-report.h"
+
+/**
+ * kvm_sw64_vcpu_init:
+ * @cs: CPUState
+ *
+ * Initialize (or reinitialize) the VCPU by pointing its PC at the
+ * boot entry (init_pc) via the KVM_SET_REGS ioctl; the remaining
+ * per-VCPU state is set up by kvm_sw64_reset_vcpu().
+ *
+ * Returns: 0 on success, a negative error code otherwise.
+ */
+int kvm_sw64_vcpu_init(CPUState *cs);
+void kvm_sw64_reset_vcpu(SW64CPU *cpu);
+void kvm_sw64_register_slave(SW64CPU *cpu);
+
+#define TYPE_SW64_HOST_CPU "host-" TYPE_SW64_CPU
+#define SW64_HOST_CPU_CLASS(klass) \
+    OBJECT_CLASS_CHECK(SW64HostCPUClass, (klass), TYPE_SW64_HOST_CPU)
+#define SW64_HOST_CPU_GET_CLASS(obj) \
+    OBJECT_GET_CLASS(SW64HostCPUClass, (obj), TYPE_SW64_HOST_CPU)
+
+typedef struct SW64HostCPUClass {
+    /*< private >*/
+    SW64CPUClass parent_class;
+    /*< public >*/
+
+    uint64_t features;
+    uint32_t target;
+    const char *dtb_compatible;
+} SW64HostCPUClass;
+
+/**
+ * kvm_sw64_handle_debug:
+ * @cs: CPUState
+ * @debug_exit: debug part of the KVM exit structure
+ *
+ * Returns: TRUE if the debug exception was handled.
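+ *
+ * Called from kvm_arch_handle_exit() on a KVM_EXIT_DEBUG exit: a TRUE
+ * return is turned into EXCP_DEBUG so the stop is reported to the
+ * gdbstub, while FALSE resumes the guest.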
+ */ +bool kvm_sw64_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit); +#endif diff --git a/target/sw64/machine.c b/target/sw64/machine.c new file mode 100644 index 0000000000000000000000000000000000000000..93b1968ad8578dee4ada8c8d40e9b759f2969322 --- /dev/null +++ b/target/sw64/machine.c @@ -0,0 +1,18 @@ +#include "qemu/osdep.h" +#include "qemu-common.h" +#include "cpu.h" +#include "migration/vmstate.h" +#include "migration/cpu.h" + +VMStateDescription vmstate_sw64_cpu = { + .name = "cpu", + .version_id = 1, + .minimum_version_id = 1, + .fields = (VMStateField[]) { +#ifdef CONFIG_KVM + VMSTATE_UINTTL_ARRAY(k_regs, SW64CPU, 158), + VMSTATE_UINTTL_ARRAY(k_vcb, SW64CPU, 48), +#endif + VMSTATE_END_OF_LIST() + } +}; diff --git a/target/sw64/meson.build b/target/sw64/meson.build new file mode 100644 index 0000000000000000000000000000000000000000..332f2c2ee65dbcfa992f916dd0aab83687383eb1 --- /dev/null +++ b/target/sw64/meson.build @@ -0,0 +1,20 @@ +sw64_ss = ss.source_set() +sw64_ss.add(files( + 'cpu.c', + 'exception.c', + 'float_helper.c', + 'helper.c', + 'gdbstub.c', + 'int_helper.c', + 'profile.c', + 'simd_helper.c', + 'translate.c', +)) + +sw64_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c')) + +sw64_softmmu_ss = ss.source_set() +sw64_softmmu_ss.add(files('machine.c')) + +target_arch += {'sw64': sw64_ss} +target_softmmu_arch += {'sw64': sw64_softmmu_ss} diff --git a/target/sw64/profile.c b/target/sw64/profile.c new file mode 100644 index 0000000000000000000000000000000000000000..73fe077234c89200cab8414b9ecde23f0457da13 --- /dev/null +++ b/target/sw64/profile.c @@ -0,0 +1,2342 @@ +#include "translate.h" + +const char *insn_opc[535] = { + "sys_call", "call", "ret", "jmp", "br", "bsr", "memb", "imemb", + "wmemb", "rtc", "rcid", "halt", "rd_f", "wr_f", "rtid", + "csrws", "csrwc", "pri_rcsr", "pri_wcsr", "pri_ret", "lldw", "lldl", + "ldw_inc", "ldl_inc", "ldw_dec", "ldl_dec", "ldw_set", "ldl_set", "lstw", + "lstl", "ldw_nc", "ldl_nc", "ldd_nc", "stw_nc", "stl_nc", "std_nc", + "ldwe", "ldse", "ldde", "vlds", "vldd", "vsts", "vstd", + "fimovs", "fimovd", "addw", "subw", "s4addw", "s4subw", "s8addw", + "s8subw", "addl", "subl", "s4addl", "s4subl", "s8addl", "s8subl", + "mulw", "divw", "udivw", "remw", "uremw", "mull", "mulh", + "divl", "udivl", "reml", "ureml", "addpi", "addpis", "cmpeq", + "cmplt", "cmple", "cmpult", "cmpule", "sbt", "cbt", "and", + "bic", "bis", "ornot", "xor", "eqv", "inslb", "inslh", + "inslw", "insll", "inshb", "inshh", "inshw", "inshl", "slll", + "srll", "sral", "roll", "sllw", "srlw", "sraw", "rolw", + "extlb", "extlh", "extlw", "extll", "exthb", "exthh", "exthw", + "exthl", "ctpop", "ctlz", "cttz", "revbh", "revbw", "revbl", + "casw", "casl", "masklb", "masklh", "masklw", "maskll", "maskhb", + "maskhh", "maskhw", "maskhl", "zap", "zapnot", "sextb", "sexth", + "seleq", "selge", "selgt", "selle", "sellt", "selne", "sellbc", + "sellbs", "addwi", "subwi", "s4addwi", "s4subwi", "s8addwi", "s8subwi", + "addli", "subli", "s4addli", "s4subli", "s8addli", "s8subli", "mulwi", + "divwi", "udivwi", "remwi", "uremwi", "mulli", "mulhi", "divli", + "udivli", "remli", "uremli", "addpii", "addpisi", "cmpeqi", "cmplti", + "cmplei", "cmpulti", "cmpulei", "sbti", "cbti", "andi", "bici", + "bisi", "ornoti", "xori", "eqvi", "inslbi", "inslhi", "inslwi", + "inslli", "inshbi", "inshhi", "inshwi", "inshli", "sllli", "srlli", + "srali", "rolli", "sllwi", "srlwi", "srawi", "rolwi", "extlbi", + "extlhi", "extlwi", "extlli", "exthbi", "exthhi", "exthwi", "exthli", + "ctpopi", 
"ctlzi", "cttzi", "revbhi", "revbwi", "revbli", "caswi", + "casli", "masklbi", "masklhi", "masklwi", "masklli", "maskhbi", "maskhhi", + "maskhwi", "maskhli", "zapi", "zapnoti", "sextbi", "sexthi", "cmpgebi", + "seleqi", "selgei", "selgti", "sellei", "sellti", "selnei", "sellbci", + "sellbsi", "vlogzz", "fadds", "faddd", "fsubs", "fsubd", "fmuls", + "fmuld", "fdivs", "fdivd", "fsqrts", "fsqrtd", "fcmpeq", "fcmple", + "fcmplt", "fcmpun", "fcvtsd", "fcvtds", "fcvtdl_g", "fcvtdl_p", "fcvtdl_z", + "fcvtdl_n", "fcvtdl", "fcvtwl", "fcvtlw", "fcvtls", "fcvtld", "fcpys", + "fcpyse", "fcpysn", "ifmovs", "ifmovd", "rfpcr", "wfpcr", "setfpec0", + "setfpec1", "setfpec2", "setfpec3", "frecs", "frecd", "fris", "fris_g", + "fris_p", "fris_z", "fris_n", "frid", "frid_g", "frid_p", "frid_z", + "frid_n", "fmas", "fmad", "fmss", "fmsd", "fnmas", "fnmad", + "fnmss", "fnmsd", "fseleq", "fselne", "fsellt", "fselle", "fselgt", + "fselge", "vaddw", "vaddwi", "vsubw", "vsubwi", "vcmpgew", "vcmpgewi", + "vcmpeqw", "vcmpeqwi", "vcmplew", "vcmplewi", "vcmpltw", "vcmpltwi", "vcmpulew", + "vcmpulewi", "vcmpultw", "vcmpultwi", "vsllw", "vsllwi", "vsrlw", "vsrlwi", + "vsraw", "vsrawi", "vrolw", "vrolwi", "sllow", "sllowi", "srlow", + "srlowi", "vaddl", "vaddli", "vsubl", "vsubli", "vsllb", "vsllbi", + "vsrlb", "vsrlbi", "vsrab", "vsrabi", "vrolb", "vrolbi", "vsllh", + "vsllhi", "vsrlh", "vsrlhi", "vsrah", "vsrahi", "vrolh", "vrolhi", + "ctpopow", "ctlzow", "vslll", "vsllli", "vsrll", "vsrlli", "vsral", + "vsrali", "vroll", "vrolli", "vmaxb", "vminb", "vucaddw", "vucaddwi", + "vucsubw", "vucsubwi", "vucaddh", "vucaddhi", "vucsubh", "vucsubhi", "vucaddb", + "vucaddbi", "vucsubb", "vucsubbi", "sraow", "sraowi", "vsumw", "vsuml", + "vsm4r", "vbinvw", "vcmpueqb", "vcmpugtb", "vcmpugtbi", "vsm3msw", "vmaxh", + "vminh", "vmaxw", "vminw", "vmaxl", "vminl", "vumaxb", "vuminb", + "vumaxh", "vuminh", "vumaxw", "vuminw", "vumaxl", "vuminl", "vsm4key", + "vadds", "vaddd", "vsubs", "vsubd", "vmuls", "vmuld", "vdivs", + "vdivd", "vsqrts", "vsqrtd", "vfcmpeq", "vfcmple", "vfcmplt", "vfcmpun", + "vcpys", "vcpyse", "vcpysn", "vsums", "vsumd", "vfcvtsd", "vfcvtds", + "vfcvtls", "vfcvtld", "vfcvtdl", "vfcvtdl_g", "vfcvtdl_p", "vfcvtdl_z", "vfcvtdl_n", + "vfris", "vfris_g", "vfris_p", "vfris_z", "vfris_n", "vfrid", "vfrid_g", + "vfrid_p", "vfrid_z", "vfrid_n", "vfrecs", "vfrecd", "vmaxs", "vmins", + "vmaxd", "vmind", "vmas", "vmad", "vmss", "vmsd", "vnmas", + "vnmad", "vnmss", "vnmsd", "vfseleq", "vfsellt", "vfselle", "vseleqw", + "vseleqwi", "vsellbcw", "vsellbcwi", "vselltw", "vselltwi", "vsellew", "vsellewi", + "vinsw", "vinsf", "vextw", "vextf", "vcpyw", "vcpyf", "vconw", + "vshfw", "vcons", "vcond", "vinsb", "vinsh", "vinsectlh", "vinsectlw", + "vinsectll", "vinsectlb", "vshfq", "vshfqb", "vcpyb", "vcpyh", "vsm3r", + "vfcvtsh", "vfcvths", "vldw_u", "vstw_u", "vlds_u", "vsts_u", "vldd_u", + "vstd_u", "vstw_ul", "vstw_uh", "vsts_ul", "vsts_uh", "vstd_ul", "vstd_uh", + "vldd_nc", "vstd_nc", "lbr", "ldbu_a", "ldhu_a", "ldw_a", "ldl_a", + "flds_a", "fldd_a", "stbu_a", "sthu_a", "stw_a", "stl_a", "fsts_a", + "fstd_a", "dpfhr", "dpfhw", "ldbu", "ldhu", "ldw", "ldl", + "ldl_u", "pri_ldl", "pri_ldw", "flds", "fldd", "stb", "sth", + "stw", "stl", "stl_u", "pri_stl", "pri_stw", "fsts", "fstd", + "beq", "bne", "blt", "ble", "bgt", "bge", "blbc", + "blbs", "fbeq", "fbne", "fblt", "fble", "fbgt", "fbge", + "ldih", "ldi", }; + +void insn_profile(DisasContext *ctx, uint32_t insn) +{ + int32_t disp16, disp26 __attribute__((unused)); + uint8_t opc; + 
uint16_t fn3, fn4, fn6, fn8, fn11; + TCGv count; + int index, offs; + + opc = extract32(insn, 26, 6); + + fn3 = extract32(insn, 10, 3); + fn6 = extract32(insn, 10, 6); + fn4 = extract32(insn, 12, 4); + fn8 = extract32(insn, 5, 8); + fn11 = extract32(insn, 5, 11); + + disp16 = sextract32(insn, 0, 16); + disp26 = sextract32(insn, 0, 26); + + index = 0; + switch (opc) { + case 0x00: + /* SYS_CALL */ + index = SYS_CALL; + break; + case 0x01: + /* CALL */ + index = CALL; + break; + case 0x02: + /* RET */ + index = RET; + break; + case 0x03: + /* JMP */ + index = JMP; + break; + case 0x04: + /* BR */ + index = BR; + break; + case 0x05: + /* BSR */ + index = BSR; + break; + case 0x06: + switch (disp16) { + case 0x0000: + /* MEMB */ + index = MEMB; + break; + case 0x0001: + /* IMEMB */ + index = IMEMB; + break; + case 0x0002: + /* WMEMB */ + index = WMEMB; + break; + case 0x0020: + /* RTC */ + index = RTC; + break; + case 0x0040: + /* RCID */ + index = RCID; + break; + case 0x0080: + /* HALT */ + index = HALT; + break; + case 0x1000: + /* RD_F */ + index = RD_F; + break; + case 0x1020: + /* WR_F */ + index = WR_F; + break; + case 0x1040: + /* RTID */ + index = RTID; + break; + default: + if ((disp16 & 0xFF00) == 0xFC00) { + /* CSRWS */ + index = CSRWS; + break; + } + if ((disp16 & 0xFF00) == 0xFD00) { + /* CSRWC */ + index = CSRWC; + break; + } + if ((disp16 & 0xFF00) == 0xFE00) { + /* PRI_RCSR */ + index = PRI_RCSR; + break; + } + if ((disp16 & 0xFF00) == 0xFF00) { + /* PRI_WCSR */ + index = PRI_WCSR; + break; + } + goto do_invalid; + } + break; + case 0x07: + /* PRI_RET */ + index = PRI_RET; + break; + case 0x08: + switch (fn4) { + case 0x0: + /* LLDW */ + index = LLDW; + break; + case 0x1: + /* LLDL */ + index = LLDL; + break; + case 0x2: + /* LDW_INC */ + index = LDW_INC; + break; + case 0x3: + /* LDL_INC */ + index = LDL_INC; + break; + case 0x4: + /* LDW_DEC */ + index = LDW_DEC; + break; + case 0x5: + /* LDL_DEC */ + index = LDL_DEC; + break; + case 0x6: + /* LDW_SET */ + index = LDW_SET; + break; + case 0x7: + /* LDL_SET */ + index = LDL_SET; + break; + case 0x8: + /* LSTW */ + index = LSTW; + break; + case 0x9: + /* LSTL */ + index = LSTL; + break; + case 0xa: + /* LDW_NC */ + index = LDW_NC; + break; + case 0xb: + /* LDL_NC */ + index = LDL_NC; + break; + case 0xc: + /* LDD_NC */ + index = LDD_NC; + break; + case 0xd: + /* STW_NC */ + index = STW_NC; + break; + case 0xe: + /* STL_NC */ + index = STL_NC; + break; + case 0xf: + /* STD_NC */ + index = STD_NC; + break; + default: + goto do_invalid; + } + break; + case 0x9: + /* LDWE */ + index = LDWE; + break; + case 0x0a: + /* LDSE */ + index = LDSE; + break; + case 0x0b: + /* LDDE */ + index = LDDE; + break; + case 0x0c: + /* VLDS */ + index = VLDS; + break; + case 0x0d: + /* VLDD */ + index = VLDD; + break; + case 0x0e: + /* VSTS */ + index = VSTS; + break; + case 0x0f: + /* VSTD */ + index = VSTD; + break; + case 0x10: + if (fn11 == 0x70) { + /* FIMOVS */ + index = FIMOVS; + } else if (fn11 == 0x78) { + /* FIMOVD */ + index = FIMOVD; + } else { + switch (fn11 & 0xff) { + case 0x00: + /* ADDW */ + index = ADDW; + break; + case 0x01: + /* SUBW */ + index = SUBW; + break; + case 0x02: + /* S4ADDW */ + index = S4ADDW; + break; + case 0x03: + /* S4SUBW */ + index = S4SUBW; + break; + case 0x04: + /* S8ADDW */ + index = S8ADDW; + break; + case 0x05: + /* S8SUBW */ + index = S8SUBW; + break; + + case 0x08: + /* ADDL */ + index = ADDL; + break; + case 0x09: + /* SUBL */ + index = SUBL; + break; + case 0x0a: + /* S4ADDL */ + index = S4ADDL; + 
break; + case 0x0b: + /* S4SUBL */ + index = S4SUBL; + break; + case 0x0c: + /* S8ADDL */ + index = S8ADDL; + break; + case 0x0d: + /* S8SUBL */ + index = S8SUBL; + break; + case 0x10: + /* MULW */ + index = MULW; + break; + case 0x11: + /* DIVW */ + index = DIVW; + break; + case 0x12: + /* UDIVW */ + index = UDIVW; + break; + case 0x13: + /* REMW */ + index = REMW; + break; + case 0x14: + /* UREMW */ + index = UREMW; + break; + case 0x18: + /* MULL */ + index = MULL; + break; + case 0x19: + /* MULH */ + index = MULH; + break; + case 0x1A: + /* DIVL */ + index = DIVL; + break; + case 0x1B: + /* UDIVL */ + index = UDIVL; + break; + case 0x1C: + /* REML */ + index = REML; + break; + case 0x1D: + /* UREML */ + index = UREML; + break; + case 0x1E: + /* ADDPI */ + index = ADDPI; + break; + case 0x1F: + /* ADDPIS */ + index = ADDPIS; + break; + case 0x28: + /* CMPEQ */ + index = CMPEQ; + break; + case 0x29: + /* CMPLT */ + index = CMPLT; + break; + case 0x2a: + /* CMPLE */ + index = CMPLE; + break; + case 0x2b: + /* CMPULT */ + index = CMPULT; + break; + case 0x2c: + /* CMPULE */ + index = CMPULE; + break; + case 0x2D: + /* SBT */ + index = SBT; + break; + case 0x2E: + /* CBT */ + index = CBT; + break; + case 0x38: + /* AND */ + index = AND; + break; + case 0x39: + /* BIC */ + index = BIC; + break; + case 0x3a: + /* BIS */ + index = BIS; + break; + case 0x3b: + /* ORNOT */ + index = ORNOT; + break; + case 0x3c: + /* XOR */ + index = XOR; + break; + case 0x3d: + /* EQV */ + index = EQV; + break; + case 0x40: + /* INSLB */ + index = INSLB; + break; + case 0x41: + /* INSLH */ + index = INSLH; + break; + case 0x42: + /* INSLW */ + index = INSLW; + break; + case 0x43: + /* INSLL */ + index = INSLL; + break; + case 0x44: + /* INSHB */ + index = INSHB; + break; + case 0x45: + /* INSHH */ + index = INSHH; + break; + case 0x46: + /* INSHW */ + index = INSHW; + break; + case 0x47: + /* INSHL */ + index = INSHL; + break; + case 0x48: + /* SLLL */ + index = SLLL; + break; + case 0x49: + /* SRLL */ + index = SRLL; + break; + case 0x4a: + /* SRAL */ + index = SRAL; + break; + case 0x4B: + /* ROLL */ + index = ROLL; + break; + case 0x4C: + /* SLLW */ + index = SLLW; + break; + case 0x4D: + /* SRLW */ + index = SRLW; + break; + case 0x4E: + /* SRAW */ + index = SRAW; + break; + case 0x4F: + /* ROLW */ + index = ROLW; + break; + case 0x50: + /* EXTLB */ + index = EXTLB; + break; + case 0x51: + /* EXTLH */ + index = EXTLH; + break; + case 0x52: + /* EXTLW */ + index = EXTLW; + break; + case 0x53: + /* EXTLL */ + index = EXTLL; + break; + case 0x54: + /* EXTHB */ + index = EXTHB; + break; + case 0x55: + /* EXTHH */ + index = EXTHH; + break; + case 0x56: + /* EXTHW */ + index = EXTHW; + break; + case 0x57: + /* EXTHL */ + index = EXTHL; + break; + case 0x58: + /* CTPOP */ + index = CTPOP; + break; + case 0x59: + /* CTLZ */ + index = CTLZ; + break; + case 0x5a: + /* CTTZ */ + index = CTTZ; + break; + case 0x5B: + /* REVBH */ + index = REVBH; + break; + case 0x5C: + /* REVBW */ + index = REVBW; + break; + case 0x5D: + /* REVBL */ + index = REVBL; + break; + case 0x5E: + /* CASW */ + index = CASW; + break; + case 0x5F: + /* CASL */ + index = CASL; + break; + case 0x60: + /* MASKLB */ + index = MASKLB; + break; + case 0x61: + /* MASKLH */ + index = MASKLH; + break; + case 0x62: + /* MASKLW */ + index = MASKLW; + break; + case 0x63: + /* MASKLL */ + index = MASKLL; + break; + case 0x64: + /* MASKHB */ + index = MASKHB; + break; + case 0x65: + /* MASKHH */ + index = MASKHH; + break; + case 0x66: + /* MASKHW */ + index = 
MASKHW; + break; + case 0x67: + /* MASKHL */ + index = MASKHL; + break; + case 0x68: + /* ZAP */ + index = ZAP; + break; + case 0x69: + /* ZAPNOT */ + index = ZAPNOT; + break; + case 0x6a: + /* SEXTB */ + index = SEXTB; + break; + case 0x6b: + /* SEXTH */ + index = SEXTH; + break; + case 0x6c: + /* CMPGEB*/ + break; + default: + break; + } + } + break; + case 0x11: + switch (fn3) { + case 0x0: + /* SELEQ */ + index = SELEQ; + break; + case 0x1: + /* SELGE */ + index = SELGE; + break; + case 0x2: + /* SELGT */ + index = SELGT; + break; + case 0x3: + /* SELLE */ + index = SELLE; + break; + case 0x4: + /* SELLT */ + index = SELLT; + break; + case 0x5: + /* SELNE */ + index = SELNE; + break; + case 0x6: + /* SELLBC */ + index = SELLBC; + break; + case 0x7: + /* SELLBS */ + index = SELLBS; + break; + default: + break; + } + break; + case 0x12: + switch (fn8 & 0xff) { + case 0x00: + /* ADDWI */ + index = ADDWI; + break; + case 0x01: + /* SUBWI */ + index = SUBWI; + break; + case 0x02: + /* S4ADDWI */ + index = S4ADDWI; + break; + case 0x03: + /* S4SUBWI */ + index = S4SUBWI; + break; + case 0x04: + /* S8ADDWI */ + index = S8ADDWI; + break; + case 0x05: + /* S8SUBWI */ + index = S8SUBWI; + break; + + case 0x08: + /* ADDLI */ + index = ADDLI; + break; + case 0x09: + /* SUBLI */ + index = SUBLI; + break; + case 0x0a: + /* S4ADDLI */ + index = S4ADDLI; + break; + case 0x0b: + /* S4SUBLI */ + index = S4SUBLI; + break; + case 0x0c: + /* S8ADDLI */ + index = S8ADDLI; + break; + case 0x0d: + /* S8SUBLI */ + index = S8SUBLI; + break; + case 0x10: + /* MULWI */ + index = MULWI; + break; + case 0x11: + /* DIVWI */ + index = DIVWI; + break; + case 0x12: + /* UDIVWI */ + index = UDIVWI; + break; + case 0x13: + /* REMWI */ + index = REMWI; + break; + case 0x14: + /* UREMWI */ + index = UREMWI; + break; + case 0x18: + /* MULLI */ + index = MULLI; + break; + case 0x19: + /* MULHI */ + index = MULHI; + break; + case 0x1A: + /* DIVLI */ + index = DIVLI; + break; + case 0x1B: + /* UDIVLI */ + index = UDIVLI; + break; + case 0x1C: + /* REMLI */ + index = REMLI; + break; + case 0x1D: + /* UREMLI */ + index = UREMLI; + break; + case 0x1E: + /* ADDPII */ + index = ADDPII; + break; + case 0x1F: + /* ADDPISI */ + index = ADDPISI; + break; + case 0x28: + /* CMPEQI */ + index = CMPEQI; + break; + case 0x29: + /* CMPLTI */ + index = CMPLTI; + break; + case 0x2a: + /* CMPLEI */ + index = CMPLEI; + break; + case 0x2b: + /* CMPULTI */ + index = CMPULTI; + break; + case 0x2c: + /* CMPULEI */ + index = CMPULEI; + break; + case 0x2D: + /* SBTI */ + index = SBTI; + break; + case 0x2E: + /* CBTI */ + index = CBTI; + break; + case 0x38: + /* ANDI */ + index = ANDI; + break; + case 0x39: + /* BICI */ + index = BICI; + break; + case 0x3a: + /* BISI */ + index = BISI; + break; + case 0x3b: + /* ORNOTI */ + index = ORNOTI; + break; + case 0x3c: + /* XORI */ + index = XORI; + break; + case 0x3d: + /* EQVI */ + index = EQVI; + break; + case 0x40: + /* INSLBI */ + index = INSLBI; + break; + case 0x41: + /* INSLHI */ + index = INSLHI; + break; + case 0x42: + /* INSLWI */ + index = INSLWI; + break; + case 0x43: + /* INSLLI */ + index = INSLLI; + break; + case 0x44: + /* INSHBI */ + index = INSHBI; + break; + case 0x45: + /* INSHHI */ + index = INSHHI; + break; + case 0x46: + /* INSHWI */ + index = INSHWI; + break; + case 0x47: + /* INSHLI */ + index = INSHLI; + break; + case 0x48: + /* SLLLI */ + index = SLLLI; + break; + case 0x49: + /* SRLLI */ + index = SRLLI; + break; + case 0x4a: + /* SRALI */ + index = SRALI; + break; + case 0x4B: + 
/* ROLLI */ + index = ROLLI; + break; + case 0x4C: + /* SLLWI */ + index = SLLWI; + break; + case 0x4D: + /* SRLWI */ + index = SRLWI; + break; + case 0x4E: + /* SRAWI */ + index = SRAWI; + break; + case 0x4F: + /* ROLWI */ + index = ROLWI; + break; + case 0x50: + /* EXTLBI */ + index = EXTLBI; + break; + case 0x51: + /* EXTLHI */ + index = EXTLHI; + break; + case 0x52: + /* EXTLWI */ + index = EXTLWI; + break; + case 0x53: + /* EXTLLI */ + index = EXTLLI; + break; + case 0x54: + /* EXTHBI */ + index = EXTHBI; + break; + case 0x55: + /* EXTHHI */ + index = EXTHHI; + break; + case 0x56: + /* EXTHWI */ + index = EXTHWI; + break; + case 0x57: + /* EXTHLI */ + index = EXTHLI; + break; + case 0x58: + /* CTPOPI */ + index = CTPOPI; + break; + case 0x59: + /* CTLZI */ + index = CTLZI; + break; + case 0x5a: + /* CTTZI */ + index = CTTZI; + break; + case 0x5B: + /* REVBHI */ + index = REVBHI; + break; + case 0x5C: + /* REVBWI */ + index = REVBWI; + break; + case 0x5D: + /* REVBLI */ + index = REVBLI; + break; + case 0x5E: + /* CASWI */ + index = CASWI; + break; + case 0x5F: + /* CASLI */ + index = CASLI; + break; + case 0x60: + /* MASKLBI */ + index = MASKLBI; + break; + case 0x61: + /* MASKLHI */ + index = MASKLHI; + break; + case 0x62: + /* MASKLWI */ + index = MASKLWI; + break; + case 0x63: + /* MASKLLI */ + index = MASKLLI; + break; + case 0x64: + /* MASKHBI */ + index = MASKHBI; + break; + case 0x65: + /* MASKHHI */ + index = MASKHHI; + break; + case 0x66: + /* MASKHWI */ + index = MASKHWI; + break; + case 0x67: + /* MASKHLI */ + index = MASKHLI; + break; + case 0x68: + /* ZAPI */ + index = ZAPI; + break; + case 0x69: + /* ZAPNOTI */ + index = ZAPNOTI; + break; + case 0x6a: + /* SEXTBI */ + index = SEXTBI; + break; + case 0x6b: + /* SEXTHI */ + index = SEXTHI; + break; + case 0x6c: + /* CMPGEBI */ + index = CMPGEBI; + break; + default: + break; + } + break; + case 0x13: + switch (fn3) { + case 0x0: + /* SELEQI */ + index = SELEQI; + break; + case 0x1: + /* SELGEI */ + index = SELGEI; + break; + case 0x2: + /* SELGTI */ + index = SELGTI; + break; + case 0x3: + /* SELLEI */ + index = SELLEI; + break; + case 0x4: + /* SELLTI */ + index = SELLTI; + break; + case 0x5: + /* SELNEI */ + index = SELNEI; + break; + case 0x6: + /* SELLBCI */ + index = SELLBCI; + break; + case 0x7: + /* SELLBSI */ + index = SELLBSI; + break; + default: + break; + } + break; + case 0x14: + case 0x15: + case 0x16: + case 0x17: + /* VLOGZZ */ + index = VLOGZZ; + break; + case 0x18: + switch (fn8) { + case 0x00: + /* FADDS */ + index = FADDS; + break; + case 0x01: + /* FADDD */ + index = FADDD; + break; + case 0x02: + /* FSUBS */ + index = FSUBS; + break; + case 0x03: + /* FSUBD */ + index = FSUBD; + break; + case 0x4: + /* FMULS */ + index = FMULS; + break; + case 0x05: + /* FMULD */ + index = FMULD; + break; + case 0x06: + /* FDIVS */ + index = FDIVS; + break; + case 0x07: + /* FDIVD */ + index = FDIVD; + break; + case 0x08: + /* FSQRTS */ + index = FSQRTS; + break; + case 0x09: + /* FSQRTD */ + index = FSQRTD; + break; + case 0x10: + /* FCMPEQ */ + index = FCMPEQ; + break; + case 0x11: + /* FCMPLE */ + index = FCMPLE; + break; + case 0x12: + /* FCMPLT */ + index = FCMPLT; + break; + case 0x13: + /* FCMPUN */ + index = FCMPUN; + break; + case 0x20: + /* FCVTSD */ + index = FCVTSD; + break; + case 0x21: + /* FCVTDS */ + index = FCVTDS; + break; + case 0x22: + /* FCVTDL_G */ + index = FCVTDL_G; + break; + case 0x23: + /* FCVTDL_P */ + index = FCVTDL_P; + break; + case 0x24: + /* FCVTDL_Z */ + index = FCVTDL_Z; + break; + 
case 0x25: + /* FCVTDL_N */ + index = FCVTDL_N; + break; + case 0x27: + /* FCVTDL */ + index = FCVTDL; + break; + case 0x28: + /* FCVTWL */ + index = FCVTWL; + break; + case 0x29: + /* FCVTLW */ + index = FCVTLW; + break; + case 0x2d: + /* FCVTLS */ + index = FCVTLS; + break; + case 0x2f: + /* FCVTLD */ + index = FCVTLD; + break; + case 0x30: + /* FCPYS */ + index = FCPYS; + break; + case 0x31: + /* FCPYSE */ + index = FCPYSE; + break; + case 0x32: + /* FCPYSN */ + index = FCPYSN; + break; + case 0x40: + /* IFMOVS */ + index = IFMOVS; + break; + case 0x41: + /* IFMOVD */ + index = IFMOVD; + break; + case 0x50: + /* RFPCR */ + index = RFPCR; + break; + case 0x51: + /* WFPCR */ + index = WFPCR; + break; + case 0x54: + /* SETFPEC0 */ + index = SETFPEC0; + break; + case 0x55: + /* SETFPEC1 */ + index = SETFPEC1; + break; + case 0x56: + /* SETFPEC2 */ + index = SETFPEC2; + break; + case 0x57: + /* SETFPEC3 */ + index = SETFPEC3; + break; + case 0x58: + /* FRECS */ + index = FRECS; + break; + case 0x59: + /* FRECD */ + index = FRECD; + break; + case 0x5A: + /* FRIS */ + index = FRIS; + break; + case 0x5B: + /* FRIS_G */ + index = FRIS_G; + break; + case 0x5C: + /* FRIS_P */ + index = FRIS_P; + break; + case 0x5D: + /* FRIS_Z */ + index = FRIS_Z; + break; + case 0x5F: + /* FRIS_N */ + index = FRIS_N; + break; + case 0x60: + /* FRID */ + index = FRID; + break; + case 0x61: + /* FRID_G */ + index = FRID_G; + break; + case 0x62: + /* FRID_P */ + index = FRID_P; + break; + case 0x63: + /* FRID_Z */ + index = FRID_Z; + break; + case 0x64: + /* FRID_N */ + index = FRID_N; + break; + default: + break; + } + break; + case 0x19: + switch (fn6) { + case 0x00: + /* FMAS */ + index = FMAS; + break; + case 0x01: + /* FMAD */ + index = FMAD; + break; + case 0x02: + /* FMSS */ + index = FMSS; + break; + case 0x03: + /* FMSD */ + index = FMSD; + break; + case 0x04: + /* FNMAS */ + index = FNMAS; + break; + case 0x05: + /* FNMAD */ + index = FNMAD; + break; + case 0x06: + /* FNMSS */ + index = FNMSS; + break; + case 0x07: + /* FNMSD */ + index = FNMSD; + break; + case 0x10: + /* FSELEQ */ + index = FSELEQ; + break; + case 0x11: + /* FSELNE */ + index = FSELNE; + break; + case 0x12: + /* FSELLT */ + index = FSELLT; + break; + case 0x13: + /* FSELLE */ + index = FSELLE; + break; + case 0x14: + /* FSELGT */ + index = FSELGT; + break; + case 0x15: + /* FSELGE */ + index = FSELGE; + break; + default: + break; + } + break; + case 0x1A: + switch (fn8) { + case 0x00: + /* VADDW */ + index = VADDW; + break; + case 0x20: + /* VADDWI */ + index = VADDWI; + break; + case 0x01: + /* VSUBW */ + index = VSUBW; + break; + case 0x21: + /* VSUBWI */ + index = VSUBWI; + break; + case 0x02: + /* VCMPGEW */ + index = VCMPGEW; + break; + case 0x22: + /* VCMPGEWI */ + index = VCMPGEWI; + break; + case 0x03: + /* VCMPEQW */ + index = VCMPEQW; + break; + case 0x23: + /* VCMPEQWI */ + index = VCMPEQWI; + break; + case 0x04: + /* VCMPLEW */ + index = VCMPLEW; + break; + case 0x24: + /* VCMPLEWI */ + index = VCMPLEWI; + break; + case 0x05: + /* VCMPLTW */ + index = VCMPLTW; + break; + case 0x25: + /* VCMPLTWI */ + index = VCMPLTWI; + break; + case 0x06: + /* VCMPULEW */ + index = VCMPULEW; + break; + case 0x26: + /* VCMPULEWI */ + index = VCMPULEWI; + break; + case 0x07: + /* VCMPULTW */ + index = VCMPULTW; + break; + case 0x27: + /* VCMPULTWI */ + index = VCMPULTWI; + break; + case 0x08: + /* VSLLW */ + index = VSLLW; + break; + case 0x28: + /* VSLLWI */ + index = VSLLWI; + break; + case 0x09: + /* VSRLW */ + index = VSRLW; + break; + case 
0x29: + /* VSRLWI */ + index = VSRLWI; + break; + case 0x0A: + /* VSRAW */ + index = VSRAW; + break; + case 0x2A: + /* VSRAWI */ + index = VSRAWI; + break; + case 0x0B: + /* VROLW */ + index = VROLW; + break; + case 0x2B: + /* VROLWI */ + index = VROLWI; + break; + case 0x0C: + /* SLLOW */ + index = SLLOW; + break; + case 0x2C: + /* SLLOWI */ + index = SLLOWI; + break; + case 0x0D: + /* SRLOW */ + index = SRLOW; + break; + case 0x2D: + /* SRLOWI */ + index = SRLOWI; + break; + case 0x0E: + /* VADDL */ + index = VADDL; + break; + case 0x2E: + /* VADDLI */ + index = VADDLI; + break; + case 0x0F: + /* VSUBL */ + index = VSUBL; + break; + case 0x2F: + /* VSUBLI */ + index = VSUBLI; + break; + case 0x10: + /* VSLLB */ + index = VSLLB; + break; + case 0x30: + /* VSLLBI */ + index = VSLLBI; + break; + case 0x11: + /* VSRLB */ + index = VSRLB; + break; + case 0x31: + /* VSRLBI */ + index = VSRLBI; + break; + case 0x12: + /* VSRAB */ + index = VSRAB; + break; + case 0x32: + /* VSRABI */ + index = VSRABI; + break; + case 0x13: + /* VROLB */ + index = VROLB; + break; + case 0x33: + /* VROLBI */ + index = VROLBI; + break; + case 0x14: + /* VSLLH */ + index = VSLLH; + break; + case 0x34: + /* VSLLHI */ + index = VSLLHI; + break; + case 0x15: + /* VSRLH */ + index = VSRLH; + break; + case 0x35: + /* VSRLHI */ + index = VSRLHI; + break; + case 0x16: + /* VSRAH */ + index = VSRAH; + break; + case 0x36: + /* VSRAHI */ + index = VSRAHI; + break; + case 0x17: + /* VROLH */ + index = VROLH; + break; + case 0x37: + /* VROLHI */ + index = VROLHI; + break; + case 0x18: + /* CTPOPOW */ + index = CTPOPOW; + break; + case 0x19: + /* CTLZOW */ + index = CTLZOW; + break; + case 0x1A: + /* VSLLL */ + index = VSLLL; + break; + case 0x3A: + /* VSLLLI */ + index = VSLLLI; + break; + case 0x1B: + /* VSRLL */ + index = VSRLL; + break; + case 0x3B: + /* VSRLLI */ + index = VSRLLI; + break; + case 0x1C: + /* VSRAL */ + index = VSRAL; + break; + case 0x3C: + /* VSRALI */ + index = VSRALI; + break; + case 0x1D: + /* VROLL */ + index = VROLL; + break; + case 0x3D: + /* VROLLI */ + index = VROLLI; + break; + case 0x1E: + /* VMAXB */ + index = VMAXB; + break; + case 0x1F: + /* VMINB */ + index = VMINB; + break; + case 0x40: + /* VUCADDW */ + index = VUCADDW; + break; + case 0x60: + /* VUCADDWI */ + index = VUCADDWI; + break; + case 0x41: + /* VUCSUBW */ + index = VUCSUBW; + break; + case 0x61: + /* VUCSUBWI */ + index = VUCSUBWI; + break; + case 0x42: + /* VUCADDH */ + index = VUCADDH; + break; + case 0x62: + /* VUCADDHI */ + index = VUCADDHI; + break; + case 0x43: + /* VUCSUBH */ + index = VUCSUBH; + break; + case 0x63: + /* VUCSUBHI */ + index = VUCSUBHI; + break; + case 0x44: + /* VUCADDB */ + index = VUCADDB; + break; + case 0x64: + /* VUCADDBI */ + index = VUCADDBI; + break; + case 0x45: + /* VUCSUBB */ + index = VUCSUBB; + break; + case 0x65: + /* VUCSUBBI */ + index = VUCSUBBI; + break; + case 0x46: + /* SRAOW */ + index = SRAOW; + break; + case 0x66: + /* SRAOWI */ + index = SRAOWI; + break; + case 0x47: + /* VSUMW */ + index = VSUMW; + break; + case 0x48: + /* VSUML */ + index = VSUML; + break; + case 0x49: + /* VSM4R */ + index = VSM4R; + break; + case 0x4A: + /* VBINVW */ + index = VBINVW; + break; + case 0x4B: + /* VCMPUEQB */ + index = VCMPUEQB; + break; + case 0x6B: + /* VCMPUEQBI*/ + break; + case 0x4C: + /* VCMPUGTB */ + index = VCMPUGTB; + break; + case 0x6C: + /* VCMPUGTBI */ + index = VCMPUGTBI; + break; + case 0x4D: + /* VSM3MSW */ + index = VSM3MSW; + break; + case 0x50: + /* VMAXH */ + index = VMAXH; + 
break; + case 0x51: + /* VMINH */ + index = VMINH; + break; + case 0x52: + /* VMAXW */ + index = VMAXW; + break; + case 0x53: + /* VMINW */ + index = VMINW; + break; + case 0x54: + /* VMAXL */ + index = VMAXL; + break; + case 0x55: + /* VMINL */ + index = VMINL; + break; + case 0x56: + /* VUMAXB */ + index = VUMAXB; + break; + case 0x57: + /* VUMINB */ + index = VUMINB; + break; + case 0x58: + /* VUMAXH */ + index = VUMAXH; + break; + case 0x59: + /* VUMINH */ + index = VUMINH; + break; + case 0x5A: + /* VUMAXW */ + index = VUMAXW; + break; + case 0x5B: + /* VUMINW */ + index = VUMINW; + break; + case 0x5C: + /* VUMAXL */ + index = VUMAXL; + break; + case 0x5D: + /* VUMINL */ + index = VUMINL; + break; + case 0x68: + /* VSM4KEY */ + index = VSM4KEY; + break; + case 0x80: + /* VADDS */ + index = VADDS; + break; + case 0x81: + /* VADDD */ + index = VADDD; + break; + case 0x82: + /* VSUBS */ + index = VSUBS; + break; + case 0x83: + /* VSUBD */ + index = VSUBD; + break; + case 0x84: + /* VMULS */ + index = VMULS; + break; + case 0x85: + /* VMULD */ + index = VMULD; + break; + case 0x86: + /* VDIVS */ + index = VDIVS; + break; + case 0x87: + /* VDIVD */ + index = VDIVD; + break; + case 0x88: + /* VSQRTS */ + index = VSQRTS; + break; + case 0x89: + /* VSQRTD */ + index = VSQRTD; + break; + case 0x8C: + /* VFCMPEQ */ + index = VFCMPEQ; + break; + case 0x8D: + /* VFCMPLE */ + index = VFCMPLE; + break; + case 0x8E: + /* VFCMPLT */ + index = VFCMPLT; + break; + case 0x8F: + /* VFCMPUN */ + index = VFCMPUN; + break; + case 0x90: + /* VCPYS */ + index = VCPYS; + break; + case 0x91: + /* VCPYSE */ + index = VCPYSE; + break; + case 0x92: + /* VCPYSN */ + index = VCPYSN; + break; + case 0x93: + /* VSUMS */ + index = VSUMS; + break; + case 0x94: + /* VSUMD */ + index = VSUMD; + break; + case 0x95: + /* VFCVTSD */ + index = VFCVTSD; + break; + case 0x96: + /* VFCVTDS */ + index = VFCVTDS; + break; + case 0x99: + /* VFCVTLS */ + index = VFCVTLS; + break; + case 0x9A: + /* VFCVTLD */ + index = VFCVTLD; + break; + case 0x9B: + /* VFCVTDL */ + index = VFCVTDL; + break; + case 0x9C: + /* VFCVTDL_G */ + index = VFCVTDL_G; + break; + case 0x9D: + /* VFCVTDL_P */ + index = VFCVTDL_P; + break; + case 0x9E: + /* VFCVTDL_Z */ + index = VFCVTDL_Z; + break; + case 0x9F: + /* VFCVTDL_N */ + index = VFCVTDL_N; + break; + case 0xA0: + /* VFRIS */ + index = VFRIS; + break; + case 0xA1: + /* VFRIS_G */ + index = VFRIS_G; + break; + case 0xA2: + /* VFRIS_P */ + index = VFRIS_P; + break; + case 0xA3: + /* VFRIS_Z */ + index = VFRIS_Z; + break; + case 0xA4: + /* VFRIS_N */ + index = VFRIS_N; + break; + case 0xA5: + /* VFRID */ + index = VFRID; + break; + case 0xA6: + /* VFRID_G */ + index = VFRID_G; + break; + case 0xA7: + /* VFRID_P */ + index = VFRID_P; + break; + case 0xA8: + /* VFRID_Z */ + index = VFRID_Z; + break; + case 0xA9: + /* VFRID_N */ + index = VFRID_N; + break; + case 0xAA: + /* VFRECS */ + index = VFRECS; + break; + case 0xAB: + /* VFRECD */ + index = VFRECD; + break; + case 0xAC: + /* VMAXS */ + index = VMAXS; + break; + case 0xAD: + /* VMINS */ + index = VMINS; + break; + case 0xAE: + /* VMAXD */ + index = VMAXD; + break; + case 0xAF: + /* VMIND */ + index = VMIND; + break; + default: + break; + } + break; + case 0x1B: + switch (fn6) { + case 0x00: + /* VMAS */ + index = VMAS; + break; + case 0x01: + /* VMAD */ + index = VMAD; + break; + case 0x02: + /* VMSS */ + index = VMSS; + break; + case 0x03: + /* VMSD */ + index = VMSD; + break; + case 0x04: + /* VNMAS */ + index = VNMAS; + break; + case 0x05: + /* 
VNMAD */ + index = VNMAD; + break; + case 0x06: + /* VNMSS */ + index = VNMSS; + break; + case 0x07: + /* VNMSD */ + index = VNMSD; + break; + case 0x10: + /* VFSELEQ */ + index = VFSELEQ; + break; + case 0x12: + /* VFSELLT */ + index = VFSELLT; + break; + case 0x13: + /* VFSELLE */ + index = VFSELLE; + break; + case 0x18: + /* VSELEQW */ + index = VSELEQW; + break; + case 0x38: + /* VSELEQWI */ + index = VSELEQWI; + break; + case 0x19: + /* VSELLBCW */ + index = VSELLBCW; + break; + case 0x39: + /* VSELLBCWI */ + index = VSELLBCWI; + break; + case 0x1A: + /* VSELLTW */ + index = VSELLTW; + break; + case 0x3A: + /* VSELLTWI */ + index = VSELLTWI; + break; + case 0x1B: + /* VSELLEW */ + index = VSELLEW; + break; + case 0x3B: + /* VSELLEWI */ + index = VSELLEWI; + break; + case 0x20: + /* VINSW */ + index = VINSW; + break; + case 0x21: + /* VINSF */ + index = VINSF; + break; + case 0x22: + /* VEXTW */ + index = VEXTW; + break; + case 0x23: + /* VEXTF */ + index = VEXTF; + break; + case 0x24: + /* VCPYW */ + index = VCPYW; + break; + case 0x25: + /* VCPYF */ + index = VCPYF; + break; + case 0x26: + /* VCONW */ + index = VCONW; + break; + case 0x27: + /* VSHFW */ + index = VSHFW; + break; + case 0x28: + /* VCONS */ + index = VCONS; + break; + case 0x29: + /* VCOND */ + index = VCOND; + break; + case 0x2A: + /* VINSB */ + index = VINSB; + break; + case 0x2B: + /* VINSH */ + index = VINSH; + break; + case 0x2C: + /* VINSECTLH */ + index = VINSECTLH; + break; + case 0x2D: + /* VINSECTLW */ + index = VINSECTLW; + break; + case 0x2E: + /* VINSECTLL */ + index = VINSECTLL; + break; + case 0x2F: + /* VINSECTLB */ + index = VINSECTLB; + break; + case 0x30: + /* VSHFQ */ + index = VSHFQ; + break; + case 0x31: + /* VSHFQB */ + index = VSHFQB; + break; + case 0x32: + /* VCPYB */ + index = VCPYB; + break; + case 0x33: + /* VCPYH */ + index = VCPYH; + break; + case 0x34: + /* VSM3R */ + index = VSM3R; + break; + case 0x35: + /* VFCVTSH */ + index = VFCVTSH; + break; + case 0x36: + /* VFCVTHS */ + index = VFCVTHS; + break; + default: + break; + } + break; + case 0x1C: + switch (fn4) { + case 0x0: + /* VLDW_U */ + index = VLDW_U; + break; + case 0x1: + /* VSTW_U */ + index = VSTW_U; + break; + case 0x2: + /* VLDS_U */ + index = VLDS_U; + break; + case 0x3: + /* VSTS_U */ + index = VSTS_U; + break; + case 0x4: + /* VLDD_U */ + index = VLDD_U; + break; + case 0x5: + /* VSTD_U */ + index = VSTD_U; + break; + case 0x8: + /* VSTW_UL */ + index = VSTW_UL; + break; + case 0x9: + /* VSTW_UH */ + index = VSTW_UH; + break; + case 0xa: + /* VSTS_UL */ + index = VSTS_UL; + break; + case 0xb: + /* VSTS_UH */ + index = VSTS_UH; + break; + case 0xc: + /* VSTD_UL */ + index = VSTD_UL; + break; + case 0xd: + /* VSTD_UH */ + index = VSTD_UH; + break; + case 0xe: + /* VLDD_NC */ + index = VLDD_NC; + break; + case 0xf: + /* VSTD_NC */ + index = VSTD_NC; + break; + default: + break; + } + break; + case 0x1D: + /* LBR */ + index = LBR; + break; + case 0x1E: + switch (fn4) { + case 0x0: + /* LDBU_A */ + index = LDBU_A; + break; + case 0x1: + /* LDHU_A */ + index = LDHU_A; + break; + case 0x2: + /* LDW_A */ + index = LDW_A; + break; + case 0x3: + /* LDL_A */ + index = LDL_A; + break; + case 0x4: + /* FLDS_A */ + index = FLDS_A; + break; + case 0x5: + /* FLDD_A */ + index = FLDD_A; + break; + case 0x6: + /* STBU_A */ + index = STBU_A; + break; + case 0x7: + /* STHU_A */ + index = STHU_A; + break; + case 0x8: + /* STW_A */ + index = STW_A; + break; + case 0x9: + /* STL_A */ + index = STL_A; + break; + case 0xA: + /* FSTS_A */ + index 
= FSTS_A; + break; + case 0xB: + /* FSTD_A */ + index = FSTD_A; + break; + case 0xE: + /* DPFHR */ + index = DPFHR; + break; + case 0xF: + /* DPFHW */ + index = DPFHW; + break; + default: + break; + } + break; + case 0x20: + /* LDBU */ + index = LDBU; + break; + case 0x21: + /* LDHU */ + index = LDHU; + break; + case 0x22: + /* LDW */ + index = LDW; + break; + case 0x23: + /* LDL */ + index = LDL; + break; + case 0x24: + /* LDL_U */ + index = LDL_U; + break; + case 0x25: + if ((insn >> 12) & 1) { + /* PRI_LDL */ + index = PRI_LDL; + } else { + /* PRI_LDW */ + index = PRI_LDW; + } + break; + case 0x26: + /* FLDS */ + index = FLDS; + break; + case 0x27: + /* FLDD */ + index = FLDD; + break; + case 0x28: + /* STB */ + index = STB; + break; + case 0x29: + /* STH */ + index = STH; + break; + case 0x2a: + /* STW */ + index = STW; + break; + case 0x2b: + /* STL */ + index = STL; + break; + case 0x2c: + /* STL_U */ + index = STL_U; + break; + case 0x2d: + if ((insn >> 12) & 1) { + /* PRI_STL */ + index = PRI_STL; + } else { + /* PRI_STW */ + index = PRI_STW; + } + break; + case 0x2e: + /* FSTS */ + index = FSTS; + break; + case 0x2f: + /* FSTD */ + index = FSTD; + break; + case 0x30: + /* BEQ */ + index = BEQ; + break; + case 0x31: + /* BNE */ + index = BNE; + break; + case 0x32: + /* BLT */ + index = BLT; + break; + case 0x33: + /* BLE */ + index = BLE; + break; + case 0x34: + /* BGT */ + index = BGT; + break; + case 0x35: + /* BGE */ + index = BGE; + break; + case 0x36: + /* BLBC */ + index = BLBC; + break; + case 0x37: + /* BLBS */ + index = BLBS; + break; + case 0x38: + /* FBEQ */ + index = FBEQ; + break; + case 0x39: + /* FBNE */ + index = FBNE; + break; + case 0x3a: + /* FBLT */ + index = FBLT; + break; + case 0x3b: + /* FBLE */ + index = FBLE; + break; + case 0x3c: + /* FBGT */ + index = FBGT; + break; + case 0x3d: + /* FBGE */ + index = FBGE; + break; + case 0x3f: + /* LDIH */ + index = LDIH; + break; + case 0x3e: + /* LDI */ + index = LDI; + break; + default: +do_invalid: + break; + } + count = tcg_temp_new(); + offs = offsetof(CPUSW64State, insn_count[index]); + tcg_gen_ld_i64(count, cpu_env, offs); + tcg_gen_addi_i64(count, count, 1); + tcg_gen_st_i64(count, cpu_env, offs); + tcg_temp_free(count); +} diff --git a/target/sw64/profile.h b/target/sw64/profile.h new file mode 100644 index 0000000000000000000000000000000000000000..5aca541ea736a856f60ad74c0007c48a61318217 --- /dev/null +++ b/target/sw64/profile.h @@ -0,0 +1,541 @@ +#ifndef PROFILE_H +#define PROFILE_H +#define SYS_CALL 0 +#define CALL 1 +#define RET 2 +#define JMP 3 +#define BR 4 +#define BSR 5 +#define MEMB 6 +#define IMEMB 7 +#define WMEMB 8 +#define RTC 9 +#define RCID 10 +#define HALT 11 +#define RD_F 12 +#define WR_F 13 +#define RTID 14 +#define CSRWS 15 +#define CSRWC 16 +#define PRI_RCSR 17 +#define PRI_WCSR 18 +#define PRI_RET 19 +#define LLDW 20 +#define LLDL 21 +#define LDW_INC 22 +#define LDL_INC 23 +#define LDW_DEC 24 +#define LDL_DEC 25 +#define LDW_SET 26 +#define LDL_SET 27 +#define LSTW 28 +#define LSTL 29 +#define LDW_NC 30 +#define LDL_NC 31 +#define LDD_NC 32 +#define STW_NC 33 +#define STL_NC 34 +#define STD_NC 35 +#define LDWE 36 +#define LDSE 37 +#define LDDE 38 +#define VLDS 39 +#define VLDD 40 +#define VSTS 41 +#define VSTD 42 +#define FIMOVS 43 +#define FIMOVD 44 +#define ADDW 45 +#define SUBW 46 +#define S4ADDW 47 +#define S4SUBW 48 +#define S8ADDW 49 +#define S8SUBW 50 +#define ADDL 51 +#define SUBL 52 +#define S4ADDL 53 +#define S4SUBL 54 +#define S8ADDL 55 +#define S8SUBL 56 +#define MULW 57 
+#define DIVW 58 +#define UDIVW 59 +#define REMW 60 +#define UREMW 61 +#define MULL 62 +#define MULH 63 +#define DIVL 64 +#define UDIVL 65 +#define REML 66 +#define UREML 67 +#define ADDPI 68 +#define ADDPIS 69 +#define CMPEQ 70 +#define CMPLT 71 +#define CMPLE 72 +#define CMPULT 73 +#define CMPULE 74 +#define SBT 75 +#define CBT 76 +#define AND 77 +#define BIC 78 +#define BIS 79 +#define ORNOT 80 +#define XOR 81 +#define EQV 82 +#define INSLB 83 +#define INSLH 84 +#define INSLW 85 +#define INSLL 86 +#define INSHB 87 +#define INSHH 88 +#define INSHW 89 +#define INSHL 90 +#define SLLL 91 +#define SRLL 92 +#define SRAL 93 +#define ROLL 94 +#define SLLW 95 +#define SRLW 96 +#define SRAW 97 +#define ROLW 98 +#define EXTLB 99 +#define EXTLH 100 +#define EXTLW 101 +#define EXTLL 102 +#define EXTHB 103 +#define EXTHH 104 +#define EXTHW 105 +#define EXTHL 106 +#define CTPOP 107 +#define CTLZ 108 +#define CTTZ 109 +#define REVBH 110 +#define REVBW 111 +#define REVBL 112 +#define CASW 113 +#define CASL 114 +#define MASKLB 115 +#define MASKLH 116 +#define MASKLW 117 +#define MASKLL 118 +#define MASKHB 119 +#define MASKHH 120 +#define MASKHW 121 +#define MASKHL 122 +#define ZAP 123 +#define ZAPNOT 124 +#define SEXTB 125 +#define SEXTH 126 +#define SELEQ 127 +#define SELGE 128 +#define SELGT 129 +#define SELLE 130 +#define SELLT 131 +#define SELNE 132 +#define SELLBC 133 +#define SELLBS 134 +#define ADDWI 135 +#define SUBWI 136 +#define S4ADDWI 137 +#define S4SUBWI 138 +#define S8ADDWI 139 +#define S8SUBWI 140 +#define ADDLI 141 +#define SUBLI 142 +#define S4ADDLI 143 +#define S4SUBLI 144 +#define S8ADDLI 145 +#define S8SUBLI 146 +#define MULWI 147 +#define DIVWI 148 +#define UDIVWI 149 +#define REMWI 150 +#define UREMWI 151 +#define MULLI 152 +#define MULHI 153 +#define DIVLI 154 +#define UDIVLI 155 +#define REMLI 156 +#define UREMLI 157 +#define ADDPII 158 +#define ADDPISI 159 +#define CMPEQI 160 +#define CMPLTI 161 +#define CMPLEI 162 +#define CMPULTI 163 +#define CMPULEI 164 +#define SBTI 165 +#define CBTI 166 +#define ANDI 167 +#define BICI 168 +#define BISI 169 +#define ORNOTI 170 +#define XORI 171 +#define EQVI 172 +#define INSLBI 173 +#define INSLHI 174 +#define INSLWI 175 +#define INSLLI 176 +#define INSHBI 177 +#define INSHHI 178 +#define INSHWI 179 +#define INSHLI 180 +#define SLLLI 181 +#define SRLLI 182 +#define SRALI 183 +#define ROLLI 184 +#define SLLWI 185 +#define SRLWI 186 +#define SRAWI 187 +#define ROLWI 188 +#define EXTLBI 189 +#define EXTLHI 190 +#define EXTLWI 191 +#define EXTLLI 192 +#define EXTHBI 193 +#define EXTHHI 194 +#define EXTHWI 195 +#define EXTHLI 196 +#define CTPOPI 197 +#define CTLZI 198 +#define CTTZI 199 +#define REVBHI 200 +#define REVBWI 201 +#define REVBLI 202 +#define CASWI 203 +#define CASLI 204 +#define MASKLBI 205 +#define MASKLHI 206 +#define MASKLWI 207 +#define MASKLLI 208 +#define MASKHBI 209 +#define MASKHHI 210 +#define MASKHWI 211 +#define MASKHLI 212 +#define ZAPI 213 +#define ZAPNOTI 214 +#define SEXTBI 215 +#define SEXTHI 216 +#define CMPGEBI 217 +#define SELEQI 218 +#define SELGEI 219 +#define SELGTI 220 +#define SELLEI 221 +#define SELLTI 222 +#define SELNEI 223 +#define SELLBCI 224 +#define SELLBSI 225 +#define VLOGZZ 226 +#define FADDS 227 +#define FADDD 228 +#define FSUBS 229 +#define FSUBD 230 +#define FMULS 231 +#define FMULD 232 +#define FDIVS 233 +#define FDIVD 234 +#define FSQRTS 235 +#define FSQRTD 236 +#define FCMPEQ 237 +#define FCMPLE 238 +#define FCMPLT 239 +#define FCMPUN 240 +#define FCVTSD 241 +#define FCVTDS 242 +#define FCVTDL_G 
243 +#define FCVTDL_P 244 +#define FCVTDL_Z 245 +#define FCVTDL_N 246 +#define FCVTDL 247 +#define FCVTWL 248 +#define FCVTLW 249 +#define FCVTLS 250 +#define FCVTLD 251 +#define FCPYS 252 +#define FCPYSE 253 +#define FCPYSN 254 +#define IFMOVS 255 +#define IFMOVD 256 +#define RFPCR 257 +#define WFPCR 258 +#define SETFPEC0 259 +#define SETFPEC1 260 +#define SETFPEC2 261 +#define SETFPEC3 262 +#define FRECS 263 +#define FRECD 264 +#define FRIS 265 +#define FRIS_G 266 +#define FRIS_P 267 +#define FRIS_Z 268 +#define FRIS_N 269 +#define FRID 270 +#define FRID_G 271 +#define FRID_P 272 +#define FRID_Z 273 +#define FRID_N 274 +#define FMAS 275 +#define FMAD 276 +#define FMSS 277 +#define FMSD 278 +#define FNMAS 279 +#define FNMAD 280 +#define FNMSS 281 +#define FNMSD 282 +#define FSELEQ 283 +#define FSELNE 284 +#define FSELLT 285 +#define FSELLE 286 +#define FSELGT 287 +#define FSELGE 288 +#define VADDW 289 +#define VADDWI 290 +#define VSUBW 291 +#define VSUBWI 292 +#define VCMPGEW 293 +#define VCMPGEWI 294 +#define VCMPEQW 295 +#define VCMPEQWI 296 +#define VCMPLEW 297 +#define VCMPLEWI 298 +#define VCMPLTW 299 +#define VCMPLTWI 300 +#define VCMPULEW 301 +#define VCMPULEWI 302 +#define VCMPULTW 303 +#define VCMPULTWI 304 +#define VSLLW 305 +#define VSLLWI 306 +#define VSRLW 307 +#define VSRLWI 308 +#define VSRAW 309 +#define VSRAWI 310 +#define VROLW 311 +#define VROLWI 312 +#define SLLOW 313 +#define SLLOWI 314 +#define SRLOW 315 +#define SRLOWI 316 +#define VADDL 317 +#define VADDLI 318 +#define VSUBL 319 +#define VSUBLI 320 +#define VSLLB 321 +#define VSLLBI 322 +#define VSRLB 323 +#define VSRLBI 324 +#define VSRAB 325 +#define VSRABI 326 +#define VROLB 327 +#define VROLBI 328 +#define VSLLH 329 +#define VSLLHI 330 +#define VSRLH 331 +#define VSRLHI 332 +#define VSRAH 333 +#define VSRAHI 334 +#define VROLH 335 +#define VROLHI 336 +#define CTPOPOW 337 +#define CTLZOW 338 +#define VSLLL 339 +#define VSLLLI 340 +#define VSRLL 341 +#define VSRLLI 342 +#define VSRAL 343 +#define VSRALI 344 +#define VROLL 345 +#define VROLLI 346 +#define VMAXB 347 +#define VMINB 348 +#define VUCADDW 349 +#define VUCADDWI 350 +#define VUCSUBW 351 +#define VUCSUBWI 352 +#define VUCADDH 353 +#define VUCADDHI 354 +#define VUCSUBH 355 +#define VUCSUBHI 356 +#define VUCADDB 357 +#define VUCADDBI 358 +#define VUCSUBB 359 +#define VUCSUBBI 360 +#define SRAOW 361 +#define SRAOWI 362 +#define VSUMW 363 +#define VSUML 364 +#define VSM4R 365 +#define VBINVW 366 +#define VCMPUEQB 367 +#define VCMPUGTB 368 +#define VCMPUGTBI 369 +#define VSM3MSW 370 +#define VMAXH 371 +#define VMINH 372 +#define VMAXW 373 +#define VMINW 374 +#define VMAXL 375 +#define VMINL 376 +#define VUMAXB 377 +#define VUMINB 378 +#define VUMAXH 379 +#define VUMINH 380 +#define VUMAXW 381 +#define VUMINW 382 +#define VUMAXL 383 +#define VUMINL 384 +#define VSM4KEY 385 +#define VADDS 386 +#define VADDD 387 +#define VSUBS 388 +#define VSUBD 389 +#define VMULS 390 +#define VMULD 391 +#define VDIVS 392 +#define VDIVD 393 +#define VSQRTS 394 +#define VSQRTD 395 +#define VFCMPEQ 396 +#define VFCMPLE 397 +#define VFCMPLT 398 +#define VFCMPUN 399 +#define VCPYS 400 +#define VCPYSE 401 +#define VCPYSN 402 +#define VSUMS 403 +#define VSUMD 404 +#define VFCVTSD 405 +#define VFCVTDS 406 +#define VFCVTLS 407 +#define VFCVTLD 408 +#define VFCVTDL 409 +#define VFCVTDL_G 410 +#define VFCVTDL_P 411 +#define VFCVTDL_Z 412 +#define VFCVTDL_N 413 +#define VFRIS 414 +#define VFRIS_G 415 +#define VFRIS_P 416 +#define VFRIS_Z 417 +#define VFRIS_N 418 +#define VFRID 419 +#define 
VFRID_G 420 +#define VFRID_P 421 +#define VFRID_Z 422 +#define VFRID_N 423 +#define VFRECS 424 +#define VFRECD 425 +#define VMAXS 426 +#define VMINS 427 +#define VMAXD 428 +#define VMIND 429 +#define VMAS 430 +#define VMAD 431 +#define VMSS 432 +#define VMSD 433 +#define VNMAS 434 +#define VNMAD 435 +#define VNMSS 436 +#define VNMSD 437 +#define VFSELEQ 438 +#define VFSELLT 439 +#define VFSELLE 440 +#define VSELEQW 441 +#define VSELEQWI 442 +#define VSELLBCW 443 +#define VSELLBCWI 444 +#define VSELLTW 445 +#define VSELLTWI 446 +#define VSELLEW 447 +#define VSELLEWI 448 +#define VINSW 449 +#define VINSF 450 +#define VEXTW 451 +#define VEXTF 452 +#define VCPYW 453 +#define VCPYF 454 +#define VCONW 455 +#define VSHFW 456 +#define VCONS 457 +#define VCOND 458 +#define VINSB 459 +#define VINSH 460 +#define VINSECTLH 461 +#define VINSECTLW 462 +#define VINSECTLL 463 +#define VINSECTLB 464 +#define VSHFQ 465 +#define VSHFQB 466 +#define VCPYB 467 +#define VCPYH 468 +#define VSM3R 469 +#define VFCVTSH 470 +#define VFCVTHS 471 +#define VLDW_U 472 +#define VSTW_U 473 +#define VLDS_U 474 +#define VSTS_U 475 +#define VLDD_U 476 +#define VSTD_U 477 +#define VSTW_UL 478 +#define VSTW_UH 479 +#define VSTS_UL 480 +#define VSTS_UH 481 +#define VSTD_UL 482 +#define VSTD_UH 483 +#define VLDD_NC 484 +#define VSTD_NC 485 +#define LBR 486 +#define LDBU_A 487 +#define LDHU_A 488 +#define LDW_A 489 +#define LDL_A 490 +#define FLDS_A 491 +#define FLDD_A 492 +#define STBU_A 493 +#define STHU_A 494 +#define STW_A 495 +#define STL_A 496 +#define FSTS_A 497 +#define FSTD_A 498 +#define DPFHR 499 +#define DPFHW 500 +#define LDBU 501 +#define LDHU 502 +#define LDW 503 +#define LDL 504 +#define LDL_U 505 +#define PRI_LDL 506 +#define PRI_LDW 507 +#define FLDS 508 +#define FLDD 509 +#define STB 510 +#define STH 511 +#define STW 512 +#define STL 513 +#define STL_U 514 +#define PRI_STL 515 +#define PRI_STW 516 +#define FSTS 517 +#define FSTD 518 +#define BEQ 519 +#define BNE 520 +#define BLT 521 +#define BLE 522 +#define BGT 523 +#define BGE 524 +#define BLBC 525 +#define BLBS 526 +#define FBEQ 527 +#define FBNE 528 +#define FBLT 529 +#define FBLE 530 +#define FBGT 531 +#define FBGE 532 +#define LDIH 533 +#define LDI 534 + +extern const char *insn_opc[535]; + +#endif diff --git a/target/sw64/simd_helper.c b/target/sw64/simd_helper.c new file mode 100644 index 0000000000000000000000000000000000000000..13bd52de3dce3f89fa59eddf715d9d2689289493 --- /dev/null +++ b/target/sw64/simd_helper.c @@ -0,0 +1,1058 @@ +#include "qemu/osdep.h" +#include "cpu.h" +#include "exec/exec-all.h" +#include "exec/helper-proto.h" + +#undef DEBUG_SIMD + +static inline uint8_t *get_element_b(CPUSW64State *env, uint64_t ra, + int index) +{ + return (uint8_t*)&env->fr[ra + (index / 8) * 32] + (index % 8); +} + +static inline uint16_t *get_element_h(CPUSW64State *env, uint64_t ra, + int index) +{ + return (uint16_t*)&env->fr[ra + (index / 4) * 32] + (index % 4); +} + +static inline uint32_t *get_element_w(CPUSW64State *env, uint64_t ra, + int index) +{ + return (uint32_t*)&env->fr[ra + (index / 2) * 32] + (index % 2); +} + +static inline uint64_t *get_element_l(CPUSW64State *env, uint64_t ra, + int index) +{ + return &env->fr[ra + index * 32]; +} + +void helper_srlow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift) +{ + int i; + int adden; + int dest, src; + adden = shift >> 6; + shift &= 0x3f; +#ifdef DEBUG_SIMD + printf("right shift = %ld adden = %d\n", shift, adden); + printf("in_fr[%ld]:", ra); + for (i = 3 ; i >= 0; i--) { + 
printf("%016lx ", env->fr[ra + 32 * i]); + } + printf("\n"); +#endif + + for (i = 0; (i + adden) < 4; i++) { + dest = i * 32 + rc; + src = (i + adden) * 32 + ra; + env->fr[dest] = env->fr[src] >> shift; + if (((i + adden) < 3) && (shift != 0)) + env->fr[dest] |= (env->fr[src + 32] << (64 - shift)); + } + + for (; i < 4; i++) { + env->fr[rc + i * 32] = 0; + } +#ifdef DEBUG_SIMD + printf("out_fr[%ld]:", rc); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[rc + 32 * i]); + } + printf("\n"); +#endif +} + +void helper_sllow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift) +{ + int i; + int adden; + int dest, src; + adden = shift >> 6; + shift &= 0x3f; +#ifdef DEBUG_SIMD + printf("left shift = %ld adden = %d\n", shift, adden); + printf("in_fr[%ld]:", ra); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[ra + 32 * i]); + } + printf("\n"); +#endif + + for (i = 3; (i - adden) >= 0; i--) { + dest = i * 32 + rc; + src = (i - adden) * 32 + ra; + env->fr[dest] = env->fr[src] << shift; + if (((i - adden) > 0) && (shift != 0)) + env->fr[dest] |= (env->fr[src - 32] >> (64 - shift)); + } + for (; i >= 0; i--) { + env->fr[rc + i * 32] = 0; + } +#ifdef DEBUG_SIMD + printf("out_fr[%ld]:", rc); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[rc + 32 * i]); + } + printf("\n"); +#endif +} + +static uint64_t do_logzz(uint64_t va, uint64_t vb, uint64_t vc, uint64_t zz) +{ + int i; + uint64_t ret = 0; + int index; + + for (i = 0; i < 64; i++) { + index = (((va >> i) & 1) << 2) | (((vb >> i) & 1) << 1) | ((vc >> i) & 1); + ret |= ((zz >> index) & 1) << i; + } + + return ret; +} + +void helper_vlogzz(CPUSW64State *env, uint64_t args, uint64_t rd, uint64_t zz) +{ + int i; + int ra, rb, rc; + ra = args >> 16; + rb = (args >> 8) & 0xff; + rc = args & 0xff; +#ifdef DEBUG_SIMD + printf("zz = %lx\n", zz); + printf("in_fr[%d]:", ra); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[ra + 32 * i]); + } + printf("\n"); + printf("in_fr[%d]:", rb); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[rb + 32 * i]); + } + printf("\n"); + printf("in_fr[%d]:", rc); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[rc + 32 * i]); + } + printf("\n"); +#endif + for (i = 0; i < 4; i++) { + env->fr[rd + i * 32] = do_logzz(env->fr[ra + i * 32], env->fr[rb + i * 32], + env->fr[rc + i * 32], zz); + } +#ifdef DEBUG_SIMD + printf("out_fr[%ld]:", rd); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[rd + 32 * i]); + } + printf("\n"); +#endif +} + +void helper_v_print(CPUSW64State *env, uint64_t v) +{ + printf("PC[%lx]: fr[%lx]:\n", GETPC(), v); +} + +void helper_vconw(CPUSW64State *env, uint64_t args, uint64_t rd, + uint64_t byte4_len) +{ + int ra, rb; + int count; + int i; + uint32_t *ptr_dst, *ptr_src; + uint32_t tmp[8]; + + ra = (args >> 8) & 0xff; + rb = args & 0xff; + count = 8 - byte4_len; + + for (i = 0; i < 8; i++) { + ptr_dst = get_element_w(env, rd, i); + if (i < count) { + ptr_src = get_element_w(env, ra, i + byte4_len); + } else { + ptr_src = get_element_w(env, rb, i - count); + } + tmp[i] = *ptr_src; + } + for (i = 0; i < 8; i++) { + ptr_dst = get_element_w(env, rd, i); + *ptr_dst = tmp[i]; + } +} + +void helper_vcond(CPUSW64State *env, uint64_t args, uint64_t rd, + uint64_t byte8_len) +{ + int ra, rb; + int count; + int i; + uint64_t *ptr_dst, *ptr_src; + uint64_t tmp[8]; + + ra = (args >> 8) & 0xff; + rb = args & 0xff; + count = 4 - byte8_len; + + for (i = 0; i < 4; i++) { + if (i < count) { + ptr_src = get_element_l(env, ra, i + byte8_len); + } else { + 
ptr_src = get_element_l(env, rb, i - count); + } + tmp[i] = *ptr_src; + } + for (i = 0; i < 4; i++) { + ptr_dst = get_element_l(env, rd, i + byte8_len); + *ptr_dst = tmp[i]; + } +} + +void helper_vshfw(CPUSW64State *env, uint64_t args, uint64_t rd, uint64_t vc) +{ + int ra, rb; + int i; + uint32_t *ptr_dst, *ptr_src; + uint32_t tmp[8]; + int flag, idx; + + ra = (args >> 8) & 0xff; + rb = args & 0xff; + + for (i = 0; i < 8; i++) { + flag = (vc >> (i * 4)) & 0x8; + idx = (vc >> (i * 4)) & 0x7; + if (flag == 0) { + ptr_src = get_element_w(env, ra, idx); + } else { + ptr_src = get_element_w(env, rb, idx); + } + tmp[i] = *ptr_src; + } + for (i = 0; i < 8; i++) { + ptr_dst = get_element_w(env, rd, i); + *ptr_dst = tmp[i]; + } +} + +uint64_t helper_ctlzow(CPUSW64State *env, uint64_t ra) +{ + int i, j; + uint64_t val; + uint64_t ctlz = 0; + + for (j = 3; j >= 0; j--) { + val = env->fr[ra + 32 * j]; + for (i = 63; i >= 0; i--) { + if ((val >> i) & 1) + return ctlz << 29; + else + ctlz++; + } + } + return ctlz << 29; +} + +void helper_vucaddw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + int a, b, c; + int ret; + int i; + + for (i = 0; i < 4; i++) { + a = (int)(env->fr[ra + i * 32] & 0xffffffff); + b = (int)(env->fr[rb + i * 32] & 0xffffffff); + c = a + b; + if ((c ^ a) < 0 && (c ^ b) < 0) { + if (a < 0) + c = 0x80000000; + else + c = 0x7fffffff; + } + ret = c; + + a = (int)(env->fr[ra + i * 32] >> 32); + b = (int)(env->fr[rb + i * 32] >> 32); + c = a + b; + if ((c ^ a) < 0 && (c ^ b) < 0) { + if (a < 0) + c = 0x80000000; + else + c = 0x7fffffff; + } + env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) | + (uint64_t)(uint32_t)ret; + } +} + +void helper_vucaddwi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) +{ + int a, b, c; + int ret; + int i; + + b = (int)vb; + for (i = 0; i < 4; i++) { + a = (int)(env->fr[ra + i * 32] & 0xffffffff); + c = a + b; + if ((c ^ a) < 0 && (c ^ b) < 0) { + if (a < 0) + c = 0x80000000; + else + c = 0x7fffffff; + } + ret = c; + + a = (int)(env->fr[ra + i * 32] >> 32); + c = a + b; + if ((c ^ a) < 0 && (c ^ b) < 0) { + if (a < 0) + c = 0x80000000; + else + c = 0x7fffffff; + } + env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) | + (uint64_t)(uint32_t)ret; + } +} + +void helper_vucsubw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + int a, b, c; + int ret; + int i; + + for (i = 0; i < 4; i++) { + a = (int)(env->fr[ra + i * 32] & 0xffffffff); + b = (int)(env->fr[rb + i * 32] & 0xffffffff); + c = a - b; + if ((b ^ a) < 0 && (c ^ a) < 0) { + if (a < 0) + c = 0x80000000; + else + c = 0x7fffffff; + } + ret = c; + + a = (int)(env->fr[ra + i * 32] >> 32); + b = (int)(env->fr[rb + i * 32] >> 32); + c = a - b; + if ((b ^ a) < 0 && (c ^ a) < 0) { + if (a < 0) + c = 0x80000000; + else + c = 0x7fffffff; + } + env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) | + (uint64_t)(uint32_t)ret; + } +} + +void helper_vucsubwi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) +{ + int a, b, c; + int ret; + int i; + + b = (int)vb; + for (i = 0; i < 4; i++) { + a = (int)(env->fr[ra + i * 32] & 0xffffffff); + c = a - b; + if ((b ^ a) < 0 && (c ^ a) < 0) { + if (a < 0) + c = 0x80000000; + else + c = 0x7fffffff; + } + ret = c; + + a = (int)(env->fr[ra + i * 32] >> 32); + c = a - b; + if ((b ^ a) < 0 && (c ^ a) < 0) { + if (a < 0) + c = 0x80000000; + else + c = 0x7fffffff; + } + env->fr[rc + i * 32] = ((uint64_t)(uint32_t)c << 32) | + (uint64_t)(uint32_t)ret; + } +} + +void helper_vucaddh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t 
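+ /*
+  * All of the VUCADD and VUCSUB helpers in this file use the same
+  * signed-saturation test: for c = a + b, overflow happened iff both
+  * operands share a sign and the result's sign differs from it, which
+  * is what ((c ^ a) < 0 && (c ^ b) < 0) checks; on overflow the result
+  * is clamped toward the sign of a. The subtract variants instead test
+  * ((b ^ a) < 0 && (c ^ a) < 0). Minimal stand-alone sketch for one
+  * 32-bit element (illustrative; the cast through unsigned sidesteps
+  * the signed-overflow UB that the plain int addition in these helpers
+  * technically has):
+  *
+  *   int32_t sat_add32(int32_t a, int32_t b)
+  *   {
+  *       int32_t c = (int32_t)((uint32_t)a + (uint32_t)b);
+  *       if (((c ^ a) & (c ^ b)) < 0)
+  *           c = (a < 0) ? INT32_MIN : INT32_MAX;
+  *       return c;
+  *   }
+  */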
rc) +{ + short a, b, c; + uint64_t ret; + int i, j; + + for (i = 0; i < 4; i++) { + ret = 0; + for (j = 0; j < 4; j++) { + a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff); + b = (short)((env->fr[rb + i * 32] >> (j * 16)) & 0xffff); + c = a + b; + if ((c ^ a) < 0 && (c ^ b) < 0) { + if (a < 0) + c = 0x8000; + else + c = 0x7fff; + } + ret |= ((uint64_t)(uint16_t)c) << (j * 16); + } + env->fr[rc + i * 32] = ret; + } +} + +void helper_vucaddhi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) +{ + short a, b, c; + uint64_t ret; + int i, j; + + b = (short)vb; + for (i = 0; i < 4; i++) { + ret = 0; + for (j = 0; j < 4; j++) { + a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff); + c = a + b; + if ((c ^ a) < 0 && (c ^ b) < 0) { + if (a < 0) + c = 0x8000; + else + c = 0x7fff; + } + ret |= ((uint64_t)(uint16_t)c) << (j * 16); + } + env->fr[rc + i * 32] = ret; + } +} + +void helper_vucsubh(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + short a, b, c; + uint64_t ret; + int i, j; + + for (i = 0; i < 4; i++) { + ret = 0; + for (j = 0; j < 4; j++) { + a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff); + b = (short)((env->fr[rb + i * 32] >> (j * 16)) & 0xffff); + c = a - b; + if ((b ^ a) < 0 && (c ^ a) < 0) { + if (a < 0) + c = 0x8000; + else + c = 0x7fff; + } + ret |= ((uint64_t)(uint16_t)c) << (j * 16); + } + env->fr[rc + i * 32] = ret; + } +} + +void helper_vucsubhi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) +{ + short a, b, c; + uint64_t ret; + int i, j; + + b = (short)vb; + for (i = 0; i < 4; i++) { + ret = 0; + for (j = 0; j < 4; j++) { + a = (short)((env->fr[ra + i * 32] >> (j * 16)) & 0xffff); + c = a - b; + if ((b ^ a) < 0 && (c ^ a) < 0) { + if (a < 0) + c = 0x8000; + else + c = 0x7fff; + } + ret |= ((uint64_t)(uint16_t)c) << (j * 16); + } + env->fr[rc + i * 32] = ret; + } +} + +void helper_vucaddb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + int8_t a, b, c; + uint64_t ret; + int i, j; + + for (i = 0; i < 4; i++) { + ret = 0; + for (j = 0; j < 8; j++) { + a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff); + b = (int8_t)((env->fr[rb + i * 32] >> (j * 8)) & 0xff); + c = a + b; + if ((c ^ a) < 0 && (c ^ b) < 0) { + if (a < 0) + c = 0x80; + else + c = 0x7f; + } + ret |= ((uint64_t)(uint8_t)c) << (j * 8); + } + env->fr[rc + i * 32] = ret; + } +} + +void helper_vucaddbi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) +{ + int8_t a, b, c; + uint64_t ret; + int i, j; + + b = (int8_t)(vb & 0xff); + for (i = 0; i < 4; i++) { + ret = 0; + for (j = 0; j < 8; j++) { + a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff); + c = a + b; + if ((c ^ a) < 0 && (c ^ b) < 0) { + if (a < 0) + c = 0x80; + else + c = 0x7f; + } + ret |= ((uint64_t)(uint8_t)c) << (j * 8); + } + env->fr[rc + i * 32] = ret; + } +} + +void helper_vucsubb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + int8_t a, b, c; + uint64_t ret; + int i, j; + + for (i = 0; i < 4; i++) { + ret = 0; + for (j = 0; j < 8; j++) { + a = (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xff); + b = (int8_t)((env->fr[rb + i * 32] >> (j * 8)) & 0xff); + c = a - b; + if ((b ^ a) < 0 && (c ^ a) < 0) { + if (a < 0) + c = 0x80; + else + c = 0x7f; + } + ret |= ((uint64_t)(uint8_t)c) << (j * 8); + } + env->fr[rc + i * 32] = ret; + } +} + +void helper_vucsubbi(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) +{ + int8_t a, b, c; + uint64_t ret; + int i, j; + + b = (int8_t)(vb & 0xff); + for (i = 0; i < 4; i++) { + ret = 0; + for (j = 0; j < 8; j++) { + a 
= (int8_t)((env->fr[ra + i * 32] >> (j * 8)) & 0xffff); + c = a - b; + if ((b ^ a) < 0 && (c ^ a) < 0) { + if (a < 0) + c = 0x80; + else + c = 0x7f; + } + ret |= ((uint64_t)(uint8_t)c) << (j * 8); + } + env->fr[rc + i * 32] = ret; + } +} + +uint64_t helper_vstw(CPUSW64State *env, uint64_t t0, uint64_t t1) +{ + uint64_t idx, shift; + + idx = t0 + (t1 / 2) * 32; + shift = (t1 % 2) * 32; + + return (env->fr[idx] >> shift) & 0xffffffffUL; +} + +uint64_t helper_vsts(CPUSW64State *env, uint64_t t0, uint64_t t1) +{ + uint64_t idx, val; + + idx = t0 + t1 * 32; + val = env->fr[idx]; + + return ((val >> 32) & 0xc0000000) | ((val >> 29) & 0x3fffffff); +} + +uint64_t helper_vstd(CPUSW64State *env, uint64_t t0, uint64_t t1) +{ + uint64_t idx; + + idx = t0 + t1 * 32; + return env->fr[idx]; +} + +#define HELPER_VMAX(name, _suffix, type, loop) \ + void glue(glue(helper_, name), _suffix)(CPUSW64State *env, uint64_t ra, \ + uint64_t rb, uint64_t rc) \ + { \ + int i; \ + type *ptr_dst, *ptr_src_a, *ptr_src_b; \ + \ + for (i = 0; i < loop; i++) { \ + ptr_dst = (type*)glue(get_element_, _suffix)(env, rc, i); \ + ptr_src_a = (type*)glue(get_element_, _suffix)(env, ra, i); \ + ptr_src_b = (type*)glue(get_element_, _suffix)(env, rb, i); \ + \ + if (*ptr_src_a >= *ptr_src_b) { \ + *ptr_dst = *ptr_src_a; \ + } else { \ + *ptr_dst = *ptr_src_b; \ + } \ + } \ + } + +#define HELPER_VMIN(name, _suffix, type, loop) \ + void glue(glue(helper_, name), _suffix)(CPUSW64State *env, uint64_t ra, \ + uint64_t rb, uint64_t rc) \ + { \ + int i; \ + type *ptr_dst, *ptr_src_a, *ptr_src_b; \ + \ + for (i = 0; i < loop; i++) { \ + ptr_dst = (type*)glue(get_element_, _suffix)(env, rc, i); \ + ptr_src_a = (type*)glue(get_element_, _suffix)(env, ra, i); \ + ptr_src_b = (type*)glue(get_element_, _suffix)(env, rb, i); \ + \ + if (*ptr_src_a <= *ptr_src_b) { \ + *ptr_dst = *ptr_src_a; \ + } else { \ + *ptr_dst = *ptr_src_b; \ + } \ + } \ + } + +HELPER_VMAX(vmax, b, int8_t, 32) +HELPER_VMIN(vmin, b, int8_t, 32) +HELPER_VMAX(vmax, h, int16_t, 16) +HELPER_VMIN(vmin, h, int16_t, 16) +HELPER_VMAX(vmax, w, int32_t, 8) +HELPER_VMIN(vmin, w, int32_t, 8) +HELPER_VMAX(vumax, b, uint8_t, 32) +HELPER_VMIN(vumin, b, uint8_t, 32) +HELPER_VMAX(vumax, h, uint16_t, 16) +HELPER_VMIN(vumin, h, uint16_t, 16) +HELPER_VMAX(vumax, w, uint32_t, 8) +HELPER_VMIN(vumin, w, uint32_t, 8) + +void helper_sraow(CPUSW64State *env, uint64_t ra, uint64_t rc, uint64_t shift) +{ + int i; + int adden; + int dest, src; + uint64_t sign; + adden = shift >> 6; + shift &= 0x3f; + sign = (uint64_t)((int64_t)env->fr[ra + 96] >> 63); +#ifdef DEBUG_SIMD + printf("right shift = %ld adden = %d\n", shift, adden); + printf("in_fr[%ld]:", ra); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[ra + 32 * i]); + } + printf("\n"); +#endif + + for (i = 0; (i + adden) < 4; i++) { + dest = i * 32 + rc; + src = (i + adden) * 32 + ra; + env->fr[dest] = env->fr[src] >> shift; + if (shift != 0) { + if (((i + adden) < 3)) + env->fr[dest] |= (env->fr[src + 32] << (64 - shift)); + else + env->fr[dest] |= (sign << (64 - shift)); + } + } + + for (; i < 4; i++) { + env->fr[rc + i * 32] = sign; + } +#ifdef DEBUG_SIMD + printf("out_fr[%ld]:", rc); + for (i = 3 ; i >= 0; i--) { + printf("%016lx ", env->fr[rc + 32 * i]); + } + printf("\n"); +#endif +} + +static uint16_t sm4_sbox[16][16] = { + { 0xd6, 0x90, 0xe9, 0xfe, 0xcc, 0xe1, 0x3d, 0xb7, 0x16, 0xb6, 0x14, 0xc2, 0x28, 0xfb, 0x2c, 0x05 }, + { 0x2b, 0x67, 0x9a, 0x76, 0x2a, 0xbe, 0x04, 0xc3, 0xaa, 0x44, 0x13, 0x26, 0x49, 0x86, 0x06, 0x99 }, + { 0x9c, 
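+ /*
+  * This 16x16 table is the standard SM4 S-box (every entry fits a
+  * byte, so uint8_t would suffice); SBOX() below applies it to each
+  * byte of a 32-bit word, indexing the row with the byte's high nibble
+  * and the column with its low nibble. Worked examples (illustrative):
+  *
+  *   input byte 0x00: row 0x0, col 0x0 => 0xd6
+  *   input byte 0x78: row 0x7, col 0x8 => 0x4c
+  *   SBOX(0x00000000) == 0xd6d6d6d6
+  */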
0x42, 0x50, 0xf4, 0x91, 0xef, 0x98, 0x7a, 0x33, 0x54, 0x0b, 0x43, 0xed, 0xcf, 0xac, 0x62 }, + { 0xe4, 0xb3, 0x1c, 0xa9, 0xc9, 0x08, 0xe8, 0x95, 0x80, 0xdf, 0x94, 0xfa, 0x75, 0x8f, 0x3f, 0xa6 }, + { 0x47, 0x07, 0xa7, 0xfc, 0xf3, 0x73, 0x17, 0xba, 0x83, 0x59, 0x3c, 0x19, 0xe6, 0x85, 0x4f, 0xa8 }, + { 0x68, 0x6b, 0x81, 0xb2, 0x71, 0x64, 0xda, 0x8b, 0xf8, 0xeb, 0x0f, 0x4b, 0x70, 0x56, 0x9d, 0x35 }, + { 0x1e, 0x24, 0x0e, 0x5e, 0x63, 0x58, 0xd1, 0xa2, 0x25, 0x22, 0x7c, 0x3b, 0x01, 0x21, 0x78, 0x87 }, + { 0xd4, 0x00, 0x46, 0x57, 0x9f, 0xd3, 0x27, 0x52, 0x4c, 0x36, 0x02, 0xe7, 0xa0, 0xc4, 0xc8, 0x9e }, + { 0xea, 0xbf, 0x8a, 0xd2, 0x40, 0xc7, 0x38, 0xb5, 0xa3, 0xf7, 0xf2, 0xce, 0xf9, 0x61, 0x15, 0xa1 }, + { 0xe0, 0xae, 0x5d, 0xa4, 0x9b, 0x34, 0x1a, 0x55, 0xad, 0x93, 0x32, 0x30, 0xf5, 0x8c, 0xb1, 0xe3 }, + { 0x1d, 0xf6, 0xe2, 0x2e, 0x82, 0x66, 0xca, 0x60, 0xc0, 0x29, 0x23, 0xab, 0x0d, 0x53, 0x4e, 0x6f }, + { 0xd5, 0xdb, 0x37, 0x45, 0xde, 0xfd, 0x8e, 0x2f, 0x03, 0xff, 0x6a, 0x72, 0x6d, 0x6c, 0x5b, 0x51 }, + { 0x8d, 0x1b, 0xaf, 0x92, 0xbb, 0xdd, 0xbc, 0x7f, 0x11, 0xd9, 0x5c, 0x41, 0x1f, 0x10, 0x5a, 0xd8 }, + { 0x0a, 0xc1, 0x31, 0x88, 0xa5, 0xcd, 0x7b, 0xbd, 0x2d, 0x74, 0xd0, 0x12, 0xb8, 0xe5, 0xb4, 0xb0 }, + { 0x89, 0x69, 0x97, 0x4a, 0x0c, 0x96, 0x77, 0x7e, 0x65, 0xb9, 0xf1, 0x09, 0xc5, 0x6e, 0xc6, 0x84 }, + { 0x18, 0xf0, 0x7d, 0xec, 0x3a, 0xdc, 0x4d, 0x20, 0x79, 0xee, 0x5f, 0x3e, 0xd7, 0xcb, 0x39, 0x48 } +}; + +static uint32_t SBOX(uint32_t val) +{ + int ret = 0; + int i; + int idx_x, idx_y; + for (i = 0; i < 4; i++) { + idx_x = (val >> (i * 8)) & 0xff; + idx_y = idx_x & 0xf; + idx_x = idx_x >> 4; + + ret |= (sm4_sbox[idx_x][idx_y] << (i * 8)); + } + return ret; +} + +static uint32_t rotl(uint32_t val, int shift) +{ + uint64_t ret = (uint64_t)val; + ret = (ret << (shift & 0x1f)); + return (uint32_t)((ret & 0xffffffff) | (ret >> 32)); +} + +void helper_vsm4r(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + uint32_t W[12], rk[8]; + uint32_t temp1, temp2; + int i, j; + + for (i = 0; i < 8; i++) { + rk[i] = *get_element_w(env, rb, i); + } + for (i = 0; i < 2; i++) { + for (j = 0; j < 4; j++) { + W[j] = *get_element_w(env, ra, i * 4 + j); + } + for (j = 0; j < 8; j++) { + temp1 = W[j + 1] ^ W[j + 2] ^ W[j + 3] ^ rk[j]; + temp2 = SBOX(temp1); + W[j + 4] = W[j] ^ temp2 ^ rotl(temp2, 2) ^ rotl(temp2, 10) ^ rotl(temp2, 18) ^ rotl(temp2, 24); + } + + for (j = 0; j < 4; j++) { + *get_element_w(env, rc, i * 4 + j) = W[8 + j]; + } + } +} + +void helper_vcmpueqb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + uint8_t *ptr_a, *ptr_b, *ptr_c; + int i; + + for (i = 0; i < 32; i++) { + ptr_a = get_element_b(env, ra, i); + ptr_b = get_element_b(env, rb, i); + ptr_c = get_element_b(env, rc, i); + + *ptr_c = (*ptr_a == *ptr_b) ? 1 : 0; + ; + } +} + +void helper_vcmpugtb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + uint8_t *ptr_a, *ptr_b, *ptr_c; + int i; + + for (i = 0; i < 32; i++) { + ptr_a = get_element_b(env, ra, i); + ptr_b = get_element_b(env, rb, i); + ptr_c = get_element_b(env, rc, i); + + *ptr_c = (*ptr_a > *ptr_b) ? 1 : 0; + ; + } +} + +void helper_vcmpueqbi(CPUSW64State *env, uint64_t ra, uint64_t vb, + uint64_t rc) +{ + uint8_t *ptr_a, *ptr_c; + int i; + + for (i = 0; i < 32; i++) { + ptr_a = get_element_b(env, ra, i); + ptr_c = get_element_b(env, rc, i); + + *ptr_c = (*ptr_a == vb) ? 
1 : 0; + ; + } +} + +void helper_vcmpugtbi(CPUSW64State *env, uint64_t ra, uint64_t vb, + uint64_t rc) +{ + uint8_t *ptr_a, *ptr_c; + int i; + + for (i = 0; i < 32; i++) { + ptr_a = get_element_b(env, ra, i); + ptr_c = get_element_b(env, rc, i); + + *ptr_c = (*ptr_a > vb) ? 1 : 0; + ; + } +} + +void helper_vsm3msw(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rc) +{ + uint32_t W[24]; + uint32_t temp; + int i; + + for (i = 0; i < 8; i++) { + W[i + 0] = *get_element_w(env, ra, i); + W[i + 8] = *get_element_w(env, rb, i); + } + for (i = 16; i < 24; i++) { + temp = W[i - 16] ^ W[i - 9] ^ rotl(W[i - 3], 15); + temp = temp ^ rotl(temp, 15) ^ rotl(temp, 23) ^ rotl(W[i - 13], 7) ^ W[i - 6]; + W[i] = temp; + } + for (i = 0; i < 8; i++) { + *get_element_w(env, rc, i) = W[16 + i]; + } +} + +static uint32_t selck[4][8] = { + {0x00070e15, 0x1c232a31, 0x383f464d, 0x545b6269, 0x70777e85, 0x8c939aa1, 0xa8afb6bd, 0xc4cbd2d9}, + {0xe0e7eef5, 0xfc030a11, 0x181f262d, 0x343b4249, 0x50575e65, 0x6c737a81, 0x888f969d, 0xa4abb2b9}, + {0xc0c7ced5, 0xdce3eaf1, 0xf8ff060d, 0x141b2229, 0x30373e45, 0x4c535a61, 0x686f767d, 0x848b9299}, + {0xa0a7aeb5, 0xbcc3cad1, 0xd8dfe6ed, 0xf4fb0209, 0x10171e25, 0x2c333a41, 0x484f565d, 0x646b7279} +}; + +void helper_vsm4key(CPUSW64State *env, uint64_t ra, uint64_t vb, uint64_t rc) +{ + uint32_t K[12], *CK; + int i; + uint32_t temp1, temp2; + + for (i = 4; i < 8; i++) { + K[i - 4] = *get_element_w(env, ra, i); + } + CK = selck[vb]; + + for (i = 0; i < 8; i++) { + temp1 = K[i + 1] ^ K[i + 2] ^ K[i + 3] ^ CK[i]; + temp2 = SBOX(temp1); + K[i + 4] = K[i] ^ temp2 ^ rotl(temp2, 13) ^ rotl(temp2, 23); + } + for (i = 0; i < 8; i++) { + *get_element_w(env, rc, i) = K[i + 4]; + } +} + +void helper_vinsb(CPUSW64State *env, uint64_t va, uint64_t rb, uint64_t vc, + uint64_t rd) +{ + int i; + + for (i = 0; i < 128; i += 32) { + env->fr[rd + i] = env->fr[rb + i]; + } + + *get_element_b(env, rd, vc) = (uint8_t)(va & 0xff); +} + +void helper_vinsh(CPUSW64State *env, uint64_t va, uint64_t rb, uint64_t vc, + uint64_t rd) +{ + int i; + + if (vc >= 16) + return; + + for (i = 0; i < 128; i += 32) { + env->fr[rd + i] = env->fr[rb + i]; + } + + *get_element_h(env, rd, vc) = (uint16_t)(va & 0xffff); +} + +void helper_vinsectlh(CPUSW64State *env, uint64_t ra, uint64_t rb, + uint64_t rd) +{ + int i; + uint32_t temp[8]; + for (i = 0; i < 8; i++) { + temp[i] = *get_element_h(env, ra, i) | ((uint32_t)*get_element_h(env, rb, i) << 16); + } + for (i = 0; i < 8; i++) { + *get_element_w(env, rd, i) = temp[i]; + } +} +void helper_vinsectlw(CPUSW64State *env, uint64_t ra, uint64_t rb, + uint64_t rd) +{ + int i; + uint64_t temp[4]; + for (i = 0; i < 4; i++) { + temp[i] = *get_element_w(env, ra, i) | ((uint64_t)*get_element_w(env, rb, i) << 32); + } + for (i = 0; i < 4; i++) { + *get_element_l(env, rd, i) = temp[i]; + } +} + +void helper_vinsectlb(CPUSW64State *env, uint64_t ra, uint64_t rb, + uint64_t rd) +{ + int i; + uint16_t temp[16]; + for (i = 0; i < 16; i++) { + temp[i] = *get_element_b(env, ra, i) | ((uint16_t)*get_element_b(env, rb, i) << 8); + } + for (i = 0; i < 16; i++) { + *get_element_h(env, rd, i) = temp[i]; + } +} + +void helper_vshfq(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc, + uint64_t rd) +{ + int i; + int idx; + uint64_t temp[4]; + for (i = 0; i < 2; i++) { + idx = ((vc >> (i * 2)) & 1) * 64; + if ((vc >> (i * 2 + 1)) & 1) { + temp[i * 2] = env->fr[rb + idx]; + temp[i * 2 + 1] = env->fr[rb + idx + 32]; + } else { + temp[i * 2] = env->fr[ra + idx]; + temp[i * 2 + 1] = env->fr[ra + 
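+ /*
+  * VSHFQ builds the destination from two 128-bit halves. For half i
+  * (i = 0, 1), bit (2*i + 1) of vc picks the source register (0 = ra,
+  * 1 = rb) and bit (2*i) picks that source's low or high 128-bit half,
+  * i.e. lane pair {0,1} or {2,3}. Sample selectors (illustrative):
+  *
+  *   vc = 0x4 : dest = { ra.lo, ra.hi }   identity copy of ra
+  *   vc = 0x1 : dest = { ra.hi, ra.lo }   swap the halves of ra
+  *   vc = 0xe : dest = { rb.lo, rb.hi }   identity copy of rb
+  */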
idx + 32]; + } + } + for (i = 0; i < 4; i++) { + env->fr[rd + i * 32] = temp[i]; + } +} + +void helper_vshfqb(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t rd) +{ + int i; + int idx; + int vb; + uint8_t temp[32]; + + for (i = 0; i < 16; i++) { + vb = *get_element_b(env, rb, i); + if (vb >> 7) { + temp[i] = 0; + } else { + idx = vb & 0xf; + temp[i] = *get_element_b(env, ra, idx); + } + vb = *get_element_b(env, rb, i + 16); + if (vb >> 7) { + temp[i + 16] = 0; + } else { + idx = vb & 0xf; + temp[i + 16] = *get_element_b(env, ra, idx + 16); + } + } + for (i = 0; i < 4; i++) { + env->fr[rd + i * 32] = *((uint64_t*)temp + i); + } +} + +void helper_vsm3r(CPUSW64State *env, uint64_t ra, uint64_t rb, uint64_t vc, + uint64_t rd) +{ + uint32_t W[8]; + uint32_t A, B, C, D, E, F, G, H, T; + int i; + uint32_t SS1, SS2, TT1, TT2, P0; + + if (vc >= 16) + return; + for (i = 0; i < 8; i++) { + W[i] = *get_element_w(env, ra, i); + } + A = *get_element_w(env, rb, 0); + B = *get_element_w(env, rb, 1); + C = *get_element_w(env, rb, 2); + D = *get_element_w(env, rb, 3); + E = *get_element_w(env, rb, 4); + F = *get_element_w(env, rb, 5); + G = *get_element_w(env, rb, 6); + H = *get_element_w(env, rb, 7); + + if (vc < 4) { + T = 0x79cc4519; + for (i = 0; i < 4; i++) { + SS1 = rotl(rotl(A, 12) + E + rotl(T, 4 * vc + i), 7); + SS2 = SS1 ^ rotl(A, 12); + TT1 = (A ^ B ^ C) + D + SS2 + (W[i] ^ W[i + 4]); + TT2 = (E ^ F ^ G) + H + SS1 + W[i]; + + P0 = TT2 ^ rotl(TT2, 9) ^ rotl(TT2, 17); + + H = G; + G = rotl(F, 19); + F = E; + E = P0; + D = C; + C = rotl(B, 9); + B = A; + A = TT1; + } + } else { + T = 0x7a879d8a; + for (i = 0; i < 4; i++) { + SS1 = rotl(rotl(A, 12) + E + rotl(T, 4 * vc + i), 7); + SS2 = SS1 ^ rotl(A, 12); + TT1 = ((A & B) | (A & C) | (B & C)) + D + SS2 + (W[i] ^ W[i + 4]); + TT2 = ((E & F) | ((~E) & G)) + H + SS1 + W[i]; + + P0 = TT2 ^ rotl(TT2, 9) ^ rotl(TT2, 17); + + H = G; + G = rotl(F, 19); + F = E; + E = P0; + D = C; + C = rotl(B, 9); + B = A; + A = TT1; + } + } + *get_element_w(env, rd, 0) = A; + *get_element_w(env, rd, 1) = B; + *get_element_w(env, rd, 2) = C; + *get_element_w(env, rd, 3) = D; + *get_element_w(env, rd, 4) = E; + *get_element_w(env, rd, 5) = F; + *get_element_w(env, rd, 6) = G; + *get_element_w(env, rd, 7) = H; +} diff --git a/target/sw64/translate.c b/target/sw64/translate.c new file mode 100644 index 0000000000000000000000000000000000000000..1e725b9294d3b8deb44cc6cbff54a1651efaf14b --- /dev/null +++ b/target/sw64/translate.c @@ -0,0 +1,3798 @@ +#include "translate.h" +#include "tcg/tcg.h" +#define DEVELOP_SW64 1 +#ifdef DEVELOP_SW64 + +#define ILLEGAL(x) \ + do { \ + printf("Illegal SW64 0x%x at line %d!\n", x, __LINE__); \ + exit(-1); \ + } while (0) +#endif + +TCGv cpu_pc; +TCGv cpu_std_ir[31]; +TCGv cpu_fr[128]; +TCGv cpu_lock_addr; +TCGv cpu_lock_flag; +TCGv cpu_lock_success; +#ifdef SW64_FIXLOCK +TCGv cpu_lock_value; +#endif + +#ifndef CONFIG_USER_ONLY +TCGv cpu_hm_ir[31]; +#endif + +#include "exec/gen-icount.h" + +void sw64_translate_init(void) +{ +#define DEF_VAR(V) \ + { &cpu_##V, #V, offsetof(CPUSW64State, V) } + + typedef struct { + TCGv* var; + const char* name; + int ofs; + } GlobalVar; + + static const GlobalVar vars[] = { + DEF_VAR(pc), DEF_VAR(lock_addr), + DEF_VAR(lock_flag), DEF_VAR(lock_success), +#ifdef SW64_FIXLOCK + DEF_VAR(lock_value), +#endif + }; + cpu_pc = tcg_global_mem_new_i64(cpu_env, + offsetof(CPUSW64State, pc), "PC"); + +#undef DEF_VAR + + /* Use the symbolic register names that match the disassembler. 
*/ + static const char ireg_names[31][4] = { + "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", + "s2", "s3", "s4", "s5", "fp", "a0", "a1", "a2", "a3", "a4", "a5", + "t8", "t9", "t10", "t11", "ra", "t12", "at", "gp", "sp"}; + + static const char freg_names[128][4] = { + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", + "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", + "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", + "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", "f16", "f17", + "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", "f26", "f27", + "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", "f4", "f5", + "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", "f24", "f25", + "f26", "f27", "f28", "f29", "f30", "f31", "f0", "f1", "f2", "f3", + "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11", "f12", "f13", + "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"}; + +#ifndef CONFIG_USER_ONLY + static const char shadow_names[10][8] = { + "hm_p1", "hm_p2", "hm_p4", "hm_p5", "hm_p6", + "hm_p7", "hm_p20", "hm_p21", "hm_p22", "hm_p23"}; + static const int shadow_index[10] = {1, 2, 4, 5, 6, 7, 20, 21, 22, 23}; +#endif + + int i; + + for (i = 0; i < 31; i++) { + cpu_std_ir[i] = tcg_global_mem_new_i64( + cpu_env, offsetof(CPUSW64State, ir[i]), ireg_names[i]); + } + + for (i = 0; i < 128; i++) { + cpu_fr[i] = tcg_global_mem_new_i64( + cpu_env, offsetof(CPUSW64State, fr[i]), freg_names[i]); + } + for (i = 0; i < ARRAY_SIZE(vars); ++i) { + const GlobalVar* v = &vars[i]; + *v->var = tcg_global_mem_new_i64(cpu_env, v->ofs, v->name); + } +#ifndef CONFIG_USER_ONLY + memcpy(cpu_hm_ir, cpu_std_ir, sizeof(cpu_hm_ir)); + for (i = 0; i < 10; i++) { + int r = shadow_index[i]; + cpu_hm_ir[r] = tcg_global_mem_new_i64( + cpu_env, offsetof(CPUSW64State, sr[i]), shadow_names[i]); + } +#endif +} + +static bool in_superpage(DisasContext* ctx, int64_t addr) +{ + return false; +} + +bool use_exit_tb(DisasContext* ctx) +{ + return ((tb_cflags(ctx->base.tb) & CF_LAST_IO) || + ctx->base.singlestep_enabled || singlestep); +} + +bool use_goto_tb(DisasContext* ctx, uint64_t dest) +{ + /* Suppress goto_tb in the case of single-steping and IO. */ + if (unlikely(use_exit_tb(ctx))) { + return false; + } + /* If the destination is in the superpage, the page perms can't change. */ + if (in_superpage(ctx, dest)) { + return true; + } +/* Check for the dest on the same page as the start of the TB. */ +#ifndef CONFIG_USER_ONLY + return ((ctx->base.tb->pc ^ dest) & TARGET_PAGE_MASK) == 0; +#else + return true; +#endif +} + +void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src) +{ + uint64_t mzero = 1ull << 63; + + switch (cond) { + case TCG_COND_LE: + case TCG_COND_GT: + /* For <= or >, the -0.0 value directly compares the way we want. */ + tcg_gen_mov_i64(dest, src); + break; + + case TCG_COND_EQ: + case TCG_COND_NE: + /* For == or !=, we can simply mask off the sign bit and compare. */ + tcg_gen_andi_i64(dest, src, mzero - 1); + break; + + case TCG_COND_GE: + case TCG_COND_LT: + /* For >= or <, map -0.0 to +0.0 via comparison and mask. 
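The setcond then neg sequence below yields an all-ones mask whenever
src is anything other than -0.0, so the final AND leaves every other
value untouched while -0.0 itself collapses to +0.0 and then compares
as zero.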
*/ + tcg_gen_setcondi_i64(TCG_COND_NE, dest, src, mzero); + tcg_gen_neg_i64(dest, dest); + tcg_gen_and_i64(dest, dest, src); + break; + + default: + abort(); + } +} + +static TCGv load_zero(DisasContext *ctx) +{ + if (!ctx->zero) { + ctx->zero = tcg_const_i64(0); + } + return ctx->zero; +} + +static void free_context_temps(DisasContext *ctx) +{ + if (ctx->zero) { + tcg_temp_free(ctx->zero); + ctx->zero = NULL; + } +} + +static TCGv load_gir(DisasContext *ctx, unsigned reg) +{ + if (likely(reg < 31)) { + return ctx->ir[reg]; + } else { + return load_zero(ctx); + } +} + +static void gen_excp_1(int exception, int error_code) +{ + TCGv_i32 tmp1, tmp2; + + tmp1 = tcg_const_i32(exception); + tmp2 = tcg_const_i32(error_code); + gen_helper_excp(cpu_env, tmp1, tmp2); + tcg_temp_free_i32(tmp2); + tcg_temp_free_i32(tmp1); +} + +static DisasJumpType gen_excp(DisasContext* ctx, int exception, + int error_code) +{ + tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); + gen_excp_1(exception, error_code); + return DISAS_NORETURN; +} + +static int i_count = 1; + +static inline DisasJumpType gen_invalid(DisasContext *ctx) +{ + if (i_count == 0) { + i_count++; + return DISAS_NEXT; + } + fprintf(stderr, "here %lx\n", ctx->base.pc_next); + return gen_excp(ctx, EXCP_OPCDEC, 0); +} + +static uint64_t zapnot_mask(uint8_t byte_mask) +{ + uint64_t mask = 0; + int i; + + for (i = 0; i < 8; ++i) { + if ((byte_mask >> i) & 1) { + mask |= 0xffull << (i * 8); + } + } + return mask; +} + +static void gen_ins_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, + uint8_t byte_mask) +{ + TCGv tmp = tcg_temp_new(); + TCGv shift = tcg_temp_new(); + + tcg_gen_andi_i64(tmp, va, zapnot_mask(byte_mask)); + + tcg_gen_andi_i64(shift, vb, 7); + tcg_gen_shli_i64(shift, shift, 3); + tcg_gen_shl_i64(vc, tmp, shift); + + tcg_temp_free(shift); + tcg_temp_free(tmp); +} + +static void gen_ins_h(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, + uint8_t byte_mask) +{ + TCGv tmp = tcg_temp_new(); + TCGv shift = tcg_temp_new(); + + tcg_gen_andi_i64(tmp, va, zapnot_mask(byte_mask)); + + tcg_gen_shli_i64(shift, vb, 3); + tcg_gen_not_i64(shift, shift); + tcg_gen_andi_i64(shift, shift, 0x3f); + + tcg_gen_shr_i64(vc, tmp, shift); + tcg_gen_shri_i64(vc, vc, 1); + tcg_temp_free(shift); + tcg_temp_free(tmp); +} + +static void gen_ext_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, + uint8_t byte_mask) +{ + TCGv tmp = tcg_temp_new(); + TCGv shift = tcg_temp_new(); + + tcg_gen_andi_i64(shift, vb, 7); + tcg_gen_shli_i64(shift, shift, 3); + tcg_gen_shr_i64(tmp, va, shift); + + tcg_gen_andi_i64(vc, tmp, zapnot_mask(byte_mask)); + + tcg_temp_free(shift); + tcg_temp_free(tmp); +} + +static void gen_ext_h(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, + uint8_t byte_mask) +{ + TCGv tmp = tcg_temp_new(); + TCGv shift = tcg_temp_new(); + + tcg_gen_andi_i64(shift, vb, 7); + tcg_gen_shli_i64(shift, shift, 3); + tcg_gen_movi_i64(tmp, 64); + tcg_gen_sub_i64(shift, tmp, shift); + tcg_gen_shl_i64(tmp, va, shift); + + tcg_gen_andi_i64(vc, tmp, zapnot_mask(byte_mask)); + + tcg_temp_free(shift); + tcg_temp_free(tmp); +} + +static void gen_mask_l(DisasContext* ctx, TCGv vc, TCGv va, TCGv vb, + uint8_t byte_mask) +{ + TCGv shift = tcg_temp_new(); + TCGv mask = tcg_temp_new(); + + tcg_gen_andi_i64(shift, vb, 7); + tcg_gen_shli_i64(shift, shift, 3); + tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); + tcg_gen_shl_i64(mask, mask, shift); + + tcg_gen_andc_i64(vc, va, mask); + + tcg_temp_free(mask); + tcg_temp_free(shift); +} + +static void gen_mask_h(DisasContext *ctx, TCGv vc, TCGv va, 
TCGv vb, + uint8_t byte_mask) +{ + TCGv shift = tcg_temp_new(); + TCGv mask = tcg_temp_new(); + + /* The instruction description is as above, where the byte_mask + is shifted left, and then we extract bits <15:8>. This can be + emulated with a right-shift on the expanded byte mask. This + requires extra care because for an input <2:0> == 0 we need a + shift of 64 bits in order to generate a zero. This is done by + splitting the shift into two parts, the variable shift - 1 + followed by a constant 1 shift. The code we expand below is + equivalent to ~(B * 8) & 63. */ + + tcg_gen_shli_i64(shift, vb, 3); + tcg_gen_not_i64(shift, shift); + tcg_gen_andi_i64(shift, shift, 0x3f); + tcg_gen_movi_i64(mask, zapnot_mask(byte_mask)); + tcg_gen_shr_i64(mask, mask, shift); + tcg_gen_shri_i64(mask, mask, 1); + + tcg_gen_andc_i64(vc, va, mask); + + tcg_temp_free(mask); + tcg_temp_free(shift); +} + +static inline void gen_load_mem( + DisasContext *ctx, void (*tcg_gen_qemu_load)(TCGv t0, TCGv t1, int flags), + int ra, int rb, int32_t disp16, bool fp, bool clear) +{ + TCGv tmp, addr, va; + + /* LDQ_U with ra $31 is UNOP. Other various loads are forms of + prefetches, which we can treat as nops. No worries about + missed exceptions here. */ + if (unlikely(ra == 31)) { + return; + } + + tmp = tcg_temp_new(); + addr = load_gir(ctx, rb); + + if (disp16) { + tcg_gen_addi_i64(tmp, addr, (int64_t)disp16); + addr = tmp; + } else { + tcg_gen_mov_i64(tmp, addr); + addr = tmp; + } + if (clear) { + tcg_gen_andi_i64(tmp, addr, ~0x7UL); + addr = tmp; + } + + va = (fp ? cpu_fr[ra] : load_gir(ctx, ra)); + tcg_gen_qemu_load(va, addr, ctx->mem_idx); + + tcg_temp_free(tmp); +} + +static inline void gen_store_mem( + DisasContext *ctx, void (*tcg_gen_qemu_store)(TCGv t0, TCGv t1, int flags), + int ra, int rb, int32_t disp16, bool fp, bool clear) +{ + TCGv tmp, addr, va; + + tmp = tcg_temp_new(); + addr = load_gir(ctx, rb); + if (disp16) { + tcg_gen_addi_i64(tmp, addr, disp16); + addr = tmp; + } else { + tcg_gen_mov_i64(tmp, addr); + addr = tmp; + } + if (clear) { + tcg_gen_andi_i64(tmp, addr, ~0x7); + addr = tmp; + } + va = (fp ? 
cpu_fr[ra] : load_gir(ctx, ra)); + + tcg_gen_qemu_store(va, addr, ctx->mem_idx); + gen_helper_trace_mem(cpu_env, addr, va); + tcg_temp_free(tmp); +} + +static void cal_with_iregs_2(DisasContext *ctx, TCGv vc, TCGv va, TCGv vb, + int32_t disp13, uint16_t fn) +{ + TCGv tmp; + + switch (fn & 0xff) { + case 0x00: + /* ADDW */ + tcg_gen_add_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); + break; + case 0x01: + /* SUBW */ + tcg_gen_sub_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); + break; + case 0x02: + /* S4ADDW */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_add_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); + break; + case 0x03: + /* S4SUBW */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_sub_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); + break; + case 0x04: + /* S8ADDW */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_add_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); + break; + case 0x05: + /* S8SUBW */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_sub_i64(tmp, tmp, vb); + tcg_gen_ext32s_i64(vc, tmp); + tcg_temp_free(tmp); + break; + + case 0x08: + /* ADDL */ + tcg_gen_add_i64(vc, va, vb); + break; + case 0x09: + /* SUBL */ + tcg_gen_sub_i64(vc, va, vb); + break; + case 0x0a: + /* S4ADDL */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_add_i64(vc, tmp, vb); + tcg_temp_free(tmp); + break; + case 0x0b: + /* S4SUBL */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 2); + tcg_gen_sub_i64(vc, tmp, vb); + tcg_temp_free(tmp); + break; + case 0x0c: + /* S8ADDL */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_add_i64(vc, tmp, vb); + tcg_temp_free(tmp); + break; + case 0x0d: + /* S8SUBL */ + tmp = tcg_temp_new(); + tcg_gen_shli_i64(tmp, va, 3); + tcg_gen_sub_i64(vc, tmp, vb); + tcg_temp_free(tmp); + break; + case 0x10: + /* MULW */ + tcg_gen_mul_i64(vc, va, vb); + tcg_gen_ext32s_i64(vc, vc); + break; + case 0x18: + /* MULL */ + tcg_gen_mul_i64(vc, va, vb); + break; + case 0x19: + /* MULH */ + tmp = tcg_temp_new(); + tcg_gen_mulu2_i64(tmp, vc, va, vb); + tcg_temp_free(tmp); + break; + case 0x28: + /* CMPEQ */ + tcg_gen_setcond_i64(TCG_COND_EQ, vc, va, vb); + break; + case 0x29: + /* CMPLT */ + tcg_gen_setcond_i64(TCG_COND_LT, vc, va, vb); + break; + case 0x2a: + /* CMPLE */ + tcg_gen_setcond_i64(TCG_COND_LE, vc, va, vb); + break; + case 0x2b: + /* CMPULT */ + tcg_gen_setcond_i64(TCG_COND_LTU, vc, va, vb); + break; + case 0x2c: + /* CMPULE */ + tcg_gen_setcond_i64(TCG_COND_LEU, vc, va, vb); + break; + case 0x38: + /* AND */ + tcg_gen_and_i64(vc, va, vb); + break; + case 0x39: + /* BIC */ + tcg_gen_andc_i64(vc, va, vb); + break; + case 0x3a: + /* BIS */ + tcg_gen_or_i64(vc, va, vb); + break; + case 0x3b: + /* ORNOT */ + tcg_gen_orc_i64(vc, va, vb); + break; + case 0x3c: + /* XOR */ + tcg_gen_xor_i64(vc, va, vb); + break; + case 0x3d: + /* EQV */ + tcg_gen_eqv_i64(vc, va, vb); + break; + case 0x40: + /* INSLB */ + gen_ins_l(ctx, vc, va, vb, 0x1); + break; + case 0x41: + /* INSLH */ + gen_ins_l(ctx, vc, va, vb, 0x3); + break; + case 0x42: + /* INSLW */ + gen_ins_l(ctx, vc, va, vb, 0xf); + break; + case 0x43: + /* INSLL */ + gen_ins_l(ctx, vc, va, vb, 0xff); + break; + case 0x44: + /* INSHB */ + gen_ins_h(ctx, vc, va, vb, 0x1); + break; + case 0x45: + /* INSHH */ + gen_ins_h(ctx, vc, va, vb, 0x3); + break; + case 0x46: + /* INSHW */ + gen_ins_h(ctx, vc, va, vb, 0xf); + break; + case 0x47: + /* INSHL */ + 
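+ /*
+  * Note on the byte_mask arguments used by the INS, EXT and MASK
+  * cases: the mask names the bytes of the operand width, so
+  * 0x1 = byte, 0x3 = halfword, 0xf = word and 0xff = full quadword,
+  * and zapnot_mask() expands it to a bit mask:
+  *
+  *   zapnot_mask(0x1)  == 0x00000000000000ff
+  *   zapnot_mask(0x3)  == 0x000000000000ffff
+  *   zapnot_mask(0xf)  == 0x00000000ffffffff
+  *   zapnot_mask(0xff) == 0xffffffffffffffff
+  *
+  * The L forms place the masked operand at the byte offset given by
+  * the low bits of vb; the H forms produce the bytes that spill into
+  * the following quadword, which is how unaligned accesses are
+  * stitched together.
+  */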
gen_ins_h(ctx, vc, va, vb, 0xff); + break; + case 0x48: + /* SLL/SLLL */ + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_shl_i64(vc, va, tmp); + tcg_temp_free(tmp); + break; + case 0x49: + /* SRL/SRLL */ + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_shr_i64(vc, va, tmp); + tcg_temp_free(tmp); + break; + case 0x4a: + /* SRA/SRAL */ + tmp = tcg_temp_new(); + tcg_gen_andi_i64(tmp, vb, 0x3f); + tcg_gen_sar_i64(vc, va, tmp); + tcg_temp_free(tmp); + break; + case 0x50: + /* EXTLB */ + gen_ext_l(ctx, vc, va, vb, 0x1); + break; + case 0x51: + /* EXTLH */ + gen_ext_l(ctx, vc, va, vb, 0x3); + break; + case 0x52: + /* EXTLW */ + gen_ext_l(ctx, vc, va, vb, 0xf); + break; + case 0x53: + /* EXTLL */ + gen_ext_l(ctx, vc, va, vb, 0xff); + break; + case 0x54: + /* EXTHB */ + gen_ext_h(ctx, vc, va, vb, 0x1); + break; + case 0x55: + /* EXTHH */ + gen_ext_h(ctx, vc, va, vb, 0x3); + break; + case 0x56: + /* EXTHW */ + gen_ext_h(ctx, vc, va, vb, 0xf); + break; + case 0x57: + /* EXTHL */ + gen_ext_h(ctx, vc, va, vb, 0xff); + break; + case 0x58: + /* CTPOP */ + tcg_gen_ctpop_i64(vc, vb); + break; + case 0x59: + /* CTLZ */ + tcg_gen_clzi_i64(vc, vb, 64); + break; + case 0x5a: + /* CTTZ */ + tcg_gen_ctzi_i64(vc, vb, 64); + break; + case 0x60: + /* MASKLB */ + gen_mask_l(ctx, vc, va, vb, 0x1); + break; + case 0x61: + /* MASKLH */ + gen_mask_l(ctx, vc, va, vb, 0x3); + break; + case 0x62: + /* MASKLW */ + gen_mask_l(ctx, vc, va, vb, 0xf); + break; + case 0x63: + /* MASKLL */ + gen_mask_l(ctx, vc, va, vb, 0xff); + break; + case 0x64: + /* MASKHB */ + gen_mask_h(ctx, vc, va, vb, 0x1); + break; + case 0x65: + /* MASKHH */ + gen_mask_h(ctx, vc, va, vb, 0x3); + break; + case 0x66: + /* MASKHW */ + gen_mask_h(ctx, vc, va, vb, 0xf); + break; + case 0x67: + /* MASKHL */ + gen_mask_h(ctx, vc, va, vb, 0xff); + break; + case 0x68: + /* ZAP */ + gen_helper_zap(vc, va, vb); + break; + case 0x69: + /* ZAPNOT */ + gen_helper_zapnot(vc, va, vb); + break; + case 0x6a: + /* SEXTB */ + tcg_gen_ext8s_i64(vc, vb); + break; + case 0x6b: + /* SEXTH */ + tcg_gen_ext16s_i64(vc, vb); + break; + case 0x6c: + /* CMPGEB*/ + gen_helper_cmpgeb(vc, va, vb); + break; + default: + ILLEGAL(fn); + } +} + +static void cal_with_imm_2(DisasContext *ctx, TCGv vc, TCGv va, int64_t disp, + uint8_t fn) +{ + TCGv_i64 t0 = tcg_const_i64(disp); + cal_with_iregs_2(ctx, vc, va, t0, 0, fn); + tcg_temp_free_i64(t0); +} + +static void cal_with_iregs_3(DisasContext *ctx, TCGv vd, TCGv va, TCGv vb, + TCGv vc, uint8_t fn) +{ + TCGv_i64 t0 = tcg_const_i64(0); + TCGv_i64 tmp; + switch (fn) { + case 0x0: + /* SELEQ */ + tcg_gen_movcond_i64(TCG_COND_EQ, vd, va, t0, vb, vc); + break; + case 0x1: + /* SELGE */ + tcg_gen_movcond_i64(TCG_COND_GE, vd, va, t0, vb, vc); + break; + case 0x2: + /* SELGT */ + tcg_gen_movcond_i64(TCG_COND_GT, vd, va, t0, vb, vc); + break; + case 0x3: + /* SELLE */ + tcg_gen_movcond_i64(TCG_COND_LE, vd, va, t0, vb, vc); + break; + case 0x4: + /* SELLT */ + tcg_gen_movcond_i64(TCG_COND_LT, vd, va, t0, vb, vc); + break; + case 0x5: + /* SELNE */ + tcg_gen_movcond_i64(TCG_COND_NE, vd, va, t0, vb, vc); + break; + case 0x6: + /* SELLBC */ + tmp = tcg_temp_new_i64(); + tcg_gen_andi_i64(tmp, va, 1); + tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp, t0, vb, vc); + tcg_temp_free_i64(tmp); + break; + case 0x7: + /* SELLBS */ + tmp = tcg_temp_new_i64(); + tcg_gen_andi_i64(tmp, va, 1); + tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp, t0, vb, vc); + tcg_temp_free_i64(tmp); + break; + default: + ILLEGAL(fn); + break; + } + 
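+ /*
+  * Every SELxx above is a single conditional move against zero,
+  * vd = (va cond 0) ? vb : vc; the low-bit variants first isolate
+  * bit 0 of va, so SELLBC ("low bit clear") reduces to (illustrative):
+  *
+  *   vd = ((va & 1) == 0) ? vb : vc;
+  *
+  * and SELLBS is the same with the comparison inverted.
+  */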
tcg_temp_free_i64(t0); +} + +static void cal_with_imm_3(DisasContext *ctx, TCGv vd, TCGv va, int32_t disp, + TCGv vc, uint8_t fn) +{ + TCGv_i64 vb = tcg_const_i64(disp); + cal_with_iregs_3(ctx, vd, va, vb, vc, fn); + tcg_temp_free_i64(vb); +} + +static DisasJumpType gen_bdirect(DisasContext *ctx, int ra, int32_t disp) +{ + uint64_t dest = ctx->base.pc_next + ((int64_t)disp << 2); + if (ra != 31) { + tcg_gen_movi_i64(load_gir(ctx, ra), ctx->base.pc_next & (~0x3UL)); + } + if (disp == 0) { + return 0; + } else if (use_goto_tb(ctx, dest)) { + tcg_gen_goto_tb(0); + tcg_gen_movi_i64(cpu_pc, dest); + tcg_gen_exit_tb(ctx->base.tb, 0); + return DISAS_NORETURN; + } else { + tcg_gen_movi_i64(cpu_pc, dest); + return DISAS_PC_UPDATED; + } +} + +static DisasJumpType gen_bcond_internal(DisasContext *ctx, TCGCond cond, + TCGv cmp, int disp) +{ + uint64_t dest = ctx->base.pc_next + (disp << 2); + TCGLabel* lab_true = gen_new_label(); + + if (use_goto_tb(ctx, dest)) { + tcg_gen_brcondi_i64(cond, cmp, 0, lab_true); + + tcg_gen_goto_tb(0); + tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); + tcg_gen_exit_tb(ctx->base.tb, 0); + + gen_set_label(lab_true); + tcg_gen_goto_tb(1); + tcg_gen_movi_i64(cpu_pc, dest); + tcg_gen_exit_tb(ctx->base.tb, 1); + + return DISAS_NORETURN; + } else { + TCGv_i64 t = tcg_const_i64(0); + TCGv_i64 d = tcg_const_i64(dest); + TCGv_i64 p = tcg_const_i64(ctx->base.pc_next); + + tcg_gen_movcond_i64(cond, cpu_pc, cmp, t, d, p); + + tcg_temp_free_i64(t); + tcg_temp_free_i64(d); + tcg_temp_free_i64(p); + return DISAS_PC_UPDATED; + } +} + +static DisasJumpType gen_bcond(DisasContext *ctx, TCGCond cond, uint32_t ra, + int32_t disp, uint64_t mask) +{ + TCGv tmp = tcg_temp_new(); + DisasJumpType ret; + + tcg_gen_andi_i64(tmp, load_gir(ctx, ra), mask); + ret = gen_bcond_internal(ctx, cond, tmp, disp); + tcg_temp_free(tmp); + return ret; +} + +static DisasJumpType gen_fbcond(DisasContext *ctx, TCGCond cond, int ra, + int32_t disp) +{ + TCGv cmp_tmp = tcg_temp_new(); + DisasJumpType ret; + + gen_fold_mzero(cond, cmp_tmp, cpu_fr[ra]); + ret = gen_bcond_internal(ctx, cond, cmp_tmp, disp); + tcg_temp_free(cmp_tmp); + return ret; +} + +#ifndef CONFIG_USER_ONLY +static void gen_qemu_pri_ldw(TCGv t0, TCGv t1, int memidx) +{ + gen_helper_pri_ldw(t0, cpu_env, t1); +} + +static void gen_qemu_pri_stw(TCGv t0, TCGv t1, int memidx) +{ + gen_helper_pri_stw(cpu_env, t0, t1); +} + +static void gen_qemu_pri_ldl(TCGv t0, TCGv t1, int memidx) +{ + gen_helper_pri_ldl(t0, cpu_env, t1); +} + +static void gen_qemu_pri_stl(TCGv t0, TCGv t1, int memidx) +{ + gen_helper_pri_stl(cpu_env, t0, t1); +} +#endif + +static inline void gen_load_mem_simd( + DisasContext *ctx, void (*tcg_gen_qemu_load)(int t0, TCGv t1, int flags), + int ra, int rb, int32_t disp16, uint64_t mask) +{ + TCGv tmp, addr; + + /* LDQ_U with ra $31 is UNOP. Other various loads are forms of + prefetches, which we can treat as nops. No worries about + missed exceptions here. 
*/ + if (unlikely(ra == 31)) + return; + + tmp = tcg_temp_new(); + addr = load_gir(ctx, rb); + + if (disp16) { + tcg_gen_addi_i64(tmp, addr, (int64_t)disp16); + addr = tmp; + } else { + tcg_gen_mov_i64(tmp, addr); + addr = tmp; + } + + if (mask) { + tcg_gen_andi_i64(addr, addr, mask); + } + + tcg_gen_qemu_load(ra, addr, ctx->mem_idx); + // FIXME: for debug + + tcg_temp_free(tmp); +} + +static inline void gen_store_mem_simd( + DisasContext *ctx, void (*tcg_gen_qemu_store)(int t0, TCGv t1, int flags), + int ra, int rb, int32_t disp16, uint64_t mask) +{ + TCGv tmp, addr; + + tmp = tcg_temp_new(); + addr = load_gir(ctx, rb); + if (disp16) { + tcg_gen_addi_i64(tmp, addr, (int64_t)disp16); + addr = tmp; + } else { + tcg_gen_mov_i64(tmp, addr); + addr = tmp; + } + if (mask) { + tcg_gen_andi_i64(addr, addr, mask); + } + // FIXME: for debug + tcg_gen_qemu_store(ra, addr, ctx->mem_idx); + + tcg_temp_free(tmp); +} + +static void gen_qemu_ldwe(int t0, TCGv t1, int memidx) +{ + TCGv tmp = tcg_temp_new(); + + tcg_gen_qemu_ld_i64(tmp, t1, memidx, MO_ALIGN_4 | MO_LEUL); + tcg_gen_shli_i64(cpu_fr[t0], tmp, 32); + tcg_gen_or_i64(cpu_fr[t0], cpu_fr[t0], tmp); + tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]); + tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]); + tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]); + + tcg_temp_free(tmp); +} + +static void gen_qemu_vlds(int t0, TCGv t1, int memidx) +{ + int i; + TCGv_i32 tmp32 = tcg_temp_new_i32(); + + tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_ALIGN_4 | MO_LEUL); + gen_helper_memory_to_s(cpu_fr[t0], tmp32); + tcg_gen_addi_i64(t1, t1, 4); + + for (i = 1; i < 4; i++) { + tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_LEUL); + gen_helper_memory_to_s(cpu_fr[t0 + i * 32], tmp32); + tcg_gen_addi_i64(t1, t1, 4); + } + + tcg_temp_free_i32(tmp32); +} + +static void gen_qemu_ldse(int t0, TCGv t1, int memidx) +{ + TCGv_i32 tmp32 = tcg_temp_new_i32(); + TCGv tmp64 = tcg_temp_new(); + + tcg_gen_qemu_ld_i32(tmp32, t1, memidx, MO_ALIGN_4 | MO_LEUL); + gen_helper_memory_to_s(cpu_fr[t0], tmp32); + tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]); + tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]); + tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]); + + tcg_temp_free(tmp64); + tcg_temp_free_i32(tmp32); +} + +static void gen_qemu_ldde(int t0, TCGv t1, int memidx) +{ + tcg_gen_qemu_ld_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEQ); + tcg_gen_mov_i64(cpu_fr[t0 + 32], cpu_fr[t0]); + tcg_gen_mov_i64(cpu_fr[t0 + 64], cpu_fr[t0]); + tcg_gen_mov_i64(cpu_fr[t0 + 96], cpu_fr[t0]); +} + +static void gen_qemu_vldd(int t0, TCGv t1, int memidx) +{ + tcg_gen_qemu_ld_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEQ); + tcg_gen_addi_i64(t1, t1, 8); + tcg_gen_qemu_ld_i64(cpu_fr[t0 + 32], t1, memidx, MO_TEQ); + tcg_gen_addi_i64(t1, t1, 8); + tcg_gen_qemu_ld_i64(cpu_fr[t0 + 64], t1, memidx, MO_TEQ); + tcg_gen_addi_i64(t1, t1, 8); + tcg_gen_qemu_ld_i64(cpu_fr[t0 + 96], t1, memidx, MO_TEQ); +} + +static void gen_qemu_vsts(int t0, TCGv t1, int memidx) +{ + int i; + TCGv_i32 tmp = tcg_temp_new_i32(); + + gen_helper_s_to_memory(tmp, cpu_fr[t0]); + tcg_gen_qemu_st_i32(tmp, t1, memidx, MO_ALIGN_4 | MO_LEUL); + tcg_gen_addi_i64(t1, t1, 4); + for (i = 1; i < 4; i++) { + gen_helper_s_to_memory(tmp, cpu_fr[t0 + 32 * i]); + tcg_gen_qemu_st_i32(tmp, t1, memidx, MO_LEUL); + tcg_gen_addi_i64(t1, t1, 4); + } + tcg_temp_free_i32(tmp); +} + +static void gen_qemu_vstd(int t0, TCGv t1, int memidx) +{ + tcg_gen_qemu_st_i64(cpu_fr[t0], t1, memidx, MO_ALIGN_4 | MO_TEQ); + tcg_gen_addi_i64(t1, t1, 8); + tcg_gen_qemu_st_i64(cpu_fr[t0 + 32], t1, 
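+ /*
+  * Layout reminder: the 128-entry fr file models 32 architectural
+  * vector registers of four 64-bit lanes, lane n of register r living
+  * at fr[r + n * 32] -- which is why these expanders step the register
+  * index by 32 while stepping the address by 8. Illustrative mapping
+  * (hypothetical helper, not part of this patch):
+  *
+  *   static inline int fr_lane(int reg, int lane)
+  *   {
+  *       return reg + lane * 32;
+  *   }
+  *
+  *   fr_lane(3, 0) == 3, fr_lane(3, 1) == 35,
+  *   fr_lane(3, 2) == 67, fr_lane(3, 3) == 99
+  */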
memidx, MO_TEQ); + tcg_gen_addi_i64(t1, t1, 8); + tcg_gen_qemu_st_i64(cpu_fr[t0 + 64], t1, memidx, MO_TEQ); + tcg_gen_addi_i64(t1, t1, 8); + tcg_gen_qemu_st_i64(cpu_fr[t0 + 96], t1, memidx, MO_TEQ); +} + +static inline void gen_qemu_fsts(TCGv t0, TCGv t1, int flags) +{ + TCGv_i32 tmp = tcg_temp_new_i32(); + gen_helper_s_to_memory(tmp, t0); + tcg_gen_qemu_st_i32(tmp, t1, flags, MO_LEUL); + tcg_temp_free_i32(tmp); +} + +static inline void gen_qemu_flds(TCGv t0, TCGv t1, int flags) +{ + TCGv_i32 tmp = tcg_temp_new_i32(); + tcg_gen_qemu_ld_i32(tmp, t1, flags, MO_LEUL); + gen_helper_memory_to_s(t0, tmp); + tcg_temp_free_i32(tmp); +} + +static TCGv gen_ieee_input(DisasContext *ctx, int reg, int is_cmp) +{ + TCGv val; + + if (unlikely(reg == 31)) { + val = load_zero(ctx); + } else { + val = cpu_fr[reg]; +#ifndef CONFIG_USER_ONLY + /* In system mode, raise exceptions for denormals like real + hardware. In user mode, proceed as if the OS completion + handler is handling the denormal as per spec. */ + gen_helper_ieee_input(cpu_env, val); +#endif + } + return val; +} + +static void gen_fp_exc_raise(int rc) +{ +#ifndef CONFIG_USER_ONLY + TCGv_i32 reg = tcg_const_i32(rc + 32); + gen_helper_fp_exc_raise(cpu_env, reg); + tcg_temp_free_i32(reg); +#endif +} + +static void gen_ieee_arith2(DisasContext *ctx, + void (*helper)(TCGv, TCGv_ptr, TCGv), int ra, + int rc) +{ + TCGv va, vc; + + va = gen_ieee_input(ctx, ra, 0); + vc = cpu_fr[rc]; + helper(vc, cpu_env, va); + + gen_fp_exc_raise(rc); +} + +static void gen_ieee_arith3(DisasContext *ctx, + void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), int ra, + int rb, int rc) +{ + TCGv va, vb, vc; + + va = gen_ieee_input(ctx, ra, 0); + vb = gen_ieee_input(ctx, rb, 0); + vc = cpu_fr[rc]; + helper(vc, cpu_env, va, vb); + + gen_fp_exc_raise(rc); +} + +#define IEEE_ARITH2(name) \ + static inline void glue(gen_, name)(DisasContext * ctx, int ra, int rc) { \ + gen_ieee_arith2(ctx, gen_helper_##name, ra, rc); \ + } + +#define IEEE_ARITH3(name) \ + static inline void glue(gen_, name)(DisasContext * ctx, int ra, int rb, \ + int rc) { \ + gen_ieee_arith3(ctx, gen_helper_##name, ra, rb, rc); \ + } +IEEE_ARITH3(fadds) +IEEE_ARITH3(faddd) +IEEE_ARITH3(fsubs) +IEEE_ARITH3(fsubd) +IEEE_ARITH3(fmuls) +IEEE_ARITH3(fmuld) +IEEE_ARITH3(fdivs) +IEEE_ARITH3(fdivd) +IEEE_ARITH2(frecs) +IEEE_ARITH2(frecd) + +static void gen_ieee_compare(DisasContext *ctx, + void (*helper)(TCGv, TCGv_ptr, TCGv, TCGv), int ra, + int rb, int rc) +{ + TCGv va, vb, vc; + + va = gen_ieee_input(ctx, ra, 1); + vb = gen_ieee_input(ctx, rb, 1); + vc = cpu_fr[rc]; + helper(vc, cpu_env, va, vb); + + gen_fp_exc_raise(rc); +} + +#define IEEE_CMP2(name) \ + static inline void glue(gen_, name)(DisasContext *ctx, int ra, int rb, \ + int rc) { \ + gen_ieee_compare(ctx, gen_helper_##name, ra, rb, rc); \ + } + +IEEE_CMP2(fcmpun) +IEEE_CMP2(fcmpeq) +IEEE_CMP2(fcmplt) +IEEE_CMP2(fcmple) + +static void gen_fcvtdl(int rb, int rc, uint64_t round_mode) +{ + TCGv tmp64; + tmp64 = tcg_temp_new_i64(); + tcg_gen_movi_i64(tmp64, round_mode); + gen_helper_fcvtdl(cpu_fr[rc], cpu_env, cpu_fr[rb], tmp64); + tcg_temp_free(tmp64); + gen_fp_exc_raise(rc); +} + +static void cal_with_fregs_2(DisasContext *ctx, uint8_t rc, uint8_t ra, + uint8_t rb, uint8_t fn) +{ + TCGv tmp64; + TCGv_i32 tmp32; + switch (fn) { + case 0x00: + /* FADDS */ + gen_fadds(ctx, ra, rb, rc); + break; + case 0x01: + /* FADDD */ + gen_faddd(ctx, ra, rb, rc); + break; + case 0x02: + /* FSUBS */ + gen_fsubs(ctx, ra, rb, rc); + break; + case 0x03: + /* FSUBD */ + gen_fsubd(ctx, ra, 
rb, rc); + break; + case 0x4: + /* FMULS */ + gen_fmuls(ctx, ra, rb, rc); + break; + case 0x05: + /* FMULD */ + gen_fmuld(ctx, ra, rb, rc); + break; + case 0x06: + /* FDIVS */ + gen_fdivs(ctx, ra, rb, rc); + break; + case 0x07: + /* FDIVD */ + gen_fdivd(ctx, ra, rb, rc); + break; + case 0x08: + /* FSQRTS */ + gen_helper_fsqrts(cpu_fr[rc], cpu_env, cpu_fr[rb]); + break; + case 0x09: + /* FSQRTD */ + gen_helper_fsqrt(cpu_fr[rc], cpu_env, cpu_fr[rb]); + break; + case 0x10: + /* FCMPEQ */ + gen_fcmpeq(ctx, ra, rb, rc); + break; + case 0x11: + /* FCMPLE */ + gen_fcmple(ctx, ra, rb, rc); + break; + case 0x12: + /* FCMPLT */ + gen_fcmplt(ctx, ra, rb, rc); + break; + case 0x13: + /* FCMPUN */ + gen_fcmpun(ctx, ra, rb, rc); + break; + case 0x20: + /* FCVTSD */ + gen_helper_fcvtsd(cpu_fr[rc], cpu_env, cpu_fr[rb]); + break; + case 0x21: + /* FCVTDS */ + gen_helper_fcvtds(cpu_fr[rc], cpu_env, cpu_fr[rb]); + break; + case 0x22: + /* FCVTDL_G */ + gen_fcvtdl(rb, rc, 0); + break; + case 0x23: + /* FCVTDL_P */ + gen_fcvtdl(rb, rc, 2); + break; + case 0x24: + /* FCVTDL_Z */ + gen_fcvtdl(rb, rc, 3); + break; + case 0x25: + /* FCVTDL_N */ + gen_fcvtdl(rb, rc, 1); + break; + case 0x27: + /* FCVTDL */ + gen_helper_fcvtdl_dyn(cpu_fr[rc], cpu_env, cpu_fr[rb]); + break; + case 0x28: + /* FCVTWL */ + gen_helper_fcvtwl(cpu_fr[rc], cpu_env, cpu_fr[rb]); + tcg_gen_ext32s_i64(cpu_fr[rc], cpu_fr[rc]); + break; + case 0x29: + /* FCVTLW */ + gen_helper_fcvtlw(cpu_fr[rc], cpu_env, cpu_fr[rb]); + break; + case 0x2d: + /* FCVTLS */ + gen_helper_fcvtls(cpu_fr[rc], cpu_env, cpu_fr[rb]); + break; + case 0x2f: + /* FCVTLD */ + gen_helper_fcvtld(cpu_fr[rc], cpu_env, cpu_fr[rb]); + break; + case 0x30: + /* FCPYS */ + tmp64 = tcg_temp_new(); + tcg_gen_shri_i64(tmp64, cpu_fr[ra], 63); + tcg_gen_shli_i64(tmp64, tmp64, 63); + tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x7fffffffffffffffUL); + tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); + tcg_temp_free(tmp64); + break; + case 0x31: + /* FCPYSE */ + tmp64 = tcg_temp_new(); + tcg_gen_shri_i64(tmp64, cpu_fr[ra], 52); + tcg_gen_shli_i64(tmp64, tmp64, 52); + tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x000fffffffffffffUL); + tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); + tcg_temp_free(tmp64); + break; + case 0x32: + /* FCPYSN */ + tmp64 = tcg_temp_new(); + tcg_gen_shri_i64(tmp64, cpu_fr[ra], 63); + tcg_gen_not_i64(tmp64, tmp64); + tcg_gen_shli_i64(tmp64, tmp64, 63); + tcg_gen_andi_i64(cpu_fr[rc], cpu_fr[rb], 0x7fffffffffffffffUL); + tcg_gen_or_i64(cpu_fr[rc], tmp64, cpu_fr[rc]); + tcg_temp_free(tmp64); + break; + case 0x40: + /* IFMOVS */ + tmp64 = tcg_temp_new(); + tmp32 = tcg_temp_new_i32(); + tcg_gen_movi_i64(tmp64, ra); + tcg_gen_extrl_i64_i32(tmp32, load_gir(ctx, ra)); + gen_helper_memory_to_s(tmp64, tmp32); + tcg_gen_mov_i64(cpu_fr[rc], tmp64); + tcg_gen_movi_i64(tmp64, rc); + tcg_temp_free(tmp64); + tcg_temp_free_i32(tmp32); + break; + case 0x41: + /* IFMOVD */ + tcg_gen_mov_i64(cpu_fr[rc], load_gir(ctx, ra)); + break; + case 0x50: + /* RFPCR */ + gen_helper_load_fpcr(cpu_fr[ra], cpu_env); + break; + case 0x51: + /* WFPCR */ + gen_helper_store_fpcr(cpu_env, cpu_fr[ra]); + break; + case 0x54: + /* SETFPEC0 */ + tmp64 = tcg_const_i64(0); + gen_helper_setfpcrx(cpu_env, tmp64); + tcg_temp_free(tmp64); + break; + case 0x55: + /* SETFPEC1 */ + tmp64 = tcg_const_i64(1); + gen_helper_setfpcrx(cpu_env, tmp64); + tcg_temp_free(tmp64); + break; + case 0x56: + /* SETFPEC2 */ + tmp64 = tcg_const_i64(2); + gen_helper_setfpcrx(cpu_env, tmp64); + tcg_temp_free(tmp64); + break; + case 0x57: + /* 
SETFPEC3 */ + tmp64 = tcg_const_i64(3); + gen_helper_setfpcrx(cpu_env, tmp64); + tcg_temp_free(tmp64); + break; + default: + fprintf(stderr, "Illegal insn func[%x]\n", fn); + gen_invalid(ctx); + break; + } +} + +static void cal_with_fregs_4(DisasContext *ctx, uint8_t rd, uint8_t ra, + uint8_t rb, uint8_t rc, uint8_t fn) +{ + TCGv zero = tcg_const_i64(0); + TCGv va, vb, vc, vd, tmp64; + + va = cpu_fr[ra]; + vb = cpu_fr[rb]; + vc = cpu_fr[rc]; + vd = cpu_fr[rd]; + + switch (fn) { + case 0x00: + /* FMAS */ + gen_helper_fmas(vd, cpu_env, va, vb, vc); + break; + case 0x01: + /* FMAD */ + gen_helper_fmad(vd, cpu_env, va, vb, vc); + break; + case 0x02: + /* FMSS */ + gen_helper_fmss(vd, cpu_env, va, vb, vc); + break; + case 0x03: + /* FMSD */ + gen_helper_fmsd(vd, cpu_env, va, vb, vc); + break; + case 0x04: + /* FNMAS */ + gen_helper_fnmas(vd, cpu_env, va, vb, vc); + break; + case 0x05: + /* FNMAD */ + gen_helper_fnmad(vd, cpu_env, va, vb, vc); + break; + case 0x06: + /* FNMSS */ + gen_helper_fnmss(vd, cpu_env, va, vb, vc); + break; + case 0x07: + /* FNMSD */ + gen_helper_fnmsd(vd, cpu_env, va, vb, vc); + break; + case 0x10: + /* FSELEQ */ + // Maybe wrong translation. + tmp64 = tcg_temp_new(); + gen_helper_fcmpeq(tmp64, cpu_env, va, zero); + tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb); + tcg_temp_free(tmp64); + break; + case 0x11: + /* FSELNE */ + tmp64 = tcg_temp_new(); + gen_helper_fcmpeq(tmp64, cpu_env, va, zero); + tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vb, vc); + tcg_temp_free(tmp64); + break; + case 0x12: + /* FSELLT */ + tmp64 = tcg_temp_new(); + gen_helper_fcmplt(tmp64, cpu_env, va, zero); + tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb); + tcg_temp_free(tmp64); + break; + case 0x13: + /* FSELLE */ + tmp64 = tcg_temp_new(); + gen_helper_fcmple(tmp64, cpu_env, va, zero); + tcg_gen_movcond_i64(TCG_COND_EQ, vd, tmp64, zero, vc, vb); + tcg_temp_free(tmp64); + break; + case 0x14: + /* FSELGT */ + tmp64 = tcg_temp_new(); + gen_helper_fcmpgt(tmp64, cpu_env, va, zero); + tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp64, zero, vb, vc); + tcg_temp_free(tmp64); + break; + case 0x15: + /* FSELGE */ + tmp64 = tcg_temp_new(); + gen_helper_fcmpge(tmp64, cpu_env, va, zero); + tcg_gen_movcond_i64(TCG_COND_NE, vd, tmp64, zero, vb, vc); + tcg_temp_free(tmp64); + break; + default: + fprintf(stderr, "Illegal insn func[%x]\n", fn); + gen_invalid(ctx); + break; + } + tcg_temp_free(zero); +} +static inline void gen_qemu_lldw(TCGv t0, TCGv t1, int flags) +{ + tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LESL); + tcg_gen_mov_i64(cpu_lock_addr, t1); +#ifdef SW64_FIXLOCK + tcg_gen_ext32u_i64(cpu_lock_value, t0); +#endif +} + +static inline void gen_qemu_lldl(TCGv t0, TCGv t1, int flags) +{ + tcg_gen_qemu_ld_i64(t0, t1, flags, MO_LEQ); + tcg_gen_mov_i64(cpu_lock_addr, t1); +#ifdef SW64_FIXLOCK + tcg_gen_mov_i64(cpu_lock_value, t0); +#endif +} + +static DisasJumpType gen_store_conditional(DisasContext *ctx, int ra, int rb, + int32_t disp16, int mem_idx, + MemOp op) +{ + TCGLabel *lab_fail, *lab_done; + TCGv addr; + + addr = tcg_temp_new_i64(); + tcg_gen_addi_i64(addr, load_gir(ctx, rb), disp16); + free_context_temps(ctx); + + lab_fail = gen_new_label(); + lab_done = gen_new_label(); + tcg_gen_brcond_i64(TCG_COND_NE, addr, cpu_lock_addr, lab_fail); + tcg_temp_free_i64(addr); + tcg_gen_brcondi_i64(TCG_COND_NE, cpu_lock_flag, 0x1, lab_fail); +#ifdef SW64_FIXLOCK + TCGv val = tcg_temp_new_i64(); + tcg_gen_atomic_cmpxchg_i64(val, cpu_lock_addr, cpu_lock_value, + load_gir(ctx, ra), mem_idx, op); + 
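+ /*
+  * The SW64_FIXLOCK path emulates load-locked and store-conditional
+  * with a compare-and-swap: the store succeeds only if memory still
+  * holds the value captured by the matching lldw or lldl. The cmpxchg
+  * returns whatever was found in memory, so comparing it against
+  * cpu_lock_value yields the success flag. Roughly, in C11 terms
+  * (sketch, hypothetical names):
+  *
+  *   uint64_t expected = lock_value;
+  *   success = atomic_compare_exchange_strong(addr, &expected, new_val);
+  *
+  * This is weaker than real LL/SC -- an ABA change back to the old
+  * value still succeeds -- the usual trade-off of cmpxchg emulation.
+  */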
tcg_gen_setcond_i64(TCG_COND_EQ, cpu_lock_success, val, cpu_lock_value); + tcg_temp_free_i64(val); +#else + tcg_gen_qemu_st_i64(load_gir(ctx, ra), addr, mem_idx, op); +#endif + + tcg_gen_br(lab_done); + + gen_set_label(lab_fail); + tcg_gen_movi_i64(cpu_lock_success, 0); + gen_set_label(lab_done); + + tcg_gen_movi_i64(cpu_lock_flag, 0); + tcg_gen_movi_i64(cpu_lock_addr, -1); + return DISAS_NEXT; +} + +static DisasJumpType gen_sys_call(DisasContext *ctx, int syscode) +{ + if (syscode >= 0x80 && syscode <= 0xbf) { + switch (syscode) { + case 0x86: + /* IMB */ + /* No-op inside QEMU */ + break; +#ifdef CONFIG_USER_ONLY + case 0x9E: + /* RDUNIQUE */ + tcg_gen_ld_i64(ctx->ir[IDX_V0], cpu_env, + offsetof(CPUSW64State, unique)); + break; + case 0x9F: + /* WRUNIQUE */ + tcg_gen_st_i64(ctx->ir[IDX_A0], cpu_env, + offsetof(CPUSW64State, unique)); + break; +#endif + default: + goto do_sys_call; + } + return DISAS_NEXT; + } +do_sys_call: +#ifdef CONFIG_USER_ONLY + return gen_excp(ctx, EXCP_CALL_SYS, syscode); +#else + tcg_gen_movi_i64(cpu_hm_ir[23], ctx->base.pc_next); + return gen_excp(ctx, EXCP_CALL_SYS, syscode); +#endif +} + +static void read_csr(int idx, TCGv va) +{ + TCGv_i64 tmp = tcg_const_i64(idx); + gen_helper_read_csr(va, cpu_env, tmp); + tcg_temp_free_i64(tmp); +} + +static void write_csr(int idx, TCGv va, CPUSW64State *env) +{ + TCGv_i64 tmp = tcg_const_i64(idx); + gen_helper_write_csr(cpu_env, tmp, va); + tcg_temp_free_i64(tmp); +} + +static inline void ldx_set(DisasContext *ctx, int ra, int rb, int32_t disp12, + bool bype) +{ + TCGv tmp, addr, va, t1; + + /* LDQ_U with ra $31 is UNOP. Other various loads are forms of + prefetches, which we can treat as nops. No worries about + missed exceptions here. */ + if (unlikely(ra == 31)) { + return; + } + + tmp = tcg_temp_new(); + t1 = tcg_const_i64(1); + addr = load_gir(ctx, rb); + + tcg_gen_addi_i64(tmp, addr, disp12); + addr = tmp; + + va = load_gir(ctx, ra); + if (bype == 0) { + tcg_gen_atomic_xchg_i64(va, addr, t1, ctx->mem_idx, MO_TESL); + } else { + tcg_gen_atomic_xchg_i64(va, addr, t1, ctx->mem_idx, MO_TEQ); + } + + tcg_temp_free(tmp); + tcg_temp_free(t1); +} + +static inline void ldx_xxx(DisasContext *ctx, int ra, int rb, int32_t disp12, + bool bype, int64_t val) +{ + TCGv tmp, addr, va, t; + + /* LDQ_U with ra $31 is UNOP. Other various loads are forms of + prefetches, which we can treat as nops. No worries about + missed exceptions here. 
*/
+    if (unlikely(ra == 31)) {
+        return;
+    }
+
+    tmp = tcg_temp_new();
+    t = tcg_const_i64(val);
+    addr = load_gir(ctx, rb);
+
+    tcg_gen_addi_i64(tmp, addr, disp12);
+    addr = tmp;
+
+    va = load_gir(ctx, ra);
+    if (bype == 0) {
+        tcg_gen_atomic_fetch_add_i64(va, addr, t, ctx->mem_idx, MO_TESL);
+    } else {
+        tcg_gen_atomic_fetch_add_i64(va, addr, t, ctx->mem_idx, MO_TEQ);
+    }
+
+    tcg_temp_free(tmp);
+    tcg_temp_free(t);
+}
+
+static void tcg_gen_srlow_i64(int ra, int rc, int rb)
+{
+    TCGv va, vb, vc;
+    TCGv shift;
+
+    va = tcg_const_i64(ra);
+    vc = tcg_const_i64(rc);
+    shift = tcg_temp_new();
+    vb = cpu_fr[rb];
+    tcg_gen_shri_i64(shift, vb, 29);
+    tcg_gen_andi_i64(shift, shift, 0xff);
+
+    gen_helper_srlow(cpu_env, va, vc, shift);
+
+    tcg_temp_free(vc);
+    tcg_temp_free(va);
+    tcg_temp_free(shift);
+}
+
+static void tcg_gen_srlowi_i64(int ra, int rc, int disp8)
+{
+    TCGv va, vc;
+    TCGv shift;
+
+    va = tcg_const_i64(ra);
+    vc = tcg_const_i64(rc);
+    shift = tcg_temp_new();
+    tcg_gen_movi_i64(shift, disp8);
+    tcg_gen_andi_i64(shift, shift, 0xff);
+
+    gen_helper_srlow(cpu_env, va, vc, shift);
+
+    tcg_temp_free(vc);
+    tcg_temp_free(va);
+    tcg_temp_free(shift);
+}
+
+static void tcg_gen_sllow_i64(int ra, int rc, int rb)
+{
+    TCGv va, vb, vc;
+    TCGv shift;
+
+    va = tcg_const_i64(ra);
+    vc = tcg_const_i64(rc);
+    shift = tcg_temp_new();
+    vb = cpu_fr[rb];
+    tcg_gen_shri_i64(shift, vb, 29);
+    tcg_gen_andi_i64(shift, shift, 0xff);
+
+    gen_helper_sllow(cpu_env, va, vc, shift);
+
+    tcg_temp_free(vc);
+    tcg_temp_free(va);
+    tcg_temp_free(shift);
+}
+
+static void tcg_gen_sllowi_i64(int ra, int rc, int disp8)
+{
+    TCGv va, vc;
+    TCGv shift;
+
+    va = tcg_const_i64(ra);
+    vc = tcg_const_i64(rc);
+    shift = tcg_temp_new();
+    tcg_gen_movi_i64(shift, disp8);
+    tcg_gen_andi_i64(shift, shift, 0xff);
+
+    gen_helper_sllow(cpu_env, va, vc, shift);
+
+    tcg_temp_free(vc);
+    tcg_temp_free(va);
+    tcg_temp_free(shift);
+}
+
+static void gen_qemu_vstw_uh(int t0, TCGv t1, int memidx)
+{
+    TCGv byte4_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[8];
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    tmp[4] = tcg_temp_new();
+    tmp[5] = tcg_temp_new();
+    tmp[6] = tcg_temp_new();
+    tmp[7] = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte4_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte4_len, t1, 2);
+    tcg_gen_andi_i64(byte4_len, byte4_len, 0x7UL);
+    tcg_gen_andi_i64(t1, t1, ~0x3UL); /* round addr down to 4 bytes: t1 = addr_start + byte4_len * 4 */
+    tcg_gen_andi_i64(addr_start, t1, ~0x1fUL);
+    tcg_gen_mov_i64(addr_end, t1);
+    for (i = 7; i >= 0; i--) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_subi_i64(t1, t1, 4);
+        if (i % 2)
+            tcg_gen_shli_i64(tmp[i], tmp[i], 32);
+    }
+    tcg_gen_subfi_i64(byte4_len, 8, byte4_len);
+
+    for (i = 0; i < 8; i++) {
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte4_len,
+                            cpu_fr[t0 + (i / 2) * 32], tmp[i]);
+        if (i % 2)
+            tcg_gen_shri_i64(tmp[i], tmp[i], 32);
+        else
+            tcg_gen_andi_i64(tmp[i], tmp[i], 0xffffffffUL);
+    }
+
+    tcg_gen_subi_i64(addr_end, addr_end, 32);
+    for (i = 0; i < 8; i++) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start,
+                            addr_end, addr_start);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_addi_i64(addr_end, addr_end, 4);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
tcg_temp_free(byte4_len); + tcg_temp_free(tmp[0]); + tcg_temp_free(tmp[1]); + tcg_temp_free(tmp[2]); + tcg_temp_free(tmp[3]); + tcg_temp_free(tmp[4]); + tcg_temp_free(tmp[5]); + tcg_temp_free(tmp[6]); + tcg_temp_free(tmp[7]); +} + +static void gen_qemu_vstw_ul(int t0, TCGv t1, int memidx) +{ + TCGv byte4_len; + TCGv addr_start, addr_end; + TCGv tmp[8]; + TCGv ti; + int i; + + tmp[0] = tcg_temp_new(); + tmp[1] = tcg_temp_new(); + tmp[2] = tcg_temp_new(); + tmp[3] = tcg_temp_new(); + tmp[4] = tcg_temp_new(); + tmp[5] = tcg_temp_new(); + tmp[6] = tcg_temp_new(); + tmp[7] = tcg_temp_new(); + ti = tcg_temp_new(); + addr_start = tcg_temp_new(); + addr_end = tcg_temp_new(); + byte4_len = tcg_temp_new(); + + tcg_gen_shri_i64(byte4_len, t1, 2); + tcg_gen_andi_i64(byte4_len, byte4_len, 0x7UL); + tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */ + tcg_gen_mov_i64(addr_start, t1); /* t1 = addr + byte4_len * 4 */ + tcg_gen_addi_i64(addr_end, addr_start, 24); + for (i = 0; i < 8; i++) { + tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end); + tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL); + tcg_gen_addi_i64(t1, t1, 4); + if (i % 2) + tcg_gen_shli_i64(tmp[i], tmp[i], 32); + } + tcg_gen_subfi_i64(byte4_len, 8, byte4_len); + + for (i = 0; i < 8; i++) { + tcg_gen_movi_i64(ti, i); + tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte4_len, cpu_fr[t0 + (i/2)*32], tmp[i]); + if (i % 2) + tcg_gen_shri_i64(tmp[i], tmp[i], 32); + else + tcg_gen_andi_i64(tmp[i], tmp[i], 0xffffffffUL); + } + + tcg_gen_addi_i64(addr_start, addr_start, 32); + for (i = 7; i >= 0; i--) { + tcg_gen_subi_i64(addr_start, addr_start, 4); + tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end, addr_start, addr_end); + tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL); + } + + tcg_temp_free(ti); + tcg_temp_free(addr_start); + tcg_temp_free(addr_end); + tcg_temp_free(byte4_len); + tcg_temp_free(tmp[0]); + tcg_temp_free(tmp[1]); + tcg_temp_free(tmp[2]); + tcg_temp_free(tmp[3]); + tcg_temp_free(tmp[4]); + tcg_temp_free(tmp[5]); + tcg_temp_free(tmp[6]); + tcg_temp_free(tmp[7]); +} + +static void gen_qemu_vsts_uh(int t0, TCGv t1, int memidx) +{ + TCGv byte4_len; + TCGv addr_start, addr_end; + TCGv tmp[4]; + TCGv ftmp; + TCGv ti; + int i; + + tmp[0] = tcg_temp_new(); + tmp[1] = tcg_temp_new(); + tmp[2] = tcg_temp_new(); + tmp[3] = tcg_temp_new(); + ti = tcg_temp_new(); + ftmp = tcg_temp_new(); + addr_start = tcg_temp_new(); + addr_end = tcg_temp_new(); + byte4_len = tcg_temp_new(); + + tcg_gen_shri_i64(byte4_len, t1, 2); + tcg_gen_andi_i64(byte4_len, byte4_len, 0x3UL); + tcg_gen_andi_i64(t1, t1, ~0x3UL); /* t1 = addr + byte4_len * 4 */ + tcg_gen_andi_i64(addr_start, t1, ~0xfUL); + tcg_gen_mov_i64(addr_end, t1); + for (i = 3; i >= 0; i--) { + tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start); + tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL); + tcg_gen_subi_i64(t1, t1, 4); + } + tcg_gen_subfi_i64(byte4_len, 4, byte4_len); + + for (i = 0; i < 4; i++) { + tcg_gen_shri_i64(ti, cpu_fr[t0 + i * 32], 62); + tcg_gen_shli_i64(ti, ti, 30); + tcg_gen_shri_i64(ftmp, cpu_fr[t0 + i * 32], 29); + tcg_gen_andi_i64(ftmp, ftmp, 0x3fffffffUL); + tcg_gen_or_i64(ftmp, ftmp, ti); + tcg_gen_movi_i64(ti, i); + tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte4_len, ftmp, tmp[i]); + } + + tcg_gen_subi_i64(addr_end, addr_end, 16); + for (i = 0; i < 4; i++) { + tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start, addr_end, addr_start); + tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL); + 
tcg_gen_addi_i64(addr_end, addr_end, 4);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(ftmp);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte4_len);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+}
+
+static void gen_qemu_vsts_ul(int t0, TCGv t1, int memidx)
+{
+    TCGv byte4_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[4];
+    TCGv ftmp;
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    ftmp = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte4_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte4_len, t1, 2);
+    tcg_gen_andi_i64(byte4_len, byte4_len, 0x3UL);
+    tcg_gen_andi_i64(t1, t1, ~0x3UL); /* round addr down to 4 bytes */
+    tcg_gen_mov_i64(addr_start, t1);  /* addr_start = rounded addr */
+    tcg_gen_addi_i64(addr_end, addr_start, 12);
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEUL);
+        tcg_gen_addi_i64(t1, t1, 4);
+    }
+    tcg_gen_subfi_i64(byte4_len, 4, byte4_len);
+
+    for (i = 0; i < 4; i++) {
+        tcg_gen_shri_i64(ti, cpu_fr[t0 + i * 32], 62);
+        tcg_gen_shli_i64(ti, ti, 30);
+        tcg_gen_shri_i64(ftmp, cpu_fr[t0 + i * 32], 29);
+        tcg_gen_andi_i64(ftmp, ftmp, 0x3fffffffUL);
+        tcg_gen_or_i64(ftmp, ftmp, ti);
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte4_len, ftmp, tmp[i]);
+    }
+
+    tcg_gen_addi_i64(addr_start, addr_start, 16);
+    for (i = 3; i >= 0; i--) {
+        tcg_gen_subi_i64(addr_start, addr_start, 4);
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end,
+                            addr_start, addr_end);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEUL);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte4_len);
+    tcg_temp_free(ftmp);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+}
+
+static void gen_qemu_vstd_uh(int t0, TCGv t1, int memidx)
+{
+    TCGv byte8_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[4];
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte8_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte8_len, t1, 3);
+    tcg_gen_andi_i64(byte8_len, byte8_len, 0x3UL);
+    tcg_gen_andi_i64(t1, t1, ~0x7UL); /* round addr down to 8 bytes: t1 = addr_start + byte8_len * 8 */
+    tcg_gen_andi_i64(addr_start, t1, ~0x1fUL);
+    tcg_gen_mov_i64(addr_end, t1);
+    for (i = 3; i >= 0; i--) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, t1, addr_start, t1, addr_start);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEQ);
+        tcg_gen_subi_i64(t1, t1, 8);
+    }
+    tcg_gen_subfi_i64(byte8_len, 4, byte8_len);
+
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_GEU, tmp[i], ti, byte8_len,
+                            cpu_fr[t0 + i * 32], tmp[i]);
+    }
+
+    tcg_gen_subi_i64(addr_end, addr_end, 32);
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movcond_i64(TCG_COND_GEU, t1, addr_end, addr_start,
+                            addr_end, addr_start);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEQ);
+        tcg_gen_addi_i64(addr_end, addr_end, 8);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte8_len);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+}
+
+static void gen_qemu_vstd_ul(int t0, TCGv t1, int memidx)
+{
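+    /*
+     * VSTD_UL appears to implement the low half of an unaligned
+     * 256-bit vector store: the leading elements, up to the next
+     * 32-byte boundary, are merged into memory via a qword-granular
+     * read-modify-write.
+     */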
+    TCGv byte8_len;
+    TCGv addr_start, addr_end;
+    TCGv tmp[4];
+    TCGv ti;
+    int i;
+
+    tmp[0] = tcg_temp_new();
+    tmp[1] = tcg_temp_new();
+    tmp[2] = tcg_temp_new();
+    tmp[3] = tcg_temp_new();
+    ti = tcg_temp_new();
+    addr_start = tcg_temp_new();
+    addr_end = tcg_temp_new();
+    byte8_len = tcg_temp_new();
+
+    tcg_gen_shri_i64(byte8_len, t1, 3);
+    tcg_gen_andi_i64(byte8_len, byte8_len, 0x3UL);
+    tcg_gen_andi_i64(t1, t1, ~0x7UL); /* round addr down to 8 bytes */
+    tcg_gen_mov_i64(addr_start, t1);  /* addr_start = rounded addr */
+    tcg_gen_addi_i64(addr_end, addr_start, 24);
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, t1, addr_end, t1, addr_end);
+        tcg_gen_qemu_ld_i64(tmp[i], t1, memidx, MO_TEQ);
+        tcg_gen_addi_i64(t1, t1, 8);
+    }
+    tcg_gen_subfi_i64(byte8_len, 4, byte8_len);
+
+    for (i = 0; i < 4; i++) {
+        tcg_gen_movi_i64(ti, i);
+        tcg_gen_movcond_i64(TCG_COND_LTU, tmp[i], ti, byte8_len,
+                            cpu_fr[t0 + i * 32], tmp[i]);
+    }
+
+    tcg_gen_addi_i64(addr_start, addr_start, 32);
+    for (i = 3; i >= 0; i--) {
+        tcg_gen_subi_i64(addr_start, addr_start, 8);
+        tcg_gen_movcond_i64(TCG_COND_LEU, t1, addr_start, addr_end,
+                            addr_start, addr_end);
+        tcg_gen_qemu_st_i64(tmp[i], t1, memidx, MO_TEQ);
+    }
+
+    tcg_temp_free(ti);
+    tcg_temp_free(addr_start);
+    tcg_temp_free(addr_end);
+    tcg_temp_free(byte8_len);
+    tcg_temp_free(tmp[0]);
+    tcg_temp_free(tmp[1]);
+    tcg_temp_free(tmp[2]);
+    tcg_temp_free(tmp[3]);
+}
+
+static void tcg_gen_vcpys_i64(int ra, int rb, int rc)
+{
+    int i;
+    TCGv tmp64 = tcg_temp_new();
+    for (i = 0; i < 128; i += 32) {
+        tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 63);
+        tcg_gen_shli_i64(tmp64, tmp64, 63);
+        tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x7fffffffffffffffUL);
+        tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]);
+    }
+    tcg_temp_free(tmp64);
+}
+
+static void tcg_gen_vcpyse_i64(int ra, int rb, int rc)
+{
+    int i;
+
+    TCGv tmp64 = tcg_temp_new();
+
+    for (i = 0; i < 128; i += 32) {
+        tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 52);
+        tcg_gen_shli_i64(tmp64, tmp64, 52);
+        tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x000fffffffffffffUL);
+        tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]);
+    }
+    tcg_temp_free(tmp64);
+}
+
+static void tcg_gen_vcpysn_i64(int ra, int rb, int rc)
+{
+    int i;
+    TCGv tmp64 = tcg_temp_new();
+    for (i = 0; i < 128; i += 32) {
+        tcg_gen_shri_i64(tmp64, cpu_fr[ra + i], 63);
+        tcg_gen_not_i64(tmp64, tmp64);
+        tcg_gen_shli_i64(tmp64, tmp64, 63);
+        tcg_gen_andi_i64(cpu_fr[rc + i], cpu_fr[rb + i], 0x7fffffffffffffffUL);
+        tcg_gen_or_i64(cpu_fr[rc + i], tmp64, cpu_fr[rc + i]);
+    }
+    tcg_temp_free(tmp64);
+}
+
+static void tcg_gen_vlogzz_i64(DisasContext *ctx, int opc, int ra, int rb,
+                               int rc, int rd, int fn6)
+{
+    TCGv zz;
+    TCGv args, vd;
+    zz = tcg_const_i64(((opc & 0x3) << 6) | fn6);
+    args = tcg_const_i64((ra << 16) | (rb << 8) | rc);
+    vd = tcg_const_i64(rd);
+
+    gen_helper_vlogzz(cpu_env, args, vd, zz);
+
+    tcg_temp_free(vd);
+    tcg_temp_free(args);
+    tcg_temp_free(zz);
+}
+
+static void gen_qemu_vcmpxxw_i64(TCGCond cond, int ra, int rb, int rc)
+{
+    TCGv va, vb, vc, tmp64;
+    int i;
+
+    va = tcg_temp_new();
+    vb = tcg_temp_new();
+    vc = tcg_temp_new();
+    tmp64 = tcg_temp_new();
+
+    for (i = 0; i < 128; i += 32) {
+        if ((cond >> 1) & 1) {
+            tcg_gen_ext32s_i64(va, cpu_fr[ra + i]);
+            tcg_gen_ext32s_i64(vb, cpu_fr[rb + i]);
+        } else {
+            tcg_gen_ext32u_i64(va, cpu_fr[ra + i]);
+            tcg_gen_ext32u_i64(vb, cpu_fr[rb + i]);
+        }
+        tcg_gen_setcond_i64(cond, vc, va, vb);
+        tcg_gen_mov_i64(tmp64, vc);
+
+        tcg_gen_shri_i64(va, cpu_fr[ra + i],
32); + tcg_gen_shri_i64(vb, cpu_fr[rb + i], 32); + if ((cond >> 1) & 1) { + tcg_gen_ext32s_i64(va, va); + tcg_gen_ext32s_i64(vb, vb); + } else { + tcg_gen_ext32u_i64(va, va); + tcg_gen_ext32u_i64(vb, vb); + } + tcg_gen_setcond_i64(cond, vc, va, vb); + tcg_gen_shli_i64(vc, vc, 32); + tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); + } + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + tcg_temp_free(tmp64); +} + +static void gen_qemu_vcmpxxwi_i64(TCGCond cond, int ra, int rb, int rc) +{ + TCGv va, vb, vc, tmp64; + int i; + + va = tcg_temp_new(); + vb = tcg_const_i64(rb); + vc = tcg_temp_new(); + tmp64 = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + if ((cond >> 1) & 1) { + tcg_gen_ext32s_i64(va, cpu_fr[ra + i]); + } else { + tcg_gen_ext32u_i64(va, cpu_fr[ra + i]); + } + tcg_gen_setcond_i64(cond, vc, va, vb); + tcg_gen_mov_i64(tmp64, vc); + + tcg_gen_shri_i64(va, cpu_fr[ra + i], 32); + if ((cond >> 1) & 1) { + tcg_gen_ext32s_i64(va, va); + } else { + tcg_gen_ext32u_i64(va, va); + } + tcg_gen_setcond_i64(cond, vc, va, vb); + tcg_gen_shli_i64(vc, vc, 32); + tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); + } + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + tcg_temp_free(tmp64); +} + +static void gen_qemu_vselxxw(TCGCond cond, int ra, int rb, int rc, int rd, + int mask) +{ + int i; + + TCGv t0 = tcg_const_i64(0); + TCGv tmpa = tcg_temp_new(); + TCGv tmpb = tcg_temp_new(); + TCGv tmpc = tcg_temp_new(); + TCGv tmpd = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_ext32s_i64(tmpa, cpu_fr[ra + i]); + tcg_gen_ext32u_i64(tmpb, cpu_fr[rb + i]); + tcg_gen_ext32u_i64(tmpc, cpu_fr[rc + i]); + if (mask) tcg_gen_andi_i64(tmpa, tmpa, mask); + tcg_gen_movcond_i64(cond, tmpd, tmpa, t0, tmpb, tmpc); + + tcg_gen_andi_i64(tmpa, cpu_fr[ra + i], 0xffffffff00000000UL); + tcg_gen_andi_i64(tmpb, cpu_fr[rb + i], 0xffffffff00000000UL); + tcg_gen_andi_i64(tmpc, cpu_fr[rc + i], 0xffffffff00000000UL); + if (mask) tcg_gen_andi_i64(tmpa, tmpa, (uint64_t)mask << 32); + tcg_gen_movcond_i64(cond, cpu_fr[rd + i], tmpa, t0, tmpb, tmpc); + + tcg_gen_or_i64(cpu_fr[rd + i], cpu_fr[rd + i], tmpd); + } + + tcg_temp_free(t0); + tcg_temp_free(tmpa); + tcg_temp_free(tmpb); + tcg_temp_free(tmpc); + tcg_temp_free(tmpd); +} + +static void gen_qemu_vselxxwi(TCGCond cond, int ra, int rb, int disp8, int rd, + int mask) +{ + int i; + + TCGv t0 = tcg_const_i64(0); + TCGv tmpa = tcg_temp_new(); + TCGv tmpb = tcg_temp_new(); + TCGv tmpc_0 = tcg_temp_new(); + TCGv tmpc_1 = tcg_temp_new(); + TCGv tmpd = tcg_temp_new(); + + tcg_gen_movi_i64(tmpc_0, (uint64_t)(((uint64_t)disp8))); + tcg_gen_movi_i64(tmpc_1, (uint64_t)(((uint64_t)disp8 << 32))); + for (i = 0; i < 128; i += 32) { + tcg_gen_ext32s_i64(tmpa, cpu_fr[ra + i]); + tcg_gen_ext32u_i64(tmpb, cpu_fr[rb + i]); + if (mask) tcg_gen_andi_i64(tmpa, tmpa, mask); + tcg_gen_movcond_i64(cond, tmpd, tmpa, t0, tmpb, tmpc_0); + + tcg_gen_andi_i64(tmpa, cpu_fr[ra + i], 0xffffffff00000000UL); + tcg_gen_andi_i64(tmpb, cpu_fr[rb + i], 0xffffffff00000000UL); + if (mask) tcg_gen_andi_i64(tmpa, tmpa, (uint64_t)mask << 32); + tcg_gen_movcond_i64(cond, cpu_fr[rd + i], tmpa, t0, tmpb, tmpc_1); + + tcg_gen_or_i64(cpu_fr[rd + i], cpu_fr[rd + i], tmpd); + } + + tcg_temp_free(t0); + tcg_temp_free(tmpa); + tcg_temp_free(tmpb); + tcg_temp_free(tmpc_0); + tcg_temp_free(tmpc_1); + tcg_temp_free(tmpd); +} + +DisasJumpType translate_one(DisasContextBase *dcbase, uint32_t insn, + CPUState *cpu) +{ + int32_t disp5, disp8, disp12, disp13, disp16, disp21, disp26 __attribute__((unused)); + 
uint8_t opc, ra, rb, rc, rd;
+    uint16_t fn3, fn4, fn6, fn8, fn11;
+    int32_t i;
+    TCGv va, vb, vc, vd;
+    TCGv_i32 tmp32;
+    TCGv_i64 tmp64, tmp64_0, tmp64_1, shift;
+    TCGv_i32 tmpa, tmpb, tmpc;
+    DisasJumpType ret;
+    DisasContext* ctx = container_of(dcbase, DisasContext, base);
+
+    opc = extract32(insn, 26, 6);
+    ra = extract32(insn, 21, 5);
+    rb = extract32(insn, 16, 5);
+    rc = extract32(insn, 0, 5);
+    rd = extract32(insn, 5, 5);
+
+    fn3 = extract32(insn, 10, 3);
+    fn6 = extract32(insn, 10, 6);
+    fn4 = extract32(insn, 12, 4);
+    fn8 = extract32(insn, 5, 8);
+    fn11 = extract32(insn, 5, 11);
+
+    disp5 = extract32(insn, 5, 5);
+    disp8 = extract32(insn, 13, 8);
+    disp12 = sextract32(insn, 0, 12);
+    disp13 = sextract32(insn, 13, 13);
+    disp16 = sextract32(insn, 0, 16);
+    disp21 = sextract32(insn, 0, 21);
+    disp26 = sextract32(insn, 0, 26);
+
+    ret = DISAS_NEXT;
+    insn_profile(ctx, insn);
+
+    switch (opc) {
+    case 0x00:
+        /* SYS_CALL */
+        ret = gen_sys_call(ctx, insn & 0x1ffffff);
+        break;
+    case 0x01:
+        /* CALL */
+    case 0x02:
+        /* RET */
+    case 0x03:
+        /* JMP */
+        vb = load_gir(ctx, rb);
+        tcg_gen_addi_i64(cpu_pc, vb, ctx->base.pc_next & 0x3);
+        if (ra != 31) {
+            tcg_gen_movi_i64(load_gir(ctx, ra), ctx->base.pc_next & (~3UL));
+        }
+        ret = DISAS_PC_UPDATED;
+        break;
+    case 0x04:
+        /* BR */
+    case 0x05:
+        /* BSR */
+        ret = gen_bdirect(ctx, ra, disp21);
+        break;
+    case 0x06:
+        switch (disp16) {
+        case 0x0000:
+            /* MEMB */
+            tcg_gen_mb(TCG_MO_ALL | TCG_BAR_SC);
+            break;
+        case 0x0001:
+            /* IMEMB */
+            /* No-op inside QEMU */
+            break;
+        case 0x0020:
+            /* RTC */
+            if (disp16 && unlikely(ra == 31)) break;
+            va = load_gir(ctx, ra);
+            gen_helper_rtc(va);
+            break;
+        case 0x0040:
+            /* RCID */
+            if (disp16 && unlikely(ra == 31)) break;
+            va = load_gir(ctx, ra);
+            read_csr(0xc9, va);
+            break;
+        case 0x0080:
+            /* HALT */
+#ifndef CONFIG_USER_ONLY
+            {
+                tmp32 = tcg_const_i32(1);
+                tcg_gen_st_i32(
+                    tmp32, cpu_env,
+                    -offsetof(SW64CPU, env) + offsetof(CPUState, halted));
+                tcg_temp_free_i32(tmp32);
+            }
+            ret = gen_excp(ctx, EXCP_HALTED, 0);
+#endif
+            break;
+        case 0x1000:
+            /* RD_F */
+            if (disp16 && unlikely(ra == 31)) break;
+            va = load_gir(ctx, ra);
+            tcg_gen_mov_i64(va, cpu_lock_success);
+            break;
+        case 0x1020:
+            /* WR_F */
+            if (disp16 && unlikely(ra == 31)) break;
+            va = load_gir(ctx, ra);
+            tcg_gen_andi_i64(cpu_lock_flag, va, 0x1);
+            break;
+        case 0x1040:
+            /* RTID */
+            if (unlikely(ra == 31)) break;
+            va = load_gir(ctx, ra);
+            read_csr(0xc7, va);
+            break;
+        default:
+            if ((disp16 & 0xFF00) == 0xFE00) {
+                /* PRI_RCSR */
+                if (disp16 && unlikely(ra == 31)) break;
+                va = load_gir(ctx, ra);
+                read_csr(disp16 & 0xff, va);
+                break;
+            }
+            if ((disp16 & 0xFF00) == 0xFF00) {
+                /* PRI_WCSR */
+                va = load_gir(ctx, ra);
+                write_csr(disp16 & 0xff, va, ctx->env);
+                break;
+            }
+            goto do_invalid;
+        }
+        break;
+    case 0x07:
+        /* PRI_RET */
+        va = load_gir(ctx, ra);
+        tcg_gen_mov_i64(cpu_pc, va);
+        gen_helper_cpustate_update(cpu_env, va);
+        ret = DISAS_PC_UPDATED_NOCHAIN;
+        break;
+    case 0x08:
+        switch (fn4) {
+        case 0x0:
+            /* LLDW */
+            gen_load_mem(ctx, &gen_qemu_lldw, ra, rb, disp12, 0, 0);
+            break;
+        case 0x1:
+            /* LLDL */
+            gen_load_mem(ctx, &gen_qemu_lldl, ra, rb, disp12, 0, 0);
+            break;
+        case 0x2:
+            /* LDW_INC */
+            ldx_xxx(ctx, ra, rb, disp12, 0, 1);
+            break;
+        case 0x3:
+            /* LDL_INC */
+            ldx_xxx(ctx, ra, rb, disp12, 1, 1);
+            break;
+        case 0x4:
+            /* LDW_DEC */
+            ldx_xxx(ctx, ra, rb, disp12, 0, -1);
+            break;
+        case 0x5:
+            /* LDL_DEC */
+            ldx_xxx(ctx, ra, rb, disp12, 1, -1);
+            break;
+        case 0x6:
+            /* LDW_SET */
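+            /* atomic test-and-set: ra receives the old word, memory is set to 1 */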
ldx_set(ctx, ra, rb, disp12, 0); + break; + case 0x7: + /* LDL_SET */ + ldx_set(ctx, ra, rb, disp12, 1); + break; + case 0x8: + /* LSTW */ + ret = gen_store_conditional(ctx, ra, rb, disp12, + ctx->mem_idx, MO_LEUL); + break; + case 0x9: + /* LSTL */ + ret = gen_store_conditional(ctx, ra, rb, disp12, + ctx->mem_idx, MO_LEQ); + break; + case 0xa: + /* LDW_NC */ + gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp12, 0, + 0); + break; + case 0xb: + /* LDL_NC */ + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp12, 0, 0); + break; + case 0xc: + /* LDD_NC */ + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp12, 1, 0); + break; + case 0xd: + /* STW_NC */ + gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp12, 0, + 0); + break; + case 0xe: + /* STL_NC */ + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp12, 0, + 0); + break; + case 0xf: + /* STD_NC */ + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp12, 1, + 0); + break; + default: + goto do_invalid; + } + break; + case 0x9: + /* LDWE */ + gen_load_mem_simd(ctx, &gen_qemu_ldwe, ra, rb, disp16, 0); + break; + case 0x0a: + /* LDSE */ + gen_load_mem_simd(ctx, &gen_qemu_ldse, ra, rb, disp16, 0); + break; + case 0x0b: + /* LDDE */ + gen_load_mem_simd(ctx, &gen_qemu_ldde, ra, rb, disp16, 0); + break; + case 0x0c: + /* VLDS */ + gen_load_mem_simd(ctx, &gen_qemu_vlds, ra, rb, disp16, 0); + break; + case 0x0d: + /* VLDD */ + if (unlikely(ra == 31)) break; + gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp16, 0); + break; + case 0x0e: + /* VSTS */ + gen_store_mem_simd(ctx, &gen_qemu_vsts, ra, rb, disp16, 0); + break; + case 0x0f: + /* VSTD */ + gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp16, 0); + break; + case 0x10: + if (unlikely(rc == 31)) break; + if (fn11 == 0x70) { + /* FIMOVS */ + va = cpu_fr[ra]; + vc = load_gir(ctx, rc); + tmp32 = tcg_temp_new_i32(); + gen_helper_s_to_memory(tmp32, va); + tcg_gen_ext_i32_i64(vc, tmp32); + tcg_temp_free_i32(tmp32); + } else if (fn11 == 0x78) { + /* FIMOVD */ + va = cpu_fr[ra]; + vc = load_gir(ctx, rc); + tcg_gen_mov_i64(vc, va); + } else { + va = load_gir(ctx, ra); + vb = load_gir(ctx, rb); + vc = load_gir(ctx, rc); + cal_with_iregs_2(ctx, vc, va, vb, disp13, fn11); + } + break; + case 0x11: + if (unlikely(rc == 31)) break; + va = load_gir(ctx, ra); + vb = load_gir(ctx, rb); + vc = load_gir(ctx, rc); + vd = load_gir(ctx, rd); + cal_with_iregs_3(ctx, vc, va, vb, vd, fn3); + break; + case 0x12: + if (unlikely(rc == 31)) break; + va = load_gir(ctx, ra); + vc = load_gir(ctx, rc); + cal_with_imm_2(ctx, vc, va, disp8, fn8); + break; + case 0x13: + if (rc == 31) /* Special deal */ + break; + va = load_gir(ctx, ra); + vc = load_gir(ctx, rc); + vd = load_gir(ctx, rd); + cal_with_imm_3(ctx, vc, va, disp8, vd, fn3); + break; + case 0x14: + case 0x15: + case 0x16: + case 0x17: + /* VLOGZZ */ + tcg_gen_vlogzz_i64(ctx, opc, ra, rb, rd, rc, fn6); + break; + case 0x18: + if (unlikely(rc == 31)) break; + cal_with_fregs_2(ctx, rc, ra, rb, fn8); + break; + case 0x19: + if (unlikely(rc == 31)) break; + cal_with_fregs_4(ctx, rc, ra, rb, rd, fn6); + break; + case 0x1A: + /* SIMD */ + if (unlikely(rc == 31)) break; + switch (fn8) { + case 0x00: + /* VADDW */ + tmp64 = tcg_temp_new(); + va = tcg_temp_new(); + vb = tcg_temp_new(); + vc = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL); + tcg_gen_andi_i64(vb, cpu_fr[rb + i], 0xffffffffUL); + tcg_gen_add_i64(tmp64, va, vb); + tcg_gen_ext32u_i64(tmp64, tmp64); + tcg_gen_andi_i64(va, cpu_fr[ra + i], + 
0xffffffff00000000UL); + tcg_gen_andi_i64(vb, cpu_fr[rb + i], + 0xffffffff00000000UL); + tcg_gen_add_i64(vc, va, vb); + tcg_gen_or_i64(tmp64, tmp64, vc); + tcg_gen_mov_i64(cpu_fr[rc + i], tmp64); + } + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + tcg_temp_free(tmp64); + break; + case 0x20: + /* VADDW */ + tmp64 = tcg_temp_new(); + va = tcg_temp_new(); + vc = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL); + tcg_gen_addi_i64(tmp64, va, disp8); + tcg_gen_ext32u_i64(tmp64, tmp64); + tcg_gen_andi_i64(va, cpu_fr[ra + i], + 0xffffffff00000000UL); + tcg_gen_addi_i64(vc, va, ((uint64_t)disp8 << 32)); + tcg_gen_or_i64(tmp64, tmp64, vc); + tcg_gen_mov_i64(cpu_fr[rc + i], tmp64); + } + tcg_temp_free(va); + tcg_temp_free(vc); + tcg_temp_free(tmp64); + break; + case 0x01: + /* VSUBW */ + tmp64 = tcg_temp_new(); + va = tcg_temp_new(); + vb = tcg_temp_new(); + vc = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL); + tcg_gen_andi_i64(vb, cpu_fr[rb + i], 0xffffffffUL); + tcg_gen_sub_i64(tmp64, va, vb); + tcg_gen_ext32u_i64(tmp64, tmp64); + tcg_gen_andi_i64(va, cpu_fr[ra + i], + 0xffffffff00000000UL); + tcg_gen_andi_i64(vb, cpu_fr[rb + i], + 0xffffffff00000000UL); + tcg_gen_sub_i64(vc, va, vb); + tcg_gen_or_i64(tmp64, tmp64, vc); + tcg_gen_mov_i64(cpu_fr[rc + i], tmp64); + } + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + tcg_temp_free(tmp64); + break; + case 0x21: + /* VSUBW */ + tmp64 = tcg_temp_new(); + va = tcg_temp_new(); + vc = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_andi_i64(va, cpu_fr[ra + i], 0xffffffffUL); + tcg_gen_subi_i64(tmp64, va, disp8); + tcg_gen_ext32u_i64(tmp64, tmp64); + tcg_gen_andi_i64(va, cpu_fr[ra + i], + 0xffffffff00000000UL); + tcg_gen_subi_i64(vc, va, ((uint64_t)disp8 << 32)); + tcg_gen_or_i64(tmp64, tmp64, vc); + tcg_gen_mov_i64(cpu_fr[rc + i], tmp64); + } + tcg_temp_free(va); + tcg_temp_free(vc); + tcg_temp_free(tmp64); + break; + case 0x02: + /* VCMPGEW */ + tmp64 = tcg_const_i64(0); + va = tcg_temp_new(); + vb = tcg_temp_new(); + vc = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_ext32s_i64(va, cpu_fr[ra + i]); + tcg_gen_ext32s_i64(vb, cpu_fr[rb + i]); + tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb); + tcg_gen_or_i64(tmp64, tmp64, vc); + tcg_gen_shri_i64(va, cpu_fr[ra + i], 32); + tcg_gen_shri_i64(vb, cpu_fr[rb + i], 32); + tcg_gen_ext32s_i64(va, va); + tcg_gen_ext32s_i64(vb, vb); + tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb); + tcg_gen_or_i64(tmp64, tmp64, vc); + } + tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + tcg_temp_free(tmp64); + break; + case 0x22: + /* VCMPGEW */ + tmp64 = tcg_const_i64(0); + va = tcg_temp_new(); + vb = tcg_const_i64(disp8); + vc = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_ext32s_i64(va, cpu_fr[ra + i]); + tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb); + tcg_gen_or_i64(tmp64, tmp64, vc); + tcg_gen_shri_i64(va, cpu_fr[ra + i], 32); + tcg_gen_ext32s_i64(va, va); + tcg_gen_setcond_i64(TCG_COND_GE, vc, va, vb); + tcg_gen_or_i64(tmp64, tmp64, vc); + } + tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + tcg_temp_free(tmp64); + break; + case 0x03: + /* VCMPEQW */ + gen_qemu_vcmpxxw_i64(TCG_COND_EQ, ra, rb, rc); + break; + case 0x23: + /* VCMPEQW */ + gen_qemu_vcmpxxwi_i64(TCG_COND_EQ, ra, disp8, rc); + break; + case 0x04: + /* VCMPLEW */ + 
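+            /* lane-wise signed 32-bit compare; each lane yields 0 or 1 */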
gen_qemu_vcmpxxw_i64(TCG_COND_LE, ra, rb, rc); + break; + case 0x24: + /* VCMPLEW */ + gen_qemu_vcmpxxwi_i64(TCG_COND_LE, ra, disp8, rc); + break; + case 0x05: + /* VCMPLTW */ + gen_qemu_vcmpxxw_i64(TCG_COND_LT, ra, rb, rc); + break; + case 0x25: + /* VCMPLTW */ + gen_qemu_vcmpxxwi_i64(TCG_COND_LT, ra, disp8, rc); + break; + case 0x06: + /* VCMPULEW */ + gen_qemu_vcmpxxw_i64(TCG_COND_LEU, ra, rb, rc); + break; + case 0x26: + /* VCMPULEW */ + gen_qemu_vcmpxxwi_i64(TCG_COND_LEU, ra, disp8, rc); + break; + case 0x07: + /* VCMPULTW */ + gen_qemu_vcmpxxw_i64(TCG_COND_LTU, ra, rb, rc); + break; + case 0x27: + /* VCMPULTW */ + gen_qemu_vcmpxxwi_i64(TCG_COND_LTU, ra, disp8, rc); + break; + case 0x08: + /* VSLLW */ + tmp64 = tcg_temp_new(); + shift = tcg_temp_new(); + vc = tcg_temp_new(); + for (i = 0; i < 128; i += 32) { + tcg_gen_shri_i64(shift, cpu_fr[rb], 29); + tcg_gen_andi_i64(shift, shift, 0x1fUL); + + tcg_gen_shl_i64(vc, cpu_fr[ra + i], shift); + tcg_gen_ext32u_i64(tmp64, vc); + + tcg_gen_andi_i64(vc, cpu_fr[ra + i], + 0xffffffff00000000UL); + tcg_gen_shl_i64(vc, vc, shift); + tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); + } + tcg_temp_free(tmp64); + tcg_temp_free(shift); + tcg_temp_free(vc); + break; + case 0x28: + /* VSLLW */ + tmp64 = tcg_temp_new(); + shift = tcg_temp_new(); + vc = tcg_temp_new(); + for (i = 0; i < 128; i += 32) { + tcg_gen_movi_i64(shift, disp8 & 0x1fUL); + + tcg_gen_shl_i64(vc, cpu_fr[ra + i], shift); + tcg_gen_ext32u_i64(tmp64, vc); + + tcg_gen_andi_i64(vc, cpu_fr[ra + i], + 0xffffffff00000000UL); + tcg_gen_shl_i64(vc, vc, shift); + tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); + } + tcg_temp_free(tmp64); + tcg_temp_free(shift); + tcg_temp_free(vc); + break; + case 0x09: + /* VSRLW */ + tmp64 = tcg_temp_new(); + shift = tcg_temp_new(); + vc = tcg_temp_new(); + for (i = 0; i < 128; i += 32) { + tcg_gen_shri_i64(shift, cpu_fr[rb], 29); + tcg_gen_andi_i64(shift, shift, 0x1fUL); + + tcg_gen_ext32u_i64(vc, cpu_fr[ra + i]); + tcg_gen_shr_i64(tmp64, vc, shift); + + tcg_gen_shr_i64(vc, cpu_fr[ra + i], shift); + tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL); + tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); + } + tcg_temp_free(tmp64); + tcg_temp_free(shift); + tcg_temp_free(vc); + break; + case 0x29: + /* VSRLW */ + tmp64 = tcg_temp_new(); + shift = tcg_temp_new(); + vc = tcg_temp_new(); + for (i = 0; i < 128; i += 32) { + tcg_gen_movi_i64(shift, disp8 & 0x1fUL); + + tcg_gen_ext32u_i64(vc, cpu_fr[ra + i]); + tcg_gen_shr_i64(tmp64, vc, shift); + + tcg_gen_shr_i64(vc, cpu_fr[ra + i], shift); + tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL); + tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); + } + tcg_temp_free(tmp64); + tcg_temp_free(shift); + tcg_temp_free(vc); + break; + case 0x0A: + /* VSRAW */ + tmp64 = tcg_temp_new(); + shift = tcg_temp_new(); + vc = tcg_temp_new(); + for (i = 0; i < 128; i += 32) { + tcg_gen_shri_i64(shift, cpu_fr[rb], 29); + tcg_gen_andi_i64(shift, shift, 0x1fUL); + + tcg_gen_ext32s_i64(vc, cpu_fr[ra + i]); + tcg_gen_sar_i64(tmp64, vc, shift); + + tcg_gen_sar_i64(vc, cpu_fr[ra + i], shift); + tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL); + tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); + } + tcg_temp_free(tmp64); + tcg_temp_free(shift); + tcg_temp_free(vc); + break; + case 0x2A: + /* VSRAWI */ + tmp64 = tcg_temp_new(); + shift = tcg_temp_new(); + vc = tcg_temp_new(); + for (i = 0; i < 128; i += 32) { + tcg_gen_movi_i64(shift, disp8 & 0x1fUL); + + tcg_gen_ext32s_i64(vc, cpu_fr[ra + i]); + tcg_gen_sar_i64(tmp64, vc, shift); + + tcg_gen_sar_i64(vc, cpu_fr[ra + i], shift); + 
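+                /* high lane: arithmetic-shift the whole register, then keep only the upper 32 bits */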
tcg_gen_andi_i64(vc, vc, 0xffffffff00000000UL); + tcg_gen_or_i64(cpu_fr[rc + i], tmp64, vc); + } + tcg_temp_free(tmp64); + tcg_temp_free(shift); + tcg_temp_free(vc); + break; + case 0x0B: + /* VROLW */ + tmpa = tcg_temp_new_i32(); + tmpb = tcg_temp_new_i32(); + tmpc = tcg_temp_new_i32(); + tmp64 = tcg_temp_new(); + shift = tcg_temp_new(); + vc = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_shri_i64(shift, cpu_fr[rb], 29); + tcg_gen_andi_i64(shift, shift, 0x1fUL); + + tcg_gen_extrl_i64_i32(tmpa, cpu_fr[ra + i]); + tcg_gen_extrl_i64_i32(tmpb, shift); + + tcg_gen_rotl_i32(tmpc, tmpa, tmpb); + tcg_gen_extu_i32_i64(tmp64, tmpc); + + tcg_gen_extrh_i64_i32(tmpa, cpu_fr[ra + i]); + tcg_gen_rotl_i32(tmpc, tmpa, tmpb); + tcg_gen_extu_i32_i64(vc, tmpc); + tcg_gen_shli_i64(vc, vc, 32); + + tcg_gen_or_i64(cpu_fr[rc + i], vc, tmp64); + } + tcg_temp_free_i32(tmpa); + tcg_temp_free_i32(tmpb); + tcg_temp_free_i32(tmpc); + tcg_temp_free(tmp64); + tcg_temp_free(shift); + tcg_temp_free(vc); + break; + case 0x2B: + /* VROLW */ + tmpa = tcg_temp_new_i32(); + tmpb = tcg_temp_new_i32(); + tmpc = tcg_temp_new_i32(); + tmp64 = tcg_temp_new(); + shift = tcg_temp_new(); + vc = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_movi_i64(shift, disp8 & 0x1fUL); + + tcg_gen_extrl_i64_i32(tmpa, cpu_fr[ra + i]); + tcg_gen_extrl_i64_i32(tmpb, shift); + + tcg_gen_rotl_i32(tmpc, tmpa, tmpb); + tcg_gen_extu_i32_i64(tmp64, tmpc); + + tcg_gen_extrh_i64_i32(tmpa, cpu_fr[ra + i]); + tcg_gen_rotl_i32(tmpc, tmpa, tmpb); + tcg_gen_extu_i32_i64(vc, tmpc); + tcg_gen_shli_i64(vc, vc, 32); + + tcg_gen_or_i64(cpu_fr[rc + i], vc, tmp64); + } + tcg_temp_free_i32(tmpa); + tcg_temp_free_i32(tmpb); + tcg_temp_free_i32(tmpc); + tcg_temp_free(tmp64); + tcg_temp_free(shift); + tcg_temp_free(vc); + break; + case 0x0C: + /* SLLOW */ + tcg_gen_sllow_i64(ra, rc, rb); + break; + case 0x2C: + /* SLLOW */ + tcg_gen_sllowi_i64(ra, rc, disp8); + break; + case 0x0D: + /* SRLOW */ + tcg_gen_srlow_i64(ra, rc, rb); + break; + case 0x2D: + /* SRLOW */ + tcg_gen_srlowi_i64(ra, rc, disp8); + break; + case 0x0E: + /* VADDL */ + for (i = 0; i < 128; i += 32) { + tcg_gen_add_i64(cpu_fr[rc + i], cpu_fr[ra + i], + cpu_fr[rb + i]); + } + break; + case 0x2E: + /* VADDL */ + for (i = 0; i < 128; i += 32) { + tcg_gen_addi_i64(cpu_fr[rc + i], cpu_fr[ra + i], disp8); + } + break; + case 0x0F: + /* VSUBL */ + for (i = 0; i < 128; i += 32) { + tcg_gen_sub_i64(cpu_fr[rc + i], cpu_fr[ra + i], + cpu_fr[rb + i]); + } + break; + case 0x2F: + /* VSUBL */ + for (i = 0; i < 128; i += 32) { + tcg_gen_subi_i64(cpu_fr[rc + i], cpu_fr[ra + i], disp8); + } + break; + case 0x18: + /* CTPOPOW */ + tmp64 = tcg_const_i64(0); + tmp64_0 = tcg_temp_new(); + + for (i = 0; i < 128; i += 32) { + tcg_gen_ctpop_i64(tmp64_0, cpu_fr[ra + i]); + tcg_gen_add_i64(tmp64, tmp64, tmp64_0); + } + tcg_gen_shli_i64(cpu_fr[rc], tmp64, 29); + tcg_temp_free(tmp64); + tcg_temp_free(tmp64_0); + break; + case 0x19: + /* CTLZOW */ + va = tcg_const_i64(ra); + gen_helper_ctlzow(cpu_fr[rc], cpu_env, va); + tcg_temp_free(va); + break; + case 0x40: + /* VUCADDW */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(rb); + vc = tcg_const_i64(rc); + gen_helper_vucaddw(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x60: + /* VUCADDW */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(disp8); + vc = tcg_const_i64(rc); + gen_helper_vucaddwi(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x41: + /* 
VUCSUBW */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(rb); + vc = tcg_const_i64(rc); + gen_helper_vucsubw(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x61: + /* VUCSUBW */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(disp8); + vc = tcg_const_i64(rc); + gen_helper_vucsubwi(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x42: + /* VUCADDH */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(rb); + vc = tcg_const_i64(rc); + gen_helper_vucaddh(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x62: + /* VUCADDH */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(disp8); + vc = tcg_const_i64(rc); + gen_helper_vucaddhi(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x43: + /* VUCSUBH */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(rb); + vc = tcg_const_i64(rc); + gen_helper_vucsubh(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x63: + /* VUCSUBH */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(disp8); + vc = tcg_const_i64(rc); + gen_helper_vucsubhi(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x44: + /* VUCADDB */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(rb); + vc = tcg_const_i64(rc); + gen_helper_vucaddb(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x64: + /* VUCADDB */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(disp8); + vc = tcg_const_i64(rc); + gen_helper_vucaddbi(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x45: + /* VUCSUBB */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(rb); + vc = tcg_const_i64(rc); + gen_helper_vucsubb(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x65: + /* VUCSUBB */ + va = tcg_const_i64(ra); + vb = tcg_const_i64(disp8); + vc = tcg_const_i64(rc); + gen_helper_vucsubbi(cpu_env, va, vb, vc); + tcg_temp_free(va); + tcg_temp_free(vb); + tcg_temp_free(vc); + break; + case 0x80: + /* VADDS */ + for (i = 0; i < 128; i += 32) + gen_fadds(ctx, ra + i, rb + i, rc + i); + break; + case 0x81: + /* VADDD */ + for (i = 0; i < 128; i += 32) + gen_faddd(ctx, ra + i, rb + i, rc + i); + break; + case 0x82: + /* VSUBS */ + for (i = 0; i < 128; i += 32) + gen_fsubs(ctx, ra + i, rb + i, rc + i); + break; + case 0x83: + /* VSUBD */ + for (i = 0; i < 128; i += 32) + gen_fsubd(ctx, ra + i, rb + i, rc + i); + break; + case 0x84: + /* VMULS */ + for (i = 0; i < 128; i += 32) + gen_fmuls(ctx, ra + i, rb + i, rc + i); + break; + case 0x85: + /* VMULD */ + for (i = 0; i < 128; i += 32) + gen_fmuld(ctx, ra + i, rb + i, rc + i); + break; + case 0x86: + /* VDIVS */ + for (i = 0; i < 128; i += 32) + gen_fdivs(ctx, ra + i, rb + i, rc + i); + break; + case 0x87: + /* VDIVD */ + for (i = 0; i < 128; i += 32) + gen_fdivd(ctx, ra + i, rb + i, rc + i); + break; + case 0x88: + /* VSQRTS */ + for (i = 0; i < 128; i += 32) + gen_helper_fsqrts(cpu_fr[rc + i], cpu_env, + cpu_fr[rb + i]); + break; + case 0x89: + /* VSQRTD */ + for (i = 0; i < 128; i += 32) + gen_helper_fsqrt(cpu_fr[rc + i], cpu_env, + cpu_fr[rb + i]); + break; + case 0x8C: + /* VFCMPEQ */ + for (i = 0; i < 128; i += 32) + gen_fcmpeq(ctx, ra + i, rb + i, rc + i); + break; + case 0x8D: + /* VFCMPLE */ + for (i = 0; i < 128; i += 32) + 
gen_fcmple(ctx, ra + i, rb + i, rc + i); + break; + case 0x8E: + /* VFCMPLT */ + for (i = 0; i < 128; i += 32) + gen_fcmplt(ctx, ra + i, rb + i, rc + i); + break; + case 0x8F: + /* VFCMPUN */ + for (i = 0; i < 128; i += 32) + gen_fcmpun(ctx, ra + i, rb + i, rc + i); + break; + case 0x90: + /* VCPYS */ + tcg_gen_vcpys_i64(ra, rb, rc); + break; + case 0x91: + /* VCPYSE */ + tcg_gen_vcpyse_i64(ra, rb, rc); + break; + case 0x92: + /* VCPYSN */ + tcg_gen_vcpysn_i64(ra, rb, rc); + break; + case 0x93: + /* VSUMS */ + gen_fadds(ctx, ra, ra + 32, rc); + gen_fadds(ctx, rc, ra + 64, rc); + gen_fadds(ctx, rc, ra + 96, rc); + break; + case 0x94: + /* VSUMD */ + gen_faddd(ctx, ra, ra + 32, rc); + gen_faddd(ctx, rc, ra + 64, rc); + gen_faddd(ctx, rc, ra + 96, rc); + break; + default: + printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn8); + ret = gen_invalid(ctx); + break; + } + break; + case 0x1B: + /* SIMD */ + if (unlikely(rc == 31)) break; + switch (fn6) { + case 0x00: + /* VMAS */ + for (i = 0; i < 128; i += 32) + gen_helper_fmas(cpu_fr[rc + i], cpu_env, cpu_fr[ra + i], + cpu_fr[rb + i], cpu_fr[rd + i]); + break; + case 0x01: + /* VMAD */ + for (i = 0; i < 128; i += 32) + gen_helper_fmad(cpu_fr[rc + i], cpu_env, cpu_fr[ra + i], + cpu_fr[rb + i], cpu_fr[rd + i]); + break; + case 0x02: + /* VMSS */ + for (i = 0; i < 128; i += 32) + gen_helper_fmss(cpu_fr[rc + i], cpu_env, cpu_fr[ra + i], + cpu_fr[rb + i], cpu_fr[rd + i]); + break; + case 0x03: + /* VMSD */ + for (i = 0; i < 128; i += 32) + gen_helper_fmsd(cpu_fr[rc + i], cpu_env, cpu_fr[ra + i], + cpu_fr[rb + i], cpu_fr[rd + i]); + break; + case 0x04: + /* VNMAS */ + for (i = 0; i < 128; i += 32) + gen_helper_fnmas(cpu_fr[rc + i], cpu_env, + cpu_fr[ra + i], cpu_fr[rb + i], + cpu_fr[rd + i]); + break; + case 0x05: + /* VNMAD */ + for (i = 0; i < 128; i += 32) + gen_helper_fnmad(cpu_fr[rc + i], cpu_env, + cpu_fr[ra + i], cpu_fr[rb + i], + cpu_fr[rd + i]); + break; + case 0x06: + /* VNMSS */ + for (i = 0; i < 128; i += 32) + gen_helper_fnmss(cpu_fr[rc + i], cpu_env, + cpu_fr[ra + i], cpu_fr[rb + i], + cpu_fr[rd + i]); + break; + case 0x07: + /* VNMSD */ + for (i = 0; i < 128; i += 32) + gen_helper_fnmsd(cpu_fr[rc + i], cpu_env, + cpu_fr[ra + i], cpu_fr[rb + i], + cpu_fr[rd + i]); + break; + case 0x10: + /* VFSELEQ */ + tmp64 = tcg_temp_new(); + tmp64_0 = tcg_const_i64(0); + for (i = 0; i < 128; i += 32) { + gen_helper_fcmpeq(tmp64, cpu_env, cpu_fr[ra + i], + tmp64_0); + tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64, + tmp64_0, cpu_fr[rd + i], + cpu_fr[rb + i]); + } + tcg_temp_free(tmp64); + tcg_temp_free(tmp64_0); + break; + case 0x12: + /* VFSELLT */ + tmp64 = tcg_temp_new(); + tmp64_0 = tcg_const_i64(0); + tmp64_1 = tcg_temp_new(); + for (i = 0; i < 128; i += 32) { + tcg_gen_andi_i64(tmp64, cpu_fr[ra + i], + 0x7fffffffffffffffUL); + tcg_gen_setcond_i64(TCG_COND_NE, tmp64, tmp64, + tmp64_0); + tcg_gen_shri_i64(tmp64_1, cpu_fr[ra +i], 63); + tcg_gen_and_i64(tmp64, tmp64_1, tmp64); + tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64, + tmp64_0, cpu_fr[rd + i], + cpu_fr[rb + i]); + } + tcg_temp_free(tmp64); + tcg_temp_free(tmp64_0); + tcg_temp_free(tmp64_1); + break; + case 0x13: + /* VFSELLE */ + tmp64 = tcg_temp_new(); + tmp64_0 = tcg_const_i64(0); + tmp64_1 = tcg_temp_new(); + for (i = 0; i < 128; i += 32) { + tcg_gen_andi_i64(tmp64, cpu_fr[ra + i], + 0x7fffffffffffffffUL); + tcg_gen_setcond_i64(TCG_COND_EQ, tmp64, tmp64, + tmp64_0); + tcg_gen_shri_i64(tmp64_1, cpu_fr[ra + i], 63); + tcg_gen_or_i64(tmp64, tmp64_1, tmp64); + 
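+                    /* tmp64 is nonzero iff fa has zero magnitude or its sign bit set */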
tcg_gen_movcond_i64(TCG_COND_EQ, cpu_fr[rc + i], tmp64, + tmp64_0, cpu_fr[rd + i], + cpu_fr[rb + i]); + } + tcg_temp_free(tmp64); + tcg_temp_free(tmp64_0); + tcg_temp_free(tmp64_1); + break; + case 0x18: + /* VSELEQW */ + gen_qemu_vselxxw(TCG_COND_EQ, ra, rb, rd, rc, 0); + break; + case 0x38: + /* VSELEQW */ + gen_qemu_vselxxwi(TCG_COND_EQ, ra, rb, disp5, rc, 0); + break; + case 0x19: + /* VSELLBCW */ + gen_qemu_vselxxw(TCG_COND_EQ, ra, rb, rd, rc, 1); + break; + case 0x39: + /* VSELLBCW */ + gen_qemu_vselxxwi(TCG_COND_EQ, ra, rb, disp5, rc, 1); + break; + case 0x1A: + /* VSELLTW */ + gen_qemu_vselxxw(TCG_COND_LT, ra, rb, rd, rc, 0); + break; + case 0x3A: + /* VSELLTW */ + gen_qemu_vselxxwi(TCG_COND_LT, ra, rb, disp5, rc, 0); + break; + case 0x1B: + /* VSELLEW */ + gen_qemu_vselxxw(TCG_COND_LE, ra, rb, rd, rc, 0); + break; + case 0x3B: + /* VSELLEW */ + gen_qemu_vselxxwi(TCG_COND_LE, ra, rb, disp5, rc, 0); + break; + case 0x20: + /* VINSW */ + if (disp5 > 7) break; + tmp64 = tcg_temp_new(); + tmp32 = tcg_temp_new_i32(); + gen_helper_s_to_memory(tmp32, cpu_fr[ra]); + tcg_gen_extu_i32_i64(tmp64, tmp32); + tcg_gen_shli_i64(tmp64, tmp64, (disp5 % 2) * 32); + for (i = 0; i < 128; i += 32) { + tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[rb + i]); + } + if (disp5 % 2) { + tcg_gen_andi_i64(cpu_fr[rc + (disp5 / 2) * 32], + cpu_fr[rc + (disp5 / 2) * 32], + 0xffffffffUL); + } else { + tcg_gen_andi_i64(cpu_fr[rc + (disp5 / 2) * 32], + cpu_fr[rc + (disp5 / 2) * 32], + 0xffffffff00000000UL); + } + tcg_gen_or_i64(cpu_fr[rc + (disp5 / 2) * 32], + cpu_fr[rc + (disp5 / 2) * 32], tmp64); + tcg_temp_free(tmp64); + tcg_temp_free_i32(tmp32); + break; + case 0x21: + /* VINSF */ + if (disp5 > 3) break; + tmp64 = tcg_temp_new(); + tcg_gen_mov_i64(tmp64, cpu_fr[ra]); + + for (i = 0; i < 128; i += 32) { + tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[rb + i]); + } + tcg_gen_mov_i64(cpu_fr[rc + disp5 * 32], tmp64); + tcg_temp_free(tmp64); + break; + case 0x22: + /* VEXTW */ + if (disp5 > 7) break; + tmp64 = tcg_temp_new(); + tmp32 = tcg_temp_new_i32(); + tcg_gen_shri_i64(tmp64, cpu_fr[ra + (disp5 / 2) * 32], + (disp5 % 2) * 32); + tcg_gen_extrl_i64_i32(tmp32, tmp64); + gen_helper_memory_to_s(tmp64, tmp32); + tcg_gen_mov_i64(cpu_fr[rc], tmp64); + tcg_temp_free(tmp64); + tcg_temp_free_i32(tmp32); + break; + case 0x23: + /* VEXTF */ + if (disp5 > 3) break; + tcg_gen_mov_i64(cpu_fr[rc], cpu_fr[ra + disp5 * 32]); + break; + case 0x24: + /* VCPYW */ + tmp64 = tcg_temp_new(); + tmp64_0 = tcg_temp_new(); + /* FIXME: for debug + tcg_gen_movi_i64(tmp64, ra); + gen_helper_v_print(cpu_env, tmp64); + */ + tcg_gen_shri_i64(tmp64, cpu_fr[ra], 29); + tcg_gen_andi_i64(tmp64_0, tmp64, 0x3fffffffUL); + tcg_gen_shri_i64(tmp64, cpu_fr[ra], 62); + tcg_gen_shli_i64(tmp64, tmp64, 30); + tcg_gen_or_i64(tmp64_0, tmp64, tmp64_0); + tcg_gen_mov_i64(tmp64, tmp64_0); + tcg_gen_shli_i64(tmp64, tmp64, 32); + tcg_gen_or_i64(tmp64_0, tmp64_0, tmp64); + tcg_gen_mov_i64(cpu_fr[rc], tmp64_0); + tcg_gen_mov_i64(cpu_fr[rc + 32], cpu_fr[rc]); + tcg_gen_mov_i64(cpu_fr[rc + 64], cpu_fr[rc]); + tcg_gen_mov_i64(cpu_fr[rc + 96], cpu_fr[rc]); + /* FIXME: for debug + tcg_gen_movi_i64(tmp64, rb); + gen_helper_v_print(cpu_env, tmp64); + tcg_gen_movi_i64(tmp64, rc); + gen_helper_v_print(cpu_env, tmp64); + */ + tcg_temp_free(tmp64); + tcg_temp_free(tmp64_0); + break; + case 0x25: + /* VCPYF */ + for (i = 0; i < 128; i += 32) { + tcg_gen_mov_i64(cpu_fr[rc + i], cpu_fr[ra]); + } + break; + case 0x26: + /* VCONW */ + tmp64 = tcg_const_i64(ra << 8 | rb); + tmp64_0 = tcg_temp_new(); 
+ vd = tcg_const_i64(rc); + tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 2); + tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x7ul); + gen_helper_vconw(cpu_env, tmp64, vd, tmp64_0); + tcg_temp_free(tmp64_0); + tcg_temp_free(tmp64); + tcg_temp_free(vd); + break; + case 0x27: + /* VSHFW */ + tmp64 = tcg_const_i64(ra << 8 | rb); + vd = tcg_const_i64(rc); + gen_helper_vshfw(cpu_env, tmp64, vd, cpu_fr[rd]); + tcg_temp_free(tmp64); + tcg_temp_free(vd); + break; + case 0x28: + /* VCONS */ + tmp64 = tcg_const_i64(ra << 8 | rb); + tmp64_0 = tcg_temp_new(); + vd = tcg_const_i64(rc); + tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 2); + tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x3ul); + gen_helper_vcond(cpu_env, tmp64, vd, tmp64_0); + tcg_temp_free(tmp64_0); + tcg_temp_free(tmp64); + tcg_temp_free(vd); + break; + case 0x29: + /* FIXME: VCOND maybe it's wrong in the instruction book + * that there are no temp. */ + tmp64 = tcg_const_i64(ra << 8 | rb); + tmp64_0 = tcg_temp_new(); + vd = tcg_const_i64(rc); + tcg_gen_shri_i64(tmp64_0, cpu_fr[rd], 3); + tcg_gen_andi_i64(tmp64_0, tmp64_0, 0x3ul); + gen_helper_vcond(cpu_env, tmp64, vd, tmp64_0); + tcg_temp_free(tmp64_0); + tcg_temp_free(tmp64); + tcg_temp_free(vd); + break; + default: + printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn6); + ret = gen_invalid(ctx); + break; + } + break; + case 0x1C: + switch (fn4) { + case 0x0: + /* VLDW_U */ + if (unlikely(ra == 31)) break; + gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12, + ~0x1fUL); + break; + case 0x1: + /* VSTW_U */ + gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12, + ~0x1fUL); + break; + case 0x2: + /* VLDS_U */ + if (unlikely(ra == 31)) break; + gen_load_mem_simd(ctx, &gen_qemu_vlds, ra, rb, disp12, + ~0xfUL); + break; + case 0x3: + /* VSTS_U */ + gen_store_mem_simd(ctx, &gen_qemu_vsts, ra, rb, disp12, + ~0xfUL); + break; + case 0x4: + /* VLDD_U */ + if (unlikely(ra == 31)) break; + gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12, + ~0x1fUL); + break; + case 0x5: + /* VSTD_U */ + gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12, + ~0x1fUL); + break; + case 0x8: + /* VSTW_UL */ + gen_store_mem_simd(ctx, &gen_qemu_vstw_ul, ra, rb, disp12, + 0); + break; + case 0x9: + /* VSTW_UH */ + gen_store_mem_simd(ctx, &gen_qemu_vstw_uh, ra, rb, disp12, + 0); + break; + case 0xa: + /* VSTS_UL */ + gen_store_mem_simd(ctx, &gen_qemu_vsts_ul, ra, rb, disp12, + 0); + break; + case 0xb: + /* VSTS_UH */ + gen_store_mem_simd(ctx, &gen_qemu_vsts_uh, ra, rb, disp12, + 0); + break; + case 0xc: + /* VSTD_UL */ + gen_store_mem_simd(ctx, &gen_qemu_vstd_ul, ra, rb, disp12, + 0); + break; + case 0xd: + /* VSTD_UH */ + gen_store_mem_simd(ctx, &gen_qemu_vstd_uh, ra, rb, disp12, + 0); + break; + case 0xe: + /* VLDD_NC */ + gen_load_mem_simd(ctx, &gen_qemu_vldd, ra, rb, disp12, 0); + break; + case 0xf: + /* VSTD_NC */ + gen_store_mem_simd(ctx, &gen_qemu_vstd, ra, rb, disp12, 0); + break; + default: + printf("ILLEGAL BELOW OPC[%x] func[%08x]\n", opc, fn4); + ret = gen_invalid(ctx); + break; + } + break; + case 0x20: + /* LDBU */ + gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0); + break; + case 0x21: + /* LDHU */ + gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0); + break; + case 0x22: + /* LDW */ + gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0); + break; + case 0x23: + /* LDL */ + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0); + break; + case 0x24: + /* LDL_U */ + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1); + break; + case 0x25: + /* PRI_LD */ +#ifndef CONFIG_USER_ONLY 
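+        /* insn bit 12 selects the access size: pri_ldl (64-bit) when set, pri_ldw (32-bit) when clear */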
+ if ((insn >> 12) & 1) { + gen_load_mem(ctx, &gen_qemu_pri_ldl, ra, rb, disp12, 0, 1); + } else { + gen_load_mem(ctx, &gen_qemu_pri_ldw, ra, rb, disp12, 0, 1); + } +#endif + break; + case 0x26: + /* FLDS */ + gen_load_mem(ctx, &gen_qemu_flds, ra, rb, disp16, 1, 0); + break; + case 0x27: + /* FLDD */ + gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0); + break; + case 0x28: + /* STB */ + gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0); + break; + case 0x29: + /* STH */ + gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0); + break; + case 0x2a: + /* STW */ + gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0); + break; + case 0x2b: + /* STL */ + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0); + break; + case 0x2c: + /* STL_U */ + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1); + break; + case 0x2d: + /* PRI_ST */ +#ifndef CONFIG_USER_ONLY + if ((insn >> 12) & 1) { + gen_store_mem(ctx, &gen_qemu_pri_stl, ra, rb, disp12, 0, 1); + } else { + gen_store_mem(ctx, &gen_qemu_pri_stw, ra, rb, disp12, 0, 1); + } +#endif + break; + case 0x2e: + /* FSTS */ + gen_store_mem(ctx, &gen_qemu_fsts, ra, rb, disp16, 1, 0); + break; + case 0x2f: + /* FSTD */ + gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0); + break; + case 0x30: + /* BEQ */ + ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, (uint64_t)-1); + break; + case 0x31: + /* BNE */ + ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, (uint64_t)-1); + break; + case 0x32: + /* BLT */ + ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, (uint64_t)-1); + break; + case 0x33: + /* BLE */ + ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, (uint64_t)-1); + break; + case 0x34: + /* BGT */ + ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, (uint64_t)-1); + break; + case 0x35: + /* BGE */ + ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, (uint64_t)-1); + break; + case 0x36: + /* BLBC */ + ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1); + break; + case 0x37: + /* BLBS */ + ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1); + break; + case 0x38: + /* FBEQ */ + ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21); + break; + case 0x39: + /* FBNE */ + ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21); + break; + case 0x3a: + /* FBLT */ + ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21); + break; + case 0x3b: + /* FBLE */ + ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21); + break; + case 0x3c: + /* FBGT */ + ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21); + break; + case 0x3d: + /* FBGE */ + ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21); + break; + case 0x3f: + /* LDIH */ + disp16 = ((uint32_t)disp16) << 16; + if (ra == 31) break; + va = load_gir(ctx, ra); + if (rb == 31) { + tcg_gen_movi_i64(va, disp16); + } else { + tcg_gen_addi_i64(va, load_gir(ctx, rb), (int64_t)disp16); + } + break; + case 0x3e: + /* LDI */ + if (ra == 31) break; + va = load_gir(ctx, ra); + if (rb == 31) { + tcg_gen_movi_i64(va, disp16); + } else { + tcg_gen_addi_i64(va, load_gir(ctx, rb), (int64_t)disp16); + } + break; + do_invalid: + default: + printf("ILLEGAL BELOW OPC[%x] insn[%08x]\n", opc, insn); + ret = gen_invalid(ctx); + } + return ret; +} +static void sw64_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext* ctx = container_of(dcbase, DisasContext, base); + CPUSW64State* env = cpu->env_ptr; /*init by instance_initfn*/ + + ctx->tbflags = ctx->base.tb->flags; + ctx->mem_idx = cpu_mmu_index(env, false); +#ifdef CONFIG_USER_ONLY + ctx->ir = cpu_std_ir; +#else + ctx->ir = (ctx->tbflags & ENV_FLAG_HM_MODE ? 
cpu_hm_ir : cpu_std_ir); +#endif + ctx->zero = NULL; +} + +static void sw64_tr_tb_start(DisasContextBase *db, CPUState *cpu) +{ +} + +static void sw64_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) +{ + tcg_gen_insn_start(dcbase->pc_next); +} + +static void sw64_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) +{ + DisasContext *ctx = container_of(dcbase, DisasContext, base); + CPUSW64State *env = cpu->env_ptr; + uint32_t insn; + + insn = cpu_ldl_code(env, ctx->base.pc_next & (~3UL)); + ctx->env = env; + ctx->base.pc_next += 4; + ctx->base.is_jmp = ctx->translate_one(dcbase, insn, cpu); + + free_context_temps(ctx); + translator_loop_temp_check(&ctx->base); +} + +/* FIXME:Linhainan */ +static void sw64_tr_tb_stop(DisasContextBase* dcbase, CPUState* cpu) { + DisasContext* ctx = container_of(dcbase, DisasContext, base); + + switch (ctx->base.is_jmp) { + case DISAS_NORETURN: + break; + case DISAS_TOO_MANY: + if (use_goto_tb(ctx, ctx->base.pc_next)) { + tcg_gen_goto_tb(0); + tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); + tcg_gen_exit_tb(ctx->base.tb, 0); + } + /* FALLTHRU */ + case DISAS_PC_STALE: + tcg_gen_movi_i64(cpu_pc, ctx->base.pc_next); + /* FALLTHRU */ + case DISAS_PC_UPDATED: + if (!use_exit_tb(ctx)) { + tcg_gen_lookup_and_goto_ptr(); + break; + } + /* FALLTHRU */ + case DISAS_PC_UPDATED_NOCHAIN: + if (ctx->base.singlestep_enabled) { + /* FIXME: for gdb*/ + cpu_loop_exit(cpu); + } else { + tcg_gen_exit_tb(NULL, 0); + } + break; + default: + g_assert_not_reached(); + } +} + +static void sw64_tr_disas_log(const DisasContextBase* dcbase, CPUState* cpu) { + SW64CPU* sc = SW64_CPU(cpu); + qemu_log("IN(%d): %s\n", sc->cid, + lookup_symbol(dcbase->pc_first)); + log_target_disas(cpu, dcbase->pc_first & (~0x3UL), dcbase->tb->size); +} + +static void init_transops(CPUState *cpu, DisasContext *dc) +{ + dc->translate_one = translate_one; +} + +void restore_state_to_opc(CPUSW64State* env, TranslationBlock* tb, + target_ulong* data) { + env->pc = data[0]; +} + +static const TranslatorOps sw64_trans_ops = { + .init_disas_context = sw64_tr_init_disas_context, + .tb_start = sw64_tr_tb_start, + .insn_start = sw64_tr_insn_start, + .translate_insn = sw64_tr_translate_insn, + .tb_stop = sw64_tr_tb_stop, + .disas_log = sw64_tr_disas_log, +}; + +void gen_intermediate_code(CPUState* cpu, TranslationBlock* tb, int max_insns) +{ + DisasContext dc; + init_transops(cpu, &dc); + translator_loop(&sw64_trans_ops, &dc.base, cpu, tb, max_insns); +} diff --git a/target/sw64/translate.h b/target/sw64/translate.h new file mode 100644 index 0000000000000000000000000000000000000000..e93df0815eb6c746a6dd205c7313c5f62e849ead --- /dev/null +++ b/target/sw64/translate.h @@ -0,0 +1,60 @@ +#ifndef SW64_TRANSLATE_H +#define SW64_TRANSLATE_H +#include "qemu/osdep.h" +#include "cpu.h" +#include "sysemu/cpus.h" +#include "disas/disas.h" +#include "qemu/host-utils.h" +#include "exec/exec-all.h" +#include "exec/cpu_ldst.h" +#include "tcg/tcg-op.h" +#include "exec/helper-proto.h" +#include "exec/helper-gen.h" +#include "trace-tcg.h" +#include "exec/translator.h" +#include "exec/log.h" + +#define DISAS_PC_UPDATED_NOCHAIN DISAS_TARGET_0 +#define DISAS_PC_UPDATED DISAS_TARGET_1 +#define DISAS_PC_STALE DISAS_TARGET_2 +#define DISAS_PC_UPDATED_T DISAS_TOO_MANY + +typedef struct DisasContext DisasContext; +struct DisasContext { + DisasContextBase base; + + uint32_t tbflags; + + /* The set of registers active in the current context. */ + TCGv *ir; + + /* Accel: Temporaries for $31 and $f31 as source and destination. 
*/ + TCGv zero; + int mem_idx; + CPUSW64State *env; + DisasJumpType (*translate_one)(DisasContextBase *dcbase, uint32_t insn, + CPUState *cpu); +}; + +extern TCGv cpu_pc; +extern TCGv cpu_std_ir[31]; +extern TCGv cpu_fr[128]; +extern TCGv cpu_lock_addr; +extern TCGv cpu_lock_flag; +extern TCGv cpu_lock_success; +#ifdef SW64_FIXLOCK +extern TCGv cpu_lock_value; +#endif +#ifndef CONFIG_USER_ONLY +extern TCGv cpu_hm_ir[31]; +#endif + +DisasJumpType translate_one(DisasContextBase *dcbase, uint32_t insn, + CPUState *cpu); +DisasJumpType th1_translate_one(DisasContextBase *dcbase, uint32_t insn, + CPUState *cpu); +bool use_exit_tb(DisasContext *ctx); +bool use_goto_tb(DisasContext *ctx, uint64_t dest); +void insn_profile(DisasContext *ctx, uint32_t insn); +extern void gen_fold_mzero(TCGCond cond, TCGv dest, TCGv src); +#endif diff --git a/tcg/loongarch64/tcg-insn-defs.c.inc b/tcg/loongarch64/tcg-insn-defs.c.inc new file mode 100644 index 0000000000000000000000000000000000000000..2a8fdad6264f5f2188c8b090b5d509c02cb70a69 --- /dev/null +++ b/tcg/loongarch64/tcg-insn-defs.c.inc @@ -0,0 +1,985 @@ +/* + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + + */ + +typedef enum { + OPC_CLZ_W = 0x00001400, + OPC_CTZ_W = 0x00001c00, + OPC_CLZ_D = 0x00002400, + OPC_CTZ_D = 0x00002c00, + OPC_REVB_2H = 0x00003000, + OPC_REVB_2W = 0x00003800, + OPC_REVB_D = 0x00003c00, + OPC_SEXT_H = 0x00005800, + OPC_SEXT_B = 0x00005c00, + OPC_ADD_W = 0x00100000, + OPC_ADD_D = 0x00108000, + OPC_SUB_W = 0x00110000, + OPC_SUB_D = 0x00118000, + OPC_SLT = 0x00120000, + OPC_SLTU = 0x00128000, + OPC_MASKEQZ = 0x00130000, + OPC_MASKNEZ = 0x00138000, + OPC_NOR = 0x00140000, + OPC_AND = 0x00148000, + OPC_OR = 0x00150000, + OPC_XOR = 0x00158000, + OPC_ORN = 0x00160000, + OPC_ANDN = 0x00168000, + OPC_SLL_W = 0x00170000, + OPC_SRL_W = 0x00178000, + OPC_SRA_W = 0x00180000, + OPC_SLL_D = 0x00188000, + OPC_SRL_D = 0x00190000, + OPC_SRA_D = 0x00198000, + OPC_ROTR_W = 0x001b0000, + OPC_ROTR_D = 0x001b8000, + OPC_MUL_W = 0x001c0000, + OPC_MULH_W = 0x001c8000, + OPC_MULH_WU = 0x001d0000, + OPC_MUL_D = 0x001d8000, + OPC_MULH_D = 0x001e0000, + OPC_MULH_DU = 0x001e8000, + OPC_DIV_W = 0x00200000, + OPC_MOD_W = 0x00208000, + OPC_DIV_WU = 0x00210000, + OPC_MOD_WU = 0x00218000, + OPC_DIV_D = 0x00220000, + OPC_MOD_D = 0x00228000, + OPC_DIV_DU = 0x00230000, + OPC_MOD_DU = 0x00238000, + OPC_SLLI_W = 0x00408000, + OPC_SLLI_D = 0x00410000, + OPC_SRLI_W = 0x00448000, + OPC_SRLI_D = 0x00450000, + OPC_SRAI_W = 0x00488000, + OPC_SRAI_D = 0x00490000, + OPC_ROTRI_W = 0x004c8000, + OPC_ROTRI_D = 0x004d0000, + OPC_BSTRINS_W = 0x00600000, + OPC_BSTRPICK_W = 0x00608000, + OPC_BSTRINS_D = 0x00800000, + OPC_BSTRPICK_D = 0x00c00000, + OPC_SLTI = 0x02000000, + OPC_SLTUI = 0x02400000, + OPC_ADDI_W = 0x02800000, + OPC_ADDI_D = 0x02c00000, + OPC_CU52I_D = 0x03000000, + OPC_ANDI = 0x03400000, + OPC_ORI = 0x03800000, + OPC_XORI = 0x03c00000, + OPC_LU12I_W = 0x14000000, + OPC_CU32I_D = 0x16000000, + 
OPC_PCADDU2I = 0x18000000, + OPC_PCALAU12I = 0x1a000000, + OPC_PCADDU12I = 0x1c000000, + OPC_PCADDU18I = 0x1e000000, + OPC_LD_B = 0x28000000, + OPC_LD_H = 0x28400000, + OPC_LD_W = 0x28800000, + OPC_LD_D = 0x28c00000, + OPC_ST_B = 0x29000000, + OPC_ST_H = 0x29400000, + OPC_ST_W = 0x29800000, + OPC_ST_D = 0x29c00000, + OPC_LD_BU = 0x2a000000, + OPC_LD_HU = 0x2a400000, + OPC_LD_WU = 0x2a800000, + OPC_LDX_B = 0x38000000, + OPC_LDX_H = 0x38040000, + OPC_LDX_W = 0x38080000, + OPC_LDX_D = 0x380c0000, + OPC_STX_B = 0x38100000, + OPC_STX_H = 0x38140000, + OPC_STX_W = 0x38180000, + OPC_STX_D = 0x381c0000, + OPC_LDX_BU = 0x38200000, + OPC_LDX_HU = 0x38240000, + OPC_LDX_WU = 0x38280000, + OPC_DBAR = 0x38720000, + OPC_JIRL = 0x4c000000, + OPC_B = 0x50000000, + OPC_BL = 0x54000000, + OPC_BEQ = 0x58000000, + OPC_BNE = 0x5c000000, + OPC_BGT = 0x60000000, + OPC_BLE = 0x64000000, + OPC_BGTU = 0x68000000, + OPC_BLEU = 0x6c000000, +} LoongArchInsn; + +static int32_t __attribute__((unused)) +encode_d_slot(LoongArchInsn opc, uint32_t d) +{ + return opc | d; +} + +static int32_t __attribute__((unused)) +encode_dj_slots(LoongArchInsn opc, uint32_t d, uint32_t j) +{ + return opc | d | j << 5; +} + +static int32_t __attribute__((unused)) +encode_djk_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k) +{ + return opc | d | j << 5 | k << 10; +} + +static int32_t __attribute__((unused)) +encode_djkm_slots(LoongArchInsn opc, uint32_t d, uint32_t j, uint32_t k, + uint32_t m) +{ + return opc | d | j << 5 | k << 10 | m << 16; +} + +static int32_t __attribute__((unused)) +encode_dk_slots(LoongArchInsn opc, uint32_t d, uint32_t k) +{ + return opc | d | k << 10; +} + +static int32_t __attribute__((unused)) +encode_dj_insn(LoongArchInsn opc, TCGReg d, TCGReg j) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + return encode_dj_slots(opc, d, j); +} + +static int32_t __attribute__((unused)) +encode_djk_insn(LoongArchInsn opc, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(k >= 0 && k <= 0x1f); + return encode_djk_slots(opc, d, j, k); +} + +static int32_t __attribute__((unused)) +encode_djsk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(sk12 >= -0x800 && sk12 <= 0x7ff); + return encode_djk_slots(opc, d, j, sk12 & 0xfff); +} + +static int32_t __attribute__((unused)) +encode_djsk16_insn(LoongArchInsn opc, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(sk16 >= -0x8000 && sk16 <= 0x7fff); + return encode_djk_slots(opc, d, j, sk16 & 0xffff); +} + +static int32_t __attribute__((unused)) +encode_djuk12_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk12) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk12 <= 0xfff); + return encode_djk_slots(opc, d, j, uk12); +} + +static int32_t __attribute__((unused)) +encode_djuk5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk5 <= 0x1f); + return encode_djk_slots(opc, d, j, uk5); +} + +static int32_t __attribute__((unused)) +encode_djuk5um5_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk5, + uint32_t um5) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j 
<= 0x1f); + tcg_debug_assert(uk5 <= 0x1f); + tcg_debug_assert(um5 <= 0x1f); + return encode_djkm_slots(opc, d, j, uk5, um5); +} + +static int32_t __attribute__((unused)) +encode_djuk6_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk6 <= 0x3f); + return encode_djk_slots(opc, d, j, uk6); +} + +static int32_t __attribute__((unused)) +encode_djuk6um6_insn(LoongArchInsn opc, TCGReg d, TCGReg j, uint32_t uk6, + uint32_t um6) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(j >= 0 && j <= 0x1f); + tcg_debug_assert(uk6 <= 0x3f); + tcg_debug_assert(um6 <= 0x3f); + return encode_djkm_slots(opc, d, j, uk6, um6); +} + +static int32_t __attribute__((unused)) +encode_dsj20_insn(LoongArchInsn opc, TCGReg d, int32_t sj20) +{ + tcg_debug_assert(d >= 0 && d <= 0x1f); + tcg_debug_assert(sj20 >= -0x80000 && sj20 <= 0x7ffff); + return encode_dj_slots(opc, d, sj20 & 0xfffff); +} + +static int32_t __attribute__((unused)) +encode_sd10k16_insn(LoongArchInsn opc, int32_t sd10k16) +{ + tcg_debug_assert(sd10k16 >= -0x2000000 && sd10k16 <= 0x1ffffff); + return encode_dk_slots(opc, (sd10k16 >> 16) & 0x3ff, sd10k16 & 0xffff); +} + +static int32_t __attribute__((unused)) +encode_ud15_insn(LoongArchInsn opc, uint32_t ud15) +{ + tcg_debug_assert(ud15 <= 0x7fff); + return encode_d_slot(opc, ud15); +} + +/* Emits the `clz.w d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_clz_w(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_CLZ_W, d, j)); +} + +/* Emits the `ctz.w d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ctz_w(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_CTZ_W, d, j)); +} + +/* Emits the `clz.d d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_clz_d(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_CLZ_D, d, j)); +} + +/* Emits the `ctz.d d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ctz_d(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_CTZ_D, d, j)); +} + +/* Emits the `revb.2h d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_revb_2h(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_REVB_2H, d, j)); +} + +/* Emits the `revb.2w d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_revb_2w(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_REVB_2W, d, j)); +} + +/* Emits the `revb.d d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_revb_d(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_REVB_D, d, j)); +} + +/* Emits the `sext.h d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sext_h(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_SEXT_H, d, j)); +} + +/* Emits the `sext.b d, j` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sext_b(TCGContext *s, TCGReg d, TCGReg j) +{ + tcg_out32(s, encode_dj_insn(OPC_SEXT_B, d, j)); +} + +/* Emits the `add.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_add_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ADD_W, d, j, k)); +} + +/* Emits the `add.d d, j, k` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_add_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ADD_D, d, j, k)); +} + +/* Emits the `sub.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sub_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SUB_W, d, j, k)); +} + +/* Emits the `sub.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sub_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SUB_D, d, j, k)); +} + +/* Emits the `slt d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_slt(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SLT, d, j, k)); +} + +/* Emits the `sltu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sltu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SLTU, d, j, k)); +} + +/* Emits the `maskeqz d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_maskeqz(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MASKEQZ, d, j, k)); +} + +/* Emits the `masknez d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_masknez(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MASKNEZ, d, j, k)); +} + +/* Emits the `nor d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_nor(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_NOR, d, j, k)); +} + +/* Emits the `and d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_and(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_AND, d, j, k)); +} + +/* Emits the `or d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_or(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_OR, d, j, k)); +} + +/* Emits the `xor d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_xor(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_XOR, d, j, k)); +} + +/* Emits the `orn d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_orn(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ORN, d, j, k)); +} + +/* Emits the `andn d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_andn(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ANDN, d, j, k)); +} + +/* Emits the `sll.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sll_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SLL_W, d, j, k)); +} + +/* Emits the `srl.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srl_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SRL_W, d, j, k)); +} + +/* Emits the `sra.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sra_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SRA_W, d, j, k)); +} + +/* Emits the `sll.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sll_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SLL_D, d, j, k)); +} + +/* Emits the `srl.d d, j, k` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_srl_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SRL_D, d, j, k)); +} + +/* Emits the `sra.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_sra_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_SRA_D, d, j, k)); +} + +/* Emits the `rotr.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_rotr_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ROTR_W, d, j, k)); +} + +/* Emits the `rotr.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_rotr_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_ROTR_D, d, j, k)); +} + +/* Emits the `mul.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mul_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MUL_W, d, j, k)); +} + +/* Emits the `mulh.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mulh_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MULH_W, d, j, k)); +} + +/* Emits the `mulh.wu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mulh_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MULH_WU, d, j, k)); +} + +/* Emits the `mul.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mul_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MUL_D, d, j, k)); +} + +/* Emits the `mulh.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mulh_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MULH_D, d, j, k)); +} + +/* Emits the `mulh.du d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mulh_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MULH_DU, d, j, k)); +} + +/* Emits the `div.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_div_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_DIV_W, d, j, k)); +} + +/* Emits the `mod.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mod_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MOD_W, d, j, k)); +} + +/* Emits the `div.wu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_div_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_DIV_WU, d, j, k)); +} + +/* Emits the `mod.wu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mod_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MOD_WU, d, j, k)); +} + +/* Emits the `div.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_div_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_DIV_D, d, j, k)); +} + +/* Emits the `mod.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mod_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MOD_D, d, j, k)); +} + +/* Emits the `div.du d, j, k` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_div_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_DIV_DU, d, j, k)); +} + +/* Emits the `mod.du d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_mod_du(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_MOD_DU, d, j, k)); +} + +/* Emits the `slli.w d, j, uk5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_slli_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_out32(s, encode_djuk5_insn(OPC_SLLI_W, d, j, uk5)); +} + +/* Emits the `slli.d d, j, uk6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_slli_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_out32(s, encode_djuk6_insn(OPC_SLLI_D, d, j, uk6)); +} + +/* Emits the `srli.w d, j, uk5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srli_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_out32(s, encode_djuk5_insn(OPC_SRLI_W, d, j, uk5)); +} + +/* Emits the `srli.d d, j, uk6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srli_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_out32(s, encode_djuk6_insn(OPC_SRLI_D, d, j, uk6)); +} + +/* Emits the `srai.w d, j, uk5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srai_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_out32(s, encode_djuk5_insn(OPC_SRAI_W, d, j, uk5)); +} + +/* Emits the `srai.d d, j, uk6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_srai_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_out32(s, encode_djuk6_insn(OPC_SRAI_D, d, j, uk6)); +} + +/* Emits the `rotri.w d, j, uk5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_rotri_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5) +{ + tcg_out32(s, encode_djuk5_insn(OPC_ROTRI_W, d, j, uk5)); +} + +/* Emits the `rotri.d d, j, uk6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_rotri_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6) +{ + tcg_out32(s, encode_djuk6_insn(OPC_ROTRI_D, d, j, uk6)); +} + +/* Emits the `bstrins.w d, j, uk5, um5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bstrins_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5, + uint32_t um5) +{ + tcg_out32(s, encode_djuk5um5_insn(OPC_BSTRINS_W, d, j, uk5, um5)); +} + +/* Emits the `bstrpick.w d, j, uk5, um5` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bstrpick_w(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk5, + uint32_t um5) +{ + tcg_out32(s, encode_djuk5um5_insn(OPC_BSTRPICK_W, d, j, uk5, um5)); +} + +/* Emits the `bstrins.d d, j, uk6, um6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bstrins_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6, + uint32_t um6) +{ + tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRINS_D, d, j, uk6, um6)); +} + +/* Emits the `bstrpick.d d, j, uk6, um6` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bstrpick_d(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk6, + uint32_t um6) +{ + tcg_out32(s, encode_djuk6um6_insn(OPC_BSTRPICK_D, d, j, uk6, um6)); +} + +/* Emits the `slti d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_slti(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_SLTI, d, j, sk12)); +} + +/* Emits the `sltui d, j, sk12` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_sltui(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_SLTUI, d, j, sk12)); +} + +/* Emits the `addi.w d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_addi_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ADDI_W, d, j, sk12)); +} + +/* Emits the `addi.d d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_addi_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ADDI_D, d, j, sk12)); +} + +/* Emits the `cu52i.d d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_cu52i_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_CU52I_D, d, j, sk12)); +} + +/* Emits the `andi d, j, uk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_andi(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12) +{ + tcg_out32(s, encode_djuk12_insn(OPC_ANDI, d, j, uk12)); +} + +/* Emits the `ori d, j, uk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12) +{ + tcg_out32(s, encode_djuk12_insn(OPC_ORI, d, j, uk12)); +} + +/* Emits the `xori d, j, uk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_xori(TCGContext *s, TCGReg d, TCGReg j, uint32_t uk12) +{ + tcg_out32(s, encode_djuk12_insn(OPC_XORI, d, j, uk12)); +} + +/* Emits the `lu12i.w d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_lu12i_w(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_LU12I_W, d, sj20)); +} + +/* Emits the `cu32i.d d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_cu32i_d(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_CU32I_D, d, sj20)); +} + +/* Emits the `pcaddu2i d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_pcaddu2i(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_PCADDU2I, d, sj20)); +} + +/* Emits the `pcalau12i d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_pcalau12i(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_PCALAU12I, d, sj20)); +} + +/* Emits the `pcaddu12i d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_pcaddu12i(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_PCADDU12I, d, sj20)); +} + +/* Emits the `pcaddu18i d, sj20` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_pcaddu18i(TCGContext *s, TCGReg d, int32_t sj20) +{ + tcg_out32(s, encode_dsj20_insn(OPC_PCADDU18I, d, sj20)); +} + +/* Emits the `ld.b d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_b(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_B, d, j, sk12)); +} + +/* Emits the `ld.h d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_h(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_H, d, j, sk12)); +} + +/* Emits the `ld.w d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_W, d, j, sk12)); +} + +/* Emits the `ld.d d, j, sk12` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_ld_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_D, d, j, sk12)); +} + +/* Emits the `st.b d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_st_b(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ST_B, d, j, sk12)); +} + +/* Emits the `st.h d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_st_h(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ST_H, d, j, sk12)); +} + +/* Emits the `st.w d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_st_w(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ST_W, d, j, sk12)); +} + +/* Emits the `st.d d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_st_d(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_ST_D, d, j, sk12)); +} + +/* Emits the `ld.bu d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_bu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_BU, d, j, sk12)); +} + +/* Emits the `ld.hu d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_hu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_HU, d, j, sk12)); +} + +/* Emits the `ld.wu d, j, sk12` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ld_wu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk12) +{ + tcg_out32(s, encode_djsk12_insn(OPC_LD_WU, d, j, sk12)); +} + +/* Emits the `ldx.b d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_B, d, j, k)); +} + +/* Emits the `ldx.h d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_H, d, j, k)); +} + +/* Emits the `ldx.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_W, d, j, k)); +} + +/* Emits the `ldx.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_D, d, j, k)); +} + +/* Emits the `stx.b d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_stx_b(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_STX_B, d, j, k)); +} + +/* Emits the `stx.h d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_stx_h(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_STX_H, d, j, k)); +} + +/* Emits the `stx.w d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_stx_w(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_STX_W, d, j, k)); +} + +/* Emits the `stx.d d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_stx_d(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_STX_D, d, j, k)); +} + +/* Emits the `ldx.bu d, j, k` instruction. 
*/ +static void __attribute__((unused)) +tcg_out_opc_ldx_bu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_BU, d, j, k)); +} + +/* Emits the `ldx.hu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_hu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_HU, d, j, k)); +} + +/* Emits the `ldx.wu d, j, k` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ldx_wu(TCGContext *s, TCGReg d, TCGReg j, TCGReg k) +{ + tcg_out32(s, encode_djk_insn(OPC_LDX_WU, d, j, k)); +} + +/* Emits the `dbar ud15` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_dbar(TCGContext *s, uint32_t ud15) +{ + tcg_out32(s, encode_ud15_insn(OPC_DBAR, ud15)); +} + +/* Emits the `jirl d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_jirl(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_JIRL, d, j, sk16)); +} + +/* Emits the `b sd10k16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_b(TCGContext *s, int32_t sd10k16) +{ + tcg_out32(s, encode_sd10k16_insn(OPC_B, sd10k16)); +} + +/* Emits the `bl sd10k16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bl(TCGContext *s, int32_t sd10k16) +{ + tcg_out32(s, encode_sd10k16_insn(OPC_BL, sd10k16)); +} + +/* Emits the `beq d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_beq(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BEQ, d, j, sk16)); +} + +/* Emits the `bne d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bne(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BNE, d, j, sk16)); +} + +/* Emits the `bgt d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bgt(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BGT, d, j, sk16)); +} + +/* Emits the `ble d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_ble(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BLE, d, j, sk16)); +} + +/* Emits the `bgtu d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bgtu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BGTU, d, j, sk16)); +} + +/* Emits the `bleu d, j, sk16` instruction. */ +static void __attribute__((unused)) +tcg_out_opc_bleu(TCGContext *s, TCGReg d, TCGReg j, int32_t sk16) +{ + tcg_out32(s, encode_djsk16_insn(OPC_BLEU, d, j, sk16)); +} +/* End of generated code. */ diff --git a/tcg/loongarch64/tcg-target-con-set.h b/tcg/loongarch64/tcg-target-con-set.h new file mode 100644 index 0000000000000000000000000000000000000000..7b0297034f6c6950f8f5d12858ddde4845c2b90f --- /dev/null +++ b/tcg/loongarch64/tcg-target-con-set.h @@ -0,0 +1,39 @@ +/* + * Define LoongArch target-specific constraint sets. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +/* + * C_On_Im(...) defines a constraint set with outputs and inputs. + * Each operand should be a sequence of constraint letters as defined by + * tcg-target-con-str.h; the constraint combination is inclusive or. + */ +C_O0_I1(r) +C_O0_I2(rZ, r) +C_O0_I2(rZ, rZ) +C_O0_I2(LZ, L) +C_O1_I1(r, r) +C_O1_I1(r, L) +C_O1_I2(r, r, rC) +C_O1_I2(r, r, ri) +C_O1_I2(r, r, rI) +C_O1_I2(r, r, rU) +C_O1_I2(r, r, rW) +C_O1_I2(r, r, rZ) +C_O1_I2(r, 0, rZ) +C_O1_I2(r, rZ, rN) +C_O1_I2(r, rZ, rZ) diff --git a/tcg/loongarch64/tcg-target-con-str.h b/tcg/loongarch64/tcg-target-con-str.h new file mode 100644 index 0000000000000000000000000000000000000000..b105f5ebd84826e26eb84130a18d040d595da6fc --- /dev/null +++ b/tcg/loongarch64/tcg-target-con-str.h @@ -0,0 +1,36 @@ +/* + * Define LoongArch target-specific operand constraints. + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +/* + * Define constraint letters for register sets: + * REGS(letter, register_mask) + */ +REGS('r', ALL_GENERAL_REGS) +REGS('L', ALL_GENERAL_REGS & ~SOFTMMU_RESERVE_REGS) + +/* + * Define constraint letters for constants: + * CONST(letter, TCG_CT_CONST_* bit set) + */ +CONST('I', TCG_CT_CONST_S12) +CONST('N', TCG_CT_CONST_N12) +CONST('U', TCG_CT_CONST_U12) +CONST('Z', TCG_CT_CONST_ZERO) +CONST('C', TCG_CT_CONST_C12) +CONST('W', TCG_CT_CONST_WSZ) diff --git a/tcg/loongarch64/tcg-target.c.inc b/tcg/loongarch64/tcg-target.c.inc new file mode 100644 index 0000000000000000000000000000000000000000..0b28b30002262bad2f840d5debd83421d263441c --- /dev/null +++ b/tcg/loongarch64/tcg-target.c.inc @@ -0,0 +1,1727 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . 
+ * + */ + +#include "../tcg-ldst.c.inc" + +#ifdef CONFIG_DEBUG_TCG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "zero", + "ra", + "tp", + "sp", + "a0", + "a1", + "a2", + "a3", + "a4", + "a5", + "a6", + "a7", + "t0", + "t1", + "t2", + "t3", + "t4", + "t5", + "t6", + "t7", + "t8", + "r21", /* reserved in the LP64* ABI, hence no ABI name */ + "s9", + "s0", + "s1", + "s2", + "s3", + "s4", + "s5", + "s6", + "s7", + "s8" +}; +#endif + +static const int tcg_target_reg_alloc_order[] = { + /* Registers preserved across calls */ + /* TCG_REG_S0 reserved for TCG_AREG0 */ + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_S9, + + /* Registers (potentially) clobbered across calls */ + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_T3, + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + TCG_REG_T7, + TCG_REG_T8, + + /* Argument registers, opposite order of allocation. */ + TCG_REG_A7, + TCG_REG_A6, + TCG_REG_A5, + TCG_REG_A4, + TCG_REG_A3, + TCG_REG_A2, + TCG_REG_A1, + TCG_REG_A0, +}; + +static const int tcg_target_call_iarg_regs[] = { + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, + TCG_REG_A4, + TCG_REG_A5, + TCG_REG_A6, + TCG_REG_A7, +}; + +static const int tcg_target_call_oarg_regs[] = { + TCG_REG_A0, + TCG_REG_A1, +}; + +#ifndef CONFIG_SOFTMMU +#define USE_GUEST_BASE (guest_base != 0) +#define TCG_GUEST_BASE_REG TCG_REG_S1 +#endif + +#define TCG_CT_CONST_ZERO 0x100 +#define TCG_CT_CONST_S12 0x200 +#define TCG_CT_CONST_N12 0x400 +#define TCG_CT_CONST_U12 0x800 +#define TCG_CT_CONST_C12 0x1000 +#define TCG_CT_CONST_WSZ 0x2000 + +#define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32) +/* + * For softmmu, we need to avoid conflicts with the first 5 + * argument registers to call the helper. Some of these are + * also used for the tlb lookup. + */ +#ifdef CONFIG_SOFTMMU +#define SOFTMMU_RESERVE_REGS MAKE_64BIT_MASK(TCG_REG_A0, 5) +#else +#define SOFTMMU_RESERVE_REGS 0 +#endif + +static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len) +{ + return sextract64(val, pos, len); +} + +/* test if a constant matches the constraint */ +static bool tcg_target_const_match(int64_t val, TCGType type, int ct) +{ + if (ct & TCG_CT_CONST) { + return true; + } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return true; + } + if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) { + return true; + } + if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) { + return true; + } + if ((ct & TCG_CT_CONST_U12) && val >= 0 && val <= 0xfff) { + return true; + } + if ((ct & TCG_CT_CONST_C12) && ~val >= 0 && ~val <= 0xfff) { + return true; + } + if ((ct & TCG_CT_CONST_WSZ) && val == (type == TCG_TYPE_I32 ? 32 : 64)) { + return true; + } + return false; +} + +/* + * Relocations + */ + +/* + * Relocation records defined in LoongArch ELF psABI v1.00 is way too + * complicated; a whopping stack machine is needed to stuff the fields, at + * the very least one SOP_PUSH and one SOP_POP (of the correct format) are + * needed. + * + * Hence, define our own simpler relocation types. Numbers are chosen as to + * not collide with potential future additions to the true ELF relocation + * type enum. 
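+ * For example, R_LOONGARCH_BR_SK16 covers the conditional branches: the
+ * PC-relative byte offset is shifted right by 2 and the low 16 bits are
+ * deposited into the k16 slot at bit 10, which is exactly what
+ * reloc_br_sk16() below does.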
+ */ + +/* Field Sk16, shifted right by 2; suitable for conditional jumps */ +#define R_LOONGARCH_BR_SK16 256 +/* Field Sd10k16, shifted right by 2; suitable for B and BL */ +#define R_LOONGARCH_BR_SD10K16 257 + +static bool reloc_br_sk16(tcg_insn_unit *src_rw, const tcg_insn_unit *target) +{ + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); + intptr_t offset = (intptr_t)target - (intptr_t)src_rx; + + tcg_debug_assert((offset & 3) == 0); + offset >>= 2; + if (offset == sextreg(offset, 0, 16)) { + *src_rw = deposit64(*src_rw, 10, 16, offset); + return true; + } + + return false; +} + +static bool reloc_br_sd10k16(tcg_insn_unit *src_rw, + const tcg_insn_unit *target) +{ + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); + intptr_t offset = (intptr_t)target - (intptr_t)src_rx; + + tcg_debug_assert((offset & 3) == 0); + offset >>= 2; + if (offset == sextreg(offset, 0, 26)) { + *src_rw = deposit64(*src_rw, 0, 10, offset >> 16); /* slot d10 */ + *src_rw = deposit64(*src_rw, 10, 16, offset); /* slot k16 */ + return true; + } + + return false; +} + +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, + intptr_t addend) +{ + tcg_debug_assert(addend == 0); + switch (type) { + case R_LOONGARCH_BR_SK16: + return reloc_br_sk16(code_ptr, (tcg_insn_unit *)value); + case R_LOONGARCH_BR_SD10K16: + return reloc_br_sd10k16(code_ptr, (tcg_insn_unit *)value); + default: + g_assert_not_reached(); + } +} + +#include "tcg-insn-defs.c.inc" + +/* + * TCG intrinsics + */ + +static void tcg_out_mb(TCGContext *s, TCGArg a0) +{ + /* Baseline LoongArch only has the full barrier, unfortunately. */ + tcg_out_opc_dbar(s, 0); +} + +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +{ + if (ret == arg) { + return true; + } + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + /* + * Conventional register-register move used in LoongArch is + * `or dst, src, zero`. + */ + tcg_out_opc_or(s, ret, arg, TCG_REG_ZERO); + break; + default: + g_assert_not_reached(); + } + return true; +} + +static bool imm_part_needs_loading(bool high_bits_are_ones, + tcg_target_long part) +{ + if (high_bits_are_ones) { + return part != -1; + } else { + return part != 0; + } +} + +/* Loads a 32-bit immediate into rd, sign-extended. */ +static void tcg_out_movi_i32(TCGContext *s, TCGReg rd, int32_t val) +{ + tcg_target_long lo = sextreg(val, 0, 12); + tcg_target_long hi12 = sextreg(val, 12, 20); + + /* Single-instruction cases. */ + if (lo == val) { + /* val fits in simm12: addi.w rd, zero, val */ + tcg_out_opc_addi_w(s, rd, TCG_REG_ZERO, val); + return; + } + if (0x800 <= val && val <= 0xfff) { + /* val fits in uimm12: ori rd, zero, val */ + tcg_out_opc_ori(s, rd, TCG_REG_ZERO, val); + return; + } + + /* High bits must be set; load with lu12i.w + optional ori. */ + tcg_out_opc_lu12i_w(s, rd, hi12); + if (lo != 0) { + tcg_out_opc_ori(s, rd, rd, lo & 0xfff); + } +} + +static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, + tcg_target_long val) +{ + /* + * LoongArch conventionally loads 64-bit immediates in at most 4 steps, + * with dedicated instructions for filling the respective bitfields + * below: + * + * 6 5 4 3 + * 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 + * +-----------------------+---------------------------------------+... + * | hi52 | hi32 | + * +-----------------------+---------------------------------------+... 
+ * 3 2 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * ...+-------------------------------------+-------------------------+ + * | hi12 | lo | + * ...+-------------------------------------+-------------------------+ + * + * Check if val belong to one of the several fast cases, before falling + * back to the slow path. + */ + + intptr_t pc_offset; + tcg_target_long val_lo, val_hi, pc_hi, offset_hi; + tcg_target_long hi32, hi52; + bool rd_high_bits_are_ones; + + /* Value fits in signed i32. */ + if (type == TCG_TYPE_I32 || val == (int32_t)val) { + tcg_out_movi_i32(s, rd, val); + return; + } + + /* PC-relative cases. */ + pc_offset = tcg_pcrel_diff(s, (void *)val); + if (pc_offset == sextreg(pc_offset, 0, 22) && (pc_offset & 3) == 0) { + /* Single pcaddu2i. */ + tcg_out_opc_pcaddu2i(s, rd, pc_offset >> 2); + return; + } + + if (pc_offset == (int32_t)pc_offset) { + /* Offset within 32 bits; load with pcalau12i + ori. */ + val_lo = sextreg(val, 0, 12); + val_hi = val >> 12; + pc_hi = (val - pc_offset) >> 12; + offset_hi = val_hi - pc_hi; + + tcg_debug_assert(offset_hi == sextreg(offset_hi, 0, 20)); + tcg_out_opc_pcalau12i(s, rd, offset_hi); + if (val_lo != 0) { + tcg_out_opc_ori(s, rd, rd, val_lo & 0xfff); + } + return; + } + + hi32 = sextreg(val, 32, 20); + hi52 = sextreg(val, 52, 12); + + /* Single cu52i.d case. */ + if (ctz64(val) >= 52) { + tcg_out_opc_cu52i_d(s, rd, TCG_REG_ZERO, hi52); + return; + } + + /* Slow path. Initialize the low 32 bits, then concat high bits. */ + tcg_out_movi_i32(s, rd, val); + rd_high_bits_are_ones = (int32_t)val < 0; + + if (imm_part_needs_loading(rd_high_bits_are_ones, hi32)) { + tcg_out_opc_cu32i_d(s, rd, hi32); + rd_high_bits_are_ones = hi32 < 0; + } + + if (imm_part_needs_loading(rd_high_bits_are_ones, hi52)) { + tcg_out_opc_cu52i_d(s, rd, rd, hi52); + } +} + +static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_andi(s, ret, arg, 0xff); +} + +static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_bstrpick_w(s, ret, arg, 0, 15); +} + +static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_bstrpick_d(s, ret, arg, 0, 31); +} + +static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_sext_b(s, ret, arg); +} + +static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_sext_h(s, ret, arg); +} + +static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg) +{ + tcg_out_opc_addi_w(s, ret, arg, 0); +} + +static void tcg_out_clzctz(TCGContext *s, LoongArchInsn opc, TCGReg a0, + TCGReg a1, TCGReg a2, bool c2, bool is_32bit) +{ + if (c2) { + /* + * Fast path: semantics already satisfied due to constraint and + * insn behavior, single instruction is enough. + */ + tcg_debug_assert(a2 == (is_32bit ? 32 : 64)); + /* all clz/ctz insns belong to DJ-format */ + tcg_out32(s, encode_dj_insn(opc, a0, a1)); + return; + } + + tcg_out32(s, encode_dj_insn(opc, TCG_REG_TMP0, a1)); + /* a0 = a1 ? 
REG_TMP0 : a2 */ + tcg_out_opc_maskeqz(s, TCG_REG_TMP0, TCG_REG_TMP0, a1); + tcg_out_opc_masknez(s, a0, a2, a1); + tcg_out_opc_or(s, a0, TCG_REG_TMP0, a0); +} + +static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, + TCGReg arg1, TCGReg arg2, bool c2) +{ + TCGReg tmp; + + if (c2) { + tcg_debug_assert(arg2 == 0); + } + + switch (cond) { + case TCG_COND_EQ: + if (c2) { + tmp = arg1; + } else { + tcg_out_opc_sub_d(s, ret, arg1, arg2); + tmp = ret; + } + tcg_out_opc_sltui(s, ret, tmp, 1); + break; + case TCG_COND_NE: + if (c2) { + tmp = arg1; + } else { + tcg_out_opc_sub_d(s, ret, arg1, arg2); + tmp = ret; + } + tcg_out_opc_sltu(s, ret, TCG_REG_ZERO, tmp); + break; + case TCG_COND_LT: + tcg_out_opc_slt(s, ret, arg1, arg2); + break; + case TCG_COND_GE: + tcg_out_opc_slt(s, ret, arg1, arg2); + tcg_out_opc_xori(s, ret, ret, 1); + break; + case TCG_COND_LE: + tcg_out_setcond(s, TCG_COND_GE, ret, arg2, arg1, false); + break; + case TCG_COND_GT: + tcg_out_setcond(s, TCG_COND_LT, ret, arg2, arg1, false); + break; + case TCG_COND_LTU: + tcg_out_opc_sltu(s, ret, arg1, arg2); + break; + case TCG_COND_GEU: + tcg_out_opc_sltu(s, ret, arg1, arg2); + tcg_out_opc_xori(s, ret, ret, 1); + break; + case TCG_COND_LEU: + tcg_out_setcond(s, TCG_COND_GEU, ret, arg2, arg1, false); + break; + case TCG_COND_GTU: + tcg_out_setcond(s, TCG_COND_LTU, ret, arg2, arg1, false); + break; + default: + g_assert_not_reached(); + break; + } +} + +/* + * Branch helpers + */ + +static const struct { + LoongArchInsn op; + bool swap; +} tcg_brcond_to_loongarch[] = { + [TCG_COND_EQ] = { OPC_BEQ, false }, [TCG_COND_NE] = { OPC_BNE, false }, + [TCG_COND_LT] = { OPC_BGT, true }, [TCG_COND_GE] = { OPC_BLE, true }, + [TCG_COND_LE] = { OPC_BLE, false }, [TCG_COND_GT] = { OPC_BGT, false }, + [TCG_COND_LTU] = { OPC_BGTU, true }, [TCG_COND_GEU] = { OPC_BLEU, true }, + [TCG_COND_LEU] = { OPC_BLEU, false }, [TCG_COND_GTU] = { OPC_BGTU, false } +}; + +static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, + TCGReg arg2, TCGLabel *l) +{ + LoongArchInsn op = tcg_brcond_to_loongarch[cond].op; + + tcg_debug_assert(op != 0); + + if (tcg_brcond_to_loongarch[cond].swap) { + TCGReg t = arg1; + arg1 = arg2; + arg2 = t; + } + + /* all conditional branch insns belong to DJSk16-format */ + tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SK16, l, 0); + tcg_out32(s, encode_djsk16_insn(op, arg1, arg2, 0)); +} + +static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, + bool tail) +{ + TCGReg link = tail ? 
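/* a tail call links to $zero, discarding the return address */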
TCG_REG_ZERO : TCG_REG_RA; + ptrdiff_t offset = tcg_pcrel_diff(s, arg); + + tcg_debug_assert((offset & 3) == 0); + if (offset == sextreg(offset, 0, 28)) { + /* short jump: +/- 256MiB */ + if (tail) { + tcg_out_opc_b(s, offset >> 2); + } else { + tcg_out_opc_bl(s, offset >> 2); + } + } else if (offset == sextreg(offset, 0, 38)) { + /* long jump: +/- 256GiB */ + tcg_target_long lo = sextreg(offset, 0, 18); + tcg_target_long hi = offset - lo; + tcg_out_opc_pcaddu18i(s, TCG_REG_TMP0, hi >> 18); + tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); + } else { + /* far jump: 64-bit */ + tcg_target_long lo = sextreg((tcg_target_long)arg, 0, 18); + tcg_target_long hi = (tcg_target_long)arg - lo; + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, hi); + tcg_out_opc_jirl(s, link, TCG_REG_TMP0, lo >> 2); + } +} + +static void tcg_out_call(TCGContext *s, const tcg_insn_unit *arg) +{ + tcg_out_call_int(s, arg, false); +} + +/* + * Load/store helpers + */ + +static void tcg_out_ldst(TCGContext *s, LoongArchInsn opc, TCGReg data, + TCGReg addr, intptr_t offset) +{ + intptr_t imm12 = sextreg(offset, 0, 12); + + if (offset != imm12) { + intptr_t diff = offset - (uintptr_t)s->code_ptr; + + if (addr == TCG_REG_ZERO && diff == (int32_t)diff) { + imm12 = sextreg(diff, 0, 12); + tcg_out_opc_pcaddu12i(s, TCG_REG_TMP2, (diff - imm12) >> 12); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12); + if (addr != TCG_REG_ZERO) { + tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, addr); + } + } + addr = TCG_REG_TMP2; + } + + switch (opc) { + case OPC_LD_B: + case OPC_LD_BU: + case OPC_LD_H: + case OPC_LD_HU: + case OPC_LD_W: + case OPC_LD_WU: + case OPC_LD_D: + case OPC_ST_B: + case OPC_ST_H: + case OPC_ST_W: + case OPC_ST_D: + tcg_out32(s, encode_djsk12_insn(opc, data, addr, imm12)); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, + intptr_t arg2) +{ + bool is_32bit = type == TCG_TYPE_I32; + tcg_out_ldst(s, is_32bit ? OPC_LD_W : OPC_LD_D, arg, arg1, arg2); +} + +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg, TCGReg arg1, + intptr_t arg2) +{ + bool is_32bit = type == TCG_TYPE_I32; + tcg_out_ldst(s, is_32bit ? OPC_ST_W : OPC_ST_D, arg, arg1, arg2); +} + +static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, + intptr_t ofs) +{ + if (val == 0) { + tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); + return true; + } + return false; +} + +/* + * Load/store helpers for SoftMMU, and qemu_ld/st implementations + */ + +#if defined(CONFIG_SOFTMMU) +/* + * helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, + * MemOpIdx oi, uintptr_t ra) + */ +static void *const qemu_ld_helpers[4] = { + [MO_8] = helper_ret_ldub_mmu, + [MO_16] = helper_le_lduw_mmu, + [MO_32] = helper_le_ldul_mmu, + [MO_64] = helper_le_ldq_mmu, +}; + +/* + * helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, + * uintxx_t val, MemOpIdx oi, + * uintptr_t ra) + */ +static void *const qemu_st_helpers[4] = { + [MO_8] = helper_ret_stb_mmu, + [MO_16] = helper_le_stw_mmu, + [MO_32] = helper_le_stl_mmu, + [MO_64] = helper_le_stq_mmu, +}; + +/* We expect to use a 12-bit negative offset from ENV. 
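The TLB mask/table loads in tcg_out_tlb_load() use ld.d's signed 12-bit immediate, so the offset must stay within [-0x800, 0]; the build-time assertions below enforce this.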
*/ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11)); + +static bool tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) +{ + tcg_out_opc_b(s, 0); + return reloc_br_sd10k16(s->code_ptr - 1, target); +} + +/* + * Emits common code for TLB addend lookup, that eventually loads the + * addend in TCG_REG_TMP2. + */ +static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl, MemOpIdx oi, + tcg_insn_unit **label_ptr, bool is_load) +{ + MemOp opc = get_memop(oi); + unsigned s_bits = opc & MO_SIZE; + unsigned a_bits = get_alignment_bits(opc); + tcg_target_long compare_mask; + int mem_index = get_mmuidx(oi); + int fast_ofs = TLB_MASK_TABLE_OFS(mem_index); + int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask); + int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table); + + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs); + + tcg_out_opc_srli_d(s, TCG_REG_TMP2, addrl, + TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_opc_and(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0); + tcg_out_opc_add_d(s, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1); + + /* Load the tlb comparator and the addend. */ + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2, + is_load ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2, + offsetof(CPUTLBEntry, addend)); + + /* We don't support unaligned accesses. */ + if (a_bits < s_bits) { + a_bits = s_bits; + } + /* Clear the non-page, non-alignment bits from the address. */ + compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1); + tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask); + tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addrl); + + /* Compare masked address with the TLB entry. */ + label_ptr[0] = s->code_ptr; + tcg_out_opc_bne(s, TCG_REG_TMP0, TCG_REG_TMP1, 0); + + /* TLB Hit - addend in TCG_REG_TMP2, ready for use. 
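The callers use it as the index register of the ldx/stx access that follows.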
*/ +} + +static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi, + TCGType type, TCGReg datalo, TCGReg addrlo, + void *raddr, tcg_insn_unit **label_ptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->oi = oi; + label->type = type; + label->datalo_reg = datalo; + label->datahi_reg = 0; /* unused */ + label->addrlo_reg = addrlo; + label->addrhi_reg = 0; /* unused */ + label->raddr = tcg_splitwx_to_rx(raddr); + label->label_ptr[0] = label_ptr[0]; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + MemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; + TCGType type = l->type; + + /* resolve label address */ + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + /* call load helper */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A2, oi); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, (tcg_target_long)l->raddr); + + tcg_out_call(s, qemu_ld_helpers[size]); + + switch (opc & MO_SSIZE) { + case MO_SB: + tcg_out_ext8s(s, l->datalo_reg, TCG_REG_A0); + break; + case MO_SW: + tcg_out_ext16s(s, l->datalo_reg, TCG_REG_A0); + break; + case MO_SL: + tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0); + break; + case MO_UL: + if (type == TCG_TYPE_I32) { + /* MO_UL loads of i32 should be sign-extended too */ + tcg_out_ext32s(s, l->datalo_reg, TCG_REG_A0); + break; + } + /* fallthrough */ + default: + tcg_out_mov(s, type, l->datalo_reg, TCG_REG_A0); + break; + } + + return tcg_out_goto(s, l->raddr); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + MemOpIdx oi = l->oi; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; + + /* resolve label address */ + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + /* call store helper */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A1, l->addrlo_reg); + switch (size) { + case MO_8: + tcg_out_ext8u(s, TCG_REG_A2, l->datalo_reg); + break; + case MO_16: + tcg_out_ext16u(s, TCG_REG_A2, l->datalo_reg); + break; + case MO_32: + tcg_out_ext32u(s, TCG_REG_A2, l->datalo_reg); + break; + case MO_64: + tcg_out_mov(s, TCG_TYPE_I64, TCG_REG_A2, l->datalo_reg); + break; + default: + g_assert_not_reached(); + break; + } + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A3, oi); + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A4, (tcg_target_long)l->raddr); + + tcg_out_call(s, qemu_st_helpers[size]); + + return tcg_out_goto(s, l->raddr); +} +#else + +/* + * Alignment helpers for user-mode emulation + */ + +static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg, + unsigned a_bits) +{ + TCGLabelQemuLdst *l = new_ldst_label(s); + + l->is_ld = is_ld; + l->addrlo_reg = addr_reg; + + /* + * Without micro-architecture details, we don't know which of bstrpick or + * andi is faster, so use bstrpick as it's not constrained by imm field + * width. 
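Either way, the extracted low a_bits bits are zero exactly when the access is aligned, and the bne emitted below branches to the slow path otherwise.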
(Not to say alignments >= 2^12 are going to happen any time + * soon, though) + */ + tcg_out_opc_bstrpick_d(s, TCG_REG_TMP1, addr_reg, 0, a_bits - 1); + + l->label_ptr[0] = s->code_ptr; + tcg_out_opc_bne(s, TCG_REG_TMP1, TCG_REG_ZERO, 0); + + l->raddr = tcg_splitwx_to_rx(s->code_ptr); +} + +static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l) +{ + /* resolve label address */ + if (!reloc_br_sk16(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg); + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0); + + /* tail call, with the return address back inline. */ + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr); + tcg_out_call_int( + s, + (const void *)(l->is_ld ? helper_unaligned_ld : helper_unaligned_st), + true); + return true; +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l) +{ + return tcg_out_fail_alignment(s, l); +} + +#endif /* CONFIG_SOFTMMU */ + +/* + * `ext32u` the address register into the temp register given, + * if target is 32-bit, no-op otherwise. + * + * Returns the address register ready for use with TLB addend. + */ +static TCGReg tcg_out_zext_addr_if_32_bit(TCGContext *s, TCGReg addr, + TCGReg tmp) +{ + if (TARGET_LONG_BITS == 32) { + tcg_out_ext32u(s, tmp, addr); + return tmp; + } + return addr; +} + +static void tcg_out_qemu_ld_indexed(TCGContext *s, TCGReg rd, TCGReg rj, + TCGReg rk, MemOp opc, TCGType type) +{ + /* Byte swapping is left to middle-end expansion. */ + tcg_debug_assert((opc & MO_BSWAP) == 0); + + switch (opc & MO_SSIZE) { + case MO_UB: + tcg_out_opc_ldx_bu(s, rd, rj, rk); + break; + case MO_SB: + tcg_out_opc_ldx_b(s, rd, rj, rk); + break; + case MO_UW: + tcg_out_opc_ldx_hu(s, rd, rj, rk); + break; + case MO_SW: + tcg_out_opc_ldx_h(s, rd, rj, rk); + break; + case MO_UL: + if (type == TCG_TYPE_I64) { + tcg_out_opc_ldx_wu(s, rd, rj, rk); + break; + } + /* fallthrough */ + case MO_SL: + tcg_out_opc_ldx_w(s, rd, rj, rk); + break; + case MO_Q: + tcg_out_opc_ldx_d(s, rd, rj, rk); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, TCGType type) +{ + TCGReg addr_regl; + TCGReg data_regl; + MemOpIdx oi; + MemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[1]; +#else + unsigned a_bits; +#endif + TCGReg base; + + data_regl = *args++; + addr_regl = *args++; + oi = *args++; + opc = get_memop(oi); + +#if defined(CONFIG_SOFTMMU) + tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 1); + base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); + tcg_out_qemu_ld_indexed(s, data_regl, base, TCG_REG_TMP2, opc, type); + add_qemu_ldst_label(s, 1, oi, type, data_regl, addr_regl, s->code_ptr, + label_ptr); +#else + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, true, addr_regl, a_bits); + } + base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); + TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; + tcg_out_qemu_ld_indexed(s, data_regl, base, guest_base_reg, opc, type); +#endif +} + +static void tcg_out_qemu_st_indexed(TCGContext *s, TCGReg data, TCGReg rj, + TCGReg rk, MemOp opc) +{ + /* Byte swapping is left to middle-end expansion. 
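The assertion below enforces that, so only host-order (little-endian) accesses reach this backend.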
*/ + tcg_debug_assert((opc & MO_BSWAP) == 0); + + switch (opc & MO_SIZE) { + case MO_8: + tcg_out_opc_stx_b(s, data, rj, rk); + break; + case MO_16: + tcg_out_opc_stx_h(s, data, rj, rk); + break; + case MO_32: + tcg_out_opc_stx_w(s, data, rj, rk); + break; + case MO_64: + tcg_out_opc_stx_d(s, data, rj, rk); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args) +{ + TCGReg addr_regl; + TCGReg data_regl; + MemOpIdx oi; + MemOp opc; +#if defined(CONFIG_SOFTMMU) + tcg_insn_unit *label_ptr[1]; +#else + unsigned a_bits; +#endif + TCGReg base; + + data_regl = *args++; + addr_regl = *args++; + oi = *args++; + opc = get_memop(oi); + +#if defined(CONFIG_SOFTMMU) + tcg_out_tlb_load(s, addr_regl, oi, label_ptr, 0); + base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); + tcg_out_qemu_st_indexed(s, data_regl, base, TCG_REG_TMP2, opc); + add_qemu_ldst_label(s, 0, oi, 0, /* type param is unused for stores */ + data_regl, addr_regl, s->code_ptr, label_ptr); +#else + a_bits = get_alignment_bits(opc); + if (a_bits) { + tcg_out_test_alignment(s, false, addr_regl, a_bits); + } + base = tcg_out_zext_addr_if_32_bit(s, addr_regl, TCG_REG_TMP0); + TCGReg guest_base_reg = USE_GUEST_BASE ? TCG_GUEST_BASE_REG : TCG_REG_ZERO; + tcg_out_qemu_st_indexed(s, data_regl, base, guest_base_reg, opc); +#endif +} + +/* + * Entry-points + */ + +static const tcg_insn_unit *tb_ret_addr; + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, + const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + TCGArg a0 = args[0]; + TCGArg a1 = args[1]; + TCGArg a2 = args[2]; + int c2 = const_args[2]; + + switch (opc) { + case INDEX_op_exit_tb: + /* Reuse the zeroing that exists for goto_ptr. */ + if (a0 == 0) { + tcg_out_call_int(s, tcg_code_gen_epilogue, true); + } else { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0); + tcg_out_call_int(s, tb_ret_addr, true); + } + break; + + case INDEX_op_goto_tb: + assert(s->tb_jmp_insn_offset == 0); + /* indirect jump method */ + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO, + (uintptr_t)(s->tb_jmp_target_addr + a0)); + tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_TMP0, 0); + set_jmp_reset_offset(s, a0); + break; + + case INDEX_op_mb: + tcg_out_mb(s, a0); + break; + + case INDEX_op_goto_ptr: + tcg_out_opc_jirl(s, TCG_REG_ZERO, a0, 0); + break; + + case INDEX_op_br: + tcg_out_reloc(s, s->code_ptr, R_LOONGARCH_BR_SD10K16, arg_label(a0), + 0); + tcg_out_opc_b(s, 0); + break; + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + tcg_out_brcond(s, a2, a0, a1, arg_label(args[3])); + break; + + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + tcg_out_ext8s(s, a0, a1); + break; + + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + tcg_out_ext8u(s, a0, a1); + break; + + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + tcg_out_ext16s(s, a0, a1); + break; + + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + tcg_out_ext16u(s, a0, a1); + break; + + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + tcg_out_ext32u(s, a0, a1); + break; + + case INDEX_op_ext32s_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_ext_i32_i64: + tcg_out_ext32s(s, a0, a1); + break; + + case INDEX_op_extrh_i64_i32: + tcg_out_opc_srai_d(s, a0, a1, 32); + break; + + case INDEX_op_not_i32: + case INDEX_op_not_i64: + tcg_out_opc_nor(s, a0, a1, TCG_REG_ZERO); + break; + + case INDEX_op_nor_i32: + case INDEX_op_nor_i64: + if (c2) { + tcg_out_opc_ori(s, a0, a1, a2); + tcg_out_opc_nor(s, a0, a0, 
TCG_REG_ZERO); + } else { + tcg_out_opc_nor(s, a0, a1, a2); + } + break; + + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + if (c2) { + /* guaranteed to fit due to constraint */ + tcg_out_opc_andi(s, a0, a1, ~a2); + } else { + tcg_out_opc_andn(s, a0, a1, a2); + } + break; + + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + if (c2) { + /* guaranteed to fit due to constraint */ + tcg_out_opc_ori(s, a0, a1, ~a2); + } else { + tcg_out_opc_orn(s, a0, a1, a2); + } + break; + + case INDEX_op_and_i32: + case INDEX_op_and_i64: + if (c2) { + tcg_out_opc_andi(s, a0, a1, a2); + } else { + tcg_out_opc_and(s, a0, a1, a2); + } + break; + + case INDEX_op_or_i32: + case INDEX_op_or_i64: + if (c2) { + tcg_out_opc_ori(s, a0, a1, a2); + } else { + tcg_out_opc_or(s, a0, a1, a2); + } + break; + + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + if (c2) { + tcg_out_opc_xori(s, a0, a1, a2); + } else { + tcg_out_opc_xor(s, a0, a1, a2); + } + break; + + case INDEX_op_extract_i32: + tcg_out_opc_bstrpick_w(s, a0, a1, a2, a2 + args[3] - 1); + break; + case INDEX_op_extract_i64: + tcg_out_opc_bstrpick_d(s, a0, a1, a2, a2 + args[3] - 1); + break; + + case INDEX_op_deposit_i32: + tcg_out_opc_bstrins_w(s, a0, a2, args[3], args[3] + args[4] - 1); + break; + case INDEX_op_deposit_i64: + tcg_out_opc_bstrins_d(s, a0, a2, args[3], args[3] + args[4] - 1); + break; + + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + tcg_out_opc_revb_2h(s, a0, a1); + if (a2 & TCG_BSWAP_OS) { + tcg_out_ext16s(s, a0, a0); + } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext16u(s, a0, a0); + } + break; + + case INDEX_op_bswap32_i32: + /* All 32-bit values are computed sign-extended in the register. */ + a2 = TCG_BSWAP_OS; + /* fallthrough */ + case INDEX_op_bswap32_i64: + tcg_out_opc_revb_2w(s, a0, a1); + if (a2 & TCG_BSWAP_OS) { + tcg_out_ext32s(s, a0, a0); + } else if ((a2 & (TCG_BSWAP_IZ | TCG_BSWAP_OZ)) == TCG_BSWAP_OZ) { + tcg_out_ext32u(s, a0, a0); + } + break; + + case INDEX_op_bswap64_i64: + tcg_out_opc_revb_d(s, a0, a1); + break; + + case INDEX_op_clz_i32: + tcg_out_clzctz(s, OPC_CLZ_W, a0, a1, a2, c2, true); + break; + case INDEX_op_clz_i64: + tcg_out_clzctz(s, OPC_CLZ_D, a0, a1, a2, c2, false); + break; + + case INDEX_op_ctz_i32: + tcg_out_clzctz(s, OPC_CTZ_W, a0, a1, a2, c2, true); + break; + case INDEX_op_ctz_i64: + tcg_out_clzctz(s, OPC_CTZ_D, a0, a1, a2, c2, false); + break; + + case INDEX_op_shl_i32: + if (c2) { + tcg_out_opc_slli_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_sll_w(s, a0, a1, a2); + } + break; + case INDEX_op_shl_i64: + if (c2) { + tcg_out_opc_slli_d(s, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_sll_d(s, a0, a1, a2); + } + break; + + case INDEX_op_shr_i32: + if (c2) { + tcg_out_opc_srli_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_srl_w(s, a0, a1, a2); + } + break; + case INDEX_op_shr_i64: + if (c2) { + tcg_out_opc_srli_d(s, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_srl_d(s, a0, a1, a2); + } + break; + + case INDEX_op_sar_i32: + if (c2) { + tcg_out_opc_srai_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_sra_w(s, a0, a1, a2); + } + break; + case INDEX_op_sar_i64: + if (c2) { + tcg_out_opc_srai_d(s, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_sra_d(s, a0, a1, a2); + } + break; + + case INDEX_op_rotl_i32: + /* transform into equivalent rotr/rotri */ + if (c2) { + tcg_out_opc_rotri_w(s, a0, a1, (32 - a2) & 0x1f); + } else { + tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); + tcg_out_opc_rotr_w(s, a0, a1, TCG_REG_TMP0); + } + break; + case 
INDEX_op_rotl_i64: + /* transform into equivalent rotr/rotri */ + if (c2) { + tcg_out_opc_rotri_d(s, a0, a1, (64 - a2) & 0x3f); + } else { + tcg_out_opc_sub_w(s, TCG_REG_TMP0, TCG_REG_ZERO, a2); + tcg_out_opc_rotr_d(s, a0, a1, TCG_REG_TMP0); + } + break; + + case INDEX_op_rotr_i32: + if (c2) { + tcg_out_opc_rotri_w(s, a0, a1, a2 & 0x1f); + } else { + tcg_out_opc_rotr_w(s, a0, a1, a2); + } + break; + case INDEX_op_rotr_i64: + if (c2) { + tcg_out_opc_rotri_d(s, a0, a1, a2 & 0x3f); + } else { + tcg_out_opc_rotr_d(s, a0, a1, a2); + } + break; + + case INDEX_op_add_i32: + if (c2) { + tcg_out_opc_addi_w(s, a0, a1, a2); + } else { + tcg_out_opc_add_w(s, a0, a1, a2); + } + break; + case INDEX_op_add_i64: + if (c2) { + tcg_out_opc_addi_d(s, a0, a1, a2); + } else { + tcg_out_opc_add_d(s, a0, a1, a2); + } + break; + + case INDEX_op_sub_i32: + if (c2) { + tcg_out_opc_addi_w(s, a0, a1, -a2); + } else { + tcg_out_opc_sub_w(s, a0, a1, a2); + } + break; + case INDEX_op_sub_i64: + if (c2) { + tcg_out_opc_addi_d(s, a0, a1, -a2); + } else { + tcg_out_opc_sub_d(s, a0, a1, a2); + } + break; + + case INDEX_op_mul_i32: + tcg_out_opc_mul_w(s, a0, a1, a2); + break; + case INDEX_op_mul_i64: + tcg_out_opc_mul_d(s, a0, a1, a2); + break; + + case INDEX_op_mulsh_i32: + tcg_out_opc_mulh_w(s, a0, a1, a2); + break; + case INDEX_op_mulsh_i64: + tcg_out_opc_mulh_d(s, a0, a1, a2); + break; + + case INDEX_op_muluh_i32: + tcg_out_opc_mulh_wu(s, a0, a1, a2); + break; + case INDEX_op_muluh_i64: + tcg_out_opc_mulh_du(s, a0, a1, a2); + break; + + case INDEX_op_div_i32: + tcg_out_opc_div_w(s, a0, a1, a2); + break; + case INDEX_op_div_i64: + tcg_out_opc_div_d(s, a0, a1, a2); + break; + + case INDEX_op_divu_i32: + tcg_out_opc_div_wu(s, a0, a1, a2); + break; + case INDEX_op_divu_i64: + tcg_out_opc_div_du(s, a0, a1, a2); + break; + + case INDEX_op_rem_i32: + tcg_out_opc_mod_w(s, a0, a1, a2); + break; + case INDEX_op_rem_i64: + tcg_out_opc_mod_d(s, a0, a1, a2); + break; + + case INDEX_op_remu_i32: + tcg_out_opc_mod_wu(s, a0, a1, a2); + break; + case INDEX_op_remu_i64: + tcg_out_opc_mod_du(s, a0, a1, a2); + break; + + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + tcg_out_setcond(s, args[3], a0, a1, a2, c2); + break; + + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + tcg_out_ldst(s, OPC_LD_B, a0, a1, a2); + break; + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + tcg_out_ldst(s, OPC_LD_BU, a0, a1, a2); + break; + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + tcg_out_ldst(s, OPC_LD_H, a0, a1, a2); + break; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + tcg_out_ldst(s, OPC_LD_HU, a0, a1, a2); + break; + case INDEX_op_ld_i32: + case INDEX_op_ld32s_i64: + tcg_out_ldst(s, OPC_LD_W, a0, a1, a2); + break; + case INDEX_op_ld32u_i64: + tcg_out_ldst(s, OPC_LD_WU, a0, a1, a2); + break; + case INDEX_op_ld_i64: + tcg_out_ldst(s, OPC_LD_D, a0, a1, a2); + break; + + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + tcg_out_ldst(s, OPC_ST_B, a0, a1, a2); + break; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + tcg_out_ldst(s, OPC_ST_H, a0, a1, a2); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_ldst(s, OPC_ST_W, a0, a1, a2); + break; + case INDEX_op_st_i64: + tcg_out_ldst(s, OPC_ST_D, a0, a1, a2); + break; + + case INDEX_op_qemu_ld_i32: + tcg_out_qemu_ld(s, args, TCG_TYPE_I32); + break; + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, args, TCG_TYPE_I64); + break; + case INDEX_op_qemu_st_i32: + tcg_out_qemu_st(s, args); + break; + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, args); + 
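+        /*
+         * Stores need no result extension, so the i32 and i64 cases
+         * share tcg_out_qemu_st() without a type argument.
+         */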
break; + + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + case INDEX_op_mov_i64: + case INDEX_op_call: /* Always emitted via tcg_out_call. */ + default: + g_assert_not_reached(); + } +} + +static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +{ + switch (op) { + case INDEX_op_goto_ptr: + return C_O0_I1(r); + + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i32: + case INDEX_op_st_i64: + return C_O0_I2(rZ, r); + + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return C_O0_I2(rZ, rZ); + + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + return C_O0_I2(LZ, L); + + case INDEX_op_ext8s_i32: + case INDEX_op_ext8s_i64: + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16s_i32: + case INDEX_op_ext16s_i64: + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extrl_i64_i32: + case INDEX_op_extrh_i64_i32: + case INDEX_op_ext_i32_i64: + case INDEX_op_not_i32: + case INDEX_op_not_i64: + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_ld8s_i32: + case INDEX_op_ld8s_i64: + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld16s_i32: + case INDEX_op_ld16s_i64: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld_i32: + case INDEX_op_ld_i64: + return C_O1_I1(r, r); + + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + return C_O1_I1(r, L); + + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + /* + * LoongArch insns for these ops don't have reg-imm forms, but we + * can express using andi/ori if ~constant satisfies + * TCG_CT_CONST_U12. 
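+         * For example, andc with constant a2 == ~0xff0 is emitted as
+         * andi a0, a1, 0xff0: it is ~a2, not a2, that has to fit U12.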
+ */ + return C_O1_I2(r, r, rC); + + case INDEX_op_shl_i32: + case INDEX_op_shl_i64: + case INDEX_op_shr_i32: + case INDEX_op_shr_i64: + case INDEX_op_sar_i32: + case INDEX_op_sar_i64: + case INDEX_op_rotl_i32: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i32: + case INDEX_op_rotr_i64: + return C_O1_I2(r, r, ri); + + case INDEX_op_add_i32: + case INDEX_op_add_i64: + return C_O1_I2(r, r, rI); + + case INDEX_op_and_i32: + case INDEX_op_and_i64: + case INDEX_op_nor_i32: + case INDEX_op_nor_i64: + case INDEX_op_or_i32: + case INDEX_op_or_i64: + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + /* LoongArch reg-imm bitops have their imms ZERO-extended */ + return C_O1_I2(r, r, rU); + + case INDEX_op_clz_i32: + case INDEX_op_clz_i64: + case INDEX_op_ctz_i32: + case INDEX_op_ctz_i64: + return C_O1_I2(r, r, rW); + + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + return C_O1_I2(r, r, rZ); + + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + /* Must deposit into the same register as input */ + return C_O1_I2(r, 0, rZ); + + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + return C_O1_I2(r, rZ, rN); + + case INDEX_op_mul_i32: + case INDEX_op_mul_i64: + case INDEX_op_mulsh_i32: + case INDEX_op_mulsh_i64: + case INDEX_op_muluh_i32: + case INDEX_op_muluh_i64: + case INDEX_op_div_i32: + case INDEX_op_div_i64: + case INDEX_op_divu_i32: + case INDEX_op_divu_i64: + case INDEX_op_rem_i32: + case INDEX_op_rem_i64: + case INDEX_op_remu_i32: + case INDEX_op_remu_i64: + return C_O1_I2(r, rZ, rZ); + + default: + g_assert_not_reached(); + } +} + +static const int tcg_target_callee_save_regs[] = { + TCG_REG_S0, /* used for the global env (TCG_AREG0) */ + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + TCG_REG_S9, + TCG_REG_RA, /* should be last for ABI compliance */ +}; + +/* Stack frame parameters. */ +#define REG_SIZE (TCG_TARGET_REG_BITS / 8) +#define SAVE_SIZE ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE) +#define TEMP_SIZE (CPU_TEMP_BUF_NLONGS * (int)sizeof(long)) +#define FRAME_SIZE \ + ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE + \ + TCG_TARGET_STACK_ALIGN - 1) & \ + -TCG_TARGET_STACK_ALIGN) +#define SAVE_OFS (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE) + +/* We're expecting to be able to use an immediate for frame allocation. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff); + +/* Generate global QEMU prologue and epilogue code */ +static void tcg_target_qemu_prologue(TCGContext *s) +{ + int i; + + tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE); + + /* TB prologue */ + tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE); + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], TCG_REG_SP, + SAVE_OFS + i * REG_SIZE); + } + +#if !defined(CONFIG_SOFTMMU) + if (USE_GUEST_BASE) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG); + } +#endif + + /* Call generated code */ + tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out_opc_jirl(s, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0); + + /* Return path for goto_ptr. 
Set return value to 0 */ + tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); + tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO); + + /* TB epilogue */ + tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); + for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) { + tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i], TCG_REG_SP, + SAVE_OFS + i * REG_SIZE); + } + + tcg_out_opc_addi_d(s, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE); + tcg_out_opc_jirl(s, TCG_REG_ZERO, TCG_REG_RA, 0); +} + +static void tcg_target_init(TCGContext *s) +{ + tcg_target_available_regs[TCG_TYPE_I32] = ALL_GENERAL_REGS; + tcg_target_available_regs[TCG_TYPE_I64] = ALL_GENERAL_REGS; + + tcg_target_call_clobber_regs = ALL_GENERAL_REGS; + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9); + + s->reserved_regs = 0; + tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_RESERVED); +} + +typedef struct { + DebugFrameHeader h; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2]; +} DebugFrame; + +#define ELF_HOST_MACHINE EM_LOONGARCH + +static const DebugFrame + debug_frame = { .h.cie.len = sizeof(DebugFrameCIE) - + 4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = + -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ + .h.cie.return_column = TCG_REG_RA, + + /* Total FDE size does not include the "len" member. */ + .h.fde.len = sizeof(DebugFrame) - + offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { 12, + TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ + (FRAME_SIZE & 0x7f) | + 0x80, /* ... 
uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) }, + .fde_reg_ofs = { + 0x80 + 23, 11, /* DW_CFA_offset, s0, -88 */ + 0x80 + 24, 10, /* DW_CFA_offset, s1, -80 */ + 0x80 + 25, 9, /* DW_CFA_offset, s2, -72 */ + 0x80 + 26, 8, /* DW_CFA_offset, s3, -64 */ + 0x80 + 27, 7, /* DW_CFA_offset, s4, -56 */ + 0x80 + 28, 6, /* DW_CFA_offset, s5, -48 */ + 0x80 + 29, 5, /* DW_CFA_offset, s6, -40 */ + 0x80 + 30, 4, /* DW_CFA_offset, s7, -32 */ + 0x80 + 31, 3, /* DW_CFA_offset, s8, -24 */ + 0x80 + 22, 2, /* DW_CFA_offset, s9, -16 */ + 0x80 + 1, 1, /* DW_CFA_offset, ra, -8 */ + } }; + +void tcg_register_jit(const void *buf, size_t buf_size) +{ + tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); +} diff --git a/tcg/loongarch64/tcg-target.h b/tcg/loongarch64/tcg-target.h new file mode 100644 index 0000000000000000000000000000000000000000..20f77b707dd9c651796da3d0c79749f47eb9df42 --- /dev/null +++ b/tcg/loongarch64/tcg-target.h @@ -0,0 +1,168 @@ +/* + * Tiny Code Generator for QEMU + * + * Copyright (c) 2023 Loongarch Technology + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2 or later, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program. If not, see . + * + */ + +#ifndef LOONGARCH_TCG_TARGET_H +#define LOONGARCH_TCG_TARGET_H + +/* + * Loongson removed the (incomplete) 32-bit support from kernel and toolchain + * for the initial upstreaming of this architecture, so don't bother and just + * support the LP64* ABI for now. 
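+ * (The LP64S/LP64F/LP64D variants differ only in floating-point
+ * argument passing, which this integer-only backend does not use.)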
+ */ + +#if defined(__loongarch64) +#define TCG_TARGET_REG_BITS 64 +#else +#error unsupported LoongArch register size +#endif + +#define TCG_TARGET_INSN_UNIT_SIZE 4 +#define TCG_TARGET_NB_REGS 32 +#define MAX_CODE_GEN_BUFFER_SIZE SIZE_MAX + +typedef enum { + TCG_REG_ZERO, + TCG_REG_RA, + TCG_REG_TP, + TCG_REG_SP, + TCG_REG_A0, + TCG_REG_A1, + TCG_REG_A2, + TCG_REG_A3, + TCG_REG_A4, + TCG_REG_A5, + TCG_REG_A6, + TCG_REG_A7, + TCG_REG_T0, + TCG_REG_T1, + TCG_REG_T2, + TCG_REG_T3, + TCG_REG_T4, + TCG_REG_T5, + TCG_REG_T6, + TCG_REG_T7, + TCG_REG_T8, + TCG_REG_RESERVED, + TCG_REG_S9, + TCG_REG_S0, + TCG_REG_S1, + TCG_REG_S2, + TCG_REG_S3, + TCG_REG_S4, + TCG_REG_S5, + TCG_REG_S6, + TCG_REG_S7, + TCG_REG_S8, + + /* aliases */ + TCG_AREG0 = TCG_REG_S0, + TCG_REG_TMP0 = TCG_REG_T8, + TCG_REG_TMP1 = TCG_REG_T7, + TCG_REG_TMP2 = TCG_REG_T6, +} TCGReg; + +/* used for function call generation */ +#define TCG_REG_CALL_STACK TCG_REG_SP +#define TCG_TARGET_STACK_ALIGN 16 +#define TCG_TARGET_CALL_ALIGN_ARGS 1 +#define TCG_TARGET_CALL_STACK_OFFSET 0 + +/* optional instructions */ +#define TCG_TARGET_HAS_movcond_i32 0 +#define TCG_TARGET_HAS_div_i32 1 +#define TCG_TARGET_HAS_rem_i32 1 +#define TCG_TARGET_HAS_div2_i32 0 +#define TCG_TARGET_HAS_rot_i32 1 +#define TCG_TARGET_HAS_deposit_i32 1 +#define TCG_TARGET_HAS_extract_i32 1 +#define TCG_TARGET_HAS_sextract_i32 0 +#define TCG_TARGET_HAS_extract2_i32 0 +#define TCG_TARGET_HAS_add2_i32 0 +#define TCG_TARGET_HAS_sub2_i32 0 +#define TCG_TARGET_HAS_mulu2_i32 0 +#define TCG_TARGET_HAS_muls2_i32 0 +#define TCG_TARGET_HAS_muluh_i32 1 +#define TCG_TARGET_HAS_mulsh_i32 1 +#define TCG_TARGET_HAS_ext8s_i32 1 +#define TCG_TARGET_HAS_ext16s_i32 1 +#define TCG_TARGET_HAS_ext8u_i32 1 +#define TCG_TARGET_HAS_ext16u_i32 1 +#define TCG_TARGET_HAS_bswap16_i32 1 +#define TCG_TARGET_HAS_bswap32_i32 1 +#define TCG_TARGET_HAS_not_i32 1 +#define TCG_TARGET_HAS_neg_i32 0 +#define TCG_TARGET_HAS_andc_i32 1 +#define TCG_TARGET_HAS_orc_i32 1 +#define TCG_TARGET_HAS_eqv_i32 0 +#define TCG_TARGET_HAS_nand_i32 0 +#define TCG_TARGET_HAS_nor_i32 1 +#define TCG_TARGET_HAS_clz_i32 1 +#define TCG_TARGET_HAS_ctz_i32 1 +#define TCG_TARGET_HAS_ctpop_i32 0 +#define TCG_TARGET_HAS_direct_jump 0 +#define TCG_TARGET_HAS_brcond2 0 +#define TCG_TARGET_HAS_setcond2 0 +#define TCG_TARGET_HAS_qemu_st8_i32 0 + +/* 64-bit operations */ +#define TCG_TARGET_HAS_movcond_i64 0 +#define TCG_TARGET_HAS_div_i64 1 +#define TCG_TARGET_HAS_rem_i64 1 +#define TCG_TARGET_HAS_div2_i64 0 +#define TCG_TARGET_HAS_rot_i64 1 +#define TCG_TARGET_HAS_deposit_i64 1 +#define TCG_TARGET_HAS_extract_i64 1 +#define TCG_TARGET_HAS_sextract_i64 0 +#define TCG_TARGET_HAS_extract2_i64 0 +#define TCG_TARGET_HAS_extrl_i64_i32 1 +#define TCG_TARGET_HAS_extrh_i64_i32 1 +#define TCG_TARGET_HAS_ext8s_i64 1 +#define TCG_TARGET_HAS_ext16s_i64 1 +#define TCG_TARGET_HAS_ext32s_i64 1 +#define TCG_TARGET_HAS_ext8u_i64 1 +#define TCG_TARGET_HAS_ext16u_i64 1 +#define TCG_TARGET_HAS_ext32u_i64 1 +#define TCG_TARGET_HAS_bswap16_i64 1 +#define TCG_TARGET_HAS_bswap32_i64 1 +#define TCG_TARGET_HAS_bswap64_i64 1 +#define TCG_TARGET_HAS_not_i64 1 +#define TCG_TARGET_HAS_neg_i64 0 +#define TCG_TARGET_HAS_andc_i64 1 +#define TCG_TARGET_HAS_orc_i64 1 +#define TCG_TARGET_HAS_eqv_i64 0 +#define TCG_TARGET_HAS_nand_i64 0 +#define TCG_TARGET_HAS_nor_i64 1 +#define TCG_TARGET_HAS_clz_i64 1 +#define TCG_TARGET_HAS_ctz_i64 1 +#define TCG_TARGET_HAS_ctpop_i64 0 +#define TCG_TARGET_HAS_add2_i64 0 +#define TCG_TARGET_HAS_sub2_i64 0 +#define TCG_TARGET_HAS_mulu2_i64 
0 +#define TCG_TARGET_HAS_muls2_i64 0 +#define TCG_TARGET_HAS_muluh_i64 1 +#define TCG_TARGET_HAS_mulsh_i64 1 + +/* not defined -- call should be eliminated at compile time */ +void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t); + +#define TCG_TARGET_DEFAULT_MO (0) +#define TCG_TARGET_HAS_MEMORY_BSWAP 0 +#define TCG_TARGET_NEED_LDST_LABELS + +#endif /* LOONGARCH_TCG_TARGET_H */ diff --git a/tcg/optimize.c b/tcg/optimize.c index 2397f2cf93ae1d1bc2c70ef63d48ef4bc1897f04..e57300095102c27cfe7889455c5d2ba0d065db97 100644 --- a/tcg/optimize.c +++ b/tcg/optimize.c @@ -308,13 +308,13 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) CASE_OP_32_64(mul): return x * y; - CASE_OP_32_64(and): + CASE_OP_32_64_VEC(and): return x & y; - CASE_OP_32_64(or): + CASE_OP_32_64_VEC(or): return x | y; - CASE_OP_32_64(xor): + CASE_OP_32_64_VEC(xor): return x ^ y; case INDEX_op_shl_i32: @@ -347,16 +347,16 @@ static uint64_t do_constant_folding_2(TCGOpcode op, uint64_t x, uint64_t y) case INDEX_op_rotl_i64: return rol64(x, y & 63); - CASE_OP_32_64(not): + CASE_OP_32_64_VEC(not): return ~x; CASE_OP_32_64(neg): return -x; - CASE_OP_32_64(andc): + CASE_OP_32_64_VEC(andc): return x & ~y; - CASE_OP_32_64(orc): + CASE_OP_32_64_VEC(orc): return x | ~y; CASE_OP_32_64(eqv): @@ -751,6 +751,12 @@ static bool fold_const2(OptContext *ctx, TCGOp *op) return false; } +static bool fold_commutative(OptContext *ctx, TCGOp *op) +{ + swap_commutative(op->args[0], &op->args[1], &op->args[2]); + return false; +} + static bool fold_const2_commutative(OptContext *ctx, TCGOp *op) { swap_commutative(op->args[0], &op->args[1], &op->args[2]); @@ -905,6 +911,16 @@ static bool fold_add(OptContext *ctx, TCGOp *op) return false; } +/* We cannot as yet do_constant_folding with vectors. */ +static bool fold_add_vec(OptContext *ctx, TCGOp *op) +{ + if (fold_commutative(ctx, op) || + fold_xi_to_x(ctx, op, 0)) { + return true; + } + return false; +} + static bool fold_addsub2(OptContext *ctx, TCGOp *op, bool add) { if (arg_is_const(op->args[2]) && arg_is_const(op->args[3]) && @@ -1938,10 +1954,10 @@ static bool fold_sub_to_neg(OptContext *ctx, TCGOp *op) return false; } -static bool fold_sub(OptContext *ctx, TCGOp *op) +/* We cannot as yet do_constant_folding with vectors. */ +static bool fold_sub_vec(OptContext *ctx, TCGOp *op) { - if (fold_const2(ctx, op) || - fold_xx_to_i(ctx, op, 0) || + if (fold_xx_to_i(ctx, op, 0) || fold_xi_to_x(ctx, op, 0) || fold_sub_to_neg(ctx, op)) { return true; @@ -1949,6 +1965,11 @@ static bool fold_sub(OptContext *ctx, TCGOp *op) return false; } +static bool fold_sub(OptContext *ctx, TCGOp *op) +{ + return fold_const2(ctx, op) || fold_sub_vec(ctx, op); +} + static bool fold_sub2(OptContext *ctx, TCGOp *op) { return fold_addsub2(ctx, op, false); @@ -2052,9 +2073,12 @@ void tcg_optimize(TCGContext *s) * Sorted alphabetically by opcode as much as possible. 
*/ switch (opc) { - CASE_OP_32_64_VEC(add): + CASE_OP_32_64(add): done = fold_add(&ctx, op); break; + case INDEX_op_add_vec: + done = fold_add_vec(&ctx, op); + break; CASE_OP_32_64(add2): done = fold_add2(&ctx, op); break; @@ -2193,9 +2217,12 @@ void tcg_optimize(TCGContext *s) CASE_OP_32_64(sextract): done = fold_sextract(&ctx, op); break; - CASE_OP_32_64_VEC(sub): + CASE_OP_32_64(sub): done = fold_sub(&ctx, op); break; + case INDEX_op_sub_vec: + done = fold_sub_vec(&ctx, op); + break; CASE_OP_32_64(sub2): done = fold_sub2(&ctx, op); break; diff --git a/tcg/sw64/tcg-target-con-set.h b/tcg/sw64/tcg-target-con-set.h new file mode 100755 index 0000000000000000000000000000000000000000..71fdfdcbef6b95e6f1f4abca4f2396a9c73d0692 --- /dev/null +++ b/tcg/sw64/tcg-target-con-set.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Define SW_64 target-specific constraint sets. + * Copyright (c) 2021 Linaro + */ + +/* + * C_On_Im(...) defines a constraint set with outputs and inputs. + * Each operand should be a sequence of constraint letters as defined by + * tcg-target-con-str.h; the constraint combination is inclusive or. + */ +C_O0_I1(r) +C_O0_I2(lZ, l) +C_O0_I2(r, rA) +C_O0_I2(rZ, r) +C_O0_I2(w, r) +C_O1_I1(r, l) +C_O1_I1(r, r) +C_O1_I1(w, r) +C_O1_I1(w, w) +C_O1_I1(w, wr) +C_O1_I2(r, 0, rZ) +C_O1_I2(r, r, r) +C_O1_I2(r, r, rA) +C_O1_I2(r, r, rAL) +C_O1_I2(r, r, ri) +C_O1_I2(r, r, rL) +C_O1_I2(r, rZ, rZ) +C_O1_I2(w, 0, w) +C_O1_I2(w, w, w) +C_O1_I2(w, w, wN) +C_O1_I2(w, w, wO) +C_O1_I2(w, w, wZ) +C_O1_I3(w, w, w, w) +C_O1_I4(r, r, rA, rZ, rZ) +C_O2_I4(r, r, rZ, rZ, rA, rMZ) +C_O1_I4(r, r, rU, rZ, rZ) +C_O0_I2(r, rU) +C_O1_I2(r, r, rU) diff --git a/tcg/sw64/tcg-target-con-str.h b/tcg/sw64/tcg-target-con-str.h new file mode 100755 index 0000000000000000000000000000000000000000..47edb3837bc8df5bec6a8e98df1587eaedb1002d --- /dev/null +++ b/tcg/sw64/tcg-target-con-str.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Define sw_64 target-specific operand constraints. + * Copyright (c) 2021 Linaro + */ + +/* + * Define constraint letters for register sets: + * REGS(letter, register_mask) + */ +REGS('r', ALL_GENERAL_REGS) +REGS('l', ALL_QLDST_REGS) +REGS('w', ALL_VECTOR_REGS) + +/* + * Define constraint letters for constants: + * CONST(letter, TCG_CT_CONST_* bit set) + */ + +CONST('Z', TCG_CT_CONST_ZERO) +CONST('A', TCG_CT_CONST_LONG) +CONST('M', TCG_CT_CONST_MONE) +CONST('O', TCG_CT_CONST_ORRI) +CONST('W', TCG_CT_CONST_WORD) +CONST('L', TCG_CT_CONST_LONG) +CONST('U', TCG_CT_CONST_U8) +CONST('S', TCG_CT_CONST_S8) + diff --git a/tcg/sw64/tcg-target.c.inc b/tcg/sw64/tcg-target.c.inc new file mode 100755 index 0000000000000000000000000000000000000000..da938a73824285612e40e43a6d935c7f5bae6d2c --- /dev/null +++ b/tcg/sw64/tcg-target.c.inc @@ -0,0 +1,2464 @@ +/* + * Initial TCG Implementation for sw_64 + * + */ + +#include "../tcg-pool.c.inc" +#include "qemu/bitops.h" + +/* We're going to re-use TCGType in setting of the SF bit, which controls + the size of the operation performed. If we know the values match, it + makes things much cleaner. 
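+   The QEMU_BUILD_BUG_ON just below verifies this assumption at
+   build time.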
*/ +QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1); + +#ifdef CONFIG_DEBUG_TCG +static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = { + "X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", + "X8", "X9", "X10", "X11", "X12", "X13", "X14", "fp", + "X16", "X17", "X18", "X19", "X20", "X21", "X22", "X23", + "X24", "X25", "X26", "X27", "X28", "X29", "Xsp", "X31", + + "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", + "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", + "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", + "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", +}; +#endif /* CONFIG_DEBUG_TCG */ + +static const int tcg_target_reg_alloc_order[] = { + /* TCG_REG_X9 qemu saved for AREG0*/ + TCG_REG_X10, TCG_REG_X11, TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, + + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4, + TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8, + + TCG_REG_X22, TCG_REG_X23, /* TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, */ + + /* TCG_REG_SP=TCG_REG_X15 saved for system*/ + TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, TCG_REG_X20, TCG_REG_X21, TCG_REG_X28, /* TCG_REG_X29, TCG_REG_X30, TCG_REG_X31 */ + + /* TCG_REG_TMP=TCG_REG_X27 reserved as temporary register */ + /* TCG_REG_TMP2=TCG_REG_X25 reserved as temporary register */ + /* TCG_REG_TMP3=TCG_REG_X24 reserved as temporary register */ + /* TCG_REG_RA=TCG_REG_X26 reserved as temporary */ + /* TCG_REG_GP=TCG_REG_X29 gp saved for system*/ + /* TCG_REG_SP=TCG_REG_X30 sp saved for system*/ + /* TCG_REG_ZERO=TCG_REG_X31 zero saved for system*/ + + TCG_REG_F2, TCG_REG_F3, TCG_REG_F4, TCG_REG_F5, TCG_REG_F6, TCG_REG_F7, TCG_REG_F8, TCG_REG_F9, /* f2-f9 saved registers */ + /* TCG_VEC_TMP=TCG_REG_F10, TCG_VEC_TMP2=TCG_REG_F11, are saved as temporary */ + TCG_REG_F12, TCG_REG_F13, TCG_REG_F14, TCG_REG_F15, /* f10-f15 temporary registers */ + + TCG_REG_F22, TCG_REG_F23, TCG_REG_F24, TCG_REG_F25, TCG_REG_F26, TCG_REG_F27, TCG_REG_F28, TCG_REG_F29, TCG_REG_F30, /* f22-f30 temporary registers */ + /* TCG_REG_F31, zero saved for system */ + + TCG_REG_F16, TCG_REG_F17, TCG_REG_F18, TCG_REG_F19, TCG_REG_F20, TCG_REG_F21, /* input args */ + + TCG_REG_F0, TCG_REG_F1, /*output args */ +}; + +static const int tcg_target_call_iarg_regs[6] = { + TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, TCG_REG_X20, TCG_REG_X21, +}; +static const int tcg_target_call_oarg_regs[1] = { + TCG_REG_X0, +}; + +#define TCG_REG_TMP TCG_REG_X27 +#define TCG_REG_TMP2 TCG_REG_X25 +#define TCG_REG_TMP3 TCG_REG_X24 +#define TCG_FLOAT_TMP TCG_REG_F10 +#define TCG_FLOAT_TMP2 TCG_REG_F11 + +#define REG0(I) (const_args[I] ? TCG_REG_ZERO : (TCGReg)args[I]) +#define tcg_out_insn_jump tcg_out_insn_ldst +#define tcg_out_insn_bitReg tcg_out_insn_simpleReg +#define zeroExt 0 +#define sigExt 1 +#define noPara 0//represent this parament of function isnot needed. + +#ifndef CONFIG_SOFTMMU +#define USE_GUEST_BASE (guest_base != 0 || TARGET_LONG_BITS == 32) +#define TCG_REG_GUEST_BASE TCG_REG_X14 +#endif + +static bool reloc_pc21(tcg_insn_unit *src_rw, const tcg_insn_unit *target) +{ + const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw); + ptrdiff_t offset = target - src_rx -1; + + if (offset == sextract64(offset, 0, 21)) { + /* read instruction, mask away previous PC_REL21 parameter contents, + set the proper offset, then write back the instruction. 
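+       deposit32() below performs all three steps at once, touching
+       only bits [20:0] of the instruction.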
*/ + *src_rw = deposit32(*src_rw, 0, 21, offset); + return true; + } + return false; +} + +static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend) +{ + tcg_debug_assert(addend == 0); + switch (type) { + case R_SW_64_BRADDR: + value = value; + return reloc_pc21(code_ptr, (const tcg_insn_unit *)value); + default: + g_assert_not_reached(); + } +} + +/* +* contact with "tcg-target-con-str.h" +*/ +#define TCG_CT_CONST_ZERO 0x100 +#define TCG_CT_CONST_LONG 0x200 +#define TCG_CT_CONST_MONE 0x400 +#define TCG_CT_CONST_ORRI 0x800 +#define TCG_CT_CONST_WORD 0X1000 +#define TCG_CT_CONST_U8 0x2000 +#define TCG_CT_CONST_S8 0X4000 + +#define ALL_GENERAL_REGS 0xffffffffu +#define ALL_VECTOR_REGS 0xffffffff00000000ull + +#ifdef CONFIG_SOFTMMU +#define ALL_QLDST_REGS \ + (ALL_GENERAL_REGS & ~((1 << TCG_REG_X0) | (1 << TCG_REG_X1) | \ + (1 << TCG_REG_X2) | (1 << TCG_REG_X3))) +#else +#define ALL_QLDST_REGS ALL_GENERAL_REGS +#endif + +/* sw test if a constant matches the constraint */ +static bool tcg_target_const_match(tcg_target_long val, TCGType type, int ct) +{ + if (ct & TCG_CT_CONST) { + return 1; + } + if (type == TCG_TYPE_I32) { + val = (int32_t)val; + } + if ((ct & TCG_CT_CONST_U8) && 0 <= val && val <= 255) { + return 1; + } + if ((ct & TCG_CT_CONST_LONG)) { + return 1; + } + if ((ct & TCG_CT_CONST_MONE)) { + return 1; + } + if ((ct & TCG_CT_CONST_ORRI)) { + return 1; + } + if ((ct & TCG_CT_CONST_WORD)) { + return 1; + } + if ((ct & TCG_CT_CONST_ZERO) && val == 0) { + return 1; + } + return 0; +} + +#define OPC_OP(x) (((x) & 0x3f) << 26) +#define OPC_FUNC(x) (((x) & 0xff) << 5) +#define OPC_FUNC_COMPLEX(x) (((x) & 0xff) << 10) +typedef enum { + OPC_NOP =0X43ff075f, + OPC_SYS_CALL =OPC_OP(0x00), + OPC_CALL =OPC_OP(0x01), + OPC_RET =OPC_OP(0x02), + OPC_JMP =OPC_OP(0x03), + OPC_BR =OPC_OP(0x04), + OPC_BSR =OPC_OP(0x05), + OPC_PRI_RET =OPC_OP(0x07), + OPC_LDWE =OPC_OP(0x09), + OPC_LDSE =OPC_OP(0x0A), + OPC_LDDE =OPC_OP(0x0B), + OPC_VLDS =OPC_OP(0x0C), + OPC_VLDD =OPC_OP(0x0D), + OPC_VSTS =OPC_OP(0x0E), + OPC_VSTD =OPC_OP(0x0F), + + OPC_LDBU =OPC_OP(0x20), + OPC_LDHU =OPC_OP(0x21), + OPC_LDW =OPC_OP(0x22), + OPC_LDL =OPC_OP(0x23), + OPC_LDL_U =OPC_OP(0x24), + OPC_FLDS =OPC_OP(0X26), + OPC_PRI_LD =OPC_OP(0x25), + OPC_FLDD =OPC_OP(0X27), + OPC_STB =OPC_OP(0X28), + OPC_STH =OPC_OP(0x29), + OPC_STW =OPC_OP(0x2a), + OPC_STL =OPC_OP(0x2B), + OPC_STL_U =OPC_OP(0x2C), + OPC_PRI_ST =OPC_OP(0x2D), + OPC_FSTS =OPC_OP(0x2E), + OPC_FSTD =OPC_OP(0x2F), + + OPC_BEQ =OPC_OP(0x30), + OPC_BNE =OPC_OP(0x31), + OPC_BLT =OPC_OP(0x32), + OPC_BLE =OPC_OP(0x33), + OPC_BGT =OPC_OP(0x34), + OPC_BGE =OPC_OP(0x35), + OPC_BLBC =OPC_OP(0x36), + OPC_BLBS =OPC_OP(0x37), + + OPC_FBEQ =OPC_OP(0x38), + OPC_FBNE =OPC_OP(0x39), + OPC_FBLT =OPC_OP(0x3A), + OPC_FBLE =OPC_OP(0x3B), + OPC_FBGT =OPC_OP(0x3C), + OPC_FBGE =OPC_OP(0x3D), + OPC_LDI =OPC_OP(0x3E), + OPC_LDIH =OPC_OP(0x3F), + + OPC_ADDW =(OPC_OP(0x10) | OPC_FUNC(0x0)), + OPC_ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x0)), + OPC_SUBW =(OPC_OP(0x10) | OPC_FUNC(0x1)), + OPC_SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x1)), + OPC_S4ADDW =(OPC_OP(0x10) | OPC_FUNC(0x02)), + OPC_S4ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x02)), + OPC_S4SUBW =(OPC_OP(0x10) | OPC_FUNC(0x03)), + OPC_S4SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x03)), + + OPC_S8ADDW =(OPC_OP(0x10) | OPC_FUNC(0x04)), + OPC_S8ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x04)), + OPC_S8SUBW =(OPC_OP(0x10) | OPC_FUNC(0x05)), + OPC_S8SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x05)), + + OPC_ADDL =(OPC_OP(0x10) | OPC_FUNC(0x8)), + OPC_ADDL_I 
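+    /* major opcode 0x12 marks the immediate form of the 0x10 register op */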
=(OPC_OP(0x12) | OPC_FUNC(0x8)), + OPC_SUBL =(OPC_OP(0x10) | OPC_FUNC(0x9)), + OPC_SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0x9)), + + OPC_S4ADDL =(OPC_OP(0x10) | OPC_FUNC(0xA)), + OPC_S4ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0xA)), + OPC_S4SUBL =(OPC_OP(0x10) | OPC_FUNC(0xB)), + OPC_S4SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0xB)), + + OPC_S8ADDL =(OPC_OP(0x10) | OPC_FUNC(0xC)), + OPC_S8ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0xC)), + OPC_S8SUBL =(OPC_OP(0x10) | OPC_FUNC(0xD)), + OPC_S8SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0xD)), + + OPC_MULW =(OPC_OP(0x10) | OPC_FUNC(0x10)), + OPC_MULW_I =(OPC_OP(0x12) | OPC_FUNC(0x10)), + OPC_MULL =(OPC_OP(0x10) | OPC_FUNC(0x18)), + OPC_MULL_I =(OPC_OP(0x12) | OPC_FUNC(0x18)), + + OPC_UMULH =(OPC_OP(0x10) | OPC_FUNC(0x19)), + OPC_UMULH_I =(OPC_OP(0x12) | OPC_FUNC(0x19)), + + OPC_CTPOP =(OPC_OP(0x10) | OPC_FUNC(0x58)), + OPC_CTLZ =(OPC_OP(0x10) | OPC_FUNC(0x59)), + OPC_CTTZ =(OPC_OP(0x10) | OPC_FUNC(0x5A)), + + OPC_ZAP =(OPC_OP(0x10) | OPC_FUNC(0x68)), + OPC_ZAP_I =(OPC_OP(0x12) | OPC_FUNC(0x68)), + OPC_ZAPNOT =(OPC_OP(0x10) | OPC_FUNC(0x69)), + OPC_ZAPNOT_I =(OPC_OP(0x12) | OPC_FUNC(0x69)), + + OPC_SEXTB =(OPC_OP(0x10) | OPC_FUNC(0x6A)), + OPC_SEXTB_I =(OPC_OP(0x12) | OPC_FUNC(0x6A)), + OPC_SEXTH =(OPC_OP(0x10) | OPC_FUNC(0x6B)), + OPC_SEXTH_I =(OPC_OP(0x12) | OPC_FUNC(0x6B)), + + OPC_CMPEQ =(OPC_OP(0x10) | OPC_FUNC(0x28)), + OPC_CMPEQ_I =(OPC_OP(0x12) | OPC_FUNC(0x28)), + + OPC_CMPLT =(OPC_OP(0x10) | OPC_FUNC(0x29)), + OPC_CMPLT_I =(OPC_OP(0x12) | OPC_FUNC(0x29)), + OPC_CMPLE =(OPC_OP(0x10) | OPC_FUNC(0x2A)), + OPC_CMPLE_I =(OPC_OP(0x12) | OPC_FUNC(0x2A)), + + OPC_CMPULT =(OPC_OP(0x10) | OPC_FUNC(0x2B)), + OPC_CMPULT_I =(OPC_OP(0x12) | OPC_FUNC(0x2B)), + OPC_CMPULE =(OPC_OP(0x10) | OPC_FUNC(0x2C)), + OPC_CMPULE_I =(OPC_OP(0x12) | OPC_FUNC(0x2C)), + + OPC_AND =(OPC_OP(0x10) | OPC_FUNC(0x38)), + OPC_BIC =(OPC_OP(0x10) | OPC_FUNC(0x39)), + OPC_BIS =(OPC_OP(0x10) | OPC_FUNC(0x3A)), + OPC_ORNOT =(OPC_OP(0x10) | OPC_FUNC(0x3B)), + OPC_XOR =(OPC_OP(0x10) | OPC_FUNC(0x3C)), + OPC_EQV =(OPC_OP(0x10) | OPC_FUNC(0x3D)), + + OPC_AND_I =(OPC_OP(0x12) | OPC_FUNC(0x38)), + OPC_BIC_I =(OPC_OP(0x12) | OPC_FUNC(0x39)), + OPC_BIS_I =(OPC_OP(0x12) | OPC_FUNC(0x3A)), + OPC_ORNOT_I =(OPC_OP(0x12) | OPC_FUNC(0x3B)), + OPC_XOR_I =(OPC_OP(0x12) | OPC_FUNC(0x3C)), + OPC_EQV_I =(OPC_OP(0x12) | OPC_FUNC(0x3D)), + + OPC_SLL =(OPC_OP(0x10) | OPC_FUNC(0x48)), + OPC_SRL =(OPC_OP(0x10) | OPC_FUNC(0x49)), + OPC_SRA =(OPC_OP(0x10) | OPC_FUNC(0x4A)), + OPC_SLL_I =(OPC_OP(0x12) | OPC_FUNC(0x48)), + OPC_SRL_I =(OPC_OP(0x12) | OPC_FUNC(0x49)), + OPC_SRA_I =(OPC_OP(0x12) | OPC_FUNC(0x4A)), + + OPC_SELEQ =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x00)), + OPC_SELGE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x01)), + OPC_SELGT =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x02)), + OPC_SELLE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x03)), + OPC_SELLT =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x04)), + OPC_SELNE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x05)), + OPC_SELLBC =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x06)), + OPC_SELLBS =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x07)), + OPC_SELEQ_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x00)), + OPC_SELGE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x01)), + OPC_SELGT_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x02)), + OPC_SELLE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x03)), + OPC_SELLT_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x04)), + OPC_SELNE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x05)), + OPC_SELLBC_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x06)), + OPC_SELLBS_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x07)), + + OPC_INS0B =(OPC_OP(0x10) | OPC_FUNC(0x40)), + OPC_INS1B 
=(OPC_OP(0x10) | OPC_FUNC(0x41)), + OPC_INS2B =(OPC_OP(0x10) | OPC_FUNC(0x42)), + OPC_INS3B =(OPC_OP(0x10) | OPC_FUNC(0x43)), + OPC_INS4B =(OPC_OP(0x10) | OPC_FUNC(0x44)), + OPC_INS5B =(OPC_OP(0x10) | OPC_FUNC(0x45)), + OPC_INS6B =(OPC_OP(0x10) | OPC_FUNC(0x46)), + OPC_INS7B =(OPC_OP(0x10) | OPC_FUNC(0x47)), + OPC_INS0B_I =(OPC_OP(0x12) | OPC_FUNC(0x40)), + OPC_INS1B_I =(OPC_OP(0x12) | OPC_FUNC(0x41)), + OPC_INS2B_I =(OPC_OP(0x12) | OPC_FUNC(0x42)), + OPC_INS3B_I =(OPC_OP(0x12) | OPC_FUNC(0x43)), + OPC_INS4B_I =(OPC_OP(0x12) | OPC_FUNC(0x44)), + OPC_INS5B_I =(OPC_OP(0x12) | OPC_FUNC(0x45)), + OPC_INS6B_I =(OPC_OP(0x12) | OPC_FUNC(0x46)), + OPC_INS7B_I =(OPC_OP(0x12) | OPC_FUNC(0x47)), + + OPC_EXTLB =(OPC_OP(0x10) | OPC_FUNC(0x50)), + OPC_EXTLH =(OPC_OP(0x10) | OPC_FUNC(0x51)), + OPC_EXTLW =(OPC_OP(0x10) | OPC_FUNC(0x52)), + OPC_EXTLL =(OPC_OP(0x10) | OPC_FUNC(0x53)), + OPC_EXTHB =(OPC_OP(0x10) | OPC_FUNC(0x54)), + OPC_EXTHH =(OPC_OP(0x10) | OPC_FUNC(0x55)), + OPC_EXTHW =(OPC_OP(0x10) | OPC_FUNC(0x56)), + OPC_EXTHL =(OPC_OP(0x10) | OPC_FUNC(0x57)), + OPC_EXTLB_I =(OPC_OP(0x12) | OPC_FUNC(0x50)), + OPC_EXTLH_I =(OPC_OP(0x12) | OPC_FUNC(0x51)), + OPC_EXTLW_I =(OPC_OP(0x12) | OPC_FUNC(0x52)), + OPC_EXTLL_I =(OPC_OP(0x12) | OPC_FUNC(0x53)), + OPC_EXTHB_I =(OPC_OP(0x12) | OPC_FUNC(0x54)), + OPC_EXTHH_I =(OPC_OP(0x12) | OPC_FUNC(0x55)), + OPC_EXTHW_I =(OPC_OP(0x12) | OPC_FUNC(0x56)), + OPC_EXTHL_I =(OPC_OP(0x12) | OPC_FUNC(0x57)), + + OPC_MASKLB =(OPC_OP(0x10) | OPC_FUNC(0x60)), + OPC_MASKLH =(OPC_OP(0x10) | OPC_FUNC(0x61)), + OPC_MASKLW =(OPC_OP(0x10) | OPC_FUNC(0x62)), + OPC_MASKLL =(OPC_OP(0x10) | OPC_FUNC(0x63)), + OPC_MASKHB =(OPC_OP(0x10) | OPC_FUNC(0x64)), + OPC_MASKHH =(OPC_OP(0x10) | OPC_FUNC(0x65)), + OPC_MASKHW =(OPC_OP(0x10) | OPC_FUNC(0x66)), + OPC_MASKHL =(OPC_OP(0x10) | OPC_FUNC(0x67)), + OPC_MASKLB_I =(OPC_OP(0x12) | OPC_FUNC(0x60)), + OPC_MASKLH_I =(OPC_OP(0x12) | OPC_FUNC(0x61)), + OPC_MASKLW_I =(OPC_OP(0x12) | OPC_FUNC(0x62)), + OPC_MASKLL_I =(OPC_OP(0x12) | OPC_FUNC(0x63)), + OPC_MASKHB_I =(OPC_OP(0x12) | OPC_FUNC(0x64)), + OPC_MASKHH_I =(OPC_OP(0x12) | OPC_FUNC(0x65)), + OPC_MASKHW_I =(OPC_OP(0x12) | OPC_FUNC(0x66)), + OPC_MASKHL_I =(OPC_OP(0x12) | OPC_FUNC(0x67)), + + OPC_CNPGEB =(OPC_OP(0x10) | OPC_FUNC(0x6C)), + OPC_CNPGEB_I =(OPC_OP(0x12) | OPC_FUNC(0x6C)), + + OPC_MEMB =(OPC_OP(0x06) | OPC_FUNC(0x0)), + OPC_RTC =(OPC_OP(0x06) | OPC_FUNC(0x20)), + + /*float insn*/ + OPC_RFPCR = (OPC_OP(0x18) | OPC_FUNC(0x50)), + OPC_WFPCR = (OPC_OP(0x18) | OPC_FUNC(0x51)), + OPC_SETFPEC0 = (OPC_OP(0x18) | OPC_FUNC(0x54)), + OPC_SETFPEC1 = (OPC_OP(0x18) | OPC_FUNC(0x55)), + OPC_SETFPEC2 = (OPC_OP(0x18) | OPC_FUNC(0x56)), + OPC_SETFPEC3 = (OPC_OP(0x18) | OPC_FUNC(0x57)), + + + OPC_IFMOVS = (OPC_OP(0x18) | OPC_FUNC(0x40)), + OPC_IFMOVD = (OPC_OP(0x18) | OPC_FUNC(0x41)), + OPC_FIMOVS = (OPC_OP(0x10) | OPC_FUNC(0x70)), + OPC_FIMOVD = (OPC_OP(0x10) | OPC_FUNC(0x78)), + + /*translate S--D*/ + /*translate S/D--Long*/ + OPC_FCVTSD = (OPC_OP(0x18) | OPC_FUNC(0x20)), + OPC_FCVTDS = (OPC_OP(0x18) | OPC_FUNC(0x21)), + OPC_FCVTDL_G = (OPC_OP(0x18) | OPC_FUNC(0x22)), + OPC_FCVTDL_P = (OPC_OP(0x18) | OPC_FUNC(0x23)), + OPC_FCVTDL_Z = (OPC_OP(0x18) | OPC_FUNC(0x24)), + OPC_FCVTDL_N = (OPC_OP(0x18) | OPC_FUNC(0x25)), + OPC_FCVTDL = (OPC_OP(0x18) | OPC_FUNC(0x27)), + OPC_FCVTLS = (OPC_OP(0x18) | OPC_FUNC(0x2D)), + OPC_FCVTLD = (OPC_OP(0x18) | OPC_FUNC(0x2F)), + + + OPC_FADDS = (OPC_OP(0x18) | OPC_FUNC(0x00)), + OPC_FADDD = (OPC_OP(0x18) | OPC_FUNC(0x01)), + OPC_FSUBS = (OPC_OP(0x18) | OPC_FUNC(0x02)), + 
OPC_FSUBD = (OPC_OP(0x18) | OPC_FUNC(0x03)), + OPC_FMULS = (OPC_OP(0x18) | OPC_FUNC(0x04)), + OPC_FMULD = (OPC_OP(0x18) | OPC_FUNC(0x05)), + OPC_FDIVS = (OPC_OP(0x18) | OPC_FUNC(0x06)), + OPC_FDIVD = (OPC_OP(0x18) | OPC_FUNC(0x07)), + OPC_FSQRTS = (OPC_OP(0x18) | OPC_FUNC(0x08)), + OPC_FSQRTD = (OPC_OP(0x18) | OPC_FUNC(0x09)), +}SW_64Insn; + +static inline uint32_t tcg_in32(TCGContext *s) +{ + uint32_t v = *(uint32_t *)s->code_ptr; + return v; +} + +/* + * SW instruction format of br(alias jump) + * insn = opcode[31,26]:Rd[25,21]:disp[20,0], + */ +static void tcg_out_insn_br(TCGContext *s, SW_64Insn insn, TCGReg rd, intptr_t imm64) +{ + tcg_debug_assert(imm64 <= 0xfffff && imm64 >= -0x100000); + tcg_out32(s, insn | (rd & 0x1f) << 21 | (imm64 & 0x1fffff)); +} + +/* + * SW instruction format of (load and store) + * insn = opcode[31,26]:rd[25,21]:rn[20,16]:disp[15,0] + */ +static void tcg_out_insn_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t imm16) +{ + tcg_debug_assert(imm16 <= 0x7fff && imm16 >= -0x8000); + tcg_out32(s, insn | (rd & 0x1f) << 21 | (rn & 0x1f) << 16 | (imm16 & 0xffff)); +} + +/* + * SW instruction format of simple operator for Register + * insn = opcode[31,26]:rn(ra)[25,21]:rn(rb)[20,16]:Zeors[15,13]:function[12,5]:rd(rc)[4,0] + */ +static void tcg_out_insn_simpleReg(TCGContext *s, SW_64Insn insn,TCGReg rd, TCGReg rn, TCGReg rm) +{ + tcg_out32(s, insn | (rn & 0x1f) << 21 | (rm & 0x1f) << 16 | (rd & 0x1f)); +} + +/* + * SW instruction format of simple operator for imm + * insn = opcode[31,26]:rn(ra)[25,21]:disp[20,13]:function[12,5]:rd(rc)[4,0] + */ +static void tcg_out_simple(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, intptr_t imm64) +{ + if (imm64 <= 0x7f && imm64 >= -0x80) { + tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f)); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP3, imm64); + tcg_out_insn_simpleReg(s, insn_Reg, rd, rn, TCG_REG_TMP3); + } +} + +static void tcg_out_insn_simpleImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, unsigned long imm64) +{ + tcg_debug_assert(imm64 <= 255); + tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f)); +} + +/* + * sw bit operation: and bis etc + */ +static void tcg_out_bit(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, unsigned long imm64) +{ + if (imm64 <= 255) { + tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f)); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64); + tcg_out_insn_bitReg(s, insn_Reg, rd, rn, TCG_REG_TMP); + } +} + +/* + * SW instruction format of complex operator + * insn = opcode[31,26]:rd[25,21]:rn[20,16],function[15,10]:rm[9,5]:rx[4,0] + */ +static void tcg_out_insn_complexReg(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, TCGReg rn, TCGReg rm) +{ + tcg_out32(s, insn | (cond & 0x1f) << 21 | (rn & 0x1f) << 16 | (rm & 0x1f) << 5 | (rd & 0x1f)); +} + +static void tcg_out_insn_complexImm(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, intptr_t imm8, TCGReg rm) +{ + tcg_out32(s, insn | (cond & 0x1f) << 21 | (imm8 & 0xff) << 13 | (rm & 0x1f) << 5 | (rd & 0x1f)); +} + +static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) +{ + if (ext == TCG_TYPE_I64) { + tcg_out_insn_simpleReg(s, OPC_BIS, rd, rn, TCG_REG_ZERO); + } else { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); + } +} + +static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long orig) +{ + 
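+    /*
+     * Decompose the constant into sign-extended 16-bit chunks and
+     * rebuild it with LDIH/LDI.  Because each chunk is sign-extended
+     * when applied, the next chunk is taken from (val - lN) >> 16 so
+     * the borrow is absorbed; "extra" splits l1 in two when its sign
+     * bit would otherwise corrupt a non-negative 32-bit value.
+     */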
tcg_target_long l0=0, l1=0, l2=0, l3=0, extra=0; + tcg_target_long val = orig; + TCGReg rs = TCG_REG_ZERO; + + if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) { + val = (int32_t)val;//val64bit + } + + if (orig == (int16_t)orig) { + tcg_out_insn_ldst(s, OPC_LDI, rd, TCG_REG_ZERO, (int16_t)orig); + return; + } + + if (orig == (uint8_t)orig) { + tcg_out_insn_simpleImm(s, OPC_BIS_I, rd, TCG_REG_ZERO, (uint8_t)orig); + return; + } + + if (type == TCG_TYPE_I32) { + val = (int32_t)val; + } + + l0 = (int16_t)val; + val = (val - l0) >> 16; + l1 = (int16_t)val; + + if (orig >> 31 == -1 || orig >> 31 == 0) { + if (l1 < 0 && orig >= 0) { + extra = 0x4000; + l1 = (int16_t)(val - 0x4000); + } + } else { + val = (val - l1) >> 16; + l2 = (int16_t)val; + val = (val - l2) >> 16; + l3 = (int16_t)val; + + if (l3) { + tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l3); + rs = rd; + } + if (l2) { + tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l2); + rs = rd; + } + if (l3 || l2) + tcg_out_insn_simpleImm(s, OPC_SLL_I, rd, rd, 32); + } + + if (l1) { + tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l1); + rs = rd; + } + + if (extra) { + tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, extra); + rs = rd; + } + + tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l0); + if (type == TCG_TYPE_I32) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } +} + +static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg) +{ + if (ret == arg) { + return true; + } + switch (type) { + case TCG_TYPE_I32: + case TCG_TYPE_I64: + if (ret < 32 && arg < 32) { + tcg_out_movr(s, type, ret, arg); + break; + } else if (ret < 32) { + tcg_debug_assert(0); + break; + } else if (arg < 32) { + tcg_debug_assert(0); + break; + } + /* FALLTHRU */ + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_debug_assert(0); + break; + default: + g_assert_not_reached(); + } + return true; +} + +static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits, + TCGReg rd, TCGReg rn) +{ + /* + * Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31 + * int bits = (8 << s_bits) - 1; + * tcg_out_sbfm(s, ext, rd, rn, 0, bits); + */ + switch (s_bits) { + case MO_8: + tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rn); + break; + case MO_16: + tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rn); + break; + case MO_32: + tcg_out_insn_simpleReg(s, OPC_ADDW, rd, rn, TCG_REG_ZERO); + break; + default: + tcg_debug_assert(0); + break; + } + if (ext == TCG_TYPE_I32) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } +} + +/* + * counting heading/tailing zero numbers + */ +static void tcg_out_ctz64(TCGContext *s, SW_64Insn opc, TCGReg rd, TCGReg rn, TCGArg b, bool const_b) +{ + if (const_b && b == 64) { + if (opc == OPC_CTLZ) { + tcg_out_insn_simpleReg(s, OPC_CTLZ, rd, TCG_REG_ZERO, rn); + } else { + tcg_out_insn_simpleReg(s, OPC_CTTZ, rd, TCG_REG_ZERO, rn); + } + } else { + if (opc == OPC_CTLZ) { + tcg_out_insn_simpleReg(s, OPC_CTLZ, TCG_REG_TMP2, TCG_REG_ZERO, rn); + } else { + tcg_out_insn_simpleReg(s, OPC_CTTZ, TCG_REG_TMP2, TCG_REG_ZERO, rn); + } + if (const_b) { + if (b == -1) { + tcg_out_insn_bitReg(s, OPC_ORNOT, rd, TCG_REG_ZERO, TCG_REG_ZERO); + tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, rd); + } else if (b == 0) { + tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, TCG_REG_ZERO); + } else { + tcg_out_movi(s, TCG_TYPE_I64, rd, b); + tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, rd); + } + } else { + tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, b); + } + } +} + +/* + * counting 
heading/tailing zero numbers + */ +static void tcg_out_ctz32(TCGContext *s, SW_64Insn opc, TCGReg rd, TCGReg rn, TCGArg b, bool const_b) +{ + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, rn, 0xf); + + if (const_b && b == 32) { + if (opc == OPC_CTLZ) { + tcg_out_insn_simpleReg(s, OPC_CTLZ, rd, TCG_REG_ZERO, TCG_REG_TMP); + tcg_out_insn_simpleImm(s, OPC_SUBW_I, rd, rd, 32); + } else { + tcg_out_insn_simpleReg(s, OPC_CTTZ, rd, TCG_REG_ZERO, TCG_REG_TMP); + tcg_out_insn_complexImm(s, OPC_SELEQ_I, TCG_REG_TMP, rd, 32, rd); + } + } else { + if (opc == OPC_CTLZ) { + tcg_out_insn_simpleReg(s, OPC_CTLZ, TCG_REG_TMP2, TCG_REG_ZERO, TCG_REG_TMP); + tcg_out_insn_simpleImm(s, OPC_SUBW_I, TCG_REG_TMP2, TCG_REG_TMP2, 32); + } else { + tcg_out_insn_simpleReg(s, OPC_CTTZ, TCG_REG_TMP2, TCG_REG_ZERO, TCG_REG_TMP); + tcg_out_insn_complexImm(s, OPC_SELEQ_I, TCG_REG_TMP, TCG_REG_TMP2, 32, TCG_REG_TMP2); + } + if (const_b) { + if (b == -1) { + tcg_out_insn_bitReg(s, OPC_ORNOT, rd, TCG_REG_ZERO, TCG_REG_ZERO); + tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, rd); + } else if (b == 0) { + tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, TCG_REG_ZERO); + } else { + tcg_out_movi(s, TCG_TYPE_I32, rd, b); + tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, rd); + } + } else { + tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, b); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } + } +} + +/* + * memory protect for order of (ld and st) + */ +static void tcg_out_mb(TCGContext *s) +{ + tcg_out32(s, OPC_MEMB); +} + +static inline void tcg_out_bswap16(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) +{ + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 1); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 3); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24); + + if (ext == TCG_TYPE_I32) { + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); + } else { + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 7); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 6); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); + } +} + +static void tcg_out_bswap32(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) +{ + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 3); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2); + 
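+    /* byte 2 of rn is now in TCG_REG_TMP; the SLL_I/BIS pairs below
+       mirror each byte into TCG_REG_TMP2 at its byte-swapped position */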
tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 1); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24); + + if (ext == TCG_TYPE_I32) { + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); + } else { + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 7); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 6); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); + } +} + +static void tcg_out_bswap64(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn) +{ + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 7); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 6); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 3); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 1); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48); + tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP); + + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP); +} + +static void tcg_out_extract(TCGContext *s, TCGReg rd, TCGReg rn, int lsb, int len) +{ + //get 000..111..0000 + tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO); + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb); + /* get rn[lsb, lsb+len-1]-->rd[lsb, lsb+len-1] */ + tcg_out_insn_bitReg(s, OPC_AND, rd, rn, 
TCG_REG_TMP); + + /* rd[lsb, lsb+len-1] --> rd[0, len-1] */ + tcg_out_insn_simpleImm(s, OPC_SRL_I, rd, rd, lsb); +} + +static void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, int lsb, int len) +{ + tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO); + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb); + + /* TCG_REG_TMP2 = rn[msb,lsb] */ + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, 64-len); + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, TCG_REG_TMP2, 64-len-lsb); + + /* clear rd[msb,lsb] */ + tcg_out_insn_bitReg(s, OPC_BIC, rd, rd, TCG_REG_TMP); + /* rd = rd[63:msb+1]:rn[msb,lsb]:rd[lsb-1,0] */ + tcg_out_insn_bitReg(s, OPC_BIS, rd, rd, TCG_REG_TMP2); + + if (ext == TCG_TYPE_I32) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } +} + +static void tcg_out_mulsh64(TCGContext *s, TCGReg rd, TCGReg rn, TCGReg rm) +{ + tcg_out_insn_simpleReg(s, OPC_UMULH, TCG_REG_TMP, rn, rm); + + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, 63); + tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rm); + tcg_out_insn_simpleReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP2); + + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, 63); + tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rn); + tcg_out_insn_simpleReg(s, OPC_SUBL, rd, TCG_REG_TMP, TCG_REG_TMP2); +} + +static void tcg_out_sar(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGArg a2, bool c2) +{ + unsigned int bits = ext ? 64 : 32; + unsigned int max = bits - 1; + if (ext == TCG_TYPE_I32) { + tcg_out_insn_simpleReg(s, OPC_ADDW, TCG_REG_TMP, rn, TCG_REG_ZERO); + + if (c2) { + tcg_out_insn_simpleImm(s, OPC_SRA_I, rd, TCG_REG_TMP, a2 & max); + } else { + tcg_out_insn_bitReg(s, OPC_SRA, rd, TCG_REG_TMP, a2); + } + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } else { + if (c2) { + tcg_out_insn_simpleImm(s, OPC_SRA_I, rd, rn, a2 & max); + } else { + tcg_out_insn_bitReg(s, OPC_SRA, rd, rn, a2); + } + } +} + +/* + * memory <=> Reg in (B H W L) bytes + */ +static void tcg_out_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset, bool sign) +{ + if (offset != sextract64(offset, 0, 15)) { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP2, offset); + tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP2, TCG_REG_TMP2, rn); + tcg_out_insn_ldst(s, insn, rd, TCG_REG_TMP2, 0); + } else { + tcg_out_insn_ldst(s, insn, rd, rn, offset); + } + + switch (insn) { + case OPC_LDBU: + if (sign) + tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rd); + break; + case OPC_LDHU: + if (sign) + tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rd); + break; + case OPC_LDW: + if (!sign) + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + break; + default: + break; + } +} + +static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, intptr_t ofs) +{ + switch (type) { + case TCG_TYPE_I32: + tcg_out_ldst(s, OPC_LDW, rd, rn, ofs, zeroExt); + break; + case TCG_TYPE_I64: + tcg_out_ldst(s, OPC_LDL, rd, rn, ofs, sigExt); + break; + case TCG_TYPE_V64: + case TCG_TYPE_V128: + tcg_debug_assert(0); + break; + default: + g_assert_not_reached(); + } +} + +static void tcg_out_st(TCGContext *s, TCGType type, TCGReg rd,TCGReg rn, intptr_t ofs) +{ + switch (type) { + case TCG_TYPE_I32: + tcg_out_ldst(s, OPC_STW, rd, rn, ofs, noPara); + break; + case TCG_TYPE_I64: + tcg_out_ldst(s, OPC_STL, rd, rn, ofs, 
noPara);
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+        tcg_debug_assert(0);
+        break;
+    default:
+        g_assert_not_reached();
+    }
+}
+
+static void tcg_out_cond_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret, TCGArg a, tcg_target_long b, bool const_b)
+{
+    if (const_b && (b < 0 || b > 0xff)) {
+        tcg_out_movi(s, ext, TCG_REG_TMP2, b);
+        b = TCG_REG_TMP2;
+        const_b = 0;
+    }
+
+    if (ext == TCG_TYPE_I32) {
+        tcg_out_insn_simpleReg(s, OPC_ADDW, a, a, TCG_REG_ZERO);
+        if (!const_b) {
+            tcg_out_insn_simpleReg(s, OPC_ADDW, b, b, TCG_REG_ZERO);
+        } else {
+            b = (int32_t)b;
+        }
+    }
+
+    if (const_b) {
+        switch (cond) {
+        case TCG_COND_EQ:
+        case TCG_COND_NE:
+            tcg_out_insn_simpleImm(s, OPC_CMPEQ_I, ret, a, b);
+            break;
+        case TCG_COND_LT:
+        case TCG_COND_GE:
+            tcg_out_insn_simpleImm(s, OPC_CMPLT_I, ret, a, b);
+            break;
+        case TCG_COND_LE:
+        case TCG_COND_GT:
+            tcg_out_insn_simpleImm(s, OPC_CMPLE_I, ret, a, b);
+            break;
+        case TCG_COND_LTU:
+        case TCG_COND_GEU:
+            tcg_out_insn_simpleImm(s, OPC_CMPULT_I, ret, a, b);
+            break;
+        case TCG_COND_LEU:
+        case TCG_COND_GTU:
+            tcg_out_insn_simpleImm(s, OPC_CMPULE_I, ret, a, b);
+            break;
+        default:
+            tcg_debug_assert(0);
+            break;
+        }
+    } else {
+        switch (cond) {
+        case TCG_COND_EQ:
+        case TCG_COND_NE:
+            tcg_out_insn_simpleReg(s, OPC_CMPEQ, ret, a, b);
+            break;
+        case TCG_COND_LT:
+        case TCG_COND_GE:
+            tcg_out_insn_simpleReg(s, OPC_CMPLT, ret, a, b);
+            break;
+        case TCG_COND_LE:
+        case TCG_COND_GT:
+            tcg_out_insn_simpleReg(s, OPC_CMPLE, ret, a, b);
+            break;
+        case TCG_COND_LTU:
+        case TCG_COND_GEU:
+            tcg_out_insn_simpleReg(s, OPC_CMPULT, ret, a, b);
+            break;
+        case TCG_COND_LEU:
+        case TCG_COND_GTU:
+            tcg_out_insn_simpleReg(s, OPC_CMPULE, ret, a, b);
+            break;
+        default:
+            tcg_debug_assert(0);
+            break;
+        }
+    }
+
+    if (ext == TCG_TYPE_I32) {
+        tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a, a, 0xf);
+        if (!const_b) {
+            tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, b, b, 0xf);
+        }
+    }
+
+    switch (cond) {
+    case TCG_COND_NE:
+    case TCG_COND_GE:
+    case TCG_COND_GT:
+    case TCG_COND_GEU:
+    case TCG_COND_GTU:
+        tcg_out_insn_simpleImm(s, OPC_XOR_I, ret, ret, 0x1);
+        break;
+    case TCG_COND_ALWAYS:
+    case TCG_COND_NEVER:
+        tcg_debug_assert(0);
+        break;
+    default:
+        break;
+    }
+}
+
+/*
+ * Step 1: tcg_out_cond_cmp() handles "eq" and "ne" in the same case with
+ * the same insn, and leaves the comparison result in TCG_REG_TMP for step 2.
+ * Step 2: branch on that result; the final switch below distinguishes
+ * eq from ne by emitting different branch insns.
+ */
+static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond cond, TCGArg a, tcg_target_long b, bool b_const, TCGLabel *l)
+{
+    intptr_t offset;
+    bool need_cmp;
+
+    if (b_const && b == 0 && (cond == TCG_COND_EQ || cond == TCG_COND_NE)) {
+        need_cmp = false;
+        if (ext == TCG_TYPE_I32) {
+            tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, a, 0xf);
+        } else {
+            tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP, a, TCG_REG_ZERO);
+        }
+    } else {
+        need_cmp = true;
+        tcg_out_cond_cmp(s, ext, cond, TCG_REG_TMP, a, b, b_const);
+    }
+
+    if (!l->has_value) {
+        tcg_out_reloc(s, s->code_ptr, R_SW_64_BRADDR, l, 0);
+        offset = 0; /* 'br $31, 0' placeholder; the relocation patches in the real offset */
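/*
 * Illustrative sketch, not from the patch: tcg_out_cond_cmp() above only
 * has native compares for EQ/LT/LE/LTU/LEU; the inverse conditions
 * (NE/GE/GT/GEU/GTU) reuse the same compare and then flip the 0/1 result
 * with "XOR_I ret, ret, 0x1". A standalone C model of that trick, assuming
 * the compare yields exactly 0 or 1; the names are ours:
 */
#include <assert.h>
#include <stdint.h>

static int64_t cmp_ge_model(int64_t a, int64_t b)
{
    int64_t lt = (a < b); /* CMPLT: the natively supported compare, 0 or 1 */
    return lt ^ 1;        /* XOR with 1 turns LT into GE */
}

int main(void)
{
    assert(cmp_ge_model(5, 3) == 1);
    assert(cmp_ge_model(4, 4) == 1);
    assert(cmp_ge_model(3, 5) == 0);
    return 0;
}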
+ } else { + offset = tcg_pcrel_diff(s, l->u.value_ptr); + offset = offset - 4; + offset = offset >> 2; + tcg_debug_assert(offset == sextract64(offset, 0, 21)); + } + + if (need_cmp) { + tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, offset); + } else if (cond == TCG_COND_EQ) { + tcg_out_insn_br(s, OPC_BEQ, TCG_REG_TMP, offset); + } else { + tcg_out_insn_br(s, OPC_BNE, TCG_REG_TMP, offset); + } +} + +static void tcg_out_setcond(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret, + TCGReg a, tcg_target_long b, bool const_b) +{ + switch (cond) { + case TCG_COND_EQ: + case TCG_COND_LT: + case TCG_COND_LE: + case TCG_COND_LTU: + case TCG_COND_LEU: + case TCG_COND_NE: + case TCG_COND_GE: + case TCG_COND_GT: + case TCG_COND_GEU: + case TCG_COND_GTU: + tcg_out_cond_cmp(s, ext, cond, ret, a, b, const_b); + break; + default: + tcg_abort(); + break; + } +} + +static void tcg_out_movcond(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret, + TCGReg a1, tcg_target_long a2, bool const_b, TCGReg v1, TCGReg v2) +{ + tcg_out_cond_cmp(s, ext, cond, TCG_REG_TMP, a1, a2, const_b); + tcg_out_insn_complexReg(s, OPC_SELLBS, TCG_REG_TMP, ret, v1, v2); +} + +static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,TCGReg base, intptr_t ofs) +{ + if (type <= TCG_TYPE_I64 && val == 0) { + tcg_out_st(s, type, TCG_REG_ZERO, base, ofs); + return true; + } + return false; +} + +static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,TCGReg rn, int64_t imm64) +{ + if (ext == TCG_TYPE_I64) { + if (imm64 >= 0) { + if (0 <=imm64 && imm64 <= 255) { + /* we use tcg_out_insn_simpleImm because imm64 is between 0~255 */ + tcg_out_insn_simpleImm(s, OPC_ADDL_I, rd, rn, imm64); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64); + tcg_out_insn_simpleReg(s, OPC_ADDL, rd, rn, TCG_REG_TMP); + } + } else { + if (0 < -imm64 && -imm64 <= 255) { + /* we use tcg_out_insn_simpleImm because -imm64 is between 0~255 */ + tcg_out_insn_simpleImm(s, OPC_SUBL_I, rd, rn, -imm64); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, -imm64); + tcg_out_insn_simpleReg(s, OPC_SUBL, rd, rn, TCG_REG_TMP); + } + } + } else { + if (imm64 >= 0) { + if (0 <=imm64 && imm64 <= 255) { + /* we use tcg_out_insn_simpleImm because imm64 is between 0~255 */ + tcg_out_insn_simpleImm(s, OPC_ADDW_I, rd, rn, imm64); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } else { + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, imm64); + tcg_out_insn_simpleReg(s, OPC_ADDW, rd, rn, TCG_REG_TMP); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } + } else { + if (0 < -imm64 && -imm64 <= 255) { + /* we use tcg_out_insn_simpleImm because -imm64 is between 0~255 */ + tcg_out_insn_simpleImm(s, OPC_SUBW_I, rd, rn, -imm64); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } else { + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, -imm64); + tcg_out_insn_simpleReg(s, OPC_SUBW, rd, rn, TCG_REG_TMP); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } + } + } +} + +static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target) +{ + ptrdiff_t offset = (tcg_pcrel_diff(s, target) - 4) >> 2; + tcg_debug_assert(offset == sextract64(offset, 0, 21)); + tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, offset); +} + +static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target) +{ + ptrdiff_t offset = (tcg_pcrel_diff(s, target) - 4) >> 2; + if (offset == sextract64(offset, 0 ,21)) { + tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, offset); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target); + 
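/*
 * Illustrative sketch, not from the patch: direct branches here encode a
 * signed 21-bit word displacement relative to PC+4, which is why the code
 * above computes "(tcg_pcrel_diff(s, target) - 4) >> 2" and range-checks
 * it with sextract64 before falling back to movi plus an indirect jump.
 * A standalone model of that check (names are ours; assumes
 * two's-complement arithmetic right shifts, as QEMU's own sextract64 does):
 */
#include <stdbool.h>
#include <stdint.h>

static int64_t sextract64_model(uint64_t value, int start, int length)
{
    return (int64_t)(value << (64 - length - start)) >> (64 - length);
}

static bool branch_disp_fits(uint64_t pc, uint64_t target)
{
    int64_t disp = ((int64_t)(target - pc) - 4) >> 2; /* words past PC+4 */
    return disp == sextract64_model((uint64_t)disp, 0, 21);
}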
tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, noPara);
+    }
+}
+
+static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
+{
+    ptrdiff_t offset = (tcg_pcrel_diff(s, target) - 4) >> 2;
+    if (offset == sextract64(offset, 0, 21)) {
+        tcg_out_insn_br(s, OPC_BSR, TCG_REG_RA, offset);
+    } else {
+        tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
+        tcg_out_insn_jump(s, OPC_CALL, TCG_REG_RA, TCG_REG_TMP, noPara);
+    }
+}
+
+static void modify_direct_addr(uintptr_t addr, uintptr_t jmp_rw, uintptr_t jmp_rx)
+{
+    tcg_target_long l0 = 0, l1 = 0;
+    tcg_target_long val = addr;
+    TCGReg rs = TCG_REG_ZERO;
+    TCGReg rd = TCG_REG_TMP;
+    tcg_insn_unit i_nop = 0, i1 = 0, i2 = 0;
+    uint64_t pair = 0;
+    i_nop = OPC_NOP;
+    uintptr_t jmp = jmp_rw;
+
+    l0 = (int16_t)val;
+    val = (val - l0) >> 16;
+    l1 = (int16_t)val;
+    if (l1) {
+        i1 = OPC_LDIH | (rd & 0x1f) << 21 | (rs & 0x1f) << 16 | (l1 & 0xffff);
+    } else {
+        i1 = i_nop;
+    }
+    i2 = OPC_LDI | (rd & 0x1f) << 21 | (rs & 0x1f) << 16 | (l0 & 0xffff);
+    pair = (uint64_t)i1 << 32 | i2;
+    qatomic_set((uint64_t *)jmp, pair);
+    flush_idcache_range(jmp_rx, jmp_rw, 8);
+}
+
+void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, uintptr_t jmp_rw, uintptr_t addr)
+{
+    tcg_insn_unit i1, i2;
+    uint64_t pair;
+
+    ptrdiff_t offset = addr - jmp_rx - 4;
+
+    if (offset == sextract64(offset, 0, 21)) {
+        i1 = OPC_BR | (TCG_REG_ZERO & 0x1f) << 21 | ((offset >> 2) & 0x1fffff);
+        i2 = OPC_NOP;
+        pair = (uint64_t)i2 << 32 | i1;
+        qatomic_set((uint64_t *)jmp_rw, pair);
+        flush_idcache_range(jmp_rx, jmp_rw, 8);
+    } else if (offset == sextract64(offset, 0, 32)) {
+        modify_direct_addr(addr, jmp_rw, jmp_rx);
+    } else {
+        g_assert_not_reached();
+    }
+}
+
+static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
+{
+    if (!l->has_value) {
+        tcg_out_reloc(s, s->code_ptr, R_SW_64_BRADDR, l, 0);
+        tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, 0);
+    } else {
+        tcg_out_goto(s, l->u.value_ptr);
+    }
+}
+
+/*
+ * result: rd=rn(64,64-m]:rm(64-m,0]
+ * 1: rn(m,0]--->TCG_REG_TMP(64,64-m]
+ * 2: rm(64,64-m]--->rm(64-m,0]
+ * 3: rd=TCG_REG_TMP(64,64-m]:rm(64-m,0]
+ */
+static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, unsigned int m)
+{
+    int bits = ext ? 64 : 32;
+    int max = bits - 1;
+    tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max));
+    tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, (m & max));
+    tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
+}
+
+static inline void tcg_out_rotr_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m)
+{
+    unsigned int bits = ext ? 64 : 32;
+    unsigned int max = bits - 1;
+    if (ext == TCG_TYPE_I64) {
+        tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max));
+        tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, (m & max));
+        tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
+    } else {
+        tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf);
+        tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rd, bits - (m & max));
+        tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rd, (m & max));
+        tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
+        tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
+    }
+}
+
+static inline void tcg_out_rotr_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm)
+{
+    unsigned int bits = ext ?
64 : 32; + tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits); + tcg_out_insn_bitReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP); + + if (ext == TCG_TYPE_I64) { + tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP2, rn, TCG_REG_TMP); + tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP, rn, rm); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); + } else { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); + tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP2, rd, TCG_REG_TMP); + tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP, rd, rm); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } +} + +static inline void tcg_out_rotl_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m) +{ + unsigned int bits = ext ? 64 : 32; + unsigned int max = bits - 1; + + if (ext == TCG_TYPE_I64) { + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, rn, bits -(m & max)); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, (m & max)); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); + } else { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, rd, bits -(m & max)); + tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rd, (m & max)); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } +} + +static inline void tcg_out_rotl_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm) +{ + unsigned int bits = ext ? 64 : 32; + tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits); + tcg_out_insn_bitReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP); + + if (ext == TCG_TYPE_I64) { + tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP2, rn, TCG_REG_TMP); + tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP, rn, rm); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); + } else { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf); + tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP2, rd, TCG_REG_TMP); + tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP, rd, rm); + tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); + } +} + +#ifdef CONFIG_SOFTMMU +#include "../tcg-ldst.c.inc" + +static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = { + [MO_UB] = helper_ret_ldub_mmu, + [MO_LEUW] = helper_le_lduw_mmu, + [MO_LEUL] = helper_le_ldul_mmu, + [MO_LEQ] = helper_le_ldq_mmu, + [MO_BEUW] = helper_be_lduw_mmu, + [MO_BEUL] = helper_be_ldul_mmu, + [MO_BEQ] = helper_be_ldq_mmu, +}; + +static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = { + [MO_UB] = helper_ret_stb_mmu, + [MO_LEUW] = helper_le_stw_mmu, + [MO_LEUL] = helper_le_stl_mmu, + [MO_LEQ] = helper_le_stq_mmu, + [MO_BEUW] = helper_be_stw_mmu, + [MO_BEUL] = helper_be_stl_mmu, + [MO_BEQ] = helper_be_stq_mmu, +}; + +static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target) +{ + ptrdiff_t offset = tcg_pcrel_diff(s, target); + tcg_debug_assert(offset == sextract64(offset, 0, 21)); + tcg_out_insn_br(s, OPC_BR, rd, 0); + tcg_out_insn_simpleImm(s, OPC_SUBL_I, rd, rd, 4); + if (offset >= 0) { + tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, rd, rd, offset); + } else { + tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, rd, rd, -offset); + } +} + +static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + MemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; + + if 
(!reloc_pc21(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X16, TCG_AREG0); + tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X17, lb->addrlo_reg); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X18, oi); + tcg_out_adr(s, TCG_REG_X19, lb->raddr); + tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]); + if (opc & MO_SIGN) { + tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0); + } else { + tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0); + } + + tcg_out_goto(s, lb->raddr); + return true; +} + +static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb) +{ + MemOpIdx oi = lb->oi; + MemOp opc = get_memop(oi); + MemOp size = opc & MO_SIZE; + + if (!reloc_pc21(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) { + return false; + } + + tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X16, TCG_AREG0); + tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X17, lb->addrlo_reg); + tcg_out_mov(s, size == MO_64, TCG_REG_X18, lb->datalo_reg); + tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X19, oi); + tcg_out_adr(s, TCG_REG_X20, lb->raddr); + tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]); + tcg_out_goto(s, lb->raddr); + return true; +} + +static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi, + TCGType ext, TCGReg data_reg, TCGReg addr_reg, + tcg_insn_unit *raddr, tcg_insn_unit *label_ptr) +{ + TCGLabelQemuLdst *label = new_ldst_label(s); + + label->is_ld = is_ld; + label->oi = oi; + label->type = ext; + label->datalo_reg = data_reg; + label->addrlo_reg = addr_reg; + label->raddr = tcg_splitwx_to_rx(raddr); + label->label_ptr[0] = label_ptr; +} + +/* We expect to use a 7-bit scaled negative offset from ENV. */ +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0); +QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512); + +/* These offsets are built into the LDP below. */ +QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0); +QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8); + +/* + * Load and compare a TLB entry, emitting the conditional jump to the + * slow path for the failure case, which will be patched later when finalizing + * the slow path. Generated code returns the host addend in X1, + * clobbers X0,X2,X3,TMP. + */ +static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc, + tcg_insn_unit **label_ptr, int mem_index, + bool is_read) +{ + unsigned a_bits = get_alignment_bits(opc); + unsigned s_bits = opc & MO_SIZE; + unsigned a_mask = (1u << a_bits) - 1; + unsigned s_mask = (1u << s_bits) - 1; + TCGReg x3; + TCGType mask_type; + uint64_t compare_mask; + + mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32 + ? TCG_TYPE_I64 : TCG_TYPE_I32); + + /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */ + tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_X0, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index)); + tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_X1, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index)+8); + + /* Extract the TLB index from the address into X0. 
*/ + if (mask_type == TCG_TYPE_I64) { + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_insn_bitReg(s, OPC_AND, TCG_REG_X0, TCG_REG_X0, TCG_REG_TMP); + } else { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, addr_reg, 0xf); + tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS); + tcg_out_insn_bitReg(s, OPC_AND, TCG_REG_X0, TCG_REG_X0, TCG_REG_TMP); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_X0, TCG_REG_X0, 0xf); + } + /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */ + tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0); + + /* Load the tlb comparator into X0, and the fast path addend into X1. */ + tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_X0, TCG_REG_X1, is_read + ? offsetof(CPUTLBEntry, addr_read) + : offsetof(CPUTLBEntry, addr_write)); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, TCG_REG_X1, + offsetof(CPUTLBEntry, addend)); + + /* For aligned accesses, we check the first byte and include the alignment + bits within the address. For unaligned access, we check that we don't + cross pages using the address of the last byte of the access. */ + if (a_bits >= s_bits) { + x3 = addr_reg; + } else { + if (s_mask >= a_mask) { + tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_X3, addr_reg, s_mask - a_mask); + } else { + tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_X3, addr_reg, a_mask - s_mask); + } + + if (TARGET_LONG_BITS != 64) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_X3, TCG_REG_X3, 0xf); + } + x3 = TCG_REG_X3; + } + compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask; + + /* Store the page mask part of the address into X3. */ + tcg_out_bit(s, OPC_AND_I, OPC_AND, TCG_REG_X3, x3, compare_mask); + if (TARGET_LONG_BITS != 64) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_X3, TCG_REG_X3, 0xf); + } + + /* Perform the address comparison. */ + tcg_out_cond_cmp(s, TARGET_LONG_BITS == 64, TCG_COND_NE, TCG_REG_TMP, TCG_REG_X0, TCG_REG_X3, 0); + + /* If not equal, we jump to the slow path. 
*/ + *label_ptr = s->code_ptr; + tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, 0); +} + +#endif /* CONFIG_SOFTMMU */ + +static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext, + TCGReg data_r, TCGReg addr_r, + TCGType otype, TCGReg off_r) +{ + if (otype == TCG_TYPE_I32) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, off_r, 0xf); + tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, addr_r, TCG_REG_TMP); + } else { + tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, addr_r, off_r); + } + + const MemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SSIZE) { + case MO_UB: + tcg_out_ldst(s, OPC_LDBU, data_r, TCG_REG_TMP, 0, zeroExt); + break; + case MO_SB: + tcg_out_ldst(s, OPC_LDBU, data_r, TCG_REG_TMP, 0, sigExt); + if (ext == TCG_TYPE_I32) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, data_r, data_r, 0xf); + } + break; + case MO_UW: + tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, zeroExt); + if (bswap) { + tcg_out_bswap16(s, ext, data_r, data_r); + } + break; + case MO_SW: + if (bswap) { + tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, zeroExt); + tcg_out_bswap16(s, ext, data_r, data_r); + tcg_out_insn_simpleReg(s, OPC_SEXTH, data_r, TCG_REG_ZERO, data_r); + } else { + tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, sigExt); + } + + if (ext == TCG_TYPE_I32) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, data_r, data_r, 0xf); + } + break; + case MO_UL: + tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, zeroExt); + if (bswap) { + tcg_out_bswap32(s, ext, data_r, data_r); + } + break; + case MO_SL: + if (bswap) { + tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, zeroExt); + tcg_out_bswap32(s, ext, data_r, data_r); + tcg_out_insn_simpleReg(s, OPC_ADDW, data_r, data_r, TCG_REG_ZERO); + } else { + tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, sigExt); + } + break; + case MO_Q: + tcg_out_ldst(s, OPC_LDL, data_r, TCG_REG_TMP, 0, zeroExt); + if (bswap) { + tcg_out_bswap64(s, ext, data_r, data_r); + } + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, MemOpIdx oi, TCGType ext) +{ + MemOp memop = get_memop(oi); + const TCGType otype = TARGET_LONG_BITS == 64 ? 
TCG_TYPE_I64: TCG_TYPE_I32; +#ifdef CONFIG_SOFTMMU + unsigned mem_index = get_mmuidx(oi); + tcg_insn_unit *label_ptr; + + tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1); + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, + TCG_REG_X1, otype, addr_reg); + add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg, + s->code_ptr, label_ptr); +#else /* !CONFIG_SOFTMMU */ + if (USE_GUEST_BASE) { + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, TCG_REG_GUEST_BASE, otype, addr_reg); + } else { + tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_TYPE_I64, TCG_REG_ZERO); + } +#endif /* CONFIG_SOFTMMU */ +} + +static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop, + TCGReg data_r, TCGReg addr_r, + TCGType otype, TCGReg off_r) +{ + if (otype == TCG_TYPE_I32) { + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, off_r, 0xf); + tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, addr_r, TCG_REG_TMP); + } else { + tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, addr_r, off_r); + } + + const MemOp bswap = memop & MO_BSWAP; + + switch (memop & MO_SIZE) { + case MO_8: + tcg_out_ldst(s, OPC_STB, data_r, TCG_REG_TMP, 0, 0); + break; + case MO_16: + if (bswap && data_r != TCG_REG_ZERO) { + tcg_out_bswap16(s, TCG_TYPE_I32, TCG_REG_TMP3, data_r); + data_r = TCG_REG_TMP3; + } + tcg_out_ldst(s, OPC_STH, data_r, TCG_REG_TMP, 0, 0); + break; + case MO_32: + if (bswap && data_r != TCG_REG_ZERO) { + tcg_out_bswap32(s, TCG_TYPE_I32, TCG_REG_TMP3, data_r); + data_r = TCG_REG_TMP3; + } + tcg_out_ldst(s, OPC_STW, data_r, TCG_REG_TMP, 0, 0); + break; + case MO_64: + if (bswap && data_r != TCG_REG_ZERO) { + tcg_out_bswap64(s, TCG_TYPE_I64, TCG_REG_TMP3, data_r); + data_r = TCG_REG_TMP3; + } + tcg_out_ldst(s, OPC_STL, data_r, TCG_REG_TMP, 0, 0); + break; + default: + tcg_abort(); + } +} + +static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, + MemOpIdx oi) +{ + MemOp memop = get_memop(oi); + const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64: TCG_TYPE_I32; +#ifdef CONFIG_SOFTMMU + unsigned mem_index = get_mmuidx(oi); + tcg_insn_unit *label_ptr; + + tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0); + tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_X1, otype, addr_reg); + add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64, data_reg, addr_reg, s->code_ptr, label_ptr); +#else /* !CONFIG_SOFTMMU */ + if (USE_GUEST_BASE) { + tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_GUEST_BASE, otype, addr_reg); + } else { + tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_TYPE_I64, TCG_REG_ZERO); + } +#endif /* CONFIG_SOFTMMU */ +} + +static const tcg_insn_unit *tb_ret_addr; + +static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_OP_ARGS], + const int const_args[TCG_MAX_OP_ARGS]) +{ + /* 99% of the time, we can signal the use of extension registers + * by looking to see if the opcode handles 64-bit data. */ + TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0; + /* Hoist the loads of the most common arguments. */ + TCGArg a0 = args[0]; + TCGArg a1 = args[1]; + TCGArg a2 = args[2]; + int c2 = const_args[2]; + + /* Some operands are defined with "rZ" constraint, a register or + * the zero register. These need not actually test args[I] == 0. */ + + switch (opc) { + case INDEX_op_exit_tb: + /* Reuse the zeroing that exists for goto_ptr. 
*/ + if (a0 == 0) { + tcg_out_goto_long(s, tcg_code_gen_epilogue); + } else { + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, a0); + tcg_out_goto_long(s, tb_ret_addr); + } + break; + case INDEX_op_goto_tb: + if (s->tb_jmp_insn_offset != NULL) { + /* TCG_TARGET_HAS_direct_jump */ + /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic + write can be used to patch the target address. */ + if ((uintptr_t)s->code_ptr & 7) { + tcg_out32(s, OPC_NOP); + } + s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s); + tcg_out32(s, OPC_NOP); + tcg_out32(s, OPC_NOP); + } else { + /* !TCG_TARGET_HAS_direct_jump */ + tcg_debug_assert(s->tb_jmp_target_addr != NULL); + tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP, TCG_REG_ZERO, (uintptr_t)(s->tb_jmp_target_addr + a0)); + } + tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, noPara); + set_jmp_reset_offset(s, a0); + break; + case INDEX_op_goto_ptr: + tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, a0, noPara); + break; + case INDEX_op_br: + tcg_out_goto_label(s, arg_label(a0)); + break; + case INDEX_op_ld8u_i32: + case INDEX_op_ld8u_i64: + tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 0); + break; + case INDEX_op_ld8s_i32: + tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 1); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_ld8s_i64: + tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 1); + break; + case INDEX_op_ld16u_i32: + case INDEX_op_ld16u_i64: + tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 0); + break; + case INDEX_op_ld16s_i32: + tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 1); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_ld16s_i64: + tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 1); + break; + case INDEX_op_ld_i32: + tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 0); + break; + case INDEX_op_ld32u_i64: + tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 0); + break; + case INDEX_op_ld32s_i64: + tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 1); + break; + case INDEX_op_ld_i64: + tcg_out_ldst(s, OPC_LDL, a0, a1, a2, 1); + break; + case INDEX_op_st8_i32: + case INDEX_op_st8_i64: + tcg_out_ldst(s, OPC_STB, REG0(0), a1, a2, 0); + break; + case INDEX_op_st16_i32: + case INDEX_op_st16_i64: + tcg_out_ldst(s, OPC_STH, REG0(0), a1, a2, 0); + break; + case INDEX_op_st_i32: + case INDEX_op_st32_i64: + tcg_out_ldst(s, OPC_STW, REG0(0), a1, a2, 0); + break; + case INDEX_op_st_i64: + tcg_out_ldst(s, OPC_STL, REG0(0), a1, a2, 0); + break; + case INDEX_op_add_i32: + a2 = (int32_t)a2; + if (c2) { + tcg_out_addsubi(s, ext, a0, a1, a2); + } else { + tcg_out_insn_simpleReg(s, OPC_ADDW, a0, a1, a2); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + } + break; + case INDEX_op_add_i64: + if (c2) { + tcg_out_addsubi(s, ext, a0, a1, a2); + } else { + tcg_out_insn_simpleReg(s, OPC_ADDL, a0, a1, a2); + } + break; + case INDEX_op_sub_i32: + a2 = (int32_t)a2; + if (c2) { + tcg_out_addsubi(s, ext, a0, a1, -a2); + } else { + tcg_out_insn_simpleReg(s, OPC_SUBW, a0, a1, a2); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + } + break; + case INDEX_op_sub_i64: + if (c2) { + tcg_out_addsubi(s, ext, a0, a1, -a2); + } else { + tcg_out_insn_simpleReg(s, OPC_SUBL, a0, a1, a2); + } + break; + case INDEX_op_neg_i32: + tcg_out_insn_bitReg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_neg_i64: + tcg_out_insn_bitReg(s, OPC_SUBL, a0, TCG_REG_ZERO, a1); + break; + case INDEX_op_and_i32: + if (c2) { + a2 = (int32_t)a2; + tcg_out_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2); + } else { + tcg_out_insn_bitReg(s, OPC_AND, a0, a1, a2); + 
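/*
 * Illustrative sketch, not from the patch: nearly every 32-bit op in this
 * backend ends with "ZAPNOT_I rd, rd, 0xf". ZAPNOT keeps byte i of the
 * source iff bit i of the 8-bit mask is set and zeroes the rest, so mask
 * 0xf zero-extends the low 32 bits of a 64-bit register. A standalone C
 * model; the name is ours:
 */
#include <stdint.h>

static uint64_t zapnot_model(uint64_t x, unsigned mask)
{
    uint64_t r = 0;
    for (int i = 0; i < 8; i++) {
        if (mask & (1u << i)) {
            r |= x & (0xffull << (8 * i)); /* keep byte i */
        }
    }
    return r; /* zapnot_model(x, 0xf) == (uint32_t)x */
}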
} + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_and_i64: + if (c2) { + tcg_out_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2); + } else { + tcg_out_insn_bitReg(s, OPC_AND, a0, a1, a2); + } + break; + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + tcg_debug_assert(0); + break; + case INDEX_op_or_i32: + if (c2) { + a2 = (int32_t)a2; + tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2); + } else { + tcg_out_insn_bitReg(s, OPC_BIS, a0, a1, a2); + } + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_or_i64: + if (c2) { + tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2); + } else { + tcg_out_insn_bitReg(s, OPC_BIS, a0, a1, a2); + } + break; + case INDEX_op_orc_i32: + if (c2) { + a2 = (int32_t)a2; + tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2); + } else { + tcg_out_insn_bitReg(s, OPC_ORNOT, a0, a1, a2); + } + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_orc_i64: + if (c2) { + tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2); + } else { + tcg_out_insn_bitReg(s, OPC_ORNOT, a0, a1, a2); + } + break; + case INDEX_op_xor_i32: + if (c2) { + a2 = (int32_t)a2; + tcg_out_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2); + } else { + tcg_out_insn_bitReg(s, OPC_XOR, a0, a1, a2); + } + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_xor_i64: + if (c2) { + tcg_out_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2); + } else { + tcg_out_insn_bitReg(s, OPC_XOR, a0, a1, a2); + } + break; + case INDEX_op_eqv_i32: + case INDEX_op_eqv_i64: + tcg_debug_assert(0); + break; + case INDEX_op_not_i32: + tcg_out_insn_bitReg(s, OPC_ORNOT, a0, TCG_REG_ZERO, a1); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_not_i64: + tcg_out_insn_bitReg(s, OPC_ORNOT, a0, TCG_REG_ZERO, a1); + break; + case INDEX_op_mul_i32: + tcg_out_insn_simpleReg(s, OPC_MULL, a0, a1, a2); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_mul_i64: + tcg_out_insn_simpleReg(s, OPC_MULL, a0, a1, a2); + break; + case INDEX_op_div_i32: + case INDEX_op_div_i64: + tcg_debug_assert(0); + break; + case INDEX_op_divu_i32: + case INDEX_op_divu_i64: + tcg_debug_assert(0); + break; + case INDEX_op_rem_i32: + case INDEX_op_rem_i64: + tcg_debug_assert(0); + break; + case INDEX_op_remu_i32: + case INDEX_op_remu_i64: + tcg_debug_assert(0); + break; + case INDEX_op_shl_i32: /* sw logical left*/ + if (c2) { + unsigned int bits = ext ? 64 : 32; + unsigned int max = bits - 1; + tcg_out_insn_simpleImm(s, OPC_SLL_I, a0, a1, a2&max); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + } else { + tcg_out_insn_bitReg(s, OPC_SLL, a0, a1, a2); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + } + break; + case INDEX_op_shl_i64: + if (c2) { + unsigned int bits = ext ? 64 : 32; + unsigned int max = bits - 1; + tcg_out_insn_simpleImm(s, OPC_SLL_I, a0, a1, a2&max); + } else { + tcg_out_insn_bitReg(s, OPC_SLL, a0, a1, a2); + } + break; + case INDEX_op_shr_i32: /* sw logical right */ + a2 = (int32_t)a2; + if (c2) { + int bits = ext ? 64 : 32; + int max = bits - 1; + tcg_out_insn_simpleImm(s, OPC_SRL_I, a0, a1, a2&max); + } else { + tcg_out_insn_bitReg(s, OPC_SRL, a0, a1, a2); + } + break; + case INDEX_op_shr_i64: + if (c2) { + int bits = ext ? 
64 : 32; + int max = bits - 1; + tcg_out_insn_simpleImm(s, OPC_SRL_I, a0, a1, a2&max); + } else { + tcg_out_insn_bitReg(s, OPC_SRL, a0, a1, a2); + } + break; + case INDEX_op_sar_i32: + a2 = (int32_t)a2; + tcg_out_sar(s, ext, a0, a1, a2, c2); + break; + case INDEX_op_sar_i64: /* sw arithmetic right*/ + tcg_out_sar(s, ext, a0, a1, a2, c2); + break; + case INDEX_op_rotr_i32: /* loop shift */ + case INDEX_op_rotr_i64: + if (c2) {/* loop right shift a2*/ + tcg_out_rotr_Imm(s, ext, a0, a1, a2); + } else { + tcg_out_rotr_Reg(s, ext, a0, a1, a2); + } + break; + case INDEX_op_rotl_i32: /* loop shift */ + case INDEX_op_rotl_i64: /* sw */ + if (c2) {/* loop left shift a2*/ + tcg_out_rotl_Imm(s, ext, a0, a1, a2); + } else { + tcg_out_rotl_Reg(s, ext, a0, a1, a2); + } + break; + case INDEX_op_clz_i32: + tcg_out_ctz32(s, OPC_CTLZ, a0, a1, a2, c2); + break; + case INDEX_op_clz_i64: /* counting leading zero numbers */ + tcg_out_ctz64(s, OPC_CTLZ, a0, a1, a2, c2); + break; + case INDEX_op_ctz_i32: + tcg_out_ctz32(s, OPC_CTTZ, a0, a1, a2, c2); + break; + case INDEX_op_ctz_i64: /* counting tailing zero numbers */ + tcg_out_ctz64(s, OPC_CTTZ, a0, a1, a2, c2); + break; + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3])); + break; + case INDEX_op_setcond_i32: + a2 = (int32_t)a2; + tcg_out_setcond(s, ext, args[3], a0, a1, a2, c2); + break; + case INDEX_op_setcond_i64: + tcg_out_setcond(s, ext, args[3], a0, a1, a2, c2); + break; + case INDEX_op_movcond_i32: + a2 = (int32_t)a2; + tcg_out_movcond(s, ext, args[5], a0, a1, a2, c2, REG0(3), REG0(4)); + break; + case INDEX_op_movcond_i64: + tcg_out_movcond(s, ext, args[5], a0, a1, a2, c2, REG0(3), REG0(4)); + break; + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + tcg_out_qemu_ld(s, a0, a1, a2, ext); + break; + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + tcg_out_qemu_st(s, REG0(0), a1, a2); + break; + case INDEX_op_bswap64_i64: + tcg_out_bswap64(s, ext, a0, a1); + break; + case INDEX_op_bswap32_i32: + case INDEX_op_bswap32_i64: + tcg_out_bswap32(s, ext, a0, a1); + break; + case INDEX_op_bswap16_i32: + case INDEX_op_bswap16_i64: + tcg_out_bswap16(s, ext, a0, a1); + break; + case INDEX_op_ext8s_i32: + tcg_out_insn_simpleReg(s, OPC_SEXTB, a0, TCG_REG_ZERO, a1); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_ext8s_i64: + tcg_out_insn_simpleReg(s, OPC_SEXTB, a0, TCG_REG_ZERO, a1); + break; + case INDEX_op_ext16s_i32: + tcg_out_insn_simpleReg(s, OPC_SEXTH, a0, TCG_REG_ZERO, a1); + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf); + break; + case INDEX_op_ext16s_i64: + tcg_out_insn_simpleReg(s, OPC_SEXTH, a0, TCG_REG_ZERO, a1); + break; + case INDEX_op_ext_i32_i64: + case INDEX_op_ext32s_i64: + tcg_out_insn_simpleReg(s, OPC_ADDW, a0, TCG_REG_ZERO, a1); + break; + case INDEX_op_ext8u_i32: + case INDEX_op_ext8u_i64: + tcg_out_insn_simpleImm(s, OPC_EXTLB_I, a0, a1, 0x0); + break; + case INDEX_op_ext16u_i32: + case INDEX_op_ext16u_i64: + tcg_out_insn_simpleImm(s, OPC_EXTLH_I, a0, a1, 0x0); + break; + case INDEX_op_extu_i32_i64: + case INDEX_op_ext32u_i64: + tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a1, 0xf); + break; + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]); + break; + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + tcg_out_extract(s, a0, a1, a2, args[3]); + break; + case INDEX_op_sextract_i32: + case INDEX_op_sextract_i64: + tcg_debug_assert(0); + break; + case 
INDEX_op_extract2_i32: /* extract REG0(2) right args[3] bit to REG0(1) left ,save to a0*/ + case INDEX_op_extract2_i64: + tcg_debug_assert(0); + break; + case INDEX_op_add2_i32: + case INDEX_op_add2_i64: + tcg_debug_assert(0); + break; + case INDEX_op_sub2_i32: + case INDEX_op_sub2_i64: + tcg_debug_assert(0); + break; + case INDEX_op_muluh_i64: + tcg_out_insn_simpleReg(s, OPC_UMULH, a0, a1, a2); + break; + case INDEX_op_mulsh_i64: + tcg_out_mulsh64(s, a0, a1, a2); + break; + case INDEX_op_mb: + tcg_out_mb(s); + break; + case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */ + break; + case INDEX_op_mov_i64: + break; + case INDEX_op_call: /* Always emitted via tcg_out_call. */ + default: + g_assert_not_reached(); + } +#undef REG0 +} + +static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op) +{ + switch (op) { + case INDEX_op_goto_ptr: + return C_O0_I1(r); + case INDEX_op_ld8u_i32: + case INDEX_op_ld8s_i32: + case INDEX_op_ld16u_i32: + case INDEX_op_ld16s_i32: + case INDEX_op_ld_i32: + case INDEX_op_ld8u_i64: + case INDEX_op_ld8s_i64: + case INDEX_op_ld16u_i64: + case INDEX_op_ld16s_i64: + case INDEX_op_ld32u_i64: + case INDEX_op_ld32s_i64: + case INDEX_op_ld_i64: + case INDEX_op_neg_i32: + case INDEX_op_neg_i64: + case INDEX_op_not_i32: + case INDEX_op_not_i64: + case INDEX_op_bswap16_i32: + case INDEX_op_bswap32_i32: + case INDEX_op_bswap16_i64: + case INDEX_op_bswap32_i64: + case INDEX_op_bswap64_i64: + case INDEX_op_ext8s_i32: + case INDEX_op_ext16s_i32: + case INDEX_op_ext8u_i32: + case INDEX_op_ext16u_i32: + case INDEX_op_ext8s_i64: + case INDEX_op_ext16s_i64: + case INDEX_op_ext32s_i64: + case INDEX_op_ext8u_i64: + case INDEX_op_ext16u_i64: + case INDEX_op_ext32u_i64: + case INDEX_op_ext_i32_i64: + case INDEX_op_extu_i32_i64: + case INDEX_op_extract_i32: + case INDEX_op_extract_i64: + case INDEX_op_sextract_i32: + case INDEX_op_sextract_i64: + return C_O1_I1(r, r); + case INDEX_op_st8_i32: + case INDEX_op_st16_i32: + case INDEX_op_st_i32: + case INDEX_op_st8_i64: + case INDEX_op_st16_i64: + case INDEX_op_st32_i64: + case INDEX_op_st_i64: + return C_O0_I2(rZ, r); + case INDEX_op_add_i32: + case INDEX_op_add_i64: + case INDEX_op_sub_i32: + case INDEX_op_sub_i64: + return C_O1_I2(r, r, rU); + case INDEX_op_setcond_i32: + case INDEX_op_setcond_i64: + return C_O1_I2(r, r, rU); + case INDEX_op_mul_i32: + case INDEX_op_mul_i64: + case INDEX_op_div_i32: + case INDEX_op_div_i64: + case INDEX_op_divu_i32: + case INDEX_op_divu_i64: + case INDEX_op_rem_i32: + case INDEX_op_rem_i64: + case INDEX_op_remu_i32: + case INDEX_op_remu_i64: + case INDEX_op_muluh_i64: + case INDEX_op_mulsh_i64: + return C_O1_I2(r, r, r); + case INDEX_op_and_i32: + case INDEX_op_and_i64: + case INDEX_op_or_i32: + case INDEX_op_or_i64: + case INDEX_op_xor_i32: + case INDEX_op_xor_i64: + case INDEX_op_andc_i32: + case INDEX_op_andc_i64: + case INDEX_op_orc_i32: + case INDEX_op_orc_i64: + case INDEX_op_eqv_i32: + case INDEX_op_eqv_i64: + return C_O1_I2(r, r, rU); + case INDEX_op_shl_i32: + case INDEX_op_shr_i32: + case INDEX_op_sar_i32: + case INDEX_op_rotl_i32: + case INDEX_op_rotr_i32: + case INDEX_op_shl_i64: + case INDEX_op_shr_i64: + case INDEX_op_sar_i64: + case INDEX_op_rotl_i64: + case INDEX_op_rotr_i64: + return C_O1_I2(r, r, ri); + case INDEX_op_clz_i32: + case INDEX_op_clz_i64: + return C_O1_I2(r, r, r); + case INDEX_op_ctz_i32: + case INDEX_op_ctz_i64: + return C_O1_I2(r, r, r); + case INDEX_op_brcond_i32: + case INDEX_op_brcond_i64: + return C_O0_I2(r, rU); + case INDEX_op_movcond_i32: + case 
INDEX_op_movcond_i64: + return C_O1_I4(r, r, rU, rZ, rZ); + case INDEX_op_qemu_ld_i32: + case INDEX_op_qemu_ld_i64: + return C_O1_I1(r, l); + case INDEX_op_qemu_st_i32: + case INDEX_op_qemu_st_i64: + return C_O0_I2(lZ, l); + case INDEX_op_deposit_i32: + case INDEX_op_deposit_i64: + return C_O1_I2(r, 0, rZ); + case INDEX_op_extract2_i32: + case INDEX_op_extract2_i64: + return C_O1_I2(r, rZ, rZ); + case INDEX_op_add2_i32: + case INDEX_op_add2_i64: + case INDEX_op_sub2_i32: + case INDEX_op_sub2_i64: + return C_O2_I4(r, r, rZ, rZ, rA, rMZ); + case INDEX_op_add_vec: + case INDEX_op_sub_vec: + case INDEX_op_mul_vec: + case INDEX_op_xor_vec: + case INDEX_op_ssadd_vec: + case INDEX_op_sssub_vec: + case INDEX_op_usadd_vec: + case INDEX_op_ussub_vec: + case INDEX_op_smax_vec: + case INDEX_op_smin_vec: + case INDEX_op_umax_vec: + case INDEX_op_umin_vec: + case INDEX_op_shlv_vec: + case INDEX_op_shrv_vec: + case INDEX_op_sarv_vec: + return C_O1_I2(w, w, w); + case INDEX_op_not_vec: + case INDEX_op_neg_vec: + case INDEX_op_abs_vec: + case INDEX_op_shli_vec: + case INDEX_op_shri_vec: + case INDEX_op_sari_vec: + return C_O1_I1(w, w); + case INDEX_op_ld_vec: + case INDEX_op_dupm_vec: + return C_O1_I1(w, r); + case INDEX_op_st_vec: + return C_O0_I2(w, r); + case INDEX_op_dup_vec: + return C_O1_I1(w, wr); + case INDEX_op_or_vec: + case INDEX_op_andc_vec: + return C_O1_I2(w, w, wO); + case INDEX_op_and_vec: + case INDEX_op_orc_vec: + return C_O1_I2(w, w, wN); + case INDEX_op_cmp_vec: + return C_O1_I2(w, w, wZ); + case INDEX_op_bitsel_vec: + return C_O1_I3(w, w, w, w); + default: + g_assert_not_reached(); + } +} + + +static void tcg_target_init(TCGContext *s) +{ + tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu; + tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu; + tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull; + tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull; + tcg_target_call_clobber_regs = -1ull; + + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X9); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X10); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X11); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X12); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X13); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X14); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X15); + + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F2); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F3); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F4); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F5); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F6); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F7); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F8); + tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F9); + + s->reserved_regs = 0; + tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP3); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_X29); + tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP); + tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP2); +} + + +#define PUSH_SIZE ((15-9+1+1) * 8) +#define FRAME_SIZE \ + ((PUSH_SIZE \ + + 
TCG_STATIC_CALL_ARGS_SIZE \ + + CPU_TEMP_BUF_NLONGS * sizeof(long) \ + + TCG_TARGET_STACK_ALIGN - 1) \ + & ~(TCG_TARGET_STACK_ALIGN - 1)) + + +/* We're expecting a 2 byte uleb128 encoded value. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14)); + +/* We're expecting to use a single ADDI insn. */ +QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff); + +static void tcg_target_qemu_prologue(TCGContext *s) +{ + TCGReg r; + int ofs; + + /* allocate space for all saved registers */ + /* subl $sp,PUSH_SIZE,$sp */ + tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE); + + /* Push (FP, LR) */ + /* stl $fp,0($sp) */ + tcg_out_insn_ldst(s, OPC_STL, TCG_REG_FP, TCG_REG_SP, 0); + /* stl $26,8($sp) */ + tcg_out_insn_ldst(s, OPC_STL, TCG_REG_RA, TCG_REG_SP, 8); + + + /* Set up frame pointer for canonical unwinding. */ + /* TCG_REG_FP=TCG_REG_SP */ + tcg_out_movr(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP); + + /* Store callee-preserved regs x9..x14. */ + for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1){ + ofs = (r - TCG_REG_X9 + 2) * 8; + tcg_out_insn_ldst(s, OPC_STL, r, TCG_REG_SP, ofs); + } + + /* Make stack space for TCG locals. */ + /* subl $sp,FRAME_SIZE-PUSH_SIZE,$sp */ + tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE); + + /* Inform TCG about how to find TCG locals with register, offset, size. */ + tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, + CPU_TEMP_BUF_NLONGS * sizeof(long)); + +#ifndef CONFIG_SOFTMMU + if (USE_GUEST_BASE) { + tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base); + tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE); + } +#endif + + /* TCG_AREG0=tcg_target_call_iarg_regs[0], on sw, we mov $16 to $9 */ + tcg_out_mov(s, TCG_TYPE_I64, TCG_AREG0, tcg_target_call_iarg_regs[0]); + tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], noPara); + + /* + * Return path for goto_ptr. Set return value to 0, a-la exit_tb, + * and fall through to the rest of the epilogue. + */ + tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr); + tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, 0); + + /* TB epilogue */ + tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr); + + /* Remove TCG locals stack space. */ + /* addl $sp,FRAME_SIZE-PUSH_SIZE,$sp */ + tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE); + + /* Restore registers x9..x14. */ + for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1) { + int ofs = (r - TCG_REG_X9 + 2) * 8; + tcg_out_insn_ldst(s, OPC_LDL, r, TCG_REG_SP, ofs); + } + + /* Pop (FP, LR) */ + /* ldl $fp,0($sp) */ + tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_FP, TCG_REG_SP, 0); + /* ldl $26,8($sp) */ + tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_RA, TCG_REG_SP, 8); + + /* restore SP to previous frame. */ + /* addl $sp,PUSH_SIZE,$sp */ + tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE); + + tcg_out_insn_jump(s, OPC_RET, TCG_REG_ZERO, TCG_REG_RA, noPara); +} + +static void tcg_out_nop_fill(tcg_insn_unit *p, int count) +{ + int i; + for (i = 0; i < count; ++i) { + p[i] = OPC_NOP; + } +} + +typedef struct { + DebugFrameHeader h; + uint8_t fde_def_cfa[4]; + uint8_t fde_reg_ofs[8 * 2]; +} DebugFrame; + +/* + * GDB doesn't appear to require proper setting of ELF_HOST_FLAGS, + * which is good because they're really quite complicated for SW64. 
+ */ +#define ELF_HOST_MACHINE EM_SW_64 + +static const DebugFrame debug_frame = { + .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */ + .h.cie.id = -1, + .h.cie.version = 1, + .h.cie.code_align = 1, + .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */ + .h.cie.return_column = TCG_REG_RA, + + /* Total FDE size does not include the "len" member. */ + .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset), + + .fde_def_cfa = { + 12, TCG_REG_SP, /* DW_CFA_def_cfa sp, ... */ + (FRAME_SIZE & 0x7f) | 0x80, /* ... uleb128 FRAME_SIZE */ + (FRAME_SIZE >> 7) + }, + .fde_reg_ofs = { + 0x80 + 14, 1, /* DW_CFA_offset, */ + 0x80 + 13, 2, /* DW_CFA_offset, */ + 0x80 + 12, 3, /* DW_CFA_offset, */ + 0x80 + 11, 4, /* DW_CFA_offset, */ + 0x80 + 10, 5, /* DW_CFA_offset, */ + 0x80 + 9, 6, /* DW_CFA_offset, */ + 0x80 + 26, 7, /* DW_CFA_offset, ra, -24 */ + 0x80 + 15, 8, /* DW_CFA_offset, fp, -8 */ + } +}; + +void tcg_register_jit(const void *buf, size_t buf_size) +{ + tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame)); +} diff --git a/tcg/sw64/tcg-target.h b/tcg/sw64/tcg-target.h new file mode 100755 index 0000000000000000000000000000000000000000..91681a0c75594362d14a2e912891474fe1c7578e --- /dev/null +++ b/tcg/sw64/tcg-target.h @@ -0,0 +1,126 @@ +/* + * Initial TCG Implementation for sw_64 + * + */ + +#ifndef SW_64_TCG_TARGET_H +#define SW_64_TCG_TARGET_H + +#define TCG_TARGET_INSN_UNIT_SIZE 4 + +typedef enum { + TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, + TCG_REG_X4, TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, + TCG_REG_X8, TCG_REG_X9, TCG_REG_X10, TCG_REG_X11, + TCG_REG_X12, TCG_REG_X13, TCG_REG_X14, TCG_REG_X15, + TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, + TCG_REG_X20, TCG_REG_X21, TCG_REG_X22, TCG_REG_X23, + TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, + TCG_REG_X28, TCG_REG_X29, TCG_REG_X30, TCG_REG_X31, + + TCG_REG_F0=32, TCG_REG_F1, TCG_REG_F2, TCG_REG_F3, + TCG_REG_F4, TCG_REG_F5, TCG_REG_F6, TCG_REG_F7, + TCG_REG_F8, TCG_REG_F9, TCG_REG_F10, TCG_REG_F11, + TCG_REG_F12, TCG_REG_F13, TCG_REG_F14, TCG_REG_F15, + TCG_REG_F16, TCG_REG_F17, TCG_REG_F18, TCG_REG_F19, + TCG_REG_F20, TCG_REG_F21, TCG_REG_F22, TCG_REG_F23, + TCG_REG_F24, TCG_REG_F25, TCG_REG_F26, TCG_REG_F27, + TCG_REG_F28, TCG_REG_F29, TCG_REG_F30, TCG_REG_F31, + + /* Aliases. 
*/
+    TCG_REG_FP = TCG_REG_X15,
+    TCG_REG_RA = TCG_REG_X26,
+    TCG_REG_GP = TCG_REG_X29,
+    TCG_REG_SP = TCG_REG_X30,
+    TCG_REG_ZERO = TCG_REG_X31,
+    TCG_AREG0 = TCG_REG_X9,
+} TCGReg;
+
+#define TCG_TARGET_NB_REGS 64
+#define MAX_CODE_GEN_BUFFER_SIZE ((size_t)-1)
+
+/* used for function call generation */
+#define TCG_REG_CALL_STACK TCG_REG_SP
+#define TCG_TARGET_STACK_ALIGN 16
+#define TCG_TARGET_CALL_ALIGN_ARGS 1
+#define TCG_TARGET_CALL_STACK_OFFSET 0
+#define TCG_TARGET_HAS_neg_i64 1
+#define TCG_TARGET_HAS_direct_jump 0
+#define TCG_TARGET_HAS_goto_ptr 1
+#define TCG_TARGET_HAS_qemu_st8_i32 0
+#define TCG_TARGET_HAS_not_i32 1
+#define TCG_TARGET_HAS_neg_i32 1
+#define TCG_TARGET_HAS_div_i32 1
+#define TCG_TARGET_HAS_movcond_i32 1
+#define TCG_TARGET_HAS_rem_i32 0
+#define TCG_TARGET_HAS_rot_i32 1
+#define TCG_TARGET_HAS_deposit_i32 1
+#define TCG_TARGET_HAS_extract_i32 1
+#define TCG_TARGET_HAS_sextract_i32 0
+#define TCG_TARGET_HAS_extract2_i32 0
+#define TCG_TARGET_HAS_add2_i32 0
+#define TCG_TARGET_HAS_sub2_i32 0
+#define TCG_TARGET_HAS_mulu2_i32 0
+#define TCG_TARGET_HAS_muluh_i32 0
+#define TCG_TARGET_HAS_muls2_i32 0
+#define TCG_TARGET_HAS_mulsh_i32 0
+#define TCG_TARGET_HAS_ext8s_i32 0
+#define TCG_TARGET_HAS_ext16s_i32 0
+#define TCG_TARGET_HAS_ext8u_i32 1
+#define TCG_TARGET_HAS_ext16u_i32 1
+#define TCG_TARGET_HAS_bswap16_i32 0
+#define TCG_TARGET_HAS_bswap32_i32 0
+#define TCG_TARGET_HAS_andc_i32 0
+#define TCG_TARGET_HAS_eqv_i32 0
+#define TCG_TARGET_HAS_nand_i32 0
+#define TCG_TARGET_HAS_nor_i32 0
+#define TCG_TARGET_HAS_clz_i32 0
+#define TCG_TARGET_HAS_ctz_i32 0
+#define TCG_TARGET_HAS_orc_i32 0
+#define TCG_TARGET_HAS_ctpop_i32 0
+#define TCG_TARGET_HAS_movcond_i64 1
+#define TCG_TARGET_HAS_div_i64 1
+#define TCG_TARGET_HAS_rem_i64 0
+#define TCG_TARGET_HAS_div2_i64 0
+#define TCG_TARGET_HAS_rot_i64 1
+#define TCG_TARGET_HAS_deposit_i64 1
+#define TCG_TARGET_HAS_extract_i64 1
+#define TCG_TARGET_HAS_sextract_i64 0
+#define TCG_TARGET_HAS_extract2_i64 0
+#define TCG_TARGET_HAS_extrl_i64_i32 0
+#define TCG_TARGET_HAS_extrh_i64_i32 0
+#define TCG_TARGET_HAS_ext8s_i64 0
+#define TCG_TARGET_HAS_ext16s_i64 0
+#define TCG_TARGET_HAS_ext32s_i64 1
+#define TCG_TARGET_HAS_ext8u_i64 1
+#define TCG_TARGET_HAS_ext16u_i64 1
+#define TCG_TARGET_HAS_ext32u_i64 1
+#define TCG_TARGET_HAS_bswap16_i64 0
+#define TCG_TARGET_HAS_bswap32_i64 0
+#define TCG_TARGET_HAS_bswap64_i64 0
+#define TCG_TARGET_HAS_not_i64 1
+#define TCG_TARGET_HAS_andc_i64 0
+#define TCG_TARGET_HAS_orc_i64 1
+#define TCG_TARGET_HAS_eqv_i64 0
+#define TCG_TARGET_HAS_nand_i64 0
+#define TCG_TARGET_HAS_nor_i64 0
+#define TCG_TARGET_HAS_clz_i64 1
+#define TCG_TARGET_HAS_ctz_i64 1
+#define TCG_TARGET_HAS_ctpop_i64 0
+#define TCG_TARGET_HAS_add2_i64 0
+#define TCG_TARGET_HAS_sub2_i64 0
+#define TCG_TARGET_HAS_mulu2_i64 0
+#define TCG_TARGET_HAS_muls2_i64 0
+#define TCG_TARGET_HAS_muluh_i64 1
+#define TCG_TARGET_HAS_mulsh_i64 1
+#define TCG_TARGET_DEFAULT_MO (0)
+#define TCG_TARGET_HAS_MEMORY_BSWAP 0
+/* optional instructions */
+void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
+#ifdef CONFIG_SOFTMMU
+#define TCG_TARGET_NEED_LDST_LABELS
+#endif
+#define TCG_TARGET_NEED_POOL_LABELS
+#endif /* SW_64_TCG_TARGET_H */
diff --git a/tcg/sw64/tcg-target.opc.h b/tcg/sw64/tcg-target.opc.h
new file mode 100755
index 0000000000000000000000000000000000000000..bce30accd9366f1665c58141616ca5e4e56c9f35
--- /dev/null
+++
b/tcg/sw64/tcg-target.opc.h @@ -0,0 +1,15 @@ +/* + * Copyright (c) 2019 Linaro + * + * This work is licensed under the terms of the GNU GPL, version 2 or + * (at your option) any later version. + * + * See the COPYING file in the top-level directory for details. + * + * Target-specific opcodes for host vector expansion. These will be + * emitted by tcg_expand_vec_op. For those familiar with GCC internals, + * consider these to be UNSPEC with names. + */ + +DEF(aa64_sshl_vec, 1, 2, 0, IMPLVEC) +DEF(aa64_sli_vec, 1, 2, 1, IMPLVEC) diff --git a/tcg/tcg-op-gvec.c b/tcg/tcg-op-gvec.c index ffe55e908f8a9e52c5519544291035a0736567ae..aea44c53b0f6c823415eb2aa1c898488047b0a3a 100644 --- a/tcg/tcg-op-gvec.c +++ b/tcg/tcg-op-gvec.c @@ -88,7 +88,20 @@ uint32_t simd_desc(uint32_t oprsz, uint32_t maxsz, int32_t data) uint32_t desc = 0; check_size_align(oprsz, maxsz, 0); - tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS)); + + /* + * We want to check that 'data' will fit into SIMD_DATA_BITS. + * However, some callers want to treat the data as a signed + * value (which they can later get back with simd_data()) + * and some want to treat it as an unsigned value. + * So here we assert only that the data will fit into the + * field in at least one way. This means that some invalid + * values from the caller will not be detected, e.g. if the + * caller wants to handle the value as a signed integer but + * incorrectly passes us 1 << (SIMD_DATA_BITS - 1). + */ + tcg_debug_assert(data == sextract32(data, 0, SIMD_DATA_BITS) || + data == extract32(data, 0, SIMD_DATA_BITS)); oprsz = (oprsz / 8) - 1; maxsz = (maxsz / 8) - 1; diff --git a/tcg/tcg-op-vec.c b/tcg/tcg-op-vec.c index faf30f9cdd320030755d3a47cc74576d718339e9..7c027099c42f01527fb2c63e2c5997c25413b6fc 100644 --- a/tcg/tcg-op-vec.c +++ b/tcg/tcg-op-vec.c @@ -50,9 +50,9 @@ extern TCGv_i32 TCGV_HIGH_link_error(TCGv_i64); * tcg_ctx->vec_opt_opc is non-NULL, the tcg_gen_*_vec expanders * will validate that their opcode is present in the list. 
*/ -#ifdef CONFIG_DEBUG_TCG -void tcg_assert_listed_vecop(TCGOpcode op) +static void tcg_assert_listed_vecop(TCGOpcode op) { +#ifdef CONFIG_DEBUG_TCG const TCGOpcode *p = tcg_ctx->vecop_list; if (p) { for (; *p; ++p) { @@ -62,8 +62,8 @@ void tcg_assert_listed_vecop(TCGOpcode op) } g_assert_not_reached(); } -} #endif +} bool tcg_can_emit_vecop_list(const TCGOpcode *list, TCGType type, unsigned vece) diff --git a/tcg/tcg.c b/tcg/tcg.c index 934aa8510b0170b8b844b4d3d427f5f75c7330ca..08c3b5a002dc249c3f6c0a2daf407a7da3e36300 100644 --- a/tcg/tcg.c +++ b/tcg/tcg.c @@ -632,9 +632,9 @@ static void tcg_context_init(unsigned max_cpus) if (nargs != 0) { ca->cif.arg_types = ca->args; - for (i = 0; i < nargs; ++i) { - int typecode = extract32(typemask, (i + 1) * 3, 3); - ca->args[i] = typecode_to_ffi[typecode]; + for (int j = 0; j < nargs; ++j) { + int typecode = extract32(typemask, (j + 1) * 3, 3); + ca->args[j] = typecode_to_ffi[typecode]; } } @@ -714,7 +714,6 @@ TranslationBlock *tcg_tb_alloc(TCGContext *s) goto retry; } qatomic_set(&s->code_gen_ptr, next); - s->data_gen_ptr = NULL; return tb; } @@ -4276,6 +4275,7 @@ int tcg_gen_code(TCGContext *s, TranslationBlock *tb) */ s->code_buf = tcg_splitwx_to_rw(tb->tc.ptr); s->code_ptr = s->code_buf; + s->data_gen_ptr = NULL; #ifdef TCG_TARGET_NEED_LDST_LABELS QSIMPLEQ_INIT(&s->ldst_labels); diff --git a/tests/Makefile.include b/tests/Makefile.include index 4c564cf7899ee8f3011df90072129dee2b7bf634..3aba622400997148b11879548378d6969feccce4 100644 --- a/tests/Makefile.include +++ b/tests/Makefile.include @@ -23,7 +23,7 @@ endif @echo " $(MAKE) check-clean Clean the tests and related data" @echo @echo "The following are useful for CI builds" - @echo " $(MAKE) check-build Build most test binaris" + @echo " $(MAKE) check-build Build most test binaries" @echo " $(MAKE) get-vm-images Downloads all images used by avocado tests, according to configured targets (~350 MB each, 1.5 GB max)" @echo @echo diff --git a/tests/avocado/boot_linux_console.py b/tests/avocado/boot_linux_console.py index 9c618d4809f976ee2cedbd52559ce4d98a8885d5..d61f31414d279c0d90c133e68d51d7787b70b6d3 100644 --- a/tests/avocado/boot_linux_console.py +++ b/tests/avocado/boot_linux_console.py @@ -512,7 +512,7 @@ def test_arm_raspi2_initrd(self): 'BCM2835') exec_command_and_wait_for_pattern(self, 'cat /proc/iomem', '/soc/cprman@7e101000') - exec_command(self, 'halt') + exec_command_and_wait_for_pattern(self, 'halt', 'reboot: System halted') # Wait for VM to shut down gracefully self.vm.wait() @@ -813,8 +813,8 @@ def test_arm_orangepi_sd(self): dtb_path = '/usr/lib/linux-image-current-sunxi/sun8i-h3-orangepi-pc.dtb' dtb_path = self.extract_from_deb(deb_path, dtb_path) rootfs_url = ('http://storage.kernelci.org/images/rootfs/buildroot/' - 'kci-2019.02/armel/base/rootfs.ext2.xz') - rootfs_hash = '692510cb625efda31640d1de0a8d60e26040f061' + 'buildroot-baseline/20221116.0/armel/rootfs.ext2.xz') + rootfs_hash = 'fae32f337c7b87547b10f42599acf109da8b6d9a' rootfs_path_xz = self.fetch_asset(rootfs_url, asset_hash=rootfs_hash) rootfs_path = os.path.join(self.workdir, 'rootfs.cpio') archive.lzma_uncompress(rootfs_path_xz, rootfs_path) diff --git a/tests/avocado/machine_s390_ccw_virtio.py b/tests/avocado/machine_s390_ccw_virtio.py index bd03d7160b4a8c7719c2b48e85db78d64640a86a..438a6f4321d059f4bd2fa7fd893975a7374c9272 100644 --- a/tests/avocado/machine_s390_ccw_virtio.py +++ b/tests/avocado/machine_s390_ccw_virtio.py @@ -248,7 +248,7 @@ def test_s390x_fedora(self): line = ppmfile.readline() 
self.assertEqual(line, b"P6\n") line = ppmfile.readline() - self.assertEqual(line, b"1024 768\n") + self.assertEqual(line, b"1280 800\n") line = ppmfile.readline() self.assertEqual(line, b"255\n") line = ppmfile.readline(256) diff --git a/tests/avocado/replay_kernel.py b/tests/avocado/replay_kernel.py index c68a9537301df03b01c869dfa6afe4a3acd68c32..16421b34071a3206ba87263d29e5433c676f48c9 100644 --- a/tests/avocado/replay_kernel.py +++ b/tests/avocado/replay_kernel.py @@ -113,6 +113,8 @@ def test_mips_malta(self): self.run_rr(kernel_path, kernel_command_line, console_pattern, shift=5) + # See https://gitlab.com/qemu-project/qemu/-/issues/2013 + @skipUnless(os.getenv('QEMU_TEST_FLAKY_TESTS'), 'Test is unstable on GitLab') def test_mips64el_malta(self): """ This test requires the ar tool to extract "data.tar.gz" from @@ -128,6 +130,7 @@ def test_mips64el_malta(self): :avocado: tags=arch:mips64el :avocado: tags=machine:malta + :avocado: tags=flaky """ deb_url = ('http://snapshot.debian.org/archive/debian/' '20130217T032700Z/pool/main/l/linux-2.6/' diff --git a/tests/avocado/replay_linux.py b/tests/avocado/replay_linux.py index 15953f9e4967134c66c4b4308d5f52c959047572..e2ee35a7f360bba70fff489ef6a9ec53f58894cc 100644 --- a/tests/avocado/replay_linux.py +++ b/tests/avocado/replay_linux.py @@ -82,8 +82,8 @@ def launch_and_wait(self, record, args, shift): % os.path.getsize(replay_path)) else: vm.event_wait('SHUTDOWN', self.timeout) - vm.shutdown(True) - logger.info('successfully fihished the replay') + vm.wait() + logger.info('successfully finished the replay') elapsed = time.time() - start_time logger.info('elapsed time %.2f sec' % elapsed) return elapsed diff --git a/tests/bench/meson.build b/tests/bench/meson.build index 00b3c209dcbd16958a32283c86e8cb5f3ae2d5cb..54bc8938a8addaed294cb1311b409d9fee602a01 100644 --- a/tests/bench/meson.build +++ b/tests/bench/meson.build @@ -3,6 +3,12 @@ qht_bench = executable('qht-bench', sources: 'qht-bench.c', dependencies: [qemuutil]) +if have_system +xbzrle_bench = executable('xbzrle-bench', + sources: 'xbzrle-bench.c', + dependencies: [qemuutil,migration]) +endif + executable('atomic_add-bench', sources: files('atomic_add-bench.c'), dependencies: [qemuutil], diff --git a/tests/bench/xbzrle-bench.c b/tests/bench/xbzrle-bench.c new file mode 100644 index 0000000000000000000000000000000000000000..8848a3a32d7ef591f8eae2cbbc898f92af07f549 --- /dev/null +++ b/tests/bench/xbzrle-bench.c @@ -0,0 +1,469 @@ +/* + * Xor Based Zero Run Length Encoding unit tests. + * + * Copyright 2013 Red Hat, Inc. and/or its affiliates + * + * Authors: + * Orit Wasserman + * + * This work is licensed under the terms of the GNU GPL, version 2 or later. + * See the COPYING file in the top-level directory. + * + */ +#include "qemu/osdep.h" +#include "qemu/cutils.h" +#include "../migration/xbzrle.h" + +#if defined(CONFIG_AVX512BW_OPT) +#define XBZRLE_PAGE_SIZE 4096 +static bool is_cpu_support_avx512bw; +#include "qemu/cpuid.h" +static void __attribute__((constructor)) init_cpu_flag(void) +{ + unsigned max = __get_cpuid_max(0, NULL); + int a, b, c, d; + is_cpu_support_avx512bw = false; + if (max >= 1) { + __cpuid(1, a, b, c, d); + /* We must check that AVX is not just available, but usable. 
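+         * CPUID.1:ECX.OSXSAVE tells us XGETBV is usable; XGETBV then
+         * reports which register states the OS saves and restores on
+         * context switch. Without OS support for the opmask and ZMM
+         * state, AVX512BW instructions would fault at run time.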
*/ + if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) { + int bv; + __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); + __cpuid_count(7, 0, a, b, c, d); + /* 0xe6: + * XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 + * and ZMM16-ZMM31 state are enabled by OS) + * XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS) + */ + if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512BW)) { + is_cpu_support_avx512bw = true; + } + } + } + return ; +} + +struct ResTime { + float t_raw; + float t_512; +}; + + +/* Function prototypes +int xbzrle_encode_buffer_avx512(uint8_t *old_buf, uint8_t *new_buf, int slen, + uint8_t *dst, int dlen); +*/ +static void encode_decode_zero(struct ResTime *res) +{ + uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed512 = g_malloc0(XBZRLE_PAGE_SIZE); + int i = 0; + int dlen = 0, dlen512 = 0; + int diff_len = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1006); + + for (i = diff_len; i > 0; i--) { + buffer[1000 + i] = i; + buffer512[1000 + i] = i; + } + + buffer[1000 + diff_len + 3] = 103; + buffer[1000 + diff_len + 5] = 105; + + buffer512[1000 + diff_len + 3] = 103; + buffer512[1000 + diff_len + 5] = 105; + + /* encode zero page */ + time_t t_start, t_end, t_start512, t_end512; + t_start = clock(); + dlen = xbzrle_encode_buffer(buffer, buffer, XBZRLE_PAGE_SIZE, compressed, + XBZRLE_PAGE_SIZE); + t_end = clock(); + float time_val = difftime(t_end, t_start); + g_assert(dlen == 0); + + t_start512 = clock(); + dlen512 = xbzrle_encode_buffer_avx512(buffer512, buffer512, XBZRLE_PAGE_SIZE, + compressed512, XBZRLE_PAGE_SIZE); + t_end512 = clock(); + float time_val512 = difftime(t_end512, t_start512); + g_assert(dlen512 == 0); + + res->t_raw = time_val; + res->t_512 = time_val512; + + g_free(buffer); + g_free(compressed); + g_free(buffer512); + g_free(compressed512); + +} + +static void test_encode_decode_zero_avx512(void) +{ + int i; + float time_raw = 0.0, time_512 = 0.0; + struct ResTime res; + for (i = 0; i < 10000; i++) { + encode_decode_zero(&res); + time_raw += res.t_raw; + time_512 += res.t_512; + } + printf("Zero test:\n"); + printf("Raw xbzrle_encode time is %f ms\n", time_raw); + printf("512 xbzrle_encode time is %f ms\n", time_512); +} + +static void encode_decode_unchanged(struct ResTime *res) +{ + uint8_t *compressed = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed512 = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); + int i = 0; + int dlen = 0, dlen512 = 0; + int diff_len = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1006); + + for (i = diff_len; i > 0; i--) { + test[1000 + i] = i + 4; + test512[1000 + i] = i + 4; + } + + test[1000 + diff_len + 3] = 107; + test[1000 + diff_len + 5] = 109; + + test512[1000 + diff_len + 3] = 107; + test512[1000 + diff_len + 5] = 109; + + /* test unchanged buffer */ + time_t t_start, t_end, t_start512, t_end512; + t_start = clock(); + dlen = xbzrle_encode_buffer(test, test, XBZRLE_PAGE_SIZE, compressed, + XBZRLE_PAGE_SIZE); + t_end = clock(); + float time_val = difftime(t_end, t_start); + g_assert(dlen == 0); + + t_start512 = clock(); + dlen512 = xbzrle_encode_buffer_avx512(test512, test512, XBZRLE_PAGE_SIZE, + compressed512, XBZRLE_PAGE_SIZE); + t_end512 = clock(); + float time_val512 = difftime(t_end512, t_start512); + g_assert(dlen512 == 0); + + res->t_raw = time_val; + res->t_512 = time_val512; + + g_free(test); + 
g_free(compressed); + g_free(test512); + g_free(compressed512); + +} + +static void test_encode_decode_unchanged_avx512(void) +{ + int i; + float time_raw = 0.0, time_512 = 0.0; + struct ResTime res; + for (i = 0; i < 10000; i++) { + encode_decode_unchanged(&res); + time_raw += res.t_raw; + time_512 += res.t_512; + } + printf("Unchanged test:\n"); + printf("Raw xbzrle_encode time is %f ms\n", time_raw); + printf("512 xbzrle_encode time is %f ms\n", time_512); +} + +static void encode_decode_1_byte(struct ResTime *res) +{ + uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed = g_malloc(XBZRLE_PAGE_SIZE); + uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed512 = g_malloc(XBZRLE_PAGE_SIZE); + int dlen = 0, rc = 0, dlen512 = 0, rc512 = 0; + uint8_t buf[2]; + uint8_t buf512[2]; + + test[XBZRLE_PAGE_SIZE - 1] = 1; + test512[XBZRLE_PAGE_SIZE - 1] = 1; + + time_t t_start, t_end, t_start512, t_end512; + t_start = clock(); + dlen = xbzrle_encode_buffer(buffer, test, XBZRLE_PAGE_SIZE, compressed, + XBZRLE_PAGE_SIZE); + t_end = clock(); + float time_val = difftime(t_end, t_start); + g_assert(dlen == (uleb128_encode_small(&buf[0], 4095) + 2)); + + rc = xbzrle_decode_buffer(compressed, dlen, buffer, XBZRLE_PAGE_SIZE); + g_assert(rc == XBZRLE_PAGE_SIZE); + g_assert(memcmp(test, buffer, XBZRLE_PAGE_SIZE) == 0); + + t_start512 = clock(); + dlen512 = xbzrle_encode_buffer_avx512(buffer512, test512, XBZRLE_PAGE_SIZE, + compressed512, XBZRLE_PAGE_SIZE); + t_end512 = clock(); + float time_val512 = difftime(t_end512, t_start512); + g_assert(dlen512 == (uleb128_encode_small(&buf512[0], 4095) + 2)); + + rc512 = xbzrle_decode_buffer(compressed512, dlen512, buffer512, + XBZRLE_PAGE_SIZE); + g_assert(rc512 == XBZRLE_PAGE_SIZE); + g_assert(memcmp(test512, buffer512, XBZRLE_PAGE_SIZE) == 0); + + res->t_raw = time_val; + res->t_512 = time_val512; + + g_free(buffer); + g_free(compressed); + g_free(test); + g_free(buffer512); + g_free(compressed512); + g_free(test512); + +} + +static void test_encode_decode_1_byte_avx512(void) +{ + int i; + float time_raw = 0.0, time_512 = 0.0; + struct ResTime res; + for (i = 0; i < 10000; i++) { + encode_decode_1_byte(&res); + time_raw += res.t_raw; + time_512 += res.t_512; + } + printf("1 byte test:\n"); + printf("Raw xbzrle_encode time is %f ms\n", time_raw); + printf("512 xbzrle_encode time is %f ms\n", time_512); +} + +static void encode_decode_overflow(struct ResTime *res) +{ + uint8_t *compressed = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed512 = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); + int i = 0, rc = 0, rc512 = 0; + + for (i = 0; i < XBZRLE_PAGE_SIZE / 2 - 1; i++) { + test[i * 2] = 1; + test512[i * 2] = 1; + } + + /* encode overflow */ + time_t t_start, t_end, t_start512, t_end512; + t_start = clock(); + rc = xbzrle_encode_buffer(buffer, test, XBZRLE_PAGE_SIZE, compressed, + XBZRLE_PAGE_SIZE); + t_end = clock(); + float time_val = difftime(t_end, t_start); + g_assert(rc == -1); + + t_start512 = clock(); + rc512 = xbzrle_encode_buffer_avx512(buffer512, test512, XBZRLE_PAGE_SIZE, + compressed512, XBZRLE_PAGE_SIZE); + t_end512 = clock(); + float time_val512 = difftime(t_end512, t_start512); + g_assert(rc512 == -1); + + res->t_raw = time_val; + res->t_512 = 
time_val512; + + g_free(buffer); + g_free(compressed); + g_free(test); + g_free(buffer512); + g_free(compressed512); + g_free(test512); + +} + +static void test_encode_decode_overflow_avx512(void) +{ + int i; + float time_raw = 0.0, time_512 = 0.0; + struct ResTime res; + for (i = 0; i < 10000; i++) { + encode_decode_overflow(&res); + time_raw += res.t_raw; + time_512 += res.t_512; + } + printf("Overflow test:\n"); + printf("Raw xbzrle_encode time is %f ms\n", time_raw); + printf("512 xbzrle_encode time is %f ms\n", time_512); +} + +static void encode_decode_range_avx512(struct ResTime *res) +{ + uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed = g_malloc(XBZRLE_PAGE_SIZE); + uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed512 = g_malloc(XBZRLE_PAGE_SIZE); + uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); + int i = 0, rc = 0, rc512 = 0; + int dlen = 0, dlen512 = 0; + + int diff_len = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1006); + + for (i = diff_len; i > 0; i--) { + buffer[1000 + i] = i; + test[1000 + i] = i + 4; + buffer512[1000 + i] = i; + test512[1000 + i] = i + 4; + } + + buffer[1000 + diff_len + 3] = 103; + test[1000 + diff_len + 3] = 107; + + buffer[1000 + diff_len + 5] = 105; + test[1000 + diff_len + 5] = 109; + + buffer512[1000 + diff_len + 3] = 103; + test512[1000 + diff_len + 3] = 107; + + buffer512[1000 + diff_len + 5] = 105; + test512[1000 + diff_len + 5] = 109; + + /* test encode/decode */ + time_t t_start, t_end, t_start512, t_end512; + t_start = clock(); + dlen = xbzrle_encode_buffer(test, buffer, XBZRLE_PAGE_SIZE, compressed, + XBZRLE_PAGE_SIZE); + t_end = clock(); + float time_val = difftime(t_end, t_start); + rc = xbzrle_decode_buffer(compressed, dlen, test, XBZRLE_PAGE_SIZE); + g_assert(rc < XBZRLE_PAGE_SIZE); + g_assert(memcmp(test, buffer, XBZRLE_PAGE_SIZE) == 0); + + t_start512 = clock(); + dlen512 = xbzrle_encode_buffer_avx512(test512, buffer512, XBZRLE_PAGE_SIZE, + compressed512, XBZRLE_PAGE_SIZE); + t_end512 = clock(); + float time_val512 = difftime(t_end512, t_start512); + rc512 = xbzrle_decode_buffer(compressed512, dlen512, test512, XBZRLE_PAGE_SIZE); + g_assert(rc512 < XBZRLE_PAGE_SIZE); + g_assert(memcmp(test512, buffer512, XBZRLE_PAGE_SIZE) == 0); + + res->t_raw = time_val; + res->t_512 = time_val512; + + g_free(buffer); + g_free(compressed); + g_free(test); + g_free(buffer512); + g_free(compressed512); + g_free(test512); + +} + +static void test_encode_decode_avx512(void) +{ + int i; + float time_raw = 0.0, time_512 = 0.0; + struct ResTime res; + for (i = 0; i < 10000; i++) { + encode_decode_range_avx512(&res); + time_raw += res.t_raw; + time_512 += res.t_512; + } + printf("Encode decode test:\n"); + printf("Raw xbzrle_encode time is %f ms\n", time_raw); + printf("512 xbzrle_encode time is %f ms\n", time_512); +} + +static void encode_decode_random(struct ResTime *res) +{ + uint8_t *buffer = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed = g_malloc(XBZRLE_PAGE_SIZE); + uint8_t *test = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *buffer512 = g_malloc0(XBZRLE_PAGE_SIZE); + uint8_t *compressed512 = g_malloc(XBZRLE_PAGE_SIZE); + uint8_t *test512 = g_malloc0(XBZRLE_PAGE_SIZE); + int i = 0, rc = 0, rc512 = 0; + int dlen = 0, dlen512 = 0; + + int diff_len = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1); + /* store the index of diff */ + int dirty_index[diff_len]; + for (int j = 0; j < diff_len; j++) { + dirty_index[j] = g_test_rand_int_range(0, XBZRLE_PAGE_SIZE - 1); + } 
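+    /*
+     * dirty_index may contain duplicate offsets, so the number of bytes
+     * that actually differ can be smaller than diff_len; the scalar and
+     * AVX512 encoders still see identical input buffers either way.
+     */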
+ for (i = diff_len - 1; i >= 0; i--) { + buffer[dirty_index[i]] = i; + test[dirty_index[i]] = i + 4; + buffer512[dirty_index[i]] = i; + test512[dirty_index[i]] = i + 4; + } + + time_t t_start, t_end, t_start512, t_end512; + t_start = clock(); + dlen = xbzrle_encode_buffer(test, buffer, XBZRLE_PAGE_SIZE, compressed, + XBZRLE_PAGE_SIZE); + t_end = clock(); + float time_val = difftime(t_end, t_start); + rc = xbzrle_decode_buffer(compressed, dlen, test, XBZRLE_PAGE_SIZE); + g_assert(rc < XBZRLE_PAGE_SIZE); + + t_start512 = clock(); + dlen512 = xbzrle_encode_buffer_avx512(test512, buffer512, XBZRLE_PAGE_SIZE, + compressed512, XBZRLE_PAGE_SIZE); + t_end512 = clock(); + float time_val512 = difftime(t_end512, t_start512); + rc512 = xbzrle_decode_buffer(compressed512, dlen512, test512, XBZRLE_PAGE_SIZE); + g_assert(rc512 < XBZRLE_PAGE_SIZE); + + res->t_raw = time_val; + res->t_512 = time_val512; + + g_free(buffer); + g_free(compressed); + g_free(test); + g_free(buffer512); + g_free(compressed512); + g_free(test512); + +} + +static void test_encode_decode_random_avx512(void) +{ + int i; + float time_raw = 0.0, time_512 = 0.0; + struct ResTime res; + for (i = 0; i < 10000; i++) { + encode_decode_random(&res); + time_raw += res.t_raw; + time_512 += res.t_512; + } + printf("Random test:\n"); + printf("Raw xbzrle_encode time is %f ms\n", time_raw); + printf("512 xbzrle_encode time is %f ms\n", time_512); +} +#endif + +int main(int argc, char **argv) +{ + g_test_init(&argc, &argv, NULL); + g_test_rand_int(); + #if defined(CONFIG_AVX512BW_OPT) + if (likely(is_cpu_support_avx512bw)) { + g_test_add_func("/xbzrle/encode_decode_zero", test_encode_decode_zero_avx512); + g_test_add_func("/xbzrle/encode_decode_unchanged", + test_encode_decode_unchanged_avx512); + g_test_add_func("/xbzrle/encode_decode_1_byte", test_encode_decode_1_byte_avx512); + g_test_add_func("/xbzrle/encode_decode_overflow", + test_encode_decode_overflow_avx512); + g_test_add_func("/xbzrle/encode_decode", test_encode_decode_avx512); + g_test_add_func("/xbzrle/encode_decode_random", test_encode_decode_random_avx512); + } + #endif + return g_test_run(); +} diff --git a/tests/data/acpi/q35/FACP.slic b/tests/data/acpi/q35/FACP.slic new file mode 100644 index 0000000000000000000000000000000000000000..891fd4b784b7b6b3ea303976db7ecd5b669bc84b Binary files /dev/null and b/tests/data/acpi/q35/FACP.slic differ diff --git a/tests/data/acpi/q35/SLIC.slic b/tests/data/acpi/q35/SLIC.slic new file mode 100644 index 0000000000000000000000000000000000000000..fd26592e2480c5d02a018e0d855a04106661a7b5 Binary files /dev/null and b/tests/data/acpi/q35/SLIC.slic differ diff --git a/tests/data/acpi/q35/SSDT.dimmpxm b/tests/data/acpi/q35/SSDT.dimmpxm index 617a1c911c7d6753bcedc8ecc52e3027a5259ad6..a50a961fa1d9b0dd8ea4096d652c83bcf04db20b 100644 Binary files a/tests/data/acpi/q35/SSDT.dimmpxm and b/tests/data/acpi/q35/SSDT.dimmpxm differ diff --git a/tests/data/acpi/virt/DSDT b/tests/data/acpi/virt/DSDT index c47503990715d389914fdf9c8bccb510761741ac..4643f6fa4510fade07c1187d230b92b49cd28cba 100644 Binary files a/tests/data/acpi/virt/DSDT and b/tests/data/acpi/virt/DSDT differ diff --git a/tests/data/acpi/virt/DSDT.memhp b/tests/data/acpi/virt/DSDT.memhp index bae36cdd397473afe3923c52f030641a5ab19d5d..f03b87702e3ec04b8308a1843aa373174c2ed7bb 100644 Binary files a/tests/data/acpi/virt/DSDT.memhp and b/tests/data/acpi/virt/DSDT.memhp differ diff --git a/tests/data/acpi/virt/DSDT.numamem b/tests/data/acpi/virt/DSDT.numamem index 
c47503990715d389914fdf9c8bccb510761741ac..4643f6fa4510fade07c1187d230b92b49cd28cba 100644 Binary files a/tests/data/acpi/virt/DSDT.numamem and b/tests/data/acpi/virt/DSDT.numamem differ diff --git a/tests/data/acpi/virt/DSDT.pxb b/tests/data/acpi/virt/DSDT.pxb index fbd78f44c4785d19759daea909fe6d6f9a6e6b01..8dd662f4bacfa6cb73d454932710f1a3d1d7a33f 100644 Binary files a/tests/data/acpi/virt/DSDT.pxb and b/tests/data/acpi/virt/DSDT.pxb differ diff --git a/tests/data/acpi/virt/PPTT b/tests/data/acpi/virt/PPTT index 7a1258ecf123555b24462c98ccbb76b4ac1d0c2b..b89b2a9c71e0bc2713fc38f5de68fbc39b6302cb 100644 Binary files a/tests/data/acpi/virt/PPTT and b/tests/data/acpi/virt/PPTT differ diff --git a/tests/decode/err_pattern_group_ident2.decode b/tests/decode/err_pattern_group_ident2.decode index bc859233b1b2f689bf9f6500fb6d0c08db9c736e..0abb7513e9347c7d7f7f986fd2618cff7474cc47 100644 --- a/tests/decode/err_pattern_group_ident2.decode +++ b/tests/decode/err_pattern_group_ident2.decode @@ -7,5 +7,5 @@ { top 00000000 00000000 00000000 00000000 sub1 00000000 00000000 00000000 ........ %sub1 -# comments are suposed to be indented +# comments are supposed to be indented } diff --git a/tests/docker/common.rc b/tests/docker/common.rc index e6f8cee0d61d7ea300e0899a494e982ff4827218..6daab2f35373e677522609c248a386a5cab41450 100755 --- a/tests/docker/common.rc +++ b/tests/docker/common.rc @@ -12,7 +12,7 @@ # the top-level directory. # This might be set by ENV of a docker container... it is always -# overriden by TARGET_LIST if the user sets it. We special case +# overridden by TARGET_LIST if the user sets it. We special case # "none" to allow for other options like --disable-tcg to restrict the # builds we eventually do. if test "$DEF_TARGET_LIST" = "none"; then diff --git a/tests/docker/dockerfiles/debian-hexagon-cross.docker b/tests/docker/dockerfiles/debian-hexagon-cross.docker index d5dc299dc1f593f5638e5dd61d6a2c55f3e6fb35..a64e950f0764e03168f55a6dfe59784e4e95f4d3 100644 --- a/tests/docker/dockerfiles/debian-hexagon-cross.docker +++ b/tests/docker/dockerfiles/debian-hexagon-cross.docker @@ -38,7 +38,7 @@ RUN cat /etc/apt/sources.list | sed "s/^deb\ /deb-src /" >> /etc/apt/sources.lis # Install QEMU build deps for use in CI RUN apt update && \ DEBIAN_FRONTEND=noninteractive apt install -yy eatmydata && \ - DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy git ninja-build && \ + DEBIAN_FRONTEND=noninteractive eatmydata apt install -yy bison flex git ninja-build && \ DEBIAN_FRONTEND=noninteractive eatmydata \ apt build-dep -yy --arch-only qemu COPY --from=0 /usr/local /usr/local diff --git a/tests/migration/guestperf-batch.py b/tests/migration/guestperf-batch.py index ab6bdb9d384415514f570a1ffffaf4111b661de9..9485eefe496431ea549d26261ae758d94819d9ed 100755 --- a/tests/migration/guestperf-batch.py +++ b/tests/migration/guestperf-batch.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Migration test batch comparison invokation +# Migration test batch comparison invocation # # Copyright (c) 2016 Red Hat, Inc. # diff --git a/tests/migration/guestperf.py b/tests/migration/guestperf.py index e8cc127fd0280707ade21f0494dba7c8d42ceebe..07182f211e584c08260ae3bb8c27f00461ea8397 100755 --- a/tests/migration/guestperf.py +++ b/tests/migration/guestperf.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Migration test direct invokation command +# Migration test direct invocation command # # Copyright (c) 2016 Red Hat, Inc. 
# diff --git a/tests/migration/guestperf/engine.py b/tests/migration/guestperf/engine.py index 87a6ab200908f6d0d4e581bd75b134e3f466e017..59fca2c70b1e7c81b84c79b38be11bec8769f916 100644 --- a/tests/migration/guestperf/engine.py +++ b/tests/migration/guestperf/engine.py @@ -65,7 +65,6 @@ def _vcpu_timing(self, pid, tid_list): return records def _cpu_timing(self, pid): - records = [] now = time.time() jiffies_per_sec = os.sysconf(os.sysconf_names['SC_CLK_TCK']) diff --git a/tests/plugin/mem.c b/tests/plugin/mem.c index 4570f7d8152e9e822debf1f917efc1172946e106..de6445ffab980af9443229c3afb4fb94b0bcd7f4 100644 --- a/tests/plugin/mem.c +++ b/tests/plugin/mem.c @@ -98,7 +98,7 @@ QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id, } else if (g_strcmp0(tokens[1], "rw") == 0) { rw = QEMU_PLUGIN_MEM_RW; } else { - fprintf(stderr, "invaild value for argument track: %s\n", opt); + fprintf(stderr, "invalid value for argument track: %s\n", opt); return -1; } } else if (g_strcmp0(tokens[0], "inline") == 0) { diff --git a/tests/qapi-schema/bad-if-not.json b/tests/qapi-schema/bad-if-not.json index 9fdaacc47b79d1eeeeb674a6b2b2442ac49da501..660fc4feb2de9df3148b6eb41303f7ec61c8f092 100644 --- a/tests/qapi-schema/bad-if-not.json +++ b/tests/qapi-schema/bad-if-not.json @@ -1,3 +1,3 @@ -# check 'if not' with empy argument +# check 'if not' with empty argument { 'struct': 'TestIfStruct', 'data': { 'foo': 'int' }, 'if': { 'not': '' } } diff --git a/tests/qapi-schema/test-qapi.py b/tests/qapi-schema/test-qapi.py index 2160cef0822de67570e9ea2b39380a0307953901..d58c31f53933badbd63219e5e659a3d24a810c3a 100755 --- a/tests/qapi-schema/test-qapi.py +++ b/tests/qapi-schema/test-qapi.py @@ -206,6 +206,7 @@ def main(argv): parser.add_argument('-d', '--dir', action='store', default='', help="directory containing tests") parser.add_argument('-u', '--update', action='store_true', + default='QAPI_TEST_UPDATE' in os.environ, help="update expected test results") parser.add_argument('tests', nargs='*', metavar='TEST', action='store') args = parser.parse_args() diff --git a/tests/qemu-iotests/029 b/tests/qemu-iotests/029 index bd71dd2f2217fb94ab806016b63e428e917cc177..7f4849b97bf14cf6bb67f76e32df446b8a6a14ac 100755 --- a/tests/qemu-iotests/029 +++ b/tests/qemu-iotests/029 @@ -39,7 +39,7 @@ trap "_cleanup; exit \$status" 0 1 2 3 15 . ./common.filter . 
./common.pattern -# Any format supporting intenal snapshots +# Any format supporting internal snapshots _supported_fmt qcow2 _supported_proto generic # Internal snapshots are (currently) impossible with refcount_bits=1, diff --git a/tests/qemu-iotests/040 b/tests/qemu-iotests/040 index 6af5ab9e764cc5a061712a622a812900293e9db6..57cf100bcb7400726c138607ae49bbab52357a8e 100755 --- a/tests/qemu-iotests/040 +++ b/tests/qemu-iotests/040 @@ -834,7 +834,7 @@ class TestCommitWithFilters(iotests.QMPTestCase): self.assertIsNone(self.vm.node_info('cow-2')) self.assertIsNotNone(self.vm.node_info('cow-1')) - # 2 has been comitted into 1 + # 2 has been committed into 1 self.pattern_files[2] = self.img1 def test_commit_through_filter(self): @@ -851,7 +851,7 @@ class TestCommitWithFilters(iotests.QMPTestCase): self.assertIsNone(self.vm.node_info('bottom-filter')) self.assertIsNotNone(self.vm.node_info('cow-0')) - # 1 has been comitted into 0 + # 1 has been committed into 0 self.pattern_files[1] = self.img0 def test_filtered_active_commit_with_filter(self): @@ -888,7 +888,7 @@ class TestCommitWithFilters(iotests.QMPTestCase): drv0 = next(dev for dev in blockdevs if dev['qdev'] == 'drv0') self.assertEqual(drv0['inserted']['node-name'], 'cow-2') - # 3 has been comitted into 2 + # 3 has been committed into 2 self.pattern_files[3] = self.img2 def test_filtered_active_commit_without_filter(self): @@ -904,7 +904,7 @@ class TestCommitWithFilters(iotests.QMPTestCase): self.assertIsNone(self.vm.node_info('cow-3')) self.assertIsNotNone(self.vm.node_info('cow-2')) - # 3 has been comitted into 2 + # 3 has been committed into 2 self.pattern_files[3] = self.img2 class TestCommitWithOverriddenBacking(iotests.QMPTestCase): diff --git a/tests/qemu-iotests/046 b/tests/qemu-iotests/046 index 517b16250821f349a2f0c86f209f923946363b6b..4c9ed4d26e1b60287ad24c10a58aac57f8423c6b 100755 --- a/tests/qemu-iotests/046 +++ b/tests/qemu-iotests/046 @@ -125,7 +125,7 @@ aio_flush EOF # Sequential write, but the next cluster is already allocated -# and phyiscally in the right position +# and physically in the right position cat <&1 | _filter_testdir | _filter_imgfmt +$QEMU_IO -c "open -o data-file.filename=$TEST_IMG.data,file.filename=$TEST_IMG" -c "read 0 4k" | _filter_qemu_io TEST_IMG="data-file.filename=$TEST_IMG.data,file.filename=$TEST_IMG" _img_info --format-specific --image-opts echo $QEMU_IMG amend -o "data_file=" --image-opts "data-file.filename=$TEST_IMG.data,file.filename=$TEST_IMG" -_img_info --format-specific +$QEMU_IO -c "read 0 4k" "$TEST_IMG" 2>&1 | _filter_testdir | _filter_imgfmt +$QEMU_IO -c "open -o data-file.filename=$TEST_IMG.data,file.filename=$TEST_IMG" -c "read 0 4k" | _filter_qemu_io TEST_IMG="data-file.filename=$TEST_IMG.data,file.filename=$TEST_IMG" _img_info --format-specific --image-opts echo diff --git a/tests/qemu-iotests/061.out b/tests/qemu-iotests/061.out index 7ecbd4dea875ea26e0b131ec292294f38baf5882..99b2307a23ca081d21767ca6842fdab3ce4966f6 100644 --- a/tests/qemu-iotests/061.out +++ b/tests/qemu-iotests/061.out @@ -545,7 +545,9 @@ Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 qemu-img: data-file can only be set for images that use an external data file Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=67108864 data_file=TEST_DIR/t.IMGFMT.data -qemu-img: Could not open 'TEST_DIR/t.IMGFMT': Could not open 'foo': No such file or directory +qemu-io: can't open device TEST_DIR/t.IMGFMT: Could not open 'foo': No such file or directory +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX 
YYY/sec and XXX ops/sec) image: TEST_DIR/t.IMGFMT file format: IMGFMT virtual size: 64 MiB (67108864 bytes) @@ -560,7 +562,9 @@ Format specific information: corrupt: false extended l2: false -qemu-img: Could not open 'TEST_DIR/t.IMGFMT': 'data-file' is required for this image +qemu-io: can't open device TEST_DIR/t.IMGFMT: 'data-file' is required for this image +read 4096/4096 bytes at offset 0 +4 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) image: TEST_DIR/t.IMGFMT file format: IMGFMT virtual size: 64 MiB (67108864 bytes) diff --git a/tests/qemu-iotests/071 b/tests/qemu-iotests/071 index d464596f14046a324ac1afa01072d4c2b2c30240..7589672c8ec85f26928506fa85c453e1038e69ef 100755 --- a/tests/qemu-iotests/071 +++ b/tests/qemu-iotests/071 @@ -41,7 +41,7 @@ _supported_fmt qcow2 _supported_proto file fuse _require_drivers blkdebug blkverify # blkdebug can only inject errors on bs->file, not on the data_file, -# so thie test does not work with external data files +# so this test does not work with external data files _unsupported_imgopts data_file do_run_qemu() diff --git a/tests/qemu-iotests/099.out b/tests/qemu-iotests/099.out index 8cce6275295fb66acffdbd27c9a4ecfb910a2657..f6f8f259574f8d2e705ebbd551817c6c5875da32 100644 --- a/tests/qemu-iotests/099.out +++ b/tests/qemu-iotests/099.out @@ -1,6 +1,6 @@ QA output created by 099 Formatting 'TEST_DIR/t.IMGFMT', fmt=IMGFMT size=131072 -Formatting 'TEST_DIR/t.IMGFMT.compare', fmt=raw size=131072 +Formatting 'TEST_DIR/t.IMGFMT.compare', fmt=raw size=131072 cache=writeback === Testing simple filename for blkverify === diff --git a/tests/qemu-iotests/194 b/tests/qemu-iotests/194 index e44b8df7280e2d10e7934d5db9d76af7f799a217..4c8a952cd813e7b6d34ed454188a4e0a7c88dcb7 100755 --- a/tests/qemu-iotests/194 +++ b/tests/qemu-iotests/194 @@ -74,6 +74,11 @@ with iotests.FilePath('source.img') as source_img_path, \ while True: event1 = source_vm.event_wait('MIGRATION') + if event1['data']['status'] == 'postcopy-active': + # This event is racy: whether we really enter postcopy, or the bitmap + # was migrated during downtime (leaving nothing to migrate in the + # postcopy phase), depends on timing. So, don't log it. + continue iotests.log(event1, filters=[iotests.filter_qmp_event]) if event1['data']['status'] in ('completed', 'failed'): iotests.log('Gracefully ending the `drive-mirror` job on source...') diff --git a/tests/qemu-iotests/194.out b/tests/qemu-iotests/194.out index 4e6df1565ab15e3e641f0e7785868641bc9b2845..376ed1d2e6346d8eb6ae921f9b1ba5d74a5d121d 100644 --- a/tests/qemu-iotests/194.out +++ b/tests/qemu-iotests/194.out @@ -14,7 +14,6 @@ Starting migration... {"return": {}} {"data": {"status": "setup"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} -{"data": {"status": "postcopy-active"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} {"data": {"status": "completed"}, "event": "MIGRATION", "timestamp": {"microseconds": "USECS", "seconds": "SECS"}} Gracefully ending the `drive-mirror` job on source...
{"return": {}} diff --git a/tests/qemu-iotests/197 b/tests/qemu-iotests/197 index a2547bc280ca4cc8a08e84464a257f41fc2ef5c1..08d9b49906e742e3d39c2d96a95a3136c3007976 100755 --- a/tests/qemu-iotests/197 +++ b/tests/qemu-iotests/197 @@ -93,7 +93,7 @@ output=$($QEMU_IO -f qcow2 -C -c "read -P 0 1k $((2*1024*1024*1024 - 512))" \ "$TEST_WRAP" 2>&1 | _filter_qemu_io) case $output in *allocate*) - _notrun "Insufficent memory to run test" ;; + _notrun "Insufficient memory to run test" ;; *) printf '%s\n' "$output" ;; esac $QEMU_IO -f qcow2 -C -c "read -P 0 $((3*1024*1024*1024 + 1024)) 1k" \ diff --git a/tests/qemu-iotests/215 b/tests/qemu-iotests/215 index d464596f14046a324ac1afa01072d4c2b2c30240..6babbcdc1fdbed53dd02d9391fb2c47d8ce5c8fe 100755 --- a/tests/qemu-iotests/215 +++ b/tests/qemu-iotests/215 @@ -95,7 +95,7 @@ output=$($QEMU_IO \ 2>&1 | _filter_qemu_io) case $output in *allocate*) - _notrun "Insufficent memory to run test" ;; + _notrun "Insufficient memory to run test" ;; *) printf '%s\n' "$output" ;; esac $QEMU_IO \ diff --git a/tests/qemu-iotests/244 b/tests/qemu-iotests/244 index 3e61fa25bb6047b59fc2e8d3ed5596fc7317b02d..bb9cc6512f3545fc03a9f2089d34bd1270360c6b 100755 --- a/tests/qemu-iotests/244 +++ b/tests/qemu-iotests/244 @@ -215,9 +215,22 @@ $QEMU_IMG convert -f $IMGFMT -O $IMGFMT -n -C "$TEST_IMG.src" "$TEST_IMG" $QEMU_IMG compare -f $IMGFMT -F $IMGFMT "$TEST_IMG.src" "$TEST_IMG" # blkdebug doesn't support copy offloading, so this tests the error path -$QEMU_IMG amend -f $IMGFMT -o "data_file=blkdebug::$TEST_IMG.data" "$TEST_IMG" -$QEMU_IMG convert -f $IMGFMT -O $IMGFMT -n -C "$TEST_IMG.src" "$TEST_IMG" -$QEMU_IMG compare -f $IMGFMT -F $IMGFMT "$TEST_IMG.src" "$TEST_IMG" +test_img_with_blkdebug="json:{ + 'driver': 'qcow2', + 'file': { + 'driver': 'file', + 'filename': '$TEST_IMG' + }, + 'data-file': { + 'driver': 'blkdebug', + 'image': { + 'driver': 'file', + 'filename': '$TEST_IMG.data' + } + } +}" +$QEMU_IMG convert -f $IMGFMT -O $IMGFMT -n -C "$TEST_IMG.src" "$test_img_with_blkdebug" +$QEMU_IMG compare -f $IMGFMT -F $IMGFMT "$TEST_IMG.src" "$test_img_with_blkdebug" echo echo "=== Flushing should flush the data file ===" diff --git a/tests/qemu-iotests/270 b/tests/qemu-iotests/270 index 74352342db58c000b992d04a31cc5b23b246cefb..c37b674aa2e15e24840b68c15af98dcc2a601702 100755 --- a/tests/qemu-iotests/270 +++ b/tests/qemu-iotests/270 @@ -60,8 +60,16 @@ _make_test_img -o cluster_size=2M,data_file="$TEST_IMG.orig" \ # "write" 2G of data without using any space. # (qemu-img create does not like it, though, because null-co does not # support image creation.) -$QEMU_IMG amend -o data_file="json:{'driver':'null-co',,'size':'4294967296'}" \ - "$TEST_IMG" +test_img_with_null_data="json:{ + 'driver': '$IMGFMT', + 'file': { + 'filename': '$TEST_IMG' + }, + 'data-file': { + 'driver': 'null-co', + 'size':'4294967296' + } +}" # This gives us a range of: # 2^31 - 512 + 768 - 1 = 2^31 + 255 > 2^31 @@ -74,7 +82,7 @@ $QEMU_IMG amend -o data_file="json:{'driver':'null-co',,'size':'4294967296'}" \ # on L2 boundaries, we need large L2 tables; hence the cluster size of # 2 MB. (Anything from 256 kB should work, though, because then one L2 # table covers 8 GB.) 
-$QEMU_IO -c "write 768 $((2 ** 31 - 512))" "$TEST_IMG" | _filter_qemu_io +$QEMU_IO -c "write 768 $((2 ** 31 - 512))" "$test_img_with_null_data" | _filter_qemu_io _check_test_img diff --git a/tests/qemu-iotests/298 b/tests/qemu-iotests/298 index fae72211b11dc357b9989e320603d4207801f901..87bd5bb627c5ee01a616e7a89e7388588a797719 100755 --- a/tests/qemu-iotests/298 +++ b/tests/qemu-iotests/298 @@ -143,8 +143,8 @@ class TestTruncate(iotests.QMPTestCase): stat = os.stat(disk) refstat = os.stat(refdisk) - # Probably we'll want preallocate filter to keep align to cluster when - # shrink preallocation, so, ignore small differece + # The preallocate filter may keep cluster alignment when shrinking, + # so ignore small differences self.assertLess(abs(stat.st_size - refstat.st_size), 64 * 1024) # Preallocate filter may leak some internal clusters (for example, if diff --git a/tests/qemu-iotests/308 b/tests/qemu-iotests/308 index 2e3f8f42824ad848a2706b9f61ad647641db5aa7..bde4aac2fa2a1b9abe0ac82a38d3392519590d6a 100755 --- a/tests/qemu-iotests/308 +++ b/tests/qemu-iotests/308 @@ -230,8 +230,29 @@ echo '=== Writable export ===' fuse_export_add 'export-mp' "'mountpoint': '$EXT_MP', 'writable': true" # Check that writing to the read-only export fails -$QEMU_IO -f raw -c 'write -P 42 1M 64k' "$TEST_IMG" 2>&1 \ - | _filter_qemu_io | _filter_testdir | _filter_imgfmt +output=$($QEMU_IO -f raw -c 'write -P 42 1M 64k' "$TEST_IMG" 2>&1 \ + | _filter_qemu_io | _filter_testdir | _filter_imgfmt) + +# Expected reference output: Opening the file fails because it has no +# write permission +reference="Could not open 'TEST_DIR/t.IMGFMT': Permission denied" + +if echo "$output" | grep -q "$reference"; then + echo "Writing to read-only export failed: OK" +elif echo "$output" | grep -q "write failed: Permission denied"; then + # With CAP_DAC_OVERRIDE (e.g. when running this test as root), the export + # can be opened regardless of its file permissions, but writing will then + # fail. This is not the result for which we want to test, so count this as + # a SKIP. 
+ _casenotrun "Opening RO export as R/W succeeded, perhaps because of" \ + "CAP_DAC_OVERRIDE" + + # Still, write this to the reference output to make the test pass + echo "Writing to read-only export failed: OK" +else + echo "Writing to read-only export failed: ERROR" + echo "$output" +fi # But here it should work $QEMU_IO -f raw -c 'write -P 42 1M 64k' "$EXT_MP" | _filter_qemu_io diff --git a/tests/qemu-iotests/308.out b/tests/qemu-iotests/308.out index fc47bb11a2e482a76e5c9f8fd3fb0a16458036dc..e4467a10cf6a6543f1bdfc2608e2c2ed044486a9 100644 --- a/tests/qemu-iotests/308.out +++ b/tests/qemu-iotests/308.out @@ -95,7 +95,7 @@ virtual size: 0 B (0 bytes) 'mountpoint': 'TEST_DIR/t.IMGFMT.fuse', 'writable': true } } {"return": {}} -qemu-io: can't open device TEST_DIR/t.IMGFMT: Could not open 'TEST_DIR/t.IMGFMT': Permission denied +Writing to read-only export failed: OK wrote 65536/65536 bytes at offset 1048576 64 KiB, X ops; XX:XX:XX.X (XXX YYY/sec and XXX ops/sec) wrote 65536/65536 bytes at offset 1048576 diff --git a/tests/qemu-iotests/common.rc b/tests/qemu-iotests/common.rc index d8582454de0aa4c2c9cfba0f0ce4916519151083..4c468675d324052f3619860494285d103cd087e6 100644 --- a/tests/qemu-iotests/common.rc +++ b/tests/qemu-iotests/common.rc @@ -973,7 +973,7 @@ _require_large_file() # _require_devices() { - available=$($QEMU -M none -device help | \ + available=$($QEMU -M none -device help 2> /dev/null | \ grep ^name | sed -e 's/^name "//' -e 's/".*$//') for device do @@ -985,7 +985,7 @@ _require_devices() _require_one_device_of() { - available=$($QEMU -M none -device help | \ + available=$($QEMU -M none -device help 2> /dev/null | \ grep ^name | sed -e 's/^name "//' -e 's/".*$//') for device do diff --git a/tests/qemu-iotests/pylintrc b/tests/qemu-iotests/pylintrc index 32ab77b8bb979436de1d96333b435244f1616334..74e57933255de635fabbe4e2b03d9469d3c2fcde 100644 --- a/tests/qemu-iotests/pylintrc +++ b/tests/qemu-iotests/pylintrc @@ -19,7 +19,7 @@ disable=invalid-name, too-many-public-methods, # pylint warns about Optional[] etc. as unsubscriptable in 3.9 unsubscriptable-object, - # pylint's static analysis causes false positivies for file_path(); + # pylint's static analysis causes false positives for file_path(); # If we really care to make it statically knowable, we'll use mypy. unbalanced-tuple-unpacking, # Sometimes we need to disable a newly introduced pylint warning. 
diff --git a/tests/qemu-iotests/testenv.py b/tests/qemu-iotests/testenv.py index c33454fa6854da6261a0c2dcf3f47179bb2f0122..993e9c56beb5a56f1eb8c8bdae185b43ffb4411f 100644 --- a/tests/qemu-iotests/testenv.py +++ b/tests/qemu-iotests/testenv.py @@ -40,7 +40,7 @@ def get_default_machine(qemu_prog: str) -> str: machines = outp.split('\n') try: - default_machine = next(m for m in machines if m.endswith(' (default)')) + default_machine = next(m for m in machines if ' (default)' in m) except StopIteration: return '' default_machine = default_machine.split(' ', 1)[0] @@ -238,6 +238,8 @@ def __init__(self, imgfmt: str, imgproto: str, aiomode: str, ('aarch64', 'virt'), ('avr', 'mega2560'), ('m68k', 'virt'), + ('riscv32', 'virt'), + ('riscv64', 'virt'), ('rx', 'gdbsim-r5f562n8'), ('tricore', 'tricore_testboard') ) diff --git a/tests/qtest/acpi-utils.c b/tests/qtest/acpi-utils.c index 766c48e3a6a43bc8ae61fb175001c78707b972ea..c6f5169b80426a8fdafb6e8a99b7d112fc2eb247 100644 --- a/tests/qtest/acpi-utils.c +++ b/tests/qtest/acpi-utils.c @@ -103,7 +103,7 @@ void acpi_fetch_table(QTestState *qts, uint8_t **aml, uint32_t *aml_len, char *fname = NULL; GError *error = NULL; - fprintf(stderr, "Invalid '%.4s'(%d)\n", *aml, *aml_len); + fprintf(stderr, "Invalid '%.4s'(%u)\n", *aml, *aml_len); fd = g_file_open_tmp("malformed-XXXXXX.dat", &fname, &error); g_assert_no_error(error); fprintf(stderr, "Dumping invalid table into '%s'\n", fname); diff --git a/tests/qtest/ahci-test.c b/tests/qtest/ahci-test.c index 8073ccc205215a799ed26e569fe6b070a0dd623c..c8657cbdb36519c794f5d0452b0f0accde8efb4a 100644 --- a/tests/qtest/ahci-test.c +++ b/tests/qtest/ahci-test.c @@ -331,7 +331,7 @@ static void ahci_test_pci_spec(AHCIQState *ahci) ASSERT_BIT_CLEAR(datal, ~0xFF); g_assert_cmphex(datal, !=, 0); - /* Check specification adherence for capability extenstions. */ + /* Check specification adherence for capability extensions. */ data = qpci_config_readw(ahci->dev, datal); switch (ahci->fingerprint) { @@ -1425,6 +1425,89 @@ static void test_reset(void) ahci_shutdown(ahci); } +static void test_reset_pending_callback(void) +{ + AHCIQState *ahci; + AHCICommand *cmd; + uint8_t port; + uint64_t ptr1; + uint64_t ptr2; + + int bufsize = 4 * 1024; + int speed = bufsize + (bufsize / 2); + int offset1 = 0; + int offset2 = bufsize / AHCI_SECTOR_SIZE; + + g_autofree unsigned char *tx1 = g_malloc(bufsize); + g_autofree unsigned char *tx2 = g_malloc(bufsize); + g_autofree unsigned char *rx1 = g_malloc0(bufsize); + g_autofree unsigned char *rx2 = g_malloc0(bufsize); + + /* Uses throttling to make test independent of specific environment. */ + ahci = ahci_boot_and_enable("-drive if=none,id=drive0,file=%s," + "cache=writeback,format=%s," + "throttling.bps-write=%d " + "-M q35 " + "-device ide-hd,drive=drive0 ", + tmp_path, imgfmt, speed); + + port = ahci_port_select(ahci); + ahci_port_clear(ahci, port); + + ptr1 = ahci_alloc(ahci, bufsize); + ptr2 = ahci_alloc(ahci, bufsize); + + g_assert(ptr1 && ptr2); + + /* Need two different patterns. */ + do { + generate_pattern(tx1, bufsize, AHCI_SECTOR_SIZE); + generate_pattern(tx2, bufsize, AHCI_SECTOR_SIZE); + } while (memcmp(tx1, tx2, bufsize) == 0); + + qtest_bufwrite(ahci->parent->qts, ptr1, tx1, bufsize); + qtest_bufwrite(ahci->parent->qts, ptr2, tx2, bufsize); + + /* Write to beginning of disk to check it wasn't overwritten later. */ + ahci_guest_io(ahci, port, CMD_WRITE_DMA_EXT, ptr1, bufsize, offset1); + + /* Issue asynchronously to get a pending callback during reset. 
*/ + cmd = ahci_command_create(CMD_WRITE_DMA_EXT); + ahci_command_adjust(cmd, offset2, ptr2, bufsize, 0); + ahci_command_commit(ahci, cmd, port); + ahci_command_issue_async(ahci, cmd); + + ahci_set(ahci, AHCI_GHC, AHCI_GHC_HR); + + ahci_command_free(cmd); + + /* Wait for throttled write to finish. */ + sleep(1); + + /* Start again. */ + ahci_clean_mem(ahci); + ahci_pci_enable(ahci); + ahci_hba_enable(ahci); + port = ahci_port_select(ahci); + ahci_port_clear(ahci, port); + + /* Read and verify. */ + ahci_guest_io(ahci, port, CMD_READ_DMA_EXT, ptr1, bufsize, offset1); + qtest_bufread(ahci->parent->qts, ptr1, rx1, bufsize); + g_assert_cmphex(memcmp(tx1, rx1, bufsize), ==, 0); + + ahci_guest_io(ahci, port, CMD_READ_DMA_EXT, ptr2, bufsize, offset2); + qtest_bufread(ahci->parent->qts, ptr2, rx2, bufsize); + g_assert_cmphex(memcmp(tx2, rx2, bufsize), ==, 0); + + ahci_free(ahci, ptr1); + ahci_free(ahci, ptr2); + + ahci_clean_mem(ahci); + + ahci_shutdown(ahci); +} + static void test_ncq_simple(void) { AHCIQState *ahci; @@ -1929,7 +2012,8 @@ int main(int argc, char **argv) qtest_add_func("/ahci/migrate/dma/halted", test_migrate_halted_dma); qtest_add_func("/ahci/max", test_max); - qtest_add_func("/ahci/reset", test_reset); + qtest_add_func("/ahci/reset/simple", test_reset); + qtest_add_func("/ahci/reset/pending_callback", test_reset_pending_callback); qtest_add_func("/ahci/io/ncq/simple", test_ncq_simple); qtest_add_func("/ahci/migrate/ncq/simple", test_migrate_ncq); diff --git a/tests/qtest/bios-tables-test.c b/tests/qtest/bios-tables-test.c index 258874167ef4560300ed80eab0c954e14c445066..0b89cae577d14d9b2d7abb9a265b12e543a6c53e 100644 --- a/tests/qtest/bios-tables-test.c +++ b/tests/qtest/bios-tables-test.c @@ -26,7 +26,7 @@ * 4. Run * make check V=1 * this will produce a bunch of warnings about differences - * beween actual and expected ACPI tables. If you have IASL installed, + * between actual and expected ACPI tables. If you have IASL installed, * they will also be disassembled so you can look at the disassembled * output. If not - disassemble them yourself in any way you like. 
* Look at the differences - make sure they make sense and match what the @@ -1465,6 +1465,20 @@ static void test_acpi_virt_tcg(void) free_test_data(&data); } +static void test_acpi_q35_slic(void) +{ + test_data data = { + .machine = MACHINE_Q35, + .variant = ".slic", + }; + + test_acpi_one("-acpitable sig=SLIC,oem_id='CRASH ',oem_table_id='ME'," + "oem_rev=00002210,asl_compiler_id='qemu'," + "asl_compiler_rev=00000000,data=/dev/null", + &data); + free_test_data(&data); +} + static void test_oem_fields(test_data *data) { int i; @@ -1487,7 +1501,7 @@ static void test_oem_fields(test_data *data) } } -static void test_acpi_oem_fields_pc(void) +static void test_acpi_piix4_oem_fields(void) { test_data data; char *args; @@ -1507,7 +1521,7 @@ static void test_acpi_oem_fields_pc(void) g_free(args); } -static void test_acpi_oem_fields_q35(void) +static void test_acpi_q35_oem_fields(void) { test_data data; char *args; @@ -1527,7 +1541,7 @@ static void test_acpi_oem_fields_q35(void) g_free(args); } -static void test_acpi_oem_fields_microvm(void) +static void test_acpi_microvm_oem_fields(void) { test_data data; char *args; @@ -1544,7 +1558,7 @@ static void test_acpi_oem_fields_microvm(void) g_free(args); } -static void test_acpi_oem_fields_virt(void) +static void test_acpi_virt_oem_fields(void) { test_data data = { .machine = "virt", @@ -1582,13 +1596,13 @@ int main(int argc, char *argv[]) if (ret) { return ret; } - qtest_add_func("acpi/q35/oem-fields", test_acpi_oem_fields_q35); + qtest_add_func("acpi/q35/oem-fields", test_acpi_q35_oem_fields); if (tpm_model_is_available("-machine q35", "tpm-tis")) { qtest_add_func("acpi/q35/tpm2-tis", test_acpi_q35_tcg_tpm2_tis); qtest_add_func("acpi/q35/tpm12-tis", test_acpi_q35_tcg_tpm12_tis); } qtest_add_func("acpi/piix4", test_acpi_piix4_tcg); - qtest_add_func("acpi/oem-fields", test_acpi_oem_fields_pc); + qtest_add_func("acpi/piix4/oem-fields", test_acpi_piix4_oem_fields); qtest_add_func("acpi/piix4/bridge", test_acpi_piix4_tcg_bridge); qtest_add_func("acpi/piix4/pci-hotplug/no_root_hotplug", test_acpi_piix4_no_root_hotplug); @@ -1628,7 +1642,7 @@ int main(int argc, char *argv[]) qtest_add_func("acpi/microvm/usb", test_acpi_microvm_usb_tcg); qtest_add_func("acpi/microvm/rtc", test_acpi_microvm_rtc_tcg); qtest_add_func("acpi/microvm/ioapic2", test_acpi_microvm_ioapic2_tcg); - qtest_add_func("acpi/microvm/oem-fields", test_acpi_oem_fields_microvm); + qtest_add_func("acpi/microvm/oem-fields", test_acpi_microvm_oem_fields); if (has_tcg) { qtest_add_func("acpi/q35/ivrs", test_acpi_q35_tcg_ivrs); if (strcmp(arch, "x86_64") == 0) { @@ -1639,13 +1653,14 @@ int main(int argc, char *argv[]) qtest_add_func("acpi/q35/kvm/xapic", test_acpi_q35_kvm_xapic); qtest_add_func("acpi/q35/kvm/dmar", test_acpi_q35_kvm_dmar); } + qtest_add_func("acpi/q35/slic", test_acpi_q35_slic); } else if (strcmp(arch, "aarch64") == 0) { if (has_tcg) { qtest_add_func("acpi/virt", test_acpi_virt_tcg); qtest_add_func("acpi/virt/numamem", test_acpi_virt_tcg_numamem); qtest_add_func("acpi/virt/memhp", test_acpi_virt_tcg_memhp); qtest_add_func("acpi/virt/pxb", test_acpi_virt_tcg_pxb); - qtest_add_func("acpi/virt/oem-fields", test_acpi_oem_fields_virt); + qtest_add_func("acpi/virt/oem-fields", test_acpi_virt_oem_fields); } } ret = g_test_run(); diff --git a/tests/qtest/ds1338-test.c b/tests/qtest/ds1338-test.c index c5d46bcc643d40a1d05037587e5d0bfbd65b430b..427407e7b4eac2b6ef71213c9db044ad37eb0863 100644 --- a/tests/qtest/ds1338-test.c +++ b/tests/qtest/ds1338-test.c @@ -38,7 +38,7 @@ static void 
send_and_receive(void *obj, void *data, QGuestAllocator *alloc) i2c_read_block(i2cdev, 0, resp, sizeof(resp)); - /* check retrieved time againt local time */ + /* check retrieved time against local time */ g_assert_cmpuint(bcd2bin(resp[4]), == , tm_ptr->tm_mday); g_assert_cmpuint(bcd2bin(resp[5]), == , 1 + tm_ptr->tm_mon); g_assert_cmpuint(2000 + bcd2bin(resp[6]), == , 1900 + tm_ptr->tm_year); diff --git a/tests/qtest/e1000-test.c b/tests/qtest/e1000-test.c index ea286d1793046b0508ec7ed4714740ee947a84bd..1bd25d584bc67e288e4891a460c4eea883eab92e 100644 --- a/tests/qtest/e1000-test.c +++ b/tests/qtest/e1000-test.c @@ -35,7 +35,7 @@ static void *e1000_get_driver(void *obj, const char *interface) return &e1000->dev; } - fprintf(stderr, "%s not present in e1000e\n", interface); + fprintf(stderr, "%s not present in e1000\n", interface); g_assert_not_reached(); } diff --git a/tests/qtest/es1370-test.c b/tests/qtest/es1370-test.c index 2fd7fd2d3d3039e64c2e22596d9018b40f488da9..861656d572f69e8f953c9de1addb8f6fe6f504ff 100644 --- a/tests/qtest/es1370-test.c +++ b/tests/qtest/es1370-test.c @@ -28,7 +28,7 @@ static void *es1370_get_driver(void *obj, const char *interface) return &es1370->dev; } - fprintf(stderr, "%s not present in e1000e\n", interface); + fprintf(stderr, "%s not present in es1370\n", interface); g_assert_not_reached(); } diff --git a/tests/qtest/fdc-test.c b/tests/qtest/fdc-test.c index 8f6eee84a47ac58fca68f54c936beee93d63b15c..6f5850354fd5687fc8484db8d4e362fa0a429289 100644 --- a/tests/qtest/fdc-test.c +++ b/tests/qtest/fdc-test.c @@ -583,6 +583,26 @@ static void test_cve_2021_20196(void) qtest_quit(s); } +static void test_cve_2021_3507(void) +{ + QTestState *s; + + s = qtest_initf("-nographic -m 32M -nodefaults " + "-drive file=%s,format=raw,if=floppy,snapshot=on", + test_image); + qtest_outl(s, 0x9, 0x0a0206); + qtest_outw(s, 0x3f4, 0x1600); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0200); + qtest_outw(s, 0x3f4, 0x0200); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_outw(s, 0x3f4, 0x0000); + qtest_quit(s); +} + int main(int argc, char **argv) { int fd; @@ -614,6 +634,7 @@ int main(int argc, char **argv) qtest_add_func("/fdc/read_no_dma_19", test_read_no_dma_19); qtest_add_func("/fdc/fuzz-registers", fuzz_registers); qtest_add_func("/fdc/fuzz/cve_2021_20196", test_cve_2021_20196); + qtest_add_func("/fdc/fuzz/cve_2021_3507", test_cve_2021_3507); ret = g_test_run(); diff --git a/tests/qtest/fuzz-lsi53c895a-test.c b/tests/qtest/fuzz-lsi53c895a-test.c new file mode 100644 index 0000000000000000000000000000000000000000..bd18e8622ed3e843316a5d939107ab947bc00516 --- /dev/null +++ b/tests/qtest/fuzz-lsi53c895a-test.c @@ -0,0 +1,159 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * QTest fuzzer-generated testcase for LSI53C895A device + * + * Copyright (c) Red Hat + */ + +#include "qemu/osdep.h" +#include "libqos/libqtest.h" + +/* + * This used to trigger a DMA reentrancy issue + * leading to memory corruption bugs like stack + * overflow or use-after-free + * https://gitlab.com/qemu-project/qemu/-/issues/1563 + */ +static void test_lsi_dma_reentrancy(void) +{ + QTestState *s; + + s = qtest_init("-M q35 -m 512M -nodefaults " + "-blockdev driver=null-co,node-name=null0 " + "-device lsi53c810 -device scsi-cd,drive=null0"); + + qtest_outl(s, 0xcf8, 0x80000804); /* PCI Command Register */ + qtest_outw(s, 0xcfc, 0x7); /* Enables accesses */ + qtest_outl(s, 0xcf8, 0x80000814); /* 
Memory Bar 1 */ + qtest_outl(s, 0xcfc, 0xff100000); /* Set MMIO Address*/ + qtest_outl(s, 0xcf8, 0x80000818); /* Memory Bar 2 */ + qtest_outl(s, 0xcfc, 0xff000000); /* Set RAM Address*/ + qtest_writel(s, 0xff000000, 0xc0000024); + qtest_writel(s, 0xff000114, 0x00000080); + qtest_writel(s, 0xff00012c, 0xff000000); + qtest_writel(s, 0xff000004, 0xff000114); + qtest_writel(s, 0xff000008, 0xff100014); + qtest_writel(s, 0xff10002f, 0x000000ff); + + qtest_quit(s); +} + +/* + * This used to trigger a UAF in lsi_do_msgout() + * https://gitlab.com/qemu-project/qemu/-/issues/972 + */ +static void test_lsi_do_msgout_cancel_req(void) +{ + QTestState *s; + + if (sizeof(void *) == 4) { + g_test_skip("memory size too big for 32-bit build"); + return; + } + + s = qtest_init("-M q35 -m 2G -display none -nodefaults " + "-device lsi53c895a,id=scsi " + "-device scsi-hd,drive=disk0 " + "-drive file=null-co://,id=disk0,if=none,format=raw"); + + qtest_outl(s, 0xcf8, 0x80000810); + qtest_outl(s, 0xcf8, 0xc000); + qtest_outl(s, 0xcf8, 0x80000810); + qtest_outw(s, 0xcfc, 0x7); + qtest_outl(s, 0xcf8, 0x80000810); + qtest_outl(s, 0xcfc, 0xc000); + qtest_outl(s, 0xcf8, 0x80000804); + qtest_outw(s, 0xcfc, 0x05); + qtest_writeb(s, 0x69736c10, 0x08); + qtest_writeb(s, 0x69736c13, 0x58); + qtest_writeb(s, 0x69736c1a, 0x01); + qtest_writeb(s, 0x69736c1b, 0x06); + qtest_writeb(s, 0x69736c22, 0x01); + qtest_writeb(s, 0x69736c23, 0x07); + qtest_writeb(s, 0x69736c2b, 0x02); + qtest_writeb(s, 0x69736c48, 0x08); + qtest_writeb(s, 0x69736c4b, 0x58); + qtest_writeb(s, 0x69736c52, 0x04); + qtest_writeb(s, 0x69736c53, 0x06); + qtest_writeb(s, 0x69736c5b, 0x02); + qtest_outl(s, 0xc02d, 0x697300); + qtest_writeb(s, 0x5a554662, 0x01); + qtest_writeb(s, 0x5a554663, 0x07); + qtest_writeb(s, 0x5a55466a, 0x10); + qtest_writeb(s, 0x5a55466b, 0x22); + qtest_writeb(s, 0x5a55466c, 0x5a); + qtest_writeb(s, 0x5a55466d, 0x5a); + qtest_writeb(s, 0x5a55466e, 0x34); + qtest_writeb(s, 0x5a55466f, 0x5a); + qtest_writeb(s, 0x5a345a5a, 0x77); + qtest_writeb(s, 0x5a345a5b, 0x55); + qtest_writeb(s, 0x5a345a5c, 0x51); + qtest_writeb(s, 0x5a345a5d, 0x27); + qtest_writeb(s, 0x27515577, 0x41); + qtest_outl(s, 0xc02d, 0x5a5500); + qtest_writeb(s, 0x364001d0, 0x08); + qtest_writeb(s, 0x364001d3, 0x58); + qtest_writeb(s, 0x364001da, 0x01); + qtest_writeb(s, 0x364001db, 0x26); + qtest_writeb(s, 0x364001dc, 0x0d); + qtest_writeb(s, 0x364001dd, 0xae); + qtest_writeb(s, 0x364001de, 0x41); + qtest_writeb(s, 0x364001df, 0x5a); + qtest_writeb(s, 0x5a41ae0d, 0xf8); + qtest_writeb(s, 0x5a41ae0e, 0x36); + qtest_writeb(s, 0x5a41ae0f, 0xd7); + qtest_writeb(s, 0x5a41ae10, 0x36); + qtest_writeb(s, 0x36d736f8, 0x0c); + qtest_writeb(s, 0x36d736f9, 0x80); + qtest_writeb(s, 0x36d736fa, 0x0d); + qtest_outl(s, 0xc02d, 0x364000); + + qtest_quit(s); +} + +/* + * This used to trigger the assert in lsi_do_dma() + * https://bugs.launchpad.net/qemu/+bug/697510 + * https://bugs.launchpad.net/qemu/+bug/1905521 + * https://bugs.launchpad.net/qemu/+bug/1908515 + */ +static void test_lsi_do_dma_empty_queue(void) +{ + QTestState *s; + + s = qtest_init("-M q35 -nographic -monitor none -serial none " + "-drive if=none,id=drive0," + "file=null-co://,file.read-zeroes=on,format=raw " + "-device lsi53c895a,id=scsi0 " + "-device scsi-hd,drive=drive0," + "bus=scsi0.0,channel=0,scsi-id=0,lun=0"); + qtest_outl(s, 0xcf8, 0x80001814); + qtest_outl(s, 0xcfc, 0xe1068000); + qtest_outl(s, 0xcf8, 0x80001818); + qtest_outl(s, 0xcf8, 0x80001804); + qtest_outw(s, 0xcfc, 0x7); + qtest_outl(s, 0xcf8, 0x80002010); 
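+    /*
+     * The 0xcf8/0xcfc accesses above use PCI configuration mechanism #1:
+     * config offset 0x14 is BAR1, mapping the controller's MMIO registers
+     * at 0xe1068000, and offset 0x4 is the command register, where the
+     * value 0x7 enables I/O, memory and bus-master access. The two byte
+     * writes below then fill the high bytes of the DSP register; writing
+     * its top byte kicks off SCRIPTS processing with an empty queue.
+     */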
+ + qtest_writeb(s, 0xe106802e, 0xff); /* Fill DSP bits 16-23 */ + qtest_writeb(s, 0xe106802f, 0xff); /* Fill DSP bits 24-31: trigger SCRIPT */ + + qtest_quit(s); +} + +int main(int argc, char **argv) +{ + const char *arch = qtest_get_arch(); + + g_test_init(&argc, &argv, NULL); + + if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { + qtest_add_func("fuzz/lsi53c895a/lsi_do_dma_empty_queue", + test_lsi_do_dma_empty_queue); + qtest_add_func("fuzz/lsi53c895a/lsi_do_msgout_cancel_req", + test_lsi_do_msgout_cancel_req); + qtest_add_func("fuzz/lsi53c895a/lsi_dma_reentrancy", + test_lsi_dma_reentrancy); + } + + return g_test_run(); +} diff --git a/tests/qtest/fuzz/generic_fuzz.c b/tests/qtest/fuzz/generic_fuzz.c index dd7e25851cb6dcc55b97f95897c1a65d312b4a10..aa4fe5a7eae394d32886d71954694a44fbb51e8d 100644 --- a/tests/qtest/fuzz/generic_fuzz.c +++ b/tests/qtest/fuzz/generic_fuzz.c @@ -911,9 +911,9 @@ static void generic_pre_fuzz(QTestState *s) * functionality B * * This function attempts to produce an input that: - * Ouptut: maps a device's BARs, set up three DMA patterns, triggers - * functionality A device, replaces the DMA patterns with a single - * patten, and triggers device functionality B. + * Output: maps a device's BARs, set up three DMA patterns, triggers + * device functionality A, replaces the DMA patterns with a single + * pattern, and triggers device functionality B. */ static size_t generic_fuzz_crossover(const uint8_t *data1, size_t size1, const uint8_t *data2, size_t size2, uint8_t *out, diff --git a/tests/qtest/fuzz/qos_fuzz.c b/tests/qtest/fuzz/qos_fuzz.c index 7a244c951e5e7fba4bdaf537287193b61f23988c..5ab38e50cfd1a7cb821194a6dc6e5cc99e85a524 100644 --- a/tests/qtest/fuzz/qos_fuzz.c +++ b/tests/qtest/fuzz/qos_fuzz.c @@ -182,6 +182,7 @@ static void walk_path(QOSGraphNode *orig_path, int len) fuzz_path_vec = path_vec; } else { + g_string_free(cmd_line, true); g_free(path_vec); } diff --git a/tests/qtest/intel-hda-test.c b/tests/qtest/intel-hda-test.c index fc25ccc33cc809cca04b880a382fd3d186683b2a..a58c98e4d11bc11d4be14dc7cb24ba9d187e435e 100644 --- a/tests/qtest/intel-hda-test.c +++ b/tests/qtest/intel-hda-test.c @@ -29,11 +29,45 @@ static void ich9_test(void) qtest_end(); } +/* + * https://gitlab.com/qemu-project/qemu/-/issues/542 + * Used to trigger: + * AddressSanitizer: stack-overflow + */ +static void test_issue542_ich6(void) +{ + QTestState *s; + + s = qtest_init("-nographic -nodefaults -M pc-q35-6.2 " + "-device intel-hda,id=" HDA_ID CODEC_DEVICES); + + qtest_outl(s, 0xcf8, 0x80000804); + qtest_outw(s, 0xcfc, 0x06); + qtest_bufwrite(s, 0xff0d060f, "\x03", 1); + qtest_bufwrite(s, 0x0, "\x12", 1); + qtest_bufwrite(s, 0x2, "\x2a", 1); + qtest_writeb(s, 0x0, 0x12); + qtest_writeb(s, 0x2, 0x2a); + qtest_outl(s, 0xcf8, 0x80000811); + qtest_outl(s, 0xcfc, 0x006a4400); + qtest_bufwrite(s, 0x6a44005a, "\x01", 1); + qtest_bufwrite(s, 0x6a44005c, "\x02", 1); + qtest_bufwrite(s, 0x6a442050, "\x00\x00\x44\x6a", 4); + qtest_bufwrite(s, 0x6a44204a, "\x01", 1); + qtest_bufwrite(s, 0x6a44204c, "\x02", 1); + qtest_bufwrite(s, 0x6a44005c, "\x02", 1); + qtest_bufwrite(s, 0x6a442050, "\x00\x00\x44\x6a", 4); + qtest_bufwrite(s, 0x6a44204a, "\x01", 1); + qtest_bufwrite(s, 0x6a44204c, "\x02", 1); + qtest_quit(s); +} + int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); qtest_add_func("/intel-hda/ich6", ich6_test); qtest_add_func("/intel-hda/ich9", ich9_test); + qtest_add_func("/intel-hda/fuzz/issue542", test_issue542_ich6); return g_test_run(); } diff --git 
diff --git a/tests/qtest/ivshmem-test.c b/tests/qtest/ivshmem-test.c
index dfa69424ed90ca3bcf2ec441a1fa2df7bdcdd6c2..fe94dd3b96fa0eb29655d0dc389e9b7d11439778 100644
--- a/tests/qtest/ivshmem-test.c
+++ b/tests/qtest/ivshmem-test.c
@@ -463,7 +463,6 @@ static gchar *mktempshm(int size, int *fd)
 int main(int argc, char **argv)
 {
     int ret, fd;
-    const char *arch = qtest_get_arch();
     gchar dir[] = "/tmp/ivshmem-test.XXXXXX";
 
     g_test_init(&argc, &argv, NULL);
@@ -488,9 +487,7 @@ int main(int argc, char **argv)
     qtest_add_func("/ivshmem/memdev", test_ivshmem_memdev);
     if (g_test_slow()) {
         qtest_add_func("/ivshmem/pair", test_ivshmem_pair);
-        if (strcmp(arch, "ppc64") != 0) {
-            qtest_add_func("/ivshmem/server", test_ivshmem_server);
-        }
+        qtest_add_func("/ivshmem/server", test_ivshmem_server);
     }
 
 out:
diff --git a/tests/qtest/libqos/e1000e.c b/tests/qtest/libqos/e1000e.c
index a451f6168f63d8672cc52943801fd9e8562fa71c..d3511369757353ea0aace7a857069e30a8bb9361 100644
--- a/tests/qtest/libqos/e1000e.c
+++ b/tests/qtest/libqos/e1000e.c
@@ -26,6 +26,8 @@
 #include "malloc.h"
 #include "qgraph.h"
 #include "e1000e.h"
+#include "hw/net/e1000_regs.h"
+#include "hw/pci/pci_ids.h"
 
 #define E1000E_IMS (0x00d0)
 
@@ -248,8 +250,8 @@ static void *e1000e_pci_create(void *pci_bus, QGuestAllocator *alloc,
 static void e1000e_register_nodes(void)
 {
     QPCIAddress addr = {
-        .vendor_id = 0x8086,
-        .device_id = 0x10D3,
+        .vendor_id = PCI_VENDOR_ID_INTEL,
+        .device_id = E1000_DEV_ID_82574L,
     };
 
     /* FIXME: every test using this node needs to setup a -netdev socket,id=hs0
diff --git a/tests/qtest/libqos/qgraph.c b/tests/qtest/libqos/qgraph.c
index d1dc4919305512236fc68bb5443b773d2de151f0..eae29368bc0f0eb874db4530117eadff12698f47 100644
--- a/tests/qtest/libqos/qgraph.c
+++ b/tests/qtest/libqos/qgraph.c
@@ -54,7 +54,7 @@ struct QOSStackElement {
     int length;
 };
 
-/* Each enty in these hash table will consist of pair. */
+/* Each entry in these hash tables will consist of a <name, node/edge> pair. */
 static GHashTable *edge_table;
 static GHashTable *node_table;
 
@@ -214,7 +214,7 @@ static QOSGraphEdge *search_list_edges(QOSGraphEdgeList *edgelist,
 /**
  * search_machine(): search for a machine @name in the node hash
  * table. A machine is the child of the root node.
- * This function forces the research in the childs of the root,
+ * This function restricts the search to the children of the root,
  * to check that the node is a proper machine
  *
  * Returns: on success: the %QOSGraphNode
diff --git a/tests/qtest/libqos/qgraph.h b/tests/qtest/libqos/qgraph.h
index 871740c0dc89d879050116e2f8e81ee995475b60..33d609a06a6f4b2d51d724065f97fcddbd7b9bf4 100644
--- a/tests/qtest/libqos/qgraph.h
+++ b/tests/qtest/libqos/qgraph.h
@@ -381,7 +381,7 @@ QOSGraphObject *qos_driver_new(QOSGraphNode *node, QOSGraphObject *parent,
 * mind: only tests with a path down from the actual test case node (leaf) up
 * to the graph's root node are actually executed by the qtest framework. And
 * the qtest framework uses QMP to automatically check which QEMU drivers are
-* actually currently available, and accordingly qos marks certain pathes as
+* actually currently available, and accordingly qos marks certain paths as
 * 'unavailable' in such cases (e.g. when QEMU was compiled without support for
 * a certain feature).
 */
diff --git a/tests/qtest/libqos/qgraph_internal.h b/tests/qtest/libqos/qgraph_internal.h
index 7d62fd17af71ac341ef6319f34c16c4e4a3d2fa0..87fab1f9f0d0974dc06d64b53b5262d7a2e278ea 100644
--- a/tests/qtest/libqos/qgraph_internal.h
+++ b/tests/qtest/libqos/qgraph_internal.h
@@ -197,7 +197,7 @@ char *qos_graph_edge_get_name(QOSGraphEdge *edge);
  * qos_graph_get_machine(): returns the machine assigned
  * to that @node name.
  *
- * It performs a search only trough the list of machines
+ * It performs a search only through the list of machines
  * (i.e. the QOS_ROOT child).
  *
  * Returns: on success: the %QOSGraphNode
diff --git a/tests/qtest/libqos/virtio-9p.c b/tests/qtest/libqos/virtio-9p.c
index b4e1143288ae69d36e1105fb3d8380a60a2b73d8..2941d3cdc6309418a64ab0355939093fe9dd41be 100644
--- a/tests/qtest/libqos/virtio-9p.c
+++ b/tests/qtest/libqos/virtio-9p.c
@@ -31,7 +31,7 @@ static QGuestAllocator *alloc;
 
 static char *local_test_path;
 
-/* Concatenates the passed 2 pathes. Returned result must be freed. */
+/* Concatenates the passed 2 paths. Returned result must be freed. */
 static char *concat_path(const char* a, const char* b)
 {
     return g_build_filename(a, b, NULL);
 }
diff --git a/tests/qtest/libqos/virtio.c b/tests/qtest/libqos/virtio.c
index 6fe7bf9555fca71326ec3b9b3707e71ad36e8c67..08e5bcdca1424c450dd274d0666b0a5d3c0246e0 100644
--- a/tests/qtest/libqos/virtio.c
+++ b/tests/qtest/libqos/virtio.c
@@ -377,7 +377,7 @@ void qvirtqueue_kick(QTestState *qts, QVirtioDevice *d, QVirtQueue *vq,
     qvirtio_writew(d, qts, vq->avail + 2, idx + 1);
 
     /* Must read after idx is updated */
-    flags = qvirtio_readw(d, qts, vq->avail);
+    flags = qvirtio_readw(d, qts, vq->used);
     avail_event = qvirtio_readw(d, qts, vq->used + 4 +
                                 sizeof(struct vring_used_elem) * vq->size);
diff --git a/tests/qtest/libqtest.c b/tests/qtest/libqtest.c
index 25aeea385bfaa1c771939468d401f5680cd0fd6d..407c07d72890bff22d861c4e2c4a3bb2b665de06 100644
--- a/tests/qtest/libqtest.c
+++ b/tests/qtest/libqtest.c
@@ -1483,7 +1483,7 @@ QTestState *qtest_inproc_init(QTestState **s, bool log, const char* arch,
 
     qtest_client_set_rx_handler(qts, qtest_client_inproc_recv_line);
 
-    /* send() may not have a matching protoype, so use a type-safe wrapper */
+    /* send() may not have a matching prototype, so use a type-safe wrapper */
     qts->ops.external_send = send;
     qtest_client_set_tx_handler(qts, send_wrapper);
 
diff --git a/tests/qtest/meson.build b/tests/qtest/meson.build
index c9d8458062ff6b88d1c28cb21e8f9c3cee65ea90..d2ce20d30473184e5c648e7363ed7e2bd8cc7070 100644
--- a/tests/qtest/meson.build
+++ b/tests/qtest/meson.build
@@ -19,6 +19,7 @@ slow_qtests = {
 qtests_generic = \
   (config_all_devices.has_key('CONFIG_MEGASAS_SCSI_PCI') ? ['fuzz-megasas-test'] : []) + \
+  (config_all_devices.has_key('CONFIG_LSI_SCSI_PCI') ? ['fuzz-lsi53c895a-test'] : []) + \
   (config_all_devices.has_key('CONFIG_VIRTIO_SCSI') ? ['fuzz-virtio-scsi-test'] : []) + \
   (config_all_devices.has_key('CONFIG_SB16') ? ['fuzz-sb16-test'] : []) + \
   (config_all_devices.has_key('CONFIG_SDHCI_PCI') ? ['fuzz-sdcard-test'] : []) + \
diff --git a/tests/qtest/migration-helpers.c b/tests/qtest/migration-helpers.c
index 4ee26014b78322df9a04ad7be2c368726b9f19d8..1e594f9cb1c65c0a5573ed1ae98e66f68db79851 100644
--- a/tests/qtest/migration-helpers.c
+++ b/tests/qtest/migration-helpers.c
@@ -75,6 +75,28 @@ QDict *wait_command(QTestState *who, const char *command, ...)
     return ret;
 }
 
+/*
+ * Execute the QMP command only (no waiting for events), asserting
+ * success and returning the command's "return" member.
+ */
+QDict *qmp_command(QTestState *who, const char *command, ...)
+{
+    va_list ap;
+    QDict *resp, *ret;
+
+    va_start(ap, command);
+    resp = qtest_vqmp(who, command, ap);
+    va_end(ap);
+
+    g_assert(!qdict_haskey(resp, "error"));
+    g_assert(qdict_haskey(resp, "return"));
+
+    ret = qdict_get_qdict(resp, "return");
+    qobject_ref(ret);
+    qobject_unref(resp);
+
+    return ret;
+}
+
 /*
  * Send QMP command "migrate".
  * Arguments are built from @fmt... (formatted like
diff --git a/tests/qtest/migration-helpers.h b/tests/qtest/migration-helpers.h
index d63bba9630f9572fe4f33a4f606474d7ee63224c..9bc809fb7505dd3322b38949828339c22a2d57b1 100644
--- a/tests/qtest/migration-helpers.h
+++ b/tests/qtest/migration-helpers.h
@@ -22,6 +22,8 @@ QDict *wait_command_fd(QTestState *who, int fd, const char *command, ...);
 GCC_FMT_ATTR(2, 3)
 QDict *wait_command(QTestState *who, const char *command, ...);
 
+QDict *qmp_command(QTestState *who, const char *command, ...);
+
 GCC_FMT_ATTR(3, 4)
 void migrate_qmp(QTestState *who, const char *uri, const char *fmt, ...);
 
diff --git a/tests/qtest/migration-test.c b/tests/qtest/migration-test.c
index 7b42f6fd909bfbcd3ae8c6051bcaaff9016bed53..9125300403016c5c708db052b9c368f6bbf982ed 100644
--- a/tests/qtest/migration-test.c
+++ b/tests/qtest/migration-test.c
@@ -23,6 +23,7 @@
 #include "qapi/qapi-visit-sockets.h"
 #include "qapi/qobject-input-visitor.h"
 #include "qapi/qobject-output-visitor.h"
+#include "qapi/qmp/qlist.h"
 #include "migration-helpers.h"
 #include "tests/migration/migration-test.h"
 
@@ -42,6 +43,12 @@ static bool uffd_feature_thread_id;
 /* A downtime where the test really should converge */
 #define CONVERGE_DOWNTIME 1000
 
+/*
+ * The dirty limit is considered to have taken effect when the measured
+ * dirty page rate differs from the configured limit by less than
+ * DIRTYLIMIT_TOLERANCE_RANGE (in MB/s).
+ */
+#define DIRTYLIMIT_TOLERANCE_RANGE 25 /* MB/s */
+
 #if defined(__linux__)
 #include
 #include
 
@@ -613,7 +620,7 @@ static int test_migrate_start(QTestState **from, QTestState **to,
     /*
      * Remove shmem file immediately to avoid a memory leak if the test fails.
-     * It's valid becase QEMU has already opened this file
+     * It's valid because QEMU has already opened this file
      */
     if (args->use_shmem) {
         unlink(shmem_path);
     }
@@ -1150,7 +1157,7 @@ static void test_migrate_auto_converge(void)
     /*
      * We want the test to be stable and as fast as possible.
-     * E.g., with 1Gb/s bandwith migration may pass without throttling,
+     * E.g., with 1Gb/s bandwidth migration may pass without throttling,
      * so we need to decrease the bandwidth.
     */
    const int64_t init_pct = 5, inc_pct = 50, max_pct = 95;
@@ -1394,6 +1401,253 @@ static void test_multifd_tcp_cancel(void)
     test_migrate_end(from, to2, true);
 }
 
+static void calc_dirty_rate(QTestState *who, uint64_t calc_time)
+{
+    qobject_unref(qmp_command(who,
+                  "{ 'execute': 'calc-dirty-rate',"
+                  "'arguments': { "
+                  "'calc-time': %" PRIu64 ","
+                  "'mode': 'dirty-ring' }}",
+                  calc_time));
+}
+
+static QDict *query_dirty_rate(QTestState *who)
+{
+    return qmp_command(who, "{ 'execute': 'query-dirty-rate' }");
+}
+
+static void dirtylimit_set_all(QTestState *who, uint64_t dirtyrate)
+{
+    qobject_unref(qmp_command(who,
+                  "{ 'execute': 'set-vcpu-dirty-limit',"
+                  "'arguments': { "
+                  "'dirty-rate': %" PRIu64 " } }",
+                  dirtyrate));
+}
+
+static void cancel_vcpu_dirty_limit(QTestState *who)
+{
+    qobject_unref(qmp_command(who,
+                  "{ 'execute': 'cancel-vcpu-dirty-limit' }"));
+}
+
+static QDict *query_vcpu_dirty_limit(QTestState *who)
+{
+    QDict *rsp;
+
+    rsp = qtest_qmp(who, "{ 'execute': 'query-vcpu-dirty-limit' }");
+    g_assert(!qdict_haskey(rsp, "error"));
+    g_assert(qdict_haskey(rsp, "return"));
+
+    return rsp;
+}
+
+static bool calc_dirtyrate_ready(QTestState *who)
+{
+    QDict *rsp_return;
+    const gchar *status;
+    bool ready;
+
+    rsp_return = query_dirty_rate(who);
+    g_assert(rsp_return);
+
+    status = qdict_get_str(rsp_return, "status");
+    g_assert(status);
+
+    /* Ready once the measurement has left the "measuring" state */
+    ready = g_strcmp0(status, "measuring") != 0;
+
+    qobject_unref(rsp_return);
+    return ready;
+}
+
+static void wait_for_calc_dirtyrate_complete(QTestState *who,
+                                             int64_t time_s)
+{
+    int max_try_count = 10000;
+    usleep(time_s * 1000000);
+
+    while (!calc_dirtyrate_ready(who) && --max_try_count) {
+        usleep(1000);
+    }
+
+    /*
+     * Time out after 10 s (max_try_count * 1000 us); if the dirty rate
+     * measurement still has not completed, fail the test.
+     */
+    g_assert_cmpint(max_try_count, !=, 0);
+}
+
+static int64_t get_dirty_rate(QTestState *who)
+{
+    QDict *rsp_return;
+    const gchar *status;
+    QList *rates;
+    const QListEntry *entry;
+    QDict *rate;
+    int64_t dirtyrate;
+
+    rsp_return = query_dirty_rate(who);
+    g_assert(rsp_return);
+
+    status = qdict_get_str(rsp_return, "status");
+    g_assert(status);
+    g_assert_cmpstr(status, ==, "measured");
+
+    rates = qdict_get_qlist(rsp_return, "vcpu-dirty-rate");
+    g_assert(rates && !qlist_empty(rates));
+
+    entry = qlist_first(rates);
+    g_assert(entry);
+
+    rate = qobject_to(QDict, qlist_entry_obj(entry));
+    g_assert(rate);
+
+    dirtyrate = qdict_get_try_int(rate, "dirty-rate", -1);
+
+    qobject_unref(rsp_return);
+    return dirtyrate;
+}
+
+static int64_t get_limit_rate(QTestState *who)
+{
+    QDict *rsp_return;
+    QList *rates;
+    const QListEntry *entry;
+    QDict *rate;
+    int64_t dirtyrate;
+
+    rsp_return = query_vcpu_dirty_limit(who);
+    g_assert(rsp_return);
+
+    rates = qdict_get_qlist(rsp_return, "return");
+    g_assert(rates && !qlist_empty(rates));
+
+    entry = qlist_first(rates);
+    g_assert(entry);
+
+    rate = qobject_to(QDict, qlist_entry_obj(entry));
+    g_assert(rate);
+
+    dirtyrate = qdict_get_try_int(rate, "limit-rate", -1);
+
+    qobject_unref(rsp_return);
+    return dirtyrate;
+}
+
+static QTestState *dirtylimit_start_vm(void)
+{
+    QTestState *vm = NULL;
+    g_autofree gchar *cmd = NULL;
+    const char *arch = qtest_get_arch();
+    g_autofree char *bootpath = NULL;
+
+    assert(strcmp(arch, "x86_64") == 0);
+    bootpath = g_strdup_printf("%s/bootsect", tmpfs);
+    assert(sizeof(x86_bootsect) == 512);
+    init_bootfile(bootpath, x86_bootsect, sizeof(x86_bootsect));
+
+    cmd = g_strdup_printf("-accel kvm,dirty-ring-size=4096 "
+                          "-name dirtylimit-test,debug-threads=on "
+                          "-m 150M -smp 1 "
"-serial file:%s/vm_serial " + "-drive file=%s,format=raw ", + tmpfs, bootpath); + + vm = qtest_init(cmd); + return vm; +} + +static void dirtylimit_stop_vm(QTestState *vm) +{ + qtest_quit(vm); + cleanup("bootsect"); + cleanup("vm_serial"); +} + +static void test_vcpu_dirty_limit(void) +{ + QTestState *vm; + int64_t origin_rate; + int64_t quota_rate; + int64_t rate ; + int max_try_count = 20; + int hit = 0; + + /* Start vm for vcpu dirtylimit test */ + vm = dirtylimit_start_vm(); + + /* Wait for the first serial output from the vm*/ + wait_for_serial("vm_serial"); + + /* Do dirtyrate measurement with calc time equals 1s */ + calc_dirty_rate(vm, 1); + + /* Sleep calc time and wait for calc dirtyrate complete */ + wait_for_calc_dirtyrate_complete(vm, 1); + + /* Query original dirty page rate */ + origin_rate = get_dirty_rate(vm); + + /* VM booted from bootsect should dirty memory steadily */ + assert(origin_rate != 0); + + /* Setup quota dirty page rate at half of origin */ + quota_rate = origin_rate / 2; + + /* Set dirtylimit */ + dirtylimit_set_all(vm, quota_rate); + + /* + * Check if set-vcpu-dirty-limit and query-vcpu-dirty-limit + * works literally + */ + g_assert_cmpint(quota_rate, ==, get_limit_rate(vm)); + + /* Sleep a bit to check if it take effect */ + usleep(2000000); + + /* + * Check if dirtylimit take effect realistically, set the + * timeout with 20 s(max_try_count * 1s), if dirtylimit + * doesn't take effect, fail test. + */ + while (--max_try_count) { + calc_dirty_rate(vm, 1); + wait_for_calc_dirtyrate_complete(vm, 1); + rate = get_dirty_rate(vm); + + /* + * Assume hitting if current rate is less + * than quota rate (within accepting error) + */ + if (rate < (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) { + hit = 1; + break; + } + } + + g_assert_cmpint(hit, ==, 1); + + hit = 0; + max_try_count = 20; + + /* Check if dirtylimit cancellation take effect */ + cancel_vcpu_dirty_limit(vm); + while (--max_try_count) { + calc_dirty_rate(vm, 1); + wait_for_calc_dirtyrate_complete(vm, 1); + rate = get_dirty_rate(vm); + + /* + * Assume dirtylimit be canceled if current rate is + * greater than quota rate (within accepting error) + */ + if (rate > (quota_rate + DIRTYLIMIT_TOLERANCE_RANGE)) { + hit = 1; + break; + } + } + + g_assert_cmpint(hit, ==, 1); + dirtylimit_stop_vm(vm); +} + static bool kvm_dirty_ring_supported(void) { #if defined(__linux__) && defined(HOST_X86_64) @@ -1483,6 +1737,8 @@ int main(int argc, char **argv) if (kvm_dirty_ring_supported()) { qtest_add_func("/migration/dirty_ring", test_precopy_unix_dirty_ring); + qtest_add_func("/migration/vcpu_dirty_limit", + test_vcpu_dirty_limit); } ret = g_test_run(); diff --git a/tests/qtest/npcm7xx_emc-test.c b/tests/qtest/npcm7xx_emc-test.c index 9eec71d87c1fa8f20117f63b54eb2a4f5122308f..1bc1d821c5e2ef8f6b5ee5c0b75e2368a8613332 100644 --- a/tests/qtest/npcm7xx_emc-test.c +++ b/tests/qtest/npcm7xx_emc-test.c @@ -378,7 +378,8 @@ static void test_init(gconstpointer test_data) #undef CHECK_REG - for (i = 0; i < NUM_CAMML_REGS; ++i) { + /* Skip over the MAC address registers, which is BASE+0 */ + for (i = 1; i < NUM_CAMML_REGS; ++i) { g_assert_cmpuint(emc_read(qts, mod, REG_CAMM_BASE + i * 2), ==, 0); g_assert_cmpuint(emc_read(qts, mod, REG_CAML_BASE + i * 2), ==, diff --git a/tests/qtest/npcm7xx_pwm-test.c b/tests/qtest/npcm7xx_pwm-test.c index a54fd70d273bc5a806b2c61a055014052a090747..ddfc120df0b6f5a852a2d181e234b7f8df6a8ee6 100644 --- a/tests/qtest/npcm7xx_pwm-test.c +++ b/tests/qtest/npcm7xx_pwm-test.c @@ -268,6 +268,9 @@ static void 
mft_qom_set(QTestState *qts, int index, const char *name, path, name, value); /* The qom set message returns successfully. */ g_assert_true(qdict_haskey(response, "return")); + + qobject_unref(response); + g_free(path); } static uint32_t get_pll(uint32_t con) diff --git a/tests/qtest/npcm7xx_timer-test.c b/tests/qtest/npcm7xx_timer-test.c index 83774a5b903b91fee03f116ca72297d92df34af7..43711049ca747d509cc0ca53d065a0f81694294a 100644 --- a/tests/qtest/npcm7xx_timer-test.c +++ b/tests/qtest/npcm7xx_timer-test.c @@ -384,7 +384,7 @@ static void test_pause_resume(gconstpointer test_data) g_assert_true(qtest_get_irq(global_qtest, tim_timer_irq(td))); } -/* Verifies that the prescaler can be changed while the timer is runnin. */ +/* Verifies that the prescaler can be changed while the timer is running. */ static void test_prescaler_change(gconstpointer test_data) { const TestData *td = test_data; diff --git a/tests/qtest/numa-test.c b/tests/qtest/numa-test.c index 90bf68a5b33c1f0098eb8bc482b5029cf1d469cc..08f28012c84378ae08d1c53e54636a459b0eb536 100644 --- a/tests/qtest/numa-test.c +++ b/tests/qtest/numa-test.c @@ -223,17 +223,17 @@ static void aarch64_numa_cpu(const void *data) QTestState *qts; g_autofree char *cli = NULL; - cli = make_cli(data, "-machine smp.cpus=2 " + cli = make_cli(data, "-machine smp.cpus=2,smp.cores=2 " "-numa node,nodeid=0,memdev=ram -numa node,nodeid=1 " - "-numa cpu,node-id=1,thread-id=0 " - "-numa cpu,node-id=0,thread-id=1"); + "-numa cpu,node-id=1,core-id=0 " + "-numa cpu,node-id=0,core-id=1"); qts = qtest_init(cli); cpus = get_cpus(qts, &resp); g_assert(cpus); while ((e = qlist_pop(cpus))) { QDict *cpu, *props; - int64_t thread, node; + int64_t core, node; cpu = qobject_to(QDict, e); g_assert(qdict_haskey(cpu, "props")); @@ -241,12 +241,12 @@ static void aarch64_numa_cpu(const void *data) g_assert(qdict_haskey(props, "node-id")); node = qdict_get_int(props, "node-id"); - g_assert(qdict_haskey(props, "thread-id")); - thread = qdict_get_int(props, "thread-id"); + g_assert(qdict_haskey(props, "core-id")); + core = qdict_get_int(props, "core-id"); - if (thread == 0) { + if (core == 0) { g_assert_cmpint(node, ==, 1); - } else if (thread == 1) { + } else if (core == 1) { g_assert_cmpint(node, ==, 0); } else { g_assert(false); diff --git a/tests/qtest/pflash-cfi02-test.c b/tests/qtest/pflash-cfi02-test.c index 6168edc821a86195295861fc94140ceb601ae4ad..bd1f946fc03b3d989d32d1210d8e259cbff9017e 100644 --- a/tests/qtest/pflash-cfi02-test.c +++ b/tests/qtest/pflash-cfi02-test.c @@ -406,7 +406,7 @@ static void test_geometry(const void *opaque) for (int region = 0; region < nb_erase_regions; ++region) { for (uint32_t i = 0; i < c->nb_blocs[region]; ++i) { - uint64_t byte_addr = (uint64_t)i * c->sector_len[region]; + byte_addr = (uint64_t)i * c->sector_len[region]; g_assert_cmphex(flash_read(c, byte_addr), ==, bank_mask(c)); } } diff --git a/tests/qtest/qmp-cmd-test.c b/tests/qtest/qmp-cmd-test.c index 7f103ea3fd2ac5c9fd0b6ebb91ce4b66bac31a41..4b216a0435db47ee2e5d46efff289826b5435d22 100644 --- a/tests/qtest/qmp-cmd-test.c +++ b/tests/qtest/qmp-cmd-test.c @@ -110,6 +110,8 @@ static bool query_is_ignored(const char *cmd) "query-sev-capabilities", "query-sgx", "query-sgx-capabilities", + /* Success depends on enabling dirty page rate limit */ + "query-vcpu-dirty-limit", NULL }; int i; diff --git a/tests/qtest/test-filter-mirror.c b/tests/qtest/test-filter-mirror.c index bc0dee64dd93b8f6191a61f477b054c653d9e343..40f736734ade3e83098da0481674d605f3b5dd74 100644 --- 
a/tests/qtest/test-filter-mirror.c +++ b/tests/qtest/test-filter-mirror.c @@ -71,6 +71,7 @@ static void test_mirror(void) g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); ret = qemu_recv(recv_sock[0], recv_buf, len, 0); + g_assert_cmpint(ret, ==, len); g_assert_cmpstr(recv_buf, ==, send_buf); g_free(recv_buf); diff --git a/tests/qtest/test-filter-redirector.c b/tests/qtest/test-filter-redirector.c index 4269b2cdd9dab32151e9ae3932fe4f8a4ec9253e..f802c94f54df27032afa32bfb10cfb1395431d47 100644 --- a/tests/qtest/test-filter-redirector.c +++ b/tests/qtest/test-filter-redirector.c @@ -133,6 +133,7 @@ static void test_redirector_tx(void) g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); ret = qemu_recv(recv_sock, recv_buf, len, 0); + g_assert_cmpint(ret, ==, len); g_assert_cmpstr(recv_buf, ==, send_buf); g_free(recv_buf); @@ -201,6 +202,7 @@ static void test_redirector_rx(void) g_assert_cmpint(len, ==, sizeof(send_buf)); recv_buf = g_malloc(len); ret = qemu_recv(backend_sock[0], recv_buf, len, 0); + g_assert_cmpint(ret, ==, len); g_assert_cmpstr(recv_buf, ==, send_buf); close(send_sock); diff --git a/tests/qtest/tpm-emu.c b/tests/qtest/tpm-emu.c index 2994d1cf423cef6af35823e94b0da4ac70387ffa..e0d967fc0154307910846e0f5368086cec6b6f9e 100644 --- a/tests/qtest/tpm-emu.c +++ b/tests/qtest/tpm-emu.c @@ -70,7 +70,7 @@ static void *tpm_emu_tpm_thread(void *data) s->tpm_msg->code = cpu_to_be32(TPM_FAIL); break; default: - g_debug("unsupport TPM version %u", s->tpm_version); + g_debug("unsupported TPM version %u", s->tpm_version); g_assert_not_reached(); } qio_channel_write(ioc, (char *)s->tpm_msg, be32_to_cpu(s->tpm_msg->len), diff --git a/tests/qtest/tpm-tests.c b/tests/qtest/tpm-tests.c index 25073d1f9e9f3303b1e4099736f769a9b2d25f98..fb94496bbd8614219ae8416e97c37740ad5e09ed 100644 --- a/tests/qtest/tpm-tests.c +++ b/tests/qtest/tpm-tests.c @@ -1,5 +1,5 @@ /* - * QTest TPM commont test code + * QTest TPM common test code * * Copyright (c) 2018 IBM Corporation * Copyright (c) 2018 Red Hat, Inc. diff --git a/tests/qtest/tpm-tests.h b/tests/qtest/tpm-tests.h index a5df35ab5ba7685587400dbad7dec2bbcbffae1d..07ba60d26eef5ce0a5b9a6e5d24e2421f8567286 100644 --- a/tests/qtest/tpm-tests.h +++ b/tests/qtest/tpm-tests.h @@ -1,5 +1,5 @@ /* - * QTest TPM commont test code + * QTest TPM common test code * * Copyright (c) 2018 IBM Corporation * diff --git a/tests/qtest/tpm-tis-util.c b/tests/qtest/tpm-tis-util.c index 939893bf0149768cce74b2222a8ccb9949b717e2..0c8af168e43ba0f1469601200b169a000b709edf 100644 --- a/tests/qtest/tpm-tis-util.c +++ b/tests/qtest/tpm-tis-util.c @@ -340,7 +340,7 @@ void tpm_tis_test_check_access_reg_release(const void *data) TPM_TIS_ACCESS_ACTIVE_LOCALITY); /* * highest locality should now be active; release it and make sure the - * next higest locality is active afterwards + * next highest locality is active afterwards */ for (l = TPM_TIS_NUM_LOCALITIES - 2; l >= 0; l--) { if (l == locty) { diff --git a/tests/qtest/vhost-user-blk-test.c b/tests/qtest/vhost-user-blk-test.c index 62e670f39be02d875782165cae0faa87c87f2aa7..c1d2c6fa036c64ba9d3187ab225b8fb09ac486f6 100644 --- a/tests/qtest/vhost-user-blk-test.c +++ b/tests/qtest/vhost-user-blk-test.c @@ -950,7 +950,7 @@ static void *vhost_user_blk_test_setup(GString *cmd_line, void *arg) * Setup for hotplug. 
  *
  * Since the vhost-user server only serves one vhost-user client at a time,
- * another exprot
+ * another export is set up for the hotplugged device.
  *
  */
 static void *vhost_user_blk_hotplug_test_setup(GString *cmd_line, void *arg)
diff --git a/tests/qtest/vhost-user-test.c b/tests/qtest/vhost-user-test.c
index 3d6337fb5c55b11745664990d035c0816648f39d..d07babc06db4445c2ebe4fa1b60fe3c4e0f18056 100644
--- a/tests/qtest/vhost-user-test.c
+++ b/tests/qtest/vhost-user-test.c
@@ -328,7 +328,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
         if (size != msg.size) {
             g_test_message("Wrong message size received %d != %d",
                            size, msg.size);
-            return;
+            goto out;
         }
     }
 
@@ -429,6 +429,7 @@ static void chr_read(void *opaque, const uint8_t *buf, int size)
         break;
     }
 
+out:
     g_mutex_unlock(&s->data_mutex);
 }
diff --git a/tests/qtest/virtio-net-test.c b/tests/qtest/virtio-net-test.c
index 8bf74e516cce23117696e4b23396a9244c473588..3b17e74d06d7efc65fd3b8442156d6048d9afff4 100644
--- a/tests/qtest/virtio-net-test.c
+++ b/tests/qtest/virtio-net-test.c
@@ -92,6 +92,7 @@ static void tx_test(QVirtioDevice *dev,
     len = ntohl(len);
 
     ret = qemu_recv(socket, buffer, len, 0);
+    g_assert_cmpint(ret, ==, len);
     g_assert_cmpstr(buffer, ==, "TEST");
 }
 
@@ -210,7 +211,7 @@ static void announce_self(void *obj, void *data, QGuestAllocator *t_alloc)
     g_assert_cmpint(*proto, ==, htons(ETH_P_RARP));
 
     /*
-     * Stop the announcment by settings rounds to 0 on the
+     * Stop the announcement by setting rounds to 0 on the
      * existing timer.
      */
     rsp = qmp("{ 'execute' : 'announce-self', "
diff --git a/tests/qtest/vmgenid-test.c b/tests/qtest/vmgenid-test.c
index 6781a514479bc47af0199d5a4705cf94635e0df9..015e413075631ad611a47b95964e5bfc1a586861 100644
--- a/tests/qtest/vmgenid-test.c
+++ b/tests/qtest/vmgenid-test.c
@@ -19,7 +19,7 @@
 #define VGID_GUID "324e6eaf-d1d1-4bf6-bf41-b9bb6c91fb87"
 #define VMGENID_GUID_OFFSET 40 /* allow space for
-                                * OVMF SDT Header Probe Supressor
+                                * OVMF SDT Header Probe Suppressor
                                 */
 #define RSDP_ADDR_INVALID 0x100000 /* RSDP must be below this address */
diff --git a/tests/tcg/Makefile.target b/tests/tcg/Makefile.target
index 63cf1b2573f9edecd603f5f06c778f6641cd5274..2d6ec70156e9ed261ca6b8ed490fe194183b326d 100644
--- a/tests/tcg/Makefile.target
+++ b/tests/tcg/Makefile.target
@@ -33,7 +33,7 @@ all:
 -include ../../../config-host.mak
 -include ../config-$(TARGET).mak
 ifeq ($(CONFIG_USER_ONLY),y)
--include $(SRC_PATH)/default-configs/targets/$(TARGET).mak
+-include $(SRC_PATH)/configs/targets/$(TARGET)/default.mak
 endif
 
 # for including , in command strings
diff --git a/tests/tcg/multiarch/linux/linux-test.c b/tests/tcg/multiarch/linux/linux-test.c
index 019d8175ca634c7b1e6dcb929b395789d55622a9..64f57cb287eb280b49586feef6e08f83eead40c3 100644
--- a/tests/tcg/multiarch/linux/linux-test.c
+++ b/tests/tcg/multiarch/linux/linux-test.c
@@ -263,7 +263,7 @@ static int server_socket(void)
     sockaddr.sin_port = htons(0); /* choose a random ephemeral port */
     sockaddr.sin_addr.s_addr = 0;
     chk_error(bind(fd, (struct sockaddr *)&sockaddr, sizeof(sockaddr)));
-    chk_error(listen(fd, 0));
+    chk_error(listen(fd, 1));
     return fd;
 }
 
@@ -354,13 +354,17 @@ static void test_pipe(void)
         if (FD_ISSET(fds[0], &rfds)) {
             chk_error(read(fds[0], &ch, 1));
             rcount++;
-            if (rcount >= WCOUNT_MAX)
+            if (rcount >= WCOUNT_MAX) {
                 break;
+            }
         }
         if (FD_ISSET(fds[1], &wfds)) {
             ch = 'a';
             chk_error(write(fds[1], &ch, 1));
             wcount++;
+            if (wcount >= WCOUNT_MAX) {
+                break;
+            }
         }
     }
 }
diff --git a/tests/tsan/suppressions.tsan b/tests/tsan/suppressions.tsan
index
73414b9ebd9046974f115f35bdebedb63d78058c..d9a002a2ef16018c305f6981087f77d92c87eaa2 100644 --- a/tests/tsan/suppressions.tsan +++ b/tests/tsan/suppressions.tsan @@ -7,7 +7,7 @@ mutex:aio_context_acquire mutex:pthread_mutex_lock -# TSan reports a race betwen pthread_mutex_init() and +# TSan reports a race between pthread_mutex_init() and # pthread_mutex_lock(). Since this is outside of QEMU, # we choose to ignore it. race:pthread_mutex_init diff --git a/tests/uefi-test-tools/Makefile b/tests/uefi-test-tools/Makefile index 471f0de981090b3d419b8bc7890c7a22c0bbe642..0c003f2877f5673b37e33687a888c5ad261717f4 100644 --- a/tests/uefi-test-tools/Makefile +++ b/tests/uefi-test-tools/Makefile @@ -87,7 +87,7 @@ Build/%.fat: Build/%.efi .NOTPARALLEL: # In turn, the "build" utility of edk2 BaseTools invokes another "make". -# Although the outer "make" process advertizes its job server to all child +# Although the outer "make" process advertises its job server to all child # processes via MAKEFLAGS in the environment, the outer "make" closes the job # server file descriptors (exposed in MAKEFLAGS) before executing a recipe -- # unless the recipe is recognized as a recursive "make" recipe. Recipes that diff --git a/tests/unit/check-block-qdict.c b/tests/unit/check-block-qdict.c index 5a25825093016d1d75c7d77e9b12dc3fd691d801..751c58e73733ce4e77fd756900dc6fe74c29f3dc 100644 --- a/tests/unit/check-block-qdict.c +++ b/tests/unit/check-block-qdict.c @@ -504,7 +504,7 @@ static void qdict_crumple_test_empty(void) src = qdict_new(); dst = qobject_to(QDict, qdict_crumple(src, &error_abort)); - + g_assert(dst); g_assert_cmpint(qdict_size(dst), ==, 0); qobject_unref(src); diff --git a/tests/unit/check-qjson.c b/tests/unit/check-qjson.c index c845f91d43773083c78f00e3bab0587c89713c1d..d90ddd0217598cd9390919f58259f662fcf94d67 100644 --- a/tests/unit/check-qjson.c +++ b/tests/unit/check-qjson.c @@ -1487,7 +1487,7 @@ int main(int argc, char **argv) g_test_add_func("/literals/keyword", keyword_literal); g_test_add_func("/literals/interpolation/valid", interpolation_valid); - g_test_add_func("/literals/interpolation/unkown", interpolation_unknown); + g_test_add_func("/literals/interpolation/unknown", interpolation_unknown); g_test_add_func("/literals/interpolation/string", interpolation_string); g_test_add_func("/dicts/simple_dict", simple_dict); diff --git a/tests/unit/meson.build b/tests/unit/meson.build index acac3622edcb4c7f679446fc442cb8daa03bbc77..c21d81787454854610738e274deb00d03e275990 100644 --- a/tests/unit/meson.build +++ b/tests/unit/meson.build @@ -129,9 +129,6 @@ if have_system 'test-vmstate': [migration, io], 'test-yank': ['socket-helpers.c', qom, io, chardev] } - if 'CONFIG_INOTIFY1' in config_host - tests += {'test-util-filemonitor': []} - endif # Some tests: test-char, test-qdev-global-props, and test-qga, # are not runnable under TSan due to a known issue. 
diff --git a/tests/unit/ptimer-test-stubs.c b/tests/unit/ptimer-test-stubs.c index 2a3ef5879910033831549651f7a589716a66f914..a7a2d08e7ec5788cd4aa1ad1505b7ab6ecba4ca1 100644 --- a/tests/unit/ptimer-test-stubs.c +++ b/tests/unit/ptimer-test-stubs.c @@ -108,7 +108,8 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask) return deadline; } -QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name) +QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name, + MemReentrancyGuard *reentrancy_guard) { QEMUBH *bh = g_new(QEMUBH, 1); diff --git a/tests/unit/ptimer-test.c b/tests/unit/ptimer-test.c index 9176b96c1ce219a510626fa1689708e7d3f7731a..8544b6486cb4d4d35bc8d7451b95c4381cde7e84 100644 --- a/tests/unit/ptimer-test.c +++ b/tests/unit/ptimer-test.c @@ -763,6 +763,33 @@ static void check_oneshot_with_load_0(gconstpointer arg) ptimer_free(ptimer); } +static void check_freq_more_than_1000M(gconstpointer arg) +{ + const uint8_t *policy = arg; + ptimer_state *ptimer = ptimer_init(ptimer_trigger, NULL, *policy); + bool no_round_down = (*policy & PTIMER_POLICY_NO_COUNTER_ROUND_DOWN); + + triggered = false; + + ptimer_transaction_begin(ptimer); + ptimer_set_freq(ptimer, 2000000000); + ptimer_set_limit(ptimer, 8, 1); + ptimer_run(ptimer, 1); + ptimer_transaction_commit(ptimer); + + qemu_clock_step(3); + + g_assert_cmpuint(ptimer_get_count(ptimer), ==, no_round_down ? 3 : 2); + g_assert_false(triggered); + + qemu_clock_step(1); + + g_assert_cmpuint(ptimer_get_count(ptimer), ==, 0); + g_assert_true(triggered); + + ptimer_free(ptimer); +} + static void add_ptimer_tests(uint8_t policy) { char policy_name[256] = ""; @@ -857,6 +884,12 @@ static void add_ptimer_tests(uint8_t policy) policy_name), g_memdup(&policy, 1), check_oneshot_with_load_0, g_free); g_free(tmp); + + g_test_add_data_func_full( + tmp = g_strdup_printf("/ptimer/freq_more_than_1000M policy=%s", + policy_name), + g_memdup(&policy, 1), check_freq_more_than_1000M, g_free); + g_free(tmp); } static void add_all_ptimer_policies_comb_tests(void) diff --git a/tests/unit/test-aio.c b/tests/unit/test-aio.c index 6feeb9a4a9fd0ac51aa1d56f446401f45d85472e..95265a0d7525b96ea3253625766b16c56d3ec03d 100644 --- a/tests/unit/test-aio.c +++ b/tests/unit/test-aio.c @@ -478,7 +478,7 @@ static void test_timer_schedule(void) g_assert_cmpint(data.n, ==, 0); - /* timer_mod may well cause an event notifer to have gone off, + /* timer_mod may well cause an event notifier to have gone off, * so clear that */ do {} while (aio_poll(ctx, false)); diff --git a/tests/unit/test-bdrv-graph-mod.c b/tests/unit/test-bdrv-graph-mod.c index a6e3bb79be256c6e54fb19a9ad8f718b7a5259e1..a5d5cab0050640a5c233446541199912fabda13d 100644 --- a/tests/unit/test-bdrv-graph-mod.c +++ b/tests/unit/test-bdrv-graph-mod.c @@ -94,9 +94,9 @@ static BlockDriverState *exclusive_writer_node(const char *name) * | perm: write, read * | shared: except write * v - * +-------------------+ +----------------+ - * | passtrough filter |---------->| null-co node | - * +-------------------+ +----------------+ + * +--------------------+ +----------------+ + * | passthrough filter |--------->| null-co node | + * +--------------------+ +----------------+ * * * and then, tries to append filter under node. Expected behavior: fail. 
@@ -110,9 +110,9 @@ static BlockDriverState *exclusive_writer_node(const char *name) * | perm: write, read * | shared: except write * v - * +-------------------+ - * | passtrough filter | - * +-------------------+ + * +--------------------+ + * | passthrough filter | + * +--------------------+ * | | * perm: write, read | | perm: write, read * shared: except write | | shared: except write diff --git a/tests/unit/test-crypto-cipher.c b/tests/unit/test-crypto-cipher.c index d9d9d078ff11c9942922d5228c7d3448a0320731..11ab1a54fca3991fe68c8f7ce180cd436e9194c0 100644 --- a/tests/unit/test-crypto-cipher.c +++ b/tests/unit/test-crypto-cipher.c @@ -382,6 +382,19 @@ static QCryptoCipherTestData test_data[] = { .plaintext = "90afe91bb288544f2c32dc239b2635e6", .ciphertext = "6cb4561c40bf0a9705931cb6d408e7fa", }, +#ifdef CONFIG_CRYPTO_SM4 + { + /* SM4, GB/T 32907-2016, Appendix A.1 */ + .path = "/crypto/cipher/sm4", + .alg = QCRYPTO_CIPHER_ALG_SM4, + .mode = QCRYPTO_CIPHER_MODE_ECB, + .key = "0123456789abcdeffedcba9876543210", + .plaintext = + "0123456789abcdeffedcba9876543210", + .ciphertext = + "681edf34d206965e86b3e94f536e4246", + }, +#endif { /* #1 32 byte key, 32 byte PTX */ .path = "/crypto/cipher/aes-xts-128-1", diff --git a/tests/unit/test-crypto-secret.c b/tests/unit/test-crypto-secret.c index 34a4aecc121c8f1449cc88cd01d75e094a8ed27b..ffd13ff70e82bdfbcbad0c8c8a0f27a9194e681f 100644 --- a/tests/unit/test-crypto-secret.c +++ b/tests/unit/test-crypto-secret.c @@ -24,7 +24,7 @@ #include "crypto/secret.h" #include "qapi/error.h" #include "qemu/module.h" -#ifdef CONFIG_KEYUTILS +#if defined(CONFIG_KEYUTILS) && defined(CONFIG_SECRET_KEYRING) #include "crypto/secret_keyring.h" #include #endif @@ -128,7 +128,7 @@ static void test_secret_indirect_emptyfile(void) g_free(fname); } -#ifdef CONFIG_KEYUTILS +#if defined(CONFIG_KEYUTILS) && defined(CONFIG_SECRET_KEYRING) #define DESCRIPTION "qemu_test_secret" #define PAYLOAD "Test Payload" @@ -244,7 +244,7 @@ static void test_secret_keyring_bad_key_access_right(void) char key_str[16]; Object *sec; - g_test_skip("TODO: Need responce from Linux kernel maintainers"); + g_test_skip("TODO: Need response from Linux kernel maintainers"); return; int32_t key = add_key("user", DESCRIPTION, PAYLOAD, @@ -268,7 +268,7 @@ static void test_secret_keyring_bad_key_access_right(void) keyctl_unlink(key, KEY_SPEC_PROCESS_KEYRING); } -#endif /* CONFIG_KEYUTILS */ +#endif /* CONFIG_KEYUTILS && CONFIG_SECRET_KEYRING */ static void test_secret_noconv_base64_good(void) { @@ -571,7 +571,7 @@ int main(int argc, char **argv) g_test_add_func("/crypto/secret/indirect/emptyfile", test_secret_indirect_emptyfile); -#ifdef CONFIG_KEYUTILS +#if defined(CONFIG_KEYUTILS) && defined(CONFIG_SECRET_KEYRING) g_test_add_func("/crypto/secret/keyring/good", test_secret_keyring_good); g_test_add_func("/crypto/secret/keyring/revoked_key", @@ -582,7 +582,7 @@ int main(int argc, char **argv) test_secret_keyring_bad_serial_key); g_test_add_func("/crypto/secret/keyring/bad_key_access_right", test_secret_keyring_bad_key_access_right); -#endif /* CONFIG_KEYUTILS */ +#endif /* CONFIG_KEYUTILS && CONFIG_SECRET_KEYRING */ g_test_add_func("/crypto/secret/noconv/base64/good", test_secret_noconv_base64_good); diff --git a/tests/unit/test-io-channel-command.c b/tests/unit/test-io-channel-command.c index 99056e07c0231253b56ab109db9e48c02b58c4c9..aa09c559cd4836c73478b5a0e1af001412a42585 100644 --- a/tests/unit/test-io-channel-command.c +++ b/tests/unit/test-io-channel-command.c @@ -41,7 +41,8 @@ static void 
test_io_channel_command_fifo(bool async) unlink(TEST_FIFO); if (access("/bin/socat", X_OK) < 0) { - return; /* Pretend success if socat is not present */ + g_test_skip("socat is missing"); + return; } if (mkfifo(TEST_FIFO, 0600) < 0) { abort(); diff --git a/tests/unit/test-qga.c b/tests/unit/test-qga.c index 5cb140d1b53d8e5b9228d6cd2a914d2dfcfb0182..e6564c673b5674e651b0ea7d2996ed40ae44a5a0 100644 --- a/tests/unit/test-qga.c +++ b/tests/unit/test-qga.c @@ -32,6 +32,7 @@ static int connect_qga(char *path) g_usleep(G_USEC_PER_SEC); } if (i++ == 10) { + close(s); return -1; } } while (ret == -1); diff --git a/tests/unit/test-qobject-input-visitor.c b/tests/unit/test-qobject-input-visitor.c index 6f59a7f43249fc12447d16c5a36ef0a4d6829d01..8c9e8124d03c23aacfa15abd4764a15dc090f51f 100644 --- a/tests/unit/test-qobject-input-visitor.c +++ b/tests/unit/test-qobject-input-visitor.c @@ -95,7 +95,7 @@ Visitor *visitor_input_test_init(TestInputVisitorData *data, /* similar to visitor_input_test_init(), but does not expect a string * literal/format json_string argument and so can be used for - * programatically generated strings (and we can't pass in programatically + * programmatically generated strings (and we can't pass in programmatically * generated strings via %s format parameters since qobject_from_jsonv() * will wrap those in double-quotes and treat the entire object as a * string) @@ -448,9 +448,8 @@ static void test_visitor_in_list(TestInputVisitorData *data, g_assert(head != NULL); for (i = 0, item = head; item; item = item->next, i++) { - char string[12]; + g_autofree char *string = g_strdup_printf("string%d", i); - snprintf(string, sizeof(string), "string%d", i); g_assert_cmpstr(item->value->string, ==, string); g_assert_cmpint(item->value->integer, ==, 42 + i); } diff --git a/tests/unit/test-smp-parse.c b/tests/unit/test-smp-parse.c index b02450e25a369a14751644e6f1161ffc9e987f7c..fdc39a846ca608d2207d029ecd6dd0a7fc6293c2 100644 --- a/tests/unit/test-smp-parse.c +++ b/tests/unit/test-smp-parse.c @@ -61,6 +61,20 @@ .has_maxcpus = hf, .maxcpus = f, \ } +/* + * Currently a 4-level topology hierarchy is supported on ARM virt machines + * -sockets/clusters/cores/threads + */ +#define SMP_CONFIG_WITH_CLUSTERS(ha, a, hb, b, hc, c, hd, d, he, e, hf, f) \ + { \ + .has_cpus = ha, .cpus = a, \ + .has_sockets = hb, .sockets = b, \ + .has_clusters = hc, .clusters = c, \ + .has_cores = hd, .cores = d, \ + .has_threads = he, .threads = e, \ + .has_maxcpus = hf, .maxcpus = f, \ + } + /** * @config - the given SMP configuration * @expect_prefer_sockets - the expected parsing result for the @@ -83,7 +97,7 @@ typedef struct SMPTestData { * then test the automatic calculation algorithm of the missing * values in the parser. 
*/ -static struct SMPTestData data_generic_valid[] = { +static const struct SMPTestData data_generic_valid[] = { { /* config: no configuration provided * expect: cpus=1,sockets=1,cores=1,threads=1,maxcpus=1 */ @@ -285,11 +299,15 @@ static struct SMPTestData data_generic_valid[] = { }, }; -static struct SMPTestData data_generic_invalid[] = { +static const struct SMPTestData data_generic_invalid[] = { { /* config: -smp 2,dies=2 */ .config = SMP_CONFIG_WITH_DIES(T, 2, F, 0, T, 2, F, 0, F, 0, F, 0), .expect_error = "dies not supported by this machine's CPU topology", + }, { + /* config: -smp 2,clusters=2 */ + .config = SMP_CONFIG_WITH_CLUSTERS(T, 2, F, 0, T, 2, F, 0, F, 0, F, 0), + .expect_error = "clusters not supported by this machine's CPU topology", }, { /* config: -smp 8,sockets=2,cores=4,threads=2,maxcpus=8 */ .config = SMP_CONFIG_GENERIC(T, 8, T, 2, T, 4, T, 2, T, 8), @@ -319,7 +337,7 @@ static struct SMPTestData data_generic_invalid[] = { }, }; -static struct SMPTestData data_with_dies_invalid[] = { +static const struct SMPTestData data_with_dies_invalid[] = { { /* config: -smp 16,sockets=2,dies=2,cores=4,threads=2,maxcpus=16 */ .config = SMP_CONFIG_WITH_DIES(T, 16, T, 2, T, 2, T, 4, T, 2, T, 16), @@ -337,42 +355,63 @@ static struct SMPTestData data_with_dies_invalid[] = { }, }; -static char *smp_config_to_string(SMPConfiguration *config) +static const struct SMPTestData data_with_clusters_invalid[] = { + { + /* config: -smp 16,sockets=2,clusters=2,cores=4,threads=2,maxcpus=16 */ + .config = SMP_CONFIG_WITH_CLUSTERS(T, 16, T, 2, T, 2, T, 4, T, 2, T, 16), + .expect_error = "Invalid CPU topology: " + "product of the hierarchy must match maxcpus: " + "sockets (2) * clusters (2) * cores (4) * threads (2) " + "!= maxcpus (16)", + }, { + /* config: -smp 34,sockets=2,clusters=2,cores=4,threads=2,maxcpus=32 */ + .config = SMP_CONFIG_WITH_CLUSTERS(T, 34, T, 2, T, 2, T, 4, T, 2, T, 32), + .expect_error = "Invalid CPU topology: " + "maxcpus must be equal to or greater than smp: " + "sockets (2) * clusters (2) * cores (4) * threads (2) " + "== maxcpus (32) < smp_cpus (34)", + }, +}; + +static char *smp_config_to_string(const SMPConfiguration *config) { return g_strdup_printf( "(SMPConfiguration) {\n" - " .has_cpus = %5s, cpus = %" PRId64 ",\n" - " .has_sockets = %5s, sockets = %" PRId64 ",\n" - " .has_dies = %5s, dies = %" PRId64 ",\n" - " .has_cores = %5s, cores = %" PRId64 ",\n" - " .has_threads = %5s, threads = %" PRId64 ",\n" - " .has_maxcpus = %5s, maxcpus = %" PRId64 ",\n" + " .has_cpus = %5s, cpus = %" PRId64 ",\n" + " .has_sockets = %5s, sockets = %" PRId64 ",\n" + " .has_dies = %5s, dies = %" PRId64 ",\n" + " .has_clusters = %5s, clusters = %" PRId64 ",\n" + " .has_cores = %5s, cores = %" PRId64 ",\n" + " .has_threads = %5s, threads = %" PRId64 ",\n" + " .has_maxcpus = %5s, maxcpus = %" PRId64 ",\n" "}", config->has_cpus ? "true" : "false", config->cpus, config->has_sockets ? "true" : "false", config->sockets, config->has_dies ? "true" : "false", config->dies, + config->has_clusters ? "true" : "false", config->clusters, config->has_cores ? "true" : "false", config->cores, config->has_threads ? "true" : "false", config->threads, config->has_maxcpus ? 
"true" : "false", config->maxcpus); } -static char *cpu_topology_to_string(CpuTopology *topo) +static char *cpu_topology_to_string(const CpuTopology *topo) { return g_strdup_printf( "(CpuTopology) {\n" " .cpus = %u,\n" " .sockets = %u,\n" " .dies = %u,\n" + " .clusters = %u,\n" " .cores = %u,\n" " .threads = %u,\n" " .max_cpus = %u,\n" "}", - topo->cpus, topo->sockets, topo->dies, + topo->cpus, topo->sockets, topo->dies, topo->clusters, topo->cores, topo->threads, topo->max_cpus); } -static void check_parse(MachineState *ms, SMPConfiguration *config, - CpuTopology *expect_topo, const char *expect_err, +static void check_parse(MachineState *ms, const SMPConfiguration *config, + const CpuTopology *expect_topo, const char *expect_err, bool is_valid) { g_autofree char *config_str = smp_config_to_string(config); @@ -380,8 +419,8 @@ static void check_parse(MachineState *ms, SMPConfiguration *config, g_autofree char *output_topo_str = NULL; Error *err = NULL; - /* call the generic parser smp_parse() */ - smp_parse(ms, config, &err); + /* call the generic parser */ + machine_parse_smp_config(ms, config, &err); output_topo_str = cpu_topology_to_string(&ms->smp); @@ -391,6 +430,7 @@ static void check_parse(MachineState *ms, SMPConfiguration *config, (ms->smp.cpus == expect_topo->cpus) && (ms->smp.sockets == expect_topo->sockets) && (ms->smp.dies == expect_topo->dies) && + (ms->smp.clusters == expect_topo->clusters) && (ms->smp.cores == expect_topo->cores) && (ms->smp.threads == expect_topo->threads) && (ms->smp.max_cpus == expect_topo->max_cpus)) { @@ -466,12 +506,17 @@ static void smp_parse_test(MachineState *ms, SMPTestData *data, bool is_valid) } /* The parsed results of the unsupported parameters should be 1 */ -static void unsupported_params_init(MachineClass *mc, SMPTestData *data) +static void unsupported_params_init(const MachineClass *mc, SMPTestData *data) { if (!mc->smp_props.dies_supported) { data->expect_prefer_sockets.dies = 1; data->expect_prefer_cores.dies = 1; } + + if (!mc->smp_props.clusters_supported) { + data->expect_prefer_sockets.clusters = 1; + data->expect_prefer_cores.clusters = 1; + } } static void machine_base_class_init(ObjectClass *oc, void *data) @@ -481,101 +526,171 @@ static void machine_base_class_init(ObjectClass *oc, void *data) mc->min_cpus = MIN_CPUS; mc->max_cpus = MAX_CPUS; - mc->smp_props.prefer_sockets = true; - mc->smp_props.dies_supported = false; - mc->name = g_strdup(SMP_MACHINE_NAME); } -static void test_generic(void) +static void machine_generic_invalid_class_init(ObjectClass *oc, void *data) { - Object *obj = object_new(TYPE_MACHINE); + MachineClass *mc = MACHINE_CLASS(oc); + + /* Force invalid min CPUs and max CPUs */ + mc->min_cpus = 2; + mc->max_cpus = 511; +} + +static void machine_with_dies_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->smp_props.dies_supported = true; +} + +static void machine_with_clusters_class_init(ObjectClass *oc, void *data) +{ + MachineClass *mc = MACHINE_CLASS(oc); + + mc->smp_props.clusters_supported = true; +} + +static void test_generic_valid(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); MachineState *ms = MACHINE(obj); MachineClass *mc = MACHINE_GET_CLASS(obj); - SMPTestData *data = &(SMPTestData){{ }}; + SMPTestData data = {}; int i; for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { - *data = data_generic_valid[i]; - unsupported_params_init(mc, data); + data = data_generic_valid[i]; + unsupported_params_init(mc, 
&data); - smp_parse_test(ms, data, true); + smp_parse_test(ms, &data, true); /* Unsupported parameters can be provided with their values as 1 */ - data->config.has_dies = true; - data->config.dies = 1; - smp_parse_test(ms, data, true); + data.config.has_dies = true; + data.config.dies = 1; + smp_parse_test(ms, &data, true); } - /* Force invalid min CPUs and max CPUs */ - mc->min_cpus = 2; - mc->max_cpus = 511; + object_unref(obj); +} + +static void test_generic_invalid(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + int i; for (i = 0; i < ARRAY_SIZE(data_generic_invalid); i++) { - *data = data_generic_invalid[i]; - unsupported_params_init(mc, data); + data = data_generic_invalid[i]; + unsupported_params_init(mc, &data); - smp_parse_test(ms, data, false); + smp_parse_test(ms, &data, false); } - /* Reset the supported min CPUs and max CPUs */ - mc->min_cpus = MIN_CPUS; - mc->max_cpus = MAX_CPUS; - object_unref(obj); } -static void test_with_dies(void) +static void test_with_dies(const void *opaque) { - Object *obj = object_new(TYPE_MACHINE); + const char *machine_type = opaque; + Object *obj = object_new(machine_type); MachineState *ms = MACHINE(obj); MachineClass *mc = MACHINE_GET_CLASS(obj); - SMPTestData *data = &(SMPTestData){{ }}; + SMPTestData data = {}; unsigned int num_dies = 2; int i; - /* Force the SMP compat properties */ - mc->smp_props.dies_supported = true; - for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { - *data = data_generic_valid[i]; - unsupported_params_init(mc, data); + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); /* when dies parameter is omitted, it will be set as 1 */ - data->expect_prefer_sockets.dies = 1; - data->expect_prefer_cores.dies = 1; + data.expect_prefer_sockets.dies = 1; + data.expect_prefer_cores.dies = 1; - smp_parse_test(ms, data, true); + smp_parse_test(ms, &data, true); /* when dies parameter is specified */ - data->config.has_dies = true; - data->config.dies = num_dies; - if (data->config.has_cpus) { - data->config.cpus *= num_dies; + data.config.has_dies = true; + data.config.dies = num_dies; + if (data.config.has_cpus) { + data.config.cpus *= num_dies; } - if (data->config.has_maxcpus) { - data->config.maxcpus *= num_dies; + if (data.config.has_maxcpus) { + data.config.maxcpus *= num_dies; } - data->expect_prefer_sockets.dies = num_dies; - data->expect_prefer_sockets.cpus *= num_dies; - data->expect_prefer_sockets.max_cpus *= num_dies; - data->expect_prefer_cores.dies = num_dies; - data->expect_prefer_cores.cpus *= num_dies; - data->expect_prefer_cores.max_cpus *= num_dies; + data.expect_prefer_sockets.dies = num_dies; + data.expect_prefer_sockets.cpus *= num_dies; + data.expect_prefer_sockets.max_cpus *= num_dies; + data.expect_prefer_cores.dies = num_dies; + data.expect_prefer_cores.cpus *= num_dies; + data.expect_prefer_cores.max_cpus *= num_dies; - smp_parse_test(ms, data, true); + smp_parse_test(ms, &data, true); } for (i = 0; i < ARRAY_SIZE(data_with_dies_invalid); i++) { - *data = data_with_dies_invalid[i]; - unsupported_params_init(mc, data); + data = data_with_dies_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } + + object_unref(obj); +} + +static void test_with_clusters(const void *opaque) +{ + const char *machine_type = opaque; + Object *obj = object_new(machine_type); + MachineState *ms = MACHINE(obj); + 
MachineClass *mc = MACHINE_GET_CLASS(obj); + SMPTestData data = {}; + unsigned int num_clusters = 2; + int i; + + for (i = 0; i < ARRAY_SIZE(data_generic_valid); i++) { + data = data_generic_valid[i]; + unsupported_params_init(mc, &data); + + /* when clusters parameter is omitted, it will be set as 1 */ + data.expect_prefer_sockets.clusters = 1; + data.expect_prefer_cores.clusters = 1; + + smp_parse_test(ms, &data, true); - smp_parse_test(ms, data, false); + /* when clusters parameter is specified */ + data.config.has_clusters = true; + data.config.clusters = num_clusters; + if (data.config.has_cpus) { + data.config.cpus *= num_clusters; + } + if (data.config.has_maxcpus) { + data.config.maxcpus *= num_clusters; + } + + data.expect_prefer_sockets.clusters = num_clusters; + data.expect_prefer_sockets.cpus *= num_clusters; + data.expect_prefer_sockets.max_cpus *= num_clusters; + data.expect_prefer_cores.clusters = num_clusters; + data.expect_prefer_cores.cpus *= num_clusters; + data.expect_prefer_cores.max_cpus *= num_clusters; + + smp_parse_test(ms, &data, true); } - /* Restore the SMP compat properties */ - mc->smp_props.dies_supported = false; + for (i = 0; i < ARRAY_SIZE(data_with_clusters_invalid); i++) { + data = data_with_clusters_invalid[i]; + unsupported_params_init(mc, &data); + + smp_parse_test(ms, &data, false); + } object_unref(obj); } @@ -585,9 +700,25 @@ static const TypeInfo smp_machine_types[] = { { .name = TYPE_MACHINE, .parent = TYPE_OBJECT, + .abstract = true, .class_init = machine_base_class_init, .class_size = sizeof(MachineClass), .instance_size = sizeof(MachineState), + }, { + .name = MACHINE_TYPE_NAME("smp-generic-valid"), + .parent = TYPE_MACHINE, + }, { + .name = MACHINE_TYPE_NAME("smp-generic-invalid"), + .parent = TYPE_MACHINE, + .class_init = machine_generic_invalid_class_init, + }, { + .name = MACHINE_TYPE_NAME("smp-with-dies"), + .parent = TYPE_MACHINE, + .class_init = machine_with_dies_class_init, + }, { + .name = MACHINE_TYPE_NAME("smp-with-clusters"), + .parent = TYPE_MACHINE, + .class_init = machine_with_clusters_class_init, } }; @@ -599,8 +730,18 @@ int main(int argc, char *argv[]) g_test_init(&argc, &argv, NULL); - g_test_add_func("/test-smp-parse/generic", test_generic); - g_test_add_func("/test-smp-parse/with_dies", test_with_dies); + g_test_add_data_func("/test-smp-parse/generic/valid", + MACHINE_TYPE_NAME("smp-generic-valid"), + test_generic_valid); + g_test_add_data_func("/test-smp-parse/generic/invalid", + MACHINE_TYPE_NAME("smp-generic-invalid"), + test_generic_invalid); + g_test_add_data_func("/test-smp-parse/with_dies", + MACHINE_TYPE_NAME("smp-with-dies"), + test_with_dies); + g_test_add_data_func("/test-smp-parse/with_clusters", + MACHINE_TYPE_NAME("smp-with-clusters"), + test_with_clusters); g_test_run(); diff --git a/tests/unit/test-throttle.c b/tests/unit/test-throttle.c index 7adb5e6652c94bda8238e520f72fb82c0a8afc24..dc8739c1d6d49516556ee6ac9c4ba1207c657efd 100644 --- a/tests/unit/test-throttle.c +++ b/tests/unit/test-throttle.c @@ -135,7 +135,7 @@ static void test_compute_wait(void) g_assert(double_cmp(bkt.burst_level, 0)); g_assert(double_cmp(bkt.level, (i + 1) * (bkt.max - bkt.avg) / 10)); /* We can do bursts for the 2 seconds we have configured in - * burst_length. We have 100 extra miliseconds of burst + * burst_length. We have 100 extra milliseconds of burst * because bkt.level has been leaking during this time. * After that, we have to wait. */ result = i < 21 ? 
0 : 1.8 * NANOSECONDS_PER_SECOND;
     }
 
@@ -375,11 +375,11 @@ static void test_is_valid_for_value(int value, bool should_be_valid)
 static void test_is_valid(void)
 {
-    /* negative number are invalid */
+    /* negative numbers are invalid */
     test_is_valid_for_value(-1, false);
-    /* zero are valids */
+    /* zero is valid */
     test_is_valid_for_value(0, true);
-    /* positives numers are valids */
+    /* positive numbers are valid */
     test_is_valid_for_value(1, true);
 }
 
diff --git a/tests/unit/test-util-filemonitor.c b/tests/unit/test-util-filemonitor.c
index b629e10857255aea33dc07bfb0ef927baaff731e..a22de275955d3ad7db827a813a7731d4fb2fae76 100644
--- a/tests/unit/test-util-filemonitor.c
+++ b/tests/unit/test-util-filemonitor.c
@@ -132,7 +132,7 @@ qemu_file_monitor_test_record_free(QFileMonitorTestRecord *rec)
  * the file monitor event handler. Since events are
  * emitted in the background thread running the event
  * loop, we can't assume there is a record available
- * immediately. Thus we will sleep for upto 5 seconds
+ * immediately. Thus we will sleep for up to 5 seconds
  * to wait for the event to be queued for us.
  */
 static QFileMonitorTestRecord *
diff --git a/tests/unit/test-vmstate.c b/tests/unit/test-vmstate.c
index 4688c03ea72e8a94f94294e8d2c3b92e4172e0a3..91879ad2d4ee65ba8a57f3bf5769979fa2887756 100644
--- a/tests/unit/test-vmstate.c
+++ b/tests/unit/test-vmstate.c
@@ -88,17 +88,16 @@ static void save_buffer(const uint8_t *buf, size_t buf_size)
 static void compare_vmstate(const uint8_t *wire, size_t size)
 {
     QEMUFile *f = open_test_file(false);
-    uint8_t result[size];
+    g_autofree uint8_t *result = g_malloc(size);
 
     /* read back as binary */
-    g_assert_cmpint(qemu_get_buffer(f, result, sizeof(result)), ==,
-                    sizeof(result));
+    g_assert_cmpint(qemu_get_buffer(f, result, size), ==, size);
     g_assert(!qemu_file_get_error(f));
 
     /* Compare what is in the file with what we
        expected to be there */
-    SUCCESS(memcmp(result, wire, sizeof(result)));
+    SUCCESS(memcmp(result, wire, size));
 
     /* Must reach EOF */
     qemu_get_byte(f);
 
@@ -1076,7 +1075,6 @@ static gboolean diff_tree(gpointer key, gpointer value, gpointer data)
     struct match_node_data d = {tp->tree2, key, value};
 
     g_tree_foreach(tp->tree2, tp->match_node, &d);
-    g_tree_remove(tp->tree1, key);
     return false;
 }
 
@@ -1085,9 +1083,9 @@ static void compare_trees(GTree *tree1, GTree *tree2,
 {
     struct tree_cmp_data tp = {tree1, tree2, function};
 
+    assert(g_tree_nnodes(tree1) == g_tree_nnodes(tree2));
     g_tree_foreach(tree1, diff_tree, &tp);
-    assert(g_tree_nnodes(tree1) == 0);
-    assert(g_tree_nnodes(tree2) == 0);
+    g_tree_destroy(g_tree_ref(tree1));
 }
 
 static void diff_domain(TestGTreeDomain *d1, TestGTreeDomain *d2)
diff --git a/tests/unit/test-xbzrle.c b/tests/unit/test-xbzrle.c
index 795d6f1cbabb0761206aa812f9319155481e2e09..baa364b443b648aff73c32ebdb1bd8749e3e34b2 100644
--- a/tests/unit/test-xbzrle.c
+++ b/tests/unit/test-xbzrle.c
@@ -17,6 +17,35 @@
 
 #define XBZRLE_PAGE_SIZE 4096
 
+int (*xbzrle_encode_buffer_func)(uint8_t *, uint8_t *, int,
+                                 uint8_t *, int) = xbzrle_encode_buffer;
+#if defined(CONFIG_AVX512BW_OPT)
+#include "qemu/cpuid.h"
+static void __attribute__((constructor)) init_cpu_flag(void)
+{
+    unsigned max = __get_cpuid_max(0, NULL);
+    int a, b, c, d;
+    if (max >= 1) {
+        __cpuid(1, a, b, c, d);
+        /* We must check that AVX is not just available, but usable.
*/ + if ((c & bit_OSXSAVE) && (c & bit_AVX) && max >= 7) { + int bv; + __asm("xgetbv" : "=a"(bv), "=d"(d) : "c"(0)); + __cpuid_count(7, 0, a, b, c, d); + /* 0xe6: + * XCR0[7:5] = 111b (OPMASK state, upper 256-bit of ZMM0-ZMM15 + * and ZMM16-ZMM31 state are enabled by OS) + * XCR0[2:1] = 11b (XMM state and YMM state are enabled by OS) + */ + if ((bv & 0xe6) == 0xe6 && (b & bit_AVX512BW)) { + xbzrle_encode_buffer_func = xbzrle_encode_buffer_avx512; + } + } + } + return ; +} +#endif + static void test_uleb(void) { uint32_t i, val; @@ -55,7 +84,7 @@ static void test_encode_decode_zero(void) buffer[1000 + diff_len + 5] = 105; /* encode zero page */ - dlen = xbzrle_encode_buffer(buffer, buffer, XBZRLE_PAGE_SIZE, compressed, + dlen = xbzrle_encode_buffer_func(buffer, buffer, XBZRLE_PAGE_SIZE, compressed, XBZRLE_PAGE_SIZE); g_assert(dlen == 0); @@ -79,7 +108,7 @@ static void test_encode_decode_unchanged(void) test[1000 + diff_len + 5] = 109; /* test unchanged buffer */ - dlen = xbzrle_encode_buffer(test, test, XBZRLE_PAGE_SIZE, compressed, + dlen = xbzrle_encode_buffer_func(test, test, XBZRLE_PAGE_SIZE, compressed, XBZRLE_PAGE_SIZE); g_assert(dlen == 0); @@ -97,7 +126,7 @@ static void test_encode_decode_1_byte(void) test[XBZRLE_PAGE_SIZE - 1] = 1; - dlen = xbzrle_encode_buffer(buffer, test, XBZRLE_PAGE_SIZE, compressed, + dlen = xbzrle_encode_buffer_func(buffer, test, XBZRLE_PAGE_SIZE, compressed, XBZRLE_PAGE_SIZE); g_assert(dlen == (uleb128_encode_small(&buf[0], 4095) + 2)); @@ -122,7 +151,7 @@ static void test_encode_decode_overflow(void) } /* encode overflow */ - rc = xbzrle_encode_buffer(buffer, test, XBZRLE_PAGE_SIZE, compressed, + rc = xbzrle_encode_buffer_func(buffer, test, XBZRLE_PAGE_SIZE, compressed, XBZRLE_PAGE_SIZE); g_assert(rc == -1); @@ -153,7 +182,7 @@ static void encode_decode_range(void) test[1000 + diff_len + 5] = 109; /* test encode/decode */ - dlen = xbzrle_encode_buffer(test, buffer, XBZRLE_PAGE_SIZE, compressed, + dlen = xbzrle_encode_buffer_func(test, buffer, XBZRLE_PAGE_SIZE, compressed, XBZRLE_PAGE_SIZE); rc = xbzrle_decode_buffer(compressed, dlen, test, XBZRLE_PAGE_SIZE); diff --git a/tests/vm/Makefile.include b/tests/vm/Makefile.include index ae91f5043e506d2a3e46edfe827c794be656c325..b1c45954b64dbf0f944e1ea702c15117558dbd62 100644 --- a/tests/vm/Makefile.include +++ b/tests/vm/Makefile.include @@ -75,7 +75,7 @@ endif @echo " QEMU_IMG=/path/to/qemu-img - Change path to qemu-img tool" @echo " QEMU_LOCAL=1 - Use QEMU binary local to this build." @echo " TARGET_LIST=a,b,c - Override target list in builds" - @echo " V=1 - Enable verbose ouput on host and guest commands" + @echo " V=1 - Enable verbose output on host and guest commands" vm-build-all: $(addprefix vm-build-, $(IMAGES)) diff --git a/tests/vm/basevm.py b/tests/vm/basevm.py index 254e11c932bff64719c8a4c9a505123ec2a607da..4003bc5fef853113038c52c1ef265a7065b2c21b 100644 --- a/tests/vm/basevm.py +++ b/tests/vm/basevm.py @@ -228,7 +228,8 @@ def _ssh_do(self, user, cmd, check): "-o", "UserKnownHostsFile=" + os.devnull, "-o", "ConnectTimeout={}".format(self._config["ssh_timeout"]), - "-p", str(self.ssh_port), "-i", self._ssh_tmp_key_file] + "-p", str(self.ssh_port), "-i", self._ssh_tmp_key_file, + "-o", "IdentitiesOnly=yes"] # If not in debug mode, set ssh to quiet mode to # avoid printing the results of commands. 
if not self.debug: diff --git a/tests/vm/netbsd b/tests/vm/netbsd index 4cc58df130f304cf9ff2976cf1026de575e2ec57..df4769c63d14dfa27badfe6d53e731f53c23a874 100755 --- a/tests/vm/netbsd +++ b/tests/vm/netbsd @@ -22,8 +22,8 @@ class NetBSDVM(basevm.BaseVM): name = "netbsd" arch = "x86_64" - link = "https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.2/images/NetBSD-9.2-amd64.iso" - csum = "5ee0ea101f73386b9b424f5d1041e371db3c42fdd6f4e4518dc79c4a08f31d43091ebe93425c9f0dcaaed2b51131836fe6774f33f89030b58d64709b35fda72f" + link = "https://cdn.netbsd.org/pub/NetBSD/NetBSD-9.3/images/NetBSD-9.3-amd64.iso" + csum = "2bfce544f762a579f61478e7106c436fc48731ff25cf6f79b392ba5752e6f5ec130364286f7471716290a5f033637cf56aacee7fedb91095face59adf36300c3" size = "20G" pkgs = [ # tools diff --git a/tests/vm/ubuntuvm.py b/tests/vm/ubuntuvm.py index 6689ad87aa83d5fdd3caaacc28d2097bf4d8c86b..15c530c571148529e5e41bff9005df90e3267166 100644 --- a/tests/vm/ubuntuvm.py +++ b/tests/vm/ubuntuvm.py @@ -51,7 +51,7 @@ def build_image(self, img): # then we will jump right to the graceful shutdown if self._config['install_cmds'] != "": # Issue the install commands. - # This can be overriden by the user in the config .yml. + # This can be overridden by the user in the config .yml. install_cmds = self._config['install_cmds'].split(',') for cmd in install_cmds: self.ssh_root(cmd) diff --git a/tools/virtiofsd/passthrough_ll.c b/tools/virtiofsd/passthrough_ll.c index 64b5b4fbb18670075eedd1f3d158cc36dda17a99..523d8fbe1ed52726eaa1be4fa75bbc41de5000e3 100644 --- a/tools/virtiofsd/passthrough_ll.c +++ b/tools/virtiofsd/passthrough_ll.c @@ -54,6 +54,7 @@ #include #include #include +#include <grp.h> #include "qemu/cutils.h" #include "passthrough_helpers.h" @@ -1161,6 +1162,30 @@ static void lo_lookup(fuse_req_t req, fuse_ino_t parent, const char *name) #define OURSYS_setresuid SYS_setresuid #endif +static void drop_supplementary_groups(void) +{ + int ret; + + ret = getgroups(0, NULL); + if (ret == -1) { + fuse_log(FUSE_LOG_ERR, "getgroups() failed with error=%d:%s\n", + errno, strerror(errno)); + exit(1); + } + + if (!ret) { + return; + } + + /* Drop all supplementary groups. We should not need them */ + ret = setgroups(0, NULL); + if (ret == -1) { + fuse_log(FUSE_LOG_ERR, "setgroups() failed with error=%d:%s\n", + errno, strerror(errno)); + exit(1); + } +} + /* * Change to uid/gid of caller so that file is created with * ownership of caller.
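The drop_supplementary_groups() hunk above uses the standard getgroups()/setgroups() pattern for a privileged daemon shedding group memberships it no longer needs. As a minimal standalone sketch of the same pattern (plain POSIX, independent of virtiofsd; the helper name is invented here, and CAP_SETGID is assumed):

    #include <errno.h>
    #include <grp.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <unistd.h>

    /* Count the supplementary groups first, then clear the whole list. */
    static void drop_groups_or_die(void)
    {
        int n = getgroups(0, NULL);          /* query the count only */

        if (n == -1) {
            fprintf(stderr, "getgroups: %s\n", strerror(errno));
            exit(EXIT_FAILURE);
        }
        if (n == 0) {
            return;                          /* nothing to drop */
        }
        if (setgroups(0, NULL) == -1) {      /* needs CAP_SETGID */
            fprintf(stderr, "setgroups: %s\n", strerror(errno));
            exit(EXIT_FAILURE);
        }
    }

    int main(void)
    {
        drop_groups_or_die();
        return 0;
    }

Querying first matters because a process that is already outside every supplementary group can skip the setgroups() call, which would otherwise fail with EPERM when run without privilege; the patch's early return on !ret serves the same purpose.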
@@ -3766,6 +3791,7 @@ static void setup_nofile_rlimit(unsigned long rlimit_nofile) static void log_func(enum fuse_log_level level, const char *fmt, va_list ap) { g_autofree char *localfmt = NULL; + char buf[64]; if (current_log_level < level) { return; @@ -3778,9 +3804,11 @@ static void log_func(enum fuse_log_level level, const char *fmt, va_list ap) fmt); } else { g_autoptr(GDateTime) now = g_date_time_new_now_utc(); - g_autofree char *nowstr = g_date_time_format(now, "%Y-%m-%d %H:%M:%S.%f%z"); + g_autofree char *nowstr = g_date_time_format(now, + "%Y-%m-%d %H:%M:%S.%%06d%z"); + snprintf(buf, 64, nowstr, g_date_time_get_microsecond(now)); localfmt = g_strdup_printf("[%s] [ID: %08ld] %s", - nowstr, syscall(__NR_gettid), fmt); + buf, syscall(__NR_gettid), fmt); } fmt = localfmt; } @@ -3926,6 +3954,8 @@ int main(int argc, char *argv[]) qemu_init_exec_dir(argv[0]); + drop_supplementary_groups(); + pthread_mutex_init(&lo.mutex, NULL); lo.inodes = g_hash_table_new(lo_key_hash, lo_key_equal); lo.root.fd = -1; diff --git a/tools/virtiofsd/passthrough_seccomp.c b/tools/virtiofsd/passthrough_seccomp.c index a3ce9f898d2d5cb30153b9301c1c241d0c36293d..2bc0127b69ad72a0505bb45c1fcdef6a069b7866 100644 --- a/tools/virtiofsd/passthrough_seccomp.c +++ b/tools/virtiofsd/passthrough_seccomp.c @@ -91,6 +91,9 @@ static const int syscall_allowlist[] = { SCMP_SYS(renameat2), SCMP_SYS(removexattr), SCMP_SYS(restart_syscall), +#ifdef __NR_rseq + SCMP_SYS(rseq), /* required since glibc 2.35 */ +#endif SCMP_SYS(rt_sigaction), SCMP_SYS(rt_sigprocmask), SCMP_SYS(rt_sigreturn), diff --git a/ui/clipboard.c b/ui/clipboard.c index d7b008d62a07735b07f8ed85e820f5ce2f090351..6721852cb6e4c8726bc405bb9a5513c053f1bb9c 100644 --- a/ui/clipboard.c +++ b/ui/clipboard.c @@ -123,9 +123,15 @@ void qemu_clipboard_set_data(QemuClipboardPeer *peer, } g_free(info->types[type].data); - info->types[type].data = g_memdup(data, size); - info->types[type].size = size; - info->types[type].available = true; + if (size) { + info->types[type].data = g_memdup(data, size); + info->types[type].size = size; + info->types[type].available = true; + } else { + info->types[type].data = NULL; + info->types[type].size = 0; + info->types[type].available = false; + } if (update) { qemu_clipboard_update(info); diff --git a/ui/console.c b/ui/console.c index 29a3e3f0f51c839d035b0236cc80e1fbba1c5949..28770bacd1361300e2898b75997ade04baf5eb45 100644 --- a/ui/console.c +++ b/ui/console.c @@ -1187,6 +1187,7 @@ static const int qcode_to_keysym[Q_KEY_CODE__MAX] = { [Q_KEY_CODE_PGUP] = QEMU_KEY_PAGEUP, [Q_KEY_CODE_PGDN] = QEMU_KEY_PAGEDOWN, [Q_KEY_CODE_DELETE] = QEMU_KEY_DELETE, + [Q_KEY_CODE_TAB] = QEMU_KEY_TAB, [Q_KEY_CODE_BACKSPACE] = QEMU_KEY_BACKSPACE, }; @@ -1526,6 +1527,9 @@ bool dpy_ui_info_supported(QemuConsole *con) con = active_console; } + if (con == NULL) { + return false; + } return con->hw_ops->ui_info != NULL; } diff --git a/ui/curses.c b/ui/curses.c index 861d63244c7047c6e94ab7f45764fbc58765cbaa..de962faa7cd682d855d459583b1612c6a594382d 100644 --- a/ui/curses.c +++ b/ui/curses.c @@ -69,7 +69,7 @@ static void curses_update(DisplayChangeListener *dcl, int x, int y, int w, int h) { console_ch_t *line; - cchar_t curses_line[width]; + g_autofree cchar_t *curses_line = g_new(cchar_t, width); wchar_t wch[CCHARW_MAX]; attr_t attrs; short colors; diff --git a/ui/cursor.c b/ui/cursor.c index 1d62ddd4d072f6c60926db9d23150b510e783c3e..835f0802f951a3ec965b95d7742eda1704c8fccd 100644 --- a/ui/cursor.c +++ b/ui/cursor.c @@ -46,6 +46,8 @@ static QEMUCursor 
*cursor_parse_xpm(const char *xpm[]) /* parse pixel data */ c = cursor_alloc(width, height); + assert(c != NULL); + for (pixel = 0, y = 0; y < height; y++, line++) { for (x = 0; x < height; x++, pixel++) { idx = xpm[line][x]; @@ -91,7 +93,11 @@ QEMUCursor *cursor_builtin_left_ptr(void) QEMUCursor *cursor_alloc(int width, int height) { QEMUCursor *c; - int datasize = width * height * sizeof(uint32_t); + size_t datasize = width * height * sizeof(uint32_t); + + if (width > 512 || height > 512) { + return NULL; + } c = g_malloc0(sizeof(QEMUCursor) + datasize); c->width = width; diff --git a/ui/gtk-egl.c b/ui/gtk-egl.c index 45cb67712df03aff9db65a549118f6e397aea243..0e1e5bfaaaa327c0ad8aba7cda8fd14b048c58ea 100644 --- a/ui/gtk-egl.c +++ b/ui/gtk-egl.c @@ -340,7 +340,7 @@ void gd_egl_flush(DisplayChangeListener *dcl, VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); GtkWidget *area = vc->gfx.drawing_area; - if (vc->gfx.guest_fb.dmabuf) { + if (vc->gfx.guest_fb.dmabuf && !vc->gfx.guest_fb.dmabuf->draw_submitted) { graphic_hw_gl_block(vc->gfx.dcl.con, true); vc->gfx.guest_fb.dmabuf->draw_submitted = true; gtk_widget_queue_draw_area(area, x, y, w, h); diff --git a/ui/gtk-gl-area.c b/ui/gtk-gl-area.c index 01e4e74ee361e6dbf76acc27e32c63399d846481..11e0cb4af2492f8be708e8ac8a147026488d41fa 100644 --- a/ui/gtk-gl-area.c +++ b/ui/gtk-gl-area.c @@ -246,7 +246,7 @@ void gd_gl_area_scanout_flush(DisplayChangeListener *dcl, { VirtualConsole *vc = container_of(dcl, VirtualConsole, gfx.dcl); - if (vc->gfx.guest_fb.dmabuf) { + if (vc->gfx.guest_fb.dmabuf && !vc->gfx.guest_fb.dmabuf->draw_submitted) { graphic_hw_gl_block(vc->gfx.dcl.con, true); vc->gfx.guest_fb.dmabuf->draw_submitted = true; } diff --git a/ui/gtk.c b/ui/gtk.c index 428f02f2dfe1da721d2ecc638ca96e40d286aad2..f16ccf9c4b90e489de076c08d378b927a8563781 100644 --- a/ui/gtk.c +++ b/ui/gtk.c @@ -1718,8 +1718,10 @@ static void gd_vc_chr_accept_input(Chardev *chr) { VCChardev *vcd = VC_CHARDEV(chr); VirtualConsole *vc = vcd->console; - - gd_vc_send_chars(vc); + + if (vc) { + gd_vc_send_chars(vc); + } } static void gd_vc_chr_set_echo(Chardev *chr, bool echo) @@ -2243,7 +2245,7 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts) { VirtualConsole *vc; - GtkDisplayState *s = g_malloc0(sizeof(*s)); + GtkDisplayState *s; GdkDisplay *window_display; GtkIconTheme *theme; char *dir; @@ -2253,6 +2255,7 @@ static void gtk_display_init(DisplayState *ds, DisplayOptions *opts) exit(1); } assert(opts->type == DISPLAY_TYPE_GTK); + s = g_malloc0(sizeof(*s)); s->opts = opts; theme = gtk_icon_theme_get_default(); diff --git a/ui/qemu-pixman.c b/ui/qemu-pixman.c index 3ab7e2e958a5912e77bc490225a6d560b2e314ae..67a88bf8dde61ae738e173cc4a79eff2a7a8c754 100644 --- a/ui/qemu-pixman.c +++ b/ui/qemu-pixman.c @@ -48,7 +48,6 @@ PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format) break; default: g_assert_not_reached(); - break; } pf.amax = (1 << pf.abits) - 1; diff --git a/ui/vnc-clipboard.c b/ui/vnc-clipboard.c index 67284b556cd88738509decc0f3e338bce181be89..2bb1b07c40428b5ba04b1d56f29eb65d62eb1ee3 100644 --- a/ui/vnc-clipboard.c +++ b/ui/vnc-clipboard.c @@ -51,8 +51,11 @@ static uint8_t *inflate_buffer(uint8_t *in, uint32_t in_len, uint32_t *size) ret = inflate(&stream, Z_FINISH); switch (ret) { case Z_OK: - case Z_STREAM_END: break; + case Z_STREAM_END: + *size = stream.total_out; + inflateEnd(&stream); + return out; case Z_BUF_ERROR: out_len <<= 1; if (out_len > (1 << 20)) { diff --git a/ui/vnc.c b/ui/vnc.c index 
af02522e8416f7949a2ba8672909e5d46f801ede..c72de81053b80fa4907300b768018b3dd75158cb 100644 --- a/ui/vnc.c +++ b/ui/vnc.c @@ -1354,12 +1354,12 @@ void vnc_disconnect_finish(VncState *vs) /* last client gone */ vnc_update_server_surface(vs->vd); } + vnc_unlock_output(vs); + if (vs->cbpeer.update.notify) { qemu_clipboard_peer_unregister(&vs->cbpeer); } - vnc_unlock_output(vs); - qemu_mutex_destroy(&vs->output_mutex); if (vs->bh != NULL) { qemu_bh_delete(vs->bh); @@ -1379,6 +1379,8 @@ void vnc_disconnect_finish(VncState *vs) g_free(vs->zrle); g_free(vs->tight); g_free(vs); + + qemu_timer_set_mode(QEMU_TIMER_USB_LAZY_MODE, QEMU_USB_CONTROLLER_UHCI); } size_t vnc_client_io_error(VncState *vs, ssize_t ret, Error *err) @@ -2219,7 +2221,7 @@ static void set_encodings(VncState *vs, int32_t *encodings, size_t n_encodings) break; case VNC_ENCODING_XVP: if (vs->vd->power_control) { - vs->features |= VNC_FEATURE_XVP; + vs->features |= VNC_FEATURE_XVP_MASK; send_xvp_message(vs, VNC_XVP_CODE_INIT); } break; @@ -2442,8 +2444,8 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) if (len == 1) { return 8; } + uint32_t dlen = abs(read_s32(data, 4)); if (len == 8) { - uint32_t dlen = abs(read_s32(data, 4)); if (dlen > (1 << 20)) { error_report("vnc: client_cut_text msg payload has %u bytes" " which exceeds our limit of 1MB.", dlen); @@ -2456,14 +2458,24 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) } if (read_s32(data, 4) < 0) { - vnc_client_cut_text_ext(vs, abs(read_s32(data, 4)), - read_u32(data, 8), data + 12); + if (!vnc_has_feature(vs, VNC_FEATURE_CLIPBOARD_EXT)) { + error_report("vnc: extended clipboard message while disabled"); + vnc_client_error(vs); + break; + } + if (dlen < 4) { + error_report("vnc: malformed payload (header less than 4 bytes)" + " in extended clipboard pseudo-encoding."); + vnc_client_error(vs); + break; + } + vnc_client_cut_text_ext(vs, dlen, read_u32(data, 8), data + 12); break; } vnc_client_cut_text(vs, read_u32(data, 4), data + 8); break; case VNC_MSG_CLIENT_XVP: - if (!(vs->features & VNC_FEATURE_XVP)) { + if (!vnc_has_feature(vs, VNC_FEATURE_XVP)) { error_report("vnc: xvp client message while disabled"); vnc_client_error(vs); break; @@ -2560,7 +2572,7 @@ static int protocol_client_msg(VncState *vs, uint8_t *data, size_t len) vs, vs->ioc, vs->as.fmt, vs->as.nchannels, vs->as.freq); break; default: - VNC_DEBUG("Invalid audio message %d\n", read_u8(data, 4)); + VNC_DEBUG("Invalid audio message %d\n", read_u8(data, 2)); vnc_client_error(vs); break; } @@ -3333,6 +3345,8 @@ static void vnc_connect(VncDisplay *vd, QIOChannelSocket *sioc, } } } + + qemu_timer_set_mode(QEMU_TIMER_USB_NORMAL_MODE, QEMU_USB_CONTROLLER_UHCI); } void vnc_start_protocol(VncState *vs) @@ -3716,11 +3730,6 @@ static int vnc_display_get_address(const char *addrstr, addr->type = SOCKET_ADDRESS_TYPE_UNIX; addr->u.q_unix.path = g_strdup(addrstr + 5); - if (websocket) { - error_setg(errp, "UNIX sockets not supported with websock"); - goto cleanup; - } - if (to) { error_setg(errp, "Port range not support with UNIX socket"); goto cleanup; @@ -3752,7 +3761,7 @@ static int vnc_display_get_address(const char *addrstr, addr->type = SOCKET_ADDRESS_TYPE_INET; inet = &addr->u.inet; - if (addrstr[0] == '[' && addrstr[hostlen - 1] == ']') { + if (hostlen && addrstr[0] == '[' && addrstr[hostlen - 1] == ']') { inet->host = g_strndup(addrstr + 1, hostlen - 2); } else { inet->host = g_strndup(addrstr, hostlen); diff --git a/util/async.c b/util/async.c index 
6f6717a34b6318ae0405d5f7f44c1d0a68339339..760ad7340409f877f243f36793eadcc8f5735e80 100644 --- a/util/async.c +++ b/util/async.c @@ -62,6 +62,7 @@ struct QEMUBH { void *opaque; QSLIST_ENTRY(QEMUBH) next; unsigned flags; + MemReentrancyGuard *reentrancy_guard; }; /* Called concurrently from any thread */ @@ -123,7 +124,7 @@ void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, } QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, - const char *name) + const char *name, MemReentrancyGuard *reentrancy_guard) { QEMUBH *bh; bh = g_new(QEMUBH, 1); @@ -132,13 +133,30 @@ QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque, .cb = cb, .opaque = opaque, .name = name, + .reentrancy_guard = reentrancy_guard, }; return bh; } void aio_bh_call(QEMUBH *bh) { + bool last_engaged_in_io = false; + + /* Make a copy of the guard-pointer as cb may free the bh */ + MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard; + if (reentrancy_guard) { + last_engaged_in_io = reentrancy_guard->engaged_in_io; + if (reentrancy_guard->engaged_in_io) { + trace_reentrant_aio(bh->ctx, bh->name); + } + reentrancy_guard->engaged_in_io = true; + } + bh->cb(bh->opaque); + + if (reentrancy_guard) { + reentrancy_guard->engaged_in_io = last_engaged_in_io; + } } /* Multiple occurrences of aio_bh_poll cannot be called concurrently. */ diff --git a/util/coroutine-ucontext.c b/util/coroutine-ucontext.c index 904b375192cab6d24c2bdea46d8c53beedb9b296..23ab7cdf74dd50fdfd688c3559ee2be0c65a116f 100644 --- a/util/coroutine-ucontext.c +++ b/util/coroutine-ucontext.c @@ -79,6 +79,19 @@ union cc_arg { int i[2]; }; +/** + * Coroutine list for libcare + */ +struct CoroutineInformation { + sigjmp_buf *env; + QLIST_ENTRY(CoroutineInformation) next; +}; + +static QemuMutex coro_mtx; +QLIST_HEAD(, CoroutineInformation) coro_info_list = QLIST_HEAD_INITIALIZER(coro_info_list); +int coro_env_offset = offsetof(struct CoroutineInformation, env); +int coro_next_offset = offsetof(struct CoroutineInformation, next); + /* * QEMU_ALWAYS_INLINE only does so if __OPTIMIZE__, so we cannot use it. * always_inline is required to avoid TSan runtime fatal errors.
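The aio_bh_call() change above is a save/set/restore guard: the flag is raised around the callback, a trace event fires if it was already raised (re-entrancy), and the previous value is restored afterwards so legitimate nesting stays balanced. A minimal sketch of that pattern, with MemReentrancyGuard reduced to its single flag and the tracepoint replaced by fprintf(); the names here are illustrative, not QEMU API:

    #include <stdbool.h>
    #include <stdio.h>

    /* One flag per guarded object, mirroring MemReentrancyGuard. */
    typedef struct Guard {
        bool engaged_in_io;
    } Guard;

    static void run_guarded(Guard *g, void (*cb)(void *), void *opaque)
    {
        bool last = g->engaged_in_io;   /* remember the previous state */

        if (last) {
            fprintf(stderr, "re-entrant callback detected\n");
        }
        g->engaged_in_io = true;
        cb(opaque);
        g->engaged_in_io = last;        /* restore, don't just clear */
    }

    static void demo_cb(void *opaque)
    {
        (void)opaque;
    }

    int main(void)
    {
        Guard g = { .engaged_in_io = false };

        run_guarded(&g, demo_cb, NULL);
        return 0;
    }

Restoring the saved value rather than unconditionally clearing the flag is what lets an outer handler legally trigger nested guarded work without a false positive once the inner call returns.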
@@ -330,3 +343,42 @@ bool qemu_in_coroutine(void) { return current && current->caller; } + +static void __attribute__((constructor)) coro_mutex_init(void) +{ + qemu_mutex_init(&coro_mtx); +} + +void qemu_coroutine_info_add(const Coroutine *co_) +{ + CoroutineUContext *co; + struct CoroutineInformation *coro_info; + + /* save coroutine env to coro_info_list */ + co = DO_UPCAST(CoroutineUContext, base, co_); + coro_info = g_malloc0(sizeof(struct CoroutineInformation)); + coro_info->env = &co->env; + + qemu_mutex_lock(&coro_mtx); + QLIST_INSERT_HEAD(&coro_info_list, coro_info, next); + qemu_mutex_unlock(&coro_mtx); +} + +void qemu_coroutine_info_delete(const Coroutine *co_) +{ + CoroutineUContext *co; + struct CoroutineInformation *coro_info; + + /* Remove relative coroutine env info from coro_info_list */ + co = DO_UPCAST(CoroutineUContext, base, co_); + + qemu_mutex_lock(&coro_mtx); + QLIST_FOREACH(coro_info, &coro_info_list, next) { + if (coro_info->env == &co->env) { + QLIST_REMOVE(coro_info, next); + g_free(coro_info); + break; + } + } + qemu_mutex_unlock(&coro_mtx); +} diff --git a/util/fdmon-epoll.c b/util/fdmon-epoll.c index e11a8a022e9822e05c267901ca2a1f859803a9b4..1683aa1105d3ab48941c970bcb4b12dcb82317c9 100644 --- a/util/fdmon-epoll.c +++ b/util/fdmon-epoll.c @@ -127,6 +127,8 @@ static bool fdmon_epoll_try_enable(AioContext *ctx) bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd) { + bool ok; + if (ctx->epollfd < 0) { return false; } @@ -136,14 +138,23 @@ bool fdmon_epoll_try_upgrade(AioContext *ctx, unsigned npfd) return false; } - if (npfd >= EPOLL_ENABLE_THRESHOLD) { - if (fdmon_epoll_try_enable(ctx)) { - return true; - } else { - fdmon_epoll_disable(ctx); - } + if (npfd < EPOLL_ENABLE_THRESHOLD) { + return false; + } + + /* The list must not change while we add fds to epoll */ + if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) { + return false; + } + + ok = fdmon_epoll_try_enable(ctx); + + qemu_lockcnt_inc_and_unlock(&ctx->list_lock); + + if (!ok) { + fdmon_epoll_disable(ctx); } - return false; + return ok; } void fdmon_epoll_setup(AioContext *ctx) diff --git a/util/fdmon-io_uring.c b/util/fdmon-io_uring.c index 1461dfa40743ba0a33dcb3e7e795a43265f4ff3d..35165bcb463edeeaee3a305319ea386826fcff5d 100644 --- a/util/fdmon-io_uring.c +++ b/util/fdmon-io_uring.c @@ -179,7 +179,12 @@ static void add_poll_remove_sqe(AioContext *ctx, AioHandler *node) { struct io_uring_sqe *sqe = get_sqe(ctx); +#ifdef LIBURING_HAVE_DATA64 + io_uring_prep_poll_remove(sqe, (__u64)(uintptr_t)node); +#else io_uring_prep_poll_remove(sqe, node); +#endif + io_uring_sqe_set_data(sqe, NULL); } /* Add a timeout that self-cancels when another cqe becomes ready */ @@ -193,6 +198,7 @@ static void add_timeout_sqe(AioContext *ctx, int64_t ns) sqe = get_sqe(ctx); io_uring_prep_timeout(sqe, &ts, 1, 0); + io_uring_sqe_set_data(sqe, NULL); } /* Add sqes from ctx->submit_list for submission */ diff --git a/util/iova-tree.c b/util/iova-tree.c index 23ea35b7a4b41f0ead7465b4f4be7d9dff163ba8..536789797e472fb947ec99a2d32c9909d3c21eed 100644 --- a/util/iova-tree.c +++ b/util/iova-tree.c @@ -16,6 +16,45 @@ struct IOVATree { GTree *tree; }; +/* Args to pass to iova_tree_alloc foreach function. 
*/ +struct IOVATreeAllocArgs { + /* Size of the desired allocation */ + size_t new_size; + + /* The minimum address allowed in the allocation */ + hwaddr iova_begin; + + /* Map at the left of the hole, can be NULL if "this" is the first one */ + const DMAMap *prev; + + /* Map at the right of the hole, can be NULL if "prev" is the last one */ + const DMAMap *this; + + /* If found, we fill in the IOVA here */ + hwaddr iova_result; + + /* Whether we have found a valid IOVA */ + bool iova_found; +}; + +typedef struct IOVATreeFindIOVAArgs { + const DMAMap *needle; + const DMAMap *result; +} IOVATreeFindIOVAArgs; + +/** + * Iterate args to the next hole + * + * @args: The alloc arguments + * @next: The next mapping in the tree. Can be NULL to signal the last one + */ +static void iova_tree_alloc_args_iterate(struct IOVATreeAllocArgs *args, + const DMAMap *next) +{ + args->prev = args->this; + args->this = next; +} + static int iova_tree_compare(gconstpointer a, gconstpointer b, gpointer data) { const DMAMap *m1 = a, *m2 = b; @@ -47,6 +86,35 @@ const DMAMap *iova_tree_find(const IOVATree *tree, const DMAMap *map) return g_tree_lookup(tree->tree, map); } +static gboolean iova_tree_find_address_iterator(gpointer key, gpointer value, + gpointer data) +{ + const DMAMap *map = key; + IOVATreeFindIOVAArgs *args = data; + const DMAMap *needle; + + g_assert(key == value); + + needle = args->needle; + if (map->translated_addr + map->size < needle->translated_addr || + needle->translated_addr + needle->size < map->translated_addr) { + return false; + } + + args->result = map; + return true; +} + +const DMAMap *iova_tree_find_iova(const IOVATree *tree, const DMAMap *map) +{ + IOVATreeFindIOVAArgs args = { + .needle = map, + }; + + g_tree_foreach(tree->tree, iova_tree_find_address_iterator, &args); + return args.result; +} + const DMAMap *iova_tree_find_address(const IOVATree *tree, hwaddr iova) { const DMAMap map = { .iova = iova, .size = 0 }; @@ -96,15 +164,115 @@ void iova_tree_foreach(IOVATree *tree, iova_tree_iterator iterator) g_tree_foreach(tree->tree, iova_tree_traverse, iterator); } -int iova_tree_remove(IOVATree *tree, const DMAMap *map) +void iova_tree_remove(IOVATree *tree, DMAMap map) { const DMAMap *overlap; - while ((overlap = iova_tree_find(tree, map))) { + while ((overlap = iova_tree_find(tree, &map))) { g_tree_remove(tree->tree, overlap); } +} - return IOVA_OK; +/** + * Try to find an unallocated IOVA range between prev and this elements. + * + * @args: Arguments to allocation + * + * Cases: + * + * (1) !prev, !this: No entries allocated, always succeed + * + * (2) !prev, this: We're iterating at the 1st element. + * + * (3) prev, !this: We're iterating at the last element. + * + * (4) prev, this: this is the most common case, we'll try to find a hole + * between "prev" and "this" mapping. + * + * Note that this function assumes the last valid iova is HWADDR_MAX, but it + * searches linearly so it's easy to discard the result if it's not the case. + */ +static void iova_tree_alloc_map_in_hole(struct IOVATreeAllocArgs *args) +{ + const DMAMap *prev = args->prev, *this = args->this; + uint64_t hole_start, hole_last; + + if (this && this->iova + this->size < args->iova_begin) { + return; + } + + hole_start = MAX(prev ? prev->iova + prev->size + 1 : 0, args->iova_begin); + hole_last = this ?
this->iova : HWADDR_MAX; + + if (hole_last - hole_start > args->new_size) { + args->iova_result = hole_start; + args->iova_found = true; + } +} + +/** + * Foreach dma node in the tree, compare if there is a hole with its previous + * node (or minimum iova address allowed) and the node. + * + * @key: Node iterating + * @value: Node iterating + * @pargs: Struct to communicate with the outside world + * + * Return: false to keep iterating, true if needs break. + */ +static gboolean iova_tree_alloc_traverse(gpointer key, gpointer value, + gpointer pargs) +{ + struct IOVATreeAllocArgs *args = pargs; + DMAMap *node = value; + + assert(key == value); + + iova_tree_alloc_args_iterate(args, node); + iova_tree_alloc_map_in_hole(args); + return args->iova_found; +} + +int iova_tree_alloc_map(IOVATree *tree, DMAMap *map, hwaddr iova_begin, + hwaddr iova_last) +{ + struct IOVATreeAllocArgs args = { + .new_size = map->size, + .iova_begin = iova_begin, + }; + + if (unlikely(iova_last < iova_begin)) { + return IOVA_ERR_INVALID; + } + + /* + * Find a valid hole for the mapping + * + * Assuming low iova_begin, so no need to do a binary search to + * locate the first node. + * + * TODO: Replace all this with g_tree_node_first/next/last when available + * (from glib since 2.68). To do it with g_tree_foreach complicates the + * code a lot. + * + */ + g_tree_foreach(tree->tree, iova_tree_alloc_traverse, &args); + if (!args.iova_found) { + /* + * Either tree is empty or the last hole is still not checked. + * g_tree_foreach does not compare (last, iova_last] range, so we check + * it here. + */ + iova_tree_alloc_args_iterate(&args, NULL); + iova_tree_alloc_map_in_hole(&args); + } + + if (!args.iova_found || args.iova_result + map->size > iova_last) { + return IOVA_ERR_NOMEM; + } + + map->iova = args.iova_result; + return iova_tree_insert(tree, map); } void iova_tree_destroy(IOVATree *tree) diff --git a/util/log.c b/util/log.c index 2ee1500beedf9630f162454a38333e46b23953b9..ed3029fe5c43e860aa1c0c3808db05aae8ff3a72 100644 --- a/util/log.c +++ b/util/log.c @@ -34,6 +34,12 @@ int qemu_loglevel; static int log_append = 0; static GArray *debug_regions; +#ifdef CONFIG_DISABLE_QEMU_LOG +int qemu_log(const char *fmt, ...) +{ + return 0; +} +#else /* Return the number of characters emitted. */ int qemu_log(const char *fmt, ...) { @@ -56,6 +62,7 @@ int qemu_log(const char *fmt, ...) rcu_read_unlock(); return ret; } +#endif static void __attribute__((__constructor__)) qemu_logfile_init(void) { diff --git a/util/main-loop.c b/util/main-loop.c index 06b18b195cfa6e52c7d43942aafd9e559f594d81..1eacf04691ce9c053c835bde7a60405a918878fd 100644 --- a/util/main-loop.c +++ b/util/main-loop.c @@ -544,9 +544,11 @@ void main_loop_wait(int nonblocking) /* Functions to operate on the main QEMU AioContext. 
*/ -QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name) +QEMUBH *qemu_bh_new_full(QEMUBHFunc *cb, void *opaque, const char *name, + MemReentrancyGuard *reentrancy_guard) { - return aio_bh_new_full(qemu_aio_context, cb, opaque, name); + return aio_bh_new_full(qemu_aio_context, cb, opaque, name, + reentrancy_guard); } /* diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c index 893d864354a1f91ad9be152630780c7089debe16..4993dd5bfad89678573e87514c0dafe1ad89c01f 100644 --- a/util/mmap-alloc.c +++ b/util/mmap-alloc.c @@ -29,6 +29,28 @@ #include #endif +size_t qemu_fd_getfiletype(int fd) +{ + struct statfs fs; + int ret; + + if (fd != -1) { + do { + ret = fstatfs(fd, &fs); + } while (ret != 0 && errno == EINTR); + + if (ret != 0) { + fprintf(stderr, "Couldn't fstatfs() fd: %s\n", + strerror(errno)); + return -1; + } + return fs.f_type; + } else { + fprintf(stderr, "fd is invalid \n"); + return -1; + } +} + size_t qemu_fd_getpagesize(int fd) { #ifdef CONFIG_LINUX diff --git a/util/oslib-posix.c b/util/oslib-posix.c index e8bdb02e1d01d35396af49d18680544144fe9cb5..18a38b94643080895dff89f1f22faed6ca9e11b8 100644 --- a/util/oslib-posix.c +++ b/util/oslib-posix.c @@ -84,6 +84,7 @@ typedef struct MemsetThread MemsetThread; static MemsetThread *memset_thread; static int memset_num_threads; +static int started_num_threads; static bool memset_thread_failed; static QemuMutex page_mutex; @@ -464,6 +465,10 @@ static void *do_touch_pages(void *arg) } qemu_mutex_unlock(&page_mutex); + while (started_num_threads != memset_num_threads) { + smp_mb(); + } + /* unblock SIGBUS */ sigemptyset(&set); sigaddset(&set, SIGBUS); @@ -529,7 +534,7 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages, memset_thread = g_new0(MemsetThread, memset_num_threads); numpages_per_thread = numpages / memset_num_threads; leftover = numpages % memset_num_threads; - for (i = 0; i < memset_num_threads; i++) { + for (i = 0, started_num_threads = 0; i < memset_num_threads; i++) { memset_thread[i].addr = addr; memset_thread[i].numpages = numpages_per_thread + (i < leftover); memset_thread[i].hpagesize = hpagesize; @@ -537,6 +542,7 @@ static bool touch_all_pages(char *area, size_t hpagesize, size_t numpages, do_touch_pages, &memset_thread[i], QEMU_THREAD_JOINABLE); addr += memset_thread[i].numpages * hpagesize; + started_num_threads++; } qemu_mutex_lock(&page_mutex); diff --git a/util/qemu-coroutine.c b/util/qemu-coroutine.c index 38fb6d3084dad2dd2145134d9e5da8f03d7169fe..9c81336d8e11bef10ef24af26f0f80c46b704c07 100644 --- a/util/qemu-coroutine.c +++ b/util/qemu-coroutine.c @@ -21,7 +21,7 @@ #include "block/aio.h" enum { - POOL_BATCH_SIZE = 64, + POOL_BATCH_SIZE = 128, }; /** Free list to speed up creation */ @@ -75,6 +75,8 @@ Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque) co = qemu_coroutine_new(); } + qemu_coroutine_info_add(co); + co->entry = entry; co->entry_arg = opaque; QSIMPLEQ_INIT(&co->co_queue_wakeup); @@ -85,6 +87,8 @@ static void coroutine_delete(Coroutine *co) { co->caller = NULL; + qemu_coroutine_info_delete(co); + if (CONFIG_COROUTINE_POOL) { if (release_pool_size < POOL_BATCH_SIZE * 2) { QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next); diff --git a/util/qemu-timer.c b/util/qemu-timer.c index f36c75e594a6f23a7b982567f81180605d17f73d..c5b6dc987c0d847c3c95dc0eee6fb385e6a5e16c 100644 --- a/util/qemu-timer.c +++ b/util/qemu-timer.c @@ -23,6 +23,7 @@ */ #include "qemu/osdep.h" +#include "qemu/log.h" #include "qemu/main-loop.h" #include "qemu/timer.h" #include 
"qemu/lockable.h" @@ -75,6 +76,74 @@ struct QEMUTimerList { QemuEvent timers_done_ev; }; +typedef struct qemu_controller_timer_state { + qemu_usb_controller_ptr controller; + int refs; +} controller_timer_state; + +typedef controller_timer_state* controller_timer_state_ptr; + +static controller_timer_state uhci_timer_state = { + .controller = NULL, + .refs = 0, +}; + +static controller_timer_state_ptr \ + qemu_usb_controller_tab[MAX_USB_CONTROLLER_TYPES] = {NULL, + &uhci_timer_state, + NULL, NULL}; + +int qemu_register_usb_controller(qemu_usb_controller_ptr controller, + unsigned int type) +{ + if (type != QEMU_USB_CONTROLLER_UHCI) { + return 0; + } + + /* for companion EHCI controller will create three UHCI controllers, + * we init it only once. + */ + if (!qemu_usb_controller_tab[type]->controller) { + qemu_log("the usb controller (%d) registed frame handler\n", type); + qemu_usb_controller_tab[type]->controller = controller; + } + + return 0; +} + +int qemu_timer_set_mode(enum qemu_timer_mode mode, unsigned int type) +{ + if (type != QEMU_USB_CONTROLLER_UHCI) { + qemu_log("the usb controller (%d) no need change frame frep\n", type); + return 0; + } + + if (!qemu_usb_controller_tab[type]->controller) { + qemu_log("the usb controller (%d) not registed yet\n", type); + return 0; + } + + if (mode == QEMU_TIMER_USB_NORMAL_MODE) { + if (qemu_usb_controller_tab[type]->refs++ > 0) { + return 0; + } + qemu_usb_controller_tab[type]->controller-> + qemu_set_freq(QEMU_USB_NORMAL_FREQ); + qemu_log("Set the controller (%d) of freq %d HZ,\n", + type, QEMU_USB_NORMAL_FREQ); + } else { + if (--qemu_usb_controller_tab[type]->refs > 0) { + return 0; + } + qemu_usb_controller_tab[type]->controller-> + qemu_set_freq(QEMU_USB_LAZY_FREQ); + qemu_log("Set the controller(type:%d) of freq %d HZ,\n", + type, QEMU_USB_LAZY_FREQ); + } + + return 0; +} + /** * qemu_clock_ptr: * @type: type of clock @@ -261,6 +330,9 @@ int64_t qemu_clock_deadline_ns_all(QEMUClockType type, int attr_mask) } QLIST_FOREACH(timer_list, &clock->timerlists, list) { + if (!qatomic_read(&timer_list->active_timers)) { + continue; + } qemu_mutex_lock(&timer_list->active_timers_lock); ts = timer_list->active_timers; /* Skip all external timers */ diff --git a/util/range.c b/util/range.c index 098d9d2dc0b7bd791e9e6bc7d52aabaafe175c24..83d1a6c302c277234518fc1b9fe3cbfc19c6d56e 100644 --- a/util/range.c +++ b/util/range.c @@ -65,6 +65,7 @@ GList *range_list_insert(GList *list, Range *data) range_extend(l->data, l->next->data); g_free(l->next->data); new_l = g_list_delete_link(list, l->next); + l->next = NULL; assert(new_l == list); } diff --git a/util/thread-pool.c b/util/thread-pool.c index d763cea505b68575af8e1b39cd95d02caa73801a..7e9e2c178b8413a2a28d014ae31871dd1693a557 100644 --- a/util/thread-pool.c +++ b/util/thread-pool.c @@ -108,9 +108,8 @@ static void *worker_thread(void *opaque) smp_wmb(); req->state = THREAD_DONE; - qemu_mutex_lock(&pool->lock); - qemu_bh_schedule(pool->completion_bh); + qemu_mutex_lock(&pool->lock); } pool->cur_threads--; diff --git a/util/trace-events b/util/trace-events index c8f53d7d9fc3ddd395099467e52e6a8dbb8575a3..dc3b1eb3bfea7c7bfa5ee6ed2418e78a2b4c2c93 100644 --- a/util/trace-events +++ b/util/trace-events @@ -11,6 +11,7 @@ poll_remove(void *ctx, void *node, int fd) "ctx %p node %p fd %d" # async.c aio_co_schedule(void *ctx, void *co) "ctx %p co %p" aio_co_schedule_bh_cb(void *ctx, void *co) "ctx %p co %p" +reentrant_aio(void *ctx, const char *name) "ctx %p name %s" # thread-pool.c thread_pool_submit(void *pool, 
void *req, void *opaque) "pool %p req %p opaque %p" diff --git a/util/userfaultfd.c b/util/userfaultfd.c index f1cd6af2b19e12af94eea6f72eb3b7558a5abd50..50d9246287377a1ead09f86956833344ec3b9028 100644 --- a/util/userfaultfd.c +++ b/util/userfaultfd.c @@ -315,31 +315,3 @@ int uffd_read_events(int uffd_fd, struct uffd_msg *msgs, int count) return (int) (res / sizeof(struct uffd_msg)); } - -/** - * uffd_poll_events: poll UFFD file descriptor for read - * - * Returns true if events are available for read, false otherwise - * - * @uffd_fd: UFFD file descriptor - * @tmo: timeout value - */ -bool uffd_poll_events(int uffd_fd, int tmo) -{ - int res; - struct pollfd poll_fd = { .fd = uffd_fd, .events = POLLIN, .revents = 0 }; - - do { - res = poll(&poll_fd, 1, tmo); - } while (res < 0 && errno == EINTR); - - if (res == 0) { - return false; - } - if (res < 0) { - error_report("uffd_poll_events() failed: errno=%i", errno); - return false; - } - - return (poll_fd.revents & POLLIN) != 0; -} diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c index 783d847a6db3021d16fb1048e751167818119b24..eda82447bd4a5863149dbf8a202070a6232d9aed 100644 --- a/util/vhost-user-server.c +++ b/util/vhost-user-server.c @@ -64,6 +64,18 @@ static void vmsg_close_fds(VhostUserMsg *vmsg) static void vmsg_unblock_fds(VhostUserMsg *vmsg) { int i; + + /* + * These messages carry fds used to map memory, not to send/receive messages, + * so this operation is useless. In addition, in some systems this + * operation can fail (e.g. in macOS setting an fd returned by shm_open() + * non-blocking fails with errno = ENOTTY) + */ + if (vmsg->request == VHOST_USER_ADD_MEM_REG || + vmsg->request == VHOST_USER_SET_MEM_TABLE) { + return; + } + for (i = 0; i < vmsg->fd_num; i++) { qemu_set_nonblock(vmsg->fds[i]); }
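The vmsg_unblock_fds() change above splits the incoming descriptors by role: only fds that will be used as a message transport need O_NONBLOCK, while fds that merely back guest memory regions are left untouched (and may not accept the flag at all, as with macOS shm_open() fds). A sketch of that filtering, using invented message ids rather than the real VhostUserMsg definitions:

    #include <fcntl.h>
    #include <stdbool.h>

    /* Hypothetical stand-ins for VHOST_USER_SET_MEM_TABLE/ADD_MEM_REG. */
    enum { MSG_SET_MEM_TABLE = 5, MSG_ADD_MEM_REG = 37 };

    static bool carries_mapping_fds(int request)
    {
        return request == MSG_SET_MEM_TABLE || request == MSG_ADD_MEM_REG;
    }

    /* Set O_NONBLOCK only on fds used to send/receive messages. */
    static int unblock_transport_fds(int request, const int *fds, int nfds)
    {
        if (carries_mapping_fds(request)) {
            return 0;               /* memory-mapping fds: leave them alone */
        }
        for (int i = 0; i < nfds; i++) {
            int flags = fcntl(fds[i], F_GETFL);

            if (flags == -1 || fcntl(fds[i], F_SETFL, flags | O_NONBLOCK) == -1) {
                return -1;
            }
        }
        return 0;
    }

    int main(void)
    {
        int fd = 0;                 /* stdin, just to exercise the helper */

        return unblock_transport_fds(0, &fd, 1) == 0 ? 0 : 1;
    }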