-bugfix: fix qmp command migrate-set-parameters -some bugfixes about ARM hot-plugged CPUs -hw/core/machine:Fix the missing consideration of cluster-id -test/tcg:Fix target-specific Makefile variable path for user-mode -tests:add (riscv virt) machine mapping to testenv -Make a little improvement in curl and hw/riscv -qemu support for loongarch -hw/pvrdma: Protect against buggy or malicious guest driver -hw/audio/intel-hda:fix stream reset -dsoundaudio:fix crackling audio recordings -add notify-vm-exit support for i386 -block-backend: prevent dangling BDS pointers across aio_poll() -net:Fix uninitialized data usage -net/eth:Don't consider ESP to be an IPv6 option header -hw/net/vmxnet3:Log guest-triggerable errors using LOG_GUEST_ERROR Signed-off-by: FeiXu <xufei30@huawei.com>
15467 lines
507 KiB
Diff
15467 lines
507 KiB
Diff
From 9aefc417ef8c705503b51b844b489598448656d0 Mon Sep 17 00:00:00 2001
|
|
From: lixianglai <lixianglai@loongson.cn>
|
|
Date: Tue, 7 Feb 2023 07:15:39 -0500
|
|
Subject: [PATCH] Add target/loongarch64.
|
|
|
|
1.Add loongarch cpu simulation.
|
|
2.Add loongarch csr simulation.
|
|
3.Add loongarch fpu support.
|
|
4.Add loongarch gdb support.
|
|
5.Add loongarch kvm support.
|
|
6.Add loongarch stable timer.
|
|
7.Add loongarch tcg related support.
|
|
|
|
Signed-off-by: lixianglai <lixianglai@loongson.cn>
|
|
---
|
|
target/Kconfig | 1 +
|
|
target/loongarch64/Kconfig | 2 +
|
|
target/loongarch64/arch_dump.c | 179 ++
|
|
target/loongarch64/cpu-csr.h | 880 ++++++++
|
|
target/loongarch64/cpu-param.h | 46 +
|
|
target/loongarch64/cpu-qom.h | 54 +
|
|
target/loongarch64/cpu.c | 575 +++++
|
|
target/loongarch64/cpu.h | 359 +++
|
|
target/loongarch64/csr_helper.c | 697 ++++++
|
|
target/loongarch64/fpu.c | 25 +
|
|
target/loongarch64/fpu_helper.c | 891 ++++++++
|
|
target/loongarch64/fpu_helper.h | 127 ++
|
|
target/loongarch64/gdbstub.c | 164 ++
|
|
target/loongarch64/helper.c | 726 +++++++
|
|
target/loongarch64/helper.h | 178 ++
|
|
target/loongarch64/insn.decode | 532 +++++
|
|
target/loongarch64/instmap.h | 217 ++
|
|
target/loongarch64/internal.h | 207 ++
|
|
target/loongarch64/kvm.c | 1366 ++++++++++++
|
|
target/loongarch64/kvm_larch.h | 49 +
|
|
target/loongarch64/larch-defs.h | 42 +
|
|
target/loongarch64/machine.c | 423 ++++
|
|
target/loongarch64/meson.build | 35 +
|
|
target/loongarch64/op_helper.c | 485 +++++
|
|
target/loongarch64/stabletimer.c | 117 +
|
|
target/loongarch64/tlb_helper.c | 641 ++++++
|
|
target/loongarch64/trans.inc.c | 3482 ++++++++++++++++++++++++++++++
|
|
target/loongarch64/translate.c | 2705 +++++++++++++++++++++++
|
|
target/meson.build | 1 +
|
|
29 files changed, 15206 insertions(+)
|
|
create mode 100644 target/loongarch64/Kconfig
|
|
create mode 100644 target/loongarch64/arch_dump.c
|
|
create mode 100644 target/loongarch64/cpu-csr.h
|
|
create mode 100644 target/loongarch64/cpu-param.h
|
|
create mode 100644 target/loongarch64/cpu-qom.h
|
|
create mode 100644 target/loongarch64/cpu.c
|
|
create mode 100644 target/loongarch64/cpu.h
|
|
create mode 100644 target/loongarch64/csr_helper.c
|
|
create mode 100644 target/loongarch64/fpu.c
|
|
create mode 100644 target/loongarch64/fpu_helper.c
|
|
create mode 100644 target/loongarch64/fpu_helper.h
|
|
create mode 100644 target/loongarch64/gdbstub.c
|
|
create mode 100644 target/loongarch64/helper.c
|
|
create mode 100644 target/loongarch64/helper.h
|
|
create mode 100644 target/loongarch64/insn.decode
|
|
create mode 100644 target/loongarch64/instmap.h
|
|
create mode 100644 target/loongarch64/internal.h
|
|
create mode 100644 target/loongarch64/kvm.c
|
|
create mode 100644 target/loongarch64/kvm_larch.h
|
|
create mode 100644 target/loongarch64/larch-defs.h
|
|
create mode 100644 target/loongarch64/machine.c
|
|
create mode 100644 target/loongarch64/meson.build
|
|
create mode 100644 target/loongarch64/op_helper.c
|
|
create mode 100644 target/loongarch64/stabletimer.c
|
|
create mode 100644 target/loongarch64/tlb_helper.c
|
|
create mode 100644 target/loongarch64/trans.inc.c
|
|
create mode 100644 target/loongarch64/translate.c
|
|
|
|
diff --git a/target/Kconfig b/target/Kconfig
|
|
index a8d6cb1e97..b2abc7b60b 100644
|
|
--- a/target/Kconfig
|
|
+++ b/target/Kconfig
|
|
@@ -4,6 +4,7 @@ source avr/Kconfig
|
|
source cris/Kconfig
|
|
source hppa/Kconfig
|
|
source i386/Kconfig
|
|
+source loongarch64/Kconfig
|
|
source m68k/Kconfig
|
|
source microblaze/Kconfig
|
|
source mips/Kconfig
|
|
diff --git a/target/loongarch64/Kconfig b/target/loongarch64/Kconfig
|
|
new file mode 100644
|
|
index 0000000000..46b26b1a85
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/Kconfig
|
|
@@ -0,0 +1,2 @@
|
|
+config LOONGARCH64
|
|
+ bool
|
|
diff --git a/target/loongarch64/arch_dump.c b/target/loongarch64/arch_dump.c
|
|
new file mode 100644
|
|
index 0000000000..adce817d54
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/arch_dump.c
|
|
@@ -0,0 +1,179 @@
|
|
+/*
|
|
+ * Support for writing ELF notes for the LoongArch architecture
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "cpu.h"
|
|
+#include "elf.h"
|
|
+#include "sysemu/dump.h"
|
|
+#include "internal.h"
|
|
+
|
|
+/* Mirrors struct user_pt_regs from arch/loongarch/include/uapi/asm/ptrace.h */
+struct loongarch_user_regs {
+    uint64_t gpr[32];       /* general purpose registers r0..r31 */
+    uint64_t lo;
+    uint64_t hi;
+    uint64_t csr_era;       /* exception return address */
+    uint64_t csr_badvaddr;  /* faulting virtual address */
+    uint64_t csr_crmd;      /* current mode */
+    uint64_t csr_ecfg;      /* exception configuration */
+    uint64_t pad[7];        /* pad out to the kernel's fixed layout */
+} QEMU_PACKED;
+
+/* Layout is ABI: the note is consumed by external core-dump tools. */
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_user_regs) != 360);
|
|
+
|
|
+/* Mirrors struct elf_prstatus from include/uapi/linux/elfcore.h */
+struct loongarch_elf_prstatus {
+    char pad1[32];          /* 32 == offsetof(struct elf_prstatus, pr_pid) */
+    uint32_t pr_pid;
+    /*
+     * 76 == offsetof(struct elf_prstatus, pr_reg) -
+     * offsetof(struct elf_prstatus, pr_ppid)
+     */
+    char pad2[76];
+    struct loongarch_user_regs pr_reg;  /* CPU register snapshot */
+    uint32_t pr_fpvalid;    /* non-zero when an FP note follows */
+    char pad3[4];
+} QEMU_PACKED;
+
+/* Layout is ABI: must match the kernel's elf_prstatus exactly. */
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_elf_prstatus) != 480);
|
|
+
|
|
+/*
+ * FP register state written to the NT_PRFPREG core note.
+ * Presumably mirrors the kernel's user FP context for LoongArch
+ * (32 64-bit FP registers followed by the FIR and FCSR control
+ * words) — confirm against arch/loongarch/include/uapi/asm/ptrace.h.
+ */
|
|
+
|
|
+/* FP state payload of the NT_PRFPREG note. */
+struct loongarch_fpu_struct {
+    uint64_t fpr[32];       /* FP registers f0..f31 */
+    unsigned int fir;       /* FP implementation register */
+    unsigned int fcsr;      /* FP control/status register */
+} QEMU_PACKED;
+
+/* 32 * 8 + 2 * 4 bytes; fixed by the core-note ABI. */
+QEMU_BUILD_BUG_ON(sizeof(struct loongarch_fpu_struct) != 264);
|
|
+
|
|
+/* A complete ELF note: header, name, and one of the two payloads. */
+struct loongarch_note {
+    Elf64_Nhdr hdr;
+    char name[8];           /* align_up(sizeof("CORE"), 4) */
+    union
+    {
+        struct loongarch_elf_prstatus prstatus; /* NT_PRSTATUS payload */
+        struct loongarch_fpu_struct fpu;        /* NT_PRFPREG payload */
+    };
+} QEMU_PACKED;
+
+/* Size of the note up to (but excluding) the payload union. */
+#define LOONGARCH_NOTE_HEADER_SIZE offsetof(struct loongarch_note, prstatus)
+#define LOONGARCH_PRSTATUS_NOTE_SIZE \
+    (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_elf_prstatus))
+#define LOONGARCH_PRFPREG_NOTE_SIZE \
+    (LOONGARCH_NOTE_HEADER_SIZE + sizeof(struct loongarch_fpu_struct))
|
|
+
|
|
+/*
+ * Fill in the common header of an ELF note: zero the whole note,
+ * set the Nhdr fields (converted to dump endianness) and copy in
+ * the note name.
+ */
+static void loongarch_note_init(struct loongarch_note *note, DumpState *s,
+                                const char *name, Elf64_Word namesz,
+                                Elf64_Word type, Elf64_Word descsz)
+{
+    /* Zero first so padding bytes in the dump are deterministic. */
+    memset(note, 0, sizeof(*note));
+
+    note->hdr.n_type = cpu_to_dump32(s, type);
+    note->hdr.n_namesz = cpu_to_dump32(s, namesz);
+    note->hdr.n_descsz = cpu_to_dump32(s, descsz);
+
+    /* namesz includes the NUL; caller guarantees it fits name[8]. */
+    memcpy(note->name, name, namesz);
+}
|
|
+
|
|
+/*
+ * Emit the NT_PRFPREG note (FP registers + FCSR) for one vCPU.
+ * Returns 0 on success, -1 if the write callback fails.
+ */
+static int loongarch_write_elf64_fprpreg(WriteCoreDumpFunction f,
+                                         CPULOONGARCHState *env, int cpuid,
+                                         DumpState *s)
+{
+    struct loongarch_note note;
+    int ret, i;
+
+    loongarch_note_init(&note, s, "CORE", 5, NT_PRFPREG, sizeof(note.fpu));
+
+    /*
+     * fcsr is a 32-bit field: convert with cpu_to_dump32.  The previous
+     * cpu_to_dump64 truncated the converted value and would keep the
+     * wrong half on a cross-endian dump.
+     */
+    note.fpu.fcsr = cpu_to_dump32(s, env->active_fpu.fcsr0);
+
+    for (i = 0; i < 32; ++i) {
+        note.fpu.fpr[i] = cpu_to_dump64(s, env->active_fpu.fpr[i].fd);
+    }
+
+    ret = f(&note, LOONGARCH_PRFPREG_NOTE_SIZE, s);
+    if (ret < 0) {
+        return -1;
+    }
+
+    return 0;
+}
|
|
+
|
|
+/*
+ * Write the per-vCPU ELF core notes: an NT_PRSTATUS note with the
+ * general registers and key CSRs, followed by the NT_PRFPREG note.
+ * Returns 0 on success, -1 if any write callback fails.
+ *
+ * Note: the original text was mojibake-corrupted ("&note" had become
+ * "¬e" via an HTML entity); the address-of expressions are restored.
+ */
+int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
+                                   int cpuid, void *opaque)
+{
+    struct loongarch_note note;
+    CPULOONGARCHState *env = &LOONGARCH_CPU(cs)->env;
+    DumpState *s = opaque;
+    int ret, i;
+
+    loongarch_note_init(&note, s, "CORE", 5, NT_PRSTATUS,
+                        sizeof(note.prstatus));
+
+    note.prstatus.pr_pid = cpu_to_dump32(s, cpuid);
+    /* An FP note always follows (written below). */
+    note.prstatus.pr_fpvalid = cpu_to_dump32(s, 1);
+
+    for (i = 0; i < 32; ++i) {
+        note.prstatus.pr_reg.gpr[i] = cpu_to_dump64(s, env->active_tc.gpr[i]);
+    }
+    note.prstatus.pr_reg.csr_era = cpu_to_dump64(s, env->CSR_ERA);
+    note.prstatus.pr_reg.csr_badvaddr = cpu_to_dump64(s, env->CSR_BADV);
+    note.prstatus.pr_reg.csr_crmd = cpu_to_dump64(s, env->CSR_CRMD);
+    note.prstatus.pr_reg.csr_ecfg = cpu_to_dump64(s, env->CSR_ECFG);
+
+    ret = f(&note, LOONGARCH_PRSTATUS_NOTE_SIZE, s);
+    if (ret < 0) {
+        return -1;
+    }
+
+    /* Append the FP register note for this vCPU. */
+    ret = loongarch_write_elf64_fprpreg(f, env, cpuid, s);
+    if (ret < 0) {
+        return -1;
+    }
+
+    return ret;
+}
|
|
+
|
|
+/*
+ * Describe the target for the dump layer: 64-bit little-endian
+ * LoongArch ELF.  Always succeeds.
+ */
+int cpu_get_dump_info(ArchDumpInfo *info,
+                      const GuestPhysBlockList *guest_phys_blocks)
+{
+    info->d_class = ELFCLASS64;
+    info->d_endian = ELFDATA2LSB;
+    info->d_machine = EM_LOONGARCH;
+
+    return 0;
+}
|
|
+
|
|
+/*
+ * Total size of the per-vCPU notes (PRSTATUS + PRFPREG) times the
+ * number of vCPUs.  Returns 0 for any class other than ELFCLASS64,
+ * since only 64-bit dumps are produced for this target.
+ */
+ssize_t cpu_get_note_size(int class, int machine, int nr_cpus)
+{
+    size_t per_cpu = 0;
+
+    if (class == ELFCLASS64) {
+        per_cpu = LOONGARCH_PRSTATUS_NOTE_SIZE + LOONGARCH_PRFPREG_NOTE_SIZE;
+    }
+
+    return per_cpu * nr_cpus;
+}
|
|
diff --git a/target/loongarch64/cpu-csr.h b/target/loongarch64/cpu-csr.h
|
|
new file mode 100644
|
|
index 0000000000..278a66c395
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/cpu-csr.h
|
|
@@ -0,0 +1,880 @@
|
|
+/*
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef _CPU_CSR_H_
|
|
+#define _CPU_CSR_H_
|
|
+
|
|
+/* basic CSR register */
|
|
+#define LOONGARCH_CSR_CRMD 0x0 /* 32 current mode info */
|
|
+#define CSR_CRMD_DACM_SHIFT 7
|
|
+#define CSR_CRMD_DACM_WIDTH 2
|
|
+#define CSR_CRMD_DACM (0x3UL << CSR_CRMD_DACM_SHIFT)
|
|
+#define CSR_CRMD_DACF_SHIFT 5
|
|
+#define CSR_CRMD_DACF_WIDTH 2
|
|
+#define CSR_CRMD_DACF (0x3UL << CSR_CRMD_DACF_SHIFT)
|
|
+#define CSR_CRMD_PG_SHIFT 4
|
|
+#define CSR_CRMD_PG (0x1UL << CSR_CRMD_PG_SHIFT)
|
|
+#define CSR_CRMD_DA_SHIFT 3
|
|
+#define CSR_CRMD_DA (0x1UL << CSR_CRMD_DA_SHIFT)
|
|
+#define CSR_CRMD_IE_SHIFT 2
|
|
+#define CSR_CRMD_IE (0x1UL << CSR_CRMD_IE_SHIFT)
|
|
+#define CSR_CRMD_PLV_SHIFT 0
|
|
+#define CSR_CRMD_PLV_WIDTH 2
|
|
+#define CSR_CRMD_PLV (0x3UL << CSR_CRMD_PLV_SHIFT)
|
|
+
|
|
+#define PLV_USER 3
|
|
+#define PLV_KERN 0
|
|
+#define PLV_MASK 0x3
|
|
+
|
|
+#define LOONGARCH_CSR_PRMD 0x1 /* 32 prev-exception mode info */
|
|
+#define CSR_PRMD_PIE_SHIFT 2
|
|
+#define CSR_PRMD_PIE (0x1UL << CSR_PRMD_PIE_SHIFT)
|
|
+#define CSR_PRMD_PPLV_SHIFT 0
|
|
+#define CSR_PRMD_PPLV_WIDTH 2
|
|
+#define CSR_PRMD_PPLV (0x3UL << CSR_PRMD_PPLV_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_EUEN 0x2 /* 32 coprocessor enable */
|
|
+#define CSR_EUEN_LBTEN_SHIFT 3
|
|
+#define CSR_EUEN_LBTEN (0x1UL << CSR_EUEN_LBTEN_SHIFT)
|
|
+#define CSR_EUEN_LASXEN_SHIFT 2
|
|
+#define CSR_EUEN_LASXEN (0x1UL << CSR_EUEN_LASXEN_SHIFT)
|
|
+#define CSR_EUEN_LSXEN_SHIFT 1
|
|
+#define CSR_EUEN_LSXEN (0x1UL << CSR_EUEN_LSXEN_SHIFT)
|
|
+#define CSR_EUEN_FPEN_SHIFT 0
|
|
+#define CSR_EUEN_FPEN (0x1UL << CSR_EUEN_FPEN_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_MISC 0x3 /* 32 misc config */
|
|
+
|
|
+#define LOONGARCH_CSR_ECFG 0x4 /* 32 exception config */
|
|
+#define CSR_ECFG_VS_SHIFT 16
|
|
+#define CSR_ECFG_VS_WIDTH 3
|
|
+#define CSR_ECFG_VS (0x7UL << CSR_ECFG_VS_SHIFT)
|
|
+#define CSR_ECFG_IM_SHIFT 0
|
|
+#define CSR_ECFG_IM_WIDTH 13
|
|
+#define CSR_ECFG_IM (0x1fffUL << CSR_ECFG_IM_SHIFT)
|
|
+
|
|
+#define CSR_ECFG_IPMASK 0x00001fff
|
|
+
|
|
+#define LOONGARCH_CSR_ESTAT 0x5 /* Exception status */
|
|
+#define CSR_ESTAT_ESUBCODE_SHIFT 22
|
|
+#define CSR_ESTAT_ESUBCODE_WIDTH 9
|
|
+#define CSR_ESTAT_ESUBCODE (0x1ffULL << CSR_ESTAT_ESUBCODE_SHIFT)
|
|
+#define CSR_ESTAT_EXC_SH 16
|
|
+#define CSR_ESTAT_EXC_WIDTH 5
|
|
+#define CSR_ESTAT_EXC (0x1fULL << CSR_ESTAT_EXC_SH)
|
|
+#define CSR_ESTAT_IS_SHIFT 0
|
|
+#define CSR_ESTAT_IS_WIDTH 15
|
|
+#define CSR_ESTAT_IS (0x7fffULL << CSR_ESTAT_IS_SHIFT)
|
|
+
|
|
+#define CSR_ESTAT_IPMASK 0x00001fff
|
|
+
|
|
+#define EXCODE_IP 64
|
|
+#define EXCCODE_RSV 0
|
|
+#define EXCCODE_TLBL 1
|
|
+#define EXCCODE_TLBS 2
|
|
+#define EXCCODE_TLBI 3
|
|
+#define EXCCODE_MOD 4
|
|
+#define EXCCODE_TLBRI 5
|
|
+#define EXCCODE_TLBXI 6
|
|
+#define EXCCODE_TLBPE 7
|
|
+#define EXCCODE_ADE 8
|
|
+#define EXCCODE_UNALIGN 9
|
|
+#define EXCCODE_OOB 10
|
|
+#define EXCCODE_SYS 11
|
|
+#define EXCCODE_BP 12
|
|
+#define EXCCODE_RI 13
|
|
+#define EXCCODE_IPE 14
|
|
+#define EXCCODE_FPDIS 15
|
|
+#define EXCCODE_LSXDIS 16
|
|
+#define EXCCODE_LASXDIS 17
|
|
+#define EXCCODE_FPE 18
|
|
+#define EXCCODE_WATCH 19
|
|
+#define EXCCODE_BTDIS 20
|
|
+#define EXCCODE_BTE 21
|
|
+#define EXCCODE_PSI 22
|
|
+#define EXCCODE_HYP 23
|
|
+#define EXCCODE_FC 24
|
|
+#define EXCCODE_SE 25
|
|
+
|
|
+#define LOONGARCH_CSR_ERA 0x6 /* 64 error PC */
|
|
+#define LOONGARCH_CSR_BADV 0x7 /* 64 bad virtual address */
|
|
+#define LOONGARCH_CSR_BADI 0x8 /* 32 bad instruction */
|
|
+#define LOONGARCH_CSR_EEPN 0xc /* 64 exception enter base address */
|
|
+#define LOONGARCH_EEPN_CPUID (0x3ffULL << 0)
|
|
+
|
|
+#define CU_FPE 1
|
|
+#define CU_LSXE (1 << 1)
|
|
+#define CU_LASXE (1 << 2)
|
|
+#define CU_LBTE (1 << 3)
|
|
+
|
|
+/* TLB related CSR register : start with TLB if no pagewalk */
|
|
+/* 32 TLB Index, EHINV, PageSize, is_gtlb */
|
|
+#define LOONGARCH_CSR_TLBIDX 0x10
|
|
+#define CSR_TLBIDX_EHINV_SHIFT 31
|
|
+#define CSR_TLBIDX_EHINV (0x1ULL << CSR_TLBIDX_EHINV_SHIFT)
|
|
+#define CSR_TLBIDX_PS_SHIFT 24
|
|
+#define CSR_TLBIDX_PS_WIDTH 6
|
|
+#define CSR_TLBIDX_PS (0x3fULL << CSR_TLBIDX_PS_SHIFT)
|
|
+#define CSR_TLBIDX_IDX_SHIFT 0
|
|
+#define CSR_TLBIDX_IDX_WIDTH 12
|
|
+#define CSR_TLBIDX_IDX (0xfffULL << CSR_TLBIDX_IDX_SHIFT)
|
|
+#define CSR_TLBIDX_SIZEM 0x3f000000
|
|
+#define CSR_TLBIDX_SIZE CSR_TLBIDX_PS_SHIFT
|
|
+#define CSR_TLBIDX_IDXM 0xfff
|
|
+
|
|
+#define LOONGARCH_CSR_TLBEHI 0x11 /* 64 TLB EntryHi without ASID */
|
|
+#define LOONGARCH_CSR_TLBELO0 0x12 /* 64 TLB EntryLo0 */
|
|
+#define CSR_TLBLO0_RPLV_SHIFT 63
|
|
+#define CSR_TLBLO0_RPLV (0x1ULL << CSR_TLBLO0_RPLV_SHIFT)
|
|
+#define CSR_TLBLO0_XI_SHIFT 62
|
|
+#define CSR_TLBLO0_XI (0x1ULL << CSR_TLBLO0_XI_SHIFT)
|
|
+#define CSR_TLBLO0_RI_SHIFT 61
|
|
+#define CSR_TLBLO0_RI (0x1ULL << CSR_TLBLO0_RI_SHIFT)
|
|
+#define CSR_TLBLO0_PPN_SHIFT 12
|
|
+#define CSR_TLBLO0_PPN_WIDTH 36 /* ignore lower 12bits */
|
|
+#define CSR_TLBLO0_PPN (0xfffffffffULL << CSR_TLBLO0_PPN_SHIFT)
|
|
+#define CSR_TLBLO0_GLOBAL_SHIFT 6
|
|
+#define CSR_TLBLO0_GLOBAL (0x1ULL << CSR_TLBLO0_GLOBAL_SHIFT)
|
|
+#define CSR_TLBLO0_CCA_SHIFT 4
|
|
+#define CSR_TLBLO0_CCA_WIDTH 2
|
|
+#define CSR_TLBLO0_CCA (0x3ULL << CSR_TLBLO0_CCA_SHIFT)
|
|
+#define CSR_TLBLO0_PLV_SHIFT 2
|
|
+#define CSR_TLBLO0_PLV_WIDTH 2
|
|
+#define CSR_TLBLO0_PLV (0x3ULL << CSR_TLBLO0_PLV_SHIFT)
|
|
+#define CSR_TLBLO0_WE_SHIFT 1
|
|
+#define CSR_TLBLO0_WE (0x1ULL << CSR_TLBLO0_WE_SHIFT)
|
|
+#define CSR_TLBLO0_V_SHIFT 0
|
|
+#define CSR_TLBLO0_V (0x1ULL << CSR_TLBLO0_V_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_TLBELO1 0x13 /* 64 TLB EntryLo1 */
|
|
+#define CSR_TLBLO1_RPLV_SHIFT 63
|
|
+#define CSR_TLBLO1_RPLV (0x1ULL << CSR_TLBLO1_RPLV_SHIFT)
|
|
+#define CSR_TLBLO1_XI_SHIFT 62
|
|
+#define CSR_TLBLO1_XI (0x1ULL << CSR_TLBLO1_XI_SHIFT)
|
|
+#define CSR_TLBLO1_RI_SHIFT 61
|
|
+#define CSR_TLBLO1_RI (0x1ULL << CSR_TLBLO1_RI_SHIFT)
|
|
+#define CSR_TLBLO1_PPN_SHIFT 12
|
|
+#define CSR_TLBLO1_PPN_WIDTH 36 /* ignore lower 12bits */
|
|
+#define CSR_TLBLO1_PPN (0xfffffffffULL << CSR_TLBLO1_PPN_SHIFT)
|
|
+#define CSR_TLBLO1_GLOBAL_SHIFT 6
|
|
+#define CSR_TLBLO1_GLOBAL (0x1ULL << CSR_TLBLO1_GLOBAL_SHIFT)
|
|
+#define CSR_TLBLO1_CCA_SHIFT 4
|
|
+#define CSR_TLBLO1_CCA_WIDTH 2
|
|
+#define CSR_TLBLO1_CCA (0x3ULL << CSR_TLBLO1_CCA_SHIFT)
|
|
+#define CSR_TLBLO1_PLV_SHIFT 2
|
|
+#define CSR_TLBLO1_PLV_WIDTH 2
|
|
+#define CSR_TLBLO1_PLV (0x3ULL << CSR_TLBLO1_PLV_SHIFT)
|
|
+#define CSR_TLBLO1_WE_SHIFT 1
|
|
+#define CSR_TLBLO1_WE (0x1ULL << CSR_TLBLO1_WE_SHIFT)
|
|
+#define CSR_TLBLO1_V_SHIFT 0
|
|
+#define CSR_TLBLO1_V (0x1ULL << CSR_TLBLO1_V_SHIFT)
|
|
+
|
|
+#define LOONGARCH_ENTRYLO_RI (1ULL << 61)
|
|
+#define LOONGARCH_ENTRYLO_XI (1ULL << 62)
|
|
+
|
|
+#define LOONGARCH_CSR_TLBWIRED 0x14 /* 32 TLB wired */
|
|
+#define LOONGARCH_CSR_GTLBC 0x15 /* guest-related TLB */
|
|
+#define CSR_GTLBC_RID_SHIFT 16
|
|
+#define CSR_GTLBC_RID_WIDTH 8
|
|
+#define CSR_GTLBC_RID (0xffULL << CSR_GTLBC_RID_SHIFT)
|
|
+#define CSR_GTLBC_TOTI_SHIFT 13
|
|
+#define CSR_GTLBC_TOTI (0x1ULL << CSR_GTLBC_TOTI_SHIFT)
|
|
+#define CSR_GTLBC_USERID_SHIFT 12
|
|
+#define CSR_GTLBC_USERID (0x1ULL << CSR_GTLBC_USERID_SHIFT)
|
|
+#define CSR_GTLBC_GMTLBSZ_SHIFT 0
|
|
+#define CSR_GTLBC_GMTLBSZ_WIDTH 6
|
|
+#define CSR_GTLBC_GMTLBSZ (0x3fULL << CSR_GTLBC_GVTLBSZ_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_TRGP 0x16 /* guest-related TLB */
|
|
+#define CSR_TRGP_RID_SHIFT 16
|
|
+#define CSR_TRGP_RID_WIDTH 8
|
|
+#define CSR_TRGP_RID (0xffULL << CSR_TRGP_RID_SHIFT)
|
|
+#define CSR_TRGP_GTLB_SHIFT 0
|
|
+#define CSR_TRGP_GTLB (1 << CSR_TRGP_GTLB_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_ASID 0x18 /* 64 ASID */
|
|
+#define CSR_ASID_BIT_SHIFT 16 /* ASIDBits */
|
|
+#define CSR_ASID_BIT_WIDTH 8
|
|
+#define CSR_ASID_BIT (0xffULL << CSR_ASID_BIT_SHIFT)
|
|
+#define CSR_ASID_ASID_SHIFT 0
|
|
+#define CSR_ASID_ASID_WIDTH 10
|
|
+#define CSR_ASID_ASID (0x3ffULL << CSR_ASID_ASID_SHIFT)
|
|
+
|
|
+/* 64 page table base address when badv[47] = 0 */
|
|
+#define LOONGARCH_CSR_PGDL 0x19
|
|
+/* 64 page table base address when badv[47] = 1 */
|
|
+#define LOONGARCH_CSR_PGDH 0x1a
|
|
+#define LOONGARCH_CSR_PGD 0x1b /* 64 page table base */
|
|
+#define LOONGARCH_CSR_PWCTL0 0x1c /* 64 PWCtl0 */
|
|
+#define CSR_PWCTL0_PTEW_SHIFT 30
|
|
+#define CSR_PWCTL0_PTEW_WIDTH 2
|
|
+#define CSR_PWCTL0_PTEW (0x3ULL << CSR_PWCTL0_PTEW_SHIFT)
|
|
+#define CSR_PWCTL0_DIR1WIDTH_SHIFT 25
|
|
+#define CSR_PWCTL0_DIR1WIDTH_WIDTH 5
|
|
+#define CSR_PWCTL0_DIR1WIDTH (0x1fULL << CSR_PWCTL0_DIR1WIDTH_SHIFT)
|
|
+#define CSR_PWCTL0_DIR1BASE_SHIFT 20
|
|
+#define CSR_PWCTL0_DIR1BASE_WIDTH 5
|
|
+#define CSR_PWCTL0_DIR1BASE (0x1fULL << CSR_PWCTL0_DIR1BASE_SHIFT)
|
|
+#define CSR_PWCTL0_DIR0WIDTH_SHIFT 15
|
|
+#define CSR_PWCTL0_DIR0WIDTH_WIDTH 5
|
|
+#define CSR_PWCTL0_DIR0WIDTH (0x1fULL << CSR_PWCTL0_DIR0WIDTH_SHIFT)
|
|
+#define CSR_PWCTL0_DIR0BASE_SHIFT 10
|
|
+#define CSR_PWCTL0_DIR0BASE_WIDTH 5
|
|
+#define CSR_PWCTL0_DIR0BASE (0x1fULL << CSR_PWCTL0_DIR0BASE_SHIFT)
|
|
+#define CSR_PWCTL0_PTWIDTH_SHIFT 5
|
|
+#define CSR_PWCTL0_PTWIDTH_WIDTH 5
|
|
+#define CSR_PWCTL0_PTWIDTH (0x1fULL << CSR_PWCTL0_PTWIDTH_SHIFT)
|
|
+#define CSR_PWCTL0_PTBASE_SHIFT 0
|
|
+#define CSR_PWCTL0_PTBASE_WIDTH 5
|
|
+#define CSR_PWCTL0_PTBASE (0x1fULL << CSR_PWCTL0_PTBASE_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_PWCTL1 0x1d /* 64 PWCtl1 */
|
|
+#define CSR_PWCTL1_DIR3WIDTH_SHIFT 18
|
|
+#define CSR_PWCTL1_DIR3WIDTH_WIDTH 5
|
|
+#define CSR_PWCTL1_DIR3WIDTH (0x1fULL << CSR_PWCTL1_DIR3WIDTH_SHIFT)
|
|
+#define CSR_PWCTL1_DIR3BASE_SHIFT 12
|
|
+#define CSR_PWCTL1_DIR3BASE_WIDTH 5
|
|
+#define CSR_PWCTL1_DIR3BASE (0x1fULL << CSR_PWCTL0_DIR3BASE_SHIFT)
|
|
+#define CSR_PWCTL1_DIR2WIDTH_SHIFT 6
|
|
+#define CSR_PWCTL1_DIR2WIDTH_WIDTH 5
|
|
+#define CSR_PWCTL1_DIR2WIDTH (0x1fULL << CSR_PWCTL1_DIR2WIDTH_SHIFT)
|
|
+#define CSR_PWCTL1_DIR2BASE_SHIFT 0
|
|
+#define CSR_PWCTL1_DIR2BASE_WIDTH 5
|
|
+#define CSR_PWCTL1_DIR2BASE (0x1fULL << CSR_PWCTL0_DIR2BASE_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_STLBPGSIZE 0x1e /* 64 */
|
|
+#define CSR_STLBPGSIZE_PS_WIDTH 6
|
|
+#define CSR_STLBPGSIZE_PS (0x3f)
|
|
+
|
|
+#define LOONGARCH_CSR_RVACFG 0x1f
|
|
+#define CSR_RVACFG_RDVA_WIDTH 4
|
|
+#define CSR_RVACFG_RDVA (0xf)
|
|
+
|
|
+/* read only CSR register : start with CPU */
|
|
+#define LOONGARCH_CSR_CPUID 0x20 /* 32 CPU core number */
|
|
+#define CSR_CPUID_CID_WIDTH 9
|
|
+#define CSR_CPUID_CID (0x1ff)
|
|
+
|
|
+#define LOONGARCH_CSR_PRCFG1 0x21 /* 32 CPU info */
|
|
+#define CSR_CONF1_VSMAX_SHIFT 12
|
|
+#define CSR_CONF1_VSMAX_WIDTH 3
|
|
+#define CSR_CONF1_VSMAX (7ULL << CSR_CONF1_VSMAX_SHIFT)
|
|
+/* stable timer bits - 1, 0x2f = 47*/
|
|
+#define CSR_CONF1_TMRBITS_SHIFT 4
|
|
+#define CSR_CONF1_TMRBITS_WIDTH 8
|
|
+#define CSR_CONF1_TMRBITS (0xffULL << CSR_CONF1_TMRBITS_SHIFT)
|
|
+#define CSR_CONF1_KSNUM_SHIFT 0
|
|
+#define CSR_CONF1_KSNUM_WIDTH 4
|
|
+#define CSR_CONF1_KSNUM (0x8)
|
|
+
|
|
+#define LOONGARCH_CSR_PRCFG2 0x22
|
|
+#define CSR_CONF2_PGMASK_SUPP 0x3ffff000
|
|
+
|
|
+#define LOONGARCH_CSR_PRCFG3 0x23
|
|
+#define CSR_CONF3_STLBIDX_SHIFT 20
|
|
+#define CSR_CONF3_STLBIDX_WIDTH 6
|
|
+#define CSR_CONF3_STLBIDX (0x3fULL << CSR_CONF3_STLBIDX_SHIFT)
|
|
+#define CSR_STLB_SETS 256
|
|
+#define CSR_CONF3_STLBWAYS_SHIFT 12
|
|
+#define CSR_CONF3_STLBWAYS_WIDTH 8
|
|
+#define CSR_CONF3_STLBWAYS (0xffULL << CSR_CONF3_STLBWAYS_SHIFT)
|
|
+#define CSR_STLBWAYS_SIZE 8
|
|
+#define CSR_CONF3_MTLBSIZE_SHIFT 4
|
|
+#define CSR_CONF3_MTLBSIZE_WIDTH 8
|
|
+#define CSR_CONF3_MTLBSIZE (0xffULL << CSR_CONF3_MTLBSIZE_SHIFT)
|
|
+/* mean VTLB 64 index */
|
|
+#define CSR_MTLB_SIZE 64
|
|
+#define CSR_CONF3_TLBORG_SHIFT 0
|
|
+#define CSR_CONF3_TLBORG_WIDTH 4
|
|
+#define CSR_CONF3_TLBORG (0xfULL << CSR_CONF3_TLBORG_SHIFT)
|
|
+/* mean use MTLB+STLB */
|
|
+#define TLB_ORG 2
|
|
+
|
|
+/* Kscratch : start with KS */
|
|
+#define LOONGARCH_CSR_KS0 0x30 /* 64 */
|
|
+#define LOONGARCH_CSR_KS1 0x31 /* 64 */
|
|
+#define LOONGARCH_CSR_KS2 0x32 /* 64 */
|
|
+#define LOONGARCH_CSR_KS3 0x33 /* 64 */
|
|
+#define LOONGARCH_CSR_KS4 0x34 /* 64 */
|
|
+#define LOONGARCH_CSR_KS5 0x35 /* 64 */
|
|
+#define LOONGARCH_CSR_KS6 0x36 /* 64 */
|
|
+#define LOONGARCH_CSR_KS7 0x37 /* 64 */
|
|
+#define LOONGARCH_CSR_KS8 0x38 /* 64 */
|
|
+
|
|
+/* timer : start with TM */
|
|
+#define LOONGARCH_CSR_TMID 0x40 /* 32 timer ID */
|
|
+
|
|
+#define LOONGARCH_CSR_TCFG 0x41 /* 64 timer config */
|
|
+#define CSR_TCFG_VAL_SHIFT 2
|
|
+#define CSR_TCFG_VAL_WIDTH 48
|
|
+#define CSR_TCFG_VAL (0x3fffffffffffULL << CSR_TCFG_VAL_SHIFT)
|
|
+#define CSR_TCFG_PERIOD_SHIFT 1
|
|
+#define CSR_TCFG_PERIOD (0x1ULL << CSR_TCFG_PERIOD_SHIFT)
|
|
+#define CSR_TCFG_EN (0x1)
|
|
+
|
|
+#define LOONGARCH_CSR_TVAL 0x42 /* 64 timer ticks remain */
|
|
+
|
|
+#define LOONGARCH_CSR_CNTC 0x43 /* 64 timer offset */
|
|
+
|
|
+#define LOONGARCH_CSR_TINTCLR 0x44 /* 64 timer interrupt clear */
|
|
+#define CSR_TINTCLR_TI_SHIFT 0
|
|
+#define CSR_TINTCLR_TI (1 << CSR_TINTCLR_TI_SHIFT)
|
|
+
|
|
+/* guest : start with GST */
|
|
+#define LOONGARCH_CSR_GSTAT 0x50 /* 32 basic guest info */
|
|
+#define CSR_GSTAT_GID_SHIFT 16
|
|
+#define CSR_GSTAT_GID_WIDTH 8
|
|
+#define CSR_GSTAT_GID (0xffULL << CSR_GSTAT_GID_SHIFT)
|
|
+#define CSR_GSTAT_GIDBIT_SHIFT 4
|
|
+#define CSR_GSTAT_GIDBIT_WIDTH 6
|
|
+#define CSR_GSTAT_GIDBIT (0x3fULL << CSR_GSTAT_GIDBIT_SHIFT)
|
|
+#define CSR_GSTAT_PVM_SHIFT 1
|
|
+#define CSR_GSTAT_PVM (0x1ULL << CSR_GSTAT_PVM_SHIFT)
|
|
+#define CSR_GSTAT_VM_SHIFT 0
|
|
+#define CSR_GSTAT_VM (0x1ULL << CSR_GSTAT_VM_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_GCFG 0x51 /* 32 guest config */
|
|
+#define CSR_GCFG_GPERF_SHIFT 24
|
|
+#define CSR_GCFG_GPERF_WIDTH 3
|
|
+#define CSR_GCFG_GPERF (0x7ULL << CSR_GCFG_GPERF_SHIFT)
|
|
+#define CSR_GCFG_GCI_SHIFT 20
|
|
+#define CSR_GCFG_GCI_WIDTH 2
|
|
+#define CSR_GCFG_GCI (0x3ULL << CSR_GCFG_GCI_SHIFT)
|
|
+#define CSR_GCFG_GCI_ALL (0x0ULL << CSR_GCFG_GCI_SHIFT)
|
|
+#define CSR_GCFG_GCI_HIT (0x1ULL << CSR_GCFG_GCI_SHIFT)
|
|
+#define CSR_GCFG_GCI_SECURE (0x2ULL << CSR_GCFG_GCI_SHIFT)
|
|
+#define CSR_GCFG_GCIP_SHIFT 16
|
|
+#define CSR_GCFG_GCIP (0xfULL << CSR_GCFG_GCIP_SHIFT)
|
|
+#define CSR_GCFG_GCIP_ALL (0x1ULL << CSR_GCFG_GCIP_SHIFT)
|
|
+#define CSR_GCFG_GCIP_HIT (0x1ULL << (CSR_GCFG_GCIP_SHIFT + 1))
|
|
+#define CSR_GCFG_GCIP_SECURE (0x1ULL << (CSR_GCFG_GCIP_SHIFT + 2))
|
|
+#define CSR_GCFG_TORU_SHIFT 15
|
|
+#define CSR_GCFG_TORU (0x1ULL << CSR_GCFG_TORU_SHIFT)
|
|
+#define CSR_GCFG_TORUP_SHIFT 14
|
|
+#define CSR_GCFG_TORUP (0x1ULL << CSR_GCFG_TORUP_SHIFT)
|
|
+#define CSR_GCFG_TOP_SHIFT 13
|
|
+#define CSR_GCFG_TOP (0x1ULL << CSR_GCFG_TOP_SHIFT)
|
|
+#define CSR_GCFG_TOPP_SHIFT 12
|
|
+#define CSR_GCFG_TOPP (0x1ULL << CSR_GCFG_TOPP_SHIFT)
|
|
+#define CSR_GCFG_TOE_SHIFT 11
|
|
+#define CSR_GCFG_TOE (0x1ULL << CSR_GCFG_TOE_SHIFT)
|
|
+#define CSR_GCFG_TOEP_SHIFT 10
|
|
+#define CSR_GCFG_TOEP (0x1ULL << CSR_GCFG_TOEP_SHIFT)
|
|
+#define CSR_GCFG_TIT_SHIFT 9
|
|
+#define CSR_GCFG_TIT (0x1ULL << CSR_GCFG_TIT_SHIFT)
|
|
+#define CSR_GCFG_TITP_SHIFT 8
|
|
+#define CSR_GCFG_TITP (0x1ULL << CSR_GCFG_TITP_SHIFT)
|
|
+#define CSR_GCFG_SIT_SHIFT 7
|
|
+#define CSR_GCFG_SIT (0x1ULL << CSR_GCFG_SIT_SHIFT)
|
|
+#define CSR_GCFG_SITP_SHIFT 6
|
|
+#define CSR_GCFG_SITP (0x1ULL << CSR_GCFG_SITP_SHIFT)
|
|
+#define CSR_GCFG_CACTRL_SHIFT 4
|
|
+#define CSR_GCFG_CACTRL_WIDTH 2
|
|
+#define CSR_GCFG_CACTRL (0x3ULL << CSR_GCFG_CACTRL_SHIFT)
|
|
+#define CSR_GCFG_CACTRL_GUEST (0x0ULL << CSR_GCFG_CACTRL_SHIFT)
|
|
+#define CSR_GCFG_CACTRL_ROOT (0x1ULL << CSR_GCFG_CACTRL_SHIFT)
|
|
+#define CSR_GCFG_CACTRL_NEST (0x2ULL << CSR_GCFG_CACTRL_SHIFT)
|
|
+#define CSR_GCFG_CCCP_WIDTH 4
|
|
+#define CSR_GCFG_CCCP (0xf)
|
|
+#define CSR_GCFG_CCCP_GUEST (0x1ULL << 0)
|
|
+#define CSR_GCFG_CCCP_ROOT (0x1ULL << 1)
|
|
+#define CSR_GCFG_CCCP_NEST (0x1ULL << 2)
|
|
+
|
|
+#define LOONGARCH_CSR_GINTC 0x52 /* 64 guest exception control */
|
|
+#define CSR_GINTC_HC_SHIFT 16
|
|
+#define CSR_GINTC_HC_WIDTH 8
|
|
+#define CSR_GINTC_HC (0xffULL << CSR_GINTC_HC_SHIFT)
|
|
+#define CSR_GINTC_PIP_SHIFT 8
|
|
+#define CSR_GINTC_PIP_WIDTH 8
|
|
+#define CSR_GINTC_PIP (0xffULL << CSR_GINTC_PIP_SHIFT)
|
|
+#define CSR_GINTC_VIP_SHIFT 0
|
|
+#define CSR_GINTC_VIP_WIDTH 8
|
|
+#define CSR_GINTC_VIP (0xff)
|
|
+
|
|
+#define LOONGARCH_CSR_GCNTC 0x53 /* 64 guest timer offset */
|
|
+
|
|
+/* LLBCTL */
|
|
+#define LOONGARCH_CSR_LLBCTL 0x60 /* 32 csr number to be changed */
|
|
+#define CSR_LLBCTL_ROLLB_SHIFT 0
|
|
+#define CSR_LLBCTL_ROLLB (1ULL << CSR_LLBCTL_ROLLB_SHIFT)
|
|
+#define CSR_LLBCTL_WCLLB_SHIFT 1
|
|
+#define CSR_LLBCTL_WCLLB (1ULL << CSR_LLBCTL_WCLLB_SHIFT)
|
|
+#define CSR_LLBCTL_KLO_SHIFT 2
|
|
+#define CSR_LLBCTL_KLO (1ULL << CSR_LLBCTL_KLO_SHIFT)
|
|
+
|
|
+/* implement dependent */
|
|
+#define LOONGARCH_CSR_IMPCTL1 0x80 /* 32 loongarch config */
|
|
+#define CSR_MISPEC_SHIFT 20
|
|
+#define CSR_MISPEC_WIDTH 8
|
|
+#define CSR_MISPEC (0xffULL << CSR_MISPEC_SHIFT)
|
|
+#define CSR_SSEN_SHIFT 18
|
|
+#define CSR_SSEN (1ULL << CSR_SSEN_SHIFT)
|
|
+#define CSR_SCRAND_SHIFT 17
|
|
+#define CSR_SCRAND (1ULL << CSR_SCRAND_SHIFT)
|
|
+#define CSR_LLEXCL_SHIFT 16
|
|
+#define CSR_LLEXCL (1ULL << CSR_LLEXCL_SHIFT)
|
|
+#define CSR_DISVC_SHIFT 15
|
|
+#define CSR_DISVC (1ULL << CSR_DISVC_SHIFT)
|
|
+#define CSR_VCLRU_SHIFT 14
|
|
+#define CSR_VCLRU (1ULL << CSR_VCLRU_SHIFT)
|
|
+#define CSR_DCLRU_SHIFT 13
|
|
+#define CSR_DCLRU (1ULL << CSR_DCLRU_SHIFT)
|
|
+#define CSR_FASTLDQ_SHIFT 12
|
|
+#define CSR_FASTLDQ (1ULL << CSR_FASTLDQ_SHIFT)
|
|
+#define CSR_USERCAC_SHIFT 11
|
|
+#define CSR_USERCAC (1ULL << CSR_USERCAC_SHIFT)
|
|
+#define CSR_ANTI_MISPEC_SHIFT 10
|
|
+#define CSR_ANTI_MISPEC (1ULL << CSR_ANTI_MISPEC_SHIFT)
|
|
+#define CSR_ANTI_FLUSHSFB_SHIFT 9
|
|
+#define CSR_ANTI_FLUSHSFB (1ULL << CSR_ANTI_FLUSHSFB_SHIFT)
|
|
+#define CSR_STFILL_SHIFT 8
|
|
+#define CSR_STFILL (1ULL << CSR_STFILL_SHIFT)
|
|
+#define CSR_LIFEP_SHIFT 7
|
|
+#define CSR_LIFEP (1ULL << CSR_LIFEP_SHIFT)
|
|
+#define CSR_LLSYNC_SHIFT 6
|
|
+#define CSR_LLSYNC (1ULL << CSR_LLSYNC_SHIFT)
|
|
+#define CSR_BRBTDIS_SHIFT 5
|
|
+#define CSR_BRBTDIS (1ULL << CSR_BRBTDIS_SHIFT)
|
|
+#define CSR_RASDIS_SHIFT 4
|
|
+#define CSR_RASDIS (1ULL << CSR_RASDIS_SHIFT)
|
|
+#define CSR_STPRE_SHIFT 2
|
|
+#define CSR_STPRE_WIDTH 2
|
|
+#define CSR_STPRE (3ULL << CSR_STPRE_SHIFT)
|
|
+#define CSR_INSTPRE_SHIFT 1
|
|
+#define CSR_INSTPRE (1ULL << CSR_INSTPRE_SHIFT)
|
|
+#define CSR_DATAPRE_SHIFT 0
|
|
+#define CSR_DATAPRE (1ULL << CSR_DATAPRE_SHIFT)
|
|
+
|
|
+#define LOONGARCH_CSR_IMPCTL2 0x81 /* 32 Flush */
|
|
+#define CSR_IMPCTL2_MTLB_SHIFT 0
|
|
+#define CSR_IMPCTL2_MTLB (1ULL << CSR_IMPCTL2_MTLB_SHIFT)
|
|
+#define CSR_IMPCTL2_STLB_SHIFT 1
|
|
+#define CSR_IMPCTL2_STLB (1ULL << CSR_IMPCTL2_STLB_SHIFT)
|
|
+#define CSR_IMPCTL2_DTLB_SHIFT 2
|
|
+#define CSR_IMPCTL2_DTLB (1ULL << CSR_IMPCTL2_DTLB_SHIFT)
|
|
+#define CSR_IMPCTL2_ITLB_SHIFT 3
|
|
+#define CSR_IMPCTL2_ITLB (1ULL << CSR_IMPCTL2_ITLB_SHIFT)
|
|
+#define CSR_IMPCTL2_BTAC_SHIFT 4
|
|
+#define CSR_IMPCTL2_BTAC (1ULL << CSR_IMPCTL2_BTAC_SHIFT)
|
|
+
|
|
+#define LOONGARCH_FLUSH_VTLB 1
|
|
+#define LOONGARCH_FLUSH_FTLB (1 << 1)
|
|
+#define LOONGARCH_FLUSH_DTLB (1 << 2)
|
|
+#define LOONGARCH_FLUSH_ITLB (1 << 3)
|
|
+#define LOONGARCH_FLUSH_BTAC (1 << 4)
|
|
+
|
|
+#define LOONGARCH_CSR_GNMI 0x82
|
|
+
|
|
+/* TLB Refill Only */
|
|
+#define LOONGARCH_CSR_TLBRENT 0x88 /* 64 TLB refill exception address */
|
|
+#define LOONGARCH_CSR_TLBRBADV 0x89 /* 64 TLB refill badvaddr */
|
|
+#define LOONGARCH_CSR_TLBRERA 0x8a /* 64 TLB refill ERA */
|
|
+#define LOONGARCH_CSR_TLBRSAVE 0x8b /* 64 KScratch for TLB refill */
|
|
+#define LOONGARCH_CSR_TLBRELO0 0x8c /* 64 TLB refill entrylo0 */
|
|
+#define LOONGARCH_CSR_TLBRELO1 0x8d /* 64 TLB refill entrylo1 */
|
|
+#define LOONGARCH_CSR_TLBREHI 0x8e /* 64 TLB refill entryhi */
|
|
+#define LOONGARCH_CSR_TLBRPRMD 0x8f /* 64 TLB refill mode info */
|
|
+
|
|
+/* error related */
|
|
+#define LOONGARCH_CSR_ERRCTL 0x90 /* 32 ERRCTL */
|
|
+#define LOONGARCH_CSR_ERRINFO 0x91
|
|
+#define LOONGARCH_CSR_ERRINFO1 0x92
|
|
+#define LOONGARCH_CSR_ERRENT 0x93 /* 64 error exception base */
|
|
+#define LOONGARCH_CSR_ERRERA 0x94 /* 64 error exception PC */
|
|
+#define LOONGARCH_CSR_ERRSAVE 0x95 /* 64 KScratch for error exception */
|
|
+
|
|
+#define LOONGARCH_CSR_CTAG 0x98 /* 64 TagLo + TagHi */
|
|
+
|
|
+/* direct map windows */
|
|
+#define LOONGARCH_CSR_DMWIN0 0x180 /* 64 direct map win0: MEM & IF */
|
|
+#define LOONGARCH_CSR_DMWIN1 0x181 /* 64 direct map win1: MEM & IF */
|
|
+#define LOONGARCH_CSR_DMWIN2 0x182 /* 64 direct map win2: MEM */
|
|
+#define LOONGARCH_CSR_DMWIN3 0x183 /* 64 direct map win3: MEM */
|
|
+#define CSR_DMW_PLV0 0x1
|
|
+#define CSR_DMW_PLV1 0x2
|
|
+#define CSR_DMW_PLV2 0x4
|
|
+#define CSR_DMW_PLV3 0x8
|
|
+#define CSR_DMW_BASE_SH 48
|
|
+#define dmwin_va2pa(va) (va & (((unsigned long)1 << CSR_DMW_BASE_SH) - 1))
|
|
+
|
|
+/* performance counter */
|
|
+#define LOONGARCH_CSR_PERFCTRL0 0x200 /* 32 perf event 0 config */
|
|
+#define LOONGARCH_CSR_PERFCNTR0 0x201 /* 64 perf event 0 count value */
|
|
+#define LOONGARCH_CSR_PERFCTRL1 0x202 /* 32 perf event 1 config */
|
|
+#define LOONGARCH_CSR_PERFCNTR1 0x203 /* 64 perf event 1 count value */
|
|
+#define LOONGARCH_CSR_PERFCTRL2 0x204 /* 32 perf event 2 config */
|
|
+#define LOONGARCH_CSR_PERFCNTR2 0x205 /* 64 perf event 2 count value */
|
|
+#define LOONGARCH_CSR_PERFCTRL3 0x206 /* 32 perf event 3 config */
|
|
+#define LOONGARCH_CSR_PERFCNTR3 0x207 /* 64 perf event 3 count value */
|
|
+#define CSR_PERFCTRL_PLV0 (1ULL << 16)
|
|
+#define CSR_PERFCTRL_PLV1 (1ULL << 17)
|
|
+#define CSR_PERFCTRL_PLV2 (1ULL << 18)
|
|
+#define CSR_PERFCTRL_PLV3 (1ULL << 19)
|
|
+#define CSR_PERFCTRL_IE (1ULL << 20)
|
|
+#define CSR_PERFCTRL_EVENT 0x3ff
|
|
+
|
|
+/* debug */
|
|
+#define LOONGARCH_CSR_MWPC 0x300 /* data breakpoint config */
|
|
+#define LOONGARCH_CSR_MWPS 0x301 /* data breakpoint status */
|
|
+
|
|
+#define LOONGARCH_CSR_DB0ADDR 0x310 /* data breakpoint 0 address */
|
|
+#define LOONGARCH_CSR_DB0MASK 0x311 /* data breakpoint 0 mask */
|
|
+#define LOONGARCH_CSR_DB0CTL 0x312 /* data breakpoint 0 control */
|
|
+#define LOONGARCH_CSR_DB0ASID 0x313 /* data breakpoint 0 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_DB1ADDR 0x318 /* data breakpoint 1 address */
|
|
+#define LOONGARCH_CSR_DB1MASK 0x319 /* data breakpoint 1 mask */
|
|
+#define LOONGARCH_CSR_DB1CTL 0x31a /* data breakpoint 1 control */
|
|
+#define LOONGARCH_CSR_DB1ASID 0x31b /* data breakpoint 1 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_DB2ADDR 0x320 /* data breakpoint 2 address */
|
|
+#define LOONGARCH_CSR_DB2MASK 0x321 /* data breakpoint 2 mask */
|
|
+#define LOONGARCH_CSR_DB2CTL 0x322 /* data breakpoint 2 control */
|
|
+#define LOONGARCH_CSR_DB2ASID 0x323 /* data breakpoint 2 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_DB3ADDR 0x328 /* data breakpoint 3 address */
|
|
+#define LOONGARCH_CSR_DB3MASK 0x329 /* data breakpoint 3 mask */
|
|
+#define LOONGARCH_CSR_DB3CTL 0x32a /* data breakpoint 3 control */
|
|
+#define LOONGARCH_CSR_DB3ASID 0x32b /* data breakpoint 3 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_FWPC 0x380 /* instruction breakpoint config */
|
|
+#define LOONGARCH_CSR_FWPS 0x381 /* instruction breakpoint status */
|
|
+
|
|
+#define LOONGARCH_CSR_IB0ADDR 0x390 /* inst breakpoint 0 address */
|
|
+#define LOONGARCH_CSR_IB0MASK 0x391 /* inst breakpoint 0 mask */
|
|
+#define LOONGARCH_CSR_IB0CTL 0x392 /* inst breakpoint 0 control */
|
|
+#define LOONGARCH_CSR_IB0ASID 0x393 /* inst breakpoint 0 asid */
|
|
+#define LOONGARCH_CSR_IB1ADDR 0x398 /* inst breakpoint 1 address */
|
|
+#define LOONGARCH_CSR_IB1MASK 0x399 /* inst breakpoint 1 mask */
|
|
+#define LOONGARCH_CSR_IB1CTL 0x39a /* inst breakpoint 1 control */
|
|
+#define LOONGARCH_CSR_IB1ASID 0x39b /* inst breakpoint 1 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_IB2ADDR 0x3a0 /* inst breakpoint 2 address */
|
|
+#define LOONGARCH_CSR_IB2MASK 0x3a1 /* inst breakpoint 2 mask */
|
|
+#define LOONGARCH_CSR_IB2CTL 0x3a2 /* inst breakpoint 2 control */
|
|
+#define LOONGARCH_CSR_IB2ASID 0x3a3 /* inst breakpoint 2 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_IB3ADDR 0x3a8 /* inst breakpoint 3 address */
|
|
+#define LOONGARCH_CSR_IB3MASK 0x3a9 /* inst breakpoint 3 mask */
|
|
+#define LOONGARCH_CSR_IB3CTL 0x3aa /* inst breakpoint 3 control */
|
|
+#define LOONGARCH_CSR_IB3ASID 0x3ab /* inst breakpoint 3 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_IB4ADDR 0x3b0 /* inst breakpoint 4 address */
|
|
+#define LOONGARCH_CSR_IB4MASK 0x3b1 /* inst breakpoint 4 mask */
|
|
+#define LOONGARCH_CSR_IB4CTL 0x3b2 /* inst breakpoint 4 control */
|
|
+#define LOONGARCH_CSR_IB4ASID 0x3b3 /* inst breakpoint 4 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_IB5ADDR 0x3b8 /* inst breakpoint 5 address */
|
|
+#define LOONGARCH_CSR_IB5MASK 0x3b9 /* inst breakpoint 5 mask */
|
|
+#define LOONGARCH_CSR_IB5CTL 0x3ba /* inst breakpoint 5 control */
|
|
+#define LOONGARCH_CSR_IB5ASID 0x3bb /* inst breakpoint 5 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_IB6ADDR 0x3c0 /* inst breakpoint 6 address */
|
|
+#define LOONGARCH_CSR_IB6MASK 0x3c1 /* inst breakpoint 6 mask */
|
|
+#define LOONGARCH_CSR_IB6CTL 0x3c2 /* inst breakpoint 6 control */
|
|
+#define LOONGARCH_CSR_IB6ASID 0x3c3 /* inst breakpoint 6 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_IB7ADDR 0x3c8 /* inst breakpoint 7 address */
|
|
+#define LOONGARCH_CSR_IB7MASK 0x3c9 /* inst breakpoint 7 mask */
|
|
+#define LOONGARCH_CSR_IB7CTL 0x3ca /* inst breakpoint 7 control */
|
|
+#define LOONGARCH_CSR_IB7ASID 0x3cb /* inst breakpoint 7 asid */
|
|
+
|
|
+#define LOONGARCH_CSR_DEBUG 0x500 /* debug config */
|
|
+#define CSR_DEBUG_DM 0
|
|
+#define CSR_DEBUG_DMVER 1
|
|
+#define CSR_DEBUG_DINT 8
|
|
+#define CSR_DEBUG_DBP 9
|
|
+#define CSR_DEBUG_DIB 10
|
|
+#define CSR_DEBUG_DDB 11
|
|
+
|
|
+#define LOONGARCH_CSR_DERA 0x501 /* debug era */
|
|
+#define LOONGARCH_CSR_DESAVE 0x502 /* debug save */
|
|
+
|
|
+#define LOONGARCH_CSR_PRID 0xc0 /* 32 LOONGARCH CP0 PRID */
|
|
+
|
|
+#define LOONGARCH_CPUCFG0 0x0
|
|
+#define CPUCFG0_3A5000_PRID 0x0014c010
|
|
+
|
|
+#define LOONGARCH_CPUCFG1 0x1
|
|
+#define CPUCFG1_ISGR32 BIT(0)
|
|
+#define CPUCFG1_ISGR64 BIT(1)
|
|
+#define CPUCFG1_PAGING BIT(2)
|
|
+#define CPUCFG1_IOCSR BIT(3)
|
|
+#define CPUCFG1_PABITS (47 << 4)
|
|
+#define CPUCFG1_VABITS (47 << 12)
|
|
+#define CPUCFG1_UAL BIT(20)
|
|
+#define CPUCFG1_RI BIT(21)
|
|
+#define CPUCFG1_XI BIT(22)
|
|
+#define CPUCFG1_RPLV BIT(23)
|
|
+#define CPUCFG1_HUGEPG BIT(24)
|
|
+#define CPUCFG1_IOCSRBRD BIT(25)
|
|
+#define CPUCFG1_MSGINT BIT(26)
|
|
+
|
|
+#define LOONGARCH_CPUCFG2 0x2
|
|
+#define CPUCFG2_FP BIT(0)
|
|
+#define CPUCFG2_FPSP BIT(1)
|
|
+#define CPUCFG2_FPDP BIT(2)
|
|
+#define CPUCFG2_FPVERS (0 << 3)
|
|
+#define CPUCFG2_LSX BIT(6)
|
|
+#define CPUCFG2_LASX BIT(7)
|
|
+#define CPUCFG2_COMPLEX BIT(8)
|
|
+#define CPUCFG2_CRYPTO BIT(9)
|
|
+#define CPUCFG2_LVZP BIT(10)
|
|
+#define CPUCFG2_LVZVER (0 << 11)
|
|
+#define CPUCFG2_LLFTP BIT(14)
|
|
+#define CPUCFG2_LLFTPREV (1 << 15)
|
|
+#define CPUCFG2_X86BT BIT(18)
|
|
+#define CPUCFG2_ARMBT BIT(19)
|
|
+#define CPUCFG2_MIPSBT BIT(20)
|
|
+#define CPUCFG2_LSPW BIT(21)
|
|
+#define CPUCFG2_LAM BIT(22)
|
|
+
|
|
+#define LOONGARCH_CPUCFG3 0x3
|
|
+#define CPUCFG3_CCDMA BIT(0)
|
|
+#define CPUCFG3_SFB BIT(1)
|
|
+#define CPUCFG3_UCACC BIT(2)
|
|
+#define CPUCFG3_LLEXC BIT(3)
|
|
+#define CPUCFG3_SCDLY BIT(4)
|
|
+#define CPUCFG3_LLDBAR BIT(5)
|
|
+#define CPUCFG3_ITLBT BIT(6)
|
|
+#define CPUCFG3_ICACHET BIT(7)
|
|
+#define CPUCFG3_SPW_LVL (4 << 8)
|
|
+#define CPUCFG3_SPW_HG_HF BIT(11)
|
|
+#define CPUCFG3_RVA BIT(12)
|
|
+#define CPUCFG3_RVAMAX (7 << 13)
|
|
+
|
|
+#define LOONGARCH_CPUCFG4 0x4
|
|
+#define CCFREQ_100M 100000000 /* 100M */
|
|
+
|
|
+#define LOONGARCH_CPUCFG5 0x5
|
|
+#define CPUCFG5_CCMUL 1
|
|
+#define CPUCFG5_CCDIV (1 << 16)
|
|
+
|
|
+#define LOONGARCH_CPUCFG6 0x6
|
|
+#define CPUCFG6_PMP BIT(0)
|
|
+#define CPUCFG6_PAMVER (1 << 1)
|
|
+#define CPUCFG6_PMNUM (3 << 4)
|
|
+#define CPUCFG6_PMBITS (63 << 8)
|
|
+#define CPUCFG6_UPM BIT(14)
|
|
+
|
|
+#define LOONGARCH_CPUCFG16 0x10
|
|
+#define CPUCFG16_L1_IUPRE BIT(0)
|
|
+#define CPUCFG16_L1_UNIFY BIT(1)
|
|
+#define CPUCFG16_L1_DPRE BIT(2)
|
|
+#define CPUCFG16_L2_IUPRE BIT(3)
|
|
+#define CPUCFG16_L2_IUUNIFY BIT(4)
|
|
+#define CPUCFG16_L2_IUPRIV BIT(5)
|
|
+#define CPUCFG16_L2_IUINCL BIT(6)
|
|
+#define CPUCFG16_L2_DPRE BIT(7)
|
|
+#define CPUCFG16_L2_DPRIV BIT(8)
|
|
+#define CPUCFG16_L2_DINCL BIT(9)
|
|
+#define CPUCFG16_L3_IUPRE BIT(10)
|
|
+#define CPUCFG16_L3_IUUNIFY BIT(11)
|
|
+#define CPUCFG16_L3_IUPRIV BIT(12)
|
|
+#define CPUCFG16_L3_IUINCL BIT(13)
|
|
+#define CPUCFG16_L3_DPRE BIT(14)
|
|
+#define CPUCFG16_L3_DPRIV BIT(15)
|
|
+#define CPUCFG16_L3_DINCL BIT(16)
|
|
+
|
|
+#define LOONGARCH_CPUCFG17 0x11
|
|
+#define CPUCFG17_L1I_WAYS_M (3 << 0)
|
|
+#define CPUCFG17_L1I_SETS_M (8 << 16)
|
|
+#define CPUCFG17_L1I_SIZE_M (6 << 24)
|
|
+
|
|
+#define LOONGARCH_CPUCFG18 0x12
|
|
+#define CPUCFG18_L1D_WAYS_M (3 << 0)
|
|
+#define CPUCFG18_L1D_SETS_M (8 << 16)
|
|
+#define CPUCFG18_L1D_SIZE_M (6 << 24)
|
|
+
|
|
+#define LOONGARCH_CPUCFG19 0x13
|
|
+#define CPUCFG19_L2_WAYS_M (0xf << 0)
|
|
+#define CPUCFG19_L2_SETS_M (8 << 16)
|
|
+#define CPUCFG19_L2_SIZE_M (6 << 24)
|
|
+
|
|
+#define LOONGARCH_CPUCFG20 0x14
|
|
+#define CPUCFG20_L3_WAYS_M (0xf << 0)
|
|
+#define CPUCFG20_L3_SETS_M (0xe << 16)
|
|
+#define CPUCFG20_L3_SIZE_M (0x6 << 24)
|
|
+
|
|
+#define LOONGARCH_PAGE_HUGE 0x40
|
|
+#define LOONGARCH_HUGE_GLOBAL 0x1000
|
|
+#define LOONGARCH_HUGE_GLOBAL_SH 12
|
|
+
|
|
+/*
|
|
+ * All CSR register
|
|
+ *
|
|
+ * default value in target/loongarch/cpu.c
|
|
+ * reset function in target/loongarch/translate.c:cpu_state_reset()
|
|
+ *
|
|
+ * This macro will be used only twice.
|
|
+ * > In target/loongarch/cpu.h:CPULOONGARCHState
|
|
+ * > In target/loongarch/internal.h:loongarch_def_t
|
|
+ *
|
|
+ * helper_function to rd/wr:
|
|
+ * > declare in target/loongarch/helper.h
|
|
+ * > realize in target/loongarch/op_helper.c
|
|
+ *
|
|
+ * during translate:
|
|
+ * > gen_csr_rdl()
|
|
+ * > gen_csr_wrl()
|
|
+ * > gen_csr_rdq()
|
|
+ * > gen_csr_wrq()
|
|
+ */
|
|
+#define CPU_LOONGARCH_CSR \
|
|
+ uint64_t CSR_CRMD; \
|
|
+ uint64_t CSR_PRMD; \
|
|
+ uint64_t CSR_EUEN; \
|
|
+ uint64_t CSR_MISC; \
|
|
+ uint64_t CSR_ECFG; \
|
|
+ uint64_t CSR_ESTAT; \
|
|
+ uint64_t CSR_ERA; \
|
|
+ uint64_t CSR_BADV; \
|
|
+ uint64_t CSR_BADI; \
|
|
+ uint64_t CSR_EEPN; \
|
|
+ uint64_t CSR_TLBIDX; \
|
|
+ uint64_t CSR_TLBEHI; \
|
|
+ uint64_t CSR_TLBELO0; \
|
|
+ uint64_t CSR_TLBELO1; \
|
|
+ uint64_t CSR_TLBWIRED; \
|
|
+ uint64_t CSR_GTLBC; \
|
|
+ uint64_t CSR_TRGP; \
|
|
+ uint64_t CSR_ASID; \
|
|
+ uint64_t CSR_PGDL; \
|
|
+ uint64_t CSR_PGDH; \
|
|
+ uint64_t CSR_PGD; \
|
|
+ uint64_t CSR_PWCTL0; \
|
|
+ uint64_t CSR_PWCTL1; \
|
|
+ uint64_t CSR_STLBPGSIZE; \
|
|
+ uint64_t CSR_RVACFG; \
|
|
+ uint64_t CSR_CPUID; \
|
|
+ uint64_t CSR_PRCFG1; \
|
|
+ uint64_t CSR_PRCFG2; \
|
|
+ uint64_t CSR_PRCFG3; \
|
|
+ uint64_t CSR_KS0; \
|
|
+ uint64_t CSR_KS1; \
|
|
+ uint64_t CSR_KS2; \
|
|
+ uint64_t CSR_KS3; \
|
|
+ uint64_t CSR_KS4; \
|
|
+ uint64_t CSR_KS5; \
|
|
+ uint64_t CSR_KS6; \
|
|
+ uint64_t CSR_KS7; \
|
|
+ uint64_t CSR_KS8; \
|
|
+ uint64_t CSR_TMID; \
|
|
+ uint64_t CSR_TCFG; \
|
|
+ uint64_t CSR_TVAL; \
|
|
+ uint64_t CSR_CNTC; \
|
|
+ uint64_t CSR_TINTCLR; \
|
|
+ uint64_t CSR_GSTAT; \
|
|
+ uint64_t CSR_GCFG; \
|
|
+ uint64_t CSR_GINTC; \
|
|
+ uint64_t CSR_GCNTC; \
|
|
+ uint64_t CSR_LLBCTL; \
|
|
+ uint64_t CSR_IMPCTL1; \
|
|
+ uint64_t CSR_IMPCTL2; \
|
|
+ uint64_t CSR_GNMI; \
|
|
+ uint64_t CSR_TLBRENT; \
|
|
+ uint64_t CSR_TLBRBADV; \
|
|
+ uint64_t CSR_TLBRERA; \
|
|
+ uint64_t CSR_TLBRSAVE; \
|
|
+ uint64_t CSR_TLBRELO0; \
|
|
+ uint64_t CSR_TLBRELO1; \
|
|
+ uint64_t CSR_TLBREHI; \
|
|
+ uint64_t CSR_TLBRPRMD; \
|
|
+ uint64_t CSR_ERRCTL; \
|
|
+ uint64_t CSR_ERRINFO; \
|
|
+ uint64_t CSR_ERRINFO1; \
|
|
+ uint64_t CSR_ERRENT; \
|
|
+ uint64_t CSR_ERRERA; \
|
|
+ uint64_t CSR_ERRSAVE; \
|
|
+ uint64_t CSR_CTAG; \
|
|
+ uint64_t CSR_DMWIN0; \
|
|
+ uint64_t CSR_DMWIN1; \
|
|
+ uint64_t CSR_DMWIN2; \
|
|
+ uint64_t CSR_DMWIN3; \
|
|
+ uint64_t CSR_PERFCTRL0; \
|
|
+ uint64_t CSR_PERFCNTR0; \
|
|
+ uint64_t CSR_PERFCTRL1; \
|
|
+ uint64_t CSR_PERFCNTR1; \
|
|
+ uint64_t CSR_PERFCTRL2; \
|
|
+ uint64_t CSR_PERFCNTR2; \
|
|
+ uint64_t CSR_PERFCTRL3; \
|
|
+ uint64_t CSR_PERFCNTR3; \
|
|
+ uint64_t CSR_MWPC; \
|
|
+ uint64_t CSR_MWPS; \
|
|
+ uint64_t CSR_DB0ADDR; \
|
|
+ uint64_t CSR_DB0MASK; \
|
|
+ uint64_t CSR_DB0CTL; \
|
|
+ uint64_t CSR_DB0ASID; \
|
|
+ uint64_t CSR_DB1ADDR; \
|
|
+ uint64_t CSR_DB1MASK; \
|
|
+ uint64_t CSR_DB1CTL; \
|
|
+ uint64_t CSR_DB1ASID; \
|
|
+ uint64_t CSR_DB2ADDR; \
|
|
+ uint64_t CSR_DB2MASK; \
|
|
+ uint64_t CSR_DB2CTL; \
|
|
+ uint64_t CSR_DB2ASID; \
|
|
+ uint64_t CSR_DB3ADDR; \
|
|
+ uint64_t CSR_DB3MASK; \
|
|
+ uint64_t CSR_DB3CTL; \
|
|
+ uint64_t CSR_DB3ASID; \
|
|
+ uint64_t CSR_FWPC; \
|
|
+ uint64_t CSR_FWPS; \
|
|
+ uint64_t CSR_IB0ADDR; \
|
|
+ uint64_t CSR_IB0MASK; \
|
|
+ uint64_t CSR_IB0CTL; \
|
|
+ uint64_t CSR_IB0ASID; \
|
|
+ uint64_t CSR_IB1ADDR; \
|
|
+ uint64_t CSR_IB1MASK; \
|
|
+ uint64_t CSR_IB1CTL; \
|
|
+ uint64_t CSR_IB1ASID; \
|
|
+ uint64_t CSR_IB2ADDR; \
|
|
+ uint64_t CSR_IB2MASK; \
|
|
+ uint64_t CSR_IB2CTL; \
|
|
+ uint64_t CSR_IB2ASID; \
|
|
+ uint64_t CSR_IB3ADDR; \
|
|
+ uint64_t CSR_IB3MASK; \
|
|
+ uint64_t CSR_IB3CTL; \
|
|
+ uint64_t CSR_IB3ASID; \
|
|
+ uint64_t CSR_IB4ADDR; \
|
|
+ uint64_t CSR_IB4MASK; \
|
|
+ uint64_t CSR_IB4CTL; \
|
|
+ uint64_t CSR_IB4ASID; \
|
|
+ uint64_t CSR_IB5ADDR; \
|
|
+ uint64_t CSR_IB5MASK; \
|
|
+ uint64_t CSR_IB5CTL; \
|
|
+ uint64_t CSR_IB5ASID; \
|
|
+ uint64_t CSR_IB6ADDR; \
|
|
+ uint64_t CSR_IB6MASK; \
|
|
+ uint64_t CSR_IB6CTL; \
|
|
+ uint64_t CSR_IB6ASID; \
|
|
+ uint64_t CSR_IB7ADDR; \
|
|
+ uint64_t CSR_IB7MASK; \
|
|
+ uint64_t CSR_IB7CTL; \
|
|
+ uint64_t CSR_IB7ASID; \
|
|
+ uint64_t CSR_DEBUG; \
|
|
+ uint64_t CSR_DERA; \
|
|
+ uint64_t CSR_DESAVE;
|
|
+
|
|
+#define LOONGARCH_CSR_32(_R, _S) \
|
|
+ (KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
|
|
+
|
|
+#define LOONGARCH_CSR_64(_R, _S) \
|
|
+ (KVM_REG_LOONGARCH_CSR | KVM_REG_SIZE_U64 | (8 * (_R) + (_S)))
|
|
+
|
|
+#define KVM_IOC_CSRID(id) LOONGARCH_CSR_64(id, 0)
|
|
+
|
|
+#endif
|
|
diff --git a/target/loongarch64/cpu-param.h b/target/loongarch64/cpu-param.h
|
|
new file mode 100644
|
|
index 0000000000..b5acb6b91e
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/cpu-param.h
|
|
@@ -0,0 +1,46 @@
|
|
+/*
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef CPU_PARAM_H
|
|
+#define CPU_PARAM_H
|
|
+
|
|
+/* If we want to use host float regs... */
|
|
+/* #define USE_HOST_FLOAT_REGS */
|
|
+
|
|
+/* Real pages are variable size... */
|
|
+#define TARGET_PAGE_BITS 14
|
|
+
|
|
+#define LOONGARCH_TLB_MAX 2112
|
|
+
|
|
+#define TARGET_LONG_BITS 64
|
|
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
|
|
+#define TARGET_VIRT_ADDR_SPACE_BITS 48
|
|
+
|
|
+/*
|
|
+ * bit definitions for insn_flags (ISAs/ASEs flags)
|
|
+ * ------------------------------------------------
|
|
+ */
|
|
+#define ISA_LARCH32 0x00000001ULL
|
|
+#define ISA_LARCH64 0x00000002ULL
|
|
+#define INSN_LOONGARCH 0x00010000ULL
|
|
+
|
|
+#define CPU_LARCH32 (ISA_LARCH32)
|
|
+#define CPU_LARCH64 (ISA_LARCH32 | ISA_LARCH64)
|
|
+
|
|
+#define NB_MMU_MODES 4
|
|
+
|
|
+#endif /* QEMU_LOONGARCH_DEFS_H */
|
|
diff --git a/target/loongarch64/cpu-qom.h b/target/loongarch64/cpu-qom.h
|
|
new file mode 100644
|
|
index 0000000000..43541c34e5
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/cpu-qom.h
|
|
@@ -0,0 +1,54 @@
|
|
+/*
|
|
+ * QEMU LOONGARCH CPU
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef QEMU_LOONGARCH_CPU_QOM_H
|
|
+#define QEMU_LOONGARCH_CPU_QOM_H
|
|
+
|
|
+#include "hw/core/cpu.h"
|
|
+
|
|
+#define TYPE_LOONGARCH_CPU "loongarch-cpu"
|
|
+
|
|
+#define LOONGARCH_CPU_CLASS(klass) \
|
|
+ OBJECT_CLASS_CHECK(LOONGARCHCPUClass, (klass), TYPE_LOONGARCH_CPU)
|
|
+#define LOONGARCH_CPU(obj) \
|
|
+ OBJECT_CHECK(LOONGARCHCPU, (obj), TYPE_LOONGARCH_CPU)
|
|
+#define LOONGARCH_CPU_GET_CLASS(obj) \
|
|
+ OBJECT_GET_CLASS(LOONGARCHCPUClass, (obj), TYPE_LOONGARCH_CPU)
|
|
+
|
|
+/**
|
|
+ * LOONGARCHCPUClass:
|
|
+ * @parent_realize: The parent class' realize handler.
|
|
+ * @parent_reset: The parent class' reset handler.
|
|
+ *
|
|
+ * A LOONGARCH CPU model.
|
|
+ */
|
|
+typedef struct LOONGARCHCPUClass {
|
|
+ /*< private >*/
|
|
+ CPUClass parent_class;
|
|
+ /*< public >*/
|
|
+
|
|
+ DeviceRealize parent_realize;
|
|
+ DeviceUnrealize parent_unrealize;
|
|
+ DeviceReset parent_reset;
|
|
+ const struct loongarch_def_t *cpu_def;
|
|
+} LOONGARCHCPUClass;
|
|
+
|
|
+typedef struct LOONGARCHCPU LOONGARCHCPU;
|
|
+
|
|
+#endif
|
|
diff --git a/target/loongarch64/cpu.c b/target/loongarch64/cpu.c
|
|
new file mode 100644
|
|
index 0000000000..ce04d8064f
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/cpu.c
|
|
@@ -0,0 +1,575 @@
|
|
+/*
|
|
+ * QEMU LOONGARCH CPU
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "qapi/error.h"
|
|
+#include "qapi/visitor.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "kvm_larch.h"
|
|
+#include "qemu-common.h"
|
|
+#include "hw/qdev-properties.h"
|
|
+#include "sysemu/kvm.h"
|
|
+#include "exec/exec-all.h"
|
|
+#include "sysemu/arch_init.h"
|
|
+#include "cpu-csr.h"
|
|
+#include "qemu/qemu-print.h"
|
|
+#include "qapi/qapi-commands-machine-target.h"
|
|
+#ifdef CONFIG_TCG
|
|
+#include "hw/core/tcg-cpu-ops.h"
|
|
+#endif /* CONFIG_TCG */
|
|
+
|
|
+#define LOONGARCH_CONFIG1 \
|
|
+ ((0x8 << CSR_CONF1_KSNUM_SHIFT) | (0x2f << CSR_CONF1_TMRBITS_SHIFT) | \
|
|
+ (0x7 << CSR_CONF1_VSMAX_SHIFT))
|
|
+
|
|
+#define LOONGARCH_CONFIG3 \
|
|
+ ((0x2 << CSR_CONF3_TLBORG_SHIFT) | (0x3f << CSR_CONF3_MTLBSIZE_SHIFT) | \
|
|
+ (0x7 << CSR_CONF3_STLBWAYS_SHIFT) | (0x8 << CSR_CONF3_STLBIDX_SHIFT))
|
|
+
|
|
+/* LOONGARCH CPU definitions */
|
|
+const loongarch_def_t loongarch_defs[] = {
|
|
+ {
|
|
+ .name = "Loongson-3A5000",
|
|
+
|
|
+ /* for LoongISA CSR */
|
|
+ .CSR_PRCFG1 = LOONGARCH_CONFIG1,
|
|
+ .CSR_PRCFG2 = 0x3ffff000,
|
|
+ .CSR_PRCFG3 = LOONGARCH_CONFIG3,
|
|
+ .CSR_CRMD = (0 << CSR_CRMD_PLV_SHIFT) | (0 << CSR_CRMD_IE_SHIFT) |
|
|
+ (1 << CSR_CRMD_DA_SHIFT) | (0 << CSR_CRMD_PG_SHIFT) |
|
|
+ (1 << CSR_CRMD_DACF_SHIFT) | (1 << CSR_CRMD_DACM_SHIFT),
|
|
+ .CSR_ECFG = 0x7 << 16,
|
|
+ .CSR_STLBPGSIZE = 0xe,
|
|
+ .CSR_RVACFG = 0x0,
|
|
+ .CSR_ASID = 0xa0000,
|
|
+ .FCSR0 = 0x0,
|
|
+ .FCSR0_rw_bitmask = 0x1f1f03df,
|
|
+ .PABITS = 48,
|
|
+ .insn_flags = CPU_LARCH64 | INSN_LOONGARCH,
|
|
+ .mmu_type = MMU_TYPE_LS3A5K,
|
|
+ },
|
|
+ {
|
|
+ .name = "host",
|
|
+
|
|
+ /* for LoongISA CSR */
|
|
+ .CSR_PRCFG1 = LOONGARCH_CONFIG1,
|
|
+ .CSR_PRCFG2 = 0x3ffff000,
|
|
+ .CSR_PRCFG3 = LOONGARCH_CONFIG3,
|
|
+ .CSR_CRMD = (0 << CSR_CRMD_PLV_SHIFT) | (0 << CSR_CRMD_IE_SHIFT) |
|
|
+ (1 << CSR_CRMD_DA_SHIFT) | (0 << CSR_CRMD_PG_SHIFT) |
|
|
+ (1 << CSR_CRMD_DACF_SHIFT) | (1 << CSR_CRMD_DACM_SHIFT),
|
|
+ .CSR_ECFG = 0x7 << 16,
|
|
+ .CSR_STLBPGSIZE = 0xe,
|
|
+ .CSR_RVACFG = 0x0,
|
|
+ .FCSR0 = 0x0,
|
|
+ .FCSR0_rw_bitmask = 0x1f1f03df,
|
|
+ .PABITS = 48,
|
|
+ .insn_flags = CPU_LARCH64 | INSN_LOONGARCH,
|
|
+ .mmu_type = MMU_TYPE_LS3A5K,
|
|
+ },
|
|
+};
|
|
+const int loongarch_defs_number = ARRAY_SIZE(loongarch_defs);
|
|
+
|
|
+void loongarch_cpu_list(void)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(loongarch_defs); i++) {
|
|
+ qemu_printf("LOONGARCH '%s'\n", loongarch_defs[i].name);
|
|
+ }
|
|
+}
|
|
+
|
|
+CpuDefinitionInfoList *qmp_query_cpu_definitions(Error **errp)
|
|
+{
|
|
+ CpuDefinitionInfoList *cpu_list = NULL;
|
|
+ const loongarch_def_t *def;
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < ARRAY_SIZE(loongarch_defs); i++) {
|
|
+ CpuDefinitionInfoList *entry;
|
|
+ CpuDefinitionInfo *info;
|
|
+
|
|
+ def = &loongarch_defs[i];
|
|
+ info = g_malloc0(sizeof(*info));
|
|
+ info->name = g_strdup(def->name);
|
|
+
|
|
+ entry = g_malloc0(sizeof(*entry));
|
|
+ entry->value = info;
|
|
+ entry->next = cpu_list;
|
|
+ cpu_list = entry;
|
|
+ }
|
|
+
|
|
+ return cpu_list;
|
|
+}
|
|
+
|
|
+static void loongarch_cpu_set_pc(CPUState *cs, vaddr value)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+
|
|
+ env->active_tc.PC = value & ~(target_ulong)1;
|
|
+}
|
|
+
|
|
+static bool loongarch_cpu_has_work(CPUState *cs)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ bool has_work = false;
|
|
+
|
|
+ /*
|
|
+ * It is implementation dependent if non-enabled
|
|
+ * interrupts wake-up the CPU, however most of the implementations only
|
|
+ * check for interrupts that can be taken.
|
|
+ */
|
|
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
|
|
+ cpu_loongarch_hw_interrupts_pending(env)) {
|
|
+ has_work = true;
|
|
+ }
|
|
+
|
|
+ return has_work;
|
|
+}
|
|
+
|
|
+const char *const regnames[] = {
|
|
+ "r0", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
|
|
+ "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "x0",
|
|
+ "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8",
|
|
+};
|
|
+
|
|
+const char *const fregnames[] = {
|
|
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
|
|
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
|
|
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
|
|
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
|
|
+};
|
|
+
|
|
+static void fpu_dump_state(CPULOONGARCHState *env, FILE *f,
|
|
+ fprintf_function fpu_fprintf, int flags)
|
|
+{
|
|
+ int i;
|
|
+ int is_fpu64 = 1;
|
|
+
|
|
+#define printfpr(fp) \
|
|
+ do { \
|
|
+ if (is_fpu64) \
|
|
+ fpu_fprintf( \
|
|
+ f, "w:%08x d:%016" PRIx64 " fd:%13g fs:%13g psu: %13g\n", \
|
|
+ (fp)->w[FP_ENDIAN_IDX], (fp)->d, (double)(fp)->fd, \
|
|
+ (double)(fp)->fs[FP_ENDIAN_IDX], \
|
|
+ (double)(fp)->fs[!FP_ENDIAN_IDX]); \
|
|
+ else { \
|
|
+ fpr_t tmp; \
|
|
+ tmp.w[FP_ENDIAN_IDX] = (fp)->w[FP_ENDIAN_IDX]; \
|
|
+ tmp.w[!FP_ENDIAN_IDX] = ((fp) + 1)->w[FP_ENDIAN_IDX]; \
|
|
+ fpu_fprintf(f, \
|
|
+ "w:%08x d:%016" PRIx64 " fd:%13g fs:%13g psu:%13g\n", \
|
|
+ tmp.w[FP_ENDIAN_IDX], tmp.d, (double)tmp.fd, \
|
|
+ (double)tmp.fs[FP_ENDIAN_IDX], \
|
|
+ (double)tmp.fs[!FP_ENDIAN_IDX]); \
|
|
+ } \
|
|
+ } while (0)
|
|
+
|
|
+ fpu_fprintf(f, "FCSR0 0x%08x SR.FR %d fp_status 0x%02x\n",
|
|
+ env->active_fpu.fcsr0, is_fpu64,
|
|
+ get_float_exception_flags(&env->active_fpu.fp_status));
|
|
+ for (i = 0; i < 32; (is_fpu64) ? i++ : (i += 2)) {
|
|
+ fpu_fprintf(f, "%3s: ", fregnames[i]);
|
|
+ printfpr(&env->active_fpu.fpr[i]);
|
|
+ }
|
|
+
|
|
+#undef printfpr
|
|
+}
|
|
+
|
|
+void loongarch_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int i;
|
|
+
|
|
+ qemu_fprintf(f, "pc:\t %lx\n", env->active_tc.PC);
|
|
+ for (i = 0; i < 32; i++) {
|
|
+ if ((i & 3) == 0) {
|
|
+ qemu_fprintf(f, "GPR%02d:", i);
|
|
+ }
|
|
+ qemu_fprintf(f, " %s " TARGET_FMT_lx, regnames[i],
|
|
+ env->active_tc.gpr[i]);
|
|
+ if ((i & 3) == 3) {
|
|
+ qemu_fprintf(f, "\n");
|
|
+ }
|
|
+ }
|
|
+ qemu_fprintf(f, "EUEN 0x%lx\n", env->CSR_EUEN);
|
|
+ qemu_fprintf(f, "ESTAT 0x%lx\n", env->CSR_ESTAT);
|
|
+ qemu_fprintf(f, "ERA 0x%lx\n", env->CSR_ERA);
|
|
+ qemu_fprintf(f, "CRMD 0x%lx\n", env->CSR_CRMD);
|
|
+ qemu_fprintf(f, "PRMD 0x%lx\n", env->CSR_PRMD);
|
|
+ qemu_fprintf(f, "BadVAddr 0x%lx\n", env->CSR_BADV);
|
|
+ qemu_fprintf(f, "TLB refill ERA 0x%lx\n", env->CSR_TLBRERA);
|
|
+ qemu_fprintf(f, "TLB refill BadV 0x%lx\n", env->CSR_TLBRBADV);
|
|
+ qemu_fprintf(f, "EEPN 0x%lx\n", env->CSR_EEPN);
|
|
+ qemu_fprintf(f, "BadInstr 0x%lx\n", env->CSR_BADI);
|
|
+ qemu_fprintf(f, "PRCFG1 0x%lx\nPRCFG2 0x%lx\nPRCFG3 0x%lx\n",
|
|
+ env->CSR_PRCFG1, env->CSR_PRCFG3, env->CSR_PRCFG3);
|
|
+ if ((flags & CPU_DUMP_FPU) && (env->hflags & LARCH_HFLAG_FPU)) {
|
|
+ fpu_dump_state(env, f, qemu_fprintf, flags);
|
|
+ }
|
|
+}
|
|
+
|
|
+void cpu_state_reset(CPULOONGARCHState *env)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = loongarch_env_get_cpu(env);
|
|
+ CPUState *cs = CPU(cpu);
|
|
+
|
|
+ /* Reset registers to their default values */
|
|
+ env->CSR_PRCFG1 = env->cpu_model->CSR_PRCFG1;
|
|
+ env->CSR_PRCFG2 = env->cpu_model->CSR_PRCFG2;
|
|
+ env->CSR_PRCFG3 = env->cpu_model->CSR_PRCFG3;
|
|
+ env->CSR_CRMD = env->cpu_model->CSR_CRMD;
|
|
+ env->CSR_ECFG = env->cpu_model->CSR_ECFG;
|
|
+ env->CSR_STLBPGSIZE = env->cpu_model->CSR_STLBPGSIZE;
|
|
+ env->CSR_RVACFG = env->cpu_model->CSR_RVACFG;
|
|
+ env->CSR_ASID = env->cpu_model->CSR_ASID;
|
|
+
|
|
+ env->current_tc = 0;
|
|
+ env->active_fpu.fcsr0_rw_bitmask = env->cpu_model->FCSR0_rw_bitmask;
|
|
+ env->active_fpu.fcsr0 = env->cpu_model->FCSR0;
|
|
+ env->insn_flags = env->cpu_model->insn_flags;
|
|
+
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+ env->CSR_ERA = env->active_tc.PC;
|
|
+ env->active_tc.PC = env->exception_base;
|
|
+#ifdef CONFIG_TCG
|
|
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
|
|
+#endif
|
|
+ env->CSR_TLBWIRED = 0;
|
|
+ env->CSR_TMID = cs->cpu_index;
|
|
+ env->CSR_CPUID = (cs->cpu_index & 0x1ff);
|
|
+ env->CSR_EEPN |= (uint64_t)0x80000000;
|
|
+ env->CSR_TLBRENT |= (uint64_t)0x80000000;
|
|
+#endif
|
|
+
|
|
+ /* Count register increments in debug mode, EJTAG version 1 */
|
|
+ env->CSR_DEBUG = (1 << CSR_DEBUG_DINT) | (0x1 << CSR_DEBUG_DMVER);
|
|
+
|
|
+ compute_hflags(env);
|
|
+ restore_fp_status(env);
|
|
+ cs->exception_index = EXCP_NONE;
|
|
+}
|
|
+
|
|
+/* CPUClass::reset() */
|
|
+static void loongarch_cpu_reset(DeviceState *dev)
|
|
+{
|
|
+ CPUState *s = CPU(dev);
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(s);
|
|
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(cpu);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+
|
|
+ mcc->parent_reset(dev);
|
|
+
|
|
+ memset(env, 0, offsetof(CPULOONGARCHState, end_reset_fields));
|
|
+
|
|
+ cpu_state_reset(env);
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+ if (kvm_enabled()) {
|
|
+ kvm_loongarch_reset_vcpu(cpu);
|
|
+ }
|
|
+#endif
|
|
+}
|
|
+
|
|
+static void loongarch_cpu_disas_set_info(CPUState *s, disassemble_info *info)
|
|
+{
|
|
+ info->print_insn = print_insn_loongarch;
|
|
+}
|
|
+
|
|
+static void fpu_init(CPULOONGARCHState *env, const loongarch_def_t *def)
|
|
+{
|
|
+ memcpy(&env->active_fpu, &env->fpus[0], sizeof(env->active_fpu));
|
|
+}
|
|
+
|
|
+void cpu_loongarch_realize_env(CPULOONGARCHState *env)
|
|
+{
|
|
+ env->exception_base = 0x1C000000;
|
|
+
|
|
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
|
|
+ mmu_init(env, env->cpu_model);
|
|
+#endif
|
|
+ fpu_init(env, env->cpu_model);
|
|
+}
|
|
+
|
|
+static void loongarch_cpu_realizefn(DeviceState *dev, Error **errp)
|
|
+{
|
|
+ CPUState *cs = CPU(dev);
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(dev);
|
|
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(dev);
|
|
+ Error *local_err = NULL;
|
|
+
|
|
+ cpu_exec_realizefn(cs, &local_err);
|
|
+ if (local_err != NULL) {
|
|
+ error_propagate(errp, local_err);
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ cpu_loongarch_realize_env(&cpu->env);
|
|
+
|
|
+ loongarch_cpu_register_gdb_regs_for_features(cs);
|
|
+
|
|
+ cpu_reset(cs);
|
|
+ qemu_init_vcpu(cs);
|
|
+
|
|
+ mcc->parent_realize(dev, errp);
|
|
+ cpu->hotplugged = 1;
|
|
+}
|
|
+
|
|
+static void loongarch_cpu_unrealizefn(DeviceState *dev)
|
|
+{
|
|
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(dev);
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+ cpu_remove_sync(CPU(dev));
|
|
+#endif
|
|
+
|
|
+ mcc->parent_unrealize(dev);
|
|
+}
|
|
+
|
|
+static void loongarch_cpu_initfn(Object *obj)
|
|
+{
|
|
+ CPUState *cs = CPU(obj);
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(obj);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_GET_CLASS(obj);
|
|
+ cpu_set_cpustate_pointers(cpu);
|
|
+ cs->env_ptr = env;
|
|
+ env->cpu_model = mcc->cpu_def;
|
|
+ cs->halted = 1;
|
|
+ cpu->dtb_compatible = "loongarch,Loongson-3A5000";
|
|
+}
|
|
+
|
|
+static char *loongarch_cpu_type_name(const char *cpu_model)
|
|
+{
|
|
+ return g_strdup_printf(LOONGARCH_CPU_TYPE_NAME("%s"), cpu_model);
|
|
+}
|
|
+
|
|
+static ObjectClass *loongarch_cpu_class_by_name(const char *cpu_model)
|
|
+{
|
|
+ ObjectClass *oc;
|
|
+ char *typename;
|
|
+
|
|
+ typename = loongarch_cpu_type_name(cpu_model);
|
|
+ oc = object_class_by_name(typename);
|
|
+ g_free(typename);
|
|
+ return oc;
|
|
+}
|
|
+
|
|
+static int64_t loongarch_cpu_get_arch_id(CPUState *cs)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+
|
|
+ return cpu->id;
|
|
+}
|
|
+
|
|
+static Property loongarch_cpu_properties[] = {
|
|
+ DEFINE_PROP_INT32("core-id", LOONGARCHCPU, core_id, -1),
|
|
+ DEFINE_PROP_INT32("id", LOONGARCHCPU, id, UNASSIGNED_CPU_ID),
|
|
+ DEFINE_PROP_INT32("node-id", LOONGARCHCPU, node_id,
|
|
+ CPU_UNSET_NUMA_NODE_ID),
|
|
+
|
|
+ DEFINE_PROP_END_OF_LIST()
|
|
+};
|
|
+
|
|
+#ifdef CONFIG_TCG
|
|
+static void loongarch_cpu_synchronize_from_tb(CPUState *cs,
|
|
+ const TranslationBlock *tb)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+
|
|
+ env->active_tc.PC = tb->pc;
|
|
+ env->hflags &= ~LARCH_HFLAG_BMASK;
|
|
+ env->hflags |= tb->flags & LARCH_HFLAG_BMASK;
|
|
+}
|
|
+
|
|
+static const struct TCGCPUOps loongarch_tcg_ops = {
|
|
+ .initialize = loongarch_tcg_init,
|
|
+ .synchronize_from_tb = loongarch_cpu_synchronize_from_tb,
|
|
+
|
|
+ .tlb_fill = loongarch_cpu_tlb_fill,
|
|
+ .cpu_exec_interrupt = loongarch_cpu_exec_interrupt,
|
|
+ .do_interrupt = loongarch_cpu_do_interrupt,
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+ .do_unaligned_access = loongarch_cpu_do_unaligned_access,
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
+};
|
|
+#endif /* CONFIG_TCG */
|
|
+
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
/*
 * Translate a virtual address to a physical one.
 *
 * Resolution order mirrors the hardware: direct-address (DA) mode when
 * paging is disabled, then the two direct-map windows (DMWIN0/DMWIN1),
 * then a canonical-address check, and finally the TLB.
 *
 * Returns a TLBRET_* code; on TLBRET_MATCH, *physical and *prot are set.
 */
static int get_physical_address(CPULOONGARCHState *env, hwaddr *physical,
                                int *prot, target_ulong real_address, int rw,
                                int access_type, int mmu_idx)
{
    /* mmu_idx carries the privilege level (see LARCH_HFLAG_KSU). */
    int user_mode = mmu_idx == LARCH_HFLAG_UM;
    int kernel_mode = !user_mode;
    unsigned plv, base_c, base_v, tmp;

    /* effective address (modified for KVM T&E kernel segments) */
    target_ulong address = real_address;

    /* Check PG */
    if (!(env->CSR_CRMD & CSR_CRMD_PG)) {
        /* DA mode: identity mapping truncated to 48 physical bits. */
        *physical = address & 0xffffffffffffUL;
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }

    /* PLV match mask against the DMW config: bit 0 = PLV0, bit 3 = PLV3. */
    plv = kernel_mode | (user_mode << 3);
    base_v = address >> CSR_DMW_BASE_SH;
    /* Check direct map window 0 */
    base_c = env->CSR_DMWIN0 >> CSR_DMW_BASE_SH;
    if ((plv & env->CSR_DMWIN0) && (base_c == base_v)) {
        *physical = dmwin_va2pa(address);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }
    /* Check direct map window 1 */
    base_c = env->CSR_DMWIN1 >> CSR_DMW_BASE_SH;
    if ((plv & env->CSR_DMWIN1) && (base_c == base_v)) {
        *physical = dmwin_va2pa(address);
        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        return TLBRET_MATCH;
    }
    /* Check valid extension: bits 63..47 must all be 0 or all be 1. */
    tmp = address >> 47;
    if (!(tmp == 0 || tmp == 0x1ffff)) {
        return TLBRET_BADADDR;
    }
    /* mapped address */
    return env->tlb->map_address(env, physical, prot, real_address, rw,
                                 access_type);
}
|
|
+
|
|
+hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ hwaddr phys_addr;
|
|
+ int prot;
|
|
+
|
|
+ if (get_physical_address(env, &phys_addr, &prot, addr, 0, ACCESS_INT,
|
|
+ cpu_mmu_index(env, false)) != 0) {
|
|
+ return -1;
|
|
+ }
|
|
+ return phys_addr;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+#include "hw/core/sysemu-cpu-ops.h"
|
|
+
|
|
/* System-emulation callbacks: ELF core notes, debug translation, migration. */
static const struct SysemuCPUOps loongarch_sysemu_ops = {
    .write_elf64_note = loongarch_cpu_write_elf64_note,
    .get_phys_page_debug = loongarch_cpu_get_phys_page_debug,
    .legacy_vmsd = &vmstate_loongarch_cpu,
};
|
|
+#endif
|
|
+
|
|
+static gchar *loongarch_gdb_arch_name(CPUState *cs)
|
|
+{
|
|
+ return g_strdup("loongarch64");
|
|
+}
|
|
+
|
|
/*
 * Class initializer for the abstract LoongArch CPU type: wires the
 * device lifecycle hooks, generic CPUClass callbacks, gdb parameters
 * and (for TCG builds) the TCG op table.
 */
static void loongarch_cpu_class_init(ObjectClass *c, void *data)
{
    LOONGARCHCPUClass *mcc = LOONGARCH_CPU_CLASS(c);
    CPUClass *cc = CPU_CLASS(c);
    DeviceClass *dc = DEVICE_CLASS(c);

    device_class_set_props(dc, loongarch_cpu_properties);
    /* Chain realize/unrealize/reset so the parent implementations run too. */
    device_class_set_parent_realize(dc, loongarch_cpu_realizefn,
                                    &mcc->parent_realize);

    device_class_set_parent_unrealize(dc, loongarch_cpu_unrealizefn,
                                      &mcc->parent_unrealize);

    device_class_set_parent_reset(dc, loongarch_cpu_reset, &mcc->parent_reset);
    cc->get_arch_id = loongarch_cpu_get_arch_id;

    cc->class_by_name = loongarch_cpu_class_by_name;
    cc->has_work = loongarch_cpu_has_work;
    cc->dump_state = loongarch_cpu_dump_state;
    cc->set_pc = loongarch_cpu_set_pc;
    cc->gdb_read_register = loongarch_cpu_gdb_read_register;
    cc->gdb_write_register = loongarch_cpu_gdb_write_register;
    cc->disas_set_info = loongarch_cpu_disas_set_info;
    cc->gdb_arch_name = loongarch_gdb_arch_name;
#ifndef CONFIG_USER_ONLY
    cc->sysemu_ops = &loongarch_sysemu_ops;
#endif /* !CONFIG_USER_ONLY */

    /* 32 GPRs + PC + 2 more in the base gdb register set. */
    cc->gdb_num_core_regs = 35;
    cc->gdb_core_xml_file = "loongarch-base64.xml";
    cc->gdb_stop_before_watchpoint = true;

    /* CPUs may be created by the user (e.g. for hotplug). */
    dc->user_creatable = true;
#ifdef CONFIG_TCG
    cc->tcg_ops = &loongarch_tcg_ops;
#endif /* CONFIG_TCG */
}
|
|
+
|
|
/* Abstract base type; concrete models are registered per cpudef below. */
static const TypeInfo loongarch_cpu_type_info = {
    .name = TYPE_LOONGARCH_CPU,
    .parent = TYPE_CPU,
    .instance_size = sizeof(LOONGARCHCPU),
    .instance_init = loongarch_cpu_initfn,
    .abstract = true,
    .class_size = sizeof(LOONGARCHCPUClass),
    .class_init = loongarch_cpu_class_init,
};
|
|
+
|
|
+static void loongarch_cpu_cpudef_class_init(ObjectClass *oc, void *data)
|
|
+{
|
|
+ LOONGARCHCPUClass *mcc = LOONGARCH_CPU_CLASS(oc);
|
|
+ mcc->cpu_def = data;
|
|
+}
|
|
+
|
|
+static void loongarch_register_cpudef_type(const struct loongarch_def_t *def)
|
|
+{
|
|
+ char *typename = loongarch_cpu_type_name(def->name);
|
|
+ TypeInfo ti = {
|
|
+ .name = typename,
|
|
+ .parent = TYPE_LOONGARCH_CPU,
|
|
+ .class_init = loongarch_cpu_cpudef_class_init,
|
|
+ .class_data = (void *)def,
|
|
+ };
|
|
+
|
|
+ type_register(&ti);
|
|
+ g_free(typename);
|
|
+}
|
|
+
|
|
+static void loongarch_cpu_register_types(void)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ type_register_static(&loongarch_cpu_type_info);
|
|
+ for (i = 0; i < loongarch_defs_number; i++) {
|
|
+ loongarch_register_cpudef_type(&loongarch_defs[i]);
|
|
+ }
|
|
+}
|
|
+
|
|
+type_init(loongarch_cpu_register_types)
|
|
diff --git a/target/loongarch64/cpu.h b/target/loongarch64/cpu.h
|
|
new file mode 100644
|
|
index 0000000000..bf5b36d404
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/cpu.h
|
|
@@ -0,0 +1,359 @@
|
|
+/*
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef LOONGARCH_CPU_H
|
|
+#define LOONGARCH_CPU_H
|
|
+
|
|
+#define CPUArchState struct CPULOONGARCHState
|
|
+
|
|
+#include "qemu-common.h"
|
|
+#include "cpu-qom.h"
|
|
+#include "larch-defs.h"
|
|
+#include "exec/cpu-defs.h"
|
|
+#include "fpu/softfloat.h"
|
|
+#include "sysemu/sysemu.h"
|
|
+#include "cpu-csr.h"
|
|
+
|
|
+#define TCG_GUEST_DEFAULT_MO (0)
|
|
+
|
|
+struct CPULOONGARCHState;
|
|
+typedef LOONGARCHCPU ArchCPU;
|
|
+typedef struct CPULOONGARCHTLBContext CPULOONGARCHTLBContext;
|
|
+
|
|
+#define LASX_REG_WIDTH (256)
|
|
typedef union lasx_reg_t lasx_reg_t;
/* 256-bit LASX vector register, viewed as four 64-bit lanes. */
union lasx_reg_t
{
    int64_t val64[LASX_REG_WIDTH / 64];
};

typedef union fpr_t fpr_t;
/*
 * One architectural FP register.  The scalar FP views overlay the low
 * bits of the LASX vector view (overlap semantics on big-endian hosts
 * are untested, see below).
 */
union fpr_t
{
    float64 fd;  /* ieee double precision */
    float32 fs[2]; /* ieee single precision */
    uint64_t d;  /* binary double fixed-point */
    uint32_t w[2]; /* binary single fixed-point */
    /* FPU/LASX register mapping is not tested on big-endian hosts. */
    lasx_reg_t lasx; /* vector data */
};
|
|
+/*
|
|
+ * define FP_ENDIAN_IDX to access the same location
|
|
+ * in the fpr_t union regardless of the host endianness
|
|
+ */
|
|
+#if defined(HOST_WORDS_BIGENDIAN)
|
|
+#define FP_ENDIAN_IDX 1
|
|
+#else
|
|
+#define FP_ENDIAN_IDX 0
|
|
+#endif
|
|
+
|
|
/* Floating-point unit state: registers, softfloat status and control CSRs. */
typedef struct CPULOONGARCHFPUContext {
    /* Floating point registers */
    fpr_t fpr[32];
    float_status fp_status;      /* softfloat rounding/exception state */

    bool cf[8];                  /* condition flag registers fcc0..fcc7 */
    /*
     * fcsr0
     * 31:29 |28:24 |23:21 |20:16 |15:10 |9:8 |7 |6 |5 |4:0
     *        Cause         Flags        RM           Enables
     */
    uint32_t fcsr0;
    uint32_t fcsr0_rw_bitmask;   /* which fcsr0 bits are software-writable */
    uint32_t vcsr16;
} CPULOONGARCHFPUContext;
|
|
+
|
|
/* fp control and status register definition */
#define FCSR0_M1 0xdf        /* DAE, TM and Enables */
#define FCSR0_M2 0x1f1f0000  /* Cause and Flags */
#define FCSR0_M3 0x300       /* Round Mode */
#define FCSR0_RM 8           /* Round Mode bit num on fcsr0 */
/* Field accessors for the 5-bit Cause/Enable/Flags groups of fcsr0. */
#define GET_FP_CAUSE(reg) (((reg) >> 24) & 0x1f)
#define GET_FP_ENABLE(reg) (((reg) >> 0) & 0x1f)
#define GET_FP_FLAGS(reg) (((reg) >> 16) & 0x1f)
#define SET_FP_CAUSE(reg, v)                                                  \
    do {                                                                      \
        (reg) = ((reg) & ~(0x1f << 24)) | ((v & 0x1f) << 24);                 \
    } while (0)
#define SET_FP_ENABLE(reg, v)                                                 \
    do {                                                                      \
        (reg) = ((reg) & ~(0x1f << 0)) | ((v & 0x1f) << 0);                   \
    } while (0)
#define SET_FP_FLAGS(reg, v)                                                  \
    do {                                                                      \
        (reg) = ((reg) & ~(0x1f << 16)) | ((v & 0x1f) << 16);                 \
    } while (0)
/* OR new sticky flag bits in without clearing previously accumulated ones. */
#define UPDATE_FP_FLAGS(reg, v)                                               \
    do {                                                                      \
        (reg) |= ((v & 0x1f) << 16);                                          \
    } while (0)
/* Bit positions within the 5-bit exception groups above. */
#define FP_INEXACT 1
#define FP_UNDERFLOW 2
#define FP_OVERFLOW 4
#define FP_DIV0 8
#define FP_INVALID 16
|
|
+
|
|
+#define TARGET_INSN_START_EXTRA_WORDS 2
|
|
+
|
|
+typedef struct loongarch_def_t loongarch_def_t;
|
|
+
|
|
+#define LOONGARCH_FPU_MAX 1
|
|
+#define LOONGARCH_KSCRATCH_NUM 8
|
|
+
|
|
/* Core architectural register file: 32 GPRs plus the program counter. */
typedef struct TCState TCState;
struct TCState {
    target_ulong gpr[32];
    target_ulong PC;
};
|
|
+
|
|
+#define N_IRQS 14
|
|
+#define IRQ_TIMER 11
|
|
+#define IRQ_IPI 12
|
|
+#define IRQ_UART 2
|
|
+
|
|
/*
 * Full per-vCPU emulation state.  Fields up to end_reset_fields are
 * zeroed on CPU reset; everything after it survives reset.
 */
typedef struct CPULOONGARCHState CPULOONGARCHState;
struct CPULOONGARCHState {
    TCState active_tc;                 /* GPRs and PC */
    CPULOONGARCHFPUContext active_fpu; /* FPU state currently in use */

    uint32_t current_tc;
    uint64_t scr[4];                   /* scratch registers */
    uint32_t PABITS;                   /* physical address width */

    /* LoongISA CSR register */
    CPU_LOONGARCH_CSR
    /* LL/SC monitor state for atomic sequences. */
    uint64_t lladdr;
    target_ulong llval;
    uint64_t llval_wp;
    uint32_t llnewval_wp;

    CPULOONGARCHFPUContext fpus[LOONGARCH_FPU_MAX];
    /* QEMU */
    int error_code;
#define EXCP_TLB_NOMATCH 0x1
#define EXCP_INST_NOTAVAIL 0x2 /* No valid instruction word for BadInstr */
    uint32_t hflags; /* CPU State */
    /* TMASK defines different execution modes */
#define LARCH_HFLAG_TMASK 0x5F5807FF
    /*
     * The KSU flags must be the lowest bits in hflags. The flag order
     * must be the same as defined for CP0 Status. This allows to use
     * the bits as the value of mmu_idx.
     */
#define LARCH_HFLAG_KSU 0x00003 /* kernel/user mode mask */
#define LARCH_HFLAG_UM 0x00003  /* user mode flag */
#define LARCH_HFLAG_KM 0x00000  /* kernel mode flag */
#define LARCH_HFLAG_64 0x00008  /* 64-bit instructions enabled */
#define LARCH_HFLAG_FPU 0x00020 /* FPU enabled */
#define LARCH_HFLAG_AWRAP 0x00200 /* 32-bit compatibility address wrapping */
    /*
     * If translation is interrupted between the branch instruction and
     * the delay slot, record what type of branch it is so that we can
     * resume translation properly. It might be possible to reduce
     * this from three bits to two.
     */
#define LARCH_HFLAG_BMASK 0x03800
#define LARCH_HFLAG_B 0x00800  /* Unconditional branch */
#define LARCH_HFLAG_BC 0x01000 /* Conditional branch */
#define LARCH_HFLAG_BR 0x02000 /* branch to register (can't link TB) */
#define LARCH_HFLAG_LSX 0x1000000
#define LARCH_HFLAG_LASX 0x2000000
#define LARCH_HFLAG_LBT 0x40000000
    target_ulong btarget; /* Jump / branch target */
    target_ulong bcond;   /* Branch condition (if needed) */

    uint64_t insn_flags; /* Supported instruction set */
    int cpu_cfg[64];     /* cached CPUCFG words */

    /* Fields up to this point are cleared by a CPU reset */
    struct {
    } end_reset_fields;

    /* Fields from here on are preserved across CPU reset. */
#if !defined(CONFIG_USER_ONLY)
    CPULOONGARCHTLBContext *tlb;       /* TLB model, set up by mmu_init() */
#endif

    const loongarch_def_t *cpu_model;
    void *irq[N_IRQS];
    QEMUTimer *timer;       /* Internal timer */
    MemoryRegion *itc_tag;  /* ITC Configuration Tags */
    target_ulong exception_base; /* ExceptionBase input to the core */
    struct {
        uint64_t guest_addr;
    } st;
};
|
|
+
|
|
+/*
|
|
+ * CPU can't have 0xFFFFFFFF APIC ID, use that value to distinguish
|
|
+ * that ID hasn't been set yet
|
|
+ */
|
|
+#define UNASSIGNED_CPU_ID 0xFFFFFFFF
|
|
+
|
|
+/**
|
|
+ * LOONGARCHCPU:
|
|
+ * @env: #CPULOONGARCHState
|
|
+ *
|
|
+ * A LOONGARCH CPU.
|
|
+ */
|
|
struct LOONGARCHCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/
    CPUNegativeOffsetState neg;
    CPULOONGARCHState env;
    int32_t id;                 /* arch CPU id ("id" property) */
    int hotplugged;             /* set when the CPU was hot-added */
    uint8_t online_vcpus;
    uint8_t is_migrate;
    uint64_t counter_value;     /* saved stable-counter value */
    uint32_t cpu_freq;          /* CPU frequency in Hz */
    uint32_t count_ctl;
    uint64_t pending_exceptions;
    uint64_t pending_exceptions_clr;
    uint64_t core_ext_ioisr[4]; /* extioi per-core interrupt state */
    VMChangeStateEntry *cpuStateEntry;
    int32_t node_id; /* NUMA node this CPU belongs to */
    int32_t core_id; /* physical core index ("core-id" property) */
    struct kvm_msrs *kvm_csr_buf; /* scratch buffer for KVM CSR get/set */
    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;
};
|
|
+
|
|
/* Recover the owning LOONGARCHCPU object from its embedded env pointer. */
static inline LOONGARCHCPU *loongarch_env_get_cpu(CPULOONGARCHState *env)
{
    return container_of(env, LOONGARCHCPU, env);
}
|
|
+
|
|
+#define ENV_GET_CPU(e) CPU(loongarch_env_get_cpu(e))
|
|
+
|
|
+#define ENV_OFFSET offsetof(LOONGARCHCPU, env)
|
|
+
|
|
+void loongarch_cpu_list(void);
|
|
+
|
|
+#define cpu_signal_handler cpu_loongarch_signal_handler
|
|
+#define cpu_list loongarch_cpu_list
|
|
+
|
|
+/*
|
|
+ * MMU modes definitions. We carefully match the indices with our
|
|
+ * hflags layout.
|
|
+ */
|
|
+#define MMU_MODE0_SUFFIX _kernel
|
|
+#define MMU_MODE1_SUFFIX _super
|
|
+#define MMU_MODE2_SUFFIX _user
|
|
+#define MMU_MODE3_SUFFIX _error
|
|
+#define MMU_USER_IDX 3
|
|
+
|
|
/* Derive the mmu_idx directly from the KSU bits of hflags. */
static inline int hflags_mmu_index(uint32_t hflags)
{
    return hflags & LARCH_HFLAG_KSU;
}
|
|
+
|
|
/* MMU index for the current privilege level; ifetch is ignored. */
static inline int cpu_mmu_index(CPULOONGARCHState *env, bool ifetch)
{
    return hflags_mmu_index(env->hflags);
}
|
|
+
|
|
+#include "exec/cpu-all.h"
|
|
+
|
|
+/*
|
|
+ * Memory access type :
|
|
+ * may be needed for precise access rights control and precise exceptions.
|
|
+ */
|
|
enum {
    /* 1 bit to define user level / supervisor access */
    ACCESS_USER = 0x00,
    ACCESS_SUPER = 0x01,
    /* 1 bit to indicate direction */
    ACCESS_STORE = 0x02,
    /* Type of instruction that generated the access */
    ACCESS_CODE = 0x10,  /* Code fetch access */
    ACCESS_INT = 0x20,   /* Integer load/store access */
    ACCESS_FLOAT = 0x30, /* floating point load/store access */
};
|
|
+
|
|
+/* Exceptions */
|
|
/* Exception numbers delivered through cs->exception_index. */
enum {
    EXCP_NONE = -1,
    EXCP_RESET = 0,
    EXCP_SRESET,
    EXCP_DINT,
    EXCP_NMI,
    EXCP_EXT_INTERRUPT,
    EXCP_AdEL,        /* address error on load/fetch */
    EXCP_AdES,        /* address error on store */
    EXCP_TLBF,
    EXCP_IBE,
    EXCP_SYSCALL,
    EXCP_BREAK,
    EXCP_FPDIS,       /* FPU disabled */
    EXCP_LSXDIS,      /* LSX disabled */
    EXCP_LASXDIS,     /* LASX disabled */
    EXCP_RI,          /* reserved instruction */
    EXCP_OVERFLOW,
    EXCP_TRAP,
    EXCP_FPE,         /* floating point exception */
    EXCP_LTLBL,
    EXCP_TLBL,        /* TLB miss on load */
    EXCP_TLBS,        /* TLB miss on store */
    EXCP_DBE,
    EXCP_TLBXI,
    EXCP_TLBRI,
    EXCP_TLBPE,
    EXCP_BTDIS,       /* binary-translation extension disabled */

    EXCP_LAST = EXCP_BTDIS,
};
|
|
+
|
|
+/*
|
|
+ * This is an internally generated WAKE request line.
|
|
+ * It is driven by the CPU itself. Raised when the MT
|
|
+ * block wants to wake a VPE from an inactive state and
|
|
+ * cleared when VPE goes from active to inactive.
|
|
+ */
|
|
+#define CPU_INTERRUPT_WAKE CPU_INTERRUPT_TGT_INT_0
|
|
+
|
|
+int cpu_loongarch_signal_handler(int host_signum, void *pinfo, void *puc);
|
|
+
|
|
+#define LOONGARCH_CPU_TYPE_SUFFIX "-" TYPE_LOONGARCH_CPU
|
|
+#define LOONGARCH_CPU_TYPE_NAME(model) model LOONGARCH_CPU_TYPE_SUFFIX
|
|
+#define CPU_RESOLVING_TYPE TYPE_LOONGARCH_CPU
|
|
+
|
|
+/* helper.c */
|
|
+target_ulong exception_resume_pc(CPULOONGARCHState *env);
|
|
+
|
|
+/* gdbstub.c */
|
|
+void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs);
|
|
+void mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def);
|
|
+
|
|
/*
 * Produce the (pc, cs_base, flags) triple used to look up/compile a TB.
 * Only the execution-mode and branch-state hflag bits take part in the
 * TB hash; cs_base is unused on this target.
 */
static inline void cpu_get_tb_cpu_state(CPULOONGARCHState *env,
                                        target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    *pc = env->active_tc.PC;
    *cs_base = 0;
    *flags = env->hflags & (LARCH_HFLAG_TMASK | LARCH_HFLAG_BMASK);
}
|
|
+
|
|
/* True while handling a TLB-refill exception (TLBRERA bit 0 = IsTLBR). */
static inline bool cpu_refill_state(CPULOONGARCHState *env)
{
    return env->CSR_TLBRERA & 0x1;
}
|
|
+
|
|
+extern const char *const regnames[];
|
|
+extern const char *const fregnames[];
|
|
+#endif /* LOONGARCH_CPU_H */
|
|
diff --git a/target/loongarch64/csr_helper.c b/target/loongarch64/csr_helper.c
|
|
new file mode 100644
|
|
index 0000000000..093e7e54d8
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/csr_helper.c
|
|
@@ -0,0 +1,697 @@
|
|
+/*
|
|
+ * loongarch tlb emulation helpers for qemu.
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "qemu/main-loop.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "qemu/host-utils.h"
|
|
+#include "exec/helper-proto.h"
|
|
+#include "exec/exec-all.h"
|
|
+#include "exec/cpu_ldst.h"
|
|
+#include "sysemu/kvm.h"
|
|
+#include "hw/irq.h"
|
|
+#include "cpu-csr.h"
|
|
+#include "instmap.h"
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
/*
 * CSRRD helper: read a CSR by number.  Most registers are plain loads of
 * the mirrored env->CSR_* field; TVAL is derived from the stable timer.
 * Unknown CSR numbers abort.  hflags are recomputed before returning.
 */
target_ulong helper_csr_rdq(CPULOONGARCHState *env, uint64_t csr)
{
    int64_t v;

/* Plain read: fetch the mirrored CSR field from env. */
#define CASE_CSR_RDQ(csr)                                                     \
    case LOONGARCH_CSR_##csr: {                                               \
        v = env->CSR_##csr;                                                   \
        break;                                                                \
    };

    switch (csr) {
        CASE_CSR_RDQ(CRMD)
        CASE_CSR_RDQ(PRMD)
        CASE_CSR_RDQ(EUEN)
        CASE_CSR_RDQ(MISC)
        CASE_CSR_RDQ(ECFG)
        CASE_CSR_RDQ(ESTAT)
        CASE_CSR_RDQ(ERA)
        CASE_CSR_RDQ(BADV)
        CASE_CSR_RDQ(BADI)
        CASE_CSR_RDQ(EEPN)
        CASE_CSR_RDQ(TLBIDX)
        CASE_CSR_RDQ(TLBEHI)
        CASE_CSR_RDQ(TLBELO0)
        CASE_CSR_RDQ(TLBELO1)
        CASE_CSR_RDQ(TLBWIRED)
        CASE_CSR_RDQ(GTLBC)
        CASE_CSR_RDQ(TRGP)
        CASE_CSR_RDQ(ASID)
        CASE_CSR_RDQ(PGDL)
        CASE_CSR_RDQ(PGDH)
        CASE_CSR_RDQ(PGD)
        CASE_CSR_RDQ(PWCTL0)
        CASE_CSR_RDQ(PWCTL1)
        CASE_CSR_RDQ(STLBPGSIZE)
        CASE_CSR_RDQ(RVACFG)
        CASE_CSR_RDQ(CPUID)
        CASE_CSR_RDQ(PRCFG1)
        CASE_CSR_RDQ(PRCFG2)
        CASE_CSR_RDQ(PRCFG3)
        CASE_CSR_RDQ(KS0)
        CASE_CSR_RDQ(KS1)
        CASE_CSR_RDQ(KS2)
        CASE_CSR_RDQ(KS3)
        CASE_CSR_RDQ(KS4)
        CASE_CSR_RDQ(KS5)
        CASE_CSR_RDQ(KS6)
        CASE_CSR_RDQ(KS7)
        CASE_CSR_RDQ(KS8)
        CASE_CSR_RDQ(TMID)
        CASE_CSR_RDQ(TCFG)
    case LOONGARCH_CSR_TVAL:
        /* TVAL is live: compute the remaining ticks from the timer. */
        v = cpu_loongarch_get_stable_timer_ticks(env);
        break;
        CASE_CSR_RDQ(CNTC)
        CASE_CSR_RDQ(TINTCLR)
        CASE_CSR_RDQ(GSTAT)
        CASE_CSR_RDQ(GCFG)
        CASE_CSR_RDQ(GINTC)
        CASE_CSR_RDQ(GCNTC)
        CASE_CSR_RDQ(LLBCTL)
        CASE_CSR_RDQ(IMPCTL1)
        CASE_CSR_RDQ(IMPCTL2)
        CASE_CSR_RDQ(GNMI)
        CASE_CSR_RDQ(TLBRENT)
        CASE_CSR_RDQ(TLBRBADV)
        CASE_CSR_RDQ(TLBRERA)
        CASE_CSR_RDQ(TLBRSAVE)
        CASE_CSR_RDQ(TLBRELO0)
        CASE_CSR_RDQ(TLBRELO1)
        CASE_CSR_RDQ(TLBREHI)
        CASE_CSR_RDQ(TLBRPRMD)
        CASE_CSR_RDQ(ERRCTL)
        CASE_CSR_RDQ(ERRINFO)
        CASE_CSR_RDQ(ERRINFO1)
        CASE_CSR_RDQ(ERRENT)
        CASE_CSR_RDQ(ERRERA)
        CASE_CSR_RDQ(ERRSAVE)
        CASE_CSR_RDQ(CTAG)
        CASE_CSR_RDQ(DMWIN0)
        CASE_CSR_RDQ(DMWIN1)
        CASE_CSR_RDQ(DMWIN2)
        CASE_CSR_RDQ(DMWIN3)
        CASE_CSR_RDQ(PERFCTRL0)
        CASE_CSR_RDQ(PERFCNTR0)
        CASE_CSR_RDQ(PERFCTRL1)
        CASE_CSR_RDQ(PERFCNTR1)
        CASE_CSR_RDQ(PERFCTRL2)
        CASE_CSR_RDQ(PERFCNTR2)
        CASE_CSR_RDQ(PERFCTRL3)
        CASE_CSR_RDQ(PERFCNTR3)
        /* debug */
        CASE_CSR_RDQ(MWPC)
        CASE_CSR_RDQ(MWPS)
        CASE_CSR_RDQ(DB0ADDR)
        CASE_CSR_RDQ(DB0MASK)
        CASE_CSR_RDQ(DB0CTL)
        CASE_CSR_RDQ(DB0ASID)
        CASE_CSR_RDQ(DB1ADDR)
        CASE_CSR_RDQ(DB1MASK)
        CASE_CSR_RDQ(DB1CTL)
        CASE_CSR_RDQ(DB1ASID)
        CASE_CSR_RDQ(DB2ADDR)
        CASE_CSR_RDQ(DB2MASK)
        CASE_CSR_RDQ(DB2CTL)
        CASE_CSR_RDQ(DB2ASID)
        CASE_CSR_RDQ(DB3ADDR)
        CASE_CSR_RDQ(DB3MASK)
        CASE_CSR_RDQ(DB3CTL)
        CASE_CSR_RDQ(DB3ASID)
        CASE_CSR_RDQ(FWPC)
        CASE_CSR_RDQ(FWPS)
        CASE_CSR_RDQ(IB0ADDR)
        CASE_CSR_RDQ(IB0MASK)
        CASE_CSR_RDQ(IB0CTL)
        CASE_CSR_RDQ(IB0ASID)
        CASE_CSR_RDQ(IB1ADDR)
        CASE_CSR_RDQ(IB1MASK)
        CASE_CSR_RDQ(IB1CTL)
        CASE_CSR_RDQ(IB1ASID)
        CASE_CSR_RDQ(IB2ADDR)
        CASE_CSR_RDQ(IB2MASK)
        CASE_CSR_RDQ(IB2CTL)
        CASE_CSR_RDQ(IB2ASID)
        CASE_CSR_RDQ(IB3ADDR)
        CASE_CSR_RDQ(IB3MASK)
        CASE_CSR_RDQ(IB3CTL)
        CASE_CSR_RDQ(IB3ASID)
        CASE_CSR_RDQ(IB4ADDR)
        CASE_CSR_RDQ(IB4MASK)
        CASE_CSR_RDQ(IB4CTL)
        CASE_CSR_RDQ(IB4ASID)
        CASE_CSR_RDQ(IB5ADDR)
        CASE_CSR_RDQ(IB5MASK)
        CASE_CSR_RDQ(IB5CTL)
        CASE_CSR_RDQ(IB5ASID)
        CASE_CSR_RDQ(IB6ADDR)
        CASE_CSR_RDQ(IB6MASK)
        CASE_CSR_RDQ(IB6CTL)
        CASE_CSR_RDQ(IB6ASID)
        CASE_CSR_RDQ(IB7ADDR)
        CASE_CSR_RDQ(IB7MASK)
        CASE_CSR_RDQ(IB7CTL)
        CASE_CSR_RDQ(IB7ASID)
        CASE_CSR_RDQ(DEBUG)
        CASE_CSR_RDQ(DERA)
        CASE_CSR_RDQ(DESAVE)
    default :
        assert(0);
    }

#undef CASE_CSR_RDQ
    compute_hflags(env);
    return v;
}
|
|
+
|
|
/*
 * CSRWR helper: write 'val' to a CSR and return the previous value.
 * TCFG, TINTCLR and IMPCTL2 have side effects instead of plain stores;
 * a change of ASID flushes the QEMU TLB.  Unknown CSR numbers abort.
 */
target_ulong helper_csr_wrq(CPULOONGARCHState *env, target_ulong val,
                            uint64_t csr)
{
    int64_t old_v, v;
    old_v = -1;
    v = val;

/* Plain write: store into the mirrored CSR field, return the old value. */
#define CASE_CSR_WRQ(csr)                                                     \
    case LOONGARCH_CSR_##csr: {                                               \
        old_v = env->CSR_##csr;                                               \
        env->CSR_##csr = v;                                                   \
        break;                                                                \
    };

    switch (csr) {
        CASE_CSR_WRQ(CRMD)
        CASE_CSR_WRQ(PRMD)
        CASE_CSR_WRQ(EUEN)
        CASE_CSR_WRQ(MISC)
        CASE_CSR_WRQ(ECFG)
        CASE_CSR_WRQ(ESTAT)
        CASE_CSR_WRQ(ERA)
        CASE_CSR_WRQ(BADV)
        CASE_CSR_WRQ(BADI)
        CASE_CSR_WRQ(EEPN)
        CASE_CSR_WRQ(TLBIDX)
        CASE_CSR_WRQ(TLBEHI)
        CASE_CSR_WRQ(TLBELO0)
        CASE_CSR_WRQ(TLBELO1)
        CASE_CSR_WRQ(TLBWIRED)
        CASE_CSR_WRQ(GTLBC)
        CASE_CSR_WRQ(TRGP)
        CASE_CSR_WRQ(ASID)
        CASE_CSR_WRQ(PGDL)
        CASE_CSR_WRQ(PGDH)
        CASE_CSR_WRQ(PGD)
        CASE_CSR_WRQ(PWCTL0)
        CASE_CSR_WRQ(PWCTL1)
        CASE_CSR_WRQ(STLBPGSIZE)
        CASE_CSR_WRQ(RVACFG)
        CASE_CSR_WRQ(CPUID)
        CASE_CSR_WRQ(PRCFG1)
        CASE_CSR_WRQ(PRCFG2)
        CASE_CSR_WRQ(PRCFG3)
        CASE_CSR_WRQ(KS0)
        CASE_CSR_WRQ(KS1)
        CASE_CSR_WRQ(KS2)
        CASE_CSR_WRQ(KS3)
        CASE_CSR_WRQ(KS4)
        CASE_CSR_WRQ(KS5)
        CASE_CSR_WRQ(KS6)
        CASE_CSR_WRQ(KS7)
        CASE_CSR_WRQ(KS8)
        CASE_CSR_WRQ(TMID)
    case LOONGARCH_CSR_TCFG:
        /* Writing TCFG (re)arms the stable timer. */
        old_v = env->CSR_TCFG;
        cpu_loongarch_store_stable_timer_config(env, v);
        break;
        CASE_CSR_WRQ(TVAL)
        CASE_CSR_WRQ(CNTC)
    case LOONGARCH_CSR_TINTCLR:
        /* Write-only: any write acknowledges the timer interrupt. */
        old_v = 0;
        qemu_irq_lower(env->irq[IRQ_TIMER]);
        break;
        CASE_CSR_WRQ(GSTAT)
        CASE_CSR_WRQ(GCFG)
        CASE_CSR_WRQ(GINTC)
        CASE_CSR_WRQ(GCNTC)
        CASE_CSR_WRQ(LLBCTL)
        CASE_CSR_WRQ(IMPCTL1)
    case LOONGARCH_CSR_IMPCTL2:
        /*
         * Flush-command register: bits request VTLB/FTLB invalidation.
         * NOTE(review): old_v is left at -1 and the value is not stored;
         * confirm that returning -1 here is intended.
         */
        if (v & CSR_IMPCTL2_MTLB) {
            ls3a5k_flush_vtlb(env);
        }
        if (v & CSR_IMPCTL2_STLB) {
            ls3a5k_flush_ftlb(env);
        }
        break;
        CASE_CSR_WRQ(GNMI)
        CASE_CSR_WRQ(TLBRENT)
        CASE_CSR_WRQ(TLBRBADV)
        CASE_CSR_WRQ(TLBRERA)
        CASE_CSR_WRQ(TLBRSAVE)
        CASE_CSR_WRQ(TLBRELO0)
        CASE_CSR_WRQ(TLBRELO1)
        CASE_CSR_WRQ(TLBREHI)
        CASE_CSR_WRQ(TLBRPRMD)
        CASE_CSR_WRQ(ERRCTL)
        CASE_CSR_WRQ(ERRINFO)
        CASE_CSR_WRQ(ERRINFO1)
        CASE_CSR_WRQ(ERRENT)
        CASE_CSR_WRQ(ERRERA)
        CASE_CSR_WRQ(ERRSAVE)
        CASE_CSR_WRQ(CTAG)
        CASE_CSR_WRQ(DMWIN0)
        CASE_CSR_WRQ(DMWIN1)
        CASE_CSR_WRQ(DMWIN2)
        CASE_CSR_WRQ(DMWIN3)
        CASE_CSR_WRQ(PERFCTRL0)
        CASE_CSR_WRQ(PERFCNTR0)
        CASE_CSR_WRQ(PERFCTRL1)
        CASE_CSR_WRQ(PERFCNTR1)
        CASE_CSR_WRQ(PERFCTRL2)
        CASE_CSR_WRQ(PERFCNTR2)
        CASE_CSR_WRQ(PERFCTRL3)
        CASE_CSR_WRQ(PERFCNTR3)
        /* debug */
        CASE_CSR_WRQ(MWPC)
        CASE_CSR_WRQ(MWPS)
        CASE_CSR_WRQ(DB0ADDR)
        CASE_CSR_WRQ(DB0MASK)
        CASE_CSR_WRQ(DB0CTL)
        CASE_CSR_WRQ(DB0ASID)
        CASE_CSR_WRQ(DB1ADDR)
        CASE_CSR_WRQ(DB1MASK)
        CASE_CSR_WRQ(DB1CTL)
        CASE_CSR_WRQ(DB1ASID)
        CASE_CSR_WRQ(DB2ADDR)
        CASE_CSR_WRQ(DB2MASK)
        CASE_CSR_WRQ(DB2CTL)
        CASE_CSR_WRQ(DB2ASID)
        CASE_CSR_WRQ(DB3ADDR)
        CASE_CSR_WRQ(DB3MASK)
        CASE_CSR_WRQ(DB3CTL)
        CASE_CSR_WRQ(DB3ASID)
        CASE_CSR_WRQ(FWPC)
        CASE_CSR_WRQ(FWPS)
        CASE_CSR_WRQ(IB0ADDR)
        CASE_CSR_WRQ(IB0MASK)
        CASE_CSR_WRQ(IB0CTL)
        CASE_CSR_WRQ(IB0ASID)
        CASE_CSR_WRQ(IB1ADDR)
        CASE_CSR_WRQ(IB1MASK)
        CASE_CSR_WRQ(IB1CTL)
        CASE_CSR_WRQ(IB1ASID)
        CASE_CSR_WRQ(IB2ADDR)
        CASE_CSR_WRQ(IB2MASK)
        CASE_CSR_WRQ(IB2CTL)
        CASE_CSR_WRQ(IB2ASID)
        CASE_CSR_WRQ(IB3ADDR)
        CASE_CSR_WRQ(IB3MASK)
        CASE_CSR_WRQ(IB3CTL)
        CASE_CSR_WRQ(IB3ASID)
        CASE_CSR_WRQ(IB4ADDR)
        CASE_CSR_WRQ(IB4MASK)
        CASE_CSR_WRQ(IB4CTL)
        CASE_CSR_WRQ(IB4ASID)
        CASE_CSR_WRQ(IB5ADDR)
        CASE_CSR_WRQ(IB5MASK)
        CASE_CSR_WRQ(IB5CTL)
        CASE_CSR_WRQ(IB5ASID)
        CASE_CSR_WRQ(IB6ADDR)
        CASE_CSR_WRQ(IB6MASK)
        CASE_CSR_WRQ(IB6CTL)
        CASE_CSR_WRQ(IB6ASID)
        CASE_CSR_WRQ(IB7ADDR)
        CASE_CSR_WRQ(IB7MASK)
        CASE_CSR_WRQ(IB7CTL)
        CASE_CSR_WRQ(IB7ASID)
        CASE_CSR_WRQ(DEBUG)
        CASE_CSR_WRQ(DERA)
        CASE_CSR_WRQ(DESAVE)
    default :
        assert(0);
    }

    /* An address-space switch invalidates all cached translations. */
    if (csr == LOONGARCH_CSR_ASID) {
        if (old_v != v) {
            tlb_flush(CPU(loongarch_env_get_cpu(env)));
        }
    }

#undef CASE_CSR_WRQ
    compute_hflags(env);
    return old_v;
}
|
|
+
|
|
/*
 * CSRXCHG helper: replace the bits of a CSR selected by 'mask' with the
 * corresponding bits of 'val' and return the previous full value.
 * ESTAT is updated atomically (it can also be written by IRQ delivery);
 * TCFG write-through goes via the stable-timer helper.  Unknown CSR
 * numbers abort.
 */
target_ulong helper_csr_xchgq(CPULOONGARCHState *env, target_ulong val,
                              target_ulong mask, uint64_t csr)
{
    target_ulong v, tmp;
    v = val & mask;

/* Read-modify-write of the masked bits; 'val' receives the old value. */
#define CASE_CSR_XCHGQ(csr)                                                   \
    case LOONGARCH_CSR_##csr: {                                               \
        val = env->CSR_##csr;                                                 \
        env->CSR_##csr = (env->CSR_##csr) & (~mask);                          \
        env->CSR_##csr = (env->CSR_##csr) | v;                                \
        break;                                                                \
    };

    switch (csr) {
        CASE_CSR_XCHGQ(CRMD)
        CASE_CSR_XCHGQ(PRMD)
        CASE_CSR_XCHGQ(EUEN)
        CASE_CSR_XCHGQ(MISC)
        CASE_CSR_XCHGQ(ECFG)
    case LOONGARCH_CSR_ESTAT:
        /* ESTAT may be touched concurrently; use atomic and/or. */
        val = env->CSR_ESTAT;
        qatomic_and(&env->CSR_ESTAT, ~mask);
        qatomic_or(&env->CSR_ESTAT, v);
        break;
        CASE_CSR_XCHGQ(ERA)
        CASE_CSR_XCHGQ(BADV)
        CASE_CSR_XCHGQ(BADI)
        CASE_CSR_XCHGQ(EEPN)
        CASE_CSR_XCHGQ(TLBIDX)
        CASE_CSR_XCHGQ(TLBEHI)
        CASE_CSR_XCHGQ(TLBELO0)
        CASE_CSR_XCHGQ(TLBELO1)
        CASE_CSR_XCHGQ(TLBWIRED)
        CASE_CSR_XCHGQ(GTLBC)
        CASE_CSR_XCHGQ(TRGP)
        CASE_CSR_XCHGQ(ASID)
        CASE_CSR_XCHGQ(PGDL)
        CASE_CSR_XCHGQ(PGDH)
        CASE_CSR_XCHGQ(PGD)
        CASE_CSR_XCHGQ(PWCTL0)
        CASE_CSR_XCHGQ(PWCTL1)
        CASE_CSR_XCHGQ(STLBPGSIZE)
        CASE_CSR_XCHGQ(RVACFG)
        CASE_CSR_XCHGQ(CPUID)
        CASE_CSR_XCHGQ(PRCFG1)
        CASE_CSR_XCHGQ(PRCFG2)
        CASE_CSR_XCHGQ(PRCFG3)
        CASE_CSR_XCHGQ(KS0)
        CASE_CSR_XCHGQ(KS1)
        CASE_CSR_XCHGQ(KS2)
        CASE_CSR_XCHGQ(KS3)
        CASE_CSR_XCHGQ(KS4)
        CASE_CSR_XCHGQ(KS5)
        CASE_CSR_XCHGQ(KS6)
        CASE_CSR_XCHGQ(KS7)
        CASE_CSR_XCHGQ(KS8)
        CASE_CSR_XCHGQ(TMID)
    case LOONGARCH_CSR_TCFG:
        /* Merge the masked bits, then (re)arm the stable timer. */
        val = env->CSR_TCFG;
        tmp = val & ~mask;
        tmp |= v;
        cpu_loongarch_store_stable_timer_config(env, tmp);
        break;
        CASE_CSR_XCHGQ(TVAL)
        CASE_CSR_XCHGQ(CNTC)
        CASE_CSR_XCHGQ(TINTCLR)
        CASE_CSR_XCHGQ(GSTAT)
        CASE_CSR_XCHGQ(GCFG)
        CASE_CSR_XCHGQ(GINTC)
        CASE_CSR_XCHGQ(GCNTC)
        CASE_CSR_XCHGQ(LLBCTL)
        CASE_CSR_XCHGQ(IMPCTL1)
        CASE_CSR_XCHGQ(IMPCTL2)
        CASE_CSR_XCHGQ(GNMI)
        CASE_CSR_XCHGQ(TLBRENT)
        CASE_CSR_XCHGQ(TLBRBADV)
        CASE_CSR_XCHGQ(TLBRERA)
        CASE_CSR_XCHGQ(TLBRSAVE)
        CASE_CSR_XCHGQ(TLBRELO0)
        CASE_CSR_XCHGQ(TLBRELO1)
        CASE_CSR_XCHGQ(TLBREHI)
        CASE_CSR_XCHGQ(TLBRPRMD)
        CASE_CSR_XCHGQ(ERRCTL)
        CASE_CSR_XCHGQ(ERRINFO)
        CASE_CSR_XCHGQ(ERRINFO1)
        CASE_CSR_XCHGQ(ERRENT)
        CASE_CSR_XCHGQ(ERRERA)
        CASE_CSR_XCHGQ(ERRSAVE)
        CASE_CSR_XCHGQ(CTAG)
        CASE_CSR_XCHGQ(DMWIN0)
        CASE_CSR_XCHGQ(DMWIN1)
        CASE_CSR_XCHGQ(DMWIN2)
        CASE_CSR_XCHGQ(DMWIN3)
        CASE_CSR_XCHGQ(PERFCTRL0)
        CASE_CSR_XCHGQ(PERFCNTR0)
        CASE_CSR_XCHGQ(PERFCTRL1)
        CASE_CSR_XCHGQ(PERFCNTR1)
        CASE_CSR_XCHGQ(PERFCTRL2)
        CASE_CSR_XCHGQ(PERFCNTR2)
        CASE_CSR_XCHGQ(PERFCTRL3)
        CASE_CSR_XCHGQ(PERFCNTR3)
        /* debug */
        CASE_CSR_XCHGQ(MWPC)
        CASE_CSR_XCHGQ(MWPS)
        CASE_CSR_XCHGQ(DB0ADDR)
        CASE_CSR_XCHGQ(DB0MASK)
        CASE_CSR_XCHGQ(DB0CTL)
        CASE_CSR_XCHGQ(DB0ASID)
        CASE_CSR_XCHGQ(DB1ADDR)
        CASE_CSR_XCHGQ(DB1MASK)
        CASE_CSR_XCHGQ(DB1CTL)
        CASE_CSR_XCHGQ(DB1ASID)
        CASE_CSR_XCHGQ(DB2ADDR)
        CASE_CSR_XCHGQ(DB2MASK)
        CASE_CSR_XCHGQ(DB2CTL)
        CASE_CSR_XCHGQ(DB2ASID)
        CASE_CSR_XCHGQ(DB3ADDR)
        CASE_CSR_XCHGQ(DB3MASK)
        CASE_CSR_XCHGQ(DB3CTL)
        CASE_CSR_XCHGQ(DB3ASID)
        CASE_CSR_XCHGQ(FWPC)
        CASE_CSR_XCHGQ(FWPS)
        CASE_CSR_XCHGQ(IB0ADDR)
        CASE_CSR_XCHGQ(IB0MASK)
        CASE_CSR_XCHGQ(IB0CTL)
        CASE_CSR_XCHGQ(IB0ASID)
        CASE_CSR_XCHGQ(IB1ADDR)
        CASE_CSR_XCHGQ(IB1MASK)
        CASE_CSR_XCHGQ(IB1CTL)
        CASE_CSR_XCHGQ(IB1ASID)
        CASE_CSR_XCHGQ(IB2ADDR)
        CASE_CSR_XCHGQ(IB2MASK)
        CASE_CSR_XCHGQ(IB2CTL)
        CASE_CSR_XCHGQ(IB2ASID)
        CASE_CSR_XCHGQ(IB3ADDR)
        CASE_CSR_XCHGQ(IB3MASK)
        CASE_CSR_XCHGQ(IB3CTL)
        CASE_CSR_XCHGQ(IB3ASID)
        CASE_CSR_XCHGQ(IB4ADDR)
        CASE_CSR_XCHGQ(IB4MASK)
        CASE_CSR_XCHGQ(IB4CTL)
        CASE_CSR_XCHGQ(IB4ASID)
        CASE_CSR_XCHGQ(IB5ADDR)
        CASE_CSR_XCHGQ(IB5MASK)
        CASE_CSR_XCHGQ(IB5CTL)
        CASE_CSR_XCHGQ(IB5ASID)
        CASE_CSR_XCHGQ(IB6ADDR)
        CASE_CSR_XCHGQ(IB6MASK)
        CASE_CSR_XCHGQ(IB6CTL)
        CASE_CSR_XCHGQ(IB6ASID)
        CASE_CSR_XCHGQ(IB7ADDR)
        CASE_CSR_XCHGQ(IB7MASK)
        CASE_CSR_XCHGQ(IB7CTL)
        CASE_CSR_XCHGQ(IB7ASID)
        CASE_CSR_XCHGQ(DEBUG)
        CASE_CSR_XCHGQ(DERA)
        CASE_CSR_XCHGQ(DESAVE)
    default :
        assert(0);
    }

#undef CASE_CSR_XCHGQ
    compute_hflags(env);
    return val;
}
|
|
+
|
|
/*
 * Map a per-CPU IOCSR address to a physical configuration-bus address.
 * The node part of the address is derived from the cpuid and the number
 * of cores per node; a few address windows (IPI, extioi ISR, shared
 * extioi range) have dedicated physical bases.
 */
static target_ulong confbus_addr(CPULOONGARCHState *env, int cpuid,
                                 target_ulong csr_addr)
{
    target_ulong addr;
    target_ulong node_addr;
    /* NOTE(review): computed from a constant, always yields 4 cores/node. */
    int cores_per_node = ((0x60018 >> 3) & 0xff) + 1;

    switch (cores_per_node) {
    case 4:
        assert(cpuid < 64);
        node_addr = ((target_ulong)(cpuid & 0x3c) << 42);
        break;
    case 8:
        assert(cpuid < 128);
        node_addr = ((target_ulong)(cpuid & 0x78) << 41) +
                    ((target_ulong)(cpuid & 0x4) << 14);
        break;
    case 16:
        assert(cpuid < 256);
        node_addr = ((target_ulong)(cpuid & 0xf0) << 40) +
                    ((target_ulong)(cpuid & 0xc) << 14);
        break;
    default:
        assert(0);
        break;
    }

    /*
     * per core address
     * 0x10xx => ipi
     * 0x18xx => extioi isr
     */
    if (((csr_addr & 0xff00) == 0x1000)) {
        addr = (csr_addr & 0xff) + (target_ulong)(cpuid << 8);
        addr = 0x800000001f000000UL + addr;
        return addr;
    } else if ((csr_addr & 0xff00) == 0x1800) {
        addr = (csr_addr & 0xff) + ((target_ulong)(cpuid << 8));
        addr = 0x800000001f020000UL + addr;
        return addr;
    } else if ((csr_addr & 0xff00) >= 0x1400 && (csr_addr & 0xff00) < 0x1d00) {
        /* Shared extioi window: node-independent base. */
        addr = 0x800000001f010000UL + ((csr_addr & 0xfff) - 0x400);
        return addr;
    } else if (csr_addr == 0x408) {
        /* This register is not node-relative. */
        addr = csr_addr;
    } else {
        addr = csr_addr + node_addr;
    }

    addr = 0x800000001fe00000UL + addr;
    return addr;
}
|
|
+
|
|
+void helper_iocsr(CPULOONGARCHState *env, target_ulong r_addr,
|
|
+ target_ulong r_val, uint32_t op)
|
|
+{
|
|
+ target_ulong addr;
|
|
+ target_ulong val = env->active_tc.gpr[r_val];
|
|
+ int mask;
|
|
+
|
|
+ addr = confbus_addr(env, CPU(loongarch_env_get_cpu(env))->cpu_index,
|
|
+ env->active_tc.gpr[r_addr]);
|
|
+
|
|
+ switch (env->active_tc.gpr[r_addr]) {
|
|
+ /* IPI send */
|
|
+ case 0x1040:
|
|
+ if (op != OPC_LARCH_ST_W) {
|
|
+ return;
|
|
+ }
|
|
+ op = OPC_LARCH_ST_W;
|
|
+ break;
|
|
+
|
|
+ /* Mail send */
|
|
+ case 0x1048:
|
|
+ if (op != OPC_LARCH_ST_D) {
|
|
+ return;
|
|
+ }
|
|
+ op = OPC_LARCH_ST_D;
|
|
+ break;
|
|
+
|
|
+ /* ANY send */
|
|
+ case 0x1158:
|
|
+ if (op != OPC_LARCH_ST_D) {
|
|
+ return;
|
|
+ }
|
|
+ addr = confbus_addr(env, (val >> 16) & 0x3ff, val & 0xffff);
|
|
+ mask = (val >> 27) & 0xf;
|
|
+ val = (val >> 32);
|
|
+ switch (mask) {
|
|
+ case 0:
|
|
+ op = OPC_LARCH_ST_W;
|
|
+ break;
|
|
+ case 0x7:
|
|
+ op = OPC_LARCH_ST_B;
|
|
+ addr += 3;
|
|
+ val >>= 24;
|
|
+ break;
|
|
+ case 0xb:
|
|
+ op = OPC_LARCH_ST_B;
|
|
+ addr += 2;
|
|
+ val >>= 16;
|
|
+ break;
|
|
+ case 0xd:
|
|
+ op = OPC_LARCH_ST_B;
|
|
+ addr += 1;
|
|
+ val >>= 8;
|
|
+ break;
|
|
+ case 0xe:
|
|
+ op = OPC_LARCH_ST_B;
|
|
+ break;
|
|
+ case 0xc:
|
|
+ op = OPC_LARCH_ST_H;
|
|
+ break;
|
|
+ case 0x3:
|
|
+ op = OPC_LARCH_ST_H;
|
|
+ addr += 2;
|
|
+ val >>= 16;
|
|
+ break;
|
|
+ default:
|
|
+ qemu_log("Unsupported any_send mask0x%x\n", mask);
|
|
+ break;
|
|
+ }
|
|
+ break;
|
|
+
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ switch (op) {
|
|
+ case OPC_LARCH_LD_D:
|
|
+ env->active_tc.gpr[r_val] = cpu_ldq_data_ra(env, addr, GETPC());
|
|
+ break;
|
|
+ case OPC_LARCH_LD_W:
|
|
+ env->active_tc.gpr[r_val] = cpu_ldl_data_ra(env, addr, GETPC());
|
|
+ break;
|
|
+ case OPC_LARCH_LD_H:
|
|
+ assert(0);
|
|
+ break;
|
|
+ case OPC_LARCH_LD_B:
|
|
+ assert(0);
|
|
+ break;
|
|
+ case OPC_LARCH_ST_D:
|
|
+ cpu_stq_data_ra(env, addr, val, GETPC());
|
|
+ break;
|
|
+ case OPC_LARCH_ST_W:
|
|
+ cpu_stl_data_ra(env, addr, val, GETPC());
|
|
+ break;
|
|
+ case OPC_LARCH_ST_H:
|
|
+ cpu_stb_data_ra(env, addr, val, GETPC());
|
|
+ break;
|
|
+ case OPC_LARCH_ST_B:
|
|
+ cpu_stb_data_ra(env, addr, val, GETPC());
|
|
+ break;
|
|
+ default:
|
|
+ qemu_log("Unknown op 0x%x", op);
|
|
+ assert(0);
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+target_ulong helper_cpucfg(CPULOONGARCHState *env, target_ulong rj)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
diff --git a/target/loongarch64/fpu.c b/target/loongarch64/fpu.c
|
|
new file mode 100644
|
|
index 0000000000..f063c8bae0
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/fpu.c
|
|
@@ -0,0 +1,25 @@
|
|
+/*
|
|
+ * loongarch float point emulation helpers for qemu.
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "fpu/softfloat.h"
|
|
+
|
|
+/* convert loongarch rounding mode in fcsr0 to IEEE library */
|
|
+unsigned int ieee_rm[] = { float_round_nearest_even, float_round_to_zero,
|
|
+ float_round_up, float_round_down };
|
|
diff --git a/target/loongarch64/fpu_helper.c b/target/loongarch64/fpu_helper.c
|
|
new file mode 100644
|
|
index 0000000000..033bf0de84
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/fpu_helper.c
|
|
@@ -0,0 +1,891 @@
|
|
+/*
|
|
+ * loongarch float point emulation helpers for qemu.
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "qemu/host-utils.h"
|
|
+#include "exec/helper-proto.h"
|
|
+#include "exec/exec-all.h"
|
|
+#include "fpu/softfloat.h"
|
|
+
|
|
+#define FP_TO_INT32_OVERFLOW 0x7fffffff
|
|
+#define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
|
|
+
|
|
+#define FLOAT_CLASS_SIGNALING_NAN 0x001
|
|
+#define FLOAT_CLASS_QUIET_NAN 0x002
|
|
+#define FLOAT_CLASS_NEGATIVE_INFINITY 0x004
|
|
+#define FLOAT_CLASS_NEGATIVE_NORMAL 0x008
|
|
+#define FLOAT_CLASS_NEGATIVE_SUBNORMAL 0x010
|
|
+#define FLOAT_CLASS_NEGATIVE_ZERO 0x020
|
|
+#define FLOAT_CLASS_POSITIVE_INFINITY 0x040
|
|
+#define FLOAT_CLASS_POSITIVE_NORMAL 0x080
|
|
+#define FLOAT_CLASS_POSITIVE_SUBNORMAL 0x100
|
|
+#define FLOAT_CLASS_POSITIVE_ZERO 0x200
|
|
+
|
|
+target_ulong helper_movfcsr2gr(CPULOONGARCHState *env, uint32_t reg)
|
|
+{
|
|
+ target_ulong r = 0;
|
|
+
|
|
+ switch (reg) {
|
|
+ case 0:
|
|
+ r = (uint32_t)env->active_fpu.fcsr0;
|
|
+ break;
|
|
+ case 1:
|
|
+ r = (env->active_fpu.fcsr0 & FCSR0_M1);
|
|
+ break;
|
|
+ case 2:
|
|
+ r = (env->active_fpu.fcsr0 & FCSR0_M2);
|
|
+ break;
|
|
+ case 3:
|
|
+ r = (env->active_fpu.fcsr0 & FCSR0_M3);
|
|
+ break;
|
|
+ case 16:
|
|
+ r = (uint32_t)env->active_fpu.vcsr16;
|
|
+ break;
|
|
+ default:
|
|
+ printf("%s: warning, fcsr '%d' not supported\n", __func__, reg);
|
|
+ assert(0);
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return r;
|
|
+}
|
|
+
|
|
+void helper_movgr2fcsr(CPULOONGARCHState *env, target_ulong arg1,
|
|
+ uint32_t fcsr, uint32_t rj)
|
|
+{
|
|
+ switch (fcsr) {
|
|
+ case 0:
|
|
+ env->active_fpu.fcsr0 = arg1;
|
|
+ break;
|
|
+ case 1:
|
|
+ env->active_fpu.fcsr0 =
|
|
+ (arg1 & FCSR0_M1) | (env->active_fpu.fcsr0 & ~FCSR0_M1);
|
|
+ break;
|
|
+ case 2:
|
|
+ env->active_fpu.fcsr0 =
|
|
+ (arg1 & FCSR0_M2) | (env->active_fpu.fcsr0 & ~FCSR0_M2);
|
|
+ break;
|
|
+ case 3:
|
|
+ env->active_fpu.fcsr0 =
|
|
+ (arg1 & FCSR0_M3) | (env->active_fpu.fcsr0 & ~FCSR0_M3);
|
|
+ break;
|
|
+ case 16:
|
|
+ env->active_fpu.vcsr16 = arg1;
|
|
+ break;
|
|
+ default:
|
|
+ printf("%s: warning, fcsr '%d' not supported\n", __func__, fcsr);
|
|
+ assert(0);
|
|
+ break;
|
|
+ }
|
|
+ restore_fp_status(env);
|
|
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
|
|
+}
|
|
+
|
|
+void helper_movreg2cf(CPULOONGARCHState *env, uint32_t cd, target_ulong src)
|
|
+{
|
|
+ env->active_fpu.cf[cd & 0x7] = src & 0x1;
|
|
+}
|
|
+
|
|
+void helper_movreg2cf_i32(CPULOONGARCHState *env, uint32_t cd, uint32_t src)
|
|
+{
|
|
+ env->active_fpu.cf[cd & 0x7] = src & 0x1;
|
|
+}
|
|
+
|
|
+void helper_movreg2cf_i64(CPULOONGARCHState *env, uint32_t cd, uint64_t src)
|
|
+{
|
|
+ env->active_fpu.cf[cd & 0x7] = src & 0x1;
|
|
+}
|
|
+
|
|
+target_ulong helper_movcf2reg(CPULOONGARCHState *env, uint32_t cj)
|
|
+{
|
|
+ return (target_ulong)env->active_fpu.cf[cj & 0x7];
|
|
+}
|
|
+
|
|
+int ieee_ex_to_loongarch(int xcpt)
|
|
+{
|
|
+ int ret = 0;
|
|
+ if (xcpt) {
|
|
+ if (xcpt & float_flag_invalid) {
|
|
+ ret |= FP_INVALID;
|
|
+ }
|
|
+ if (xcpt & float_flag_overflow) {
|
|
+ ret |= FP_OVERFLOW;
|
|
+ }
|
|
+ if (xcpt & float_flag_underflow) {
|
|
+ ret |= FP_UNDERFLOW;
|
|
+ }
|
|
+ if (xcpt & float_flag_divbyzero) {
|
|
+ ret |= FP_DIV0;
|
|
+ }
|
|
+ if (xcpt & float_flag_inexact) {
|
|
+ ret |= FP_INEXACT;
|
|
+ }
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static inline void update_fcsr0(CPULOONGARCHState *env, uintptr_t pc)
|
|
+{
|
|
+ int tmp = ieee_ex_to_loongarch(
|
|
+ get_float_exception_flags(&env->active_fpu.fp_status));
|
|
+
|
|
+ SET_FP_CAUSE(env->active_fpu.fcsr0, tmp);
|
|
+ if (tmp) {
|
|
+ set_float_exception_flags(0, &env->active_fpu.fp_status);
|
|
+
|
|
+ if (GET_FP_ENABLE(env->active_fpu.fcsr0) & tmp) {
|
|
+ do_raise_exception(env, EXCP_FPE, pc);
|
|
+ } else {
|
|
+ UPDATE_FP_FLAGS(env->active_fpu.fcsr0, tmp);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+/* unary operations, modifying fp status */
|
|
+uint64_t helper_float_sqrt_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fdt0;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_sqrt_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fst0;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_cvtd_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint64_t fdt2;
|
|
+
|
|
+ fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fdt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_cvtd_w(CPULOONGARCHState *env, uint32_t wt0)
|
|
+{
|
|
+ uint64_t fdt2;
|
|
+
|
|
+ fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fdt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_cvtd_l(CPULOONGARCHState *env, uint64_t dt0)
|
|
+{
|
|
+ uint64_t fdt2;
|
|
+
|
|
+ fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fdt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_cvt_l_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_cvt_l_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_cvts_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint32_t fst2;
|
|
+
|
|
+ fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fst2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_cvts_w(CPULOONGARCHState *env, uint32_t wt0)
|
|
+{
|
|
+ uint32_t fst2;
|
|
+
|
|
+ fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fst2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_cvts_l(CPULOONGARCHState *env, uint64_t dt0)
|
|
+{
|
|
+ uint32_t fst2;
|
|
+
|
|
+ fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fst2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_cvt_w_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_cvt_w_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_round_l_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_nearest_even,
|
|
+ &env->active_fpu.fp_status);
|
|
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_round_l_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_nearest_even,
|
|
+ &env->active_fpu.fp_status);
|
|
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_round_w_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_nearest_even,
|
|
+ &env->active_fpu.fp_status);
|
|
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_round_w_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_nearest_even,
|
|
+ &env->active_fpu.fp_status);
|
|
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_trunc_l_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_trunc_l_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_trunc_w_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_trunc_w_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_ceil_l_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
|
|
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_ceil_l_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
|
|
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_ceil_w_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
|
|
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_ceil_w_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
|
|
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_floor_l_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
|
|
+ dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_floor_l_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
|
|
+ dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ dt2 = FP_TO_INT64_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_floor_w_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
|
|
+ wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_floor_w_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
|
|
+ wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
|
|
+ restore_rounding_mode(env);
|
|
+ if (get_float_exception_flags(&env->active_fpu.fp_status) &
|
|
+ (float_flag_invalid | float_flag_overflow)) {
|
|
+ wt2 = FP_TO_INT32_OVERFLOW;
|
|
+ }
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+/* unary operations, not modifying fp status */
|
|
+#define FLOAT_UNOP(name) \
|
|
+ uint64_t helper_float_##name##_d(uint64_t fdt0) \
|
|
+ { \
|
|
+ return float64_##name(fdt0); \
|
|
+ } \
|
|
+ uint32_t helper_float_##name##_s(uint32_t fst0) \
|
|
+ { \
|
|
+ return float32_##name(fst0); \
|
|
+ }
|
|
+
|
|
+FLOAT_UNOP(abs)
|
|
+FLOAT_UNOP(chs)
|
|
+#undef FLOAT_UNOP
|
|
+
|
|
+uint64_t helper_float_recip_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint64_t fdt2;
|
|
+
|
|
+ fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fdt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_recip_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint32_t fst2;
|
|
+
|
|
+ fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fst2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_rsqrt_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint64_t fdt2;
|
|
+
|
|
+ fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
|
|
+ fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fdt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_rsqrt_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint32_t fst2;
|
|
+
|
|
+ fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
|
|
+ fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fst2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_rint_s(CPULOONGARCHState *env, uint32_t fs)
|
|
+{
|
|
+ uint32_t fdret;
|
|
+
|
|
+ fdret = float32_round_to_int(fs, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fdret;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_rint_d(CPULOONGARCHState *env, uint64_t fs)
|
|
+{
|
|
+ uint64_t fdret;
|
|
+
|
|
+ fdret = float64_round_to_int(fs, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return fdret;
|
|
+}
|
|
+
|
|
+#define FLOAT_CLASS(name, bits) \
|
|
+ uint##bits##_t float_##name(uint##bits##_t arg, float_status *status) \
|
|
+ { \
|
|
+ if (float##bits##_is_signaling_nan(arg, status)) { \
|
|
+ return FLOAT_CLASS_SIGNALING_NAN; \
|
|
+ } else if (float##bits##_is_quiet_nan(arg, status)) { \
|
|
+ return FLOAT_CLASS_QUIET_NAN; \
|
|
+ } else if (float##bits##_is_neg(arg)) { \
|
|
+ if (float##bits##_is_infinity(arg)) { \
|
|
+ return FLOAT_CLASS_NEGATIVE_INFINITY; \
|
|
+ } else if (float##bits##_is_zero(arg)) { \
|
|
+ return FLOAT_CLASS_NEGATIVE_ZERO; \
|
|
+ } else if (float##bits##_is_zero_or_denormal(arg)) { \
|
|
+ return FLOAT_CLASS_NEGATIVE_SUBNORMAL; \
|
|
+ } else { \
|
|
+ return FLOAT_CLASS_NEGATIVE_NORMAL; \
|
|
+ } \
|
|
+ } else { \
|
|
+ if (float##bits##_is_infinity(arg)) { \
|
|
+ return FLOAT_CLASS_POSITIVE_INFINITY; \
|
|
+ } else if (float##bits##_is_zero(arg)) { \
|
|
+ return FLOAT_CLASS_POSITIVE_ZERO; \
|
|
+ } else if (float##bits##_is_zero_or_denormal(arg)) { \
|
|
+ return FLOAT_CLASS_POSITIVE_SUBNORMAL; \
|
|
+ } else { \
|
|
+ return FLOAT_CLASS_POSITIVE_NORMAL; \
|
|
+ } \
|
|
+ } \
|
|
+ } \
|
|
+ \
|
|
+ uint##bits##_t helper_float_##name(CPULOONGARCHState *env, \
|
|
+ uint##bits##_t arg) \
|
|
+ { \
|
|
+ return float_##name(arg, &env->active_fpu.fp_status); \
|
|
+ }
|
|
+
|
|
+FLOAT_CLASS(class_s, 32)
|
|
+FLOAT_CLASS(class_d, 64)
|
|
+#undef FLOAT_CLASS
|
|
+
|
|
+/* binary operations */
|
|
+#define FLOAT_BINOP(name) \
|
|
+ uint64_t helper_float_##name##_d(CPULOONGARCHState *env, uint64_t fdt0, \
|
|
+ uint64_t fdt1) \
|
|
+ { \
|
|
+ uint64_t dt2; \
|
|
+ \
|
|
+ dt2 = float64_##name(fdt0, fdt1, &env->active_fpu.fp_status); \
|
|
+ update_fcsr0(env, GETPC()); \
|
|
+ return dt2; \
|
|
+ } \
|
|
+ \
|
|
+ uint32_t helper_float_##name##_s(CPULOONGARCHState *env, uint32_t fst0, \
|
|
+ uint32_t fst1) \
|
|
+ { \
|
|
+ uint32_t wt2; \
|
|
+ \
|
|
+ wt2 = float32_##name(fst0, fst1, &env->active_fpu.fp_status); \
|
|
+ update_fcsr0(env, GETPC()); \
|
|
+ return wt2; \
|
|
+ }
|
|
+
|
|
+FLOAT_BINOP(add)
|
|
+FLOAT_BINOP(sub)
|
|
+FLOAT_BINOP(mul)
|
|
+FLOAT_BINOP(div)
|
|
+#undef FLOAT_BINOP
|
|
+
|
|
+uint64_t helper_float_exp2_d(CPULOONGARCHState *env, uint64_t fdt0,
|
|
+ uint64_t fdt1)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+ int64_t n = (int64_t)fdt1;
|
|
+
|
|
+ dt2 = float64_scalbn(fdt0, n > 0x1000 ? 0x1000 : n < -0x1000 ? -0x1000 : n,
|
|
+ &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+uint32_t helper_float_exp2_s(CPULOONGARCHState *env, uint32_t fst0,
|
|
+ uint32_t fst1)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+ int32_t n = (int32_t)fst1;
|
|
+
|
|
+ wt2 = float32_scalbn(fst0, n > 0x200 ? 0x200 : n < -0x200 ? -0x200 : n,
|
|
+ &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+#define FLOAT_MINMAX(name, bits, minmaxfunc) \
|
|
+ uint##bits##_t helper_float_##name(CPULOONGARCHState *env, \
|
|
+ uint##bits##_t fs, uint##bits##_t ft) \
|
|
+ { \
|
|
+ uint##bits##_t fdret; \
|
|
+ \
|
|
+ fdret = \
|
|
+ float##bits##_##minmaxfunc(fs, ft, &env->active_fpu.fp_status); \
|
|
+ update_fcsr0(env, GETPC()); \
|
|
+ return fdret; \
|
|
+ }
|
|
+
|
|
+FLOAT_MINMAX(max_s, 32, maxnum)
|
|
+FLOAT_MINMAX(max_d, 64, maxnum)
|
|
+FLOAT_MINMAX(maxa_s, 32, maxnummag)
|
|
+FLOAT_MINMAX(maxa_d, 64, maxnummag)
|
|
+
|
|
+FLOAT_MINMAX(min_s, 32, minnum)
|
|
+FLOAT_MINMAX(min_d, 64, minnum)
|
|
+FLOAT_MINMAX(mina_s, 32, minnummag)
|
|
+FLOAT_MINMAX(mina_d, 64, minnummag)
|
|
+#undef FLOAT_MINMAX
|
|
+
|
|
+#define FLOAT_FMADDSUB(name, bits, muladd_arg) \
|
|
+ uint##bits##_t helper_float_##name(CPULOONGARCHState *env, \
|
|
+ uint##bits##_t fs, uint##bits##_t ft, \
|
|
+ uint##bits##_t fd) \
|
|
+ { \
|
|
+ uint##bits##_t fdret; \
|
|
+ \
|
|
+ fdret = float##bits##_muladd(fs, ft, fd, muladd_arg, \
|
|
+ &env->active_fpu.fp_status); \
|
|
+ update_fcsr0(env, GETPC()); \
|
|
+ return fdret; \
|
|
+ }
|
|
+
|
|
+FLOAT_FMADDSUB(maddf_s, 32, 0)
|
|
+FLOAT_FMADDSUB(maddf_d, 64, 0)
|
|
+FLOAT_FMADDSUB(msubf_s, 32, float_muladd_negate_c)
|
|
+FLOAT_FMADDSUB(msubf_d, 64, float_muladd_negate_c)
|
|
+FLOAT_FMADDSUB(nmaddf_s, 32, float_muladd_negate_result)
|
|
+FLOAT_FMADDSUB(nmaddf_d, 64, float_muladd_negate_result)
|
|
+FLOAT_FMADDSUB(nmsubf_s, 32,
|
|
+ float_muladd_negate_result | float_muladd_negate_c)
|
|
+FLOAT_FMADDSUB(nmsubf_d, 64,
|
|
+ float_muladd_negate_result | float_muladd_negate_c)
|
|
+#undef FLOAT_FMADDSUB
|
|
+
|
|
+/* compare operations */
|
|
+#define FOP_CONDN_D(op, cond) \
|
|
+ uint64_t helper_cmp_d_##op(CPULOONGARCHState *env, uint64_t fdt0, \
|
|
+ uint64_t fdt1) \
|
|
+ { \
|
|
+ uint64_t c; \
|
|
+ c = cond; \
|
|
+ update_fcsr0(env, GETPC()); \
|
|
+ if (c) { \
|
|
+ return -1; \
|
|
+ } else { \
|
|
+ return 0; \
|
|
+ } \
|
|
+ }
|
|
+
|
|
+/*
|
|
+ * NOTE: the comma operator will make "cond" to eval to false,
|
|
+ * but float64_unordered_quiet() is still called.
|
|
+ */
|
|
+FOP_CONDN_D(af,
|
|
+ (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status),
|
|
+ 0))
|
|
+FOP_CONDN_D(un,
|
|
+ (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(eq, (float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(ueq,
|
|
+ (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(lt, (float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(ult,
|
|
+ (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(le, (float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(ule,
|
|
+ (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+/*
|
|
+ * NOTE: the comma operator will make "cond" to eval to false,
|
|
+ * but float64_unordered() is still called.
|
|
+ */
|
|
+FOP_CONDN_D(saf,
|
|
+ (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
|
|
+FOP_CONDN_D(sun, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(seq, (float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(sueq, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_eq(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(slt, (float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(sult, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(sle, (float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(sule, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(or, (float64_le_quiet(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(une,
|
|
+ (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(ne, (float64_lt_quiet(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(sor, (float64_le(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_le(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(sune, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_D(sne, (float64_lt(fdt1, fdt0, &env->active_fpu.fp_status) ||
|
|
+ float64_lt(fdt0, fdt1, &env->active_fpu.fp_status)))
|
|
+
|
|
+#define FOP_CONDN_S(op, cond) \
|
|
+ uint32_t helper_cmp_s_##op(CPULOONGARCHState *env, uint32_t fst0, \
|
|
+ uint32_t fst1) \
|
|
+ { \
|
|
+ uint64_t c; \
|
|
+ c = cond; \
|
|
+ update_fcsr0(env, GETPC()); \
|
|
+ if (c) { \
|
|
+ return -1; \
|
|
+ } else { \
|
|
+ return 0; \
|
|
+ } \
|
|
+ }
|
|
+
|
|
+/*
|
|
+ * NOTE: the comma operator will make "cond" to eval to false,
|
|
+ * but float32_unordered_quiet() is still called.
|
|
+ */
|
|
+FOP_CONDN_S(af,
|
|
+ (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
|
|
+ 0))
|
|
+FOP_CONDN_S(un,
|
|
+ (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(eq, (float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(ueq,
|
|
+ (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(lt, (float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(ult,
|
|
+ (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(le, (float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(ule,
|
|
+ (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+/*
|
|
+ * NOTE: the comma operator will make "cond" to eval to false,
|
|
+ * but float32_unordered() is still called.
|
|
+ */
|
|
+FOP_CONDN_S(saf,
|
|
+ (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
|
|
+FOP_CONDN_S(sun, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(seq, (float32_eq(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(sueq, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_eq(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(slt, (float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(sult, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(sle, (float32_le(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(sule, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_le(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(or, (float32_le_quiet(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(une,
|
|
+ (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(ne, (float32_lt_quiet(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(sor, (float32_le(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_le(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(sune, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_lt(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status) ||
|
|
+ float32_lt(fst0, fst1, &env->active_fpu.fp_status)))
|
|
+
|
|
+uint32_t helper_float_logb_s(CPULOONGARCHState *env, uint32_t fst0)
|
|
+{
|
|
+ uint32_t wt2;
|
|
+
|
|
+ wt2 = float32_log2(fst0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return wt2;
|
|
+}
|
|
+
|
|
+uint64_t helper_float_logb_d(CPULOONGARCHState *env, uint64_t fdt0)
|
|
+{
|
|
+ uint64_t dt2;
|
|
+
|
|
+ dt2 = float64_log2(fdt0, &env->active_fpu.fp_status);
|
|
+ update_fcsr0(env, GETPC());
|
|
+ return dt2;
|
|
+}
|
|
+
|
|
+target_ulong helper_fsel(CPULOONGARCHState *env, target_ulong fj,
|
|
+ target_ulong fk, uint32_t ca)
|
|
+{
|
|
+ if (env->active_fpu.cf[ca & 0x7]) {
|
|
+ return fk;
|
|
+ } else {
|
|
+ return fj;
|
|
+ }
|
|
+}
|
|
diff --git a/target/loongarch64/fpu_helper.h b/target/loongarch64/fpu_helper.h
|
|
new file mode 100644
|
|
index 0000000000..9efa7e30ca
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/fpu_helper.h
|
|
@@ -0,0 +1,127 @@
|
|
+/*
|
|
+ * loongarch internal definitions and helpers
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef LOONGARCH_FPU_H
|
|
+#define LOONGARCH_FPU_H
|
|
+
|
|
+#include "cpu-csr.h"
|
|
+
|
|
+extern const struct loongarch_def_t loongarch_defs[];
|
|
+extern const int loongarch_defs_number;
|
|
+
|
|
+enum CPULSXDataFormat { DF_BYTE = 0, DF_HALF, DF_WORD, DF_DOUBLE, DF_QUAD };
|
|
+
|
|
+void loongarch_cpu_do_interrupt(CPUState *cpu);
|
|
+bool loongarch_cpu_exec_interrupt(CPUState *cpu, int int_req);
|
|
+void loongarch_cpu_do_unaligned_access(CPUState *cpu, vaddr addr,
|
|
+ MMUAccessType access_type, int mmu_idx,
|
|
+ uintptr_t retaddr) QEMU_NORETURN;
|
|
+
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+
|
|
+typedef struct r4k_tlb_t r4k_tlb_t;
|
|
+struct r4k_tlb_t {
|
|
+ target_ulong VPN;
|
|
+ uint32_t PageMask;
|
|
+ uint16_t ASID;
|
|
+ unsigned int G:1;
|
|
+ unsigned int C0:3;
|
|
+ unsigned int C1:3;
|
|
+ unsigned int V0:1;
|
|
+ unsigned int V1:1;
|
|
+ unsigned int D0:1;
|
|
+ unsigned int D1:1;
|
|
+ unsigned int XI0:1;
|
|
+ unsigned int XI1:1;
|
|
+ unsigned int RI0:1;
|
|
+ unsigned int RI1:1;
|
|
+ unsigned int EHINV:1;
|
|
+ uint64_t PPN[2];
|
|
+};
|
|
+
|
|
+int no_mmu_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
|
|
+ target_ulong address, int rw, int access_type);
|
|
+int fixed_mmu_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
|
|
+ target_ulong address, int rw, int access_type);
|
|
+int r4k_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
|
|
+ target_ulong address, int rw, int access_type);
|
|
+
|
|
+/* loongarch 3a5000 tlb helper function : lisa csr */
|
|
+int ls3a5k_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
|
|
+ target_ulong address, int rw, int access_type);
|
|
+void ls3a5k_helper_tlbwr(CPULOONGARCHState *env);
|
|
+void ls3a5k_helper_tlbfill(CPULOONGARCHState *env);
|
|
+void ls3a5k_helper_tlbsrch(CPULOONGARCHState *env);
|
|
+void ls3a5k_helper_tlbrd(CPULOONGARCHState *env);
|
|
+void ls3a5k_helper_tlbclr(CPULOONGARCHState *env);
|
|
+void ls3a5k_helper_tlbflush(CPULOONGARCHState *env);
|
|
+void ls3a5k_invalidate_tlb(CPULOONGARCHState *env, int idx);
|
|
+void ls3a5k_helper_invtlb(CPULOONGARCHState *env, target_ulong addr,
|
|
+ target_ulong info, int op);
|
|
+void ls3a5k_flush_vtlb(CPULOONGARCHState *env);
|
|
+void ls3a5k_flush_ftlb(CPULOONGARCHState *env);
|
|
+hwaddr cpu_loongarch_translate_address(CPULOONGARCHState *env,
|
|
+ target_ulong address, int rw);
|
|
+#endif
|
|
+
|
|
+#define cpu_signal_handler cpu_loongarch_signal_handler
|
|
+
|
|
+static inline bool cpu_loongarch_hw_interrupts_enabled(CPULOONGARCHState *env)
|
|
+{
|
|
+ bool ret = 0;
|
|
+
|
|
+ ret = env->CSR_CRMD & (1 << CSR_CRMD_IE_SHIFT);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void loongarch_tcg_init(void);
|
|
+
|
|
+/* helper.c */
|
|
+bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|
+ MMUAccessType access_type, int mmu_idx, bool probe,
|
|
+ uintptr_t retaddr);
|
|
+
|
|
+/* op_helper.c */
|
|
+uint32_t float_class_s(uint32_t arg, float_status *fst);
|
|
+uint64_t float_class_d(uint64_t arg, float_status *fst);
|
|
+
|
|
+int ieee_ex_to_loongarch(int xcpt);
|
|
+void update_pagemask(CPULOONGARCHState *env, target_ulong arg1,
|
|
+ int32_t *pagemask);
|
|
+
|
|
+void cpu_loongarch_tlb_flush(CPULOONGARCHState *env);
|
|
+void sync_c0_status(CPULOONGARCHState *env, CPULOONGARCHState *cpu, int tc);
|
|
+
|
|
+void QEMU_NORETURN do_raise_exception_err(CPULOONGARCHState *env,
|
|
+ uint32_t exception, int error_code,
|
|
+ uintptr_t pc);
|
|
+int loongarch_read_qxfer(CPUState *cs, const char *annex, uint8_t *read_buf,
|
|
+ unsigned long offset, unsigned long len);
|
|
+int loongarch_write_qxfer(CPUState *cs, const char *annex,
|
|
+ const uint8_t *write_buf, unsigned long offset,
|
|
+ unsigned long len);
|
|
+
|
|
+static inline void QEMU_NORETURN do_raise_exception(CPULOONGARCHState *env,
|
|
+ uint32_t exception,
|
|
+ uintptr_t pc)
|
|
+{
|
|
+ do_raise_exception_err(env, exception, 0, pc);
|
|
+}
|
|
+#endif
|
|
diff --git a/target/loongarch64/gdbstub.c b/target/loongarch64/gdbstub.c
|
|
new file mode 100644
|
|
index 0000000000..5ee91dc930
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/gdbstub.c
|
|
@@ -0,0 +1,164 @@
|
|
+/*
|
|
+ * LOONGARCH gdb server stub
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "qemu-common.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "exec/gdbstub.h"
|
|
+#ifdef CONFIG_TCG
|
|
+#include "exec/helper-proto.h"
|
|
+#endif
|
|
+
|
|
+uint64_t read_fcc(CPULOONGARCHState *env)
|
|
+{
|
|
+ uint64_t ret = 0;
|
|
+
|
|
+ for (int i = 0; i < 8; ++i) {
|
|
+ ret |= (uint64_t)env->active_fpu.cf[i] << (i * 8);
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+void write_fcc(CPULOONGARCHState *env, uint64_t val)
|
|
+{
|
|
+ for (int i = 0; i < 8; ++i) {
|
|
+ env->active_fpu.cf[i] = (val >> (i * 8)) & 1;
|
|
+ }
|
|
+}
|
|
+
|
|
+int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int size = 0;
|
|
+
|
|
+ if (0 <= n && n < 32) {
|
|
+ return gdb_get_regl(mem_buf, env->active_tc.gpr[n]);
|
|
+ }
|
|
+
|
|
+ switch (n) {
|
|
+ case 32:
|
|
+ size = gdb_get_regl(mem_buf, 0);
|
|
+ break;
|
|
+ case 33:
|
|
+ size = gdb_get_regl(mem_buf, env->active_tc.PC);
|
|
+ break;
|
|
+ case 34:
|
|
+ size = gdb_get_regl(mem_buf, env->CSR_BADV);
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
+int loongarch_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ target_ulong tmp = ldtul_p(mem_buf);
|
|
+ int size = 0;
|
|
+
|
|
+ if (0 <= n && n < 32) {
|
|
+ return env->active_tc.gpr[n] = tmp, sizeof(target_ulong);
|
|
+ }
|
|
+
|
|
+ size = sizeof(target_ulong);
|
|
+
|
|
+ switch (n) {
|
|
+ case 33:
|
|
+ env->active_tc.PC = tmp;
|
|
+ break;
|
|
+ case 32:
|
|
+ case 34:
|
|
+ default:
|
|
+ size = 0;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
+static int loongarch_gdb_get_fpu(CPULOONGARCHState *env, GByteArray *mem_buf,
|
|
+ int n)
|
|
+{
|
|
+ if (0 <= n && n < 32) {
|
|
+ return gdb_get_reg64(mem_buf, env->active_fpu.fpr[n].d);
|
|
+ } else if (n == 32) {
|
|
+ uint64_t val = read_fcc(env);
|
|
+ return gdb_get_reg64(mem_buf, val);
|
|
+ } else if (n == 33) {
|
|
+ return gdb_get_reg32(mem_buf, env->active_fpu.fcsr0);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int loongarch_gdb_set_fpu(CPULOONGARCHState *env, uint8_t *mem_buf,
|
|
+ int n)
|
|
+{
|
|
+ int length = 0;
|
|
+
|
|
+ if (0 <= n && n < 32) {
|
|
+ env->active_fpu.fpr[n].d = ldq_p(mem_buf);
|
|
+ length = 8;
|
|
+ } else if (n == 32) {
|
|
+ uint64_t val = ldq_p(mem_buf);
|
|
+ write_fcc(env, val);
|
|
+ length = 8;
|
|
+ } else if (n == 33) {
|
|
+ env->active_fpu.fcsr0 = ldl_p(mem_buf);
|
|
+ length = 4;
|
|
+ }
|
|
+ return length;
|
|
+}
|
|
+
|
|
+void loongarch_cpu_register_gdb_regs_for_features(CPUState *cs)
|
|
+{
|
|
+ gdb_register_coprocessor(cs, loongarch_gdb_get_fpu, loongarch_gdb_set_fpu,
|
|
+ 34, "loongarch-fpu.xml", 0);
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_TCG
|
|
+int loongarch_read_qxfer(CPUState *cs, const char *annex, uint8_t *read_buf,
|
|
+ unsigned long offset, unsigned long len)
|
|
+{
|
|
+ if (strncmp(annex, "cpucfg", sizeof("cpucfg") - 1) == 0) {
|
|
+ if (offset % 4 != 0 || len % 4 != 0) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ size_t i;
|
|
+ for (i = offset; i < offset + len; i += 4)
|
|
+ ((uint32_t *)read_buf)[(i - offset) / 4] =
|
|
+ helper_cpucfg(&(LOONGARCH_CPU(cs)->env), i / 4);
|
|
+ return 32 * 4;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int loongarch_write_qxfer(CPUState *cs, const char *annex,
|
|
+ const uint8_t *write_buf, unsigned long offset,
|
|
+ unsigned long len)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+#endif
|
|
diff --git a/target/loongarch64/helper.c b/target/loongarch64/helper.c
|
|
new file mode 100644
|
|
index 0000000000..ec25803c1c
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/helper.c
|
|
@@ -0,0 +1,726 @@
|
|
+/*
|
|
+ * LOONGARCH emulation helpers for qemu.
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "exec/exec-all.h"
|
|
+#include "exec/cpu_ldst.h"
|
|
+#include "exec/log.h"
|
|
+#include "hw/loongarch/cpudevs.h"
|
|
+
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+
|
|
+static int ls3a5k_map_address_tlb_entry(CPULOONGARCHState *env,
|
|
+ hwaddr *physical, int *prot,
|
|
+ target_ulong address, int rw,
|
|
+ int access_type, ls3a5k_tlb_t *tlb)
|
|
+{
|
|
+ uint64_t mask = tlb->PageMask;
|
|
+ int n = !!(address & mask & ~(mask >> 1));
|
|
+ uint32_t plv = env->CSR_CRMD & CSR_CRMD_PLV;
|
|
+
|
|
+ /* Check access rights */
|
|
+ if (!(n ? tlb->V1 : tlb->V0)) {
|
|
+ return TLBRET_INVALID;
|
|
+ }
|
|
+
|
|
+ if (rw == MMU_INST_FETCH && (n ? tlb->XI1 : tlb->XI0)) {
|
|
+ return TLBRET_XI;
|
|
+ }
|
|
+
|
|
+ if (rw == MMU_DATA_LOAD && (n ? tlb->RI1 : tlb->RI0)) {
|
|
+ return TLBRET_RI;
|
|
+ }
|
|
+
|
|
+ if (plv > (n ? tlb->PLV1 : tlb->PLV0)) {
|
|
+ return TLBRET_PE;
|
|
+ }
|
|
+
|
|
+ if (rw != MMU_DATA_STORE || (n ? tlb->WE1 : tlb->WE0)) {
|
|
+ /*
|
|
+ * PPN address
|
|
+ * 4 KB: [47:13] [12;0]
|
|
+ * 16 KB: [47:15] [14:0]
|
|
+ */
|
|
+ if (n) {
|
|
+ *physical = tlb->PPN1 | (address & (mask >> 1));
|
|
+ } else {
|
|
+ *physical = tlb->PPN0 | (address & (mask >> 1));
|
|
+ }
|
|
+ *prot = PAGE_READ;
|
|
+ if (n ? tlb->WE1 : tlb->WE0) {
|
|
+ *prot |= PAGE_WRITE;
|
|
+ }
|
|
+ if (!(n ? tlb->XI1 : tlb->XI0)) {
|
|
+ *prot |= PAGE_EXEC;
|
|
+ }
|
|
+ return TLBRET_MATCH;
|
|
+ }
|
|
+
|
|
+ return TLBRET_DIRTY;
|
|
+}
|
|
+
|
|
+/* Loongarch 3A5K -style MMU emulation */
|
|
+int ls3a5k_map_address(CPULOONGARCHState *env, hwaddr *physical, int *prot,
|
|
+ target_ulong address, int rw, int access_type)
|
|
+{
|
|
+ uint16_t asid = env->CSR_ASID & 0x3ff;
|
|
+ int i;
|
|
+ ls3a5k_tlb_t *tlb;
|
|
+
|
|
+ int ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
|
|
+ int vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;
|
|
+
|
|
+ int ftlb_idx;
|
|
+
|
|
+ uint64_t mask;
|
|
+ uint64_t vpn; /* address to map */
|
|
+ uint64_t tag; /* address in TLB entry */
|
|
+
|
|
+ /* search VTLB */
|
|
+ for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) {
|
|
+ tlb = &env->tlb->mmu.ls3a5k.tlb[i];
|
|
+ mask = tlb->PageMask;
|
|
+
|
|
+ vpn = address & 0xffffffffe000 & ~mask;
|
|
+ tag = tlb->VPN & ~mask;
|
|
+
|
|
+ if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag &&
|
|
+ tlb->EHINV != 1) {
|
|
+ return ls3a5k_map_address_tlb_entry(env, physical, prot, address,
|
|
+ rw, access_type, tlb);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ftlb_size == 0) {
|
|
+ return TLBRET_NOMATCH;
|
|
+ }
|
|
+
|
|
+ /* search FTLB */
|
|
+ mask = env->tlb->mmu.ls3a5k.ftlb_mask;
|
|
+ vpn = address & 0xffffffffe000 & ~mask;
|
|
+
|
|
+ ftlb_idx = (address & 0xffffffffc000) >> 15; /* 16 KB */
|
|
+ ftlb_idx = ftlb_idx & 0xff; /* [0,255] */
|
|
+
|
|
+ for (i = 0; i < 8; ++i) {
|
|
+ /*
|
|
+ * ---------- set 0 1 2 ... 7
|
|
+ * ftlb_idx -----------------------------------
|
|
+ * 0 | 0 1 2 ... 7
|
|
+ * 1 | 8 9 10 ... 15
|
|
+ * 2 | 16 17 18 ... 23
|
|
+ * ... |
|
|
+ * 255 | 2040 2041 2042 ... 2047
|
|
+ */
|
|
+ tlb = &env->tlb->mmu.ls3a5k.tlb[ftlb_idx * 8 + i];
|
|
+ tag = tlb->VPN & ~mask;
|
|
+
|
|
+ if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag &&
|
|
+ tlb->EHINV != 1) {
|
|
+ return ls3a5k_map_address_tlb_entry(env, physical, prot, address,
|
|
+ rw, access_type, tlb);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return TLBRET_NOMATCH;
|
|
+}
|
|
+
|
|
+static int get_physical_address(CPULOONGARCHState *env, hwaddr *physical,
|
|
+ int *prot, target_ulong real_address, int rw,
|
|
+ int access_type, int mmu_idx)
|
|
+{
|
|
+ int user_mode = mmu_idx == LARCH_HFLAG_UM;
|
|
+ int kernel_mode = !user_mode;
|
|
+ unsigned plv, base_c, base_v, tmp;
|
|
+
|
|
+ /* effective address (modified for KVM T&E kernel segments) */
|
|
+ target_ulong address = real_address;
|
|
+
|
|
+ /* Check PG */
|
|
+ if (!(env->CSR_CRMD & CSR_CRMD_PG)) {
|
|
+ /* DA mode */
|
|
+ *physical = address & 0xffffffffffffUL;
|
|
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
|
+ return TLBRET_MATCH;
|
|
+ }
|
|
+
|
|
+ plv = kernel_mode | (user_mode << 3);
|
|
+ base_v = address >> CSR_DMW_BASE_SH;
|
|
+ /* Check direct map window 0 */
|
|
+ base_c = env->CSR_DMWIN0 >> CSR_DMW_BASE_SH;
|
|
+ if ((plv & env->CSR_DMWIN0) && (base_c == base_v)) {
|
|
+ *physical = dmwin_va2pa(address);
|
|
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
|
+ return TLBRET_MATCH;
|
|
+ }
|
|
+ /* Check direct map window 1 */
|
|
+ base_c = env->CSR_DMWIN1 >> CSR_DMW_BASE_SH;
|
|
+ if ((plv & env->CSR_DMWIN1) && (base_c == base_v)) {
|
|
+ *physical = dmwin_va2pa(address);
|
|
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
|
|
+ return TLBRET_MATCH;
|
|
+ }
|
|
+ /* Check valid extension */
|
|
+ tmp = address >> 47;
|
|
+ if (!(tmp == 0 || tmp == 0x1ffff)) {
|
|
+ return TLBRET_BADADDR;
|
|
+ }
|
|
+ /* mapped address */
|
|
+ return env->tlb->map_address(env, physical, prot, real_address, rw,
|
|
+ access_type);
|
|
+}
|
|
+
|
|
+void cpu_loongarch_tlb_flush(CPULOONGARCHState *env)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = loongarch_env_get_cpu(env);
|
|
+
|
|
+ /* Flush qemu's TLB and discard all shadowed entries. */
|
|
+ tlb_flush(CPU(cpu));
|
|
+ env->tlb->tlb_in_use = env->tlb->nb_tlb;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static void raise_mmu_exception(CPULOONGARCHState *env, target_ulong address,
|
|
+ int rw, int tlb_error)
|
|
+{
|
|
+ CPUState *cs = CPU(loongarch_env_get_cpu(env));
|
|
+ int exception = 0, error_code = 0;
|
|
+
|
|
+ if (rw == MMU_INST_FETCH) {
|
|
+ error_code |= EXCP_INST_NOTAVAIL;
|
|
+ }
|
|
+
|
|
+ switch (tlb_error) {
|
|
+ default:
|
|
+ case TLBRET_BADADDR:
|
|
+ /* Reference to kernel address from user mode or supervisor mode */
|
|
+ /* Reference to supervisor address from user mode */
|
|
+ if (rw == MMU_DATA_STORE) {
|
|
+ exception = EXCP_AdES;
|
|
+ } else {
|
|
+ exception = EXCP_AdEL;
|
|
+ }
|
|
+ break;
|
|
+ case TLBRET_NOMATCH:
|
|
+ /* No TLB match for a mapped address */
|
|
+ if (rw == MMU_DATA_STORE) {
|
|
+ exception = EXCP_TLBS;
|
|
+ } else {
|
|
+ exception = EXCP_TLBL;
|
|
+ }
|
|
+ error_code |= EXCP_TLB_NOMATCH;
|
|
+ break;
|
|
+ case TLBRET_INVALID:
|
|
+ /* TLB match with no valid bit */
|
|
+ if (rw == MMU_DATA_STORE) {
|
|
+ exception = EXCP_TLBS;
|
|
+ } else {
|
|
+ exception = EXCP_TLBL;
|
|
+ }
|
|
+ break;
|
|
+ case TLBRET_DIRTY:
|
|
+ /* TLB match but 'D' bit is cleared */
|
|
+ exception = EXCP_LTLBL;
|
|
+ break;
|
|
+ case TLBRET_XI:
|
|
+ /* Execute-Inhibit Exception */
|
|
+ exception = EXCP_TLBXI;
|
|
+ break;
|
|
+ case TLBRET_RI:
|
|
+ /* Read-Inhibit Exception */
|
|
+ exception = EXCP_TLBRI;
|
|
+ break;
|
|
+ case TLBRET_PE:
|
|
+ /* Privileged Exception */
|
|
+ exception = EXCP_TLBPE;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (env->insn_flags & INSN_LOONGARCH) {
|
|
+ if (tlb_error == TLBRET_NOMATCH) {
|
|
+ env->CSR_TLBRBADV = address;
|
|
+ env->CSR_TLBREHI = address & (TARGET_PAGE_MASK << 1);
|
|
+ cs->exception_index = exception;
|
|
+ env->error_code = error_code;
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Raise exception */
|
|
+ env->CSR_BADV = address;
|
|
+ cs->exception_index = exception;
|
|
+ env->error_code = error_code;
|
|
+
|
|
+ if (env->insn_flags & INSN_LOONGARCH) {
|
|
+ env->CSR_TLBEHI = address & (TARGET_PAGE_MASK << 1);
|
|
+ }
|
|
+}
|
|
+
|
|
+bool loongarch_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|
+ MMUAccessType access_type, int mmu_idx, bool probe,
|
|
+ uintptr_t retaddr)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+ hwaddr physical;
|
|
+ int prot;
|
|
+ int loongarch_access_type;
|
|
+#endif
|
|
+ int ret = TLBRET_BADADDR;
|
|
+
|
|
+ qemu_log_mask(CPU_LOG_MMU,
|
|
+ "%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " mmu_idx %d\n",
|
|
+ __func__, env->active_tc.PC, address, mmu_idx);
|
|
+
|
|
+ /* data access */
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+ /* XXX: put correct access by using cpu_restore_state() correctly */
|
|
+ loongarch_access_type = ACCESS_INT;
|
|
+ ret = get_physical_address(env, &physical, &prot, address, access_type,
|
|
+ loongarch_access_type, mmu_idx);
|
|
+ switch (ret) {
|
|
+ case TLBRET_MATCH:
|
|
+ qemu_log_mask(CPU_LOG_MMU,
|
|
+ "%s address=%" VADDR_PRIx " physical " TARGET_FMT_plx
|
|
+ " prot %d asid %ld pc 0x%lx\n",
|
|
+ __func__, address, physical, prot, env->CSR_ASID,
|
|
+ env->active_tc.PC);
|
|
+ break;
|
|
+ default:
|
|
+ qemu_log_mask(CPU_LOG_MMU,
|
|
+ "%s address=%" VADDR_PRIx " ret %d asid %ld pc 0x%lx\n",
|
|
+ __func__, address, ret, env->CSR_ASID,
|
|
+ env->active_tc.PC);
|
|
+ break;
|
|
+ }
|
|
+ if (ret == TLBRET_MATCH) {
|
|
+ tlb_set_page(cs, address & TARGET_PAGE_MASK,
|
|
+ physical & TARGET_PAGE_MASK, prot | PAGE_EXEC, mmu_idx,
|
|
+ TARGET_PAGE_SIZE);
|
|
+ ret = true;
|
|
+ }
|
|
+ if (probe) {
|
|
+ return false;
|
|
+ }
|
|
+#endif
|
|
+
|
|
+ raise_mmu_exception(env, address, access_type, ret);
|
|
+ do_raise_exception_err(env, cs->exception_index, env->error_code, retaddr);
|
|
+}
|
|
+
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+hwaddr cpu_loongarch_translate_address(CPULOONGARCHState *env,
|
|
+ target_ulong address, int rw)
|
|
+{
|
|
+ hwaddr physical;
|
|
+ int prot;
|
|
+ int access_type;
|
|
+ int ret = 0;
|
|
+
|
|
+ /* data access */
|
|
+ access_type = ACCESS_INT;
|
|
+ ret = get_physical_address(env, &physical, &prot, address, rw, access_type,
|
|
+ cpu_mmu_index(env, false));
|
|
+ if (ret != TLBRET_MATCH) {
|
|
+ raise_mmu_exception(env, address, rw, ret);
|
|
+ return -1LL;
|
|
+ } else {
|
|
+ return physical;
|
|
+ }
|
|
+}
|
|
+
|
|
+static const char *const excp_names[EXCP_LAST + 1] = {
|
|
+ [EXCP_RESET] = "reset",
|
|
+ [EXCP_SRESET] = "soft reset",
|
|
+ [EXCP_NMI] = "non-maskable interrupt",
|
|
+ [EXCP_EXT_INTERRUPT] = "interrupt",
|
|
+ [EXCP_AdEL] = "address error load",
|
|
+ [EXCP_AdES] = "address error store",
|
|
+ [EXCP_TLBF] = "TLB refill",
|
|
+ [EXCP_IBE] = "instruction bus error",
|
|
+ [EXCP_SYSCALL] = "syscall",
|
|
+ [EXCP_BREAK] = "break",
|
|
+ [EXCP_FPDIS] = "float unit unusable",
|
|
+ [EXCP_LSXDIS] = "vector128 unusable",
|
|
+ [EXCP_LASXDIS] = "vector256 unusable",
|
|
+ [EXCP_RI] = "reserved instruction",
|
|
+ [EXCP_OVERFLOW] = "arithmetic overflow",
|
|
+ [EXCP_TRAP] = "trap",
|
|
+ [EXCP_FPE] = "floating point",
|
|
+ [EXCP_LTLBL] = "TLB modify",
|
|
+ [EXCP_TLBL] = "TLB load",
|
|
+ [EXCP_TLBS] = "TLB store",
|
|
+ [EXCP_DBE] = "data bus error",
|
|
+ [EXCP_TLBXI] = "TLB execute-inhibit",
|
|
+ [EXCP_TLBRI] = "TLB read-inhibit",
|
|
+ [EXCP_TLBPE] = "TLB priviledged error",
|
|
+};
|
|
+#endif
|
|
+
|
|
+target_ulong exception_resume_pc(CPULOONGARCHState *env)
|
|
+{
|
|
+ target_ulong bad_pc;
|
|
+
|
|
+ bad_pc = env->active_tc.PC;
|
|
+ if (env->hflags & LARCH_HFLAG_BMASK) {
|
|
+ /*
|
|
+ * If the exception was raised from a delay slot, come back to
|
|
+ * the jump.
|
|
+ */
|
|
+ bad_pc -= 4;
|
|
+ }
|
|
+
|
|
+ return bad_pc;
|
|
+}
|
|
+
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+static void set_hflags_for_handler(CPULOONGARCHState *env)
|
|
+{
|
|
+ /* Exception handlers are entered in 32-bit mode. */
|
|
+}
|
|
+
|
|
+static inline void set_badinstr_registers(CPULOONGARCHState *env)
|
|
+{
|
|
+ if ((env->insn_flags & INSN_LOONGARCH)) {
|
|
+ env->CSR_BADI = cpu_ldl_code(env, env->active_tc.PC);
|
|
+ return;
|
|
+ }
|
|
+}
|
|
+#endif
|
|
+
|
|
+static inline unsigned int get_vint_size(CPULOONGARCHState *env)
|
|
+{
|
|
+ unsigned int size = 0;
|
|
+
|
|
+ switch ((env->CSR_ECFG >> 16) & 0x7) {
|
|
+ case 0:
|
|
+ break;
|
|
+ case 1:
|
|
+ size = 2 * 4; /* #Insts * inst_size */
|
|
+ break;
|
|
+ case 2:
|
|
+ size = 4 * 4;
|
|
+ break;
|
|
+ case 3:
|
|
+ size = 8 * 4;
|
|
+ break;
|
|
+ case 4:
|
|
+ size = 16 * 4;
|
|
+ break;
|
|
+ case 5:
|
|
+ size = 32 * 4;
|
|
+ break;
|
|
+ case 6:
|
|
+ size = 64 * 4;
|
|
+ break;
|
|
+ case 7:
|
|
+ size = 128 * 4;
|
|
+ break;
|
|
+ default:
|
|
+ printf("%s: unexpected value", __func__);
|
|
+ assert(0);
|
|
+ }
|
|
+
|
|
+ return size;
|
|
+}
|
|
+
|
|
+#define is_refill(cs, env) \
|
|
+ (((cs->exception_index == EXCP_TLBL) || \
|
|
+ (cs->exception_index == EXCP_TLBS)) && \
|
|
+ (env->error_code & EXCP_TLB_NOMATCH))
|
|
+
|
|
+void loongarch_cpu_do_interrupt(CPUState *cs)
|
|
+{
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ bool update_badinstr = 0;
|
|
+ int cause = -1;
|
|
+ const char *name;
|
|
+
|
|
+ if (qemu_loglevel_mask(CPU_LOG_INT) &&
|
|
+ cs->exception_index != EXCP_EXT_INTERRUPT) {
|
|
+ if (cs->exception_index < 0 || cs->exception_index > EXCP_LAST) {
|
|
+ name = "unknown";
|
|
+ } else {
|
|
+ name = excp_names[cs->exception_index];
|
|
+ }
|
|
+
|
|
+ qemu_log("%s enter: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
|
|
+ " TLBRERA 0x%016lx"
|
|
+ " %s exception\n",
|
|
+ __func__, env->active_tc.PC, env->CSR_ERA, env->CSR_TLBRERA,
|
|
+ name);
|
|
+ }
|
|
+
|
|
+ switch (cs->exception_index) {
|
|
+ case EXCP_RESET:
|
|
+ cpu_reset(CPU(cpu));
|
|
+ break;
|
|
+ case EXCP_NMI:
|
|
+ env->CSR_ERRERA = exception_resume_pc(env);
|
|
+ env->hflags &= ~LARCH_HFLAG_BMASK;
|
|
+ env->hflags |= LARCH_HFLAG_64;
|
|
+ env->hflags &= ~LARCH_HFLAG_AWRAP;
|
|
+ env->hflags &= ~(LARCH_HFLAG_KSU);
|
|
+ env->active_tc.PC = env->exception_base;
|
|
+ set_hflags_for_handler(env);
|
|
+ break;
|
|
+ case EXCP_EXT_INTERRUPT:
|
|
+ cause = 0;
|
|
+ goto set_ERA;
|
|
+ case EXCP_LTLBL:
|
|
+ cause = 1;
|
|
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
|
|
+ goto set_ERA;
|
|
+ case EXCP_TLBL:
|
|
+ cause = 2;
|
|
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
|
|
+ goto set_ERA;
|
|
+ case EXCP_TLBS:
|
|
+ cause = 3;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_AdEL:
|
|
+ cause = 4;
|
|
+ update_badinstr = !(env->error_code & EXCP_INST_NOTAVAIL);
|
|
+ goto set_ERA;
|
|
+ case EXCP_AdES:
|
|
+ cause = 5;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_IBE:
|
|
+ cause = 6;
|
|
+ goto set_ERA;
|
|
+ case EXCP_DBE:
|
|
+ cause = 7;
|
|
+ goto set_ERA;
|
|
+ case EXCP_SYSCALL:
|
|
+ cause = 8;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_BREAK:
|
|
+ cause = 9;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_RI:
|
|
+ cause = 10;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_FPDIS:
|
|
+ case EXCP_LSXDIS:
|
|
+ case EXCP_LASXDIS:
|
|
+ cause = 11;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_OVERFLOW:
|
|
+ cause = 12;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_TRAP:
|
|
+ cause = 13;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_FPE:
|
|
+ cause = 15;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_TLBRI:
|
|
+ cause = 19;
|
|
+ update_badinstr = 1;
|
|
+ goto set_ERA;
|
|
+ case EXCP_TLBXI:
|
|
+ case EXCP_TLBPE:
|
|
+ cause = 20;
|
|
+ goto set_ERA;
|
|
+ set_ERA:
|
|
+ if (is_refill(cs, env)) {
|
|
+ env->CSR_TLBRERA = exception_resume_pc(env);
|
|
+ env->CSR_TLBRERA |= 1;
|
|
+ } else {
|
|
+ env->CSR_ERA = exception_resume_pc(env);
|
|
+ }
|
|
+
|
|
+ if (update_badinstr) {
|
|
+ set_badinstr_registers(env);
|
|
+ }
|
|
+ env->hflags &= ~(LARCH_HFLAG_KSU);
|
|
+
|
|
+ env->hflags &= ~LARCH_HFLAG_BMASK;
|
|
+ if (env->insn_flags & INSN_LOONGARCH) {
|
|
+ /* save PLV and IE */
|
|
+ if (is_refill(cs, env)) {
|
|
+ env->CSR_TLBRPRMD &= (~0x7);
|
|
+ env->CSR_TLBRPRMD |= (env->CSR_CRMD & 0x7);
|
|
+ } else {
|
|
+ env->CSR_PRMD &= (~0x7);
|
|
+ env->CSR_PRMD |= (env->CSR_CRMD & 0x7);
|
|
+ }
|
|
+
|
|
+ env->CSR_CRMD &= ~(0x7);
|
|
+
|
|
+ switch (cs->exception_index) {
|
|
+ case EXCP_EXT_INTERRUPT:
|
|
+ break;
|
|
+ case EXCP_TLBL:
|
|
+ if (env->error_code & EXCP_INST_NOTAVAIL) {
|
|
+ cause = EXCCODE_TLBI;
|
|
+ } else {
|
|
+ cause = EXCCODE_TLBL;
|
|
+ }
|
|
+ break;
|
|
+ case EXCP_TLBS:
|
|
+ cause = EXCCODE_TLBS;
|
|
+ break;
|
|
+ case EXCP_LTLBL:
|
|
+ cause = EXCCODE_MOD;
|
|
+ break;
|
|
+ case EXCP_TLBRI:
|
|
+ cause = EXCCODE_TLBRI;
|
|
+ break;
|
|
+ case EXCP_TLBXI:
|
|
+ cause = EXCCODE_TLBXI;
|
|
+ break;
|
|
+ case EXCP_TLBPE:
|
|
+ cause = EXCCODE_TLBPE;
|
|
+ break;
|
|
+ case EXCP_AdEL:
|
|
+ case EXCP_AdES:
|
|
+ case EXCP_IBE:
|
|
+ case EXCP_DBE:
|
|
+ cause = EXCCODE_ADE;
|
|
+ break;
|
|
+ case EXCP_SYSCALL:
|
|
+ cause = EXCCODE_SYS;
|
|
+ break;
|
|
+ case EXCP_BREAK:
|
|
+ cause = EXCCODE_BP;
|
|
+ break;
|
|
+ case EXCP_RI:
|
|
+ cause = EXCCODE_RI;
|
|
+ break;
|
|
+ case EXCP_FPDIS:
|
|
+ cause = EXCCODE_FPDIS;
|
|
+ break;
|
|
+ case EXCP_LSXDIS:
|
|
+ cause = EXCCODE_LSXDIS;
|
|
+ break;
|
|
+ case EXCP_LASXDIS:
|
|
+ cause = EXCCODE_LASXDIS;
|
|
+ break;
|
|
+ case EXCP_FPE:
|
|
+ cause = EXCCODE_FPE;
|
|
+ break;
|
|
+ default:
|
|
+ printf("Error: exception(%d) '%s' has not been supported\n",
|
|
+ cs->exception_index, excp_names[cs->exception_index]);
|
|
+ abort();
|
|
+ }
|
|
+
|
|
+ uint32_t vec_size = get_vint_size(env);
|
|
+ env->active_tc.PC = env->CSR_EEPN;
|
|
+ env->active_tc.PC += cause * vec_size;
|
|
+ if (is_refill(cs, env)) {
|
|
+ /* TLB Refill */
|
|
+ env->active_tc.PC = env->CSR_TLBRENT;
|
|
+ break; /* Do not modify excode */
|
|
+ }
|
|
+ if (cs->exception_index == EXCP_EXT_INTERRUPT) {
|
|
+ /* Interrupt */
|
|
+ uint32_t vector = 0;
|
|
+ uint32_t pending = env->CSR_ESTAT & CSR_ESTAT_IPMASK;
|
|
+ pending &= env->CSR_ECFG & CSR_ECFG_IPMASK;
|
|
+
|
|
+ /* Find the highest-priority interrupt. */
|
|
+ while (pending >>= 1) {
|
|
+ vector++;
|
|
+ }
|
|
+ env->active_tc.PC =
|
|
+ env->CSR_EEPN + (EXCODE_IP + vector) * vec_size;
|
|
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
|
|
+ qemu_log("%s: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx
|
|
+ " cause %d\n"
|
|
+ " A " TARGET_FMT_lx " D " TARGET_FMT_lx
|
|
+ " vector = %d ExC %08lx ExS %08lx\n",
|
|
+ __func__, env->active_tc.PC, env->CSR_ERA, cause,
|
|
+ env->CSR_BADV, env->CSR_DERA, vector,
|
|
+ env->CSR_ECFG, env->CSR_ESTAT);
|
|
+ }
|
|
+ }
|
|
+ /* Excode */
|
|
+ env->CSR_ESTAT = (env->CSR_ESTAT & ~(0x1f << CSR_ESTAT_EXC_SH)) |
|
|
+ (cause << CSR_ESTAT_EXC_SH);
|
|
+ }
|
|
+ set_hflags_for_handler(env);
|
|
+ break;
|
|
+ default:
|
|
+ abort();
|
|
+ }
|
|
+ if (qemu_loglevel_mask(CPU_LOG_INT) &&
|
|
+ cs->exception_index != EXCP_EXT_INTERRUPT) {
|
|
+ qemu_log("%s: PC " TARGET_FMT_lx " ERA 0x%08lx"
|
|
+ " cause %d%s\n"
|
|
+ " ESTAT %08lx EXCFG 0x%08lx BADVA 0x%08lx BADI 0x%08lx \
|
|
+ SYS_NUM %lu cpu %d asid 0x%lx"
|
|
+ "\n",
|
|
+ __func__, env->active_tc.PC,
|
|
+ is_refill(cs, env) ? env->CSR_TLBRERA : env->CSR_ERA, cause,
|
|
+ is_refill(cs, env) ? "(refill)" : "", env->CSR_ESTAT,
|
|
+ env->CSR_ECFG,
|
|
+ is_refill(cs, env) ? env->CSR_TLBRBADV : env->CSR_BADV,
|
|
+ env->CSR_BADI, env->active_tc.gpr[11], cs->cpu_index,
|
|
+ env->CSR_ASID);
|
|
+ }
|
|
+#endif
|
|
+ cs->exception_index = EXCP_NONE;
|
|
+}
|
|
+
|
|
+bool loongarch_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
|
|
+{
|
|
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+
|
|
+ if (cpu_loongarch_hw_interrupts_enabled(env) &&
|
|
+ cpu_loongarch_hw_interrupts_pending(env)) {
|
|
+ /* Raise it */
|
|
+ cs->exception_index = EXCP_EXT_INTERRUPT;
|
|
+ env->error_code = 0;
|
|
+ loongarch_cpu_do_interrupt(cs);
|
|
+ return true;
|
|
+ }
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
+void QEMU_NORETURN do_raise_exception_err(CPULOONGARCHState *env,
|
|
+ uint32_t exception, int error_code,
|
|
+ uintptr_t pc)
|
|
+{
|
|
+ CPUState *cs = CPU(loongarch_env_get_cpu(env));
|
|
+
|
|
+ qemu_log_mask(CPU_LOG_INT, "%s: %d %d\n", __func__, exception, error_code);
|
|
+ cs->exception_index = exception;
|
|
+ env->error_code = error_code;
|
|
+
|
|
+ cpu_loop_exit_restore(cs, pc);
|
|
+}
|
|
diff --git a/target/loongarch64/helper.h b/target/loongarch64/helper.h
|
|
new file mode 100644
|
|
index 0000000000..868b16da1e
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/helper.h
|
|
@@ -0,0 +1,178 @@
|
|
+/*
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+DEF_HELPER_3(raise_exception_err, noreturn, env, i32, int)
|
|
+DEF_HELPER_2(raise_exception, noreturn, env, i32)
|
|
+DEF_HELPER_1(raise_exception_debug, noreturn, env)
|
|
+
|
|
+DEF_HELPER_FLAGS_1(bitswap, TCG_CALL_NO_RWG_SE, tl, tl)
|
|
+DEF_HELPER_FLAGS_1(dbitswap, TCG_CALL_NO_RWG_SE, tl, tl)
|
|
+
|
|
+DEF_HELPER_3(crc32, tl, tl, tl, i32)
|
|
+DEF_HELPER_3(crc32c, tl, tl, tl, i32)
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+/* LoongISA CSR register */
|
|
+DEF_HELPER_2(csr_rdq, tl, env, i64)
|
|
+DEF_HELPER_3(csr_wrq, tl, env, tl, i64)
|
|
+DEF_HELPER_4(csr_xchgq, tl, env, tl, tl, i64)
|
|
+
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
+
|
|
+/* CP1 functions */
|
|
+DEF_HELPER_2(movfcsr2gr, tl, env, i32)
|
|
+DEF_HELPER_4(movgr2fcsr, void, env, tl, i32, i32)
|
|
+
|
|
+DEF_HELPER_2(float_cvtd_s, i64, env, i32)
|
|
+DEF_HELPER_2(float_cvtd_w, i64, env, i32)
|
|
+DEF_HELPER_2(float_cvtd_l, i64, env, i64)
|
|
+DEF_HELPER_2(float_cvts_d, i32, env, i64)
|
|
+DEF_HELPER_2(float_cvts_w, i32, env, i32)
|
|
+DEF_HELPER_2(float_cvts_l, i32, env, i64)
|
|
+
|
|
+DEF_HELPER_FLAGS_2(float_class_s, TCG_CALL_NO_RWG_SE, i32, env, i32)
|
|
+DEF_HELPER_FLAGS_2(float_class_d, TCG_CALL_NO_RWG_SE, i64, env, i64)
|
|
+
|
|
+#define FOP_PROTO(op) \
|
|
+ DEF_HELPER_4(float_##op##_s, i32, env, i32, i32, i32) \
|
|
+ DEF_HELPER_4(float_##op##_d, i64, env, i64, i64, i64)
|
|
+FOP_PROTO(maddf)
|
|
+FOP_PROTO(msubf)
|
|
+FOP_PROTO(nmaddf)
|
|
+FOP_PROTO(nmsubf)
|
|
+#undef FOP_PROTO
|
|
+
|
|
+#define FOP_PROTO(op) \
|
|
+ DEF_HELPER_3(float_##op##_s, i32, env, i32, i32) \
|
|
+ DEF_HELPER_3(float_##op##_d, i64, env, i64, i64)
|
|
+FOP_PROTO(max)
|
|
+FOP_PROTO(maxa)
|
|
+FOP_PROTO(min)
|
|
+FOP_PROTO(mina)
|
|
+#undef FOP_PROTO
|
|
+
|
|
+#define FOP_PROTO(op) \
|
|
+ DEF_HELPER_2(float_##op##_l_s, i64, env, i32) \
|
|
+ DEF_HELPER_2(float_##op##_l_d, i64, env, i64) \
|
|
+ DEF_HELPER_2(float_##op##_w_s, i32, env, i32) \
|
|
+ DEF_HELPER_2(float_##op##_w_d, i32, env, i64)
|
|
+FOP_PROTO(cvt)
|
|
+FOP_PROTO(round)
|
|
+FOP_PROTO(trunc)
|
|
+FOP_PROTO(ceil)
|
|
+FOP_PROTO(floor)
|
|
+#undef FOP_PROTO
|
|
+
|
|
+#define FOP_PROTO(op) \
|
|
+ DEF_HELPER_2(float_##op##_s, i32, env, i32) \
|
|
+ DEF_HELPER_2(float_##op##_d, i64, env, i64)
|
|
+FOP_PROTO(sqrt)
|
|
+FOP_PROTO(rsqrt)
|
|
+FOP_PROTO(recip)
|
|
+FOP_PROTO(rint)
|
|
+#undef FOP_PROTO
|
|
+
|
|
+#define FOP_PROTO(op) \
|
|
+ DEF_HELPER_1(float_##op##_s, i32, i32) \
|
|
+ DEF_HELPER_1(float_##op##_d, i64, i64)
|
|
+FOP_PROTO(abs)
|
|
+FOP_PROTO(chs)
|
|
+#undef FOP_PROTO
|
|
+
|
|
+#define FOP_PROTO(op) \
|
|
+ DEF_HELPER_3(float_##op##_s, i32, env, i32, i32) \
|
|
+ DEF_HELPER_3(float_##op##_d, i64, env, i64, i64)
|
|
+FOP_PROTO(add)
|
|
+FOP_PROTO(sub)
|
|
+FOP_PROTO(mul)
|
|
+FOP_PROTO(div)
|
|
+#undef FOP_PROTO
|
|
+
|
|
+#define FOP_PROTO(op) \
|
|
+ DEF_HELPER_3(cmp_d_##op, i64, env, i64, i64) \
|
|
+ DEF_HELPER_3(cmp_s_##op, i32, env, i32, i32)
|
|
+FOP_PROTO(af)
|
|
+FOP_PROTO(un)
|
|
+FOP_PROTO(eq)
|
|
+FOP_PROTO(ueq)
|
|
+FOP_PROTO(lt)
|
|
+FOP_PROTO(ult)
|
|
+FOP_PROTO(le)
|
|
+FOP_PROTO(ule)
|
|
+FOP_PROTO(saf)
|
|
+FOP_PROTO(sun)
|
|
+FOP_PROTO(seq)
|
|
+FOP_PROTO(sueq)
|
|
+FOP_PROTO(slt)
|
|
+FOP_PROTO(sult)
|
|
+FOP_PROTO(sle)
|
|
+FOP_PROTO(sule)
|
|
+FOP_PROTO(or)
|
|
+FOP_PROTO(une)
|
|
+FOP_PROTO(ne)
|
|
+FOP_PROTO(sor)
|
|
+FOP_PROTO(sune)
|
|
+FOP_PROTO(sne)
|
|
+#undef FOP_PROTO
|
|
+
|
|
+/* Special functions */
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+DEF_HELPER_1(tlbwr, void, env)
|
|
+DEF_HELPER_1(tlbfill, void, env)
|
|
+DEF_HELPER_1(tlbsrch, void, env)
|
|
+DEF_HELPER_1(tlbrd, void, env)
|
|
+DEF_HELPER_1(tlbclr, void, env)
|
|
+DEF_HELPER_1(tlbflush, void, env)
|
|
+DEF_HELPER_4(invtlb, void, env, tl, tl, tl)
|
|
+DEF_HELPER_1(ertn, void, env)
|
|
+DEF_HELPER_5(lddir, void, env, tl, tl, tl, i32)
|
|
+DEF_HELPER_4(ldpte, void, env, tl, tl, i32)
|
|
+DEF_HELPER_3(drdtime, void, env, tl, tl)
|
|
+DEF_HELPER_1(read_pgd, tl, env)
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
+DEF_HELPER_2(cpucfg, tl, env, tl)
|
|
+DEF_HELPER_1(idle, void, env)
|
|
+
|
|
+DEF_HELPER_3(float_exp2_s, i32, env, i32, i32)
|
|
+DEF_HELPER_3(float_exp2_d, i64, env, i64, i64)
|
|
+DEF_HELPER_2(float_logb_s, i32, env, i32)
|
|
+DEF_HELPER_2(float_logb_d, i64, env, i64)
|
|
+DEF_HELPER_3(movreg2cf, void, env, i32, tl)
|
|
+DEF_HELPER_2(movcf2reg, tl, env, i32)
|
|
+DEF_HELPER_3(movreg2cf_i32, void, env, i32, i32)
|
|
+DEF_HELPER_3(movreg2cf_i64, void, env, i32, i64)
|
|
+
|
|
+DEF_HELPER_2(cto_w, tl, env, tl)
|
|
+DEF_HELPER_2(ctz_w, tl, env, tl)
|
|
+DEF_HELPER_2(cto_d, tl, env, tl)
|
|
+DEF_HELPER_2(ctz_d, tl, env, tl)
|
|
+DEF_HELPER_2(bitrev_w, tl, env, tl)
|
|
+DEF_HELPER_2(bitrev_d, tl, env, tl)
|
|
+
|
|
+DEF_HELPER_2(load_scr, i64, env, i32)
|
|
+DEF_HELPER_3(store_scr, void, env, i32, i64)
|
|
+
|
|
+DEF_HELPER_3(asrtle_d, void, env, tl, tl)
|
|
+DEF_HELPER_3(asrtgt_d, void, env, tl, tl)
|
|
+
|
|
+DEF_HELPER_4(fsel, i64, env, i64, i64, i32)
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+DEF_HELPER_4(iocsr, void, env, tl, tl, i32)
|
|
+#endif
|
|
+DEF_HELPER_3(memtrace_addr, void, env, tl, i32)
|
|
+DEF_HELPER_2(memtrace_val, void, env, tl)
|
|
diff --git a/target/loongarch64/insn.decode b/target/loongarch64/insn.decode
|
|
new file mode 100644
|
|
index 0000000000..2f82441ea7
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/insn.decode
|
|
@@ -0,0 +1,532 @@
|
|
+#
|
|
+# loongarch ISA decode for 64-bit prefixed insns
|
|
+#
|
|
+# Copyright (c) 2023 Loongarch Technology
|
|
+#
|
|
+# This program is free software; you can redistribute it and/or modify it
|
|
+# under the terms and conditions of the GNU General Public License,
|
|
+# version 2 or later, as published by the Free Software Foundation.
|
|
+#
|
|
+# This program is distributed in the hope it will be useful, but WITHOUT
|
|
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+# more details.
|
|
+#
|
|
+# You should have received a copy of the GNU General Public License along with
|
|
+# this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+#
|
|
+
|
|
+# Fields
|
|
+%sd 0:2
|
|
+%rj 5:5
|
|
+%rd 0:5
|
|
+%sj 5:2
|
|
+%ptr 5:3
|
|
+%rk 10:5
|
|
+%sa2 15:2
|
|
+%sa3 15:3
|
|
+%si5 10:s5
|
|
+%code 0:15
|
|
+%cond 10:4
|
|
+%cond2 0:4
|
|
+%ui5 10:5
|
|
+%ui6 10:6
|
|
+%ui3 10:3
|
|
+%ui4 10:4
|
|
+%op 5:5
|
|
+%ui8 10:8
|
|
+%msbw 16:5
|
|
+%lsbw 10:5
|
|
+%msbd 16:6
|
|
+%lsbd 10:6
|
|
+%fd 0:5
|
|
+%fj 5:5
|
|
+%fk 10:5
|
|
+%fcsrd 0:5
|
|
+%fcsrs 5:5
|
|
+%cd 0:3
|
|
+%cj 5:3
|
|
+%si12 10:s12
|
|
+%ui12 10:12
|
|
+%csr 10:14
|
|
+%cop 0:5
|
|
+%level 10:8
|
|
+%seq 10:8
|
|
+%whint 0:15
|
|
+%addr 10:5
|
|
+%info 5:5
|
|
+%invop 0:5
|
|
+%fa 15:5
|
|
+%vd 0:5
|
|
+%vj 5:5
|
|
+%vk 10:5
|
|
+%va 15:5
|
|
+%xd 0:5
|
|
+%xj 5:5
|
|
+%xk 10:5
|
|
+%xa 15:5
|
|
+%fcond 15:5
|
|
+%ca 15:3
|
|
+%vui5 15:5
|
|
+%si16 10:s16
|
|
+%si20 5:s20
|
|
+%si14 10:s14
|
|
+%hint 0:5
|
|
+%si9 10:s9
|
|
+%si10 10:s10
|
|
+%si11 10:s11
|
|
+%si8 10:s8
|
|
+%idx1 18:1
|
|
+%idx2 18:2
|
|
+%idx3 18:3
|
|
+%idx4 18:4
|
|
+%idx 18:5
|
|
+%offs21 0:s5 10:16
|
|
+%offs16 10:s16
|
|
+%offs 0:s10 10:16
|
|
+%mode 5:5
|
|
+%ui2 10:2
|
|
+%ui1 10:1
|
|
+%ui7 10:7
|
|
+%i13 5:13
|
|
+
|
|
+# Argument sets
|
|
+&fmt_sdrj sd rj
|
|
+&fmt_rdsj rd sj
|
|
+&fmt_rdrj rd rj
|
|
+&fmt_empty
|
|
+&fmt_rjrk rj rk
|
|
+&fmt_rdrjrksa2 rd rj rk sa2
|
|
+&fmt_rdrjrksa3 rd rj rk sa3
|
|
+&fmt_rdrjrk rd rj rk
|
|
+&fmt_code code
|
|
+&fmt_rdrjui5 rd rj ui5
|
|
+&fmt_rdrjui6 rd rj ui6
|
|
+&fmt_rdrjmsbwlsbw rd rj msbw lsbw
|
|
+&fmt_rdrjmsbdlsbd rd rj msbd lsbd
|
|
+&fmt_fdfjfk fd fj fk
|
|
+&fmt_fdfj fd fj
|
|
+&fmt_fdrj fd rj
|
|
+&fmt_rdfj rd fj
|
|
+&fmt_fcsrdrj fcsrd rj
|
|
+&fmt_rdfcsrs rd fcsrs
|
|
+&fmt_cdfj cd fj
|
|
+&fmt_fdcj fd cj
|
|
+&fmt_cdrj cd rj
|
|
+&fmt_rdcj rd cj
|
|
+&fmt_rdrjsi12 rd rj si12
|
|
+&fmt_rdrjui12 rd rj ui12
|
|
+&fmt_rdrjcsr rd rj csr
|
|
+&fmt_coprjsi12 cop rj si12
|
|
+&fmt_rdrjlevel rd rj level
|
|
+&fmt_rjseq rj seq
|
|
+&fmt_whint whint
|
|
+&fmt_invtlb addr info invop
|
|
+&fmt_fdfjfkfa fd fj fk fa
|
|
+&fmt_cdfjfkfcond cd fj fk fcond
|
|
+&fmt_fdfjfkca fd fj fk ca
|
|
+&fmt_rdrjsi16 rd rj si16
|
|
+&fmt_rdsi20 rd si20
|
|
+&fmt_rdrjsi14 rd rj si14
|
|
+&fmt_hintrjsi12 hint rj si12
|
|
+&fmt_fdrjsi12 fd rj si12
|
|
+&fmt_fdrjrk fd rj rk
|
|
+&fmt_rjoffs21 rj offs21
|
|
+&fmt_cjoffs21 cj offs21
|
|
+&fmt_rdrjoffs16 rd rj offs16
|
|
+&fmt_offs offs
|
|
+&fmt_rjrdoffs16 rj rd offs16
|
|
+
|
|
+# Formats
|
|
+@fmt_sdrj .... ........ ..... ..... ..... ... .. &fmt_sdrj %sd %rj
|
|
+@fmt_rdsj .... ........ ..... ..... ... .. ..... &fmt_rdsj %rd %sj
|
|
+@fmt_rdrj .... ........ ..... ..... ..... ..... &fmt_rdrj %rd %rj
|
|
+@fmt_empty .... ........ ..... ..... ..... ..... &fmt_empty
|
|
+@fmt_rjrk .... ........ ..... ..... ..... ..... &fmt_rjrk %rj %rk
|
|
+@fmt_rdrjrksa2 .... ........ ... .. ..... ..... ..... &fmt_rdrjrksa2 %rd %rj %rk %sa2
|
|
+@fmt_rdrjrksa3 .... ........ .. ... ..... ..... ..... &fmt_rdrjrksa3 %rd %rj %rk %sa3
|
|
+@fmt_rdrjrk .... ........ ..... ..... ..... ..... &fmt_rdrjrk %rd %rj %rk
|
|
+@fmt_code .... ........ ..... ............... &fmt_code %code
|
|
+@fmt_rdrjui5 .... ........ ..... ..... ..... ..... &fmt_rdrjui5 %rd %rj %ui5
|
|
+@fmt_rdrjui6 .... ........ .... ...... ..... ..... &fmt_rdrjui6 %rd %rj %ui6
|
|
+@fmt_rdrjmsbwlsbw .... ....... ..... . ..... ..... ..... &fmt_rdrjmsbwlsbw %rd %rj %msbw %lsbw
|
|
+@fmt_rdrjmsbdlsbd .... ...... ...... ...... ..... ..... &fmt_rdrjmsbdlsbd %rd %rj %msbd %lsbd
|
|
+@fmt_fdfjfk .... ........ ..... ..... ..... ..... &fmt_fdfjfk %fd %fj %fk
|
|
+@fmt_fdfj .... ........ ..... ..... ..... ..... &fmt_fdfj %fd %fj
|
|
+@fmt_fdrj .... ........ ..... ..... ..... ..... &fmt_fdrj %fd %rj
|
|
+@fmt_rdfj .... ........ ..... ..... ..... ..... &fmt_rdfj %rd %fj
|
|
+@fmt_fcsrdrj .... ........ ..... ..... ..... ..... &fmt_fcsrdrj %fcsrd %rj
|
|
+@fmt_rdfcsrs .... ........ ..... ..... ..... ..... &fmt_rdfcsrs %rd %fcsrs
|
|
+@fmt_cdfj .... ........ ..... ..... ..... .. ... &fmt_cdfj %cd %fj
|
|
+@fmt_fdcj .... ........ ..... ..... .. ... ..... &fmt_fdcj %fd %cj
|
|
+@fmt_cdrj .... ........ ..... ..... ..... .. ... &fmt_cdrj %cd %rj
|
|
+@fmt_rdcj .... ........ ..... ..... .. ... ..... &fmt_rdcj %rd %cj
|
|
+@fmt_rdrjsi12 .... ...... ............ ..... ..... &fmt_rdrjsi12 %rd %rj %si12
|
|
+@fmt_rdrjui12 .... ...... ............ ..... ..... &fmt_rdrjui12 %rd %rj %ui12
|
|
+@fmt_rdrjcsr .... .... .............. ..... ..... &fmt_rdrjcsr %rd %rj %csr
|
|
+@fmt_coprjsi12 .... ...... ............ ..... ..... &fmt_coprjsi12 %cop %rj %si12
|
|
+@fmt_rdrjlevel .... ........ .. ........ ..... ..... &fmt_rdrjlevel %rd %rj %level
|
|
+@fmt_rjseq .... ........ .. ........ ..... ..... &fmt_rjseq %rj %seq
|
|
+@fmt_whint .... ........ ..... ............... &fmt_whint %whint
|
|
+@fmt_invtlb ...... ...... ..... ..... ..... ..... &fmt_invtlb %addr %info %invop
|
|
+@fmt_fdfjfkfa .... ........ ..... ..... ..... ..... &fmt_fdfjfkfa %fd %fj %fk %fa
|
|
+@fmt_cdfjfkfcond .... ........ ..... ..... ..... .. ... &fmt_cdfjfkfcond %cd %fj %fk %fcond
|
|
+@fmt_fdfjfkca .... ........ .. ... ..... ..... ..... &fmt_fdfjfkca %fd %fj %fk %ca
|
|
+@fmt_rdrjsi16 .... .. ................ ..... ..... &fmt_rdrjsi16 %rd %rj %si16
|
|
+@fmt_rdsi20 .... ... .................... ..... &fmt_rdsi20 %rd %si20
|
|
+@fmt_rdrjsi14 .... .... .............. ..... ..... &fmt_rdrjsi14 %rd %rj %si14
|
|
+@fmt_hintrjsi12 .... ...... ............ ..... ..... &fmt_hintrjsi12 %hint %rj %si12
|
|
+@fmt_fdrjsi12 .... ...... ............ ..... ..... &fmt_fdrjsi12 %fd %rj %si12
|
|
+@fmt_fdrjrk .... ........ ..... ..... ..... ..... &fmt_fdrjrk %fd %rj %rk
|
|
+@fmt_rjoffs21 .... .. ................ ..... ..... &fmt_rjoffs21 %rj %offs21
|
|
+@fmt_cjoffs21 .... .. ................ .. ... ..... &fmt_cjoffs21 %cj %offs21
|
|
+@fmt_rdrjoffs16 .... .. ................ ..... ..... &fmt_rdrjoffs16 %rd %rj %offs16
|
|
+@fmt_offs .... .. .......................... &fmt_offs %offs
|
|
+@fmt_rjrdoffs16 .... .. ................ ..... ..... &fmt_rjrdoffs16 %rj %rd %offs16
|
|
+
|
|
+# Instructions
|
|
+
|
|
+# Fixed-point arithmetic instructions
|
|
+gr2scr 0000 00000000 00000 00010 ..... 000 .. @fmt_sdrj
|
|
+scr2gr 0000 00000000 00000 00011 000 .. ..... @fmt_rdsj
|
|
+clo_w 0000 00000000 00000 00100 ..... ..... @fmt_rdrj
|
|
+clz_w 0000 00000000 00000 00101 ..... ..... @fmt_rdrj
|
|
+cto_w 0000 00000000 00000 00110 ..... ..... @fmt_rdrj
|
|
+ctz_w 0000 00000000 00000 00111 ..... ..... @fmt_rdrj
|
|
+clo_d 0000 00000000 00000 01000 ..... ..... @fmt_rdrj
|
|
+clz_d 0000 00000000 00000 01001 ..... ..... @fmt_rdrj
|
|
+cto_d 0000 00000000 00000 01010 ..... ..... @fmt_rdrj
|
|
+ctz_d 0000 00000000 00000 01011 ..... ..... @fmt_rdrj
|
|
+revb_2h 0000 00000000 00000 01100 ..... ..... @fmt_rdrj
|
|
+revb_4h 0000 00000000 00000 01101 ..... ..... @fmt_rdrj
|
|
+revb_2w 0000 00000000 00000 01110 ..... ..... @fmt_rdrj
|
|
+revb_d 0000 00000000 00000 01111 ..... ..... @fmt_rdrj
|
|
+revh_2w 0000 00000000 00000 10000 ..... ..... @fmt_rdrj
|
|
+revh_d 0000 00000000 00000 10001 ..... ..... @fmt_rdrj
|
|
+bitrev_4b 0000 00000000 00000 10010 ..... ..... @fmt_rdrj
|
|
+bitrev_8b 0000 00000000 00000 10011 ..... ..... @fmt_rdrj
|
|
+bitrev_w 0000 00000000 00000 10100 ..... ..... @fmt_rdrj
|
|
+bitrev_d 0000 00000000 00000 10101 ..... ..... @fmt_rdrj
|
|
+ext_w_h 0000 00000000 00000 10110 ..... ..... @fmt_rdrj
|
|
+ext_w_b 0000 00000000 00000 10111 ..... ..... @fmt_rdrj
|
|
+rdtime_d 0000 00000000 00000 11010 ..... ..... @fmt_rdrj
|
|
+cpucfg 0000 00000000 00000 11011 ..... ..... @fmt_rdrj
|
|
+asrtle_d 0000 00000000 00010 ..... ..... 00000 @fmt_rjrk
|
|
+asrtgt_d 0000 00000000 00011 ..... ..... 00000 @fmt_rjrk
|
|
+alsl_w 0000 00000000 010 .. ..... ..... ..... @fmt_rdrjrksa2
|
|
+alsl_wu 0000 00000000 011 .. ..... ..... ..... @fmt_rdrjrksa2
|
|
+bytepick_w 0000 00000000 100 .. ..... ..... ..... @fmt_rdrjrksa2
|
|
+bytepick_d 0000 00000000 11 ... ..... ..... ..... @fmt_rdrjrksa3
|
|
+add_w 0000 00000001 00000 ..... ..... ..... @fmt_rdrjrk
|
|
+add_d 0000 00000001 00001 ..... ..... ..... @fmt_rdrjrk
|
|
+sub_w 0000 00000001 00010 ..... ..... ..... @fmt_rdrjrk
|
|
+sub_d 0000 00000001 00011 ..... ..... ..... @fmt_rdrjrk
|
|
+slt 0000 00000001 00100 ..... ..... ..... @fmt_rdrjrk
|
|
+sltu 0000 00000001 00101 ..... ..... ..... @fmt_rdrjrk
|
|
+maskeqz 0000 00000001 00110 ..... ..... ..... @fmt_rdrjrk
|
|
+masknez 0000 00000001 00111 ..... ..... ..... @fmt_rdrjrk
|
|
+nor 0000 00000001 01000 ..... ..... ..... @fmt_rdrjrk
|
|
+and 0000 00000001 01001 ..... ..... ..... @fmt_rdrjrk
|
|
+or 0000 00000001 01010 ..... ..... ..... @fmt_rdrjrk
|
|
+xor 0000 00000001 01011 ..... ..... ..... @fmt_rdrjrk
|
|
+orn 0000 00000001 01100 ..... ..... ..... @fmt_rdrjrk
|
|
+andn 0000 00000001 01101 ..... ..... ..... @fmt_rdrjrk
|
|
+sll_w 0000 00000001 01110 ..... ..... ..... @fmt_rdrjrk
|
|
+srl_w 0000 00000001 01111 ..... ..... ..... @fmt_rdrjrk
|
|
+sra_w 0000 00000001 10000 ..... ..... ..... @fmt_rdrjrk
|
|
+sll_d 0000 00000001 10001 ..... ..... ..... @fmt_rdrjrk
|
|
+srl_d 0000 00000001 10010 ..... ..... ..... @fmt_rdrjrk
|
|
+sra_d 0000 00000001 10011 ..... ..... ..... @fmt_rdrjrk
|
|
+rotr_w 0000 00000001 10110 ..... ..... ..... @fmt_rdrjrk
|
|
+rotr_d 0000 00000001 10111 ..... ..... ..... @fmt_rdrjrk
|
|
+mul_w 0000 00000001 11000 ..... ..... ..... @fmt_rdrjrk
|
|
+mulh_w 0000 00000001 11001 ..... ..... ..... @fmt_rdrjrk
|
|
+mulh_wu 0000 00000001 11010 ..... ..... ..... @fmt_rdrjrk
|
|
+mul_d 0000 00000001 11011 ..... ..... ..... @fmt_rdrjrk
|
|
+mulh_d 0000 00000001 11100 ..... ..... ..... @fmt_rdrjrk
|
|
+mulh_du 0000 00000001 11101 ..... ..... ..... @fmt_rdrjrk
|
|
+mulw_d_w 0000 00000001 11110 ..... ..... ..... @fmt_rdrjrk
|
|
+mulw_d_wu 0000 00000001 11111 ..... ..... ..... @fmt_rdrjrk
|
|
+div_w 0000 00000010 00000 ..... ..... ..... @fmt_rdrjrk
|
|
+mod_w 0000 00000010 00001 ..... ..... ..... @fmt_rdrjrk
|
|
+div_wu 0000 00000010 00010 ..... ..... ..... @fmt_rdrjrk
|
|
+mod_wu 0000 00000010 00011 ..... ..... ..... @fmt_rdrjrk
|
|
+div_d 0000 00000010 00100 ..... ..... ..... @fmt_rdrjrk
|
|
+mod_d 0000 00000010 00101 ..... ..... ..... @fmt_rdrjrk
|
|
+div_du 0000 00000010 00110 ..... ..... ..... @fmt_rdrjrk
|
|
+mod_du 0000 00000010 00111 ..... ..... ..... @fmt_rdrjrk
|
|
+crc_w_b_w 0000 00000010 01000 ..... ..... ..... @fmt_rdrjrk
|
|
+crc_w_h_w 0000 00000010 01001 ..... ..... ..... @fmt_rdrjrk
|
|
+crc_w_w_w 0000 00000010 01010 ..... ..... ..... @fmt_rdrjrk
|
|
+crc_w_d_w 0000 00000010 01011 ..... ..... ..... @fmt_rdrjrk
|
|
+crcc_w_b_w 0000 00000010 01100 ..... ..... ..... @fmt_rdrjrk
|
|
+crcc_w_h_w 0000 00000010 01101 ..... ..... ..... @fmt_rdrjrk
|
|
+crcc_w_w_w 0000 00000010 01110 ..... ..... ..... @fmt_rdrjrk
|
|
+crcc_w_d_w 0000 00000010 01111 ..... ..... ..... @fmt_rdrjrk
|
|
+break 0000 00000010 10100 ............... @fmt_code
|
|
+dbcl 0000 00000010 10101 ............... @fmt_code
|
|
+syscall 0000 00000010 10110 ............... @fmt_code
|
|
+alsl_d 0000 00000010 110 .. ..... ..... ..... @fmt_rdrjrksa2
|
|
+slli_w 0000 00000100 00001 ..... ..... ..... @fmt_rdrjui5
|
|
+slli_d 0000 00000100 0001 ...... ..... ..... @fmt_rdrjui6
|
|
+srli_w 0000 00000100 01001 ..... ..... ..... @fmt_rdrjui5
|
|
+srli_d 0000 00000100 0101 ...... ..... ..... @fmt_rdrjui6
|
|
+srai_w 0000 00000100 10001 ..... ..... ..... @fmt_rdrjui5
|
|
+srai_d 0000 00000100 1001 ...... ..... ..... @fmt_rdrjui6
|
|
+rotri_w 0000 00000100 11001 ..... ..... ..... @fmt_rdrjui5
|
|
+rotri_d 0000 00000100 1101 ...... ..... ..... @fmt_rdrjui6
|
|
+bstrins_w 0000 0000011 ..... 0 ..... ..... ..... @fmt_rdrjmsbwlsbw
|
|
+bstrpick_w 0000 0000011 ..... 1 ..... ..... ..... @fmt_rdrjmsbwlsbw
|
|
+bstrins_d 0000 000010 ...... ...... ..... ..... @fmt_rdrjmsbdlsbd
|
|
+bstrpick_d 0000 000011 ...... ...... ..... ..... @fmt_rdrjmsbdlsbd
|
|
+
|
|
+# float Instructions
|
|
+fadd_s 0000 00010000 00001 ..... ..... ..... @fmt_fdfjfk
|
|
+fadd_d 0000 00010000 00010 ..... ..... ..... @fmt_fdfjfk
|
|
+fsub_s 0000 00010000 00101 ..... ..... ..... @fmt_fdfjfk
|
|
+fsub_d 0000 00010000 00110 ..... ..... ..... @fmt_fdfjfk
|
|
+fmul_s 0000 00010000 01001 ..... ..... ..... @fmt_fdfjfk
|
|
+fmul_d 0000 00010000 01010 ..... ..... ..... @fmt_fdfjfk
|
|
+fdiv_s 0000 00010000 01101 ..... ..... ..... @fmt_fdfjfk
|
|
+fdiv_d 0000 00010000 01110 ..... ..... ..... @fmt_fdfjfk
|
|
+fmax_s 0000 00010000 10001 ..... ..... ..... @fmt_fdfjfk
|
|
+fmax_d 0000 00010000 10010 ..... ..... ..... @fmt_fdfjfk
|
|
+fmin_s 0000 00010000 10101 ..... ..... ..... @fmt_fdfjfk
|
|
+fmin_d 0000 00010000 10110 ..... ..... ..... @fmt_fdfjfk
|
|
+fmaxa_s 0000 00010000 11001 ..... ..... ..... @fmt_fdfjfk
|
|
+fmaxa_d 0000 00010000 11010 ..... ..... ..... @fmt_fdfjfk
|
|
+fmina_s 0000 00010000 11101 ..... ..... ..... @fmt_fdfjfk
|
|
+fmina_d 0000 00010000 11110 ..... ..... ..... @fmt_fdfjfk
|
|
+fscaleb_s 0000 00010001 00001 ..... ..... ..... @fmt_fdfjfk
|
|
+fscaleb_d 0000 00010001 00010 ..... ..... ..... @fmt_fdfjfk
|
|
+fcopysign_s 0000 00010001 00101 ..... ..... ..... @fmt_fdfjfk
|
|
+fcopysign_d 0000 00010001 00110 ..... ..... ..... @fmt_fdfjfk
|
|
+fabs_s 0000 00010001 01000 00001 ..... ..... @fmt_fdfj
|
|
+fabs_d 0000 00010001 01000 00010 ..... ..... @fmt_fdfj
|
|
+fneg_s 0000 00010001 01000 00101 ..... ..... @fmt_fdfj
|
|
+fneg_d 0000 00010001 01000 00110 ..... ..... @fmt_fdfj
|
|
+flogb_s 0000 00010001 01000 01001 ..... ..... @fmt_fdfj
|
|
+flogb_d 0000 00010001 01000 01010 ..... ..... @fmt_fdfj
|
|
+fclass_s 0000 00010001 01000 01101 ..... ..... @fmt_fdfj
|
|
+fclass_d 0000 00010001 01000 01110 ..... ..... @fmt_fdfj
|
|
+fsqrt_s 0000 00010001 01000 10001 ..... ..... @fmt_fdfj
|
|
+fsqrt_d 0000 00010001 01000 10010 ..... ..... @fmt_fdfj
|
|
+frecip_s 0000 00010001 01000 10101 ..... ..... @fmt_fdfj
|
|
+frecip_d 0000 00010001 01000 10110 ..... ..... @fmt_fdfj
|
|
+frsqrt_s 0000 00010001 01000 11001 ..... ..... @fmt_fdfj
|
|
+frsqrt_d 0000 00010001 01000 11010 ..... ..... @fmt_fdfj
|
|
+fmov_s 0000 00010001 01001 00101 ..... ..... @fmt_fdfj
|
|
+fmov_d 0000 00010001 01001 00110 ..... ..... @fmt_fdfj
|
|
+movgr2fr_w 0000 00010001 01001 01001 ..... ..... @fmt_fdrj
|
|
+movgr2fr_d 0000 00010001 01001 01010 ..... ..... @fmt_fdrj
|
|
+movgr2frh_w 0000 00010001 01001 01011 ..... ..... @fmt_fdrj
|
|
+movfr2gr_s 0000 00010001 01001 01101 ..... ..... @fmt_rdfj
|
|
+movfr2gr_d 0000 00010001 01001 01110 ..... ..... @fmt_rdfj
|
|
+movfrh2gr_s 0000 00010001 01001 01111 ..... ..... @fmt_rdfj
|
|
+movgr2fcsr 0000 00010001 01001 10000 ..... ..... @fmt_fcsrdrj
|
|
+movfcsr2gr 0000 00010001 01001 10010 ..... ..... @fmt_rdfcsrs
|
|
+movfr2cf 0000 00010001 01001 10100 ..... 00 ... @fmt_cdfj
|
|
+movcf2fr 0000 00010001 01001 10101 00 ... ..... @fmt_fdcj
|
|
+movgr2cf 0000 00010001 01001 10110 ..... 00 ... @fmt_cdrj
|
|
+movcf2gr 0000 00010001 01001 10111 00 ... ..... @fmt_rdcj
|
|
+fcvt_s_d 0000 00010001 10010 00110 ..... ..... @fmt_fdfj
|
|
+fcvt_d_s 0000 00010001 10010 01001 ..... ..... @fmt_fdfj
|
|
+ftintrm_w_s 0000 00010001 10100 00001 ..... ..... @fmt_fdfj
|
|
+ftintrm_w_d 0000 00010001 10100 00010 ..... ..... @fmt_fdfj
|
|
+ftintrm_l_s 0000 00010001 10100 01001 ..... ..... @fmt_fdfj
|
|
+ftintrm_l_d 0000 00010001 10100 01010 ..... ..... @fmt_fdfj
|
|
+ftintrp_w_s 0000 00010001 10100 10001 ..... ..... @fmt_fdfj
|
|
+ftintrp_w_d 0000 00010001 10100 10010 ..... ..... @fmt_fdfj
|
|
+ftintrp_l_s 0000 00010001 10100 11001 ..... ..... @fmt_fdfj
|
|
+ftintrp_l_d 0000 00010001 10100 11010 ..... ..... @fmt_fdfj
|
|
+ftintrz_w_s 0000 00010001 10101 00001 ..... ..... @fmt_fdfj
|
|
+ftintrz_w_d 0000 00010001 10101 00010 ..... ..... @fmt_fdfj
|
|
+ftintrz_l_s 0000 00010001 10101 01001 ..... ..... @fmt_fdfj
|
|
+ftintrz_l_d 0000 00010001 10101 01010 ..... ..... @fmt_fdfj
|
|
+ftintrne_w_s 0000 00010001 10101 10001 ..... ..... @fmt_fdfj
|
|
+ftintrne_w_d 0000 00010001 10101 10010 ..... ..... @fmt_fdfj
|
|
+ftintrne_l_s 0000 00010001 10101 11001 ..... ..... @fmt_fdfj
|
|
+ftintrne_l_d 0000 00010001 10101 11010 ..... ..... @fmt_fdfj
|
|
+ftint_w_s 0000 00010001 10110 00001 ..... ..... @fmt_fdfj
|
|
+ftint_w_d 0000 00010001 10110 00010 ..... ..... @fmt_fdfj
|
|
+ftint_l_s 0000 00010001 10110 01001 ..... ..... @fmt_fdfj
|
|
+ftint_l_d 0000 00010001 10110 01010 ..... ..... @fmt_fdfj
|
|
+ffint_s_w 0000 00010001 11010 00100 ..... ..... @fmt_fdfj
|
|
+ffint_s_l 0000 00010001 11010 00110 ..... ..... @fmt_fdfj
|
|
+ffint_d_w 0000 00010001 11010 01000 ..... ..... @fmt_fdfj
|
|
+ffint_d_l 0000 00010001 11010 01010 ..... ..... @fmt_fdfj
|
|
+frint_s 0000 00010001 11100 10001 ..... ..... @fmt_fdfj
|
|
+frint_d 0000 00010001 11100 10010 ..... ..... @fmt_fdfj
|
|
+
|
|
+# 12 bit immediate Instructions
|
|
+slti 0000 001000 ............ ..... ..... @fmt_rdrjsi12
|
|
+sltui 0000 001001 ............ ..... ..... @fmt_rdrjsi12
|
|
+addi_w 0000 001010 ............ ..... ..... @fmt_rdrjsi12
|
|
+addi_d 0000 001011 ............ ..... ..... @fmt_rdrjsi12
|
|
+lu52i_d 0000 001100 ............ ..... ..... @fmt_rdrjsi12
|
|
+andi 0000 001101 ............ ..... ..... @fmt_rdrjui12
|
|
+ori 0000 001110 ............ ..... ..... @fmt_rdrjui12
|
|
+xori 0000 001111 ............ ..... ..... @fmt_rdrjui12
|
|
+
|
|
+# core Instructions
|
|
+csrxchg 0000 0100 .............. ..... ..... @fmt_rdrjcsr
|
|
+cacop 0000 011000 ............ ..... ..... @fmt_coprjsi12
|
|
+lddir 0000 01100100 00 ........ ..... ..... @fmt_rdrjlevel
|
|
+ldpte 0000 01100100 01 ........ ..... 00000 @fmt_rjseq
|
|
+iocsrrd_b 0000 01100100 10000 00000 ..... ..... @fmt_rdrj
|
|
+iocsrrd_h 0000 01100100 10000 00001 ..... ..... @fmt_rdrj
|
|
+iocsrrd_w 0000 01100100 10000 00010 ..... ..... @fmt_rdrj
|
|
+iocsrrd_d 0000 01100100 10000 00011 ..... ..... @fmt_rdrj
|
|
+iocsrwr_b 0000 01100100 10000 00100 ..... ..... @fmt_rdrj
|
|
+iocsrwr_h 0000 01100100 10000 00101 ..... ..... @fmt_rdrj
|
|
+iocsrwr_w 0000 01100100 10000 00110 ..... ..... @fmt_rdrj
|
|
+iocsrwr_d 0000 01100100 10000 00111 ..... ..... @fmt_rdrj
|
|
+tlbclr 0000 01100100 10000 01000 00000 00000 @fmt_empty
|
|
+tlbflush 0000 01100100 10000 01001 00000 00000 @fmt_empty
|
|
+tlbsrch 0000 01100100 10000 01010 00000 00000 @fmt_empty
|
|
+tlbrd 0000 01100100 10000 01011 00000 00000 @fmt_empty
|
|
+tlbwr 0000 01100100 10000 01100 00000 00000 @fmt_empty
|
|
+tlbfill 0000 01100100 10000 01101 00000 00000 @fmt_empty
|
|
+ertn 0000 01100100 10000 01110 00000 00000 @fmt_empty
|
|
+idle 0000 01100100 10001 ............... @fmt_whint
|
|
+invtlb 0000 01100100 10011 ..... ..... ..... @fmt_invtlb
|
|
+
|
|
+# Four-operand instructions
|
|
+fmadd_s 0000 10000001 ..... ..... ..... ..... @fmt_fdfjfkfa
|
|
+fmadd_d 0000 10000010 ..... ..... ..... ..... @fmt_fdfjfkfa
|
|
+fmsub_s 0000 10000101 ..... ..... ..... ..... @fmt_fdfjfkfa
|
|
+fmsub_d 0000 10000110 ..... ..... ..... ..... @fmt_fdfjfkfa
|
|
+fnmadd_s 0000 10001001 ..... ..... ..... ..... @fmt_fdfjfkfa
|
|
+fnmadd_d 0000 10001010 ..... ..... ..... ..... @fmt_fdfjfkfa
|
|
+fnmsub_s 0000 10001101 ..... ..... ..... ..... @fmt_fdfjfkfa
|
|
+fnmsub_d 0000 10001110 ..... ..... ..... ..... @fmt_fdfjfkfa
|
|
+fcmp_cond_s 0000 11000001 ..... ..... ..... 00 ... @fmt_cdfjfkfcond
|
|
+fcmp_cond_d 0000 11000010 ..... ..... ..... 00 ... @fmt_cdfjfkfcond
|
|
+fsel 0000 11010000 00 ... ..... ..... ..... @fmt_fdfjfkca
|
|
+
|
|
+# Long immediate instructions
|
|
+addu16i_d 0001 00 ................ ..... ..... @fmt_rdrjsi16
|
|
+lu12i_w 0001 010 .................... ..... @fmt_rdsi20
|
|
+lu32i_d 0001 011 .................... ..... @fmt_rdsi20
|
|
+pcaddi 0001 100 .................... ..... @fmt_rdsi20
|
|
+pcalau12i 0001 101 .................... ..... @fmt_rdsi20
|
|
+pcaddu12i 0001 110 .................... ..... @fmt_rdsi20
|
|
+pcaddu18i 0001 111 .................... ..... @fmt_rdsi20
|
|
+
|
|
+# load/store Instructions
|
|
+ll_w 0010 0000 .............. ..... ..... @fmt_rdrjsi14
|
|
+sc_w 0010 0001 .............. ..... ..... @fmt_rdrjsi14
|
|
+ll_d 0010 0010 .............. ..... ..... @fmt_rdrjsi14
|
|
+sc_d 0010 0011 .............. ..... ..... @fmt_rdrjsi14
|
|
+ldptr_w 0010 0100 .............. ..... ..... @fmt_rdrjsi14
|
|
+stptr_w 0010 0101 .............. ..... ..... @fmt_rdrjsi14
|
|
+ldptr_d 0010 0110 .............. ..... ..... @fmt_rdrjsi14
|
|
+stptr_d 0010 0111 .............. ..... ..... @fmt_rdrjsi14
|
|
+ld_b 0010 100000 ............ ..... ..... @fmt_rdrjsi12
|
|
+ld_h 0010 100001 ............ ..... ..... @fmt_rdrjsi12
|
|
+ld_w 0010 100010 ............ ..... ..... @fmt_rdrjsi12
|
|
+ld_d 0010 100011 ............ ..... ..... @fmt_rdrjsi12
|
|
+st_b 0010 100100 ............ ..... ..... @fmt_rdrjsi12
|
|
+st_h 0010 100101 ............ ..... ..... @fmt_rdrjsi12
|
|
+st_w 0010 100110 ............ ..... ..... @fmt_rdrjsi12
|
|
+st_d 0010 100111 ............ ..... ..... @fmt_rdrjsi12
|
|
+ld_bu 0010 101000 ............ ..... ..... @fmt_rdrjsi12
|
|
+ld_hu 0010 101001 ............ ..... ..... @fmt_rdrjsi12
|
|
+ld_wu 0010 101010 ............ ..... ..... @fmt_rdrjsi12
|
|
+preld 0010 101011 ............ ..... ..... @fmt_hintrjsi12
|
|
+fld_s 0010 101100 ............ ..... ..... @fmt_fdrjsi12
|
|
+fst_s 0010 101101 ............ ..... ..... @fmt_fdrjsi12
|
|
+fld_d 0010 101110 ............ ..... ..... @fmt_fdrjsi12
|
|
+fst_d 0010 101111 ............ ..... ..... @fmt_fdrjsi12
|
|
+ldx_b 0011 10000000 00000 ..... ..... ..... @fmt_rdrjrk
|
|
+ldx_h 0011 10000000 01000 ..... ..... ..... @fmt_rdrjrk
|
|
+ldx_w 0011 10000000 10000 ..... ..... ..... @fmt_rdrjrk
|
|
+ldx_d 0011 10000000 11000 ..... ..... ..... @fmt_rdrjrk
|
|
+stx_b 0011 10000001 00000 ..... ..... ..... @fmt_rdrjrk
|
|
+stx_h 0011 10000001 01000 ..... ..... ..... @fmt_rdrjrk
|
|
+stx_w 0011 10000001 10000 ..... ..... ..... @fmt_rdrjrk
|
|
+stx_d 0011 10000001 11000 ..... ..... ..... @fmt_rdrjrk
|
|
+ldx_bu 0011 10000010 00000 ..... ..... ..... @fmt_rdrjrk
|
|
+ldx_hu 0011 10000010 01000 ..... ..... ..... @fmt_rdrjrk
|
|
+ldx_wu 0011 10000010 10000 ..... ..... ..... @fmt_rdrjrk
|
|
+fldx_s 0011 10000011 00000 ..... ..... ..... @fmt_fdrjrk
|
|
+fldx_d 0011 10000011 01000 ..... ..... ..... @fmt_fdrjrk
|
|
+fstx_s 0011 10000011 10000 ..... ..... ..... @fmt_fdrjrk
|
|
+fstx_d 0011 10000011 11000 ..... ..... ..... @fmt_fdrjrk
|
|
+amswap_w 0011 10000110 00000 ..... ..... ..... @fmt_rdrjrk
|
|
+amswap_d 0011 10000110 00001 ..... ..... ..... @fmt_rdrjrk
|
|
+amadd_w 0011 10000110 00010 ..... ..... ..... @fmt_rdrjrk
|
|
+amadd_d 0011 10000110 00011 ..... ..... ..... @fmt_rdrjrk
|
|
+amand_w 0011 10000110 00100 ..... ..... ..... @fmt_rdrjrk
|
|
+amand_d 0011 10000110 00101 ..... ..... ..... @fmt_rdrjrk
|
|
+amor_w 0011 10000110 00110 ..... ..... ..... @fmt_rdrjrk
|
|
+amor_d 0011 10000110 00111 ..... ..... ..... @fmt_rdrjrk
|
|
+amxor_w 0011 10000110 01000 ..... ..... ..... @fmt_rdrjrk
|
|
+amxor_d 0011 10000110 01001 ..... ..... ..... @fmt_rdrjrk
|
|
+ammax_w 0011 10000110 01010 ..... ..... ..... @fmt_rdrjrk
|
|
+ammax_d 0011 10000110 01011 ..... ..... ..... @fmt_rdrjrk
|
|
+ammin_w 0011 10000110 01100 ..... ..... ..... @fmt_rdrjrk
|
|
+ammin_d 0011 10000110 01101 ..... ..... ..... @fmt_rdrjrk
|
|
+ammax_wu 0011 10000110 01110 ..... ..... ..... @fmt_rdrjrk
|
|
+ammax_du 0011 10000110 01111 ..... ..... ..... @fmt_rdrjrk
|
|
+ammin_wu 0011 10000110 10000 ..... ..... ..... @fmt_rdrjrk
|
|
+ammin_du 0011 10000110 10001 ..... ..... ..... @fmt_rdrjrk
|
|
+amswap_db_w 0011 10000110 10010 ..... ..... ..... @fmt_rdrjrk
|
|
+amswap_db_d 0011 10000110 10011 ..... ..... ..... @fmt_rdrjrk
|
|
+amadd_db_w 0011 10000110 10100 ..... ..... ..... @fmt_rdrjrk
|
|
+amadd_db_d 0011 10000110 10101 ..... ..... ..... @fmt_rdrjrk
|
|
+amand_db_w 0011 10000110 10110 ..... ..... ..... @fmt_rdrjrk
|
|
+amand_db_d 0011 10000110 10111 ..... ..... ..... @fmt_rdrjrk
|
|
+amor_db_w 0011 10000110 11000 ..... ..... ..... @fmt_rdrjrk
|
|
+amor_db_d 0011 10000110 11001 ..... ..... ..... @fmt_rdrjrk
|
|
+amxor_db_w 0011 10000110 11010 ..... ..... ..... @fmt_rdrjrk
|
|
+amxor_db_d 0011 10000110 11011 ..... ..... ..... @fmt_rdrjrk
|
|
+ammax_db_w 0011 10000110 11100 ..... ..... ..... @fmt_rdrjrk
|
|
+ammax_db_d 0011 10000110 11101 ..... ..... ..... @fmt_rdrjrk
|
|
+ammin_db_w 0011 10000110 11110 ..... ..... ..... @fmt_rdrjrk
|
|
+ammin_db_d 0011 10000110 11111 ..... ..... ..... @fmt_rdrjrk
|
|
+ammax_db_wu 0011 10000111 00000 ..... ..... ..... @fmt_rdrjrk
|
|
+ammax_db_du 0011 10000111 00001 ..... ..... ..... @fmt_rdrjrk
|
|
+ammin_db_wu 0011 10000111 00010 ..... ..... ..... @fmt_rdrjrk
|
|
+ammin_db_du 0011 10000111 00011 ..... ..... ..... @fmt_rdrjrk
|
|
+dbar 0011 10000111 00100 ............... @fmt_whint
|
|
+ibar 0011 10000111 00101 ............... @fmt_whint
|
|
+fldgt_s 0011 10000111 01000 ..... ..... ..... @fmt_fdrjrk
|
|
+fldgt_d 0011 10000111 01001 ..... ..... ..... @fmt_fdrjrk
|
|
+fldle_s 0011 10000111 01010 ..... ..... ..... @fmt_fdrjrk
|
|
+fldle_d 0011 10000111 01011 ..... ..... ..... @fmt_fdrjrk
|
|
+fstgt_s 0011 10000111 01100 ..... ..... ..... @fmt_fdrjrk
|
|
+fstgt_d 0011 10000111 01101 ..... ..... ..... @fmt_fdrjrk
|
|
+fstle_s 0011 10000111 01110 ..... ..... ..... @fmt_fdrjrk
|
|
+fstle_d 0011 10000111 01111 ..... ..... ..... @fmt_fdrjrk
|
|
+ldgt_b 0011 10000111 10000 ..... ..... ..... @fmt_rdrjrk
|
|
+ldgt_h 0011 10000111 10001 ..... ..... ..... @fmt_rdrjrk
|
|
+ldgt_w 0011 10000111 10010 ..... ..... ..... @fmt_rdrjrk
|
|
+ldgt_d 0011 10000111 10011 ..... ..... ..... @fmt_rdrjrk
|
|
+ldle_b 0011 10000111 10100 ..... ..... ..... @fmt_rdrjrk
|
|
+ldle_h 0011 10000111 10101 ..... ..... ..... @fmt_rdrjrk
|
|
+ldle_w 0011 10000111 10110 ..... ..... ..... @fmt_rdrjrk
|
|
+ldle_d 0011 10000111 10111 ..... ..... ..... @fmt_rdrjrk
|
|
+stgt_b 0011 10000111 11000 ..... ..... ..... @fmt_rdrjrk
|
|
+stgt_h 0011 10000111 11001 ..... ..... ..... @fmt_rdrjrk
|
|
+stgt_w 0011 10000111 11010 ..... ..... ..... @fmt_rdrjrk
|
|
+stgt_d 0011 10000111 11011 ..... ..... ..... @fmt_rdrjrk
|
|
+stle_b 0011 10000111 11100 ..... ..... ..... @fmt_rdrjrk
|
|
+stle_h 0011 10000111 11101 ..... ..... ..... @fmt_rdrjrk
|
|
+stle_w 0011 10000111 11110 ..... ..... ..... @fmt_rdrjrk
|
|
+stle_d 0011 10000111 11111 ..... ..... ..... @fmt_rdrjrk
|
|
+
|
|
+# jump Instructions
|
|
+beqz 0100 00 ................ ..... ..... @fmt_rjoffs21
|
|
+bnez 0100 01 ................ ..... ..... @fmt_rjoffs21
|
|
+bceqz 0100 10 ................ 00 ... ..... @fmt_cjoffs21
|
|
+bcnez 0100 10 ................ 01 ... ..... @fmt_cjoffs21
|
|
+jirl 0100 11 ................ ..... ..... @fmt_rdrjoffs16
|
|
+b 0101 00 .......................... @fmt_offs
|
|
+bl 0101 01 .......................... @fmt_offs
|
|
+beq 0101 10 ................ ..... ..... @fmt_rjrdoffs16
|
|
+bne 0101 11 ................ ..... ..... @fmt_rjrdoffs16
|
|
+blt 0110 00 ................ ..... ..... @fmt_rjrdoffs16
|
|
+bge 0110 01 ................ ..... ..... @fmt_rjrdoffs16
|
|
+bltu 0110 10 ................ ..... ..... @fmt_rjrdoffs16
|
|
+bgeu 0110 11 ................ ..... ..... @fmt_rjrdoffs16
|
|
diff --git a/target/loongarch64/instmap.h b/target/loongarch64/instmap.h
|
|
new file mode 100644
|
|
index 0000000000..5fbb8b5d29
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/instmap.h
|
|
@@ -0,0 +1,217 @@
|
|
+/*
|
|
+ * Loongarch emulation for qemu: instruction opcode
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef TARGET_LARCH_INSTMAP_H
|
|
+#define TARGET_LARCH_INSTMAP_H
|
|
+
|
|
+enum {
|
|
+ /* fix opcodes */
|
|
+ OPC_LARCH_CLO_W = (0x000004 << 10),
|
|
+ OPC_LARCH_CLZ_W = (0x000005 << 10),
|
|
+ OPC_LARCH_CLO_D = (0x000008 << 10),
|
|
+ OPC_LARCH_CLZ_D = (0x000009 << 10),
|
|
+ OPC_LARCH_REVB_2H = (0x00000C << 10),
|
|
+ OPC_LARCH_REVB_4H = (0x00000D << 10),
|
|
+ OPC_LARCH_REVH_D = (0x000011 << 10),
|
|
+ OPC_LARCH_BREV_4B = (0x000012 << 10),
|
|
+ OPC_LARCH_BREV_8B = (0x000013 << 10),
|
|
+ OPC_LARCH_EXT_WH = (0x000016 << 10),
|
|
+ OPC_LARCH_EXT_WB = (0x000017 << 10),
|
|
+
|
|
+ OPC_LARCH_ADD_W = (0x00020 << 15),
|
|
+ OPC_LARCH_ADD_D = (0x00021 << 15),
|
|
+ OPC_LARCH_SUB_W = (0x00022 << 15),
|
|
+ OPC_LARCH_SUB_D = (0x00023 << 15),
|
|
+ OPC_LARCH_SLT = (0x00024 << 15),
|
|
+ OPC_LARCH_SLTU = (0x00025 << 15),
|
|
+ OPC_LARCH_MASKEQZ = (0x00026 << 15),
|
|
+ OPC_LARCH_MASKNEZ = (0x00027 << 15),
|
|
+ OPC_LARCH_NOR = (0x00028 << 15),
|
|
+ OPC_LARCH_AND = (0x00029 << 15),
|
|
+ OPC_LARCH_OR = (0x0002A << 15),
|
|
+ OPC_LARCH_XOR = (0x0002B << 15),
|
|
+ OPC_LARCH_SLL_W = (0x0002E << 15),
|
|
+ OPC_LARCH_SRL_W = (0x0002F << 15),
|
|
+ OPC_LARCH_SRA_W = (0x00030 << 15),
|
|
+ OPC_LARCH_SLL_D = (0x00031 << 15),
|
|
+ OPC_LARCH_SRL_D = (0x00032 << 15),
|
|
+ OPC_LARCH_SRA_D = (0x00033 << 15),
|
|
+ OPC_LARCH_ROTR_W = (0x00036 << 15),
|
|
+ OPC_LARCH_ROTR_D = (0x00037 << 15),
|
|
+ OPC_LARCH_MUL_W = (0x00038 << 15),
|
|
+ OPC_LARCH_MULH_W = (0x00039 << 15),
|
|
+ OPC_LARCH_MULH_WU = (0x0003A << 15),
|
|
+ OPC_LARCH_MUL_D = (0x0003B << 15),
|
|
+ OPC_LARCH_MULH_D = (0x0003C << 15),
|
|
+ OPC_LARCH_MULH_DU = (0x0003D << 15),
|
|
+ OPC_LARCH_DIV_W = (0x00040 << 15),
|
|
+ OPC_LARCH_MOD_W = (0x00041 << 15),
|
|
+ OPC_LARCH_DIV_WU = (0x00042 << 15),
|
|
+ OPC_LARCH_MOD_WU = (0x00043 << 15),
|
|
+ OPC_LARCH_DIV_D = (0x00044 << 15),
|
|
+ OPC_LARCH_MOD_D = (0x00045 << 15),
|
|
+ OPC_LARCH_DIV_DU = (0x00046 << 15),
|
|
+ OPC_LARCH_MOD_DU = (0x00047 << 15),
|
|
+ OPC_LARCH_SRLI_W = (0x00089 << 15),
|
|
+ OPC_LARCH_SRAI_W = (0x00091 << 15),
|
|
+ OPC_LARCH_ROTRI_W = (0x00099 << 15),
|
|
+
|
|
+ OPC_LARCH_ALSL_W = (0x0002 << 17),
|
|
+ OPC_LARCH_ALSL_D = (0x0016 << 17),
|
|
+
|
|
+ OPC_LARCH_TRINS_W = (0x003 << 21) | (0x0 << 15),
|
|
+ OPC_LARCH_TRPICK_W = (0x003 << 21) | (0x1 << 15),
|
|
+};
|
|
+
|
|
+enum {
|
|
+ /* float opcodes */
|
|
+ OPC_LARCH_FABS_S = (0x004501 << 10),
|
|
+ OPC_LARCH_FABS_D = (0x004502 << 10),
|
|
+ OPC_LARCH_FNEG_S = (0x004505 << 10),
|
|
+ OPC_LARCH_FNEG_D = (0x004506 << 10),
|
|
+ OPC_LARCH_FCLASS_S = (0x00450D << 10),
|
|
+ OPC_LARCH_FCLASS_D = (0x00450E << 10),
|
|
+ OPC_LARCH_FSQRT_S = (0x004511 << 10),
|
|
+ OPC_LARCH_FSQRT_D = (0x004512 << 10),
|
|
+ OPC_LARCH_FRECIP_S = (0x004515 << 10),
|
|
+ OPC_LARCH_FRECIP_D = (0x004516 << 10),
|
|
+ OPC_LARCH_FRSQRT_S = (0x004519 << 10),
|
|
+ OPC_LARCH_FRSQRT_D = (0x00451A << 10),
|
|
+ OPC_LARCH_FMOV_S = (0x004525 << 10),
|
|
+ OPC_LARCH_FMOV_D = (0x004526 << 10),
|
|
+ OPC_LARCH_GR2FR_W = (0x004529 << 10),
|
|
+ OPC_LARCH_GR2FR_D = (0x00452A << 10),
|
|
+ OPC_LARCH_GR2FRH_W = (0x00452B << 10),
|
|
+ OPC_LARCH_FR2GR_S = (0x00452D << 10),
|
|
+ OPC_LARCH_FR2GR_D = (0x00452E << 10),
|
|
+ OPC_LARCH_FRH2GR_S = (0x00452F << 10),
|
|
+
|
|
+ OPC_LARCH_FCVT_S_D = (0x004646 << 10),
|
|
+ OPC_LARCH_FCVT_D_S = (0x004649 << 10),
|
|
+ OPC_LARCH_FTINTRM_W_S = (0x004681 << 10),
|
|
+ OPC_LARCH_FTINTRM_W_D = (0x004682 << 10),
|
|
+ OPC_LARCH_FTINTRM_L_S = (0x004689 << 10),
|
|
+ OPC_LARCH_FTINTRM_L_D = (0x00468A << 10),
|
|
+ OPC_LARCH_FTINTRP_W_S = (0x004691 << 10),
|
|
+ OPC_LARCH_FTINTRP_W_D = (0x004692 << 10),
|
|
+ OPC_LARCH_FTINTRP_L_S = (0x004699 << 10),
|
|
+ OPC_LARCH_FTINTRP_L_D = (0x00469A << 10),
|
|
+ OPC_LARCH_FTINTRZ_W_S = (0x0046A1 << 10),
|
|
+ OPC_LARCH_FTINTRZ_W_D = (0x0046A2 << 10),
|
|
+ OPC_LARCH_FTINTRZ_L_S = (0x0046A9 << 10),
|
|
+ OPC_LARCH_FTINTRZ_L_D = (0x0046AA << 10),
|
|
+ OPC_LARCH_FTINTRNE_W_S = (0x0046B1 << 10),
|
|
+ OPC_LARCH_FTINTRNE_W_D = (0x0046B2 << 10),
|
|
+ OPC_LARCH_FTINTRNE_L_S = (0x0046B9 << 10),
|
|
+ OPC_LARCH_FTINTRNE_L_D = (0x0046BA << 10),
|
|
+ OPC_LARCH_FTINT_W_S = (0x0046C1 << 10),
|
|
+ OPC_LARCH_FTINT_W_D = (0x0046C2 << 10),
|
|
+ OPC_LARCH_FTINT_L_S = (0x0046C9 << 10),
|
|
+ OPC_LARCH_FTINT_L_D = (0x0046CA << 10),
|
|
+ OPC_LARCH_FFINT_S_W = (0x004744 << 10),
|
|
+ OPC_LARCH_FFINT_S_L = (0x004746 << 10),
|
|
+ OPC_LARCH_FFINT_D_W = (0x004748 << 10),
|
|
+ OPC_LARCH_FFINT_D_L = (0x00474A << 10),
|
|
+ OPC_LARCH_FRINT_S = (0x004791 << 10),
|
|
+ OPC_LARCH_FRINT_D = (0x004792 << 10),
|
|
+
|
|
+ OPC_LARCH_FADD_S = (0x00201 << 15),
|
|
+ OPC_LARCH_FADD_D = (0x00202 << 15),
|
|
+ OPC_LARCH_FSUB_S = (0x00205 << 15),
|
|
+ OPC_LARCH_FSUB_D = (0x00206 << 15),
|
|
+ OPC_LARCH_FMUL_S = (0x00209 << 15),
|
|
+ OPC_LARCH_FMUL_D = (0x0020A << 15),
|
|
+ OPC_LARCH_FDIV_S = (0x0020D << 15),
|
|
+ OPC_LARCH_FDIV_D = (0x0020E << 15),
|
|
+ OPC_LARCH_FMAX_S = (0x00211 << 15),
|
|
+ OPC_LARCH_FMAX_D = (0x00212 << 15),
|
|
+ OPC_LARCH_FMIN_S = (0x00215 << 15),
|
|
+ OPC_LARCH_FMIN_D = (0x00216 << 15),
|
|
+ OPC_LARCH_FMAXA_S = (0x00219 << 15),
|
|
+ OPC_LARCH_FMAXA_D = (0x0021A << 15),
|
|
+ OPC_LARCH_FMINA_S = (0x0021D << 15),
|
|
+ OPC_LARCH_FMINA_D = (0x0021E << 15),
|
|
+};
|
|
+
|
|
+enum {
|
|
+ /* 12 bit immediate opcodes */
|
|
+ OPC_LARCH_SLTI = (0x008 << 22),
|
|
+ OPC_LARCH_SLTIU = (0x009 << 22),
|
|
+ OPC_LARCH_ADDI_W = (0x00A << 22),
|
|
+ OPC_LARCH_ADDI_D = (0x00B << 22),
|
|
+ OPC_LARCH_ANDI = (0x00D << 22),
|
|
+ OPC_LARCH_ORI = (0x00E << 22),
|
|
+ OPC_LARCH_XORI = (0x00F << 22),
|
|
+};
|
|
+
|
|
+enum {
|
|
+ /* load/store opcodes */
|
|
+ OPC_LARCH_FLDX_S = (0x07060 << 15),
|
|
+ OPC_LARCH_FLDX_D = (0x07068 << 15),
|
|
+ OPC_LARCH_FSTX_S = (0x07070 << 15),
|
|
+ OPC_LARCH_FSTX_D = (0x07078 << 15),
|
|
+ OPC_LARCH_FLDGT_S = (0x070E8 << 15),
|
|
+ OPC_LARCH_FLDGT_D = (0x070E9 << 15),
|
|
+ OPC_LARCH_FLDLE_S = (0x070EA << 15),
|
|
+ OPC_LARCH_FLDLE_D = (0x070EB << 15),
|
|
+ OPC_LARCH_FSTGT_S = (0x070EC << 15),
|
|
+ OPC_LARCH_FSTGT_D = (0x070ED << 15),
|
|
+ OPC_LARCH_FSTLE_S = (0x070EE << 15),
|
|
+ OPC_LARCH_FSTLE_D = (0x070EF << 15),
|
|
+
|
|
+ OPC_LARCH_LD_B = (0x0A0 << 22),
|
|
+ OPC_LARCH_LD_H = (0x0A1 << 22),
|
|
+ OPC_LARCH_LD_W = (0x0A2 << 22),
|
|
+ OPC_LARCH_LD_D = (0x0A3 << 22),
|
|
+ OPC_LARCH_ST_B = (0x0A4 << 22),
|
|
+ OPC_LARCH_ST_H = (0x0A5 << 22),
|
|
+ OPC_LARCH_ST_W = (0x0A6 << 22),
|
|
+ OPC_LARCH_ST_D = (0x0A7 << 22),
|
|
+ OPC_LARCH_LD_BU = (0x0A8 << 22),
|
|
+ OPC_LARCH_LD_HU = (0x0A9 << 22),
|
|
+ OPC_LARCH_LD_WU = (0x0AA << 22),
|
|
+ OPC_LARCH_FLD_S = (0x0AC << 22),
|
|
+ OPC_LARCH_FST_S = (0x0AD << 22),
|
|
+ OPC_LARCH_FLD_D = (0x0AE << 22),
|
|
+ OPC_LARCH_FST_D = (0x0AF << 22),
|
|
+
|
|
+ OPC_LARCH_LL_W = (0x20 << 24),
|
|
+ OPC_LARCH_SC_W = (0x21 << 24),
|
|
+ OPC_LARCH_LL_D = (0x22 << 24),
|
|
+ OPC_LARCH_SC_D = (0x23 << 24),
|
|
+ OPC_LARCH_LDPTR_W = (0x24 << 24),
|
|
+ OPC_LARCH_STPTR_W = (0x25 << 24),
|
|
+ OPC_LARCH_LDPTR_D = (0x26 << 24),
|
|
+ OPC_LARCH_STPTR_D = (0x27 << 24),
|
|
+};
|
|
+
|
|
+enum {
|
|
+ /* jump opcodes */
|
|
+ OPC_LARCH_BEQZ = (0x10 << 26),
|
|
+ OPC_LARCH_BNEZ = (0x11 << 26),
|
|
+ OPC_LARCH_B = (0x14 << 26),
|
|
+ OPC_LARCH_BEQ = (0x16 << 26),
|
|
+ OPC_LARCH_BNE = (0x17 << 26),
|
|
+ OPC_LARCH_BLT = (0x18 << 26),
|
|
+ OPC_LARCH_BGE = (0x19 << 26),
|
|
+ OPC_LARCH_BLTU = (0x1A << 26),
|
|
+ OPC_LARCH_BGEU = (0x1B << 26),
|
|
+};
|
|
+
|
|
+#endif
|
|
diff --git a/target/loongarch64/internal.h b/target/loongarch64/internal.h
|
|
new file mode 100644
|
|
index 0000000000..a51b7e6f56
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/internal.h
|
|
@@ -0,0 +1,207 @@
|
|
+/*
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef LOONGARCH_INTERNAL_H
|
|
+#define LOONGARCH_INTERNAL_H
|
|
+
|
|
+#include "cpu-csr.h"
|
|
+
|
|
+/*
|
|
+ * MMU types. The entry layout is inherited from the MIPS
|
|
+ * CP0C0_MT field.
|
|
+ */
|
|
+enum loongarch_mmu_types {
|
|
+ MMU_TYPE_NONE,
|
|
+ MMU_TYPE_LS3A5K, /* LISA CSR */
|
|
+};
|
|
+
|
|
+struct loongarch_def_t {
|
|
+ const char *name;
|
|
+ int32_t CSR_PRid;
|
|
+ int32_t FCSR0;
|
|
+ int32_t FCSR0_rw_bitmask;
|
|
+ int32_t PABITS;
|
|
+ CPU_LOONGARCH_CSR
|
|
+ uint64_t insn_flags;
|
|
+ enum loongarch_mmu_types mmu_type;
|
|
+ int cpu_cfg[64];
|
|
+};
|
|
+
|
|
+/* loongarch 3a5000 TLB entry */
|
|
+struct ls3a5k_tlb_t {
|
|
+ target_ulong VPN;
|
|
+ uint64_t PageMask; /* CSR_TLBIDX[29:24] */
|
|
+ uint32_t PageSize;
|
|
+ uint16_t ASID;
|
|
+ unsigned int G:1; /* CSR_TLBLO[6] */
|
|
+
|
|
+ unsigned int C0:3; /* CSR_TLBLO[5:4] */
|
|
+ unsigned int C1:3;
|
|
+
|
|
+ unsigned int V0:1; /* CSR_TLBLO[0] */
|
|
+ unsigned int V1:1;
|
|
+
|
|
+ unsigned int WE0:1; /* CSR_TLBLO[1] */
|
|
+ unsigned int WE1:1;
|
|
+
|
|
+ unsigned int XI0:1; /* CSR_TLBLO[62] */
|
|
+ unsigned int XI1:1;
|
|
+
|
|
+ unsigned int RI0:1; /* CSR_TLBLO[61] */
|
|
+ unsigned int RI1:1;
|
|
+
|
|
+ unsigned int EHINV:1;/* CSR_TLBIDX[31] */
|
|
+
|
|
+ unsigned int PLV0:2; /* CSR_TLBLO[3:2] */
|
|
+ unsigned int PLV1:2;
|
|
+
|
|
+ unsigned int RPLV0:1;
|
|
+ unsigned int RPLV1:1; /* CSR_TLBLO[63] */
|
|
+
|
|
+ uint64_t PPN0; /* CSR_TLBLO[47:12] */
|
|
+ uint64_t PPN1; /* CSR_TLBLO[47:12] */
|
|
+};
|
|
+typedef struct ls3a5k_tlb_t ls3a5k_tlb_t;
|
|
+
|
|
+struct CPULOONGARCHTLBContext {
|
|
+ uint32_t nb_tlb;
|
|
+ uint32_t tlb_in_use;
|
|
+ int (*map_address)(struct CPULOONGARCHState *env, hwaddr *physical,
|
|
+ int *prot, target_ulong address, int rw,
|
|
+ int access_type);
|
|
+ void (*helper_tlbwr)(struct CPULOONGARCHState *env);
|
|
+ void (*helper_tlbfill)(struct CPULOONGARCHState *env);
|
|
+ void (*helper_tlbsrch)(struct CPULOONGARCHState *env);
|
|
+ void (*helper_tlbrd)(struct CPULOONGARCHState *env);
|
|
+ void (*helper_tlbclr)(struct CPULOONGARCHState *env);
|
|
+ void (*helper_tlbflush)(struct CPULOONGARCHState *env);
|
|
+ void (*helper_invtlb)(struct CPULOONGARCHState *env, target_ulong addr,
|
|
+ target_ulong info, int op);
|
|
+ union
|
|
+ {
|
|
+ struct {
|
|
+ uint64_t ftlb_mask;
|
|
+ uint32_t ftlb_size; /* at most : 8 * 256 = 2048 */
|
|
+ uint32_t vtlb_size; /* at most : 64 */
|
|
+ ls3a5k_tlb_t tlb[2048 + 64]; /* at most : 2048 FTLB + 64 VTLB */
|
|
+ } ls3a5k;
|
|
+ } mmu;
|
|
+};
|
|
+
|
|
+enum {
|
|
+ TLBRET_PE = -7,
|
|
+ TLBRET_XI = -6,
|
|
+ TLBRET_RI = -5,
|
|
+ TLBRET_DIRTY = -4,
|
|
+ TLBRET_INVALID = -3,
|
|
+ TLBRET_NOMATCH = -2,
|
|
+ TLBRET_BADADDR = -1,
|
|
+ TLBRET_MATCH = 0
|
|
+};
|
|
+
|
|
+extern unsigned int ieee_rm[];
|
|
+
|
|
+static inline void restore_rounding_mode(CPULOONGARCHState *env)
|
|
+{
|
|
+ set_float_rounding_mode(ieee_rm[(env->active_fpu.fcsr0 >> FCSR0_RM) & 0x3],
|
|
+ &env->active_fpu.fp_status);
|
|
+}
|
|
+
|
|
+static inline void restore_flush_mode(CPULOONGARCHState *env)
|
|
+{
|
|
+ set_flush_to_zero(0, &env->active_fpu.fp_status);
|
|
+}
|
|
+
|
|
+static inline void restore_fp_status(CPULOONGARCHState *env)
|
|
+{
|
|
+ restore_rounding_mode(env);
|
|
+ restore_flush_mode(env);
|
|
+}
|
|
+
|
|
+static inline void compute_hflags(CPULOONGARCHState *env)
|
|
+{
|
|
+ env->hflags &= ~(LARCH_HFLAG_64 | LARCH_HFLAG_FPU | LARCH_HFLAG_KSU |
|
|
+ LARCH_HFLAG_AWRAP | LARCH_HFLAG_LSX | LARCH_HFLAG_LASX);
|
|
+
|
|
+ env->hflags |= (env->CSR_CRMD & CSR_CRMD_PLV);
|
|
+ env->hflags |= LARCH_HFLAG_64;
|
|
+
|
|
+ if (env->CSR_EUEN & CSR_EUEN_FPEN) {
|
|
+ env->hflags |= LARCH_HFLAG_FPU;
|
|
+ }
|
|
+ if (env->CSR_EUEN & CSR_EUEN_LSXEN) {
|
|
+ env->hflags |= LARCH_HFLAG_LSX;
|
|
+ }
|
|
+ if (env->CSR_EUEN & CSR_EUEN_LASXEN) {
|
|
+ env->hflags |= LARCH_HFLAG_LASX;
|
|
+ }
|
|
+ if (env->CSR_EUEN & CSR_EUEN_LBTEN) {
|
|
+ env->hflags |= LARCH_HFLAG_LBT;
|
|
+ }
|
|
+}
|
|
+
|
|
+/* Check if there is pending and not masked out interrupt */
|
|
+static inline bool cpu_loongarch_hw_interrupts_pending(CPULOONGARCHState *env)
|
|
+{
|
|
+ int32_t pending;
|
|
+ int32_t status;
|
|
+ bool r;
|
|
+
|
|
+ pending = env->CSR_ESTAT & CSR_ESTAT_IPMASK;
|
|
+ status = env->CSR_ECFG & CSR_ECFG_IPMASK;
|
|
+
|
|
+    /*
|
|
+     * In both compatibility and VInt (Vectored Interrupts) modes,
|
|
+     * the pending bits act as individual interrupt lines and the
|
|
+     * status bits are their individual masks.
|
|
+     */
|
|
+ r = (pending & status) != 0;
|
|
+
|
|
+ return r;
|
|
+}
|
|
+
|
|
+/* stabletimer.c */
|
|
+uint32_t cpu_loongarch_get_random_ls3a5k_tlb(uint32_t low, uint32_t high);
|
|
+uint64_t cpu_loongarch_get_stable_counter(CPULOONGARCHState *env);
|
|
+uint64_t cpu_loongarch_get_stable_timer_ticks(CPULOONGARCHState *env);
|
|
+void cpu_loongarch_store_stable_timer_config(CPULOONGARCHState *env,
|
|
+ uint64_t value);
|
|
+int loongarch_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu,
|
|
+ int cpuid, void *opaque);
|
|
+
|
|
+void loongarch_cpu_dump_state(CPUState *cpu, FILE *f, int flags);
|
|
+
|
|
+/* TODO QOM'ify CPU reset and remove */
|
|
+void cpu_state_reset(CPULOONGARCHState *s);
|
|
+void cpu_loongarch_realize_env(CPULOONGARCHState *env);
|
|
+
|
|
+uint64_t read_fcc(CPULOONGARCHState *env);
|
|
+void write_fcc(CPULOONGARCHState *env, uint64_t val);
|
|
+
|
|
+int loongarch_cpu_gdb_read_register(CPUState *cs, GByteArray *mem_buf, int n);
|
|
+int loongarch_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
|
|
+
|
|
+#ifdef CONFIG_TCG
|
|
+#include "fpu_helper.h"
|
|
+#endif
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+extern const struct VMStateDescription vmstate_loongarch_cpu;
|
|
+hwaddr loongarch_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
|
|
+#endif
|
|
+
|
|
+#endif
|
|
diff --git a/target/loongarch64/kvm.c b/target/loongarch64/kvm.c
|
|
new file mode 100644
|
|
index 0000000000..2b0159bb32
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/kvm.c
|
|
@@ -0,0 +1,1366 @@
|
|
+/*
|
|
+ * KVM/LOONGARCH: LOONGARCH specific KVM APIs
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include <sys/ioctl.h>
|
|
+
|
|
+#include <linux/kvm.h>
|
|
+
|
|
+#include "qemu-common.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "qemu/error-report.h"
|
|
+#include "qemu/timer.h"
|
|
+#include "qemu/main-loop.h"
|
|
+#include "sysemu/sysemu.h"
|
|
+#include "sysemu/kvm.h"
|
|
+#include "sysemu/runstate.h"
|
|
+#include "sysemu/cpus.h"
|
|
+#include "kvm_larch.h"
|
|
+#include "exec/memattrs.h"
|
|
+#include "exec/gdbstub.h"
|
|
+
|
|
+#define DEBUG_KVM 0
|
|
+/*
|
|
+ * A 16384-byte buffer can hold the 8-byte kvm_msrs header, plus
|
|
+ * 2047 kvm_csr_entry structs
|
|
+ */
|
|
+#define CSR_BUF_SIZE 16384
|
|
+
|
|
+#define DPRINTF(fmt, ...) \
|
|
+ do { \
|
|
+ if (DEBUG_KVM) { \
|
|
+ fprintf(stderr, fmt, ##__VA_ARGS__); \
|
|
+ } \
|
|
+ } while (0)
|
|
+
|
|
+/*
|
|
+ * Define loongarch kvm version.
|
|
+ * Add version number when
|
|
+ * qemu/kvm interface changed
|
|
+ */
|
|
+#define KVM_LOONGARCH_VERSION 1
|
|
+
|
|
+static struct {
|
|
+ target_ulong addr;
|
|
+ int len;
|
|
+ int type;
|
|
+} inst_breakpoint[8], data_breakpoint[8];
|
|
+
|
|
+int nb_data_breakpoint = 0, nb_inst_breakpoint = 0;
|
|
+static int kvm_loongarch_version_cap;
|
|
+
|
|
+/*
|
|
+ * Hardware breakpoint control register
|
|
+ * 4:1 plv0-plv3 enable
|
|
+ * 6:5 config virtualization mode
|
|
+ * 9:8 load store
|
|
+ */
|
|
+static const int type_code[] = { [GDB_BREAKPOINT_HW] = 0x5e,
|
|
+ [GDB_WATCHPOINT_READ] = (0x5e | 1 << 8),
|
|
+ [GDB_WATCHPOINT_WRITE] = (0x5e | 1 << 9),
|
|
+ [GDB_WATCHPOINT_ACCESS] =
|
|
+ (0x5e | 1 << 8 | 1 << 9) };
|
|
+
|
|
+const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
|
|
+ KVM_CAP_LAST_INFO
|
|
+};
|
|
+
|
|
+static void kvm_loongarch_update_state(void *opaque, bool running,
|
|
+ RunState state);
|
|
+static inline int kvm_larch_putq(CPUState *cs, uint64_t reg_id,
|
|
+ uint64_t *addr);
|
|
+
|
|
+unsigned long kvm_arch_vcpu_id(CPUState *cs)
|
|
+{
|
|
+ return cs->cpu_index;
|
|
+}
|
|
+
|
|
+int kvm_arch_init(MachineState *ms, KVMState *s)
|
|
+{
|
|
+ /* LOONGARCH has 128 signals */
|
|
+ kvm_set_sigmask_len(s, 16);
|
|
+
|
|
+ kvm_loongarch_version_cap = kvm_check_extension(s, KVM_CAP_LOONGARCH_VZ);
|
|
+
|
|
+ if (kvm_loongarch_version_cap != KVM_LOONGARCH_VERSION) {
|
|
+ warn_report("QEMU/KVM version not match, qemu_la_version: lvz-%d,\
|
|
+ kvm_la_version: lvz-%d \n",
|
|
+ KVM_LOONGARCH_VERSION, kvm_loongarch_version_cap);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_arch_irqchip_create(KVMState *s)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void kvm_csr_set_addr(uint64_t **addr, uint32_t index, uint64_t *p)
|
|
+{
|
|
+ addr[index] = p;
|
|
+}
|
|
+
|
|
+int kvm_arch_init_vcpu(CPUState *cs)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ uint64_t **addr;
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int ret = 0;
|
|
+
|
|
+ kvm_vcpu_enable_cap(cs, KVM_CAP_LOONGARCH_FPU, 0, 0);
|
|
+ kvm_vcpu_enable_cap(cs, KVM_CAP_LOONGARCH_LSX, 0, 0);
|
|
+
|
|
+ cpu->cpuStateEntry =
|
|
+ qemu_add_vm_change_state_handler(kvm_loongarch_update_state, cs);
|
|
+ cpu->kvm_csr_buf = g_malloc0(CSR_BUF_SIZE + CSR_BUF_SIZE);
|
|
+
|
|
+ addr = (void *)cpu->kvm_csr_buf + CSR_BUF_SIZE;
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_CRMD, &env->CSR_CRMD);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PRMD, &env->CSR_PRMD);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_EUEN, &env->CSR_EUEN);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_MISC, &env->CSR_MISC);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ECFG, &env->CSR_ECFG);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ESTAT, &env->CSR_ESTAT);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERA, &env->CSR_ERA);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_BADV, &env->CSR_BADV);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_BADI, &env->CSR_BADI);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_EEPN, &env->CSR_EEPN);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBIDX, &env->CSR_TLBIDX);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBEHI, &env->CSR_TLBEHI);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBELO0, &env->CSR_TLBELO0);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBELO1, &env->CSR_TLBELO1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GTLBC, &env->CSR_GTLBC);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TRGP, &env->CSR_TRGP);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ASID, &env->CSR_ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PGDL, &env->CSR_PGDL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PGDH, &env->CSR_PGDH);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PGD, &env->CSR_PGD);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PWCTL0, &env->CSR_PWCTL0);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PWCTL1, &env->CSR_PWCTL1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_STLBPGSIZE, &env->CSR_STLBPGSIZE);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_RVACFG, &env->CSR_RVACFG);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_CPUID, &env->CSR_CPUID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG1, &env->CSR_PRCFG1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG2, &env->CSR_PRCFG2);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PRCFG3, &env->CSR_PRCFG3);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS0, &env->CSR_KS0);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS1, &env->CSR_KS1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS2, &env->CSR_KS2);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS3, &env->CSR_KS3);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS4, &env->CSR_KS4);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS5, &env->CSR_KS5);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS6, &env->CSR_KS6);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_KS7, &env->CSR_KS7);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TMID, &env->CSR_TMID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_CNTC, &env->CSR_CNTC);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TINTCLR, &env->CSR_TINTCLR);
|
|
+
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GSTAT, &env->CSR_GSTAT);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GCFG, &env->CSR_GCFG);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GINTC, &env->CSR_GINTC);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GCNTC, &env->CSR_GCNTC);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_LLBCTL, &env->CSR_LLBCTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IMPCTL1, &env->CSR_IMPCTL1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IMPCTL2, &env->CSR_IMPCTL2);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_GNMI, &env->CSR_GNMI);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRENT, &env->CSR_TLBRENT);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRBADV, &env->CSR_TLBRBADV);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRERA, &env->CSR_TLBRERA);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRSAVE, &env->CSR_TLBRSAVE);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRELO0, &env->CSR_TLBRELO0);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRELO1, &env->CSR_TLBRELO1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBREHI, &env->CSR_TLBREHI);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_TLBRPRMD, &env->CSR_TLBRPRMD);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRCTL, &env->CSR_ERRCTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRINFO, &env->CSR_ERRINFO);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRINFO1, &env->CSR_ERRINFO1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRENT, &env->CSR_ERRENT);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRERA, &env->CSR_ERRERA);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_ERRSAVE, &env->CSR_ERRSAVE);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_CTAG, &env->CSR_CTAG);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN0, &env->CSR_DMWIN0);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN1, &env->CSR_DMWIN1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN2, &env->CSR_DMWIN2);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DMWIN3, &env->CSR_DMWIN3);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL0, &env->CSR_PERFCTRL0);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR0, &env->CSR_PERFCNTR0);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL1, &env->CSR_PERFCTRL1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR1, &env->CSR_PERFCNTR1);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL2, &env->CSR_PERFCTRL2);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR2, &env->CSR_PERFCNTR2);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCTRL3, &env->CSR_PERFCTRL3);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_PERFCNTR3, &env->CSR_PERFCNTR3);
|
|
+
|
|
+ /* debug */
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_MWPC, &env->CSR_MWPC);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_MWPS, &env->CSR_MWPS);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0ADDR, &env->CSR_DB0ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0MASK, &env->CSR_DB0MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0CTL, &env->CSR_DB0CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB0ASID, &env->CSR_DB0ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1ADDR, &env->CSR_DB1ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1MASK, &env->CSR_DB1MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1CTL, &env->CSR_DB1CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB1ASID, &env->CSR_DB1ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2ADDR, &env->CSR_DB2ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2MASK, &env->CSR_DB2MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2CTL, &env->CSR_DB2CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB2ASID, &env->CSR_DB2ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3ADDR, &env->CSR_DB3ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3MASK, &env->CSR_DB3MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3CTL, &env->CSR_DB3CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DB3ASID, &env->CSR_DB3ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_FWPC, &env->CSR_FWPC);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_FWPS, &env->CSR_FWPS);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0ADDR, &env->CSR_IB0ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0MASK, &env->CSR_IB0MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0CTL, &env->CSR_IB0CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB0ASID, &env->CSR_IB0ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1ADDR, &env->CSR_IB1ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1MASK, &env->CSR_IB1MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1CTL, &env->CSR_IB1CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB1ASID, &env->CSR_IB1ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2ADDR, &env->CSR_IB2ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2MASK, &env->CSR_IB2MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2CTL, &env->CSR_IB2CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB2ASID, &env->CSR_IB2ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3ADDR, &env->CSR_IB3ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3MASK, &env->CSR_IB3MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3CTL, &env->CSR_IB3CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB3ASID, &env->CSR_IB3ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4ADDR, &env->CSR_IB4ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4MASK, &env->CSR_IB4MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4CTL, &env->CSR_IB4CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB4ASID, &env->CSR_IB4ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5ADDR, &env->CSR_IB5ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5MASK, &env->CSR_IB5MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5CTL, &env->CSR_IB5CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB5ASID, &env->CSR_IB5ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6ADDR, &env->CSR_IB6ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6MASK, &env->CSR_IB6MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6CTL, &env->CSR_IB6CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB6ASID, &env->CSR_IB6ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7ADDR, &env->CSR_IB7ADDR);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7MASK, &env->CSR_IB7MASK);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7CTL, &env->CSR_IB7CTL);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_IB7ASID, &env->CSR_IB7ASID);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DEBUG, &env->CSR_DEBUG);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DERA, &env->CSR_DERA);
|
|
+ kvm_csr_set_addr(addr, LOONGARCH_CSR_DESAVE, &env->CSR_DESAVE);
|
|
+
|
|
+ DPRINTF("%s\n", __func__);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int kvm_arch_destroy_vcpu(CPUState *cs)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+
|
|
+ g_free(cpu->kvm_csr_buf);
|
|
+ cpu->kvm_csr_buf = NULL;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void kvm_csr_buf_reset(LOONGARCHCPU *cpu)
|
|
+{
|
|
+ memset(cpu->kvm_csr_buf, 0, CSR_BUF_SIZE);
|
|
+}
|
|
+
|
|
+static void kvm_csr_entry_add(LOONGARCHCPU *cpu, uint32_t index,
|
|
+ uint64_t value)
|
|
+{
|
|
+ struct kvm_msrs *msrs = cpu->kvm_csr_buf;
|
|
+ void *limit = ((void *)msrs) + CSR_BUF_SIZE;
|
|
+ struct kvm_csr_entry *entry = &msrs->entries[msrs->ncsrs];
|
|
+
|
|
+ assert((void *)(entry + 1) <= limit);
|
|
+
|
|
+ entry->index = index;
|
|
+ entry->reserved = 0;
|
|
+ entry->data = value;
|
|
+ msrs->ncsrs++;
|
|
+}
|
|
+
|
|
+void kvm_loongarch_reset_vcpu(LOONGARCHCPU *cpu)
|
|
+{
|
|
+ int ret = 0;
|
|
+ uint64_t reset = 1;
|
|
+
|
|
+ if (CPU(cpu)->kvm_fd > 0) {
|
|
+ ret = kvm_larch_putq(CPU(cpu), KVM_REG_LOONGARCH_VCPU_RESET, &reset);
|
|
+ if (ret < 0) {
|
|
+ error_report("%s reset vcpu failed:%d", __func__, ret);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ DPRINTF("%s\n", __func__);
|
|
+}
|
|
+
|
|
+void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
|
|
+{
|
|
+ int n;
|
|
+ if (kvm_sw_breakpoints_active(cpu)) {
|
|
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
|
|
+ }
|
|
+ if (nb_data_breakpoint > 0) {
|
|
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
|
|
+ for (n = 0; n < nb_data_breakpoint; n++) {
|
|
+ dbg->arch.data_breakpoint[n].addr = data_breakpoint[n].addr;
|
|
+ dbg->arch.data_breakpoint[n].mask = 0;
|
|
+ dbg->arch.data_breakpoint[n].asid = 0;
|
|
+ dbg->arch.data_breakpoint[n].ctrl =
|
|
+ type_code[data_breakpoint[n].type];
|
|
+ }
|
|
+ dbg->arch.data_bp_nums = nb_data_breakpoint;
|
|
+ } else {
|
|
+ dbg->arch.data_bp_nums = 0;
|
|
+ }
|
|
+ if (nb_inst_breakpoint > 0) {
|
|
+ dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
|
|
+ for (n = 0; n < nb_inst_breakpoint; n++) {
|
|
+ dbg->arch.inst_breakpoint[n].addr = inst_breakpoint[n].addr;
|
|
+ dbg->arch.inst_breakpoint[n].mask = 0;
|
|
+ dbg->arch.inst_breakpoint[n].asid = 0;
|
|
+ dbg->arch.inst_breakpoint[n].ctrl =
|
|
+ type_code[inst_breakpoint[n].type];
|
|
+ }
|
|
+ dbg->arch.inst_bp_nums = nb_inst_breakpoint;
|
|
+ } else {
|
|
+ dbg->arch.inst_bp_nums = 0;
|
|
+ }
|
|
+}
|
|
+
|
|
+static const unsigned int brk_insn = 0x002b8005;
|
|
+
|
|
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
|
|
+{
|
|
+ DPRINTF("%s\n", __func__);
|
|
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
|
|
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
|
|
+ error_report("%s failed", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
|
|
+{
|
|
+ static uint32_t brk;
|
|
+
|
|
+ DPRINTF("%s\n", __func__);
|
|
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
|
|
+ brk != brk_insn ||
|
|
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
|
|
+ error_report("%s failed", __func__);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int find_hw_breakpoint(uint64_t addr, int len, int type)
|
|
+{
|
|
+ int n;
|
|
+ switch (type) {
|
|
+ case GDB_BREAKPOINT_HW:
|
|
+ if (nb_inst_breakpoint == 0) {
|
|
+ return -1;
|
|
+ }
|
|
+ for (n = 0; n < nb_inst_breakpoint; n++) {
|
|
+ if (inst_breakpoint[n].addr == addr &&
|
|
+ inst_breakpoint[n].type == type) {
|
|
+ return n;
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+ case GDB_WATCHPOINT_WRITE:
|
|
+ case GDB_WATCHPOINT_READ:
|
|
+ case GDB_WATCHPOINT_ACCESS:
|
|
+ if (nb_data_breakpoint == 0) {
|
|
+ return -1;
|
|
+ }
|
|
+ for (n = 0; n < nb_data_breakpoint; n++) {
|
|
+ if (data_breakpoint[n].addr == addr &&
|
|
+ data_breakpoint[n].type == type &&
|
|
+ data_breakpoint[n].len == len) {
|
|
+ return n;
|
|
+ }
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ return -1;
|
|
+ }
|
|
+ return -1;
|
|
+}
|
|
+
|
|
+int kvm_arch_insert_hw_breakpoint(target_ulong addr, target_ulong len,
|
|
+ int type)
|
|
+{
|
|
+ switch (type) {
|
|
+ case GDB_BREAKPOINT_HW:
|
|
+ len = 1;
|
|
+ if (nb_inst_breakpoint == 8) {
|
|
+ return -ENOBUFS;
|
|
+ }
|
|
+ if (find_hw_breakpoint(addr, len, type) >= 0) {
|
|
+ return -EEXIST;
|
|
+ }
|
|
+ inst_breakpoint[nb_inst_breakpoint].addr = addr;
|
|
+ inst_breakpoint[nb_inst_breakpoint].len = len;
|
|
+ inst_breakpoint[nb_inst_breakpoint].type = type;
|
|
+ nb_inst_breakpoint++;
|
|
+ break;
|
|
+ case GDB_WATCHPOINT_WRITE:
|
|
+ case GDB_WATCHPOINT_READ:
|
|
+ case GDB_WATCHPOINT_ACCESS:
|
|
+ switch (len) {
|
|
+ case 1:
|
|
+ case 2:
|
|
+ case 4:
|
|
+ case 8:
|
|
+ if (addr & (len - 1)) {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ if (nb_data_breakpoint == 8) {
|
|
+ return -ENOBUFS;
|
|
+ }
|
|
+ if (find_hw_breakpoint(addr, len, type) >= 0) {
|
|
+ return -EEXIST;
|
|
+ }
|
|
+ data_breakpoint[nb_data_breakpoint].addr = addr;
|
|
+ data_breakpoint[nb_data_breakpoint].len = len;
|
|
+ data_breakpoint[nb_data_breakpoint].type = type;
|
|
+ nb_data_breakpoint++;
|
|
+ break;
|
|
+ default:
|
|
+ return -EINVAL;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ return -ENOSYS;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_arch_remove_hw_breakpoint(target_ulong addr, target_ulong len,
|
|
+ int type)
|
|
+{
|
|
+ int n;
|
|
+ n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type);
|
|
+ if (n < 0) {
|
|
+ printf("err not find remove target\n");
|
|
+ return -ENOENT;
|
|
+ }
|
|
+ switch (type) {
|
|
+ case GDB_BREAKPOINT_HW:
|
|
+ nb_inst_breakpoint--;
|
|
+ inst_breakpoint[n] = inst_breakpoint[nb_inst_breakpoint];
|
|
+ break;
|
|
+ case GDB_WATCHPOINT_WRITE:
|
|
+ case GDB_WATCHPOINT_READ:
|
|
+ case GDB_WATCHPOINT_ACCESS:
|
|
+ nb_data_breakpoint--;
|
|
+ data_breakpoint[n] = data_breakpoint[nb_data_breakpoint];
|
|
+ break;
|
|
+ default:
|
|
+ return -1;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+void kvm_arch_remove_all_hw_breakpoints(void)
|
|
+{
|
|
+ DPRINTF("%s\n", __func__);
|
|
+ nb_data_breakpoint = 0;
|
|
+ nb_inst_breakpoint = 0;
|
|
+}
|
|
+
|
|
+static inline int cpu_loongarch_io_interrupts_pending(LOONGARCHCPU *cpu)
|
|
+{
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+
|
|
+ return env->CSR_ESTAT & (0x1 << 2);
|
|
+}
|
|
+
|
|
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ int r;
|
|
+ struct kvm_loongarch_interrupt intr;
|
|
+
|
|
+ qemu_mutex_lock_iothread();
|
|
+
|
|
+ if ((cs->interrupt_request & CPU_INTERRUPT_HARD) &&
|
|
+ cpu_loongarch_io_interrupts_pending(cpu)) {
|
|
+ intr.cpu = -1;
|
|
+ intr.irq = 2;
|
|
+ r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
|
|
+ if (r < 0) {
|
|
+ error_report("%s: cpu %d: failed to inject IRQ %x", __func__,
|
|
+ cs->cpu_index, intr.irq);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ qemu_mutex_unlock_iothread();
|
|
+}
|
|
+
|
|
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
|
|
+{
|
|
+ return MEMTXATTRS_UNSPECIFIED;
|
|
+}
|
|
+
|
|
+int kvm_arch_process_async_events(CPUState *cs)
|
|
+{
|
|
+ return cs->halted;
|
|
+}
|
|
+
|
|
+static CPUWatchpoint hw_watchpoint;
|
|
+
|
|
+static bool kvm_loongarch_handle_debug(CPUState *cs, struct kvm_run *run)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int i;
|
|
+ bool ret = false;
|
|
+ kvm_cpu_synchronize_state(cs);
|
|
+ if (cs->singlestep_enabled) {
|
|
+ return true;
|
|
+ }
|
|
+ if (kvm_find_sw_breakpoint(cs, env->active_tc.PC)) {
|
|
+ return true;
|
|
+ }
|
|
+ /* hw breakpoint */
|
|
+ if (run->debug.arch.exception == EXCCODE_WATCH) {
|
|
+ for (i = 0; i < 8; i++) {
|
|
+ if (run->debug.arch.fwps & (1 << i)) {
|
|
+ ret = true;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ for (i = 0; i < 8; i++) {
|
|
+ if (run->debug.arch.mwps & (1 << i)) {
|
|
+ cs->watchpoint_hit = &hw_watchpoint;
|
|
+ hw_watchpoint.vaddr = data_breakpoint[i].addr;
|
|
+ switch (data_breakpoint[i].type) {
|
|
+ case GDB_WATCHPOINT_READ:
|
|
+ ret = true;
|
|
+ hw_watchpoint.flags = BP_MEM_READ;
|
|
+ break;
|
|
+ case GDB_WATCHPOINT_WRITE:
|
|
+ ret = true;
|
|
+ hw_watchpoint.flags = BP_MEM_WRITE;
|
|
+ break;
|
|
+ case GDB_WATCHPOINT_ACCESS:
|
|
+ ret = true;
|
|
+ hw_watchpoint.flags = BP_MEM_ACCESS;
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+ run->debug.arch.exception = 0;
|
|
+ run->debug.arch.fwps = 0;
|
|
+ run->debug.arch.mwps = 0;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
|
|
+{
|
|
+ int ret;
|
|
+
|
|
+ DPRINTF("%s\n", __func__);
|
|
+ switch (run->exit_reason) {
|
|
+ case KVM_EXIT_HYPERCALL:
|
|
+ DPRINTF("handle LOONGARCH hypercall\n");
|
|
+ ret = 0;
|
|
+ run->hypercall.ret = ret;
|
|
+ break;
|
|
+
|
|
+ case KVM_EXIT_DEBUG:
|
|
+ ret = 0;
|
|
+ if (kvm_loongarch_handle_debug(cs, run)) {
|
|
+ ret = EXCP_DEBUG;
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ error_report("%s: unknown exit reason %d", __func__, run->exit_reason);
|
|
+ ret = -1;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+bool kvm_arch_stop_on_emulation_error(CPUState *cs)
|
|
+{
|
|
+ DPRINTF("%s\n", __func__);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+void kvm_arch_init_irq_routing(KVMState *s)
|
|
+{
|
|
+}
|
|
+
|
|
+int kvm_loongarch_set_interrupt(LOONGARCHCPU *cpu, int irq, int level)
|
|
+{
|
|
+ CPUState *cs = CPU(cpu);
|
|
+ struct kvm_loongarch_interrupt intr;
|
|
+
|
|
+ if (!kvm_enabled()) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ intr.cpu = -1;
|
|
+
|
|
+ if (level) {
|
|
+ intr.irq = irq;
|
|
+ } else {
|
|
+ intr.irq = -irq;
|
|
+ }
|
|
+
|
|
+ kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_loongarch_set_ipi_interrupt(LOONGARCHCPU *cpu, int irq, int level)
|
|
+{
|
|
+ CPUState *cs = current_cpu;
|
|
+ CPUState *dest_cs = CPU(cpu);
|
|
+ struct kvm_loongarch_interrupt intr;
|
|
+
|
|
+ if (!kvm_enabled()) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ intr.cpu = dest_cs->cpu_index;
|
|
+
|
|
+ if (level) {
|
|
+ intr.irq = irq;
|
|
+ } else {
|
|
+ intr.irq = -irq;
|
|
+ }
|
|
+
|
|
+ DPRINTF("%s: IRQ: %d\n", __func__, intr.irq);
|
|
+ if (!current_cpu) {
|
|
+ cs = dest_cs;
|
|
+ }
|
|
+ kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &intr);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_put_one_reg(CPUState *cs, uint64_t reg_id,
|
|
+ int32_t *addr)
|
|
+{
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_put_one_ureg(CPUState *cs, uint64_t reg_id,
|
|
+ uint32_t *addr)
|
|
+{
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_put_one_ulreg(CPUState *cs, uint64_t reg_id,
|
|
+ target_ulong *addr)
|
|
+{
|
|
+ uint64_t val64 = *addr;
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)&val64 };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_put_one_reg64(CPUState *cs, int64_t reg_id,
|
|
+ int64_t *addr)
|
|
+{
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_larch_putq(CPUState *cs, uint64_t reg_id, uint64_t *addr)
|
|
+{
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_get_one_reg(CPUState *cs, uint64_t reg_id,
|
|
+ int32_t *addr)
|
|
+{
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_get_one_ureg(CPUState *cs, uint64_t reg_id,
|
|
+ uint32_t *addr)
|
|
+{
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_get_one_ulreg(CPUState *cs, uint64_t reg_id,
|
|
+ target_ulong *addr)
|
|
+{
|
|
+ int ret;
|
|
+ uint64_t val64 = 0;
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)&val64 };
|
|
+
|
|
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
|
|
+ if (ret >= 0) {
|
|
+ *addr = val64;
|
|
+ }
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_get_one_reg64(CPUState *cs, int64_t reg_id,
|
|
+ int64_t *addr)
|
|
+{
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_larch_getq(CPUState *cs, uint64_t reg_id, uint64_t *addr)
|
|
+{
|
|
+ struct kvm_one_reg csrreg = { .id = reg_id, .addr = (uintptr_t)addr };
|
|
+
|
|
+ return kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &csrreg);
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_change_one_reg(CPUState *cs, uint64_t reg_id,
|
|
+ int32_t *addr, int32_t mask)
|
|
+{
|
|
+ int err;
|
|
+ int32_t tmp, change;
|
|
+
|
|
+ err = kvm_loongarch_get_one_reg(cs, reg_id, &tmp);
|
|
+ if (err < 0) {
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ /* only change bits in mask */
|
|
+ change = (*addr ^ tmp) & mask;
|
|
+ if (!change) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ tmp = tmp ^ change;
|
|
+ return kvm_loongarch_put_one_reg(cs, reg_id, &tmp);
|
|
+}
|
|
+
|
|
+static inline int kvm_loongarch_change_one_reg64(CPUState *cs, uint64_t reg_id,
|
|
+ int64_t *addr, int64_t mask)
|
|
+{
|
|
+ int err;
|
|
+ int64_t tmp, change;
|
|
+
|
|
+ err = kvm_loongarch_get_one_reg64(cs, reg_id, &tmp);
|
|
+ if (err < 0) {
|
|
+ DPRINTF("%s: Failed to get CSR_CONFIG7 (%d)\n", __func__, err);
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ /* only change bits in mask */
|
|
+ change = (*addr ^ tmp) & mask;
|
|
+ if (!change) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ tmp = tmp ^ change;
|
|
+ return kvm_loongarch_put_one_reg64(cs, reg_id, &tmp);
|
|
+}
|
|
+/*
|
|
+ * Handle the VM clock being started or stopped
|
|
+ */
|
|
+static void kvm_loongarch_update_state(void *opaque, bool running,
|
|
+ RunState state)
|
|
+{
|
|
+ CPUState *cs = opaque;
|
|
+ int ret;
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+
|
|
+ /*
|
|
+ * If state is already dirty (synced to QEMU) then the KVM timer state is
|
|
+ * already saved and can be restored when it is synced back to KVM.
|
|
+ */
|
|
+ if (!running) {
|
|
+ ret =
|
|
+ kvm_larch_getq(cs, KVM_REG_LOONGARCH_COUNTER, &cpu->counter_value);
|
|
+ if (ret < 0) {
|
|
+ printf("%s: Failed to get counter_value (%d)\n", __func__, ret);
|
|
+ }
|
|
+
|
|
+ } else {
|
|
+ ret = kvm_larch_putq(cs, KVM_REG_LOONGARCH_COUNTER,
|
|
+ &(LOONGARCH_CPU(cs))->counter_value);
|
|
+ if (ret < 0) {
|
|
+ printf("%s: Failed to put counter_value (%d)\n", __func__, ret);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+static int kvm_loongarch_put_fpu_registers(CPUState *cs, int level)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int err, ret = 0;
|
|
+ unsigned int i;
|
|
+ struct kvm_fpu fpu;
|
|
+
|
|
+ fpu.fcsr = env->active_fpu.fcsr0;
|
|
+ for (i = 0; i < 32; i++) {
|
|
+ memcpy(&fpu.fpr[i], &env->active_fpu.fpr[i],
|
|
+ sizeof(struct kvm_fpureg));
|
|
+ }
|
|
+ for (i = 0; i < 8; i++) {
|
|
+ ((char *)&fpu.fcc)[i] = env->active_fpu.cf[i];
|
|
+ }
|
|
+ fpu.vcsr = env->active_fpu.vcsr16;
|
|
+
|
|
+ err = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
|
|
+ if (err < 0) {
|
|
+ DPRINTF("%s: Failed to get FPU (%d)\n", __func__, err);
|
|
+ ret = err;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+static int kvm_loongarch_get_fpu_registers(CPUState *cs)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int err, ret = 0;
|
|
+ unsigned int i;
|
|
+ struct kvm_fpu fpu;
|
|
+
|
|
+ err = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
|
|
+ if (err < 0) {
|
|
+ DPRINTF("%s: Failed to get FPU (%d)\n", __func__, err);
|
|
+ ret = err;
|
|
+ } else {
|
|
+ env->active_fpu.fcsr0 = fpu.fcsr;
|
|
+ for (i = 0; i < 32; i++) {
|
|
+ memcpy(&env->active_fpu.fpr[i], &fpu.fpr[i],
|
|
+ sizeof(struct kvm_fpureg));
|
|
+ }
|
|
+ for (i = 0; i < 8; i++) {
|
|
+ env->active_fpu.cf[i] = ((char *)&fpu.fcc)[i];
|
|
+ }
|
|
+ env->active_fpu.vcsr16 = fpu.vcsr;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#define KVM_PUT_ONE_UREG64(cs, regidx, addr) \
|
|
+ ({ \
|
|
+ int err; \
|
|
+ uint64_t csrid = 0; \
|
|
+ csrid = (KVM_IOC_CSRID(regidx)); \
|
|
+ err = kvm_larch_putq(cs, csrid, addr); \
|
|
+ if (err < 0) { \
|
|
+ DPRINTF("%s: Failed to put regidx 0x%x err:%d\n", __func__, \
|
|
+ regidx, err); \
|
|
+ } \
|
|
+ err; \
|
|
+ })
|
|
+
|
|
+static int kvm_loongarch_put_csr_registers(CPUState *cs, int level)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int ret = 0;
|
|
+
|
|
+ (void)level;
|
|
+
|
|
+ kvm_csr_buf_reset(cpu);
|
|
+
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_CRMD, env->CSR_CRMD);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRMD, env->CSR_PRMD);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_EUEN, env->CSR_EUEN);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_MISC, env->CSR_MISC);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ECFG, env->CSR_ECFG);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ESTAT, env->CSR_ESTAT);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERA, env->CSR_ERA);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADV, env->CSR_BADV);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADI, env->CSR_BADI);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_EEPN, env->CSR_EEPN);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBIDX, env->CSR_TLBIDX);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBEHI, env->CSR_TLBEHI);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO0, env->CSR_TLBELO0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO1, env->CSR_TLBELO1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GTLBC, env->CSR_GTLBC);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TRGP, env->CSR_TRGP);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ASID, env->CSR_ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDL, env->CSR_PGDL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDH, env->CSR_PGDH);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGD, env->CSR_PGD);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL0, env->CSR_PWCTL0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL1, env->CSR_PWCTL1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_STLBPGSIZE, env->CSR_STLBPGSIZE);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_RVACFG, env->CSR_RVACFG);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_CPUID, env->CSR_CPUID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG1, env->CSR_PRCFG1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG2, env->CSR_PRCFG2);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG3, env->CSR_PRCFG3);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS0, env->CSR_KS0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS1, env->CSR_KS1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS2, env->CSR_KS2);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS3, env->CSR_KS3);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS4, env->CSR_KS4);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS5, env->CSR_KS5);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS6, env->CSR_KS6);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS7, env->CSR_KS7);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TMID, env->CSR_TMID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_CNTC, env->CSR_CNTC);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TINTCLR, env->CSR_TINTCLR);
|
|
+
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GSTAT, env->CSR_GSTAT);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCFG, env->CSR_GCFG);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GINTC, env->CSR_GINTC);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCNTC, env->CSR_GCNTC);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_LLBCTL, env->CSR_LLBCTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL1, env->CSR_IMPCTL1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL2, env->CSR_IMPCTL2);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GNMI, env->CSR_GNMI);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRENT, env->CSR_TLBRENT);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRBADV, env->CSR_TLBRBADV);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRERA, env->CSR_TLBRERA);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRSAVE, env->CSR_TLBRSAVE);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO0, env->CSR_TLBRELO0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO1, env->CSR_TLBRELO1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBREHI, env->CSR_TLBREHI);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRPRMD, env->CSR_TLBRPRMD);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRCTL, env->CSR_ERRCTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO, env->CSR_ERRINFO);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO1, env->CSR_ERRINFO1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRENT, env->CSR_ERRENT);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRERA, env->CSR_ERRERA);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRSAVE, env->CSR_ERRSAVE);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_CTAG, env->CSR_CTAG);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN0, env->CSR_DMWIN0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN1, env->CSR_DMWIN1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN2, env->CSR_DMWIN2);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN3, env->CSR_DMWIN3);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL0, env->CSR_PERFCTRL0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR0, env->CSR_PERFCNTR0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL1, env->CSR_PERFCTRL1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR1, env->CSR_PERFCNTR1);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL2, env->CSR_PERFCTRL2);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR2, env->CSR_PERFCNTR2);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL3, env->CSR_PERFCTRL3);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR3, env->CSR_PERFCNTR3);
|
|
+
|
|
+ /* debug */
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPC, env->CSR_MWPC);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPS, env->CSR_MWPS);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ADDR, env->CSR_DB0ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0MASK, env->CSR_DB0MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0CTL, env->CSR_DB0CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ASID, env->CSR_DB0ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ADDR, env->CSR_DB1ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1MASK, env->CSR_DB1MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1CTL, env->CSR_DB1CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ASID, env->CSR_DB1ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ADDR, env->CSR_DB2ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2MASK, env->CSR_DB2MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2CTL, env->CSR_DB2CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ASID, env->CSR_DB2ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ADDR, env->CSR_DB3ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3MASK, env->CSR_DB3MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3CTL, env->CSR_DB3CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ASID, env->CSR_DB3ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPC, env->CSR_FWPC);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPS, env->CSR_FWPS);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ADDR, env->CSR_IB0ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0MASK, env->CSR_IB0MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0CTL, env->CSR_IB0CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ASID, env->CSR_IB0ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ADDR, env->CSR_IB1ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1MASK, env->CSR_IB1MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1CTL, env->CSR_IB1CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ASID, env->CSR_IB1ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ADDR, env->CSR_IB2ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2MASK, env->CSR_IB2MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2CTL, env->CSR_IB2CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ASID, env->CSR_IB2ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ADDR, env->CSR_IB3ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3MASK, env->CSR_IB3MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3CTL, env->CSR_IB3CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ASID, env->CSR_IB3ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ADDR, env->CSR_IB4ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4MASK, env->CSR_IB4MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4CTL, env->CSR_IB4CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ASID, env->CSR_IB4ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ADDR, env->CSR_IB5ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5MASK, env->CSR_IB5MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5CTL, env->CSR_IB5CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ASID, env->CSR_IB5ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ADDR, env->CSR_IB6ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6MASK, env->CSR_IB6MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6CTL, env->CSR_IB6CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ASID, env->CSR_IB6ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ADDR, env->CSR_IB7ADDR);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7MASK, env->CSR_IB7MASK);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7CTL, env->CSR_IB7CTL);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ASID, env->CSR_IB7ASID);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DEBUG, env->CSR_DEBUG);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DERA, env->CSR_DERA);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DESAVE, env->CSR_DESAVE);
|
|
+
|
|
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_MSRS, cpu->kvm_csr_buf);
|
|
+ if (ret < cpu->kvm_csr_buf->ncsrs) {
|
|
+ struct kvm_csr_entry *e = &cpu->kvm_csr_buf->entries[ret];
|
|
+ printf("error: failed to set CSR 0x%" PRIx32 " to 0x%" PRIx64 "\n",
|
|
+ (uint32_t)e->index, (uint64_t)e->data);
|
|
+ }
|
|
+
|
|
+ /*
|
|
+ * timer cfg must be put at last since it is used to enable
|
|
+ * guest timer
|
|
+ */
|
|
+ ret |= KVM_PUT_ONE_UREG64(cs, LOONGARCH_CSR_TVAL, &env->CSR_TVAL);
|
|
+ ret |= KVM_PUT_ONE_UREG64(cs, LOONGARCH_CSR_TCFG, &env->CSR_TCFG);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+#define KVM_GET_ONE_UREG64(cs, regidx, addr) \
|
|
+ ({ \
|
|
+ int err; \
|
|
+ uint64_t csrid = 0; \
|
|
+ csrid = (KVM_IOC_CSRID(regidx)); \
|
|
+ err = kvm_larch_getq(cs, csrid, addr); \
|
|
+ if (err < 0) { \
|
|
+ DPRINTF("%s: Failed to put regidx 0x%x err:%d\n", __func__, \
|
|
+ regidx, err); \
|
|
+ } \
|
|
+ err; \
|
|
+ })
|
|
+
|
|
+static int kvm_loongarch_get_csr_registers(CPUState *cs)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int ret = 0, i;
|
|
+ struct kvm_csr_entry *csrs = cpu->kvm_csr_buf->entries;
|
|
+ uint64_t **addr;
|
|
+
|
|
+ kvm_csr_buf_reset(cpu);
|
|
+ addr = (void *)cpu->kvm_csr_buf + CSR_BUF_SIZE;
|
|
+
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_CRMD, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRMD, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_EUEN, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_MISC, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ECFG, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ESTAT, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERA, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADV, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_BADI, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_EEPN, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBIDX, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBEHI, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO0, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBELO1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GTLBC, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TRGP, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGDH, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PGD, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL0, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PWCTL1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_STLBPGSIZE, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_RVACFG, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_CPUID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG2, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PRCFG3, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS0, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS2, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS3, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS4, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS5, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS6, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_KS7, 0);
|
|
+
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TMID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_CNTC, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TINTCLR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GSTAT, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCFG, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GINTC, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GCNTC, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_LLBCTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IMPCTL2, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_GNMI, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRENT, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRBADV, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRERA, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRSAVE, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO0, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRELO1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBREHI, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_TLBRPRMD, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRCTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRINFO1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRENT, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRERA, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_ERRSAVE, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_CTAG, 0);
|
|
+
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN0, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN2, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DMWIN3, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL0, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR0, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR1, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL2, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR2, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCTRL3, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_PERFCNTR3, 0);
|
|
+
|
|
+ /* debug */
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPC, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_MWPS, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB0ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB1ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB2ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DB3ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPC, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_FWPS, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB0ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB1ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB2ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB3ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB4ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB5ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB6ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ADDR, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7MASK, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7CTL, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_IB7ASID, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DEBUG, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DERA, 0);
|
|
+ kvm_csr_entry_add(cpu, LOONGARCH_CSR_DESAVE, 0);
|
|
+
|
|
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, cpu->kvm_csr_buf);
|
|
+ if (ret < cpu->kvm_csr_buf->ncsrs) {
|
|
+ struct kvm_csr_entry *e = &cpu->kvm_csr_buf->entries[ret];
|
|
+ printf("error: failed to get CSR 0x%" PRIx32 "\n", (uint32_t)e->index);
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < ret; i++) {
|
|
+ uint32_t index = csrs[i].index;
|
|
+ if (addr[index]) {
|
|
+ *addr[index] = csrs[i].data;
|
|
+ } else {
|
|
+ printf("Failed to get addr CSR 0x%" PRIx32 "\n", i);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ ret |= KVM_GET_ONE_UREG64(cs, LOONGARCH_CSR_TVAL, &env->CSR_TVAL);
|
|
+ ret |= KVM_GET_ONE_UREG64(cs, LOONGARCH_CSR_TCFG, &env->CSR_TCFG);
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int kvm_loongarch_put_pvtime(LOONGARCHCPU *cpu)
|
|
+{
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int err;
|
|
+ struct kvm_device_attr attr = {
|
|
+ .group = KVM_LARCH_VCPU_PVTIME_CTRL,
|
|
+ .attr = KVM_LARCH_VCPU_PVTIME_IPA,
|
|
+ .addr = (uint64_t)&env->st.guest_addr,
|
|
+ };
|
|
+
|
|
+ err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr);
|
|
+ if (err != 0) {
|
|
+ /* It's ok even though kvm has not such attr */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr);
|
|
+ if (err != 0) {
|
|
+ error_report("PVTIME IPA: KVM_SET_DEVICE_ATTR: %s", strerror(-err));
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_loongarch_get_pvtime(LOONGARCHCPU *cpu)
|
|
+{
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int err;
|
|
+ struct kvm_device_attr attr = {
|
|
+ .group = KVM_LARCH_VCPU_PVTIME_CTRL,
|
|
+ .attr = KVM_LARCH_VCPU_PVTIME_IPA,
|
|
+ .addr = (uint64_t)&env->st.guest_addr,
|
|
+ };
|
|
+
|
|
+ err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr);
|
|
+ if (err != 0) {
|
|
+ /* It's ok even though kvm has not such attr */
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ err = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEVICE_ATTR, attr);
|
|
+ if (err != 0) {
|
|
+ error_report("PVTIME IPA: KVM_GET_DEVICE_ATTR: %s", strerror(-err));
|
|
+ return err;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_arch_put_registers(CPUState *cs, int level)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ struct kvm_regs regs;
|
|
+ int ret;
|
|
+ int i;
|
|
+
|
|
+ /* Set the registers based on QEMU's view of things */
|
|
+ for (i = 0; i < 32; i++) {
|
|
+ regs.gpr[i] = (int64_t)(target_long)env->active_tc.gpr[i];
|
|
+ }
|
|
+
|
|
+ regs.pc = (int64_t)(target_long)env->active_tc.PC;
|
|
+
|
|
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, ®s);
|
|
+
|
|
+ if (ret < 0) {
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = kvm_loongarch_put_csr_registers(cs, level);
|
|
+ if (ret < 0) {
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ ret = kvm_loongarch_put_fpu_registers(cs, level);
|
|
+ if (ret < 0) {
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int kvm_arch_get_registers(CPUState *cs)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = LOONGARCH_CPU(cs);
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int ret = 0;
|
|
+ struct kvm_regs regs;
|
|
+ int i;
|
|
+
|
|
+ /* Get the current register set as KVM seems it */
|
|
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, ®s);
|
|
+
|
|
+ if (ret < 0) {
|
|
+ return ret;
|
|
+ }
|
|
+
|
|
+ for (i = 0; i < 32; i++) {
|
|
+ env->active_tc.gpr[i] = regs.gpr[i];
|
|
+ }
|
|
+
|
|
+ env->active_tc.PC = regs.pc;
|
|
+
|
|
+ kvm_loongarch_get_csr_registers(cs);
|
|
+ kvm_loongarch_get_fpu_registers(cs);
|
|
+
|
|
+ return ret;
|
|
+}
|
|
+
|
|
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
|
|
+ uint64_t address, uint32_t data, PCIDevice *dev)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
|
|
+ int vector, PCIDevice *dev)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+bool kvm_arch_cpu_check_are_resettable(void)
|
|
+{
|
|
+ return true;
|
|
+}
|
|
+
|
|
+int kvm_arch_release_virq_post(int virq)
|
|
+{
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_arch_msi_data_to_gsi(uint32_t data)
|
|
+{
|
|
+ abort();
|
|
+}
|
|
diff --git a/target/loongarch64/kvm_larch.h b/target/loongarch64/kvm_larch.h
|
|
new file mode 100644
|
|
index 0000000000..637dec8106
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/kvm_larch.h
|
|
@@ -0,0 +1,49 @@
|
|
+/*
|
|
+ * KVM/LOONGARCH: LOONGARCH specific KVM APIs
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef KVM_LOONGARCH_H
|
|
+#define KVM_LOONGARCH_H
|
|
+
|
|
+/**
|
|
+ * kvm_loongarch_reset_vcpu:
|
|
+ * @cpu: LOONGARCHCPU
|
|
+ *
|
|
+ * Called at reset time to set kernel registers to their initial values.
|
|
+ */
|
|
+void kvm_loongarch_reset_vcpu(LOONGARCHCPU *cpu);
|
|
+
|
|
+int kvm_loongarch_set_interrupt(LOONGARCHCPU *cpu, int irq, int level);
|
|
+int kvm_loongarch_set_ipi_interrupt(LOONGARCHCPU *cpu, int irq, int level);
|
|
+
|
|
+int kvm_loongarch_put_pvtime(LOONGARCHCPU *cpu);
|
|
+int kvm_loongarch_get_pvtime(LOONGARCHCPU *cpu);
|
|
+
|
|
+#ifndef KVM_INTERRUPT_SET
|
|
+#define KVM_INTERRUPT_SET -1
|
|
+#endif
|
|
+
|
|
+#ifndef KVM_INTERRUPT_UNSET
|
|
+#define KVM_INTERRUPT_UNSET -2
|
|
+#endif
|
|
+
|
|
+#ifndef KVM_INTERRUPT_SET_LEVEL
|
|
+#define KVM_INTERRUPT_SET_LEVEL -3
|
|
+#endif
|
|
+
|
|
+#endif /* KVM_LOONGARCH_H */
|
|
diff --git a/target/loongarch64/larch-defs.h b/target/loongarch64/larch-defs.h
|
|
new file mode 100644
|
|
index 0000000000..e22a0dc652
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/larch-defs.h
|
|
@@ -0,0 +1,42 @@
|
|
+/*
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#ifndef QEMU_LOONGARCH_DEFS_H
|
|
+#define QEMU_LOONGARCH_DEFS_H
|
|
+
|
|
+/* If we want to use host float regs... */
|
|
+/* #define USE_HOST_FLOAT_REGS */
|
|
+
|
|
+/* Real pages are variable size... */
|
|
+#define TARGET_PAGE_BITS 14
|
|
+#define LOONGARCH_TLB_MAX 2112
|
|
+#define TARGET_LONG_BITS 64
|
|
+#define TARGET_PHYS_ADDR_SPACE_BITS 48
|
|
+#define TARGET_VIRT_ADDR_SPACE_BITS 48
|
|
+
|
|
+/*
|
|
+ * bit definitions for insn_flags (ISAs/ASEs flags)
|
|
+ * ------------------------------------------------
|
|
+ */
|
|
+#define ISA_LARCH32 0x00000001ULL
|
|
+#define ISA_LARCH64 0x00000002ULL
|
|
+#define INSN_LOONGARCH 0x00010000ULL
|
|
+
|
|
+#define CPU_LARCH32 (ISA_LARCH32)
|
|
+#define CPU_LARCH64 (ISA_LARCH32 | ISA_LARCH64)
|
|
+
|
|
+#endif /* QEMU_LOONGARCH_DEFS_H */
|
|
diff --git a/target/loongarch64/machine.c b/target/loongarch64/machine.c
|
|
new file mode 100644
|
|
index 0000000000..d91c858383
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/machine.c
|
|
@@ -0,0 +1,423 @@
|
|
+/*
|
|
+ * Loongarch 3A5000 CPU migration state (VMState) definitions
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "qemu-common.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "hw/hw.h"
|
|
+#include "kvm_larch.h"
|
|
+#include "migration/cpu.h"
|
|
+#include "linux/kvm.h"
|
|
+#include "sysemu/kvm.h"
|
|
+#include "qemu/error-report.h"
|
|
+
|
|
+static int cpu_post_load(void *opaque, int version_id)
|
|
+{
|
|
+ LOONGARCHCPU *cpu = opaque;
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+ int r = 0;
|
|
+
|
|
+ if (!kvm_enabled()) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+#ifdef CONFIG_KVM
|
|
+ struct kvm_loongarch_vcpu_state vcpu_state;
|
|
+ int i;
|
|
+
|
|
+ vcpu_state.online_vcpus = cpu->online_vcpus;
|
|
+ vcpu_state.is_migrate = cpu->is_migrate;
|
|
+ vcpu_state.cpu_freq = cpu->cpu_freq;
|
|
+ vcpu_state.count_ctl = cpu->count_ctl;
|
|
+ vcpu_state.pending_exceptions = cpu->pending_exceptions;
|
|
+ vcpu_state.pending_exceptions_clr = cpu->pending_exceptions_clr;
|
|
+ for (i = 0; i < 4; i++) {
|
|
+ vcpu_state.core_ext_ioisr[i] = cpu->core_ext_ioisr[i];
|
|
+ }
|
|
+ r = kvm_vcpu_ioctl(CPU(cpu), KVM_LARCH_SET_VCPU_STATE, &vcpu_state);
|
|
+ if (r) {
|
|
+ error_report("set vcpu state failed %d", r);
|
|
+ }
|
|
+
|
|
+ kvm_loongarch_put_pvtime(cpu);
|
|
+#endif
|
|
+
|
|
+ restore_fp_status(env);
|
|
+ compute_hflags(env);
|
|
+
|
|
+ return r;
|
|
+}
|
|
+
|
|
+static int cpu_pre_save(void *opaque)
|
|
+{
|
|
+#ifdef CONFIG_KVM
|
|
+ LOONGARCHCPU *cpu = opaque;
|
|
+ struct kvm_loongarch_vcpu_state vcpu_state;
|
|
+ int i, r = 0;
|
|
+ if (!kvm_enabled()) {
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ r = kvm_vcpu_ioctl(CPU(cpu), KVM_LARCH_GET_VCPU_STATE, &vcpu_state);
|
|
+ if (r < 0) {
|
|
+ error_report("get vcpu state failed %d", r);
|
|
+ return r;
|
|
+ }
|
|
+
|
|
+ cpu->online_vcpus = vcpu_state.online_vcpus;
|
|
+ cpu->is_migrate = vcpu_state.is_migrate;
|
|
+ cpu->cpu_freq = vcpu_state.cpu_freq;
|
|
+ cpu->count_ctl = vcpu_state.count_ctl;
|
|
+ cpu->pending_exceptions = vcpu_state.pending_exceptions;
|
|
+ cpu->pending_exceptions_clr = vcpu_state.pending_exceptions_clr;
|
|
+ for (i = 0; i < 4; i++) {
|
|
+ cpu->core_ext_ioisr[i] = vcpu_state.core_ext_ioisr[i];
|
|
+ }
|
|
+
|
|
+ kvm_loongarch_get_pvtime(cpu);
|
|
+#endif
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+/* FPU state */
|
|
+
|
|
+static int get_fpr(QEMUFile *f, void *pv, size_t size,
|
|
+ const VMStateField *field)
|
|
+{
|
|
+ fpr_t *v = pv;
|
|
+ qemu_get_be64s(f, &v->d);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int put_fpr(QEMUFile *f, void *pv, size_t size,
|
|
+ const VMStateField *field, JSONWriter *vmdesc)
|
|
+{
|
|
+ fpr_t *v = pv;
|
|
+ qemu_put_be64s(f, &v->d);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const VMStateInfo vmstate_info_fpr = {
|
|
+ .name = "fpr",
|
|
+ .get = get_fpr,
|
|
+ .put = put_fpr,
|
|
+};
|
|
+
|
|
+#define VMSTATE_FPR_ARRAY_V(_f, _s, _n, _v) \
|
|
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_fpr, fpr_t)
|
|
+
|
|
+#define VMSTATE_FPR_ARRAY(_f, _s, _n) VMSTATE_FPR_ARRAY_V(_f, _s, _n, 0)
|
|
+
|
|
+static VMStateField vmstate_fpu_fields[] = {
|
|
+ VMSTATE_FPR_ARRAY(fpr, CPULOONGARCHFPUContext, 32),
|
|
+ VMSTATE_UINT32(fcsr0, CPULOONGARCHFPUContext), VMSTATE_END_OF_LIST()
|
|
+};
|
|
+
|
|
+const VMStateDescription vmstate_fpu = { .name = "cpu/fpu",
|
|
+ .version_id = 1,
|
|
+ .minimum_version_id = 1,
|
|
+ .fields = vmstate_fpu_fields };
|
|
+
|
|
+const VMStateDescription vmstate_inactive_fpu = { .name = "cpu/inactive_fpu",
|
|
+ .version_id = 1,
|
|
+ .minimum_version_id = 1,
|
|
+ .fields =
|
|
+ vmstate_fpu_fields };
|
|
+
|
|
+/* TC state */
|
|
+
|
|
+static VMStateField vmstate_tc_fields[] = {
|
|
+ VMSTATE_UINTTL_ARRAY(gpr, TCState, 32), VMSTATE_UINTTL(PC, TCState),
|
|
+ VMSTATE_END_OF_LIST()
|
|
+};
|
|
+
|
|
+const VMStateDescription vmstate_tc = { .name = "cpu/tc",
|
|
+ .version_id = 1,
|
|
+ .minimum_version_id = 1,
|
|
+ .fields = vmstate_tc_fields };
|
|
+
|
|
+const VMStateDescription vmstate_inactive_tc = { .name = "cpu/inactive_tc",
|
|
+ .version_id = 1,
|
|
+ .minimum_version_id = 1,
|
|
+ .fields = vmstate_tc_fields };
|
|
+
|
|
+/* TLB state */
|
|
+
|
|
+static int get_tlb(QEMUFile *f, void *pv, size_t size,
|
|
+ const VMStateField *field)
|
|
+{
|
|
+ ls3a5k_tlb_t *v = pv;
|
|
+ uint32_t flags;
|
|
+
|
|
+ qemu_get_betls(f, &v->VPN);
|
|
+ qemu_get_be64s(f, &v->PageMask);
|
|
+ qemu_get_be32s(f, &v->PageSize);
|
|
+ qemu_get_be16s(f, &v->ASID);
|
|
+ qemu_get_be32s(f, &flags);
|
|
+ v->RPLV1 = (flags >> 21) & 1;
|
|
+ v->RPLV0 = (flags >> 20) & 1;
|
|
+ v->PLV1 = (flags >> 18) & 3;
|
|
+ v->PLV0 = (flags >> 16) & 3;
|
|
+ v->EHINV = (flags >> 15) & 1;
|
|
+ v->RI1 = (flags >> 14) & 1;
|
|
+ v->RI0 = (flags >> 13) & 1;
|
|
+ v->XI1 = (flags >> 12) & 1;
|
|
+ v->XI0 = (flags >> 11) & 1;
|
|
+ v->WE1 = (flags >> 10) & 1;
|
|
+ v->WE0 = (flags >> 9) & 1;
|
|
+ v->V1 = (flags >> 8) & 1;
|
|
+ v->V0 = (flags >> 7) & 1;
|
|
+ v->C1 = (flags >> 4) & 7;
|
|
+ v->C0 = (flags >> 1) & 7;
|
|
+ v->G = (flags >> 0) & 1;
|
|
+ qemu_get_be64s(f, &v->PPN0);
|
|
+ qemu_get_be64s(f, &v->PPN1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int put_tlb(QEMUFile *f, void *pv, size_t size,
|
|
+ const VMStateField *field, JSONWriter *vmdesc)
|
|
+{
|
|
+ ls3a5k_tlb_t *v = pv;
|
|
+
|
|
+ uint16_t asid = v->ASID;
|
|
+ uint32_t flags =
|
|
+ ((v->RPLV1 << 21) | (v->RPLV0 << 20) | (v->PLV1 << 18) |
|
|
+ (v->PLV0 << 16) | (v->EHINV << 15) | (v->RI1 << 14) | (v->RI0 << 13) |
|
|
+ (v->XI1 << 12) | (v->XI0 << 11) | (v->WE1 << 10) | (v->WE0 << 9) |
|
|
+ (v->V1 << 8) | (v->V0 << 7) | (v->C1 << 4) | (v->C0 << 1) |
|
|
+ (v->G << 0));
|
|
+
|
|
+ qemu_put_betls(f, &v->VPN);
|
|
+ qemu_put_be64s(f, &v->PageMask);
|
|
+ qemu_put_be32s(f, &v->PageSize);
|
|
+ qemu_put_be16s(f, &asid);
|
|
+ qemu_put_be32s(f, &flags);
|
|
+ qemu_put_be64s(f, &v->PPN0);
|
|
+ qemu_put_be64s(f, &v->PPN1);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+const VMStateInfo vmstate_info_tlb = {
|
|
+ .name = "tlb_entry",
|
|
+ .get = get_tlb,
|
|
+ .put = put_tlb,
|
|
+};
|
|
+
|
|
+#define VMSTATE_TLB_ARRAY_V(_f, _s, _n, _v) \
|
|
+ VMSTATE_ARRAY(_f, _s, _n, _v, vmstate_info_tlb, ls3a5k_tlb_t)
|
|
+
|
|
+#define VMSTATE_TLB_ARRAY(_f, _s, _n) VMSTATE_TLB_ARRAY_V(_f, _s, _n, 0)
|
|
+
|
|
+const VMStateDescription vmstate_tlb = {
|
|
+ .name = "cpu/tlb",
|
|
+ .version_id = 2,
|
|
+ .minimum_version_id = 2,
|
|
+ .fields =
|
|
+ (VMStateField[]){ VMSTATE_UINT32(nb_tlb, CPULOONGARCHTLBContext),
|
|
+ VMSTATE_UINT32(tlb_in_use, CPULOONGARCHTLBContext),
|
|
+ VMSTATE_TLB_ARRAY(mmu.ls3a5k.tlb,
|
|
+ CPULOONGARCHTLBContext,
|
|
+ LOONGARCH_TLB_MAX),
|
|
+ VMSTATE_END_OF_LIST() }
|
|
+};
|
|
+
|
|
+/* LOONGARCH CPU state */
|
|
+
|
|
+const VMStateDescription vmstate_loongarch_cpu = {
|
|
+ .name = "cpu",
|
|
+ .version_id = 15,
|
|
+ .minimum_version_id = 15,
|
|
+ .post_load = cpu_post_load,
|
|
+ .pre_save = cpu_pre_save,
|
|
+ .fields =
|
|
+ (VMStateField[]){
|
|
+ /* Active TC */
|
|
+ VMSTATE_STRUCT(env.active_tc, LOONGARCHCPU, 1, vmstate_tc,
|
|
+ TCState),
|
|
+
|
|
+ /* Active FPU */
|
|
+ VMSTATE_STRUCT(env.active_fpu, LOONGARCHCPU, 1, vmstate_fpu,
|
|
+ CPULOONGARCHFPUContext),
|
|
+
|
|
+ /* TLB */
|
|
+ VMSTATE_STRUCT_POINTER(env.tlb, LOONGARCHCPU, vmstate_tlb,
|
|
+ CPULOONGARCHTLBContext),
|
|
+ /* CPU metastate */
|
|
+ VMSTATE_UINT32(env.current_tc, LOONGARCHCPU),
|
|
+ VMSTATE_INT32(env.error_code, LOONGARCHCPU),
|
|
+ VMSTATE_UINTTL(env.btarget, LOONGARCHCPU),
|
|
+ VMSTATE_UINTTL(env.bcond, LOONGARCHCPU),
|
|
+
|
|
+ VMSTATE_UINT64(env.lladdr, LOONGARCHCPU),
|
|
+
|
|
+ /* PV time */
|
|
+ VMSTATE_UINT64(env.st.guest_addr, LOONGARCHCPU),
|
|
+
|
|
+ /* Remaining CSR registers */
|
|
+ VMSTATE_UINT64(env.CSR_CRMD, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PRMD, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_EUEN, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_MISC, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ECFG, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ESTAT, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ERA, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_BADV, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_BADI, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_EEPN, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBIDX, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBEHI, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBELO0, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBELO1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBWIRED, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_GTLBC, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TRGP, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PGDL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PGDH, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PGD, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PWCTL0, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PWCTL1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_STLBPGSIZE, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_RVACFG, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_CPUID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PRCFG1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PRCFG2, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PRCFG3, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_KS0, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_KS1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_KS2, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_KS3, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_KS4, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_KS5, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_KS6, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_KS7, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TMID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TCFG, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TVAL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_CNTC, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TINTCLR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_GSTAT, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_GCFG, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_GINTC, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_GCNTC, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_LLBCTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IMPCTL1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IMPCTL2, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_GNMI, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBRENT, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBRBADV, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBRERA, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBRSAVE, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBRELO0, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBRELO1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBREHI, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_TLBRPRMD, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ERRCTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ERRINFO, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ERRINFO1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ERRENT, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ERRERA, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_ERRSAVE, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_CTAG, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DMWIN0, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DMWIN1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DMWIN2, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DMWIN3, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PERFCTRL0, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PERFCNTR0, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PERFCTRL1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PERFCNTR1, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PERFCTRL2, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PERFCNTR2, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PERFCTRL3, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_PERFCNTR3, LOONGARCHCPU),
|
|
+ /* debug */
|
|
+ VMSTATE_UINT64(env.CSR_MWPC, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_MWPS, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB0ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB0MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB0CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB0ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB1ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB1MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB1CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB1ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB2ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB2MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB2CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB2ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB3ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB3MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB3CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DB3ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_FWPC, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_FWPS, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB0ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB0MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB0CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB0ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB1ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB1MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB1CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB1ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB2ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB2MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB2CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB2ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB3ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB3MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB3CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB3ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB4ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB4MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB4CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB4ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB5ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB5MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB5CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB5ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB6ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB6MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB6CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB6ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB7ADDR, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB7MASK, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB7CTL, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_IB7ASID, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DEBUG, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DERA, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(env.CSR_DESAVE, LOONGARCHCPU),
|
|
+
|
|
+ VMSTATE_STRUCT_ARRAY(env.fpus, LOONGARCHCPU, LOONGARCH_FPU_MAX, 1,
|
|
+ vmstate_inactive_fpu, CPULOONGARCHFPUContext),
|
|
+ VMSTATE_UINT8(online_vcpus, LOONGARCHCPU),
|
|
+ VMSTATE_UINT8(is_migrate, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(counter_value, LOONGARCHCPU),
|
|
+ VMSTATE_UINT32(cpu_freq, LOONGARCHCPU),
|
|
+ VMSTATE_UINT32(count_ctl, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(pending_exceptions, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64(pending_exceptions_clr, LOONGARCHCPU),
|
|
+ VMSTATE_UINT64_ARRAY(core_ext_ioisr, LOONGARCHCPU, 4),
|
|
+
|
|
+ VMSTATE_END_OF_LIST() },
|
|
+};
|
|
diff --git a/target/loongarch64/meson.build b/target/loongarch64/meson.build
|
|
new file mode 100644
|
|
index 0000000000..6badf4484e
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/meson.build
|
|
@@ -0,0 +1,35 @@
|
|
+loongarch_user_ss = ss.source_set()
|
|
+loongarch_softmmu_ss = ss.source_set()
|
|
+loongarch_ss = ss.source_set()
|
|
+loongarch_ss.add(files(
|
|
+ 'cpu.c',
|
|
+ 'fpu.c',
|
|
+ 'gdbstub.c',
|
|
+))
|
|
+
|
|
+gen = [
|
|
+ decodetree.process('insn.decode', extra_args: [ '--decode', 'decode_insn',
|
|
+ '--insnwidth', '32' ])
|
|
+]
|
|
+
|
|
+loongarch_ss.add(gen)
|
|
+loongarch_ss.add(when: 'CONFIG_TCG', if_true: files(
|
|
+ 'helper.c',
|
|
+ 'translate.c',
|
|
+ 'op_helper.c',
|
|
+ 'fpu_helper.c',
|
|
+ 'tlb_helper.c',
|
|
+ 'csr_helper.c',
|
|
+))
|
|
+
|
|
+loongarch_softmmu_ss.add(when: 'CONFIG_SOFTMMU', if_true: files(
|
|
+ 'machine.c',
|
|
+ 'stabletimer.c',
|
|
+ 'arch_dump.c',
|
|
+))
|
|
+
|
|
+loongarch_softmmu_ss.add(when: 'CONFIG_KVM', if_true: files('kvm.c'))
|
|
+
|
|
+target_arch += {'loongarch64': loongarch_ss}
|
|
+target_softmmu_arch += {'loongarch64': loongarch_softmmu_ss}
|
|
+target_user_arch += {'loongarch64': loongarch_user_ss}
|
|
diff --git a/target/loongarch64/op_helper.c b/target/loongarch64/op_helper.c
|
|
new file mode 100644
|
|
index 0000000000..7257e59479
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/op_helper.c
|
|
@@ -0,0 +1,485 @@
|
|
+/*
|
|
+ * LOONGARCH emulation helpers for qemu.
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "qemu/main-loop.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "qemu/host-utils.h"
|
|
+#include "exec/helper-proto.h"
|
|
+#include "exec/exec-all.h"
|
|
+#include "exec/cpu_ldst.h"
|
|
+#include "sysemu/kvm.h"
|
|
+#include "qemu/crc32c.h"
|
|
+#include <zlib.h>
|
|
+#include "hw/irq.h"
|
|
+#include "hw/core/cpu.h"
|
|
+#include "instmap.h"
|
|
+
|
|
+/* Exception processing helpers */
|
|
+
|
|
+void helper_raise_exception_err(CPULOONGARCHState *env, uint32_t exception,
|
|
+ int error_code)
|
|
+{
|
|
+ do_raise_exception_err(env, exception, error_code, 0);
|
|
+}
|
|
+
|
|
+void helper_raise_exception(CPULOONGARCHState *env, uint32_t exception)
|
|
+{
|
|
+ do_raise_exception(env, exception, GETPC());
|
|
+}
|
|
+
|
|
+void helper_raise_exception_debug(CPULOONGARCHState *env)
|
|
+{
|
|
+ do_raise_exception(env, EXCP_DEBUG, 0);
|
|
+}
|
|
+
|
|
+static void raise_exception(CPULOONGARCHState *env, uint32_t exception)
|
|
+{
|
|
+ do_raise_exception(env, exception, 0);
|
|
+}
|
|
+
|
|
+#if defined(CONFIG_USER_ONLY)
|
|
+#define HELPER_LD(name, insn, type) \
|
|
+ static inline type do_##name(CPULOONGARCHState *env, target_ulong addr, \
|
|
+ int mem_idx, uintptr_t retaddr) \
|
|
+ { \
|
|
+ return (type)cpu_##insn##_data_ra(env, addr, retaddr); \
|
|
+ }
|
|
+#else
|
|
+
|
|
+#define HF_SMAP_SHIFT 23 /* CR4.SMAP */
|
|
+#define HF_SMAP_MASK (1 << HF_SMAP_SHIFT)
|
|
+#define MMU_KNOSMAP_IDX 2
|
|
+#define HF_CPL_SHIFT 0
|
|
+#define HF_CPL_MASK (3 << HF_CPL_SHIFT)
|
|
+#define AC_MASK 0x00040000
|
|
+#define MMU_KSMAP_IDX 0
|
|
+static inline int cpu_mmu_index_kernel(CPULOONGARCHState *env)
|
|
+{
|
|
+ return !(env->hflags & HF_SMAP_MASK)
|
|
+ ? MMU_KNOSMAP_IDX
|
|
+ : ((env->hflags & HF_CPL_MASK) < 3 && (env->hflags & AC_MASK))
|
|
+ ? MMU_KNOSMAP_IDX
|
|
+ : MMU_KSMAP_IDX;
|
|
+}
|
|
+
|
|
+#define cpu_ldl_kernel_ra(e, p, r) \
|
|
+ cpu_ldl_mmuidx_ra(e, p, cpu_mmu_index_kernel(e), r)
|
|
+
|
|
+#define HELPER_LD(name, insn, type) \
|
|
+ static inline type do_##name(CPULOONGARCHState *env, target_ulong addr, \
|
|
+ int mem_idx, uintptr_t retaddr) \
|
|
+ { \
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#if defined(CONFIG_USER_ONLY)
|
|
+#define HELPER_ST(name, insn, type) \
|
|
+ static inline void do_##name(CPULOONGARCHState *env, target_ulong addr, \
|
|
+ type val, int mem_idx, uintptr_t retaddr) \
|
|
+ { \
|
|
+ }
|
|
+#else
|
|
+#define HELPER_ST(name, insn, type) \
|
|
+ static inline void do_##name(CPULOONGARCHState *env, target_ulong addr, \
|
|
+ type val, int mem_idx, uintptr_t retaddr) \
|
|
+ { \
|
|
+ }
|
|
+#endif
|
|
+
|
|
+static inline target_ulong bitswap(target_ulong v)
|
|
+{
|
|
+ v = ((v >> 1) & (target_ulong)0x5555555555555555ULL) |
|
|
+ ((v & (target_ulong)0x5555555555555555ULL) << 1);
|
|
+ v = ((v >> 2) & (target_ulong)0x3333333333333333ULL) |
|
|
+ ((v & (target_ulong)0x3333333333333333ULL) << 2);
|
|
+ v = ((v >> 4) & (target_ulong)0x0F0F0F0F0F0F0F0FULL) |
|
|
+ ((v & (target_ulong)0x0F0F0F0F0F0F0F0FULL) << 4);
|
|
+ return v;
|
|
+}
|
|
+
|
|
+target_ulong helper_dbitswap(target_ulong rt)
|
|
+{
|
|
+ return bitswap(rt);
|
|
+}
|
|
+
|
|
+target_ulong helper_bitswap(target_ulong rt)
|
|
+{
|
|
+ return (int32_t)bitswap(rt);
|
|
+}
|
|
+
|
|
+/* these crc32 functions are based on target/arm/helper-a64.c */
|
|
+target_ulong helper_crc32(target_ulong val, target_ulong m, uint32_t sz)
|
|
+{
|
|
+ uint8_t buf[8];
|
|
+ target_ulong mask = ((sz * 8) == 64) ? -1ULL : ((1ULL << (sz * 8)) - 1);
|
|
+
|
|
+ m &= mask;
|
|
+ stq_le_p(buf, m);
|
|
+ return (int32_t)(crc32(val ^ 0xffffffff, buf, sz) ^ 0xffffffff);
|
|
+}
|
|
+
|
|
+target_ulong helper_crc32c(target_ulong val, target_ulong m, uint32_t sz)
|
|
+{
|
|
+ uint8_t buf[8];
|
|
+ target_ulong mask = ((sz * 8) == 64) ? -1ULL : ((1ULL << (sz * 8)) - 1);
|
|
+ m &= mask;
|
|
+ stq_le_p(buf, m);
|
|
+ return (int32_t)(crc32c(val, buf, sz) ^ 0xffffffff);
|
|
+}
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+
|
|
+#define HELPER_LD_ATOMIC(name, insn, almask) \
|
|
+ target_ulong helper_##name(CPULOONGARCHState *env, target_ulong arg, \
|
|
+ int mem_idx) \
|
|
+ { \
|
|
+ }
|
|
+#endif
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+void helper_drdtime(CPULOONGARCHState *env, target_ulong rd, target_ulong rs)
|
|
+{
|
|
+ env->active_tc.gpr[rd] = cpu_loongarch_get_stable_counter(env);
|
|
+ env->active_tc.gpr[rs] = env->CSR_TMID;
|
|
+}
|
|
+#endif
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+static void debug_pre_ertn(CPULOONGARCHState *env)
|
|
+{
|
|
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
|
|
+ qemu_log("ERTN: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx,
|
|
+ env->active_tc.PC, env->CSR_ERA);
|
|
+ qemu_log("\n");
|
|
+ }
|
|
+}
|
|
+
|
|
+static void debug_post_ertn(CPULOONGARCHState *env)
|
|
+{
|
|
+ if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
|
|
+ qemu_log("ERTN: PC " TARGET_FMT_lx " ERA " TARGET_FMT_lx,
|
|
+ env->active_tc.PC, env->CSR_ERA);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void set_pc(CPULOONGARCHState *env, target_ulong error_pc)
|
|
+{
|
|
+ env->active_tc.PC = error_pc & ~(target_ulong)1;
|
|
+}
|
|
+
|
|
+static inline void exception_return(CPULOONGARCHState *env)
|
|
+{
|
|
+ debug_pre_ertn(env);
|
|
+
|
|
+ if (cpu_refill_state(env)) {
|
|
+ env->CSR_CRMD &= (~0x7);
|
|
+ env->CSR_CRMD |= (env->CSR_TLBRPRMD & 0x7);
|
|
+ /* Clear Refill flag and set pc */
|
|
+ env->CSR_TLBRERA &= (~0x1);
|
|
+ set_pc(env, env->CSR_TLBRERA);
|
|
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
|
|
+ qemu_log("%s: TLBRERA 0x%lx\n", __func__, env->CSR_TLBRERA);
|
|
+ }
|
|
+ } else {
|
|
+ env->CSR_CRMD &= (~0x7);
|
|
+ env->CSR_CRMD |= (env->CSR_PRMD & 0x7);
|
|
+ /* Not a TLB refill: set pc from ERA */
|
|
+ set_pc(env, env->CSR_ERA);
|
|
+ if (qemu_loglevel_mask(CPU_LOG_INT)) {
|
|
+ qemu_log("%s: ERA 0x%lx\n", __func__, env->CSR_ERA);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ compute_hflags(env);
|
|
+ debug_post_ertn(env);
|
|
+}
|
|
+
|
|
+void helper_ertn(CPULOONGARCHState *env)
|
|
+{
|
|
+ exception_return(env);
|
|
+ env->lladdr = 1;
|
|
+}
|
|
+
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
+
|
|
+void helper_idle(CPULOONGARCHState *env)
|
|
+{
|
|
+ CPUState *cs = CPU(loongarch_env_get_cpu(env));
|
|
+
|
|
+ cs->halted = 1;
|
|
+ cpu_reset_interrupt(cs, CPU_INTERRUPT_WAKE);
|
|
+ /*
|
|
+ * Last instruction in the block, PC was updated before
|
|
+ * - no need to recover PC and icount
|
|
+ */
|
|
+ raise_exception(env, EXCP_HLT);
|
|
+}
|
|
+
|
|
+#if !defined(CONFIG_USER_ONLY)
|
|
+
|
|
+void loongarch_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
|
|
+ MMUAccessType access_type, int mmu_idx,
|
|
+ uintptr_t retaddr)
|
|
+{
|
|
+ while (1) {
|
|
+ }
|
|
+}
|
|
+
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
+
|
|
+void helper_store_scr(CPULOONGARCHState *env, uint32_t n, target_ulong val)
|
|
+{
|
|
+ env->scr[n & 0x3] = val;
|
|
+}
|
|
+
|
|
+target_ulong helper_load_scr(CPULOONGARCHState *env, uint32_t n)
|
|
+{
|
|
+ return env->scr[n & 0x3];
|
|
+}
|
|
+
|
|
+/* loongarch assert op */
|
|
+void helper_asrtle_d(CPULOONGARCHState *env, target_ulong rs, target_ulong rt)
|
|
+{
|
|
+ if (rs > rt) {
|
|
+ do_raise_exception(env, EXCP_AdEL, GETPC());
|
|
+ }
|
|
+}
|
|
+
|
|
+void helper_asrtgt_d(CPULOONGARCHState *env, target_ulong rs, target_ulong rt)
|
|
+{
|
|
+ if (rs <= rt) {
|
|
+ do_raise_exception(env, EXCP_AdEL, GETPC());
|
|
+ }
|
|
+}
|
|
+
|
|
+target_ulong helper_cto_w(CPULOONGARCHState *env, target_ulong a0)
|
|
+{
|
|
+ uint32_t v = (uint32_t)a0;
|
|
+ int temp = 0;
|
|
+
|
|
+ while ((v & 0x1) == 1) {
|
|
+ temp++;
|
|
+ v = v >> 1;
|
|
+ }
|
|
+
|
|
+ return (target_ulong)temp;
|
|
+}
|
|
+
|
|
+target_ulong helper_ctz_w(CPULOONGARCHState *env, target_ulong a0)
|
|
+{
|
|
+ uint32_t v = (uint32_t)a0;
|
|
+
|
|
+ if (v == 0) {
|
|
+ return 32;
|
|
+ }
|
|
+
|
|
+ int temp = 0;
|
|
+ while ((v & 0x1) == 0) {
|
|
+ temp++;
|
|
+ v = v >> 1;
|
|
+ }
|
|
+
|
|
+ return (target_ulong)temp;
|
|
+}
|
|
+
|
|
+target_ulong helper_cto_d(CPULOONGARCHState *env, target_ulong a0)
|
|
+{
|
|
+ uint64_t v = a0;
|
|
+ int temp = 0;
|
|
+
|
|
+ while ((v & 0x1) == 1) {
|
|
+ temp++;
|
|
+ v = v >> 1;
|
|
+ }
|
|
+
|
|
+ return (target_ulong)temp;
|
|
+}
|
|
+
|
|
+target_ulong helper_ctz_d(CPULOONGARCHState *env, target_ulong a0)
|
|
+{
|
|
+ uint64_t v = a0;
|
|
+
|
|
+ if (v == 0) {
|
|
+ return 64;
|
|
+ }
|
|
+
|
|
+ int temp = 0;
|
|
+ while ((v & 0x1) == 0) {
|
|
+ temp++;
|
|
+ v = v >> 1;
|
|
+ }
|
|
+
|
|
+ return (target_ulong)temp;
|
|
+}
|
|
+
|
|
+target_ulong helper_bitrev_w(CPULOONGARCHState *env, target_ulong a0)
|
|
+{
|
|
+ int32_t v = (int32_t)a0;
|
|
+ const int SIZE = 32;
|
|
+ uint8_t bytes[SIZE];
|
|
+
|
|
+ int i;
|
|
+ for (i = 0; i < SIZE; i++) {
|
|
+ bytes[i] = v & 0x1;
|
|
+ v = v >> 1;
|
|
+ }
|
|
+ /* v == 0 */
|
|
+ for (i = 0; i < SIZE; i++) {
|
|
+ v = v | ((uint32_t)bytes[i] << (SIZE - 1 - i));
|
|
+ }
|
|
+
|
|
+ return (target_ulong)(int32_t)v;
|
|
+}
|
|
+
|
|
+target_ulong helper_bitrev_d(CPULOONGARCHState *env, target_ulong a0)
|
|
+{
|
|
+ uint64_t v = a0;
|
|
+ const int SIZE = 64;
|
|
+ uint8_t bytes[SIZE];
|
|
+
|
|
+ int i;
|
|
+ for (i = 0; i < SIZE; i++) {
|
|
+ bytes[i] = v & 0x1;
|
|
+ v = v >> 1;
|
|
+ }
|
|
+ /* v == 0 */
|
|
+ for (i = 0; i < SIZE; i++) {
|
|
+ v = v | ((uint64_t)bytes[i] << (SIZE - 1 - i));
|
|
+ }
|
|
+
|
|
+ return (target_ulong)v;
|
|
+}
|
|
+
|
|
+void helper_memtrace_addr(CPULOONGARCHState *env, target_ulong address,
|
|
+ uint32_t op)
|
|
+{
|
|
+ qemu_log("[cpu %d asid 0x%lx pc 0x%lx] addr 0x%lx op",
|
|
+ CPU(loongarch_env_get_cpu(env))->cpu_index, env->CSR_ASID,
|
|
+ env->active_tc.PC, address);
|
|
+ switch (op) {
|
|
+ case OPC_LARCH_LDPTR_D:
|
|
+ qemu_log("OPC_LARCH_LDPTR_D");
|
|
+ break;
|
|
+ case OPC_LARCH_LD_D:
|
|
+ qemu_log("OPC_LARCH_LD_D");
|
|
+ break;
|
|
+ case OPC_LARCH_LDPTR_W:
|
|
+ qemu_log("OPC_LARCH_LDPTR_W");
|
|
+ break;
|
|
+ case OPC_LARCH_LD_W:
|
|
+ qemu_log("OPC_LARCH_LD_W");
|
|
+ break;
|
|
+ case OPC_LARCH_LD_H:
|
|
+ qemu_log("OPC_LARCH_LD_H");
|
|
+ break;
|
|
+ case OPC_LARCH_LD_B:
|
|
+ qemu_log("OPC_LARCH_LD_B");
|
|
+ break;
|
|
+ case OPC_LARCH_LD_WU:
|
|
+ qemu_log("OPC_LARCH_LD_WU");
|
|
+ break;
|
|
+ case OPC_LARCH_LD_HU:
|
|
+ qemu_log("OPC_LARCH_LD_HU");
|
|
+ break;
|
|
+ case OPC_LARCH_LD_BU:
|
|
+ qemu_log("OPC_LARCH_LD_BU");
|
|
+ break;
|
|
+ case OPC_LARCH_STPTR_D:
|
|
+ qemu_log("OPC_LARCH_STPTR_D");
|
|
+ break;
|
|
+ case OPC_LARCH_ST_D:
|
|
+ qemu_log("OPC_LARCH_ST_D");
|
|
+ break;
|
|
+ case OPC_LARCH_STPTR_W:
|
|
+ qemu_log("OPC_LARCH_STPTR_W");
|
|
+ break;
|
|
+ case OPC_LARCH_ST_W:
|
|
+ qemu_log("OPC_LARCH_ST_W");
|
|
+ break;
|
|
+ case OPC_LARCH_ST_H:
|
|
+ qemu_log("OPC_LARCH_ST_H");
|
|
+ break;
|
|
+ case OPC_LARCH_ST_B:
|
|
+ qemu_log("OPC_LARCH_ST_B");
|
|
+ break;
|
|
+ case OPC_LARCH_FLD_S:
|
|
+ qemu_log("OPC_LARCH_FLD_S");
|
|
+ break;
|
|
+ case OPC_LARCH_FLD_D:
|
|
+ qemu_log("OPC_LARCH_FLD_D");
|
|
+ break;
|
|
+ case OPC_LARCH_FST_S:
|
|
+ qemu_log("OPC_LARCH_FST_S");
|
|
+ break;
|
|
+ case OPC_LARCH_FST_D:
|
|
+ qemu_log("OPC_LARCH_FST_D");
|
|
+ break;
|
|
+ case OPC_LARCH_FLDX_S:
|
|
+ qemu_log("OPC_LARCH_FLDX_S");
|
|
+ break;
|
|
+ case OPC_LARCH_FLDGT_S:
|
|
+ qemu_log("OPC_LARCH_FLDGT_S");
|
|
+ break;
|
|
+ case OPC_LARCH_FLDLE_S:
|
|
+ qemu_log("OPC_LARCH_FLDLE_S");
|
|
+ break;
|
|
+ case OPC_LARCH_FSTX_S:
|
|
+ qemu_log("OPC_LARCH_FSTX_S");
|
|
+ break;
|
|
+ case OPC_LARCH_FSTGT_S:
|
|
+ qemu_log("OPC_LARCH_FSTGT_S");
|
|
+ break;
|
|
+ case OPC_LARCH_FSTLE_S:
|
|
+ qemu_log("OPC_LARCH_FSTLE_S");
|
|
+ break;
|
|
+ case OPC_LARCH_FLDX_D:
|
|
+ qemu_log("OPC_LARCH_FLDX_D");
|
|
+ break;
|
|
+ case OPC_LARCH_FLDGT_D:
|
|
+ qemu_log("OPC_LARCH_FLDGT_D");
|
|
+ break;
|
|
+ case OPC_LARCH_FLDLE_D:
|
|
+ qemu_log("OPC_LARCH_FLDLE_D");
|
|
+ break;
|
|
+ case OPC_LARCH_FSTX_D:
|
|
+ qemu_log("OPC_LARCH_FSTX_D");
|
|
+ break;
|
|
+ case OPC_LARCH_FSTGT_D:
|
|
+ qemu_log("OPC_LARCH_FSTGT_D");
|
|
+ break;
|
|
+ case OPC_LARCH_FSTLE_D:
|
|
+ qemu_log("OPC_LARCH_FSTLE_D");
|
|
+ break;
|
|
+ case OPC_LARCH_LL_W:
|
|
+ qemu_log("OPC_LARCH_LL_W");
|
|
+ break;
|
|
+ case OPC_LARCH_LL_D:
|
|
+ qemu_log("OPC_LARCH_LL_D");
|
|
+ break;
|
|
+ default:
|
|
+ qemu_log("0x%x", op);
|
|
+ }
|
|
+}
|
|
+
|
|
+void helper_memtrace_val(CPULOONGARCHState *env, target_ulong val)
|
|
+{
|
|
+ qemu_log("val 0x%lx\n", val);
|
|
+}
|
|
diff --git a/target/loongarch64/stabletimer.c b/target/loongarch64/stabletimer.c
|
|
new file mode 100644
|
|
index 0000000000..4f4ccc5d89
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/stabletimer.c
|
|
@@ -0,0 +1,117 @@
|
|
+/*
|
|
+ * QEMU LOONGARCH timer support
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "hw/loongarch/cpudevs.h"
|
|
+#include "qemu/timer.h"
|
|
+#include "sysemu/kvm.h"
|
|
+#include "internal.h"
|
|
+#include "hw/irq.h"
|
|
+
|
|
+#ifdef DEBUG_TIMER
|
|
+#define debug_timer(fmt, args...) \
|
|
+ printf("%s(%d)-%s -> " #fmt "\n", __FILE__, __LINE__, __func__, ##args);
|
|
+#else
|
|
+#define debug_timer(fmt, args...)
|
|
+#endif
|
|
+
|
|
+#define TIMER_PERIOD 10 /* 10 ns period for 100 Mhz frequency */
|
|
+#define STABLETIMER_TICK_MASK 0xfffffffffffcUL
|
|
+#define STABLETIMER_ENABLE 0x1UL
|
|
+#define STABLETIMER_PERIOD 0x2UL
|
|
+
|
|
+/* return random value in [low, high] */
|
|
/*
 * Return a pseudo-random TLB index in [low, high], used to pick a victim
 * entry for TLBFILL.  A simple LCG drives the sequence; the previous
 * result is avoided so consecutive fills do not hammer the same slot.
 *
 * Bug fix: when the range contains a single index (low == high) the
 * "retry while idx == prev_idx" loop could never terminate once that
 * index had been returned.  Handle the one-element range explicitly.
 */
uint32_t cpu_loongarch_get_random_ls3a5k_tlb(uint32_t low, uint32_t high)
{
    static uint32_t seed = 5;
    static uint32_t prev_idx;
    uint32_t idx;
    uint32_t nb_rand_tlb = high - low + 1;

    if (nb_rand_tlb == 1) {
        /* Only one candidate: must return it, skip the anti-repeat loop. */
        prev_idx = low;
        return low;
    }

    do {
        /* Classic rand() LCG constants; take the higher-entropy bits. */
        seed = 1103515245 * seed + 12345;
        idx = (seed >> 16) % nb_rand_tlb + low;
    } while (idx == prev_idx);
    prev_idx = idx;

    return idx;
}
|
|
+
|
|
+/* LOONGARCH timer */
|
|
/* Return the free-running stable counter value, derived from the virtual
 * clock at TIMER_PERIOD ns per tick (100 MHz model). */
uint64_t cpu_loongarch_get_stable_counter(CPULOONGARCHState *env)
{
    return qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) / TIMER_PERIOD;
}
|
|
+
|
|
/* Return the number of stable-timer ticks remaining before env->timer
 * fires.
 * NOTE(review): if the timer is not currently armed, timer_expire_time_ns()
 * may report an expiry in the past (or -1), making "expire - now" wrap to
 * a huge unsigned value — confirm callers only use this while armed. */
uint64_t cpu_loongarch_get_stable_timer_ticks(CPULOONGARCHState *env)
{
    uint64_t now, expire;

    now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
    expire = timer_expire_time_ns(env->timer);

    /* Convert the remaining nanoseconds back into timer ticks. */
    return (expire - now) / TIMER_PERIOD;
}
|
|
+
|
|
+void cpu_loongarch_store_stable_timer_config(CPULOONGARCHState *env,
|
|
+ uint64_t value)
|
|
+{
|
|
+ uint64_t now, next;
|
|
+
|
|
+ env->CSR_TCFG = value;
|
|
+ if (value & STABLETIMER_ENABLE) {
|
|
+ now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
|
|
+ next = now + (value & STABLETIMER_TICK_MASK) * TIMER_PERIOD;
|
|
+ timer_mod(env->timer, next);
|
|
+ }
|
|
+ debug_timer("0x%lx 0x%lx now 0x%lx, next 0x%lx", value, env->CSR_TCFG, now,
|
|
+ next);
|
|
+}
|
|
+
|
|
/* Expiry callback for the stable timer (TCG mode only; see
 * cpu_loongarch_clock_init).
 * Periodic mode (TCFG bit 1 set): re-arm for another full period.
 * One-shot mode: clear the enable bit in TCFG.
 * In either case the timer interrupt line is raised. */
static void loongarch_stable_timer_cb(void *opaque)
{
    CPULOONGARCHState *env;
    uint64_t now, next;

    env = opaque;
    debug_timer();
    if (env->CSR_TCFG & STABLETIMER_PERIOD) {
        /* Re-arm relative to "now" — callback latency is not compensated. */
        now = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);
        next = now + (env->CSR_TCFG & STABLETIMER_TICK_MASK) * TIMER_PERIOD;
        timer_mod(env->timer, next);
    } else {
        env->CSR_TCFG &= ~STABLETIMER_ENABLE;
    }

    qemu_irq_raise(env->irq[IRQ_TIMER]);
}
|
|
+
|
|
+void cpu_loongarch_clock_init(LOONGARCHCPU *cpu)
|
|
+{
|
|
+ CPULOONGARCHState *env = &cpu->env;
|
|
+
|
|
+ /*
|
|
+ * If we're in KVM mode, don't create the periodic timer, that is handled
|
|
+ * in kernel.
|
|
+ */
|
|
+ if (!kvm_enabled()) {
|
|
+ env->timer =
|
|
+ timer_new_ns(QEMU_CLOCK_VIRTUAL, &loongarch_stable_timer_cb, env);
|
|
+ }
|
|
+}
|
|
diff --git a/target/loongarch64/tlb_helper.c b/target/loongarch64/tlb_helper.c
|
|
new file mode 100644
|
|
index 0000000000..b6e924fbec
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/tlb_helper.c
|
|
@@ -0,0 +1,641 @@
|
|
+/*
|
|
+ * loongarch tlb emulation helpers for qemu.
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "qemu/main-loop.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "qemu/host-utils.h"
|
|
+#include "exec/helper-proto.h"
|
|
+#include "exec/exec-all.h"
|
|
+#include "exec/cpu_ldst.h"
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+
|
|
/* Generator for per-width guest-load helpers.
 * NOTE(review): the generated body is empty, so any non-void
 * instantiation would return an indeterminate value (UB at the caller) —
 * confirm this macro is never expanded, or implement the access. */
#define HELPER_LD(name, insn, type)                                          \
    static inline type do_##name(CPULOONGARCHState *env, target_ulong addr,  \
                                 int mem_idx, uintptr_t retaddr)             \
    {                                                                        \
    }
|
|
+
|
|
/* LDDIR (page-table directory walk step) helper.
 * Currently a stub: no TCG software page-walk is implemented. */
void helper_lddir(CPULOONGARCHState *env, target_ulong base, target_ulong rt,
                  target_ulong level, uint32_t mem_idx)
{
}
|
|
+
|
|
/* LDPTE (load page-table entry into TLB-refill CSRs) helper.
 * Currently a stub: no TCG software page-walk is implemented. */
void helper_ldpte(CPULOONGARCHState *env, target_ulong base, target_ulong odd,
                  uint32_t mem_idx)
{
}
|
|
+
|
|
/* Select the page-table root for the faulting virtual address:
 * CSR_PGDH for the high half of the address space (bit 63 set),
 * CSR_PGDL for the low half.
 * NOTE(review): the assert requires TLB-refill context (TLBRERA bit 0),
 * which makes the CSR_BADV branch below unreachable in debug builds —
 * confirm whether non-refill callers are intended. */
target_ulong helper_read_pgd(CPULOONGARCHState *env)
{
    uint64_t badv;

    assert(env->CSR_TLBRERA & 0x1);

    /* Pick the bad-VA register matching the exception context. */
    if (env->CSR_TLBRERA & 0x1) {
        badv = env->CSR_TLBRBADV;
    } else {
        badv = env->CSR_BADV;
    }

    if ((badv >> 63) & 0x1) {
        return env->CSR_PGDH;
    } else {
        return env->CSR_PGDL;
    }
}
|
|
+
|
|
+/* TLB management */
|
|
/*
 * Convert an encoded page size (log2 of bytes, 12..30 = 4 KB..1 GB) into
 * the double-page mask covering both the even and odd page:
 * (1 << (pagesize + 1)) - 1, e.g. 16 KB (14) -> [14:0].
 *
 * Fixes: the range check used '&&', which can never be true, so invalid
 * sizes slipped through; and '1 <<' overflowed a 32-bit int for 1 GB
 * pages (pagesize == 30 shifts by 31) — use an unsigned 64-bit shift.
 */
static uint64_t ls3a5k_pagesize_to_mask(int pagesize)
{
    /* 4KB - 1GB */
    if (pagesize < 12 || pagesize > 30) {
        printf("[ERROR] unsupported page size %d\n", pagesize);
        exit(-1);
    }

    return (1ULL << (pagesize + 1)) - 1;
}
|
|
+
|
|
/*
 * Populate one TLB entry from the CSR state.  The source CSR set depends
 * on context: during a TLB-refill exception (TLBRERA bit 0 set) the
 * TLBRE* refill registers are used, otherwise the normal TLBE*/TLBIDX
 * registers.  Each entry describes a double page: lo0 covers the even
 * page, lo1 the odd page.
 */
static void ls3a5k_fill_tlb_entry(CPULOONGARCHState *env, ls3a5k_tlb_t *tlb,
                                  int is_ftlb)
{
    uint64_t page_mask; /* 0000...00001111...1111 */
    uint32_t page_size;
    uint64_t entryhi;
    uint64_t lo0, lo1;

    if (env->CSR_TLBRERA & 0x1) {
        /* Refill context: page size is in TLBREHI[5:0]. */
        page_size = env->CSR_TLBREHI & 0x3f;
        entryhi = env->CSR_TLBREHI;
        lo0 = env->CSR_TLBRELO0;
        lo1 = env->CSR_TLBRELO1;
    } else {
        page_size = (env->CSR_TLBIDX >> CSR_TLBIDX_PS_SHIFT) & 0x3f;
        entryhi = env->CSR_TLBEHI;
        lo0 = env->CSR_TLBELO0;
        lo1 = env->CSR_TLBELO1;
    }

    if (page_size == 0) {
        printf("Warning: page_size is 0\n");
    }

    /*
     * 15-12  11-8   7-4    3-0
     *  4KB: 0001   1111   1111   1111 // double 4KB  mask [12:0]
     * 16KB: 0111   1111   1111   1111 // double 16KB mask [14:0]
     */
    if (is_ftlb) {
        /* FTLB entries all share the fixed page size chosen at init. */
        page_mask = env->tlb->mmu.ls3a5k.ftlb_mask;
    } else {
        page_mask = ls3a5k_pagesize_to_mask(page_size);
    }

    /* VA tag: bits [47:13] of EntryHi, minus the in-page bits. */
    tlb->VPN = entryhi & 0xffffffffe000 & ~page_mask;

    tlb->ASID = env->CSR_ASID & 0x3ff; /* CSR_ASID[9:0] */
    tlb->EHINV = 0;
    /* Entry is global only if both halves are marked global. */
    tlb->G = (lo0 >> CSR_TLBLO0_GLOBAL_SHIFT) & /* CSR_TLBLO[6] */
             (lo1 >> CSR_TLBLO1_GLOBAL_SHIFT) & 1;

    tlb->PageMask = page_mask;
    tlb->PageSize = page_size;

    /* Even-page attributes unpacked from lo0. */
    tlb->V0 = (lo0 >> CSR_TLBLO0_V_SHIFT) & 0x1;     /* [0] */
    tlb->WE0 = (lo0 >> CSR_TLBLO0_WE_SHIFT) & 0x1;   /* [1] */
    tlb->PLV0 = (lo0 >> CSR_TLBLO0_PLV_SHIFT) & 0x3; /* [3:2] */
    tlb->C0 = (lo0 >> CSR_TLBLO0_CCA_SHIFT) & 0x3;   /* [5:4] */
    tlb->PPN0 = (lo0 & 0xfffffffff000 & ~(page_mask >> 1));
    tlb->RI0 = (lo0 >> CSR_TLBLO0_RI_SHIFT) & 0x1;     /* [61] */
    tlb->XI0 = (lo0 >> CSR_TLBLO0_XI_SHIFT) & 0x1;     /* [62] */
    tlb->RPLV0 = (lo0 >> CSR_TLBLO0_RPLV_SHIFT) & 0x1; /* [63] */

    /* Odd-page attributes unpacked from lo1. */
    tlb->V1 = (lo1 >> CSR_TLBLO1_V_SHIFT) & 0x1;     /* [0] */
    tlb->WE1 = (lo1 >> CSR_TLBLO1_WE_SHIFT) & 0x1;   /* [1] */
    tlb->PLV1 = (lo1 >> CSR_TLBLO1_PLV_SHIFT) & 0x3; /* [3:2] */
    tlb->C1 = (lo1 >> CSR_TLBLO1_CCA_SHIFT) & 0x3;   /* [5:4] */
    tlb->PPN1 = (lo1 & 0xfffffffff000 & ~(page_mask >> 1));
    tlb->RI1 = (lo1 >> CSR_TLBLO1_RI_SHIFT) & 0x1;     /* [61] */
    tlb->XI1 = (lo1 >> CSR_TLBLO1_XI_SHIFT) & 0x1;     /* [62] */
    tlb->RPLV1 = (lo1 >> CSR_TLBLO1_RPLV_SHIFT) & 0x1; /* [63] */
}
|
|
+
|
|
+static void ls3a5k_fill_tlb(CPULOONGARCHState *env, int idx, bool tlbwr)
|
|
+{
|
|
+ ls3a5k_tlb_t *tlb;
|
|
+
|
|
+ tlb = &env->tlb->mmu.ls3a5k.tlb[idx];
|
|
+ if (tlbwr) {
|
|
+ if ((env->CSR_TLBIDX >> CSR_TLBIDX_EHINV_SHIFT) & 0x1) {
|
|
+ tlb->EHINV = 1;
|
|
+ return;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (idx < 2048) {
|
|
+ ls3a5k_fill_tlb_entry(env, tlb, 1);
|
|
+ } else {
|
|
+ ls3a5k_fill_tlb_entry(env, tlb, 0);
|
|
+ }
|
|
+}
|
|
+
|
|
+void ls3a5k_flush_vtlb(CPULOONGARCHState *env)
|
|
+{
|
|
+ uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
|
|
+ uint32_t vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;
|
|
+ int i;
|
|
+
|
|
+ ls3a5k_tlb_t *tlb;
|
|
+
|
|
+ for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) {
|
|
+ tlb = &env->tlb->mmu.ls3a5k.tlb[i];
|
|
+ tlb->EHINV = 1;
|
|
+ }
|
|
+
|
|
+ cpu_loongarch_tlb_flush(env);
|
|
+}
|
|
+
|
|
+void ls3a5k_flush_ftlb(CPULOONGARCHState *env)
|
|
+{
|
|
+ uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
|
|
+ int i;
|
|
+
|
|
+ ls3a5k_tlb_t *tlb;
|
|
+
|
|
+ for (i = 0; i < ftlb_size; ++i) {
|
|
+ tlb = &env->tlb->mmu.ls3a5k.tlb[i];
|
|
+ tlb->EHINV = 1;
|
|
+ }
|
|
+
|
|
+ cpu_loongarch_tlb_flush(env);
|
|
+}
|
|
+
|
|
/*
 * TLBCLR: invalidate non-global entries whose ASID matches the current
 * one.  If CSR_TLBIDX points into the FTLB, only the 8-way set it
 * selects is scanned; if it points into the VTLB, all VTLB entries are
 * scanned; otherwise nothing happens.
 */
void ls3a5k_helper_tlbclr(CPULOONGARCHState *env)
{
    int i;
    uint16_t asid;
    int vsize, fsize, index;
    int start = 0, end = -1; /* default: empty range, loop body skipped */

    asid = env->CSR_ASID & 0x3ff;
    vsize = env->tlb->mmu.ls3a5k.vtlb_size;
    fsize = env->tlb->mmu.ls3a5k.ftlb_size;
    index = env->CSR_TLBIDX & CSR_TLBIDX_IDX;

    if (index < fsize) {
        /* FTLB. One line per operation */
        int set = index % 256;
        start = set * 8;
        end = start + 7;
    } else if (index < (fsize + vsize)) {
        /* VTLB. All entries */
        start = fsize;
        end = fsize + vsize - 1;
    } else {
        /* Ignore */
    }

    for (i = start; i <= end; i++) {
        ls3a5k_tlb_t *tlb;
        tlb = &env->tlb->mmu.ls3a5k.tlb[i];
        /* Global entries survive a TLBCLR. */
        if (!tlb->G && tlb->ASID == asid) {
            tlb->EHINV = 1;
        }
    }

    cpu_loongarch_tlb_flush(env);
}
|
|
+
|
|
/*
 * TLBFLUSH: invalidate entries unconditionally (unlike TLBCLR, global
 * and foreign-ASID entries are hit too).  Range selection mirrors
 * TLBCLR: one FTLB set, or the whole VTLB, depending on CSR_TLBIDX.
 */
void ls3a5k_helper_tlbflush(CPULOONGARCHState *env)
{
    int i;
    int vsize, fsize, index;
    int start = 0, end = -1; /* default: empty range, loop body skipped */

    vsize = env->tlb->mmu.ls3a5k.vtlb_size;
    fsize = env->tlb->mmu.ls3a5k.ftlb_size;
    index = env->CSR_TLBIDX & CSR_TLBIDX_IDX;

    if (index < fsize) {
        /* FTLB. One line per operation */
        int set = index % 256;
        start = set * 8;
        end = start + 7;
    } else if (index < (fsize + vsize)) {
        /* VTLB. All entries */
        start = fsize;
        end = fsize + vsize - 1;
    } else {
        /* Ignore */
    }

    for (i = start; i <= end; i++) {
        env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1;
    }

    cpu_loongarch_tlb_flush(env);
}
|
|
+
|
|
/*
 * INVTLB emulation.  @op selects the invalidation filter:
 *   0/1 : all entries
 *   4   : non-global entries with ASID == info[9:0]
 *   5   : non-global entries with matching ASID and virtual address
 *   6   : entries (global or ASID-matching) with matching virtual address
 * Any other op raises a reserved-instruction exception.
 * NOTE(review): ops 2 and 3 fall into the default branch and raise
 * EXCP_RI — confirm against the ISA manual whether they should instead
 * invalidate global/non-global subsets.
 */
void ls3a5k_helper_invtlb(CPULOONGARCHState *env, target_ulong addr,
                          target_ulong info, int op)
{
    uint32_t asid = info & 0x3ff;
    int i;

    switch (op) {
    case 0:
    case 1:
        for (i = 0; i < env->tlb->nb_tlb; i++) {
            env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1;
        }
        break;
    case 4: {
        int i;
        for (i = 0; i < env->tlb->nb_tlb; i++) {
            struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i];

            if (!tlb->G && tlb->ASID == asid) {
                tlb->EHINV = 1;
            }
        }
        break;
    }

    case 5: {
        int i;
        for (i = 0; i < env->tlb->nb_tlb; i++) {
            struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i];
            /* Reduce the lookup address to this entry's VPN tag. */
            uint64_t vpn = addr & 0xffffffffe000 & ~tlb->PageMask;

            if (!tlb->G && tlb->ASID == asid && vpn == tlb->VPN) {
                tlb->EHINV = 1;
            }
        }
        break;
    }
    case 6: {
        int i;
        for (i = 0; i < env->tlb->nb_tlb; i++) {
            struct ls3a5k_tlb_t *tlb = &env->tlb->mmu.ls3a5k.tlb[i];
            uint64_t vpn = addr & 0xffffffffe000 & ~tlb->PageMask;

            if ((tlb->G || tlb->ASID == asid) && vpn == tlb->VPN) {
                tlb->EHINV = 1;
            }
        }
        break;
    }
    default:
        helper_raise_exception(env, EXCP_RI);
    }

    cpu_loongarch_tlb_flush(env);
}
|
|
+
|
|
/*
 * Flush QEMU's cached per-page translations for both halves of a
 * double-page TLB entry: the even page spans [VPN, VPN | mask>>1], the
 * odd page the upper half of the mask range.
 */
static void ls3a5k_invalidate_tlb_entry(CPULOONGARCHState *env,
                                        ls3a5k_tlb_t *tlb)
{
    LOONGARCHCPU *cpu = loongarch_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    target_ulong addr;
    target_ulong end;
    target_ulong mask;

    mask = tlb->PageMask; /* 000...000111...111 */

    if (tlb->V0) {
        /* Even half, only if its valid bit is set. */
        addr = tlb->VPN & ~mask;  /* xxx...xxx[0]000..0000 */
        end = addr | (mask >> 1); /* xxx...xxx[0]111..1111 */
        while (addr < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }

    if (tlb->V1) {
        /* Odd half starts just above the even half. */
        /* xxx...xxx[1]000..0000 */
        addr = (tlb->VPN & ~mask) | ((mask >> 1) + 1);
        end = addr | mask; /* xxx...xxx[1]111..1111 */
        /* NOTE(review): "addr - 1 < end" differs from the V0 loop above;
         * it looks like a guard against addr wrapping past the top of
         * the address space — confirm intent and unify the two loops. */
        while (addr - 1 < end) {
            tlb_flush_page(cs, addr);
            addr += TARGET_PAGE_SIZE;
        }
    }
}
|
|
+
|
|
+void ls3a5k_invalidate_tlb(CPULOONGARCHState *env, int idx)
|
|
+{
|
|
+ ls3a5k_tlb_t *tlb;
|
|
+ int asid = env->CSR_ASID & 0x3ff;
|
|
+ tlb = &env->tlb->mmu.ls3a5k.tlb[idx];
|
|
+ if (tlb->G == 0 && tlb->ASID != asid) {
|
|
+ return;
|
|
+ }
|
|
+ ls3a5k_invalidate_tlb_entry(env, tlb);
|
|
+}
|
|
+
|
|
/*
 * TLBWR: write CSR state into the slot selected by CSR_TLBIDX.
 * FTLB external indices are way-major (way * 256 + set) while the
 * internal array is set-major (set * 8 + way), hence the conversion.
 */
void ls3a5k_helper_tlbwr(CPULOONGARCHState *env)
{
    int idx = env->CSR_TLBIDX & CSR_TLBIDX_IDX; /* [11:0] */

    /* Convert idx if in FTLB */
    if (idx < env->tlb->mmu.ls3a5k.ftlb_size) {
        /*
         * 0 3 6      0 1 2
         * 1 4 7  =>  3 4 5
         * 2 5 8      6 7 8
         */
        int set = idx % 256;
        int way = idx / 256;
        idx = set * 8 + way;
    }
    /* Drop cached translations for whatever the slot held before. */
    ls3a5k_invalidate_tlb(env, idx);
    ls3a5k_fill_tlb(env, idx, true);
}
|
|
+
|
|
/*
 * TLBFILL: write CSR state into an automatically chosen victim slot.
 * Pages matching the fixed FTLB page size go into the FTLB set selected
 * by the VA, with a random way; everything else goes into a random
 * non-wired VTLB slot.
 */
void ls3a5k_helper_tlbfill(CPULOONGARCHState *env)
{
    uint64_t mask;
    uint64_t address;
    int idx;
    int set, ftlb_idx;

    uint64_t entryhi;
    uint32_t pagesize;

    if (env->CSR_TLBRERA & 0x1) {
        /* Refill context: VA and size come from the TLBREHI register. */
        entryhi = env->CSR_TLBREHI & ~0x3f;
        pagesize = env->CSR_TLBREHI & 0x3f;
    } else {
        entryhi = env->CSR_TLBEHI;
        pagesize = (env->CSR_TLBIDX >> CSR_TLBIDX_PS_SHIFT) & 0x3f;
    }

    uint32_t ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
    uint32_t vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;

    mask = ls3a5k_pagesize_to_mask(pagesize);

    if (mask == env->tlb->mmu.ls3a5k.ftlb_mask &&
        env->tlb->mmu.ls3a5k.ftlb_size > 0) {
        /* only write into FTLB */
        address = entryhi & 0xffffffffe000; /* [47:13] */

        /* choose one set ramdomly */
        set = cpu_loongarch_get_random_ls3a5k_tlb(0, 7);

        /* index in one set */
        ftlb_idx = (address >> 15) & 0xff; /* [0,255] */

        /* final idx */
        idx = ftlb_idx * 8 + set; /* max is 7 + 8 * 255 = 2047 */
    } else {
        /* only write into VTLB */
        /* Wired VTLB entries are pinned and never chosen as victims. */
        int wired_nr = env->CSR_TLBWIRED & 0x3f;
        idx = cpu_loongarch_get_random_ls3a5k_tlb(ftlb_size + wired_nr,
                                                  ftlb_size + vtlb_size - 1);
    }

    ls3a5k_invalidate_tlb(env, idx);
    ls3a5k_fill_tlb(env, idx, false);
}
|
|
+
|
|
/*
 * TLBSRCH: look up the VA in CSR_TLBEHI.  On a hit, CSR_TLBIDX receives
 * the external index and page size; on a miss, its EHINV bit is set.
 * The VTLB is scanned linearly; the FTLB only needs the one set the VA
 * maps to (8 ways).
 */
void ls3a5k_helper_tlbsrch(CPULOONGARCHState *env)
{
    uint64_t mask;
    uint64_t vpn;
    uint64_t tag;
    uint16_t asid;

    int ftlb_size = env->tlb->mmu.ls3a5k.ftlb_size;
    int vtlb_size = env->tlb->mmu.ls3a5k.vtlb_size;
    int i;
    int ftlb_idx; /* [0,255] 2^8 0xff */

    ls3a5k_tlb_t *tlb;

    asid = env->CSR_ASID & 0x3ff;

    /* search VTLB */
    for (i = ftlb_size; i < ftlb_size + vtlb_size; ++i) {
        tlb = &env->tlb->mmu.ls3a5k.tlb[i];
        /* Each VTLB entry carries its own page mask. */
        mask = tlb->PageMask;

        vpn = env->CSR_TLBEHI & 0xffffffffe000 & ~mask;
        tag = tlb->VPN & ~mask;

        if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag &&
            tlb->EHINV != 1) {
            env->CSR_TLBIDX =
                (i & 0xfff) | ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT);
            goto _MATCH_OUT_;
        }
    }

    if (ftlb_size == 0) {
        goto _NO_MATCH_OUT_;
    }

    /* search FTLB */
    mask = env->tlb->mmu.ls3a5k.ftlb_mask;
    vpn = env->CSR_TLBEHI & 0xffffffffe000 & ~mask;

    ftlb_idx = (env->CSR_TLBEHI & 0xffffffffe000) >> 15; /* 16 KB */
    ftlb_idx = ftlb_idx & 0xff;                          /* [0,255] */

    for (i = 0; i < 8; ++i) {
        tlb = &env->tlb->mmu.ls3a5k.tlb[ftlb_idx * 8 + i];
        tag = tlb->VPN & ~mask;

        if ((tlb->G == 1 || tlb->ASID == asid) && vpn == tag &&
            tlb->EHINV != 1) {
            /* Report the way-major external index (way*256 + set). */
            env->CSR_TLBIDX = ((i * 256 + ftlb_idx) & 0xfff) |
                              ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT);
            goto _MATCH_OUT_;
        }
    }

_NO_MATCH_OUT_:
    env->CSR_TLBIDX = 1 << CSR_TLBIDX_EHINV_SHIFT;
_MATCH_OUT_:
    return;
}
|
|
+
|
|
/*
 * TLBRD: read the entry selected by CSR_TLBIDX back into the TLBEHI /
 * TLBELO0 / TLBELO1 / ASID CSRs.  An invalid entry sets the EHINV bit in
 * CSR_TLBIDX and clears the other registers.
 */
void ls3a5k_helper_tlbrd(CPULOONGARCHState *env)
{
    ls3a5k_tlb_t *tlb;
    int idx;
    uint16_t asid;

    idx = env->CSR_TLBIDX & CSR_TLBIDX_IDX;
    if (idx < env->tlb->mmu.ls3a5k.ftlb_size) {
        /* External FTLB index is way-major; convert to set-major. */
        int set = idx % 256;
        int way = idx / 256;
        idx = set * 8 + way;
    }

    tlb = &env->tlb->mmu.ls3a5k.tlb[idx];

    asid = env->CSR_ASID & 0x3ff;

    /* Reading an entry may rewrite CSR_ASID below; stale translations
     * for the old ASID must be dropped. */
    if (asid != tlb->ASID) {
        cpu_loongarch_tlb_flush(env);
    }

    if (tlb->EHINV) {
        /* invalid TLB entry */
        env->CSR_TLBIDX = 1 << CSR_TLBIDX_EHINV_SHIFT;
        env->CSR_TLBEHI = 0;
        env->CSR_TLBELO0 = 0;
        env->CSR_TLBELO1 = 0;
    } else {
        /* valid TLB entry */
        env->CSR_TLBIDX = (env->CSR_TLBIDX & 0xfff) |
                          ((tlb->PageSize & 0x3f) << CSR_TLBIDX_PS_SHIFT);
        env->CSR_TLBEHI = tlb->VPN;
        /* Repack the even-page attributes into TLBELO0. */
        env->CSR_TLBELO0 = (tlb->V0 << CSR_TLBLO0_V_SHIFT) |
                           (tlb->WE0 << CSR_TLBLO0_WE_SHIFT) |
                           (tlb->PLV0 << CSR_TLBLO0_PLV_SHIFT) |
                           (tlb->C0 << CSR_TLBLO0_CCA_SHIFT) |
                           (tlb->G << CSR_TLBLO0_GLOBAL_SHIFT) |
                           (tlb->PPN0 & 0xfffffffff000) |
                           ((uint64_t)tlb->RI0 << CSR_TLBLO0_RI_SHIFT) |
                           ((uint64_t)tlb->XI0 << CSR_TLBLO0_XI_SHIFT) |
                           ((uint64_t)tlb->RPLV0 << CSR_TLBLO0_RPLV_SHIFT);
        /* Repack the odd-page attributes into TLBELO1. */
        env->CSR_TLBELO1 = (tlb->V1 << CSR_TLBLO1_V_SHIFT) |
                           (tlb->WE1 << CSR_TLBLO1_WE_SHIFT) |
                           (tlb->PLV1 << CSR_TLBLO1_PLV_SHIFT) |
                           (tlb->C1 << CSR_TLBLO1_CCA_SHIFT) |
                           (tlb->G << CSR_TLBLO0_GLOBAL_SHIFT) |
                           (tlb->PPN1 & 0xfffffffff000) |
                           ((uint64_t)tlb->RI1 << CSR_TLBLO1_RI_SHIFT) |
                           ((uint64_t)tlb->XI1 << CSR_TLBLO1_XI_SHIFT) |
                           ((uint64_t)tlb->RPLV1 << CSR_TLBLO1_RPLV_SHIFT);
        env->CSR_ASID =
            (tlb->ASID << CSR_ASID_ASID_SHIFT) | (env->CSR_ASID & 0xff0000);
    }
}
|
|
+
|
|
/* Dispatch TLBWR to the model-specific implementation installed by mmu_init. */
void helper_tlbwr(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbwr(env);
}
|
|
+
|
|
/* Dispatch TLBFILL to the model-specific implementation. */
void helper_tlbfill(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbfill(env);
}
|
|
+
|
|
/* Dispatch TLBSRCH to the model-specific implementation. */
void helper_tlbsrch(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbsrch(env);
}
|
|
+
|
|
/* Dispatch TLBRD to the model-specific implementation. */
void helper_tlbrd(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbrd(env);
}
|
|
+
|
|
/* Dispatch TLBCLR to the model-specific implementation. */
void helper_tlbclr(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbclr(env);
}
|
|
+
|
|
/* Dispatch TLBFLUSH to the model-specific implementation. */
void helper_tlbflush(CPULOONGARCHState *env)
{
    env->tlb->helper_tlbflush(env);
}
|
|
+
|
|
/* Dispatch INVTLB to the model-specific implementation. */
void helper_invtlb(CPULOONGARCHState *env, target_ulong addr,
                   target_ulong info, target_ulong op)
{
    env->tlb->helper_invtlb(env, addr, info, op);
}
|
|
+
|
|
/*
 * Configure the LS3A5K MMU model: 64 variable-page-size VTLB entries
 * plus a 2048-entry, 8-way FTLB fixed at 16 KB pages.  All entries start
 * invalid, and the per-model TLB operation table is installed.
 */
static void ls3a5k_mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def)
{
    /* number of VTLB */
    env->tlb->nb_tlb = 64;
    env->tlb->mmu.ls3a5k.vtlb_size = 64;

    /* number of FTLB */
    env->tlb->nb_tlb += 2048;
    env->tlb->mmu.ls3a5k.ftlb_size = 2048;
    env->tlb->mmu.ls3a5k.ftlb_mask = (1 << 15) - 1; /* 16 KB */
    /*
     *  page_size    |       ftlb_mask         | party field
     * ----------------------------------------------------------------
     *    4 KB = 12  | ( 1 << 13 ) - 1 = [12:0] |   [12]
     *   16 KB = 14  | ( 1 << 15 ) - 1 = [14:0] |   [14]
     *   64 KB = 16  | ( 1 << 17 ) - 1 = [16:0] |   [16]
     *  256 KB = 18  | ( 1 << 19 ) - 1 = [18:0] |   [18]
     *    1 MB = 20  | ( 1 << 21 ) - 1 = [20:0] |   [20]
     *    4 MB = 22  | ( 1 << 23 ) - 1 = [22:0] |   [22]
     *   16 MB = 24  | ( 1 << 25 ) - 1 = [24:0] |   [24]
     *   64 MB = 26  | ( 1 << 27 ) - 1 = [26:0] |   [26]
     *  256 MB = 28  | ( 1 << 29 ) - 1 = [28:0] |   [28]
     *    1 GB = 30  | ( 1 << 31 ) - 1 = [30:0] |   [30]
     * ----------------------------------------------------------------
     * take party field index as @n. eg. For 16 KB, n = 14
     * ----------------------------------------------------------------
     * tlb->VPN = TLBEHI & 0xffffffffe000[47:13] & ~mask = [47:n+1]
     * tlb->PPN = TLBLO0 & 0xffffffffe000[47:13] & ~mask = [47:n+1]
     * tlb->PPN = TLBLO1 & 0xffffffffe000[47:13] & ~mask = [47:n+1]
     * ----------------------------------------------------------------
     * On mapping :
     * > vpn = address & 0xffffffffe000[47:13] & ~mask = [47:n+1]
     * > tag = tlb->VPN & ~mask = [47:n+1]
     * ----------------------------------------------------------------
     * physical address = [47:n+1]  |  [n:0]
     * physical address = tlb->PPN0 | (address & mask)
     * physical address = tlb->PPN1 | (address & mask)
     */

    /* Every slot starts invalid. */
    int i;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        env->tlb->mmu.ls3a5k.tlb[i].EHINV = 1;
    }

    /* TLB's helper functions */
    env->tlb->map_address = &ls3a5k_map_address;
    env->tlb->helper_tlbwr = ls3a5k_helper_tlbwr;
    env->tlb->helper_tlbfill = ls3a5k_helper_tlbfill;
    env->tlb->helper_tlbsrch = ls3a5k_helper_tlbsrch;
    env->tlb->helper_tlbrd = ls3a5k_helper_tlbrd;
    env->tlb->helper_tlbclr = ls3a5k_helper_tlbclr;
    env->tlb->helper_tlbflush = ls3a5k_helper_tlbflush;
    env->tlb->helper_invtlb = ls3a5k_helper_invtlb;
}
|
|
+
|
|
+void mmu_init(CPULOONGARCHState *env, const loongarch_def_t *def)
|
|
+{
|
|
+ env->tlb = g_malloc0(sizeof(CPULOONGARCHTLBContext));
|
|
+
|
|
+ switch (def->mmu_type) {
|
|
+ case MMU_TYPE_LS3A5K:
|
|
+ ls3a5k_mmu_init(env, def);
|
|
+ break;
|
|
+ default:
|
|
+ cpu_abort(CPU(loongarch_env_get_cpu(env)), "MMU type not supported\n");
|
|
+ }
|
|
+}
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
diff --git a/target/loongarch64/trans.inc.c b/target/loongarch64/trans.inc.c
|
|
new file mode 100644
|
|
index 0000000000..07bb0bb6e0
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/trans.inc.c
|
|
@@ -0,0 +1,3482 @@
|
|
+/*
|
|
+ * LOONGARCH emulation for QEMU - main translation routines Extension
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
/* SYSCALL: end translation with the system-call exception. */
static bool trans_syscall(DisasContext *ctx, arg_syscall *a)
{
    generate_exception_end(ctx, EXCP_SYSCALL);
    return true;
}
|
|
+
|
|
/* BREAK: end translation with the breakpoint exception. */
static bool trans_break(DisasContext *ctx, arg_break *a)
{
    generate_exception_end(ctx, EXCP_BREAK);
    return true;
}
|
|
+
|
|
/* DBCL (debug breakpoint): unsupported under TCG, raise
 * reserved-instruction instead. */
static bool trans_dbcl(DisasContext *ctx, arg_dbcl *a)
{
    /*
     * dbcl instruction is not support in tcg
     */
    generate_exception_end(ctx, EXCP_RI);
    return true;
}
|
|
+
|
|
/* ADDI.W: rd = rj + si12 (32-bit add semantics via gen_arith_imm). */
static bool trans_addi_w(DisasContext *ctx, arg_addi_w *a)
{
    gen_arith_imm(ctx, OPC_LARCH_ADDI_W, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* ADDI.D: rd = rj + si12 (64-bit add via gen_arith_imm). */
static bool trans_addi_d(DisasContext *ctx, arg_addi_d *a)
{
    gen_arith_imm(ctx, OPC_LARCH_ADDI_D, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
+static bool trans_slli_d(DisasContext *ctx, arg_slli_d *a)
|
|
+{
|
|
+ if (a->rd == 0) {
|
|
+ /* Nop */
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ tcg_gen_shli_tl(cpu_gpr[a->rd], t0, a->ui6);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
/* ANDI: rd = rj & ui12 (zero-extended immediate, via gen_logic_imm). */
static bool trans_andi(DisasContext *ctx, arg_andi *a)
{
    gen_logic_imm(ctx, OPC_LARCH_ANDI, a->rd, a->rj, a->ui12);
    return true;
}
|
|
+
|
|
+static bool trans_srli_d(DisasContext *ctx, arg_srli_d *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ tcg_gen_shri_tl(cpu_gpr[a->rd], t0, a->ui6);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
/* SLLI.W: rd = sign_extend32(rj << ui5). */
static bool trans_slli_w(DisasContext *ctx, arg_slli_w *a)
{
    if (a->rd == 0) {
        /* Nop */
        return true;
    }

    TCGv t0 = tcg_temp_new();

    gen_load_gpr(t0, a->rj);
    tcg_gen_shli_tl(t0, t0, a->ui5);
    /* 32-bit result is sign-extended into the 64-bit register. */
    tcg_gen_ext32s_tl(cpu_gpr[a->rd], t0);

    tcg_temp_free(t0);
    return true;
}
|
|
+
|
|
/* ADDU16I.D: rd = rj + (si16 << 16); with rj == r0 (always zero) this
 * collapses to a move of the shifted immediate. */
static bool trans_addu16i_d(DisasContext *ctx, arg_addu16i_d *a)
{
    if (a->rj != 0) {
        tcg_gen_addi_tl(cpu_gpr[a->rd], cpu_gpr[a->rj], a->si16 << 16);
    } else {
        tcg_gen_movi_tl(cpu_gpr[a->rd], a->si16 << 16);
    }
    return true;
}
|
|
+
|
|
/* LU12I.W: rd = si20 << 12. */
static bool trans_lu12i_w(DisasContext *ctx, arg_lu12i_w *a)
{
    tcg_gen_movi_tl(cpu_gpr[a->rd], a->si20 << 12);
    return true;
}
|
|
+
|
|
/* LU32I.D: replace the upper 32 bits of rd with si20 while keeping
 * rd[31:0] — implemented as concat(low = old rd, high = si20). */
static bool trans_lu32i_d(DisasContext *ctx, arg_lu32i_d *a)
{
    TCGv_i64 t0, t1;
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();

    tcg_gen_movi_tl(t0, a->si20);
    tcg_gen_concat_tl_i64(t1, cpu_gpr[a->rd], t0);
    gen_store_gpr(t1, a->rd);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
|
|
+
|
|
/* PCADDI: rd = pc + (si20 << 2). */
static bool trans_pcaddi(DisasContext *ctx, arg_pcaddi *a)
{
    target_ulong pc = ctx->base.pc_next;
    target_ulong addr = pc + (a->si20 << 2);
    tcg_gen_movi_tl(cpu_gpr[a->rd], addr);
    return true;
}
|
|
+
|
|
/* PCALAU12I: rd = (pc + (si20 << 12)) with the low 12 bits cleared. */
static bool trans_pcalau12i(DisasContext *ctx, arg_pcalau12i *a)
{
    target_ulong pc = ctx->base.pc_next;
    target_ulong addr = (pc + (a->si20 << 12)) & ~0xfff;
    tcg_gen_movi_tl(cpu_gpr[a->rd], addr);
    return true;
}
|
|
+
|
|
/* PCADDU12I: rd = pc + (si20 << 12). */
static bool trans_pcaddu12i(DisasContext *ctx, arg_pcaddu12i *a)
{
    target_ulong pc = ctx->base.pc_next;
    target_ulong addr = pc + (a->si20 << 12);
    tcg_gen_movi_tl(cpu_gpr[a->rd], addr);
    return true;
}
|
|
+
|
|
/* PCADDU18I: rd = pc + (si20 << 18); the cast keeps the shift in 64-bit
 * arithmetic. */
static bool trans_pcaddu18i(DisasContext *ctx, arg_pcaddu18i *a)
{
    target_ulong pc = ctx->base.pc_next;
    target_ulong addr = pc + ((target_ulong)(a->si20) << 18);
    tcg_gen_movi_tl(cpu_gpr[a->rd], addr);
    return true;
}
|
|
+
|
|
/* SLTI: rd = (rj < si12), signed compare via gen_slt_imm. */
static bool trans_slti(DisasContext *ctx, arg_slti *a)
{
    gen_slt_imm(ctx, OPC_LARCH_SLTI, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* SLTUI: rd = (rj < si12), unsigned compare via gen_slt_imm. */
static bool trans_sltui(DisasContext *ctx, arg_sltui *a)
{
    gen_slt_imm(ctx, OPC_LARCH_SLTIU, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* LU52I.D: rd = (si12 << 52) | rj[51:0] — replace the top 12 bits of rj
 * with the immediate. */
static bool trans_lu52i_d(DisasContext *ctx, arg_lu52i_d *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    gen_load_gpr(t1, a->rj);

    tcg_gen_movi_tl(t0, a->si12);
    tcg_gen_shli_tl(t0, t0, 52);
    /* Keep only bits [51:0] of rj before merging in the new top bits. */
    tcg_gen_andi_tl(t1, t1, 0xfffffffffffffU);
    tcg_gen_or_tl(cpu_gpr[a->rd], t0, t1);

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
|
|
+
|
|
/* ORI: rd = rj | ui12 (zero-extended immediate). */
static bool trans_ori(DisasContext *ctx, arg_ori *a)
{
    gen_logic_imm(ctx, OPC_LARCH_ORI, a->rd, a->rj, a->ui12);
    return true;
}
|
|
+
|
|
/* XORI: rd = rj ^ ui12 (zero-extended immediate). */
static bool trans_xori(DisasContext *ctx, arg_xori *a)
{
    gen_logic_imm(ctx, OPC_LARCH_XORI, a->rd, a->rj, a->ui12);
    return true;
}
|
|
+
|
|
+static bool trans_bstrins_d(DisasContext *ctx, arg_bstrins_d *a)
|
|
+{
|
|
+ int lsb = a->lsbd;
|
|
+ int msb = a->msbd;
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ TCGv t1 = tcg_temp_new();
|
|
+
|
|
+ if (lsb > msb) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ gen_load_gpr(t1, a->rj);
|
|
+ gen_load_gpr(t0, a->rd);
|
|
+ tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1);
|
|
+ gen_store_gpr(t0, a->rd);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ tcg_temp_free(t1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_bstrpick_d(DisasContext *ctx, arg_bstrpick_d *a)
|
|
+{
|
|
+ int lsb = a->lsbd;
|
|
+ int msb = a->msbd;
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ TCGv t1 = tcg_temp_new();
|
|
+
|
|
+ if (lsb > msb) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ gen_load_gpr(t1, a->rj);
|
|
+ gen_load_gpr(t0, a->rd);
|
|
+ tcg_gen_extract_tl(t0, t1, lsb, msb - lsb + 1);
|
|
+ gen_store_gpr(t0, a->rd);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ tcg_temp_free(t1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
/* BSTRINS.W: 32-bit bit-string insert, delegated to gen_bitops.
 * NOTE(review): unlike trans_bstrpick_w there is no lsbw > msbw
 * validation here — confirm gen_bitops rejects that case. */
static bool trans_bstrins_w(DisasContext *ctx, arg_bstrins_w *a)
{
    gen_bitops(ctx, OPC_LARCH_TRINS_W, a->rd, a->rj, a->lsbw, a->msbw);
    return true;
}
|
|
+
|
|
/* BSTRPICK.W: 32-bit bit-string extract; rejects lsbw > msbw, then
 * delegates to gen_bitops with the field width (msbw - lsbw). */
static bool trans_bstrpick_w(DisasContext *ctx, arg_bstrpick_w *a)
{
    if (a->lsbw > a->msbw) {
        return false;
    }
    gen_bitops(ctx, OPC_LARCH_TRPICK_W, a->rd, a->rj, a->lsbw,
               a->msbw - a->lsbw);
    return true;
}
|
|
+
|
|
/* LDPTR.W: word load at rj + (si14 << 2). */
static bool trans_ldptr_w(DisasContext *ctx, arg_ldptr_w *a)
{
    gen_ld(ctx, OPC_LARCH_LDPTR_W, a->rd, a->rj, a->si14 << 2);
    return true;
}
|
|
+
|
|
/* STPTR.W: word store at rj + (si14 << 2). */
static bool trans_stptr_w(DisasContext *ctx, arg_stptr_w *a)
{
    gen_st(ctx, OPC_LARCH_STPTR_W, a->rd, a->rj, a->si14 << 2);
    return true;
}
|
|
+
|
|
/* LDPTR.D: doubleword load at rj + (si14 << 2). */
static bool trans_ldptr_d(DisasContext *ctx, arg_ldptr_d *a)
{
    gen_ld(ctx, OPC_LARCH_LDPTR_D, a->rd, a->rj, a->si14 << 2);
    return true;
}
|
|
+
|
|
/* STPTR.D: doubleword store at rj + (si14 << 2). */
static bool trans_stptr_d(DisasContext *ctx, arg_stptr_d *a)
{
    gen_st(ctx, OPC_LARCH_STPTR_D, a->rd, a->rj, a->si14 << 2);
    return true;
}
|
|
+
|
|
/* LD.B: byte load at rj + si12. */
static bool trans_ld_b(DisasContext *ctx, arg_ld_b *a)
{
    gen_ld(ctx, OPC_LARCH_LD_B, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* LD.H: halfword load at rj + si12. */
static bool trans_ld_h(DisasContext *ctx, arg_ld_h *a)
{
    gen_ld(ctx, OPC_LARCH_LD_H, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* LD.W: word load at rj + si12. */
static bool trans_ld_w(DisasContext *ctx, arg_ld_w *a)
{
    gen_ld(ctx, OPC_LARCH_LD_W, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* LD.D: doubleword load at rj + si12. */
static bool trans_ld_d(DisasContext *ctx, arg_ld_d *a)
{
    gen_ld(ctx, OPC_LARCH_LD_D, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* ST.B: byte store at rj + si12. */
static bool trans_st_b(DisasContext *ctx, arg_st_b *a)
{
    gen_st(ctx, OPC_LARCH_ST_B, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* ST.H: halfword store at rj + si12. */
static bool trans_st_h(DisasContext *ctx, arg_st_h *a)
{
    gen_st(ctx, OPC_LARCH_ST_H, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/* ST.W: word store at rj + si12. */
static bool trans_st_w(DisasContext *ctx, arg_st_w *a)
{
    gen_st(ctx, OPC_LARCH_ST_W, a->rd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
+static bool trans_st_d(DisasContext *ctx, arg_st_d *a)
|
|
+{
|
|
+ gen_st(ctx, OPC_LARCH_ST_D, a->rd, a->rj, a->si12);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ld_bu(DisasContext *ctx, arg_ld_bu *a)
|
|
+{
|
|
+ gen_ld(ctx, OPC_LARCH_LD_BU, a->rd, a->rj, a->si12);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ld_hu(DisasContext *ctx, arg_ld_hu *a)
|
|
+{
|
|
+ gen_ld(ctx, OPC_LARCH_LD_HU, a->rd, a->rj, a->si12);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ld_wu(DisasContext *ctx, arg_ld_wu *a)
|
|
+{
|
|
+ gen_ld(ctx, OPC_LARCH_LD_WU, a->rd, a->rj, a->si12);
|
|
+ return true;
|
|
+}
|
|
+
|
|
/*
 * PRELD: software prefetch hint.  QEMU does not model caches, so the
 * instruction is accepted and emits no code.
 */
static bool trans_preld(DisasContext *ctx, arg_preld *a)
{
    /* Treat as NOP. */
    return true;
}
|
|
+
|
|
/*
 * Load-linked / store-conditional pairs.  The 14-bit immediate is
 * scaled by 4.  LL.{W,D} go through gen_ld with the LL opcodes;
 * SC.{W,D} go through gen_st_cond with a 32-bit-sign-extended
 * (MO_TESL) or 64-bit (MO_TEQ) memop.
 */
static bool trans_ll_w(DisasContext *ctx, arg_ll_w *a)
{
    gen_ld(ctx, OPC_LARCH_LL_W, a->rd, a->rj, a->si14 << 2);
    return true;
}

static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
{
    gen_st_cond(ctx, a->rd, a->rj, a->si14 << 2, MO_TESL, false);
    return true;
}

static bool trans_ll_d(DisasContext *ctx, arg_ll_d *a)
{
    gen_ld(ctx, OPC_LARCH_LL_D, a->rd, a->rj, a->si14 << 2);
    return true;
}

static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
{
    gen_st_cond(ctx, a->rd, a->rj, a->si14 << 2, MO_TEQ, false);
    return true;
}
|
|
+
|
|
/*
 * FP loads/stores with 12-bit signed displacement:
 * FLD.S / FST.S / FLD.D / FST.D, all via gen_fp_ldst.
 */
static bool trans_fld_s(DisasContext *ctx, arg_fld_s *a)
{
    gen_fp_ldst(ctx, OPC_LARCH_FLD_S, a->fd, a->rj, a->si12);
    return true;
}

static bool trans_fst_s(DisasContext *ctx, arg_fst_s *a)
{
    gen_fp_ldst(ctx, OPC_LARCH_FST_S, a->fd, a->rj, a->si12);
    return true;
}

static bool trans_fld_d(DisasContext *ctx, arg_fld_d *a)
{
    gen_fp_ldst(ctx, OPC_LARCH_FLD_D, a->fd, a->rj, a->si12);
    return true;
}

static bool trans_fst_d(DisasContext *ctx, arg_fst_d *a)
{
    gen_fp_ldst(ctx, OPC_LARCH_FST_D, a->fd, a->rj, a->si12);
    return true;
}
|
|
+
|
|
/*
 * Register-indexed sign-extending loads LDX.B / LDX.H:
 * address = rj + rk, result sign-extended into rd.
 */
static bool trans_ldx_b(DisasContext *ctx, arg_ldx_b *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_SB);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}

static bool trans_ldx_h(DisasContext *ctx, arg_ldx_h *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TESW | ctx->default_tcg_memop_mask);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
|
|
+
|
|
+static bool trans_ldx_w(DisasContext *ctx, arg_ldx_w *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ TCGv t1 = tcg_temp_new();
|
|
+ int mem_idx = ctx->mem_idx;
|
|
+
|
|
+ gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
|
|
+ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TESL | ctx->default_tcg_memop_mask);
|
|
+ gen_store_gpr(t1, a->rd);
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ldx_d(DisasContext *ctx, arg_ldx_d *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ TCGv t1 = tcg_temp_new();
|
|
+ int mem_idx = ctx->mem_idx;
|
|
+
|
|
+ gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
|
|
+ tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEQ | ctx->default_tcg_memop_mask);
|
|
+ gen_store_gpr(t1, a->rd);
|
|
+ tcg_temp_free(t1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
/*
 * Register-indexed stores STX.{B,H,W,D}:
 * address = rj + rk, value taken from rd.
 */
static bool trans_stx_b(DisasContext *ctx, arg_stx_b *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    gen_load_gpr(t1, a->rd);
    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}

static bool trans_stx_h(DisasContext *ctx, arg_stx_h *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    gen_load_gpr(t1, a->rd);
    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUW | ctx->default_tcg_memop_mask);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}

static bool trans_stx_w(DisasContext *ctx, arg_stx_w *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    gen_load_gpr(t1, a->rd);
    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEUL | ctx->default_tcg_memop_mask);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}

static bool trans_stx_d(DisasContext *ctx, arg_stx_d *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    gen_load_gpr(t1, a->rd);
    tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_TEQ | ctx->default_tcg_memop_mask);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
|
|
+
|
|
/*
 * Register-indexed zero-extending loads LDX.{BU,HU,WU}:
 * address = rj + rk, result zero-extended into rd.
 */
static bool trans_ldx_bu(DisasContext *ctx, arg_ldx_bu *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_UB);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}

static bool trans_ldx_hu(DisasContext *ctx, arg_ldx_hu *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEUW | ctx->default_tcg_memop_mask);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}

static bool trans_ldx_wu(DisasContext *ctx, arg_ldx_wu *a)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();
    int mem_idx = ctx->mem_idx;

    gen_op_addr_add(ctx, t0, cpu_gpr[a->rj], cpu_gpr[a->rk]);
    tcg_gen_qemu_ld_tl(t1, t0, mem_idx, MO_TEUL | ctx->default_tcg_memop_mask);
    gen_store_gpr(t1, a->rd);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
    return true;
}
|
|
+
|
|
/*
 * Register-indexed FP loads/stores FLDX.{S,D} / FSTX.{S,D}.
 * gen_flt3_ldst takes (dest-fpr, src-fpr, base, index); loads pass
 * fd as destination, stores pass fd as source.
 */
static bool trans_fldx_s(DisasContext *ctx, arg_fldx_s *a)
{
    gen_flt3_ldst(ctx, OPC_LARCH_FLDX_S, a->fd, 0, a->rj, a->rk);
    return true;
}

static bool trans_fldx_d(DisasContext *ctx, arg_fldx_d *a)
{
    gen_flt3_ldst(ctx, OPC_LARCH_FLDX_D, a->fd, 0, a->rj, a->rk);
    return true;
}

static bool trans_fstx_s(DisasContext *ctx, arg_fstx_s *a)
{
    gen_flt3_ldst(ctx, OPC_LARCH_FSTX_S, 0, a->fd, a->rj, a->rk);
    return true;
}

static bool trans_fstx_d(DisasContext *ctx, arg_fstx_d *a)
{
    gen_flt3_ldst(ctx, OPC_LARCH_FSTX_D, 0, a->fd, a->rj, a->rk);
    return true;
}
|
|
+
|
|
/*
 * AM* atomic read-modify-write instructions (no barrier variants).
 * TRANS_AM_W expands a 32-bit (MO_TESL) handler, TRANS_AM_D a 64-bit
 * (MO_TEQ) one.  Address comes from rj, operand from rk, the old
 * memory value is returned in rd.
 *
 * rd aliasing rj or rk (with rd != 0) is rejected and treated as an
 * illegal encoding (return false).
 * NOTE(review): the printf() diagnostic should probably use
 * qemu_log_mask(LOG_GUEST_ERROR, ...) instead of stdout — confirm
 * against the rest of the target code.
 */
#define TRANS_AM_W(name, op)                                               \
    static bool trans_##name(DisasContext *ctx, arg_##name *a)             \
    {                                                                      \
        if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) {      \
            printf("%s: warning, register equal\n", __func__);             \
            return false;                                                  \
        }                                                                  \
        int mem_idx = ctx->mem_idx;                                        \
        TCGv addr = tcg_temp_new();                                        \
        TCGv val = tcg_temp_new();                                         \
        TCGv ret = tcg_temp_new();                                         \
                                                                           \
        gen_load_gpr(addr, a->rj);                                         \
        gen_load_gpr(val, a->rk);                                          \
        tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx,                  \
                                 MO_TESL | ctx->default_tcg_memop_mask);   \
        gen_store_gpr(ret, a->rd);                                         \
                                                                           \
        tcg_temp_free(addr);                                               \
        tcg_temp_free(val);                                                \
        tcg_temp_free(ret);                                                \
        return true;                                                       \
    }
#define TRANS_AM_D(name, op)                                               \
    static bool trans_##name(DisasContext *ctx, arg_##name *a)             \
    {                                                                      \
        if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) {      \
            printf("%s: warning, register equal\n", __func__);             \
            return false;                                                  \
        }                                                                  \
        int mem_idx = ctx->mem_idx;                                        \
        TCGv addr = tcg_temp_new();                                        \
        TCGv val = tcg_temp_new();                                         \
        TCGv ret = tcg_temp_new();                                         \
                                                                           \
        gen_load_gpr(addr, a->rj);                                         \
        gen_load_gpr(val, a->rk);                                          \
        tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx,                  \
                                 MO_TEQ | ctx->default_tcg_memop_mask);    \
        gen_store_gpr(ret, a->rd);                                         \
                                                                           \
        tcg_temp_free(addr);                                               \
        tcg_temp_free(val);                                                \
        tcg_temp_free(ret);                                                \
        return true;                                                       \
    }
/* Instantiate both widths for one mnemonic. */
#define TRANS_AM(name, op)   \
    TRANS_AM_W(name##_w, op) \
    TRANS_AM_D(name##_d, op)
TRANS_AM(amswap, xchg)           /* trans_amswap_w, trans_amswap_d */
TRANS_AM(amadd, fetch_add)       /* trans_amadd_w, trans_amadd_d */
TRANS_AM(amand, fetch_and)       /* trans_amand_w, trans_amand_d */
TRANS_AM(amor, fetch_or)         /* trans_amor_w, trans_amor_d */
TRANS_AM(amxor, fetch_xor)       /* trans_amxor_w, trans_amxor_d */
TRANS_AM(ammax, fetch_smax)      /* trans_ammax_w, trans_ammax_d */
TRANS_AM(ammin, fetch_smin)      /* trans_ammin_w, trans_ammin_d */
TRANS_AM_W(ammax_wu, fetch_umax) /* trans_ammax_wu */
TRANS_AM_D(ammax_du, fetch_umax) /* trans_ammax_du */
TRANS_AM_W(ammin_wu, fetch_umin) /* trans_ammin_wu */
TRANS_AM_D(ammin_du, fetch_umin) /* trans_ammin_du */
#undef TRANS_AM
#undef TRANS_AM_W
#undef TRANS_AM_D
|
|
+
|
|
/*
 * AM*_DB atomic read-modify-write instructions with data barrier.
 * Identical to the TRANS_AM expansions above except that gen_sync(0x10)
 * emits a barrier before the atomic operation.
 *
 * NOTE(review): as with TRANS_AM, the printf() diagnostic should
 * probably be qemu_log_mask(LOG_GUEST_ERROR, ...).
 */
#define TRANS_AM_DB_W(name, op)                                            \
    static bool trans_##name(DisasContext *ctx, arg_##name *a)             \
    {                                                                      \
        if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) {      \
            printf("%s: warning, register equal\n", __func__);             \
            return false;                                                  \
        }                                                                  \
        int mem_idx = ctx->mem_idx;                                        \
        TCGv addr = tcg_temp_new();                                        \
        TCGv val = tcg_temp_new();                                         \
        TCGv ret = tcg_temp_new();                                         \
                                                                           \
        gen_sync(0x10);                                                    \
        gen_load_gpr(addr, a->rj);                                         \
        gen_load_gpr(val, a->rk);                                          \
        tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx,                  \
                                 MO_TESL | ctx->default_tcg_memop_mask);   \
        gen_store_gpr(ret, a->rd);                                         \
                                                                           \
        tcg_temp_free(addr);                                               \
        tcg_temp_free(val);                                                \
        tcg_temp_free(ret);                                                \
        return true;                                                       \
    }
#define TRANS_AM_DB_D(name, op)                                            \
    static bool trans_##name(DisasContext *ctx, arg_##name *a)             \
    {                                                                      \
        if ((a->rd != 0) && ((a->rj == a->rd) || (a->rk == a->rd))) {      \
            printf("%s: warning, register equal\n", __func__);             \
            return false;                                                  \
        }                                                                  \
        int mem_idx = ctx->mem_idx;                                        \
        TCGv addr = tcg_temp_new();                                        \
        TCGv val = tcg_temp_new();                                         \
        TCGv ret = tcg_temp_new();                                         \
                                                                           \
        gen_sync(0x10);                                                    \
        gen_load_gpr(addr, a->rj);                                         \
        gen_load_gpr(val, a->rk);                                          \
        tcg_gen_atomic_##op##_tl(ret, addr, val, mem_idx,                  \
                                 MO_TEQ | ctx->default_tcg_memop_mask);    \
        gen_store_gpr(ret, a->rd);                                         \
                                                                           \
        tcg_temp_free(addr);                                               \
        tcg_temp_free(val);                                                \
        tcg_temp_free(ret);                                                \
        return true;                                                       \
    }
/* Instantiate both widths for one mnemonic. */
#define TRANS_AM_DB(name, op)      \
    TRANS_AM_DB_W(name##_db_w, op) \
    TRANS_AM_DB_D(name##_db_d, op)
TRANS_AM_DB(amswap, xchg)              /* trans_amswap_db_w, trans_amswap_db_d */
TRANS_AM_DB(amadd, fetch_add)          /* trans_amadd_db_w, trans_amadd_db_d */
TRANS_AM_DB(amand, fetch_and)          /* trans_amand_db_w, trans_amand_db_d */
TRANS_AM_DB(amor, fetch_or)            /* trans_amor_db_w, trans_amor_db_d */
TRANS_AM_DB(amxor, fetch_xor)          /* trans_amxor_db_w, trans_amxor_db_d */
TRANS_AM_DB(ammax, fetch_smax)         /* trans_ammax_db_w, trans_ammax_db_d */
TRANS_AM_DB(ammin, fetch_smin)         /* trans_ammin_db_w, trans_ammin_db_d */
TRANS_AM_DB_W(ammax_db_wu, fetch_umax) /* trans_ammax_db_wu */
TRANS_AM_DB_D(ammax_db_du, fetch_umax) /* trans_ammax_db_du */
TRANS_AM_DB_W(ammin_db_wu, fetch_umin) /* trans_ammin_db_wu */
TRANS_AM_DB_D(ammin_db_du, fetch_umin) /* trans_ammin_db_du */
#undef TRANS_AM_DB
#undef TRANS_AM_DB_W
#undef TRANS_AM_DB_D
|
|
+
|
|
/*
 * DBAR: data barrier, mapped onto a TCG memory barrier via gen_sync
 * with the instruction's hint field.
 */
static bool trans_dbar(DisasContext *ctx, arg_dbar *a)
{
    gen_sync(a->whint);
    return true;
}

/*
 * IBAR: instruction barrier.  No code needs to be emitted under TCG,
 * but the translation block is ended so that subsequently modified
 * code is re-translated.
 */
static bool trans_ibar(DisasContext *ctx, arg_ibar *a)
{
    /*
     * The barrier itself is a no-op in QEMU,
     * however we need to end the translation block.
     */
    ctx->base.is_jmp = DISAS_STOP;
    return true;
}
|
|
+
|
|
/*
 * Helper macros for the bound-checked load/store instructions below.
 * Each loads rj/rk from the instruction argument struct `a` (assumed
 * in scope at the expansion site) and calls the asrtgt_d/asrtle_d
 * helper, which performs the bound check in C code.
 */
#define ASRTGT                                \
    do {                                      \
        TCGv t1 = tcg_temp_new();             \
        TCGv t2 = tcg_temp_new();             \
        gen_load_gpr(t1, a->rj);              \
        gen_load_gpr(t2, a->rk);              \
        gen_helper_asrtgt_d(cpu_env, t1, t2); \
        tcg_temp_free(t1);                    \
        tcg_temp_free(t2);                    \
    } while (0)

#define ASRTLE                                \
    do {                                      \
        TCGv t1 = tcg_temp_new();             \
        TCGv t2 = tcg_temp_new();             \
        gen_load_gpr(t1, a->rj);              \
        gen_load_gpr(t2, a->rk);              \
        gen_helper_asrtle_d(cpu_env, t1, t2); \
        tcg_temp_free(t1);                    \
        tcg_temp_free(t2);                    \
    } while (0)
|
|
+
|
|
/*
 * Bound-checked FP loads/stores FLD{GT,LE}.{S,D} / FST{GT,LE}.{S,D}.
 * The ASRTGT/ASRTLE helper runs the bound check (raising an exception
 * on failure) before the actual access is emitted.
 */
static bool trans_fldgt_s(DisasContext *ctx, arg_fldgt_s *a)
{
    ASRTGT;
    gen_flt3_ldst(ctx, OPC_LARCH_FLDGT_S, a->fd, 0, a->rj, a->rk);
    return true;
}

static bool trans_fldgt_d(DisasContext *ctx, arg_fldgt_d *a)
{
    ASRTGT;
    gen_flt3_ldst(ctx, OPC_LARCH_FLDGT_D, a->fd, 0, a->rj, a->rk);
    return true;
}

static bool trans_fldle_s(DisasContext *ctx, arg_fldle_s *a)
{
    ASRTLE;
    gen_flt3_ldst(ctx, OPC_LARCH_FLDLE_S, a->fd, 0, a->rj, a->rk);
    return true;
}

static bool trans_fldle_d(DisasContext *ctx, arg_fldle_d *a)
{
    ASRTLE;
    gen_flt3_ldst(ctx, OPC_LARCH_FLDLE_D, a->fd, 0, a->rj, a->rk);
    return true;
}

static bool trans_fstgt_s(DisasContext *ctx, arg_fstgt_s *a)
{
    ASRTGT;
    gen_flt3_ldst(ctx, OPC_LARCH_FSTGT_S, 0, a->fd, a->rj, a->rk);
    return true;
}

static bool trans_fstgt_d(DisasContext *ctx, arg_fstgt_d *a)
{
    ASRTGT;
    gen_flt3_ldst(ctx, OPC_LARCH_FSTGT_D, 0, a->fd, a->rj, a->rk);
    return true;
}

static bool trans_fstle_s(DisasContext *ctx, arg_fstle_s *a)
{
    ASRTLE;
    gen_flt3_ldst(ctx, OPC_LARCH_FSTLE_S, 0, a->fd, a->rj, a->rk);
    return true;
}

static bool trans_fstle_d(DisasContext *ctx, arg_fstle_d *a)
{
    ASRTLE;
    gen_flt3_ldst(ctx, OPC_LARCH_FSTLE_D, 0, a->fd, a->rj, a->rk);
    return true;
}
|
|
+
|
|
/*
 * Build a local argument struct named `arg` of type arg_<name> from the
 * current instruction's rd/rj/rk, so a bound-checked instruction can
 * reuse the plain indexed trans_ldx_*/trans_stx_* handlers.
 */
#define DECL_ARG(name)   \
    arg_##name arg = {   \
        .rd = a->rd,     \
        .rj = a->rj,     \
        .rk = a->rk,     \
    };
|
|
+
|
|
/*
 * Bound-checked integer loads/stores LD{GT,LE}.{B,H,W,D} and
 * ST{GT,LE}.{B,H,W,D}.  Each emits the ASRTGT/ASRTLE bound check and
 * then delegates to the corresponding register-indexed handler via a
 * DECL_ARG-built argument struct.
 */
static bool trans_ldgt_b(DisasContext *ctx, arg_ldgt_b *a)
{
    ASRTGT;
    DECL_ARG(ldx_b)
    trans_ldx_b(ctx, &arg);
    return true;
}

static bool trans_ldgt_h(DisasContext *ctx, arg_ldgt_h *a)
{
    ASRTGT;
    DECL_ARG(ldx_h)
    trans_ldx_h(ctx, &arg);
    return true;
}

static bool trans_ldgt_w(DisasContext *ctx, arg_ldgt_w *a)
{
    ASRTGT;
    DECL_ARG(ldx_w)
    trans_ldx_w(ctx, &arg);
    return true;
}

static bool trans_ldgt_d(DisasContext *ctx, arg_ldgt_d *a)
{
    ASRTGT;
    DECL_ARG(ldx_d)
    trans_ldx_d(ctx, &arg);
    return true;
}

static bool trans_ldle_b(DisasContext *ctx, arg_ldle_b *a)
{
    ASRTLE;
    DECL_ARG(ldx_b)
    trans_ldx_b(ctx, &arg);
    return true;
}

static bool trans_ldle_h(DisasContext *ctx, arg_ldle_h *a)
{
    ASRTLE;
    DECL_ARG(ldx_h)
    trans_ldx_h(ctx, &arg);
    return true;
}

static bool trans_ldle_w(DisasContext *ctx, arg_ldle_w *a)
{
    ASRTLE;
    DECL_ARG(ldx_w)
    trans_ldx_w(ctx, &arg);
    return true;
}

static bool trans_ldle_d(DisasContext *ctx, arg_ldle_d *a)
{
    ASRTLE;
    DECL_ARG(ldx_d)
    trans_ldx_d(ctx, &arg);
    return true;
}

static bool trans_stgt_b(DisasContext *ctx, arg_stgt_b *a)
{
    ASRTGT;
    DECL_ARG(stx_b)
    trans_stx_b(ctx, &arg);
    return true;
}

static bool trans_stgt_h(DisasContext *ctx, arg_stgt_h *a)
{
    ASRTGT;
    DECL_ARG(stx_h)
    trans_stx_h(ctx, &arg);
    return true;
}

static bool trans_stgt_w(DisasContext *ctx, arg_stgt_w *a)
{
    ASRTGT;
    DECL_ARG(stx_w)
    trans_stx_w(ctx, &arg);
    return true;
}

static bool trans_stgt_d(DisasContext *ctx, arg_stgt_d *a)
{
    ASRTGT;
    DECL_ARG(stx_d)
    trans_stx_d(ctx, &arg);
    return true;
}

static bool trans_stle_b(DisasContext *ctx, arg_stle_b *a)
{
    ASRTLE;
    DECL_ARG(stx_b)
    trans_stx_b(ctx, &arg);
    return true;
}

static bool trans_stle_h(DisasContext *ctx, arg_stle_h *a)
{
    ASRTLE;
    DECL_ARG(stx_h)
    trans_stx_h(ctx, &arg);
    return true;
}

static bool trans_stle_w(DisasContext *ctx, arg_stle_w *a)
{
    ASRTLE;
    DECL_ARG(stx_w)
    trans_stx_w(ctx, &arg);
    return true;
}

static bool trans_stle_d(DisasContext *ctx, arg_stle_d *a)
{
    ASRTLE;
    DECL_ARG(stx_d)
    trans_stx_d(ctx, &arg);
    return true;
}

#undef ASRTGT
#undef ASRTLE
#undef DECL_ARG
|
|
+
|
|
/*
 * BEQZ / BNEZ: compare rj against zero and branch; the 21-bit offset
 * is scaled by 4.
 */
static bool trans_beqz(DisasContext *ctx, arg_beqz *a)
{
    gen_compute_branch(ctx, OPC_LARCH_BEQZ, 4, a->rj, 0, a->offs21 << 2);
    return true;
}

static bool trans_bnez(DisasContext *ctx, arg_bnez *a)
{
    gen_compute_branch(ctx, OPC_LARCH_BNEZ, 4, a->rj, 0, a->offs21 << 2);
    return true;
}
|
|
+
|
|
/*
 * BCEQZ / BCNEZ: branch on FP condition flag cj being zero/non-zero.
 * The flag is read via the movcf2reg helper, the comparison result is
 * latched in the global `bcond`, and LARCH_HFLAG_BC + btarget arm the
 * delayed branch machinery for this TB.
 */
static bool trans_bceqz(DisasContext *ctx, arg_bceqz *a)
{
    TCGv_i32 cj = tcg_const_i32(a->cj);
    TCGv v0 = tcg_temp_new();
    TCGv v1 = tcg_const_i64(0);

    gen_helper_movcf2reg(v0, cpu_env, cj);
    tcg_gen_setcond_tl(TCG_COND_EQ, bcond, v0, v1);
    ctx->hflags |= LARCH_HFLAG_BC;
    ctx->btarget = ctx->base.pc_next + (a->offs21 << 2);

    tcg_temp_free_i32(cj);
    tcg_temp_free(v0);
    tcg_temp_free(v1);
    return true;
}

static bool trans_bcnez(DisasContext *ctx, arg_bcnez *a)
{
    TCGv_i32 cj = tcg_const_i32(a->cj);
    TCGv v0 = tcg_temp_new();
    TCGv v1 = tcg_const_i64(0);

    gen_helper_movcf2reg(v0, cpu_env, cj);
    tcg_gen_setcond_tl(TCG_COND_NE, bcond, v0, v1);
    ctx->hflags |= LARCH_HFLAG_BC;
    ctx->btarget = ctx->base.pc_next + (a->offs21 << 2);

    tcg_temp_free_i32(cj);
    tcg_temp_free(v0);
    tcg_temp_free(v1);
    return true;
}
|
|
+
|
|
/*
 * B: unconditional PC-relative branch (26-bit offset scaled by 4).
 */
static bool trans_b(DisasContext *ctx, arg_b *a)
{
    gen_compute_branch(ctx, OPC_LARCH_B, 4, 0, 0, a->offs << 2);
    return true;
}

/*
 * BL: branch-and-link; return address (pc + 4) is written to r1 ($ra)
 * before the unconditional branch is emitted.
 */
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    ctx->btarget = ctx->base.pc_next + (a->offs << 2);
    tcg_gen_movi_tl(cpu_gpr[1], ctx->base.pc_next + 4);
    ctx->hflags |= LARCH_HFLAG_B;
    gen_branch(ctx, 4);
    return true;
}
|
|
+
|
|
/*
 * Two-register conditional branches BLT/BGE/BLTU/BGEU/BEQ/BNE.
 * All compare rj against rd (note: rd is a source here, per the ISA
 * encoding) with a 16-bit offset scaled by 4.
 */
static bool trans_blt(DisasContext *ctx, arg_blt *a)
{
    gen_compute_branch(ctx, OPC_LARCH_BLT, 4, a->rj, a->rd, a->offs16 << 2);
    return true;
}

static bool trans_bge(DisasContext *ctx, arg_bge *a)
{
    gen_compute_branch(ctx, OPC_LARCH_BGE, 4, a->rj, a->rd, a->offs16 << 2);
    return true;
}

static bool trans_bltu(DisasContext *ctx, arg_bltu *a)
{
    gen_compute_branch(ctx, OPC_LARCH_BLTU, 4, a->rj, a->rd, a->offs16 << 2);
    return true;
}

static bool trans_bgeu(DisasContext *ctx, arg_bgeu *a)
{
    gen_compute_branch(ctx, OPC_LARCH_BGEU, 4, a->rj, a->rd, a->offs16 << 2);
    return true;
}

static bool trans_beq(DisasContext *ctx, arg_beq *a)
{
    gen_compute_branch(ctx, OPC_LARCH_BEQ, 4, a->rj, a->rd, a->offs16 << 2);
    return true;
}

static bool trans_bne(DisasContext *ctx, arg_bne *a)
{
    gen_compute_branch(ctx, OPC_LARCH_BNE, 4, a->rj, a->rd, a->offs16 << 2);
    return true;
}
|
|
+
|
|
/*
 * JIRL: indirect jump to rj + (offs16 << 2), writing the return
 * address (pc + 4) into rd unless rd is the zero register.
 * The register-branch hflag is set and gen_branch emits the jump.
 */
static bool trans_jirl(DisasContext *ctx, arg_jirl *a)
{
    gen_base_offset_addr(ctx, btarget, a->rj, a->offs16 << 2);
    if (a->rd != 0) {
        /* Link register write is skipped for r0 (hard-wired zero). */
        tcg_gen_movi_tl(cpu_gpr[a->rd], ctx->base.pc_next + 4);
    }
    ctx->hflags |= LARCH_HFLAG_BR;
    gen_branch(ctx, 4);

    return true;
}
|
|
+
|
|
+#define TRANS_F4FR(name, fmt, op, bits) \
|
|
+ static bool trans_##name##_##fmt(DisasContext *ctx, \
|
|
+ arg_##name##_##fmt *a) \
|
|
+ { \
|
|
+ check_cp1_enabled(ctx); \
|
|
+ TCGv_i##bits fp0 = tcg_temp_new_i##bits(); \
|
|
+ TCGv_i##bits fp1 = tcg_temp_new_i##bits(); \
|
|
+ TCGv_i##bits fp2 = tcg_temp_new_i##bits(); \
|
|
+ TCGv_i##bits fp3 = tcg_temp_new_i##bits(); \
|
|
+ check_cp1_enabled(ctx); \
|
|
+ gen_load_fpr##bits(ctx, fp0, a->fj); \
|
|
+ gen_load_fpr##bits(ctx, fp1, a->fk); \
|
|
+ gen_load_fpr##bits(ctx, fp2, a->fa); \
|
|
+ gen_helper_float_##op##_##fmt(fp3, cpu_env, fp0, fp1, fp2); \
|
|
+ gen_store_fpr##bits(ctx, fp3, a->fd); \
|
|
+ tcg_temp_free_i##bits(fp3); \
|
|
+ tcg_temp_free_i##bits(fp2); \
|
|
+ tcg_temp_free_i##bits(fp1); \
|
|
+ tcg_temp_free_i##bits(fp0); \
|
|
+ return true; \
|
|
+ }
|
|
+
|
|
+TRANS_F4FR(fmadd, s, maddf, 32) /* trans_fmadd_s */
|
|
+TRANS_F4FR(fmadd, d, maddf, 64) /* trans_fmadd_d */
|
|
+TRANS_F4FR(fmsub, s, msubf, 32) /* trans_fmsub_s */
|
|
+TRANS_F4FR(fmsub, d, msubf, 64) /* trans_fmsub_d */
|
|
+TRANS_F4FR(fnmadd, s, nmaddf, 32) /* trans_fnmadd_s */
|
|
+TRANS_F4FR(fnmadd, d, nmaddf, 64) /* trans_fnmadd_d */
|
|
+TRANS_F4FR(fnmsub, s, nmsubf, 32) /* trans_fnmsub_s */
|
|
+TRANS_F4FR(fnmsub, d, nmsubf, 64) /* trans_fnmsub_d */
|
|
+#undef TRANS_F4FR
|
|
+
|
|
/*
 * Three-operand FP arithmetic: FADD/FSUB/FMUL/FDIV/FMAX/FMIN/
 * FMAXA/FMINA in single and double precision, all routed through
 * gen_farith.  Note the operand order: gen_farith takes (fk, fj, fd),
 * i.e. fk before fj.
 */
static bool trans_fadd_s(DisasContext *ctx, arg_fadd_s *a)
{
    gen_farith(ctx, OPC_LARCH_FADD_S, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fadd_d(DisasContext *ctx, arg_fadd_d *a)
{
    gen_farith(ctx, OPC_LARCH_FADD_D, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fsub_s(DisasContext *ctx, arg_fsub_s *a)
{
    gen_farith(ctx, OPC_LARCH_FSUB_S, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fsub_d(DisasContext *ctx, arg_fsub_d *a)
{
    gen_farith(ctx, OPC_LARCH_FSUB_D, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmul_s(DisasContext *ctx, arg_fmul_s *a)
{
    gen_farith(ctx, OPC_LARCH_FMUL_S, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmul_d(DisasContext *ctx, arg_fmul_d *a)
{
    gen_farith(ctx, OPC_LARCH_FMUL_D, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fdiv_s(DisasContext *ctx, arg_fdiv_s *a)
{
    gen_farith(ctx, OPC_LARCH_FDIV_S, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fdiv_d *a)
{
    gen_farith(ctx, OPC_LARCH_FDIV_D, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmax_s(DisasContext *ctx, arg_fmax_s *a)
{
    gen_farith(ctx, OPC_LARCH_FMAX_S, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmax_d(DisasContext *ctx, arg_fmax_d *a)
{
    gen_farith(ctx, OPC_LARCH_FMAX_D, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmin_s(DisasContext *ctx, arg_fmin_s *a)
{
    gen_farith(ctx, OPC_LARCH_FMIN_S, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmin_d(DisasContext *ctx, arg_fmin_d *a)
{
    gen_farith(ctx, OPC_LARCH_FMIN_D, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmaxa_s(DisasContext *ctx, arg_fmaxa_s *a)
{
    gen_farith(ctx, OPC_LARCH_FMAXA_S, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmaxa_d(DisasContext *ctx, arg_fmaxa_d *a)
{
    gen_farith(ctx, OPC_LARCH_FMAXA_D, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmina_s(DisasContext *ctx, arg_fmina_s *a)
{
    gen_farith(ctx, OPC_LARCH_FMINA_S, a->fk, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmina_d(DisasContext *ctx, arg_fmina_d *a)
{
    gen_farith(ctx, OPC_LARCH_FMINA_D, a->fk, a->fj, a->fd, 0);
    return true;
}
|
|
+
|
|
/*
 * FSCALEB.{S,D}: fd = fj scaled by 2^fk, implemented with the
 * float_exp2 helper after an FPU-enabled check.
 */
static bool trans_fscaleb_s(DisasContext *ctx, arg_fscaleb_s *a)
{
    TCGv_i32 fp0 = tcg_temp_new_i32();
    TCGv_i32 fp1 = tcg_temp_new_i32();

    check_cp1_enabled(ctx);
    gen_load_fpr32(ctx, fp0, a->fj);
    gen_load_fpr32(ctx, fp1, a->fk);
    gen_helper_float_exp2_s(fp0, cpu_env, fp0, fp1);
    tcg_temp_free_i32(fp1);
    gen_store_fpr32(ctx, fp0, a->fd);
    tcg_temp_free_i32(fp0);
    return true;
}

static bool trans_fscaleb_d(DisasContext *ctx, arg_fscaleb_d *a)
{
    TCGv_i64 fp0 = tcg_temp_new_i64();
    TCGv_i64 fp1 = tcg_temp_new_i64();

    check_cp1_enabled(ctx);
    gen_load_fpr64(ctx, fp0, a->fj);
    gen_load_fpr64(ctx, fp1, a->fk);
    gen_helper_float_exp2_d(fp0, cpu_env, fp0, fp1);
    tcg_temp_free_i64(fp1);
    gen_store_fpr64(ctx, fp0, a->fd);
    tcg_temp_free_i64(fp0);
    return true;
}
|
|
+
|
|
/*
 * FCOPYSIGN.{S,D}: fd gets the magnitude of fj with the sign of fk.
 * Implemented with a bit deposit: all bits except the sign bit are
 * taken from fj (fp0), the sign bit is kept from fk (fp1).
 */
static bool trans_fcopysign_s(DisasContext *ctx, arg_fcopysign_s *a)
{
    TCGv_i32 fp0 = tcg_temp_new_i32();
    TCGv_i32 fp1 = tcg_temp_new_i32();
    TCGv_i32 fp2 = tcg_temp_new_i32();

    check_cp1_enabled(ctx);
    gen_load_fpr32(ctx, fp0, a->fj);
    gen_load_fpr32(ctx, fp1, a->fk);
    /* Deposit bits [30:0] of fj into fk, preserving fk's sign bit. */
    tcg_gen_deposit_i32(fp2, fp1, fp0, 0, 31);
    gen_store_fpr32(ctx, fp2, a->fd);

    tcg_temp_free_i32(fp2);
    tcg_temp_free_i32(fp1);
    tcg_temp_free_i32(fp0);
    return true;
}

static bool trans_fcopysign_d(DisasContext *ctx, arg_fcopysign_d *a)
{
    TCGv_i64 fp0 = tcg_temp_new_i64();
    TCGv_i64 fp1 = tcg_temp_new_i64();
    TCGv_i64 fp2 = tcg_temp_new_i64();

    check_cp1_enabled(ctx);
    gen_load_fpr64(ctx, fp0, a->fj);
    gen_load_fpr64(ctx, fp1, a->fk);
    /* Deposit bits [62:0] of fj into fk, preserving fk's sign bit. */
    tcg_gen_deposit_i64(fp2, fp1, fp0, 0, 63);
    gen_store_fpr64(ctx, fp2, a->fd);

    tcg_temp_free_i64(fp2);
    tcg_temp_free_i64(fp1);
    tcg_temp_free_i64(fp0);
    return true;
}
|
|
+
|
|
/*
 * Unary FP operations in single and double precision:
 * FABS, FNEG, FLOGB, FCLASS, FSQRT, FRECIP, FRSQRT and FMOV.
 * Most forward to gen_farith with a single source (fj); FLOGB uses a
 * dedicated helper with explicit temp management.
 */
static bool trans_fabs_s(DisasContext *ctx, arg_fabs_s *a)
{
    gen_farith(ctx, OPC_LARCH_FABS_S, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_fabs_d(DisasContext *ctx, arg_fabs_d *a)
{
    gen_farith(ctx, OPC_LARCH_FABS_D, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_fneg_s(DisasContext *ctx, arg_fneg_s *a)
{
    gen_farith(ctx, OPC_LARCH_FNEG_S, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_fneg_d(DisasContext *ctx, arg_fneg_d *a)
{
    gen_farith(ctx, OPC_LARCH_FNEG_D, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_flogb_s(DisasContext *ctx, arg_flogb_s *a)
{
    TCGv_i32 fp0 = tcg_temp_new_i32();
    TCGv_i32 fp1 = tcg_temp_new_i32();

    check_cp1_enabled(ctx);
    gen_load_fpr32(ctx, fp0, a->fj);
    gen_helper_float_logb_s(fp1, cpu_env, fp0);
    gen_store_fpr32(ctx, fp1, a->fd);

    tcg_temp_free_i32(fp0);
    tcg_temp_free_i32(fp1);
    return true;
}

static bool trans_flogb_d(DisasContext *ctx, arg_flogb_d *a)
{
    TCGv_i64 fp0 = tcg_temp_new_i64();
    TCGv_i64 fp1 = tcg_temp_new_i64();

    check_cp1_enabled(ctx);
    gen_load_fpr64(ctx, fp0, a->fj);
    gen_helper_float_logb_d(fp1, cpu_env, fp0);
    gen_store_fpr64(ctx, fp1, a->fd);

    tcg_temp_free_i64(fp0);
    tcg_temp_free_i64(fp1);
    return true;
}

static bool trans_fclass_s(DisasContext *ctx, arg_fclass_s *a)
{
    gen_farith(ctx, OPC_LARCH_FCLASS_S, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_fclass_d(DisasContext *ctx, arg_fclass_d *a)
{
    gen_farith(ctx, OPC_LARCH_FCLASS_D, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_fsqrt_s(DisasContext *ctx, arg_fsqrt_s *a)
{
    gen_farith(ctx, OPC_LARCH_FSQRT_S, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fsqrt_d *a)
{
    gen_farith(ctx, OPC_LARCH_FSQRT_D, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_frecip_s(DisasContext *ctx, arg_frecip_s *a)
{
    gen_farith(ctx, OPC_LARCH_FRECIP_S, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_frecip_d(DisasContext *ctx, arg_frecip_d *a)
{
    gen_farith(ctx, OPC_LARCH_FRECIP_D, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_frsqrt_s(DisasContext *ctx, arg_frsqrt_s *a)
{
    gen_farith(ctx, OPC_LARCH_FRSQRT_S, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_frsqrt_d(DisasContext *ctx, arg_frsqrt_d *a)
{
    gen_farith(ctx, OPC_LARCH_FRSQRT_D, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmov_s(DisasContext *ctx, arg_fmov_s *a)
{
    gen_farith(ctx, OPC_LARCH_FMOV_S, 0, a->fj, a->fd, 0);
    return true;
}

static bool trans_fmov_d(DisasContext *ctx, arg_fmov_d *a)
{
    gen_farith(ctx, OPC_LARCH_FMOV_D, 0, a->fj, a->fd, 0);
    return true;
}
|
|
+
|
|
/*
 * Moves between general-purpose and FP registers:
 * MOVGR2FR.{W,D}, MOVGR2FRH.W, MOVFR2GR.{S,D}, MOVFRH2GR.S,
 * all via the gen_cp1 dispatcher.
 */
static bool trans_movgr2fr_w(DisasContext *ctx, arg_movgr2fr_w *a)
{
    gen_cp1(ctx, OPC_LARCH_GR2FR_W, a->rj, a->fd);
    return true;
}

static bool trans_movgr2fr_d(DisasContext *ctx, arg_movgr2fr_d *a)
{
    gen_cp1(ctx, OPC_LARCH_GR2FR_D, a->rj, a->fd);
    return true;
}

static bool trans_movgr2frh_w(DisasContext *ctx, arg_movgr2frh_w *a)
{
    gen_cp1(ctx, OPC_LARCH_GR2FRH_W, a->rj, a->fd);
    return true;
}

static bool trans_movfr2gr_s(DisasContext *ctx, arg_movfr2gr_s *a)
{
    gen_cp1(ctx, OPC_LARCH_FR2GR_S, a->rd, a->fj);
    return true;
}

static bool trans_movfr2gr_d(DisasContext *ctx, arg_movfr2gr_d *a)
{
    gen_cp1(ctx, OPC_LARCH_FR2GR_D, a->rd, a->fj);
    return true;
}

static bool trans_movfrh2gr_s(DisasContext *ctx, arg_movfrh2gr_s *a)
{
    gen_cp1(ctx, OPC_LARCH_FRH2GR_S, a->rd, a->fj);
    return true;
}
|
|
+
|
|
/*
 * MOVGR2FCSR: write GPR rj into FP control/status register fcsrd via
 * the movgr2fcsr helper.  Since the FCSR write can change hflags
 * (e.g. rounding/exception state tracked in the translator), the TB
 * is ended afterwards.
 */
static bool trans_movgr2fcsr(DisasContext *ctx, arg_movgr2fcsr *a)
{
    TCGv t0 = tcg_temp_new();

    check_cp1_enabled(ctx);
    gen_load_gpr(t0, a->rj);
    save_cpu_state(ctx, 0);
    {
        TCGv_i32 fs_tmp = tcg_const_i32(a->fcsrd);
        gen_helper_0e2i(movgr2fcsr, t0, fs_tmp, a->rj);
        tcg_temp_free_i32(fs_tmp);
    }
    /* Stop translation as we may have changed hflags */
    ctx->base.is_jmp = DISAS_STOP;

    tcg_temp_free(t0);
    return true;
}
|
|
+
|
|
/*
 * MOVFCSR2GR: read FP control/status register fcsrs into GPR rd via
 * the movfcsr2gr helper; gen_store_gpr handles the rd == 0 case.
 */
static bool trans_movfcsr2gr(DisasContext *ctx, arg_movfcsr2gr *a)
{
    TCGv t0 = tcg_temp_new();
    gen_helper_1e0i(movfcsr2gr, t0, a->fcsrs);
    gen_store_gpr(t0, a->rd);
    tcg_temp_free(t0);
    return true;
}
|
|
+
|
|
/*
 * MOVFR2CF: copy (the relevant bit of) FP register fj into FP
 * condition flag cd via the movreg2cf helper.
 */
static bool trans_movfr2cf(DisasContext *ctx, arg_movfr2cf *a)
{
    TCGv_i64 fp0 = tcg_temp_new_i64();
    TCGv_i32 cd = tcg_const_i32(a->cd);

    check_cp1_enabled(ctx);
    gen_load_fpr64(ctx, fp0, a->fj);
    gen_helper_movreg2cf(cpu_env, cd, fp0);

    tcg_temp_free_i64(fp0);
    tcg_temp_free_i32(cd);
    return true;
}
|
|
+
|
|
+static bool trans_movcf2fr(DisasContext *ctx, arg_movcf2fr *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ TCGv_i32 cj = tcg_const_i32(a->cj);
|
|
+
|
|
+ check_cp1_enabled(ctx);
|
|
+ gen_helper_movcf2reg(t0, cpu_env, cj);
|
|
+ gen_store_fpr64(ctx, t0, a->fd);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_movgr2cf(DisasContext *ctx, arg_movgr2cf *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ TCGv_i32 cd = tcg_const_i32(a->cd);
|
|
+
|
|
+ check_cp1_enabled(ctx);
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_helper_movreg2cf(cpu_env, cd, t0);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ tcg_temp_free_i32(cd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_movcf2gr(DisasContext *ctx, arg_movcf2gr *a)
|
|
+{
|
|
+ TCGv_i32 cj = tcg_const_i32(a->cj);
|
|
+
|
|
+ check_cp1_enabled(ctx);
|
|
+ gen_helper_movcf2reg(cpu_gpr[a->rd], cpu_env, cj);
|
|
+
|
|
+ tcg_temp_free_i32(cj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_fcvt_s_d(DisasContext *ctx, arg_fcvt_s_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FCVT_S_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_fcvt_d_s(DisasContext *ctx, arg_fcvt_d_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FCVT_D_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrm_w_s(DisasContext *ctx, arg_ftintrm_l_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRM_W_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrm_w_d(DisasContext *ctx, arg_ftintrm_l_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRM_W_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrm_l_s(DisasContext *ctx, arg_ftintrm_l_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRM_L_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrm_l_d(DisasContext *ctx, arg_ftintrm_l_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRM_L_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrp_w_s(DisasContext *ctx, arg_ftintrp_w_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRP_W_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrp_w_d(DisasContext *ctx, arg_ftintrp_w_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRP_W_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrp_l_s(DisasContext *ctx, arg_ftintrp_l_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRP_L_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrp_l_d(DisasContext *ctx, arg_ftintrp_l_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRP_L_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrz_w_s(DisasContext *ctx, arg_ftintrz_w_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRZ_W_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrz_w_d(DisasContext *ctx, arg_ftintrz_w_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRZ_W_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrz_l_s(DisasContext *ctx, arg_ftintrz_l_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRZ_L_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrz_l_d(DisasContext *ctx, arg_ftintrz_l_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRZ_L_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrne_w_s(DisasContext *ctx, arg_ftintrne_w_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRNE_W_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrne_w_d(DisasContext *ctx, arg_ftintrne_w_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRNE_W_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrne_l_s(DisasContext *ctx, arg_ftintrne_l_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRNE_L_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftintrne_l_d(DisasContext *ctx, arg_ftintrne_l_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINTRNE_L_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftint_w_s(DisasContext *ctx, arg_ftint_w_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINT_W_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftint_w_d(DisasContext *ctx, arg_ftint_w_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINT_W_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftint_l_s(DisasContext *ctx, arg_ftint_l_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINT_L_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ftint_l_d(DisasContext *ctx, arg_ftint_l_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FTINT_L_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ffint_s_w(DisasContext *ctx, arg_ffint_s_w *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FFINT_S_W, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ffint_s_l(DisasContext *ctx, arg_ffint_s_l *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FFINT_S_L, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ffint_d_w(DisasContext *ctx, arg_ffint_d_w *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FFINT_D_W, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ffint_d_l(DisasContext *ctx, arg_ffint_d_l *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FFINT_D_L, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_frint_s(DisasContext *ctx, arg_frint_s *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FRINT_S, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_frint_d(DisasContext *ctx, arg_frint_d *a)
|
|
+{
|
|
+ gen_farith(ctx, OPC_LARCH_FRINT_D, 0, a->fj, a->fd, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_alsl_w(DisasContext *ctx, arg_alsl_w *a)
|
|
+{
|
|
+ gen_lsa(ctx, OPC_LARCH_ALSL_W, a->rd, a->rj, a->rk, a->sa2);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_alsl_wu(DisasContext *ctx, arg_alsl_wu *a)
|
|
+{
|
|
+ TCGv t0, t1;
|
|
+ t0 = tcg_temp_new();
|
|
+ t1 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_load_gpr(t1, a->rk);
|
|
+ tcg_gen_shli_tl(t0, t0, a->sa2 + 1);
|
|
+ tcg_gen_add_tl(t0, t0, t1);
|
|
+ tcg_gen_ext32u_tl(cpu_gpr[a->rd], t0);
|
|
+ tcg_temp_free(t0);
|
|
+ tcg_temp_free(t1);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_alsl_d(DisasContext *ctx, arg_alsl_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_lsa(ctx, OPC_LARCH_ALSL_D, a->rd, a->rj, a->rk, a->sa2);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_bytepick_w(DisasContext *ctx, arg_bytepick_w *a)
|
|
+{
|
|
+ gen_align(ctx, 32, a->rd, a->rj, a->rk, a->sa2);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_bytepick_d(DisasContext *ctx, arg_bytepick_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_align(ctx, 64, a->rd, a->rj, a->rk, a->sa3);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_add_w(DisasContext *ctx, arg_add_w *a)
|
|
+{
|
|
+ gen_arith(ctx, OPC_LARCH_ADD_W, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_sub_w(DisasContext *ctx, arg_sub_w *a)
|
|
+{
|
|
+ gen_arith(ctx, OPC_LARCH_SUB_W, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_add_d(DisasContext *ctx, arg_add_d *a)
|
|
+{
|
|
+ gen_arith(ctx, OPC_LARCH_ADD_D, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_sub_d(DisasContext *ctx, arg_sub_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_arith(ctx, OPC_LARCH_SUB_D, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_slt(DisasContext *ctx, arg_slt *a)
|
|
+{
|
|
+ gen_slt(ctx, OPC_LARCH_SLT, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_sltu(DisasContext *ctx, arg_sltu *a)
|
|
+{
|
|
+ gen_slt(ctx, OPC_LARCH_SLTU, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_maskeqz(DisasContext *ctx, arg_maskeqz *a)
|
|
+{
|
|
+ gen_cond_move(ctx, OPC_LARCH_MASKEQZ, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_masknez(DisasContext *ctx, arg_masknez *a)
|
|
+{
|
|
+ gen_cond_move(ctx, OPC_LARCH_MASKNEZ, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_nor(DisasContext *ctx, arg_nor *a)
|
|
+{
|
|
+ gen_logic(ctx, OPC_LARCH_NOR, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_and(DisasContext *ctx, arg_and *a)
|
|
+{
|
|
+ gen_logic(ctx, OPC_LARCH_AND, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_or(DisasContext *ctx, arg_or *a)
|
|
+{
|
|
+ gen_logic(ctx, OPC_LARCH_OR, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_xor(DisasContext *ctx, arg_xor *a)
|
|
+{
|
|
+ gen_logic(ctx, OPC_LARCH_XOR, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_orn(DisasContext *ctx, arg_orn *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, a->rk);
|
|
+ tcg_gen_not_tl(t0, t0);
|
|
+ tcg_gen_or_tl(cpu_gpr[a->rd], cpu_gpr[a->rj], t0);
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_andn(DisasContext *ctx, arg_andn *a)
|
|
+{
|
|
+ TCGv t0, t1;
|
|
+ t0 = tcg_temp_new();
|
|
+ t1 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, a->rk);
|
|
+ gen_load_gpr(t1, a->rj);
|
|
+ tcg_gen_not_tl(t0, t0);
|
|
+ tcg_gen_and_tl(cpu_gpr[a->rd], t1, t0);
|
|
+ tcg_temp_free(t0);
|
|
+ tcg_temp_free(t1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_sll_w(DisasContext *ctx, arg_sll_w *a)
|
|
+{
|
|
+ gen_shift(ctx, OPC_LARCH_SLL_W, a->rd, a->rk, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_srl_w(DisasContext *ctx, arg_srl_w *a)
|
|
+{
|
|
+ gen_shift(ctx, OPC_LARCH_SRL_W, a->rd, a->rk, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_sra_w(DisasContext *ctx, arg_sra_w *a)
|
|
+{
|
|
+ gen_shift(ctx, OPC_LARCH_SRA_W, a->rd, a->rk, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_sll_d(DisasContext *ctx, arg_sll_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_shift(ctx, OPC_LARCH_SLL_D, a->rd, a->rk, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_srl_d(DisasContext *ctx, arg_srl_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_shift(ctx, OPC_LARCH_SRL_D, a->rd, a->rk, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_sra_d(DisasContext *ctx, arg_sra_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_shift(ctx, OPC_LARCH_SRA_D, a->rd, a->rk, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_rotr_w(DisasContext *ctx, arg_rotr_w *a)
|
|
+{
|
|
+ gen_shift(ctx, OPC_LARCH_ROTR_W, a->rd, a->rk, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_rotr_d(DisasContext *ctx, arg_rotr_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_shift(ctx, OPC_LARCH_ROTR_D, a->rd, a->rk, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_crc_w_b_w(DisasContext *ctx, arg_crc_w_b_w *a)
|
|
+{
|
|
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 1, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_crc_w_h_w(DisasContext *ctx, arg_crc_w_h_w *a)
|
|
+{
|
|
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 2, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_crc_w_w_w(DisasContext *ctx, arg_crc_w_w_w *a)
|
|
+{
|
|
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 4, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_crc_w_d_w(DisasContext *ctx, arg_crc_w_d_w *a)
|
|
+{
|
|
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 8, 0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_crcc_w_b_w(DisasContext *ctx, arg_crcc_w_b_w *a)
|
|
+{
|
|
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 1, 1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_crcc_w_h_w(DisasContext *ctx, arg_crcc_w_h_w *a)
|
|
+{
|
|
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 2, 1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_crcc_w_w_w(DisasContext *ctx, arg_crcc_w_w_w *a)
|
|
+{
|
|
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 4, 1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_crcc_w_d_w(DisasContext *ctx, arg_crcc_w_d_w *a)
|
|
+{
|
|
+ gen_crc32(ctx, a->rd, a->rj, a->rk, 8, 1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mul_w(DisasContext *ctx, arg_mul_w *a)
|
|
+{
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MUL_W, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mulh_w(DisasContext *ctx, arg_mulh_w *a)
|
|
+{
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MULH_W, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mulh_wu(DisasContext *ctx, arg_mulh_wu *a)
|
|
+{
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MULH_WU, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mul_d(DisasContext *ctx, arg_mul_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MUL_D, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mulh_d(DisasContext *ctx, arg_mulh_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MULH_D, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mulh_du(DisasContext *ctx, arg_mulh_du *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MULH_DU, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mulw_d_w(DisasContext *ctx, arg_mulw_d_w *a)
|
|
+{
|
|
+ TCGv_i64 t0 = tcg_temp_new_i64();
|
|
+ TCGv_i64 t1 = tcg_temp_new_i64();
|
|
+ TCGv_i64 t2 = tcg_temp_new_i64();
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_load_gpr(t1, a->rk);
|
|
+ tcg_gen_ext32s_i64(t0, t0);
|
|
+ tcg_gen_ext32s_i64(t1, t1);
|
|
+ tcg_gen_mul_i64(t2, t0, t1);
|
|
+ gen_store_gpr(t2, a->rd);
|
|
+ tcg_temp_free_i64(t0);
|
|
+ tcg_temp_free_i64(t1);
|
|
+ tcg_temp_free_i64(t2);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mulw_d_wu(DisasContext *ctx, arg_mulw_d_wu *a)
|
|
+{
|
|
+ TCGv_i64 t0 = tcg_temp_new_i64();
|
|
+ TCGv_i64 t1 = tcg_temp_new_i64();
|
|
+ TCGv_i64 t2 = tcg_temp_new_i64();
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_load_gpr(t1, a->rk);
|
|
+ tcg_gen_ext32u_i64(t0, t0);
|
|
+ tcg_gen_ext32u_i64(t1, t1);
|
|
+ tcg_gen_mul_i64(t2, t0, t1);
|
|
+ gen_store_gpr(t2, a->rd);
|
|
+ tcg_temp_free_i64(t0);
|
|
+ tcg_temp_free_i64(t1);
|
|
+ tcg_temp_free_i64(t2);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_div_w(DisasContext *ctx, arg_div_w *a)
|
|
+{
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_DIV_W, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mod_w(DisasContext *ctx, arg_mod_w *a)
|
|
+{
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MOD_W, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_div_wu(DisasContext *ctx, arg_div_wu *a)
|
|
+{
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_DIV_WU, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mod_wu(DisasContext *ctx, arg_mod_wu *a)
|
|
+{
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MOD_WU, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_div_d(DisasContext *ctx, arg_div_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_DIV_D, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mod_d(DisasContext *ctx, arg_mod_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MOD_D, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_div_du(DisasContext *ctx, arg_div_du *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_DIV_DU, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_mod_du(DisasContext *ctx, arg_mod_du *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_r6_muldiv(ctx, OPC_LARCH_MOD_DU, a->rd, a->rj, a->rk);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/* do not update CP0.BadVaddr */
|
|
+static bool trans_asrtle_d(DisasContext *ctx, arg_asrtle_d *a)
|
|
+{
|
|
+ TCGv t1 = tcg_temp_new();
|
|
+ TCGv t2 = tcg_temp_new();
|
|
+ gen_load_gpr(t1, a->rj);
|
|
+ gen_load_gpr(t2, a->rk);
|
|
+ gen_helper_asrtle_d(cpu_env, t1, t2);
|
|
+ tcg_temp_free(t1);
|
|
+ tcg_temp_free(t2);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+/* do not update CP0.BadVaddr */
|
|
+static bool trans_asrtgt_d(DisasContext *ctx, arg_asrtgt_d *a)
|
|
+{
|
|
+ TCGv t1 = tcg_temp_new();
|
|
+ TCGv t2 = tcg_temp_new();
|
|
+ gen_load_gpr(t1, a->rj);
|
|
+ gen_load_gpr(t2, a->rk);
|
|
+ gen_helper_asrtgt_d(cpu_env, t1, t2);
|
|
+ tcg_temp_free(t1);
|
|
+ tcg_temp_free(t2);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_USER_ONLY
|
|
+static bool trans_gr2scr(DisasContext *ctx, arg_gr2scr *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_scr2gr(DisasContext *ctx, arg_scr2gr *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+#else
|
|
+static bool trans_gr2scr(DisasContext *ctx, arg_gr2scr *a)
|
|
+{
|
|
+ TCGv_i32 sd = tcg_const_i32(a->sd);
|
|
+ TCGv val = tcg_temp_new();
|
|
+ check_lbt_enabled(ctx);
|
|
+ gen_load_gpr(val, a->rj);
|
|
+ gen_helper_store_scr(cpu_env, sd, val);
|
|
+ tcg_temp_free_i32(sd);
|
|
+ tcg_temp_free(val);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_scr2gr(DisasContext *ctx, arg_scr2gr *a)
|
|
+{
|
|
+ if (a->rd == 0) {
|
|
+ /* Nop */
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ TCGv_i32 tsj = tcg_const_i32(a->sj);
|
|
+ check_lbt_enabled(ctx);
|
|
+ gen_helper_load_scr(cpu_gpr[a->rd], cpu_env, tsj);
|
|
+ tcg_temp_free_i32(tsj);
|
|
+ return true;
|
|
+}
|
|
+#endif
|
|
+
|
|
+static bool trans_clo_w(DisasContext *ctx, arg_clo_w *a)
|
|
+{
|
|
+ gen_cl(ctx, OPC_LARCH_CLO_W, a->rd, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_clz_w(DisasContext *ctx, arg_clz_w *a)
|
|
+{
|
|
+ gen_cl(ctx, OPC_LARCH_CLZ_W, a->rd, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_cto_w(DisasContext *ctx, arg_cto_w *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_helper_cto_w(cpu_gpr[a->rd], cpu_env, t0);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ctz_w(DisasContext *ctx, arg_ctz_w *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_helper_ctz_w(cpu_gpr[a->rd], cpu_env, t0);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_clo_d(DisasContext *ctx, arg_clo_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_cl(ctx, OPC_LARCH_CLO_D, a->rd, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_clz_d(DisasContext *ctx, arg_clz_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_cl(ctx, OPC_LARCH_CLZ_D, a->rd, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_cto_d(DisasContext *ctx, arg_cto_d *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_helper_cto_d(cpu_gpr[a->rd], cpu_env, t0);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ctz_d(DisasContext *ctx, arg_ctz_d *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_helper_ctz_d(cpu_gpr[a->rd], cpu_env, t0);
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_revb_2h(DisasContext *ctx, arg_revb_2h *a)
|
|
+{
|
|
+ gen_bshfl(ctx, OPC_LARCH_REVB_2H, a->rj, a->rd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_revb_4h(DisasContext *ctx, arg_revb_4h *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_bshfl(ctx, OPC_LARCH_REVB_4H, a->rj, a->rd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_revb_2w(DisasContext *ctx, arg_revb_2w *a)
|
|
+{
|
|
+ handle_rev32(ctx, a->rj, a->rd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_revb_d(DisasContext *ctx, arg_revb_d *a)
|
|
+{
|
|
+ handle_rev64(ctx, a->rj, a->rd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_revh_2w(DisasContext *ctx, arg_revh_2w *a)
|
|
+{
|
|
+ handle_rev16(ctx, a->rj, a->rd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_revh_d(DisasContext *ctx, arg_revh_d *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_bshfl(ctx, OPC_LARCH_REVH_D, a->rj, a->rd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_bitrev_4b(DisasContext *ctx, arg_bitrev_4b *a)
|
|
+{
|
|
+ gen_bitswap(ctx, OPC_LARCH_BREV_4B, a->rd, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_bitrev_8b(DisasContext *ctx, arg_bitrev_8b *a)
|
|
+{
|
|
+ check_larch_64(ctx);
|
|
+ gen_bitswap(ctx, OPC_LARCH_BREV_8B, a->rd, a->rj);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_bitrev_w(DisasContext *ctx, arg_bitrev_w *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_helper_bitrev_w(cpu_gpr[a->rd], cpu_env, t0);
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_bitrev_d(DisasContext *ctx, arg_bitrev_d *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_helper_bitrev_d(cpu_gpr[a->rd], cpu_env, t0);
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ext_w_h(DisasContext *ctx, arg_ext_w_h *a)
|
|
+{
|
|
+ gen_bshfl(ctx, OPC_LARCH_EXT_WH, a->rj, a->rd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ext_w_b(DisasContext *ctx, arg_ext_w_b *a)
|
|
+{
|
|
+ gen_bshfl(ctx, OPC_LARCH_EXT_WB, a->rj, a->rd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_srli_w(DisasContext *ctx, arg_srli_w *a)
|
|
+{
|
|
+ gen_shift_imm(ctx, OPC_LARCH_SRLI_W, a->rd, a->rj, a->ui5);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_srai_w(DisasContext *ctx, arg_srai_w *a)
|
|
+{
|
|
+ gen_shift_imm(ctx, OPC_LARCH_SRAI_W, a->rd, a->rj, a->ui5);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_srai_d(DisasContext *ctx, arg_srai_d *a)
|
|
+{
|
|
+ TCGv t0;
|
|
+ check_larch_64(ctx);
|
|
+ t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ tcg_gen_sari_tl(cpu_gpr[a->rd], t0, a->ui6);
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_rotri_w(DisasContext *ctx, arg_rotri_w *a)
|
|
+{
|
|
+ gen_shift_imm(ctx, OPC_LARCH_ROTRI_W, a->rd, a->rj, a->ui5);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_rotri_d(DisasContext *ctx, arg_rotri_d *a)
|
|
+{
|
|
+ TCGv t0;
|
|
+ check_larch_64(ctx);
|
|
+ t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ tcg_gen_rotri_tl(cpu_gpr[a->rd], t0, a->ui6);
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_fcmp_cond_s(DisasContext *ctx, arg_fcmp_cond_s *a)
|
|
+{
|
|
+ check_cp1_enabled(ctx);
|
|
+ gen_fcmp_s(ctx, a->fcond, a->fk, a->fj, a->cd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_fcmp_cond_d(DisasContext *ctx, arg_fcmp_cond_d *a)
|
|
+{
|
|
+ check_cp1_enabled(ctx);
|
|
+ gen_fcmp_d(ctx, a->fcond, a->fk, a->fj, a->cd);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_fsel(DisasContext *ctx, arg_fsel *a)
|
|
+{
|
|
+ TCGv_i64 fj = tcg_temp_new_i64();
|
|
+ TCGv_i64 fk = tcg_temp_new_i64();
|
|
+ TCGv_i64 fd = tcg_temp_new_i64();
|
|
+ TCGv_i32 ca = tcg_const_i32(a->ca);
|
|
+ check_cp1_enabled(ctx);
|
|
+ gen_load_fpr64(ctx, fj, a->fj);
|
|
+ gen_load_fpr64(ctx, fk, a->fk);
|
|
+ gen_helper_fsel(fd, cpu_env, fj, fk, ca);
|
|
+ gen_store_fpr64(ctx, fd, a->fd);
|
|
+ tcg_temp_free_i64(fj);
|
|
+ tcg_temp_free_i64(fk);
|
|
+ tcg_temp_free_i64(fd);
|
|
+ tcg_temp_free_i32(ca);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#include "cpu-csr.h"
|
|
+
|
|
+#ifdef CONFIG_USER_ONLY
|
|
+
|
|
+static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+#else
|
|
+
|
|
+#define GEN_CSRRQ_CASE(name) \
|
|
+ do { \
|
|
+ case LOONGARCH_CSR_##name: \
|
|
+ gen_csr_rdq(ctx, cpu_gpr[rd], LOONGARCH_CSR_##name); \
|
|
+ } while (0)
|
|
+
|
|
+static bool trans_csrrd(DisasContext *ctx, unsigned rd, unsigned csr)
|
|
+{
|
|
+ switch (csr) {
|
|
+ GEN_CSRRQ_CASE(CRMD);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PRMD);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(EUEN);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(MISC);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ECFG);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ESTAT);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ERA);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(BADV);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(BADI);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(EEPN);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBIDX);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBEHI);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBELO0);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBELO1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBWIRED);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(GTLBC);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TRGP);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PGDL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PGDH);
|
|
+ break;
|
|
+ case LOONGARCH_CSR_PGD:
|
|
+ gen_helper_read_pgd(cpu_gpr[rd], cpu_env);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PWCTL0);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PWCTL1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(STLBPGSIZE);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(RVACFG);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(CPUID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PRCFG1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PRCFG2);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PRCFG3);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS0);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS2);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS3);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS4);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS5);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS6);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS7);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(KS8);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TMID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TCFG);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TVAL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(CNTC);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TINTCLR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(GSTAT);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(GCFG);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(GINTC);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(GCNTC);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(LLBCTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IMPCTL1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IMPCTL2);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(GNMI);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBRENT);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBRBADV);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBRERA);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBRSAVE);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBRELO0);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBRELO1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBREHI);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(TLBRPRMD);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ERRCTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ERRINFO);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ERRINFO1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ERRENT);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ERRERA);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(ERRSAVE);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(CTAG);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DMWIN0);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DMWIN1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DMWIN2);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DMWIN3);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PERFCTRL0);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PERFCNTR0);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PERFCTRL1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PERFCNTR1);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PERFCTRL2);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PERFCNTR2);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PERFCTRL3);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(PERFCNTR3);
|
|
+ break;
|
|
+ /* debug */
|
|
+ GEN_CSRRQ_CASE(MWPC);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(MWPS);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB0ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB0MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB0CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB0ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB1ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB1MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB1CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB1ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB2ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB2MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB2CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB2ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB3ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB3MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB3CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DB3ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(FWPC);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(FWPS);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB0ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB0MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB0CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB0ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB1ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB1MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB1CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB1ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB2ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB2MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB2CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB2ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB3ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB3MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB3CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB3ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB4ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB4MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB4CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB4ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB5ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB5MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB5CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB5ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB6ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB6MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB6CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB6ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB7ADDR);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB7MASK);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB7CTL);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(IB7ASID);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DEBUG);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DERA);
|
|
+ break;
|
|
+ GEN_CSRRQ_CASE(DESAVE);
|
|
+ break;
|
|
+ default:
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+#undef GEN_CSRRQ_CASE
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#define GEN_CSRWQ_CASE(name) \
|
|
+ do { \
|
|
+ case LOONGARCH_CSR_##name: \
|
|
+ gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_##name); \
|
|
+ } while (0)
|
|
+
|
|
+static bool trans_csrwr(DisasContext *ctx, unsigned rd, unsigned csr)
|
|
+{
|
|
+
|
|
+ switch (csr) {
|
|
+ case LOONGARCH_CSR_CRMD:
|
|
+ save_cpu_state(ctx, 1);
|
|
+ gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_CRMD);
|
|
+ gen_save_pc(ctx->base.pc_next + 4);
|
|
+ ctx->base.is_jmp = DISAS_EXIT;
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PRMD);
|
|
+ break;
|
|
+ case LOONGARCH_CSR_EUEN:
|
|
+ gen_csr_wrq(ctx, cpu_gpr[rd], LOONGARCH_CSR_EUEN);
|
|
+ /* Stop translation */
|
|
+ gen_save_pc(ctx->base.pc_next + 4);
|
|
+ ctx->base.is_jmp = DISAS_EXIT;
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(MISC);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ECFG);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ESTAT);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ERA);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(BADV);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(BADI);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(EEPN);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBIDX);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBEHI);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBELO0);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBELO1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBWIRED);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(GTLBC);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TRGP);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PGDL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PGDH);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PGD);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PWCTL0);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PWCTL1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(STLBPGSIZE);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(RVACFG);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(CPUID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PRCFG1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PRCFG2);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PRCFG3);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS0);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS2);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS3);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS4);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS5);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS6);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS7);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(KS8);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TMID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TCFG);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TVAL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(CNTC);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TINTCLR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(GSTAT);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(GCFG);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(GINTC);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(GCNTC);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(LLBCTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IMPCTL1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IMPCTL2);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(GNMI);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBRENT);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBRBADV);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBRERA);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBRSAVE);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBRELO0);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBRELO1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBREHI);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(TLBRPRMD);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ERRCTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ERRINFO);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ERRINFO1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ERRENT);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ERRERA);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(ERRSAVE);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(CTAG);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DMWIN0);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DMWIN1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DMWIN2);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DMWIN3);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PERFCTRL0);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PERFCNTR0);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PERFCTRL1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PERFCNTR1);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PERFCTRL2);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PERFCNTR2);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PERFCTRL3);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(PERFCNTR3);
|
|
+ break;
|
|
+ /* debug */
|
|
+ GEN_CSRWQ_CASE(MWPC);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(MWPS);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB0ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB0MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB0CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB0ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB1ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB1MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB1CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB1ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB2ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB2MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB2CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB2ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB3ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB3MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB3CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DB3ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(FWPC);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(FWPS);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB0ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB0MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB0CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB0ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB1ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB1MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB1CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB1ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB2ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB2MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB2CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB2ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB3ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB3MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB3CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB3ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB4ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB4MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB4CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB4ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB5ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB5MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB5CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB5ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB6ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB6MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB6CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB6ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB7ADDR);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB7MASK);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB7CTL);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(IB7ASID);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DEBUG);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DERA);
|
|
+ break;
|
|
+ GEN_CSRWQ_CASE(DESAVE);
|
|
+ break;
|
|
+ default:
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+#undef GEN_CSRWQ_CASE
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#define GEN_CSRXQ_CASE(name) \
|
|
+ do { \
|
|
+ case LOONGARCH_CSR_##name: \
|
|
+ if (rd == 0) { \
|
|
+ gen_csr_xchgq(ctx, zero, cpu_gpr[rj], LOONGARCH_CSR_##name); \
|
|
+ } else { \
|
|
+ gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], \
|
|
+ LOONGARCH_CSR_##name); \
|
|
+ } \
|
|
+ } while (0)
|
|
+
|
|
+static bool trans_csrxchg(DisasContext *ctx, arg_csrxchg *a)
|
|
+{
|
|
+ unsigned rd, rj, csr;
|
|
+ TCGv zero = tcg_const_tl(0);
|
|
+ rd = a->rd;
|
|
+ rj = a->rj;
|
|
+ csr = a->csr;
|
|
+
|
|
+ if (rj == 0) {
|
|
+ return trans_csrrd(ctx, rd, csr);
|
|
+ } else if (rj == 1) {
|
|
+ return trans_csrwr(ctx, rd, csr);
|
|
+ }
|
|
+
|
|
+ switch (csr) {
|
|
+ case LOONGARCH_CSR_CRMD:
|
|
+ save_cpu_state(ctx, 1);
|
|
+ if (rd == 0) {
|
|
+ gen_csr_xchgq(ctx, zero, cpu_gpr[rj], LOONGARCH_CSR_CRMD);
|
|
+ } else {
|
|
+ gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], LOONGARCH_CSR_CRMD);
|
|
+ }
|
|
+ gen_save_pc(ctx->base.pc_next + 4);
|
|
+ ctx->base.is_jmp = DISAS_EXIT;
|
|
+ break;
|
|
+
|
|
+ GEN_CSRXQ_CASE(PRMD);
|
|
+ break;
|
|
+ case LOONGARCH_CSR_EUEN:
|
|
+ if (rd == 0) {
|
|
+ gen_csr_xchgq(ctx, zero, cpu_gpr[rj], LOONGARCH_CSR_EUEN);
|
|
+ } else {
|
|
+ gen_csr_xchgq(ctx, cpu_gpr[rd], cpu_gpr[rj], LOONGARCH_CSR_EUEN);
|
|
+ }
|
|
+ /* Stop translation */
|
|
+ gen_save_pc(ctx->base.pc_next + 4);
|
|
+ ctx->base.is_jmp = DISAS_EXIT;
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(MISC);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ECFG);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ESTAT);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ERA);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(BADV);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(BADI);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(EEPN);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBIDX);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBEHI);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBELO0);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBELO1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBWIRED);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(GTLBC);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TRGP);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PGDL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PGDH);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PGD);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PWCTL0);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PWCTL1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(STLBPGSIZE);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(RVACFG);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(CPUID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PRCFG1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PRCFG2);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PRCFG3);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS0);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS2);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS3);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS4);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS5);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS6);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS7);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(KS8);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TMID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TCFG);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TVAL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(CNTC);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TINTCLR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(GSTAT);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(GCFG);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(GINTC);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(GCNTC);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(LLBCTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IMPCTL1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IMPCTL2);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(GNMI);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBRENT);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBRBADV);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBRERA);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBRSAVE);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBRELO0);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBRELO1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBREHI);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(TLBRPRMD);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ERRCTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ERRINFO);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ERRINFO1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ERRENT);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ERRERA);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(ERRSAVE);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(CTAG);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DMWIN0);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DMWIN1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DMWIN2);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DMWIN3);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PERFCTRL0);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PERFCNTR0);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PERFCTRL1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PERFCNTR1);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PERFCTRL2);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PERFCNTR2);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PERFCTRL3);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(PERFCNTR3);
|
|
+ break;
|
|
+ /* debug */
|
|
+ GEN_CSRXQ_CASE(MWPC);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(MWPS);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB0ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB0MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB0CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB0ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB1ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB1MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB1CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB1ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB2ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB2MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB2CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB2ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB3ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB3MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB3CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DB3ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(FWPC);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(FWPS);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB0ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB0MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB0CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB0ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB1ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB1MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB1CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB1ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB2ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB2MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB2CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB2ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB3ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB3MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB3CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB3ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB4ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB4MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB4CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB4ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB5ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB5MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB5CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB5ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB6ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB6MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB6CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB6ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB7ADDR);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB7MASK);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB7CTL);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(IB7ASID);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DEBUG);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DERA);
|
|
+ break;
|
|
+ GEN_CSRXQ_CASE(DESAVE);
|
|
+ break;
|
|
+ default:
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+#undef GEN_CSRXQ_CASE
|
|
+ tcg_temp_free(zero);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
/*
 * cacop: cache maintenance operation.  Accepted and implemented as a
 * no-op here (no cache state is modelled by this translator).
 */
static bool trans_cacop(DisasContext *ctx, arg_cacop *a)
{
    /* Treat as NOP. */
    return true;
}
|
|
+
|
|
+#ifdef CONFIG_USER_ONLY
|
|
+
|
|
+static bool trans_ldpte(DisasContext *ctx, arg_ldpte *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_lddir(DisasContext *ctx, arg_lddir *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrrd_b(DisasContext *ctx, arg_iocsrrd_b *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrrd_h(DisasContext *ctx, arg_iocsrrd_h *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrrd_w(DisasContext *ctx, arg_iocsrrd_w *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrrd_d(DisasContext *ctx, arg_iocsrrd_d *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrwr_b(DisasContext *ctx, arg_iocsrwr_b *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrwr_h(DisasContext *ctx, arg_iocsrwr_h *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrwr_w(DisasContext *ctx, arg_iocsrwr_w *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrwr_d(DisasContext *ctx, arg_iocsrwr_d *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+#else
|
|
+
|
|
+static bool trans_ldpte(DisasContext *ctx, arg_ldpte *a)
|
|
+{
|
|
+ TCGv t0, t1;
|
|
+ TCGv_i32 t2;
|
|
+ t0 = tcg_const_tl(a->rj);
|
|
+ t1 = tcg_const_tl(a->seq);
|
|
+ t2 = tcg_const_i32(ctx->mem_idx);
|
|
+ gen_helper_ldpte(cpu_env, t0, t1, t2);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_lddir(DisasContext *ctx, arg_lddir *a)
|
|
+{
|
|
+ TCGv t0, t1, t2;
|
|
+ TCGv_i32 t3;
|
|
+ t0 = tcg_const_tl(a->rj);
|
|
+ t1 = tcg_const_tl(a->rd);
|
|
+ t2 = tcg_const_tl(a->level);
|
|
+ t3 = tcg_const_i32(ctx->mem_idx);
|
|
+ gen_helper_lddir(cpu_env, t0, t1, t2, t3);
|
|
+
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrrd_b(DisasContext *ctx, arg_iocsrrd_b *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrrd_h(DisasContext *ctx, arg_iocsrrd_h *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrrd_w(DisasContext *ctx, arg_iocsrrd_w *a)
|
|
+{
|
|
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_LD_W);
|
|
+ TCGv t0, t1;
|
|
+ t0 = tcg_const_tl(a->rj);
|
|
+ t1 = tcg_const_tl(a->rd);
|
|
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrrd_d(DisasContext *ctx, arg_iocsrrd_d *a)
|
|
+{
|
|
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_LD_D);
|
|
+ TCGv t0, t1;
|
|
+ t0 = tcg_const_tl(a->rj);
|
|
+ t1 = tcg_const_tl(a->rd);
|
|
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrwr_b(DisasContext *ctx, arg_iocsrwr_b *a)
|
|
+{
|
|
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_B);
|
|
+ TCGv t0, t1;
|
|
+ t0 = tcg_const_tl(a->rj);
|
|
+ t1 = tcg_const_tl(a->rd);
|
|
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrwr_h(DisasContext *ctx, arg_iocsrwr_h *a)
|
|
+{
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrwr_w(DisasContext *ctx, arg_iocsrwr_w *a)
|
|
+{
|
|
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_W);
|
|
+ TCGv t0, t1;
|
|
+ t0 = tcg_const_tl(a->rj);
|
|
+ t1 = tcg_const_tl(a->rd);
|
|
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_iocsrwr_d(DisasContext *ctx, arg_iocsrwr_d *a)
|
|
+{
|
|
+ TCGv_i32 iocsr_op = tcg_const_i32(OPC_LARCH_ST_D);
|
|
+ TCGv t0, t1;
|
|
+ t0 = tcg_const_tl(a->rj);
|
|
+ t1 = tcg_const_tl(a->rd);
|
|
+ gen_helper_iocsr(cpu_env, t0, t1, iocsr_op);
|
|
+ return true;
|
|
+}
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
+
|
|
+#ifdef CONFIG_USER_ONLY
|
|
+
|
|
/*
 * In user mode the TLB maintenance and exception-return instructions
 * are privileged; generate "return false" stubs so the decoder treats
 * them as invalid instructions.
 */
#define GEN_FALSE_TRANS(name)                                      \
    static bool trans_##name(DisasContext *ctx, arg_##name *a)     \
    {                                                              \
        return false;                                              \
    }

GEN_FALSE_TRANS(tlbclr)
GEN_FALSE_TRANS(invtlb)
GEN_FALSE_TRANS(tlbflush)
GEN_FALSE_TRANS(tlbsrch)
GEN_FALSE_TRANS(tlbrd)
GEN_FALSE_TRANS(tlbwr)
GEN_FALSE_TRANS(tlbfill)
GEN_FALSE_TRANS(ertn)
|
|
+
|
|
+#else
|
|
+
|
|
+static bool trans_tlbclr(DisasContext *ctx, arg_tlbclr *a)
|
|
+{
|
|
+ gen_helper_tlbclr(cpu_env);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_tlbflush(DisasContext *ctx, arg_tlbflush *a)
|
|
+{
|
|
+ gen_helper_tlbflush(cpu_env);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_invtlb(DisasContext *ctx, arg_invtlb *a)
|
|
+{
|
|
+ TCGv addr = tcg_temp_new();
|
|
+ TCGv info = tcg_temp_new();
|
|
+ TCGv op = tcg_const_tl(a->invop);
|
|
+
|
|
+ gen_load_gpr(addr, a->addr);
|
|
+ gen_load_gpr(info, a->info);
|
|
+ gen_helper_invtlb(cpu_env, addr, info, op);
|
|
+
|
|
+ tcg_temp_free(addr);
|
|
+ tcg_temp_free(info);
|
|
+ tcg_temp_free(op);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_tlbsrch(DisasContext *ctx, arg_tlbsrch *a)
|
|
+{
|
|
+ gen_helper_tlbsrch(cpu_env);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_tlbrd(DisasContext *ctx, arg_tlbrd *a)
|
|
+{
|
|
+ gen_helper_tlbrd(cpu_env);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_tlbwr(DisasContext *ctx, arg_tlbwr *a)
|
|
+{
|
|
+ gen_helper_tlbwr(cpu_env);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_tlbfill(DisasContext *ctx, arg_tlbfill *a)
|
|
+{
|
|
+ gen_helper_tlbfill(cpu_env);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool trans_ertn(DisasContext *ctx, arg_ertn *a)
|
|
+{
|
|
+ gen_helper_ertn(cpu_env);
|
|
+ ctx->base.is_jmp = DISAS_EXIT;
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_USER_ONLY */
|
|
+
|
|
/*
 * idle: halt the CPU via the idle helper.
 */
static bool trans_idle(DisasContext *ctx, arg_idle *a)
{
    /*
     * Temporarily advance pc_next so save_cpu_state() records the PC
     * of the *following* instruction — presumably so execution resumes
     * past the idle instruction on wakeup — then restore it.
     */
    ctx->base.pc_next += 4;
    save_cpu_state(ctx, 1);
    ctx->base.pc_next -= 4;
    gen_helper_idle(cpu_env);
    /* Nothing after the idle helper is reachable in this TB. */
    ctx->base.is_jmp = DISAS_NORETURN;
    return true;
}
|
|
+
|
|
+#ifdef CONFIG_USER_ONLY
|
|
+
|
|
/* rdtime.d in user mode: accepted but generates no code. */
static bool trans_rdtime_d(DisasContext *ctx, arg_rdtime_d *a)
{
    /* Nop */
    return true;
}
|
|
+
|
|
+#else
|
|
+
|
|
+static bool trans_rdtime_d(DisasContext *ctx, arg_rdtime_d *a)
|
|
+{
|
|
+ TCGv t0, t1;
|
|
+ t0 = tcg_const_tl(a->rd);
|
|
+ t1 = tcg_const_tl(a->rj);
|
|
+ gen_helper_drdtime(cpu_env, t0, t1);
|
|
+ tcg_temp_free(t0);
|
|
+ tcg_temp_free(t1);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+#endif
|
|
+
|
|
+static bool trans_cpucfg(DisasContext *ctx, arg_cpucfg *a)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, a->rj);
|
|
+ gen_helper_cpucfg(cpu_gpr[a->rd], cpu_env, t0);
|
|
+ tcg_temp_free(t0);
|
|
+ return true;
|
|
+}
|
|
diff --git a/target/loongarch64/translate.c b/target/loongarch64/translate.c
|
|
new file mode 100644
|
|
index 0000000000..2c65e4826a
|
|
--- /dev/null
|
|
+++ b/target/loongarch64/translate.c
|
|
@@ -0,0 +1,2705 @@
|
|
+/*
|
|
+ * LOONGARCH emulation for QEMU - main translation routines
|
|
+ *
|
|
+ * Copyright (c) 2023 Loongarch Technology
|
|
+ *
|
|
+ * This program is free software; you can redistribute it and/or modify it
|
|
+ * under the terms and conditions of the GNU General Public License,
|
|
+ * version 2 or later, as published by the Free Software Foundation.
|
|
+ *
|
|
+ * This program is distributed in the hope it will be useful, but WITHOUT
|
|
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
|
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
|
|
+ * more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU General Public License along with
|
|
+ * this program. If not, see <http://www.gnu.org/licenses/>.
|
|
+ *
|
|
+ */
|
|
+
|
|
+#include "qemu/osdep.h"
|
|
+#include "cpu.h"
|
|
+#include "internal.h"
|
|
+#include "disas/disas.h"
|
|
+#include "exec/exec-all.h"
|
|
+#include "tcg/tcg-op.h"
|
|
+#include "exec/cpu_ldst.h"
|
|
+#include "hw/loongarch/cpudevs.h"
|
|
+
|
|
+#include "exec/helper-proto.h"
|
|
+#include "exec/helper-gen.h"
|
|
+#include "semihosting/semihost.h"
|
|
+
|
|
+#include "trace-tcg.h"
|
|
+#include "exec/translator.h"
|
|
+#include "exec/log.h"
|
|
+
|
|
+#include "instmap.h"
|
|
+
|
|
+#define LARCH_DEBUG_DISAS 0
|
|
+
|
|
+/* Values for the fmt field in FP instructions */
|
|
enum {
    /* 0 - 15 are reserved */
    FMT_S = 16, /* single fp */
    FMT_D = 17, /* double fp */
};

/* global register indices */
static TCGv cpu_gpr[32], cpu_PC;    /* general registers and PC */
static TCGv btarget, bcond;         /* pending branch state */
static TCGv cpu_lladdr, cpu_llval;  /* linked-load (LL/SC) state */
static TCGv_i32 hflags;             /* cached copy of CPU hflags */
static TCGv_i32 fpu_fcsr0;          /* FP control/status register 0 */
static TCGv_i64 fpu_f64[32];        /* FP registers, 64-bit views */
|
|
+
|
|
+#include "exec/gen-icount.h"
|
|
+
|
|
/*
 * Helper-call wrappers: gen_helper_<n>e<m>i calls a TCG helper taking
 * cpu_env, <n> indicating whether a return value is produced (0 or 1,
 * named 'ret'), and one trailing immediate argument which is wrapped
 * in a transient TCGv_i32 constant and freed after the call.
 */
#define gen_helper_0e0i(name, arg)                                \
    do {                                                          \
        TCGv_i32 helper_tmp = tcg_const_i32(arg);                 \
        gen_helper_##name(cpu_env, helper_tmp);                   \
        tcg_temp_free_i32(helper_tmp);                            \
    } while (0)

#define gen_helper_0e1i(name, arg1, arg2)                         \
    do {                                                          \
        TCGv_i32 helper_tmp = tcg_const_i32(arg2);                \
        gen_helper_##name(cpu_env, arg1, helper_tmp);             \
        tcg_temp_free_i32(helper_tmp);                            \
    } while (0)

#define gen_helper_1e0i(name, ret, arg1)                          \
    do {                                                          \
        TCGv_i32 helper_tmp = tcg_const_i32(arg1);                \
        gen_helper_##name(ret, cpu_env, helper_tmp);              \
        tcg_temp_free_i32(helper_tmp);                            \
    } while (0)

#define gen_helper_1e1i(name, ret, arg1, arg2)                    \
    do {                                                          \
        TCGv_i32 helper_tmp = tcg_const_i32(arg2);                \
        gen_helper_##name(ret, cpu_env, arg1, helper_tmp);        \
        tcg_temp_free_i32(helper_tmp);                            \
    } while (0)

#define gen_helper_0e2i(name, arg1, arg2, arg3)                   \
    do {                                                          \
        TCGv_i32 helper_tmp = tcg_const_i32(arg3);                \
        gen_helper_##name(cpu_env, arg1, arg2, helper_tmp);       \
        tcg_temp_free_i32(helper_tmp);                            \
    } while (0)

#define gen_helper_1e2i(name, ret, arg1, arg2, arg3)              \
    do {                                                          \
        TCGv_i32 helper_tmp = tcg_const_i32(arg3);                \
        gen_helper_##name(ret, cpu_env, arg1, arg2, helper_tmp);  \
        tcg_temp_free_i32(helper_tmp);                            \
    } while (0)

#define gen_helper_0e3i(name, arg1, arg2, arg3, arg4)             \
    do {                                                          \
        TCGv_i32 helper_tmp = tcg_const_i32(arg4);                \
        gen_helper_##name(cpu_env, arg1, arg2, arg3, helper_tmp); \
        tcg_temp_free_i32(helper_tmp);                            \
    } while (0)
|
|
+
|
|
/* Per-translation-block disassembly state. */
typedef struct DisasContext {
    DisasContextBase base;
    target_ulong saved_pc;   /* last PC value synced into cpu_PC */
    target_ulong page_start;
    uint32_t opcode;         /* current instruction word */
    uint64_t insn_flags;     /* ISA feature flags checked by check_insn() */
    /* Routine used to access memory */
    int mem_idx;
    MemOp default_tcg_memop_mask;
    uint32_t hflags, saved_hflags;  /* CPU hflags and last synced copy */
    target_ulong btarget;    /* pending branch target (see save_cpu_state) */
} DisasContext;
|
|
+
|
|
/* Target-specific translator exit reasons. */
#define DISAS_STOP DISAS_TARGET_0
#define DISAS_EXIT DISAS_TARGET_1

/* Disassembly trace logging; compiled out unless LARCH_DEBUG_DISAS. */
#define LOG_DISAS(...)                                            \
    do {                                                          \
        if (LARCH_DEBUG_DISAS) {                                  \
            qemu_log_mask(CPU_LOG_TB_IN_ASM, ##__VA_ARGS__);      \
        }                                                         \
    } while (0)

/* Log an invalid-instruction diagnostic with key opcode fields. */
#define LARCH_INVAL(op)                                                  \
    do {                                                                 \
        if (LARCH_DEBUG_DISAS) {                                         \
            qemu_log_mask(CPU_LOG_TB_IN_ASM,                             \
                          TARGET_FMT_lx ": %08x Invalid %s %03x %03x %03x\n", \
                          ctx->base.pc_next, ctx->opcode, op,            \
                          ctx->opcode >> 26, ctx->opcode & 0x3F,         \
                          ((ctx->opcode >> 16) & 0x1F));                 \
        }                                                                \
    } while (0)
|
|
+
|
|
+/* General purpose registers moves. */
|
|
+static inline void gen_load_gpr(TCGv t, int reg)
|
|
+{
|
|
+ if (reg == 0) {
|
|
+ tcg_gen_movi_tl(t, 0);
|
|
+ } else {
|
|
+ tcg_gen_mov_tl(t, cpu_gpr[reg]);
|
|
+ }
|
|
+}
|
|
+
|
|
+static inline void gen_store_gpr(TCGv t, int reg)
|
|
+{
|
|
+ if (reg != 0) {
|
|
+ tcg_gen_mov_tl(cpu_gpr[reg], t);
|
|
+ }
|
|
+}
|
|
+
|
|
/* PC and lazily-tracked translation state synchronisation. */

/* Write 'pc' into the architectural PC register. */
static inline void gen_save_pc(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_PC, pc);
}

/*
 * Sync lazily-tracked translator state back into the CPU: the PC
 * (when do_save_pc is set) and the hflags word plus the branch target
 * when the hflags carry a pending branch.  Values known to already be
 * current (saved_pc / saved_hflags) are not rewritten.
 */
static inline void save_cpu_state(DisasContext *ctx, int do_save_pc)
{
    LOG_DISAS("hflags %08x saved %08x\n", ctx->hflags, ctx->saved_hflags);
    if (do_save_pc && ctx->base.pc_next != ctx->saved_pc) {
        gen_save_pc(ctx->base.pc_next);
        ctx->saved_pc = ctx->base.pc_next;
    }
    if (ctx->hflags != ctx->saved_hflags) {
        tcg_gen_movi_i32(hflags, ctx->hflags);
        ctx->saved_hflags = ctx->hflags;
        switch (ctx->hflags & LARCH_HFLAG_BMASK) {
        case LARCH_HFLAG_BR:
            /* Register-target branch: nothing extra to sync here. */
            break;
        case LARCH_HFLAG_BC:
        case LARCH_HFLAG_B:
            /* Immediate branch target known at translate time. */
            tcg_gen_movi_tl(btarget, ctx->btarget);
            break;
        }
    }
}

/*
 * Initialise the translator's lazy state from the CPU at the start of
 * a translation block (inverse of save_cpu_state()).
 */
static inline void restore_cpu_state(CPULOONGARCHState *env, DisasContext *ctx)
{
    ctx->saved_hflags = ctx->hflags;
    switch (ctx->hflags & LARCH_HFLAG_BMASK) {
    case LARCH_HFLAG_BR:
        break;
    case LARCH_HFLAG_BC:
    case LARCH_HFLAG_B:
        ctx->btarget = env->btarget;
        break;
    }
}
|
|
+
|
|
+static inline void generate_exception_err(DisasContext *ctx, int excp, int err)
|
|
+{
|
|
+ TCGv_i32 texcp = tcg_const_i32(excp);
|
|
+ TCGv_i32 terr = tcg_const_i32(err);
|
|
+ save_cpu_state(ctx, 1);
|
|
+ gen_helper_raise_exception_err(cpu_env, texcp, terr);
|
|
+ tcg_temp_free_i32(terr);
|
|
+ tcg_temp_free_i32(texcp);
|
|
+ ctx->base.is_jmp = DISAS_NORETURN;
|
|
+}
|
|
+
|
|
+static inline void generate_exception_end(DisasContext *ctx, int excp)
|
|
+{
|
|
+ generate_exception_err(ctx, excp, 0);
|
|
+}
|
|
+
|
|
/* Floating point register moves. */

/* Read the low 32 bits of FPR 'reg' into 't'. */
static void gen_load_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
{
    tcg_gen_extrl_i64_i32(t, fpu_f64[reg]);
}

/* Write 't' into the low 32 bits of FPR 'reg', preserving the rest. */
static void gen_store_fpr32(DisasContext *ctx, TCGv_i32 t, int reg)
{
    TCGv_i64 t64;
    t64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t64, t);
    tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 0, 32);
    tcg_temp_free_i64(t64);
}

/* Read the high 32 bits of FPR 'reg' into 't'. */
static void gen_load_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
{
    tcg_gen_extrh_i64_i32(t, fpu_f64[reg]);
}

/* Write 't' into the high 32 bits of FPR 'reg', preserving the rest. */
static void gen_store_fpr32h(DisasContext *ctx, TCGv_i32 t, int reg)
{
    TCGv_i64 t64 = tcg_temp_new_i64();
    tcg_gen_extu_i32_i64(t64, t);
    tcg_gen_deposit_i64(fpu_f64[reg], fpu_f64[reg], t64, 32, 32);
    tcg_temp_free_i64(t64);
}

/* Read the full 64-bit FPR 'reg' into 't'. */
static void gen_load_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    tcg_gen_mov_i64(t, fpu_f64[reg]);
}

/* Write 't' into the full 64-bit FPR 'reg'. */
static void gen_store_fpr64(DisasContext *ctx, TCGv_i64 t, int reg)
{
    tcg_gen_mov_i64(fpu_f64[reg], t);
}
|
|
+
|
|
/*
 * Return the status-register bit index that holds FP condition code
 * 'cc': condition 0 lives at bit 23, condition N (N > 0) at bit 24+N.
 */
static inline int get_fp_bit(int cc)
{
    return cc ? 24 + cc : 23;
}
|
|
+
|
|
/* Addresses computation */

/*
 * ret = arg0 + arg1; when 32-bit address wrapping is in effect
 * (LARCH_HFLAG_AWRAP), the result is sign-extended from 32 bits.
 */
static inline void gen_op_addr_add(DisasContext *ctx, TCGv ret, TCGv arg0,
                                   TCGv arg1)
{
    tcg_gen_add_tl(ret, arg0, arg1);

    if (ctx->hflags & LARCH_HFLAG_AWRAP) {
        tcg_gen_ext32s_i64(ret, ret);
    }
}

/*
 * ret = base + ofs (immediate); same 32-bit wrapping rule as
 * gen_op_addr_add().
 */
static inline void gen_op_addr_addi(DisasContext *ctx, TCGv ret, TCGv base,
                                    target_long ofs)
{
    tcg_gen_addi_tl(ret, base, ofs);

    if (ctx->hflags & LARCH_HFLAG_AWRAP) {
        tcg_gen_ext32s_i64(ret, ret);
    }
}

/* Sign-extract the low 32-bits to a target_long. */
static inline void gen_move_low32(TCGv ret, TCGv_i64 arg)
{
    tcg_gen_ext32s_i64(ret, arg);
}

/* Sign-extract the high 32-bits to a target_long. */
static inline void gen_move_high32(TCGv ret, TCGv_i64 arg)
{
    tcg_gen_sari_i64(ret, arg, 32);
}
|
|
+
|
|
/*
 * Raise an FPU-disabled exception unless hflags say the FPU is
 * enabled.  In user mode the unit is always treated as enabled.
 */
static inline void check_cp1_enabled(DisasContext *ctx)
{
#ifndef CONFIG_USER_ONLY
    if (unlikely(!(ctx->hflags & LARCH_HFLAG_FPU))) {
        generate_exception_err(ctx, EXCP_FPDIS, 1);
    }
#endif
}

/* As above for the LSX (128-bit SIMD) unit. */
static inline void check_lsx_enabled(DisasContext *ctx)
{
#ifndef CONFIG_USER_ONLY
    if (unlikely(!(ctx->hflags & LARCH_HFLAG_LSX))) {
        generate_exception_err(ctx, EXCP_LSXDIS, 1);
    }
#endif
}

/* As above for the LASX (256-bit SIMD) unit. */
static inline void check_lasx_enabled(DisasContext *ctx)
{
#ifndef CONFIG_USER_ONLY
    if (unlikely(!(ctx->hflags & LARCH_HFLAG_LASX))) {
        generate_exception_err(ctx, EXCP_LASXDIS, 1);
    }
#endif
}

/* As above for the LBT (binary translation) unit. */
static inline void check_lbt_enabled(DisasContext *ctx)
{
#ifndef CONFIG_USER_ONLY
    if (unlikely(!(ctx->hflags & LARCH_HFLAG_LBT))) {
        generate_exception_err(ctx, EXCP_BTDIS, 1);
    }
#endif
}

/*
 * This code generates a "reserved instruction" exception if the
 * CPU does not support the instruction set corresponding to flags.
 */
static inline void check_insn(DisasContext *ctx, uint64_t flags)
{
    if (unlikely(!(ctx->insn_flags & flags))) {
        generate_exception_end(ctx, EXCP_RI);
    }
}

/*
 * This code generates a "reserved instruction" exception if the
 * CPU has corresponding flag set which indicates that the instruction
 * has been removed.
 */
static inline void check_insn_opc_removed(DisasContext *ctx, uint64_t flags)
{
    if (unlikely(ctx->insn_flags & flags)) {
        generate_exception_end(ctx, EXCP_RI);
    }
}

/*
 * The Linux kernel traps certain reserved instruction exceptions to
 * emulate the corresponding instructions. QEMU is the kernel in user
 * mode, so those traps are emulated by accepting the instructions.
 *
 * A reserved instruction exception is generated for flagged CPUs if
 * QEMU runs in system mode.
 */
static inline void check_insn_opc_user_only(DisasContext *ctx, uint64_t flags)
{
#ifndef CONFIG_USER_ONLY
    check_insn_opc_removed(ctx, flags);
#endif
}

/*
 * This code generates a "reserved instruction" exception if 64-bit
 * instructions are not enabled.
 */
static inline void check_larch_64(DisasContext *ctx)
{
    if (unlikely(!(ctx->hflags & LARCH_HFLAG_64))) {
        generate_exception_end(ctx, EXCP_RI);
    }
}
|
|
+
|
|
+/*
|
|
+ * Define small wrappers for gen_load_fpr* so that we have a uniform
|
|
+ * calling interface for 32 and 64-bit FPRs. No sense in changing
|
|
+ * all callers for gen_load_fpr32 when we need the CTX parameter for
|
|
+ * this one use.
|
|
+ */
|
|
/*
 * Uniform FPR-load wrappers so FCOP_CONDNS can use a single name for
 * both the 32- and 64-bit compare variants.
 */
#define gen_ldcmp_fpr32(ctx, x, y) gen_load_fpr32(ctx, x, y)
#define gen_ldcmp_fpr64(ctx, x, y) gen_load_fpr64(ctx, x, y)
/*
 * Generate a floating-point compare for format 'fmt' ('s' or 'd',
 * operating on 'bits'-wide values).  Selector 'n' picks one of the
 * compare predicates (even/odd pairs are the quiet/signalling
 * variants: af, lt, eq, le, un, ult, ueq, ule, ne, or, une); the
 * boolean result in fp0 is written to condition flag 'cd' by STORE.
 * Unsupported selector values abort.
 */
#define FCOP_CONDNS(fmt, ifmt, bits, STORE)                               \
    static inline void gen_fcmp_##fmt(DisasContext *ctx, int n, int ft,   \
                                      int fs, int cd)                     \
    {                                                                     \
        TCGv_i##bits fp0 = tcg_temp_new_i##bits();                        \
        TCGv_i##bits fp1 = tcg_temp_new_i##bits();                        \
        TCGv_i32 fcc = tcg_const_i32(cd);                                 \
        check_cp1_enabled(ctx);                                           \
        gen_ldcmp_fpr##bits(ctx, fp0, fs);                                \
        gen_ldcmp_fpr##bits(ctx, fp1, ft);                                \
        switch (n) {                                                      \
        case 0:                                                           \
            gen_helper_cmp_##fmt##_af(fp0, cpu_env, fp0, fp1);            \
            break;                                                        \
        case 1:                                                           \
            gen_helper_cmp_##fmt##_saf(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 2:                                                           \
            gen_helper_cmp_##fmt##_lt(fp0, cpu_env, fp0, fp1);            \
            break;                                                        \
        case 3:                                                           \
            gen_helper_cmp_##fmt##_slt(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 4:                                                           \
            gen_helper_cmp_##fmt##_eq(fp0, cpu_env, fp0, fp1);            \
            break;                                                        \
        case 5:                                                           \
            gen_helper_cmp_##fmt##_seq(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 6:                                                           \
            gen_helper_cmp_##fmt##_le(fp0, cpu_env, fp0, fp1);            \
            break;                                                        \
        case 7:                                                           \
            gen_helper_cmp_##fmt##_sle(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 8:                                                           \
            gen_helper_cmp_##fmt##_un(fp0, cpu_env, fp0, fp1);            \
            break;                                                        \
        case 9:                                                           \
            gen_helper_cmp_##fmt##_sun(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 10:                                                          \
            gen_helper_cmp_##fmt##_ult(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 11:                                                          \
            gen_helper_cmp_##fmt##_sult(fp0, cpu_env, fp0, fp1);          \
            break;                                                        \
        case 12:                                                          \
            gen_helper_cmp_##fmt##_ueq(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 13:                                                          \
            gen_helper_cmp_##fmt##_sueq(fp0, cpu_env, fp0, fp1);          \
            break;                                                        \
        case 14:                                                          \
            gen_helper_cmp_##fmt##_ule(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 15:                                                          \
            gen_helper_cmp_##fmt##_sule(fp0, cpu_env, fp0, fp1);          \
            break;                                                        \
        case 16:                                                          \
            gen_helper_cmp_##fmt##_ne(fp0, cpu_env, fp0, fp1);            \
            break;                                                        \
        case 17:                                                          \
            gen_helper_cmp_##fmt##_sne(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 20:                                                          \
            gen_helper_cmp_##fmt##_or(fp0, cpu_env, fp0, fp1);            \
            break;                                                        \
        case 21:                                                          \
            gen_helper_cmp_##fmt##_sor(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 24:                                                          \
            gen_helper_cmp_##fmt##_une(fp0, cpu_env, fp0, fp1);           \
            break;                                                        \
        case 25:                                                          \
            gen_helper_cmp_##fmt##_sune(fp0, cpu_env, fp0, fp1);          \
            break;                                                        \
        default:                                                          \
            abort();                                                      \
        }                                                                 \
        STORE;                                                            \
        tcg_temp_free_i##bits(fp0);                                       \
        tcg_temp_free_i##bits(fp1);                                       \
        tcg_temp_free_i32(fcc);                                           \
    }

FCOP_CONDNS(d, FMT_D, 64, gen_helper_movreg2cf_i64(cpu_env, fcc, fp0))
FCOP_CONDNS(s, FMT_S, 32, gen_helper_movreg2cf_i32(cpu_env, fcc, fp0))
#undef FCOP_CONDNS
#undef gen_ldcmp_fpr32
#undef gen_ldcmp_fpr64
|
|
+
|
|
+/* load/store instructions. */
|
|
#ifdef CONFIG_USER_ONLY
/*
 * Linked-load (LL) emitter for user mode: perform a plain load and record
 * the address and loaded value in env->lladdr / env->llval so a later SC
 * can emulate the LL/SC pair with a cmpxchg (see gen_st_cond).
 *
 * NOTE(review): the 'mem_idx' parameter is unused here; the load goes
 * through ctx->mem_idx instead — confirm this is intentional.
 */
#define OP_LD_ATOMIC(insn, fname)                                         \
    static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx,     \
                                    DisasContext *ctx)                    \
    {                                                                     \
        TCGv t0 = tcg_temp_new();                                         \
        tcg_gen_mov_tl(t0, arg1);                                         \
        tcg_gen_qemu_##fname(ret, arg1, ctx->mem_idx);                    \
        tcg_gen_st_tl(t0, cpu_env, offsetof(CPULOONGARCHState, lladdr));  \
        tcg_gen_st_tl(ret, cpu_env, offsetof(CPULOONGARCHState, llval));  \
        tcg_temp_free(t0);                                                \
    }
#else
/*
 * System mode: go through a runtime helper so the access (possibly to
 * I/O memory) is performed with full CPU state available.
 */
#define OP_LD_ATOMIC(insn, fname)                                         \
    static inline void op_ld_##insn(TCGv ret, TCGv arg1, int mem_idx,     \
                                    DisasContext *ctx)                    \
    {                                                                     \
        gen_helper_1e1i(insn, ret, arg1, mem_idx);                        \
    }
#endif
|
|
+
|
|
+static void gen_base_offset_addr(DisasContext *ctx, TCGv addr, int base,
|
|
+ int offset)
|
|
+{
|
|
+ if (base == 0) {
|
|
+ tcg_gen_movi_tl(addr, offset);
|
|
+ } else if (offset == 0) {
|
|
+ gen_load_gpr(addr, base);
|
|
+ } else {
|
|
+ tcg_gen_movi_tl(addr, offset);
|
|
+ gen_op_addr_add(ctx, addr, cpu_gpr[base], addr);
|
|
+ }
|
|
+}
|
|
+
|
|
/* Load */
/*
 * Emit a general-purpose load: GPR[rt] = mem[GPR[base] + offset], with
 * access size and signedness selected by 'opc'.
 */
static void gen_ld(DisasContext *ctx, uint32_t opc, int rt, int base,
                   int offset)
{
    TCGv t0;
    int mem_idx = ctx->mem_idx;

    t0 = tcg_temp_new();
    gen_base_offset_addr(ctx, t0, base, offset);

    switch (opc) {
    case OPC_LARCH_LD_WU:
        /* 32-bit zero-extending load. */
        tcg_gen_qemu_ld_tl(t0, t0, mem_idx,
                           MO_TEUL | ctx->default_tcg_memop_mask);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_LDPTR_D:
    case OPC_LARCH_LD_D:
        /* 64-bit load. */
        tcg_gen_qemu_ld_tl(t0, t0, mem_idx,
                           MO_TEQ | ctx->default_tcg_memop_mask);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_LL_D:
        /*
         * NOTE(review): no memory access is emitted for LL.D here, so rt
         * receives the computed address rather than a loaded value, and
         * lladdr/llval are not recorded.  If LL.D really reaches this
         * path, an op_ld_* linked-load call appears to be missing —
         * confirm against the instruction decoder.
         */
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_LDPTR_W:
    case OPC_LARCH_LD_W:
        /* 32-bit sign-extending load. */
        tcg_gen_qemu_ld_tl(t0, t0, mem_idx,
                           MO_TESL | ctx->default_tcg_memop_mask);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_LD_H:
        tcg_gen_qemu_ld_tl(t0, t0, mem_idx,
                           MO_TESW | ctx->default_tcg_memop_mask);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_LD_HU:
        tcg_gen_qemu_ld_tl(t0, t0, mem_idx,
                           MO_TEUW | ctx->default_tcg_memop_mask);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_LD_B:
        tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_SB);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_LD_BU:
        tcg_gen_qemu_ld_tl(t0, t0, mem_idx, MO_UB);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_LL_W:
        /* NOTE(review): same concern as the LL.D case above. */
        gen_store_gpr(t0, rt);
        break;
    }

    tcg_temp_free(t0);
}
|
|
+
|
|
/* Store */
/*
 * Emit a general-purpose store: mem[GPR[base] + offset] = GPR[rt],
 * with access size selected by 'opc'.
 */
static void gen_st(DisasContext *ctx, uint32_t opc, int rt, int base,
                   int offset)
{
    TCGv t0 = tcg_temp_new();   /* effective address */
    TCGv t1 = tcg_temp_new();   /* value to store */
    int mem_idx = ctx->mem_idx;

    gen_base_offset_addr(ctx, t0, base, offset);
    gen_load_gpr(t1, rt);

    switch (opc) {
    case OPC_LARCH_STPTR_D:
    case OPC_LARCH_ST_D:
        tcg_gen_qemu_st_tl(t1, t0, mem_idx,
                           MO_TEQ | ctx->default_tcg_memop_mask);
        break;
    case OPC_LARCH_STPTR_W:
    case OPC_LARCH_ST_W:
        tcg_gen_qemu_st_tl(t1, t0, mem_idx,
                           MO_TEUL | ctx->default_tcg_memop_mask);
        break;
    case OPC_LARCH_ST_H:
        tcg_gen_qemu_st_tl(t1, t0, mem_idx,
                           MO_TEUW | ctx->default_tcg_memop_mask);
        break;
    case OPC_LARCH_ST_B:
        tcg_gen_qemu_st_tl(t1, t0, mem_idx, MO_8);
        break;
    }
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
|
|
+
|
|
/* Store conditional */
/*
 * Emit SC (store-conditional): if the address matches the one recorded by
 * the preceding LL, atomically cmpxchg the remembered llval against
 * memory; GPR[rt] is clobbered with the success flag (1 = stored, 0 = not).
 */
static void gen_st_cond(DisasContext *ctx, int rt, int base, int offset,
                        MemOp tcg_mo, bool eva)
{
    TCGv addr, t0, val;
    TCGLabel *l1 = gen_new_label();
    TCGLabel *done = gen_new_label();

    t0 = tcg_temp_new();
    addr = tcg_temp_new();
    /* compare the address against that of the preceding LL */
    gen_base_offset_addr(ctx, addr, base, offset);
    tcg_gen_brcond_tl(TCG_COND_EQ, addr, cpu_lladdr, l1);
    tcg_temp_free(addr);
    /* Address mismatch: SC fails, rt = 0. */
    tcg_gen_movi_tl(t0, 0);
    gen_store_gpr(t0, rt);
    tcg_gen_br(done);

    gen_set_label(l1);
    /* generate cmpxchg */
    val = tcg_temp_new();
    gen_load_gpr(val, rt);
    /* 'eva' selects the user-mode memory index instead of the current one. */
    tcg_gen_atomic_cmpxchg_tl(t0, cpu_lladdr, cpu_llval, val,
                              eva ? LARCH_HFLAG_UM : ctx->mem_idx, tcg_mo);
    /* rt = 1 iff memory still held llval (i.e. the store happened). */
    tcg_gen_setcond_tl(TCG_COND_EQ, t0, t0, cpu_llval);
    gen_store_gpr(t0, rt);
    tcg_temp_free(val);

    gen_set_label(done);
    tcg_temp_free(t0);
}
|
|
+
|
|
/* Load and store */
/*
 * Emit a floating-point load or store of FPR 'ft' at address 't0'
 * (FLD.S/FST.S use 32-bit accesses, FLD.D/FST.D 64-bit).
 */
static void gen_flt_ldst(DisasContext *ctx, uint32_t opc, int ft, TCGv t0)
{
    /*
     * Don't do NOP if destination is zero: we must perform the actual
     * memory access.
     */
    switch (opc) {
    case OPC_LARCH_FLD_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        tcg_gen_qemu_ld_i32(fp0, t0, ctx->mem_idx,
                            MO_TESL | ctx->default_tcg_memop_mask);
        gen_store_fpr32(ctx, fp0, ft);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FST_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        gen_load_fpr32(ctx, fp0, ft);
        tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx,
                            MO_TEUL | ctx->default_tcg_memop_mask);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FLD_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx,
                            MO_TEQ | ctx->default_tcg_memop_mask);
        gen_store_fpr64(ctx, fp0, ft);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FST_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        gen_load_fpr64(ctx, fp0, ft);
        tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx,
                            MO_TEQ | ctx->default_tcg_memop_mask);
        tcg_temp_free_i64(fp0);
    } break;
    default:
        /* Unknown opcode: raise Reserved Instruction. */
        LARCH_INVAL("flt_ldst");
        generate_exception_end(ctx, EXCP_RI);
        break;
    }
}
|
|
+
|
|
+static void gen_fp_ldst(DisasContext *ctx, uint32_t op, int rt, int rs,
|
|
+ int16_t imm)
|
|
+{
|
|
+ TCGv t0 = tcg_temp_new();
|
|
+
|
|
+ check_cp1_enabled(ctx);
|
|
+ gen_base_offset_addr(ctx, t0, rs, imm);
|
|
+ gen_flt_ldst(ctx, op, rt, t0);
|
|
+ tcg_temp_free(t0);
|
|
+}
|
|
+
|
|
+/* Arithmetic with immediate operand */
|
|
+static void gen_arith_imm(DisasContext *ctx, uint32_t opc, int rt, int rs,
|
|
+ int imm)
|
|
+{
|
|
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
|
|
+
|
|
+ if (rt == 0) {
|
|
+ /*
|
|
+ * If no destination, treat it as a NOP.
|
|
+ * For addi, we must generate the overflow exception when needed.
|
|
+ */
|
|
+ return;
|
|
+ }
|
|
+ switch (opc) {
|
|
+ case OPC_LARCH_ADDI_W:
|
|
+ if (rs != 0) {
|
|
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
|
|
+ tcg_gen_ext32s_tl(cpu_gpr[rt], cpu_gpr[rt]);
|
|
+ } else {
|
|
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
|
|
+ }
|
|
+ break;
|
|
+ case OPC_LARCH_ADDI_D:
|
|
+ if (rs != 0) {
|
|
+ tcg_gen_addi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
|
|
+ } else {
|
|
+ tcg_gen_movi_tl(cpu_gpr[rt], uimm);
|
|
+ }
|
|
+ break;
|
|
+ }
|
|
+}
|
|
+
|
|
/* Logic with immediate operand */
/*
 * ANDI/ORI/XORI: GPR[rt] = GPR[rs] op zero-extended 16-bit immediate.
 * The rs == 0 special cases fold the operation against the constant zero.
 */
static void gen_logic_imm(DisasContext *ctx, uint32_t opc, int rt, int rs,
                          int16_t imm)
{
    target_ulong uimm;

    if (rt == 0) {
        /* If no destination, treat it as a NOP. */
        return;
    }
    uimm = (uint16_t)imm;   /* zero-extend the immediate */
    switch (opc) {
    case OPC_LARCH_ANDI:
        if (likely(rs != 0)) {
            tcg_gen_andi_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
        } else {
            /* 0 & imm == 0 */
            tcg_gen_movi_tl(cpu_gpr[rt], 0);
        }
        break;
    case OPC_LARCH_ORI:
        if (rs != 0) {
            tcg_gen_ori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
        } else {
            /* 0 | imm == imm */
            tcg_gen_movi_tl(cpu_gpr[rt], uimm);
        }
        break;
    case OPC_LARCH_XORI:
        if (likely(rs != 0)) {
            tcg_gen_xori_tl(cpu_gpr[rt], cpu_gpr[rs], uimm);
        } else {
            /* 0 ^ imm == imm */
            tcg_gen_movi_tl(cpu_gpr[rt], uimm);
        }
        break;
    default:
        break;
    }
}
|
|
+
|
|
+/* Set on less than with immediate operand */
|
|
+static void gen_slt_imm(DisasContext *ctx, uint32_t opc, int rt, int rs,
|
|
+ int16_t imm)
|
|
+{
|
|
+ target_ulong uimm = (target_long)imm; /* Sign extend to 32/64 bits */
|
|
+ TCGv t0;
|
|
+
|
|
+ if (rt == 0) {
|
|
+ /* If no destination, treat it as a NOP. */
|
|
+ return;
|
|
+ }
|
|
+ t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, rs);
|
|
+ switch (opc) {
|
|
+ case OPC_LARCH_SLTI:
|
|
+ tcg_gen_setcondi_tl(TCG_COND_LT, cpu_gpr[rt], t0, uimm);
|
|
+ break;
|
|
+ case OPC_LARCH_SLTIU:
|
|
+ tcg_gen_setcondi_tl(TCG_COND_LTU, cpu_gpr[rt], t0, uimm);
|
|
+ break;
|
|
+ }
|
|
+ tcg_temp_free(t0);
|
|
+}
|
|
+
|
|
/* Shifts with immediate operand */
/*
 * 32-bit immediate shifts/rotate: GPR[rt] = op(GPR[rs], uimm), where the
 * shift amount is the low 5 bits of the immediate.
 */
static void gen_shift_imm(DisasContext *ctx, uint32_t opc, int rt, int rs,
                          int16_t imm)
{
    target_ulong uimm = ((uint16_t)imm) & 0x1f;
    TCGv t0;

    if (rt == 0) {
        /* If no destination, treat it as a NOP. */
        return;
    }

    t0 = tcg_temp_new();
    gen_load_gpr(t0, rs);
    switch (opc) {
    case OPC_LARCH_SRAI_W:
        /*
         * NOTE(review): assumes the source is already canonically
         * sign-extended, so a full-width arithmetic shift suffices.
         */
        tcg_gen_sari_tl(cpu_gpr[rt], t0, uimm);
        break;
    case OPC_LARCH_SRLI_W:
        if (uimm != 0) {
            /* Zero-extend first so bits 63..32 don't leak into the result. */
            tcg_gen_ext32u_tl(t0, t0);
            tcg_gen_shri_tl(cpu_gpr[rt], t0, uimm);
        } else {
            /* Shift by 0 still re-canonicalizes (sign-extends) the value. */
            tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
        }
        break;
    case OPC_LARCH_ROTRI_W:
        if (uimm != 0) {
            /* Rotate in 32-bit space, then sign-extend the result. */
            TCGv_i32 t1 = tcg_temp_new_i32();

            tcg_gen_trunc_tl_i32(t1, t0);
            tcg_gen_rotri_i32(t1, t1, uimm);
            tcg_gen_ext_i32_tl(cpu_gpr[rt], t1);
            tcg_temp_free_i32(t1);
        } else {
            tcg_gen_ext32s_tl(cpu_gpr[rt], t0);
        }
        break;
    }
    tcg_temp_free(t0);
}
|
|
+
|
|
/* Arithmetic */
/*
 * ADD.W/SUB.W/ADD.D/SUB.D: GPR[rd] = GPR[rs] op GPR[rt].  The .W forms
 * sign-extend the 32-bit result.  Operands that are r0 are folded to
 * moves/negations/constants at translation time.
 */
static void gen_arith(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt)
{
    if (rd == 0) {
        /*
         * If no destination, treat it as a NOP.
         * For add & sub, we must generate the
         * overflow exception when needed.
         */
        return;
    }

    switch (opc) {
    case OPC_LARCH_ADD_W:
        if (rs != 0 && rt != 0) {
            tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
            tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
        } else if (rs == 0 && rt != 0) {
            /* 0 + rt == rt */
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
        } else if (rs != 0 && rt == 0) {
            /* rs + 0 == rs */
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
        } else {
            tcg_gen_movi_tl(cpu_gpr[rd], 0);
        }
        break;
    case OPC_LARCH_SUB_W:
        if (rs != 0 && rt != 0) {
            tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
            tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
        } else if (rs == 0 && rt != 0) {
            /* 0 - rt == -rt */
            tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
            tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
        } else if (rs != 0 && rt == 0) {
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
        } else {
            tcg_gen_movi_tl(cpu_gpr[rd], 0);
        }
        break;
    case OPC_LARCH_ADD_D:
        if (rs != 0 && rt != 0) {
            tcg_gen_add_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
        } else if (rs == 0 && rt != 0) {
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
        } else if (rs != 0 && rt == 0) {
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
        } else {
            tcg_gen_movi_tl(cpu_gpr[rd], 0);
        }
        break;
    case OPC_LARCH_SUB_D:
        if (rs != 0 && rt != 0) {
            tcg_gen_sub_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
        } else if (rs == 0 && rt != 0) {
            tcg_gen_neg_tl(cpu_gpr[rd], cpu_gpr[rt]);
        } else if (rs != 0 && rt == 0) {
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
        } else {
            tcg_gen_movi_tl(cpu_gpr[rd], 0);
        }
        break;
    }
}
|
|
+
|
|
+/* Conditional move */
|
|
+static void gen_cond_move(DisasContext *ctx, uint32_t opc, int rd, int rs,
|
|
+ int rt)
|
|
+{
|
|
+ TCGv t0, t1, t2;
|
|
+
|
|
+ if (rd == 0) {
|
|
+ /* If no destination, treat it as a NOP. */
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, rt);
|
|
+ t1 = tcg_const_tl(0);
|
|
+ t2 = tcg_temp_new();
|
|
+ gen_load_gpr(t2, rs);
|
|
+ switch (opc) {
|
|
+ case OPC_LARCH_MASKEQZ:
|
|
+ tcg_gen_movcond_tl(TCG_COND_NE, cpu_gpr[rd], t0, t1, t2, t1);
|
|
+ break;
|
|
+ case OPC_LARCH_MASKNEZ:
|
|
+ tcg_gen_movcond_tl(TCG_COND_EQ, cpu_gpr[rd], t0, t1, t2, t1);
|
|
+ break;
|
|
+ }
|
|
+ tcg_temp_free(t2);
|
|
+ tcg_temp_free(t1);
|
|
+ tcg_temp_free(t0);
|
|
+}
|
|
+
|
|
/* Logic */
/*
 * AND/NOR/OR/XOR: GPR[rd] = GPR[rs] op GPR[rt], with r0 operands folded
 * to moves/NOTs/constants at translation time.
 */
static void gen_logic(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt)
{
    if (rd == 0) {
        /* If no destination, treat it as a NOP. */
        return;
    }

    switch (opc) {
    case OPC_LARCH_AND:
        if (likely(rs != 0 && rt != 0)) {
            tcg_gen_and_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
        } else {
            /* Either operand is zero: result is zero. */
            tcg_gen_movi_tl(cpu_gpr[rd], 0);
        }
        break;
    case OPC_LARCH_NOR:
        if (rs != 0 && rt != 0) {
            tcg_gen_nor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
        } else if (rs == 0 && rt != 0) {
            /* nor(0, rt) == ~rt */
            tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rt]);
        } else if (rs != 0 && rt == 0) {
            tcg_gen_not_tl(cpu_gpr[rd], cpu_gpr[rs]);
        } else {
            /* nor(0, 0) == all ones */
            tcg_gen_movi_tl(cpu_gpr[rd], ~((target_ulong)0));
        }
        break;
    case OPC_LARCH_OR:
        if (likely(rs != 0 && rt != 0)) {
            tcg_gen_or_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
        } else if (rs == 0 && rt != 0) {
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
        } else if (rs != 0 && rt == 0) {
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
        } else {
            tcg_gen_movi_tl(cpu_gpr[rd], 0);
        }
        break;
    case OPC_LARCH_XOR:
        if (likely(rs != 0 && rt != 0)) {
            tcg_gen_xor_tl(cpu_gpr[rd], cpu_gpr[rs], cpu_gpr[rt]);
        } else if (rs == 0 && rt != 0) {
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rt]);
        } else if (rs != 0 && rt == 0) {
            tcg_gen_mov_tl(cpu_gpr[rd], cpu_gpr[rs]);
        } else {
            tcg_gen_movi_tl(cpu_gpr[rd], 0);
        }
        break;
    }
}
|
|
+
|
|
+/* Set on lower than */
|
|
+static void gen_slt(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt)
|
|
+{
|
|
+ TCGv t0, t1;
|
|
+
|
|
+ if (rd == 0) {
|
|
+ /* If no destination, treat it as a NOP. */
|
|
+ return;
|
|
+ }
|
|
+
|
|
+ t0 = tcg_temp_new();
|
|
+ t1 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, rs);
|
|
+ gen_load_gpr(t1, rt);
|
|
+ switch (opc) {
|
|
+ case OPC_LARCH_SLT:
|
|
+ tcg_gen_setcond_tl(TCG_COND_LT, cpu_gpr[rd], t0, t1);
|
|
+ break;
|
|
+ case OPC_LARCH_SLTU:
|
|
+ tcg_gen_setcond_tl(TCG_COND_LTU, cpu_gpr[rd], t0, t1);
|
|
+ break;
|
|
+ }
|
|
+ tcg_temp_free(t0);
|
|
+ tcg_temp_free(t1);
|
|
+}
|
|
+
|
|
/* Shifts */
/*
 * Register-amount shifts/rotates.  .W forms mask the amount to 5 bits and
 * sign-extend the 32-bit result; .D forms mask the amount to 6 bits.
 * t0 holds the shift amount (from rs), t1 the value (from rt).
 */
static void gen_shift(DisasContext *ctx, uint32_t opc, int rd, int rs, int rt)
{
    TCGv t0, t1;

    if (rd == 0) {
        /*
         * If no destination, treat it as a NOP.
         * For add & sub, we must generate the
         * overflow exception when needed.
         */
        return;
    }

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();
    gen_load_gpr(t0, rs);
    gen_load_gpr(t1, rt);
    switch (opc) {
    case OPC_LARCH_SLL_W:
        tcg_gen_andi_tl(t0, t0, 0x1f);
        tcg_gen_shl_tl(t0, t1, t0);
        tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
        break;
    case OPC_LARCH_SRA_W:
        /* Value assumed canonically sign-extended: full-width sar is OK. */
        tcg_gen_andi_tl(t0, t0, 0x1f);
        tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
        break;
    case OPC_LARCH_SRL_W:
        /* Zero-extend before the logical shift, then re-canonicalize. */
        tcg_gen_ext32u_tl(t1, t1);
        tcg_gen_andi_tl(t0, t0, 0x1f);
        tcg_gen_shr_tl(t0, t1, t0);
        tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
        break;
    case OPC_LARCH_ROTR_W: {
        /* Rotate in 32-bit space, then sign-extend. */
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();

        tcg_gen_trunc_tl_i32(t2, t0);
        tcg_gen_trunc_tl_i32(t3, t1);
        tcg_gen_andi_i32(t2, t2, 0x1f);
        tcg_gen_rotr_i32(t2, t3, t2);
        tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } break;
    case OPC_LARCH_SLL_D:
        tcg_gen_andi_tl(t0, t0, 0x3f);
        tcg_gen_shl_tl(cpu_gpr[rd], t1, t0);
        break;
    case OPC_LARCH_SRA_D:
        tcg_gen_andi_tl(t0, t0, 0x3f);
        tcg_gen_sar_tl(cpu_gpr[rd], t1, t0);
        break;
    case OPC_LARCH_SRL_D:
        tcg_gen_andi_tl(t0, t0, 0x3f);
        tcg_gen_shr_tl(cpu_gpr[rd], t1, t0);
        break;
    case OPC_LARCH_ROTR_D:
        tcg_gen_andi_tl(t0, t0, 0x3f);
        tcg_gen_rotr_tl(cpu_gpr[rd], t1, t0);
        break;
    }
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
|
|
+
|
|
+static inline void gen_r6_ld(target_long addr, int reg, int memidx,
|
|
+ MemOp memop)
|
|
+{
|
|
+ TCGv t0 = tcg_const_tl(addr);
|
|
+ tcg_gen_qemu_ld_tl(t0, t0, memidx, memop);
|
|
+ gen_store_gpr(t0, reg);
|
|
+ tcg_temp_free(t0);
|
|
+}
|
|
+
|
|
/*
 * Multiply/divide/modulo emitter.  Division cases pre-process the divisor
 * so the host division never sees the two C-undefined inputs:
 *   - divide by zero: the divisor is replaced by a nonzero value, and
 *   - signed MIN / -1 overflow: likewise forced to a safe divisor,
 * which fixes the guest-visible result of those cases deterministically.
 */
static void gen_r6_muldiv(DisasContext *ctx, int opc, int rd, int rs, int rt)
{
    TCGv t0, t1;

    if (rd == 0) {
        /* Treat as NOP. */
        return;
    }

    t0 = tcg_temp_new();
    t1 = tcg_temp_new();

    gen_load_gpr(t0, rs);
    gen_load_gpr(t1, rt);

    switch (opc) {
    case OPC_LARCH_DIV_W: {
        TCGv t2 = tcg_temp_new();
        TCGv t3 = tcg_temp_new();
        tcg_gen_ext32s_tl(t0, t0);
        tcg_gen_ext32s_tl(t1, t1);
        /* t2 = overflow (INT_MIN / -1); t3 folded in for divide-by-zero. */
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_tl(t2, t2, t3);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_tl(t2, t2, t3);
        tcg_gen_movi_tl(t3, 0);
        /* On either bad case replace the divisor so the host div is safe. */
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
        tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
        tcg_temp_free(t3);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_MOD_W: {
        TCGv t2 = tcg_temp_new();
        TCGv t3 = tcg_temp_new();
        tcg_gen_ext32s_tl(t0, t0);
        tcg_gen_ext32s_tl(t1, t1);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, INT_MIN);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1);
        tcg_gen_and_tl(t2, t2, t3);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_tl(t2, t2, t3);
        tcg_gen_movi_tl(t3, 0);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
        tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
        tcg_temp_free(t3);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_DIV_WU: {
        /* Unsigned: only divide-by-zero needs guarding (divisor -> 1). */
        TCGv t2 = tcg_const_tl(0);
        TCGv t3 = tcg_const_tl(1);
        tcg_gen_ext32u_tl(t0, t0);
        tcg_gen_ext32u_tl(t1, t1);
        tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
        tcg_gen_divu_tl(cpu_gpr[rd], t0, t1);
        tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
        tcg_temp_free(t3);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_MOD_WU: {
        TCGv t2 = tcg_const_tl(0);
        TCGv t3 = tcg_const_tl(1);
        tcg_gen_ext32u_tl(t0, t0);
        tcg_gen_ext32u_tl(t1, t1);
        tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
        tcg_gen_remu_tl(cpu_gpr[rd], t0, t1);
        tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
        tcg_temp_free(t3);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_MUL_W: {
        /* 32-bit multiply, low half, sign-extended into rd. */
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t2, t0);
        tcg_gen_trunc_tl_i32(t3, t1);
        tcg_gen_mul_i32(t2, t2, t3);
        tcg_gen_ext_i32_tl(cpu_gpr[rd], t2);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } break;
    case OPC_LARCH_MULH_W: {
        /* Signed 32x32 -> high 32 bits. */
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t2, t0);
        tcg_gen_trunc_tl_i32(t3, t1);
        tcg_gen_muls2_i32(t2, t3, t2, t3);
        tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } break;
    case OPC_LARCH_MULH_WU: {
        /* Unsigned 32x32 -> high 32 bits. */
        TCGv_i32 t2 = tcg_temp_new_i32();
        TCGv_i32 t3 = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(t2, t0);
        tcg_gen_trunc_tl_i32(t3, t1);
        tcg_gen_mulu2_i32(t2, t3, t2, t3);
        tcg_gen_ext_i32_tl(cpu_gpr[rd], t3);
        tcg_temp_free_i32(t2);
        tcg_temp_free_i32(t3);
    } break;
    case OPC_LARCH_DIV_D: {
        /* 64-bit signed: guard INT64_MIN / -1 and divide-by-zero. */
        TCGv t2 = tcg_temp_new();
        TCGv t3 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
        tcg_gen_and_tl(t2, t2, t3);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_tl(t2, t2, t3);
        tcg_gen_movi_tl(t3, 0);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_div_tl(cpu_gpr[rd], t0, t1);
        tcg_temp_free(t3);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_MOD_D: {
        TCGv t2 = tcg_temp_new();
        TCGv t3 = tcg_temp_new();
        tcg_gen_setcondi_tl(TCG_COND_EQ, t2, t0, -1LL << 63);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, -1LL);
        tcg_gen_and_tl(t2, t2, t3);
        tcg_gen_setcondi_tl(TCG_COND_EQ, t3, t1, 0);
        tcg_gen_or_tl(t2, t2, t3);
        tcg_gen_movi_tl(t3, 0);
        tcg_gen_movcond_tl(TCG_COND_NE, t1, t2, t3, t2, t1);
        tcg_gen_rem_tl(cpu_gpr[rd], t0, t1);
        tcg_temp_free(t3);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_DIV_DU: {
        TCGv t2 = tcg_const_tl(0);
        TCGv t3 = tcg_const_tl(1);
        tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
        tcg_gen_divu_i64(cpu_gpr[rd], t0, t1);
        tcg_temp_free(t3);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_MOD_DU: {
        TCGv t2 = tcg_const_tl(0);
        TCGv t3 = tcg_const_tl(1);
        tcg_gen_movcond_tl(TCG_COND_EQ, t1, t1, t2, t3, t1);
        tcg_gen_remu_i64(cpu_gpr[rd], t0, t1);
        tcg_temp_free(t3);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_MUL_D:
        tcg_gen_mul_i64(cpu_gpr[rd], t0, t1);
        break;
    case OPC_LARCH_MULH_D: {
        /* Signed 64x64 -> high 64 bits (low half discarded via t2). */
        TCGv t2 = tcg_temp_new();
        tcg_gen_muls2_i64(t2, cpu_gpr[rd], t0, t1);
        tcg_temp_free(t2);
    } break;
    case OPC_LARCH_MULH_DU: {
        TCGv t2 = tcg_temp_new();
        tcg_gen_mulu2_i64(t2, cpu_gpr[rd], t0, t1);
        tcg_temp_free(t2);
    } break;
    default:
        LARCH_INVAL("r6 mul/div");
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }
out:
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
|
|
+
|
|
/*
 * Count leading ones/zeros (CLO.W/CLZ.W/CLO.D/CLZ.D).
 * Note: t0 aliases cpu_gpr[rd], so the computation runs in place in the
 * destination register; rs is copied into it first via gen_load_gpr.
 */
static void gen_cl(DisasContext *ctx, uint32_t opc, int rd, int rs)
{
    TCGv t0;

    if (rd == 0) {
        /* Treat as NOP. */
        return;
    }
    t0 = cpu_gpr[rd];
    gen_load_gpr(t0, rs);

    switch (opc) {
    case OPC_LARCH_CLO_W:
    case OPC_LARCH_CLO_D:
        /* Count-leading-ones == count-leading-zeros of the complement. */
        tcg_gen_not_tl(t0, t0);
        break;
    }

    switch (opc) {
    case OPC_LARCH_CLO_W:
    case OPC_LARCH_CLZ_W:
        /*
         * Zero-extend so the clz counts within the full register, then
         * subtract the width of the discarded upper half.
         */
        tcg_gen_ext32u_tl(t0, t0);
        tcg_gen_clzi_tl(t0, t0, TARGET_LONG_BITS);
        tcg_gen_subi_tl(t0, t0, TARGET_LONG_BITS - 32);
        break;
    case OPC_LARCH_CLO_D:
    case OPC_LARCH_CLZ_D:
        tcg_gen_clzi_i64(t0, t0, 64);
        break;
    }
}
|
|
+
|
|
+static inline bool use_goto_tb(DisasContext *ctx, target_ulong dest)
|
|
+{
|
|
+ if (unlikely(ctx->base.singlestep_enabled)) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+ return (ctx->base.tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK);
|
|
+#else
|
|
+ return true;
|
|
+#endif
|
|
+}
|
|
+
|
|
/*
 * Jump to 'dest': chain directly to the next TB when allowed, otherwise
 * store the new PC and go back through the TB lookup (raising a debug
 * exception first when single-stepping).
 */
static inline void gen_goto_tb(DisasContext *ctx, int n, target_ulong dest)
{
    if (use_goto_tb(ctx, dest)) {
        tcg_gen_goto_tb(n);
        gen_save_pc(dest);
        tcg_gen_exit_tb(ctx->base.tb, n);
    } else {
        gen_save_pc(dest);
        if (ctx->base.singlestep_enabled) {
            /* Flush translation-time state before raising the exception. */
            save_cpu_state(ctx, 0);
            gen_helper_raise_exception_debug(cpu_env);
        }
        tcg_gen_lookup_and_goto_ptr();
    }
}
|
|
+
|
|
/* Branches */
/*
 * Emit a conditional/unconditional PC-relative branch.  Sets
 * ctx->btarget and the hflags branch state (LARCH_HFLAG_B for an
 * always-taken branch, LARCH_HFLAG_BC for a computed condition held in
 * the global 'bcond'); the actual control transfer is emitted later by
 * the epilogue that consumes those flags.
 */
static void gen_compute_branch(DisasContext *ctx, uint32_t opc, int insn_bytes,
                               int rs, int rt, int32_t offset)
{
    target_ulong btgt = -1;
    int bcond_compute = 0;
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    /* A branch while another branch is already pending is invalid. */
    if (ctx->hflags & LARCH_HFLAG_BMASK) {
#ifdef LARCH_DEBUG_DISAS
        LOG_DISAS("Branch at PC 0x" TARGET_FMT_lx "\n", ctx->base.pc_next);
#endif
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }

    /* Load needed operands */
    switch (opc) {
    case OPC_LARCH_BLT:
    case OPC_LARCH_BGE:
    case OPC_LARCH_BLTU:
    case OPC_LARCH_BGEU:
        gen_load_gpr(t0, rs);
        gen_load_gpr(t1, rt);
        bcond_compute = 1;
        btgt = ctx->base.pc_next + offset;
        break;
    case OPC_LARCH_BEQZ:
    case OPC_LARCH_B:
    case OPC_LARCH_BEQ:
    case OPC_LARCH_BNEZ:
    case OPC_LARCH_BNE:
        /* Compare two registers */
        if (rs != rt) {
            gen_load_gpr(t0, rs);
            gen_load_gpr(t1, rt);
            bcond_compute = 1;
        }
        /* rs == rt: the outcome is known at translation time. */
        btgt = ctx->base.pc_next + offset;
        break;
    default:
        LARCH_INVAL("branch/jump");
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }
    if (bcond_compute == 0) {
        /* No condition to be computed */
        switch (opc) {
        case OPC_LARCH_BEQZ: /* rx == rx */
        case OPC_LARCH_B:
        case OPC_LARCH_BEQ:
            /* Always take */
            ctx->hflags |= LARCH_HFLAG_B;
            break;
        case OPC_LARCH_BNEZ:
        case OPC_LARCH_BNE:
            /* Never taken (rx != rx is false): treat as NOP. */
            goto out;
        default:
            LARCH_INVAL("branch/jump");
            generate_exception_end(ctx, EXCP_RI);
            goto out;
        }
    } else {
        /* Materialize the branch condition into the global 'bcond'. */
        switch (opc) {
        case OPC_LARCH_BLT:
            tcg_gen_setcond_tl(TCG_COND_LT, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BGE:
            tcg_gen_setcond_tl(TCG_COND_GE, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BLTU:
            tcg_gen_setcond_tl(TCG_COND_LTU, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BGEU:
            tcg_gen_setcond_tl(TCG_COND_GEU, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BEQZ:
        case OPC_LARCH_B:
        case OPC_LARCH_BEQ:
            tcg_gen_setcond_tl(TCG_COND_EQ, bcond, t0, t1);
            goto not_likely;
        case OPC_LARCH_BNEZ:
        case OPC_LARCH_BNE:
            tcg_gen_setcond_tl(TCG_COND_NE, bcond, t0, t1);
            goto not_likely;
        not_likely:
            ctx->hflags |= LARCH_HFLAG_BC;
            break;
        default:
            LARCH_INVAL("conditional branch/jump");
            generate_exception_end(ctx, EXCP_RI);
            goto out;
        }
    }

    ctx->btarget = btgt;

out:
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
|
|
+
|
|
/* special3 bitfield operations */
/*
 * Bitfield extract/insert:
 *   TRPICK.W: rt = sign-extended field of rs at [lsb, lsb+msb]
 *   TRINS.W : insert rs[0..msb-lsb] into rt at [lsb, msb], sign-extend
 * Out-of-range lsb/msb combinations raise Reserved Instruction.
 */
static void gen_bitops(DisasContext *ctx, uint32_t opc, int rt, int rs,
                       int lsb, int msb)
{
    TCGv t0 = tcg_temp_new();
    TCGv t1 = tcg_temp_new();

    gen_load_gpr(t1, rs);
    switch (opc) {
    case OPC_LARCH_TRPICK_W:
        if (lsb + msb > 31) {
            goto fail;
        }
        if (msb != 31) {
            tcg_gen_extract_tl(t0, t1, lsb, msb + 1);
        } else {
            /*
             * The two checks together imply that lsb == 0,
             * so this is a simple sign-extension.
             */
            tcg_gen_ext32s_tl(t0, t1);
        }
        break;
    case OPC_LARCH_TRINS_W:
        if (lsb > msb) {
            goto fail;
        }
        gen_load_gpr(t0, rt);
        tcg_gen_deposit_tl(t0, t0, t1, lsb, msb - lsb + 1);
        tcg_gen_ext32s_tl(t0, t0);
        break;
    default:
    fail:
        LARCH_INVAL("bitops");
        generate_exception_end(ctx, EXCP_RI);
        tcg_temp_free(t0);
        tcg_temp_free(t1);
        return;
    }
    gen_store_gpr(t0, rt);
    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
|
|
+
|
|
/*
 * Byte/halfword shuffles and sub-word sign extensions:
 *   REVB.2H: swap bytes within each 16-bit half of the low 32 bits
 *   EXT.W.B / EXT.W.H: sign-extend byte/halfword
 *   REVB.4H: swap bytes within each of the four 16-bit lanes
 *   REVH.D : swap 16-bit halves within each word, then swap the words
 */
static void gen_bshfl(DisasContext *ctx, uint32_t op2, int rt, int rd)
{
    TCGv t0;

    if (rd == 0) {
        /* If no destination, treat it as a NOP. */
        return;
    }

    t0 = tcg_temp_new();
    gen_load_gpr(t0, rt);
    switch (op2) {
    case OPC_LARCH_REVB_2H: {
        TCGv t1 = tcg_temp_new();
        TCGv t2 = tcg_const_tl(0x00FF00FF);  /* even-byte mask */

        tcg_gen_shri_tl(t1, t0, 8);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_and_tl(t0, t0, t2);
        tcg_gen_shli_tl(t0, t0, 8);
        tcg_gen_or_tl(t0, t0, t1);
        tcg_temp_free(t2);
        tcg_temp_free(t1);
        tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
    } break;
    case OPC_LARCH_EXT_WB:
        tcg_gen_ext8s_tl(cpu_gpr[rd], t0);
        break;
    case OPC_LARCH_EXT_WH:
        tcg_gen_ext16s_tl(cpu_gpr[rd], t0);
        break;
    case OPC_LARCH_REVB_4H: {
        TCGv t1 = tcg_temp_new();
        TCGv t2 = tcg_const_tl(0x00FF00FF00FF00FFULL);

        tcg_gen_shri_tl(t1, t0, 8);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_and_tl(t0, t0, t2);
        tcg_gen_shli_tl(t0, t0, 8);
        tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
        tcg_temp_free(t2);
        tcg_temp_free(t1);
    } break;
    case OPC_LARCH_REVH_D: {
        TCGv t1 = tcg_temp_new();
        TCGv t2 = tcg_const_tl(0x0000FFFF0000FFFFULL);

        /* Swap 16-bit halves within each 32-bit word... */
        tcg_gen_shri_tl(t1, t0, 16);
        tcg_gen_and_tl(t1, t1, t2);
        tcg_gen_and_tl(t0, t0, t2);
        tcg_gen_shli_tl(t0, t0, 16);
        tcg_gen_or_tl(t0, t0, t1);
        /* ...then swap the two 32-bit words. */
        tcg_gen_shri_tl(t1, t0, 32);
        tcg_gen_shli_tl(t0, t0, 32);
        tcg_gen_or_tl(cpu_gpr[rd], t0, t1);
        tcg_temp_free(t2);
        tcg_temp_free(t1);
    } break;
    default:
        LARCH_INVAL("bsfhl");
        generate_exception_end(ctx, EXCP_RI);
        tcg_temp_free(t0);
        return;
    }
    tcg_temp_free(t0);
}
|
|
+
|
|
/* REV with sf==1, opcode==3 ("REV64") */
/* Byte-reverse the full 64-bit register: rd = bswap64(rn). */
static void handle_rev64(DisasContext *ctx, unsigned int rn, unsigned int rd)
{
    tcg_gen_bswap64_i64(cpu_gpr[rd], cpu_gpr[rn]);
}
|
|
+
|
|
/*
 * REV with sf==0, opcode==2
 * REV32 (sf==1, opcode==2)
 */
/*
 * Byte-reverse each 32-bit word of rn independently and concatenate the
 * two reversed words into rd.
 */
static void handle_rev32(DisasContext *ctx, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = tcg_temp_new_i64();
    /*
     * NOTE(review): this initial load looks redundant — tcg_rd is fully
     * overwritten by the bswap below before it is read.
     */
    gen_load_gpr(tcg_rd, rd);

    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = tcg_temp_new_i64();
    gen_load_gpr(tcg_rn, rn);

    /* bswap32_i64 requires zero high word */
    tcg_gen_ext32u_i64(tcg_tmp, tcg_rn);
    tcg_gen_bswap32_i64(tcg_rd, tcg_tmp, TCG_BSWAP_OZ);
    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 32);
    tcg_gen_bswap32_i64(tcg_tmp, tcg_tmp, TCG_BSWAP_OZ);
    tcg_gen_concat32_i64(cpu_gpr[rd], tcg_rd, tcg_tmp);

    tcg_temp_free_i64(tcg_tmp);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_rn);
}
|
|
+
|
|
/* REV16 */
/*
 * Byte-reverse each 16-bit lane of rn: swap the two bytes in every
 * halfword using a shift/mask/merge sequence.
 */
static void handle_rev16(DisasContext *ctx, unsigned int rn, unsigned int rd)
{
    TCGv_i64 tcg_rd = tcg_temp_new_i64();
    TCGv_i64 tcg_rn = tcg_temp_new_i64();
    /*
     * NOTE(review): the load of tcg_rd looks redundant — it is fully
     * overwritten by the and_i64 below before it is read.
     */
    gen_load_gpr(tcg_rd, rd);
    gen_load_gpr(tcg_rn, rn);
    TCGv_i64 tcg_tmp = tcg_temp_new_i64();
    TCGv_i64 mask = tcg_const_i64(0x0000ffff0000ffffull);

    tcg_gen_shri_i64(tcg_tmp, tcg_rn, 16);
    tcg_gen_and_i64(tcg_rd, tcg_rn, mask);
    tcg_gen_and_i64(tcg_tmp, tcg_tmp, mask);
    tcg_gen_shli_i64(tcg_rd, tcg_rd, 16);
    tcg_gen_or_i64(cpu_gpr[rd], tcg_rd, tcg_tmp);

    tcg_temp_free_i64(mask);
    tcg_temp_free_i64(tcg_tmp);
    tcg_temp_free_i64(tcg_rd);
    tcg_temp_free_i64(tcg_rn);
}
|
|
+
|
|
+static void gen_lsa(DisasContext *ctx, int opc, int rd, int rs, int rt,
|
|
+ int imm2)
|
|
+{
|
|
+ TCGv t0;
|
|
+ TCGv t1;
|
|
+ if (rd == 0) {
|
|
+ /* Treat as NOP. */
|
|
+ return;
|
|
+ }
|
|
+ t0 = tcg_temp_new();
|
|
+ t1 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, rs);
|
|
+ gen_load_gpr(t1, rt);
|
|
+ tcg_gen_shli_tl(t0, t0, imm2 + 1);
|
|
+ tcg_gen_add_tl(cpu_gpr[rd], t0, t1);
|
|
+ if (opc == OPC_LARCH_ALSL_W) {
|
|
+ tcg_gen_ext32s_tl(cpu_gpr[rd], cpu_gpr[rd]);
|
|
+ }
|
|
+
|
|
+ tcg_temp_free(t1);
|
|
+ tcg_temp_free(t0);
|
|
+
|
|
+ return;
|
|
+}
|
|
+
|
|
+static void gen_align_bits(DisasContext *ctx, int wordsz, int rd, int rs,
|
|
+ int rt, int bits)
|
|
+{
|
|
+ TCGv t0;
|
|
+ if (rd == 0) {
|
|
+ /* Treat as NOP. */
|
|
+ return;
|
|
+ }
|
|
+ t0 = tcg_temp_new();
|
|
+ if (bits == 0 || bits == wordsz) {
|
|
+ if (bits == 0) {
|
|
+ gen_load_gpr(t0, rt);
|
|
+ } else {
|
|
+ gen_load_gpr(t0, rs);
|
|
+ }
|
|
+ switch (wordsz) {
|
|
+ case 32:
|
|
+ tcg_gen_ext32s_tl(cpu_gpr[rd], t0);
|
|
+ break;
|
|
+ case 64:
|
|
+ tcg_gen_mov_tl(cpu_gpr[rd], t0);
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ TCGv t1 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, rt);
|
|
+ gen_load_gpr(t1, rs);
|
|
+ switch (wordsz) {
|
|
+ case 32: {
|
|
+ TCGv_i64 t2 = tcg_temp_new_i64();
|
|
+ tcg_gen_concat_tl_i64(t2, t1, t0);
|
|
+ tcg_gen_shri_i64(t2, t2, 32 - bits);
|
|
+ gen_move_low32(cpu_gpr[rd], t2);
|
|
+ tcg_temp_free_i64(t2);
|
|
+ } break;
|
|
+ case 64:
|
|
+ tcg_gen_shli_tl(t0, t0, bits);
|
|
+ tcg_gen_shri_tl(t1, t1, 64 - bits);
|
|
+ tcg_gen_or_tl(cpu_gpr[rd], t1, t0);
|
|
+ break;
|
|
+ }
|
|
+ tcg_temp_free(t1);
|
|
+ }
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+}
|
|
+
|
|
/*
 * Byte-granular variant of gen_align_bits(): bp is a byte offset,
 * converted to a bit count before delegating.
 */
static void gen_align(DisasContext *ctx, int wordsz, int rd, int rs, int rt,
                      int bp)
{
    gen_align_bits(ctx, wordsz, rd, rs, rt, bp * 8);
}
|
|
+
|
|
+static void gen_bitswap(DisasContext *ctx, int opc, int rd, int rt)
|
|
+{
|
|
+ TCGv t0;
|
|
+ if (rd == 0) {
|
|
+ /* Treat as NOP. */
|
|
+ return;
|
|
+ }
|
|
+ t0 = tcg_temp_new();
|
|
+ gen_load_gpr(t0, rt);
|
|
+ switch (opc) {
|
|
+ case OPC_LARCH_BREV_4B:
|
|
+ gen_helper_bitswap(cpu_gpr[rd], t0);
|
|
+ break;
|
|
+ case OPC_LARCH_BREV_8B:
|
|
+ gen_helper_dbitswap(cpu_gpr[rd], t0);
|
|
+ break;
|
|
+ }
|
|
+ tcg_temp_free(t0);
|
|
+}
|
|
+
|
|
/*
 * GPR <-> FPR move instructions.
 * rt is the general register, fs the floating-point register.  The
 * "H" variants access the high 32 bits of a 64-bit FPR.  Unknown
 * opcodes raise a reserved-instruction exception.
 */
static void gen_cp1(DisasContext *ctx, uint32_t opc, int rt, int fs)
{
    TCGv t0 = tcg_temp_new();
    check_cp1_enabled(ctx);

    switch (opc) {
    case OPC_LARCH_FR2GR_S: {
        /* Move low 32 bits of FPR[fs], sign-extended, to GPR[rt]. */
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        tcg_gen_ext_i32_tl(t0, fp0);
        tcg_temp_free_i32(fp0);
    }
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_GR2FR_W:
        /* Move low 32 bits of GPR[rt] to FPR[fs]. */
        gen_load_gpr(t0, rt);
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            tcg_gen_trunc_tl_i32(fp0, t0);
            gen_store_fpr32(ctx, fp0, fs);
            tcg_temp_free_i32(fp0);
        }
        break;
    case OPC_LARCH_FR2GR_D:
        /* Full 64-bit FPR -> GPR move. */
        gen_load_fpr64(ctx, t0, fs);
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_GR2FR_D:
        /* Full 64-bit GPR -> FPR move. */
        gen_load_gpr(t0, rt);
        gen_store_fpr64(ctx, t0, fs);
        break;
    case OPC_LARCH_FRH2GR_S: {
        /* Move high 32 bits of FPR[fs], sign-extended, to GPR[rt]. */
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32h(ctx, fp0, fs);
        tcg_gen_ext_i32_tl(t0, fp0);
        tcg_temp_free_i32(fp0);
    }
        gen_store_gpr(t0, rt);
        break;
    case OPC_LARCH_GR2FRH_W:
        /* Move low 32 bits of GPR[rt] to the high half of FPR[fs]. */
        gen_load_gpr(t0, rt);
        {
            TCGv_i32 fp0 = tcg_temp_new_i32();

            tcg_gen_trunc_tl_i32(fp0, t0);
            gen_store_fpr32h(ctx, fp0, fs);
            tcg_temp_free_i32(fp0);
        }
        break;
    default:
        LARCH_INVAL("cp1 move");
        generate_exception_end(ctx, EXCP_RI);
        goto out;
    }

out:
    tcg_temp_free(t0);
}
|
|
+
|
|
/*
 * Paired-single conditional move: copy each 32-bit half of FPR[fs] to
 * FPR[fd] depending on FP condition codes cc and cc+1 in fcsr0.
 * tf selects the polarity: nonzero moves when the condition bit is set
 * (branch past the move when it is clear), zero moves when clear.
 */
static inline void gen_movcf_ps(DisasContext *ctx, int fs, int fd, int cc,
                                int tf)
{
    int cond;
    TCGv_i32 t0 = tcg_temp_new_i32();
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    /* The branch *skips* the move, hence the inverted condition. */
    if (tf) {
        cond = TCG_COND_EQ;
    } else {
        cond = TCG_COND_NE;
    }

    /* Low half, controlled by condition code cc. */
    tcg_gen_andi_i32(t0, fpu_fcsr0, 1 << get_fp_bit(cc));
    tcg_gen_brcondi_i32(cond, t0, 0, l1);
    gen_load_fpr32(ctx, t0, fs);
    gen_store_fpr32(ctx, t0, fd);
    gen_set_label(l1);

    /* High half, controlled by condition code cc + 1. */
    tcg_gen_andi_i32(t0, fpu_fcsr0, 1 << get_fp_bit(cc + 1));
    tcg_gen_brcondi_i32(cond, t0, 0, l2);
    gen_load_fpr32h(ctx, t0, fs);
    gen_store_fpr32h(ctx, t0, fd);
    tcg_temp_free_i32(t0);
    gen_set_label(l2);
}
|
|
+
|
|
/*
 * Translate one scalar floating-point arithmetic / conversion
 * instruction.
 *
 * ft, fs are source FPRs, fd the destination FPR.  Opcode suffixes:
 * _S = single, _D = double; FTINT* = float-to-int with the rounding
 * mode encoded in the mnemonic (RNE/RZ/RP/RM), FFINT* = int-to-float.
 * Unknown opcodes raise a reserved-instruction exception.
 * NOTE(review): the cc parameter is unused here - presumably kept for
 * signature compatibility with FP-compare translation; confirm.
 */
static void gen_farith(DisasContext *ctx, uint32_t opc, int ft, int fs, int fd,
                       int cc)
{
    check_cp1_enabled(ctx);
    switch (opc) {
    /* ---- single-precision arithmetic ---- */
    case OPC_LARCH_FADD_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i32 fp1 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_load_fpr32(ctx, fp1, ft);
        gen_helper_float_add_s(fp0, cpu_env, fp0, fp1);
        tcg_temp_free_i32(fp1);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FSUB_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i32 fp1 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_load_fpr32(ctx, fp1, ft);
        gen_helper_float_sub_s(fp0, cpu_env, fp0, fp1);
        tcg_temp_free_i32(fp1);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FMUL_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i32 fp1 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_load_fpr32(ctx, fp1, ft);
        gen_helper_float_mul_s(fp0, cpu_env, fp0, fp1);
        tcg_temp_free_i32(fp1);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FDIV_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i32 fp1 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_load_fpr32(ctx, fp1, ft);
        gen_helper_float_div_s(fp0, cpu_env, fp0, fp1);
        tcg_temp_free_i32(fp1);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FSQRT_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_sqrt_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FABS_S: {
        /* abs/chs are pure sign-bit ops and take no env. */
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_abs_s(fp0, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FMOV_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FNEG_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_chs_s(fp0, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    /* ---- single -> 64-bit integer, explicit rounding mode ---- */
    case OPC_LARCH_FTINTRNE_L_S: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr32(ctx, fp32, fs);
        gen_helper_float_round_l_s(fp64, cpu_env, fp32);
        tcg_temp_free_i32(fp32);
        gen_store_fpr64(ctx, fp64, fd);
        tcg_temp_free_i64(fp64);
    } break;
    case OPC_LARCH_FTINTRZ_L_S: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr32(ctx, fp32, fs);
        gen_helper_float_trunc_l_s(fp64, cpu_env, fp32);
        tcg_temp_free_i32(fp32);
        gen_store_fpr64(ctx, fp64, fd);
        tcg_temp_free_i64(fp64);
    } break;
    case OPC_LARCH_FTINTRP_L_S: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr32(ctx, fp32, fs);
        gen_helper_float_ceil_l_s(fp64, cpu_env, fp32);
        tcg_temp_free_i32(fp32);
        gen_store_fpr64(ctx, fp64, fd);
        tcg_temp_free_i64(fp64);
    } break;
    case OPC_LARCH_FTINTRM_L_S: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr32(ctx, fp32, fs);
        gen_helper_float_floor_l_s(fp64, cpu_env, fp32);
        tcg_temp_free_i32(fp32);
        gen_store_fpr64(ctx, fp64, fd);
        tcg_temp_free_i64(fp64);
    } break;
    /* ---- single -> 32-bit integer, explicit rounding mode ---- */
    case OPC_LARCH_FTINTRNE_W_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_round_w_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FTINTRZ_W_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_trunc_w_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FTINTRP_W_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_ceil_w_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FTINTRM_W_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_floor_w_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FRECIP_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_recip_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FRSQRT_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_rsqrt_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FRINT_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_rint_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FCLASS_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_class_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    /* ---- single-precision min/max (and magnitude variants) ---- */
    case OPC_LARCH_FMIN_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i32 fp1 = tcg_temp_new_i32();
        TCGv_i32 fp2 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_load_fpr32(ctx, fp1, ft);
        gen_helper_float_min_s(fp2, cpu_env, fp0, fp1);
        gen_store_fpr32(ctx, fp2, fd);
        tcg_temp_free_i32(fp2);
        tcg_temp_free_i32(fp1);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FMINA_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i32 fp1 = tcg_temp_new_i32();
        TCGv_i32 fp2 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_load_fpr32(ctx, fp1, ft);
        gen_helper_float_mina_s(fp2, cpu_env, fp0, fp1);
        gen_store_fpr32(ctx, fp2, fd);
        tcg_temp_free_i32(fp2);
        tcg_temp_free_i32(fp1);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FMAX_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i32 fp1 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_load_fpr32(ctx, fp1, ft);
        gen_helper_float_max_s(fp1, cpu_env, fp0, fp1);
        gen_store_fpr32(ctx, fp1, fd);
        tcg_temp_free_i32(fp1);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FMAXA_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();
        TCGv_i32 fp1 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_load_fpr32(ctx, fp1, ft);
        gen_helper_float_maxa_s(fp1, cpu_env, fp0, fp1);
        gen_store_fpr32(ctx, fp1, fd);
        tcg_temp_free_i32(fp1);
        tcg_temp_free_i32(fp0);
    } break;
    /* ---- single-precision conversions ---- */
    case OPC_LARCH_FCVT_D_S: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr32(ctx, fp32, fs);
        gen_helper_float_cvtd_s(fp64, cpu_env, fp32);
        tcg_temp_free_i32(fp32);
        gen_store_fpr64(ctx, fp64, fd);
        tcg_temp_free_i64(fp64);
    } break;
    case OPC_LARCH_FTINT_W_S: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_cvt_w_s(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FTINT_L_S: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr32(ctx, fp32, fs);
        gen_helper_float_cvt_l_s(fp64, cpu_env, fp32);
        tcg_temp_free_i32(fp32);
        gen_store_fpr64(ctx, fp64, fd);
        tcg_temp_free_i64(fp64);
    } break;
    /* ---- double-precision arithmetic ---- */
    case OPC_LARCH_FADD_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        TCGv_i64 fp1 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_load_fpr64(ctx, fp1, ft);
        gen_helper_float_add_d(fp0, cpu_env, fp0, fp1);
        tcg_temp_free_i64(fp1);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FSUB_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        TCGv_i64 fp1 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_load_fpr64(ctx, fp1, ft);
        gen_helper_float_sub_d(fp0, cpu_env, fp0, fp1);
        tcg_temp_free_i64(fp1);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FMUL_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        TCGv_i64 fp1 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_load_fpr64(ctx, fp1, ft);
        gen_helper_float_mul_d(fp0, cpu_env, fp0, fp1);
        tcg_temp_free_i64(fp1);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FDIV_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        TCGv_i64 fp1 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_load_fpr64(ctx, fp1, ft);
        gen_helper_float_div_d(fp0, cpu_env, fp0, fp1);
        tcg_temp_free_i64(fp1);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FSQRT_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_sqrt_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FABS_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_abs_d(fp0, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FMOV_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FNEG_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_chs_d(fp0, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    /* ---- double -> 64-bit integer, explicit rounding mode ---- */
    case OPC_LARCH_FTINTRNE_L_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_round_l_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FTINTRZ_L_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_trunc_l_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FTINTRP_L_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_ceil_l_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FTINTRM_L_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_floor_l_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    /* ---- double -> 32-bit integer, explicit rounding mode ---- */
    case OPC_LARCH_FTINTRNE_W_D: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp64, fs);
        gen_helper_float_round_w_d(fp32, cpu_env, fp64);
        tcg_temp_free_i64(fp64);
        gen_store_fpr32(ctx, fp32, fd);
        tcg_temp_free_i32(fp32);
    } break;
    case OPC_LARCH_FTINTRZ_W_D: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp64, fs);
        gen_helper_float_trunc_w_d(fp32, cpu_env, fp64);
        tcg_temp_free_i64(fp64);
        gen_store_fpr32(ctx, fp32, fd);
        tcg_temp_free_i32(fp32);
    } break;
    case OPC_LARCH_FTINTRP_W_D: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp64, fs);
        gen_helper_float_ceil_w_d(fp32, cpu_env, fp64);
        tcg_temp_free_i64(fp64);
        gen_store_fpr32(ctx, fp32, fd);
        tcg_temp_free_i32(fp32);
    } break;
    case OPC_LARCH_FTINTRM_W_D: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp64, fs);
        gen_helper_float_floor_w_d(fp32, cpu_env, fp64);
        tcg_temp_free_i64(fp64);
        gen_store_fpr32(ctx, fp32, fd);
        tcg_temp_free_i32(fp32);
    } break;
    case OPC_LARCH_FRECIP_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_recip_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FRSQRT_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_rsqrt_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FRINT_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_rint_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FCLASS_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_class_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    /* ---- double-precision min/max (and magnitude variants) ---- */
    case OPC_LARCH_FMIN_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        TCGv_i64 fp1 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_load_fpr64(ctx, fp1, ft);
        gen_helper_float_min_d(fp1, cpu_env, fp0, fp1);
        gen_store_fpr64(ctx, fp1, fd);
        tcg_temp_free_i64(fp1);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FMINA_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        TCGv_i64 fp1 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_load_fpr64(ctx, fp1, ft);
        gen_helper_float_mina_d(fp1, cpu_env, fp0, fp1);
        gen_store_fpr64(ctx, fp1, fd);
        tcg_temp_free_i64(fp1);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FMAX_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        TCGv_i64 fp1 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_load_fpr64(ctx, fp1, ft);
        gen_helper_float_max_d(fp1, cpu_env, fp0, fp1);
        gen_store_fpr64(ctx, fp1, fd);
        tcg_temp_free_i64(fp1);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FMAXA_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();
        TCGv_i64 fp1 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_load_fpr64(ctx, fp1, ft);
        gen_helper_float_maxa_d(fp1, cpu_env, fp0, fp1);
        gen_store_fpr64(ctx, fp1, fd);
        tcg_temp_free_i64(fp1);
        tcg_temp_free_i64(fp0);
    } break;
    /* ---- double-precision conversions ---- */
    case OPC_LARCH_FCVT_S_D: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp64, fs);
        gen_helper_float_cvts_d(fp32, cpu_env, fp64);
        tcg_temp_free_i64(fp64);
        gen_store_fpr32(ctx, fp32, fd);
        tcg_temp_free_i32(fp32);
    } break;
    case OPC_LARCH_FTINT_W_D: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp64, fs);
        gen_helper_float_cvt_w_d(fp32, cpu_env, fp64);
        tcg_temp_free_i64(fp64);
        gen_store_fpr32(ctx, fp32, fd);
        tcg_temp_free_i32(fp32);
    } break;
    case OPC_LARCH_FTINT_L_D: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_cvt_l_d(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    /* ---- integer -> float conversions ---- */
    case OPC_LARCH_FFINT_S_W: {
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        gen_helper_float_cvts_w(fp0, cpu_env, fp0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FFINT_D_W: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr32(ctx, fp32, fs);
        gen_helper_float_cvtd_w(fp64, cpu_env, fp32);
        tcg_temp_free_i32(fp32);
        gen_store_fpr64(ctx, fp64, fd);
        tcg_temp_free_i64(fp64);
    } break;
    case OPC_LARCH_FFINT_S_L: {
        TCGv_i32 fp32 = tcg_temp_new_i32();
        TCGv_i64 fp64 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp64, fs);
        gen_helper_float_cvts_l(fp32, cpu_env, fp64);
        tcg_temp_free_i64(fp64);
        gen_store_fpr32(ctx, fp32, fd);
        tcg_temp_free_i32(fp32);
    } break;
    case OPC_LARCH_FFINT_D_L: {
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        gen_helper_float_cvtd_l(fp0, cpu_env, fp0);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    default:
        LARCH_INVAL("farith");
        generate_exception_end(ctx, EXCP_RI);
        return;
    }
}
|
|
+
|
|
/* Coprocessor 3 (FPU) */
/*
 * FP load/store with register + register addressing (FLDX/FSTX and the
 * bound-checked FLDGT/FLDLE/FSTGT/FSTLE forms).
 * fd is the destination FPR for loads, fs the source FPR for stores.
 * NOTE(review): the GT/LE variants are translated identically to the
 * plain indexed forms here - no bound check is generated; confirm
 * whether that is intentional.
 */
static void gen_flt3_ldst(DisasContext *ctx, uint32_t opc, int fd, int fs,
                          int base, int index)
{
    TCGv t0 = tcg_temp_new();

    check_cp1_enabled(ctx);
    /* Effective address: skip the add when either operand is $zero. */
    if (base == 0) {
        gen_load_gpr(t0, index);
    } else if (index == 0) {
        gen_load_gpr(t0, base);
    } else {
        gen_op_addr_add(ctx, t0, cpu_gpr[base], cpu_gpr[index]);
    }

    /*
     * Don't do NOP if destination is zero: we must perform the actual
     * memory access.
     */
    switch (opc) {
    case OPC_LARCH_FLDX_S:
    case OPC_LARCH_FLDGT_S:
    case OPC_LARCH_FLDLE_S: {
        /* 32-bit load; t0 is reused for the loaded value. */
        TCGv_i32 fp0 = tcg_temp_new_i32();

        tcg_gen_qemu_ld_tl(t0, t0, ctx->mem_idx, MO_TESL);
        tcg_gen_trunc_tl_i32(fp0, t0);
        gen_store_fpr32(ctx, fp0, fd);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FLDX_D:
    case OPC_LARCH_FLDGT_D:
    case OPC_LARCH_FLDLE_D: {
        /* 64-bit load. */
        TCGv_i64 fp0 = tcg_temp_new_i64();

        tcg_gen_qemu_ld_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
        gen_store_fpr64(ctx, fp0, fd);
        tcg_temp_free_i64(fp0);
    } break;
    case OPC_LARCH_FSTX_S:
    case OPC_LARCH_FSTGT_S:
    case OPC_LARCH_FSTLE_S: {
        /* 32-bit store. */
        TCGv_i32 fp0 = tcg_temp_new_i32();

        gen_load_fpr32(ctx, fp0, fs);
        tcg_gen_qemu_st_i32(fp0, t0, ctx->mem_idx, MO_TEUL);
        tcg_temp_free_i32(fp0);
    } break;
    case OPC_LARCH_FSTX_D:
    case OPC_LARCH_FSTGT_D:
    case OPC_LARCH_FSTLE_D: {
        /* 64-bit store. */
        TCGv_i64 fp0 = tcg_temp_new_i64();

        gen_load_fpr64(ctx, fp0, fs);
        tcg_gen_qemu_st_i64(fp0, t0, ctx->mem_idx, MO_TEQ);
        tcg_temp_free_i64(fp0);
    } break;
    }
    tcg_temp_free(t0);
}
|
|
+
|
|
/*
 * Drop the branch-pending bits from the translation-time hflags.
 * If translation continues normally the cleared value can simply be
 * saved; otherwise the runtime copy must be patched directly.
 */
static inline void clear_branch_hflags(DisasContext *ctx)
{
    ctx->hflags &= ~LARCH_HFLAG_BMASK;
    if (ctx->base.is_jmp == DISAS_NEXT) {
        save_cpu_state(ctx, 0);
    } else {
        /*
         * It is not safe to save ctx->hflags as hflags may be changed
         * in execution time.
         */
        tcg_gen_andi_i32(hflags, hflags, ~LARCH_HFLAG_BMASK);
    }
}
|
|
+
|
|
/*
 * Complete a pending branch recorded in ctx->hflags: emit the goto_tb /
 * indirect-jump sequence and mark the TB as finished (DISAS_NORETURN).
 * insn_bytes is the size of the delay-filling instruction, used to
 * compute the fall-through address for conditional branches.
 * No-op when no branch is pending.
 */
static void gen_branch(DisasContext *ctx, int insn_bytes)
{
    if (ctx->hflags & LARCH_HFLAG_BMASK) {
        int proc_hflags = ctx->hflags & LARCH_HFLAG_BMASK;
        /* Branches completion */
        clear_branch_hflags(ctx);
        ctx->base.is_jmp = DISAS_NORETURN;
        /* FIXME: Need to clear can_do_io. */
        switch (proc_hflags & LARCH_HFLAG_BMASK) {
        case LARCH_HFLAG_B:
            /* unconditional branch */
            gen_goto_tb(ctx, 0, ctx->btarget);
            break;
        case LARCH_HFLAG_BC:
            /* Conditional branch */
            {
                TCGLabel *l1 = gen_new_label();

                /* bcond != 0 -> taken; else fall through to next insn. */
                tcg_gen_brcondi_tl(TCG_COND_NE, bcond, 0, l1);
                gen_goto_tb(ctx, 1, ctx->base.pc_next + insn_bytes);
                gen_set_label(l1);
                gen_goto_tb(ctx, 0, ctx->btarget);
            }
            break;
        case LARCH_HFLAG_BR:
            /* unconditional branch to register */
            tcg_gen_mov_tl(cpu_PC, btarget);
            if (ctx->base.singlestep_enabled) {
                save_cpu_state(ctx, 0);
                gen_helper_raise_exception_debug(cpu_env);
            }
            tcg_gen_lookup_and_goto_ptr();
            break;
        default:
            fprintf(stderr, "unknown branch 0x%x\n", proc_hflags);
            abort();
        }
    }
}
|
|
+
|
|
/*
 * Signed immediate: extract 'width' bits of 'op' starting at bit
 * 'start' and sign-extend to 32 bits.
 * NOTE(review): relies on arithmetic right shift of a negative int32_t
 * (implementation-defined in ISO C, but the behavior on all compilers
 * QEMU supports) - confirm, or consider sextract32() from qemu/bitops.h.
 */
#define SIMM(op, start, width) \
    ((int32_t)(((op >> start) & ((~0U) >> (32 - width))) << (32 - width)) >> \
     (32 - width))
/* Zero-extended immediate: extract 'width' bits starting at 'start'. */
#define ZIMM(op, start, width) ((op >> start) & ((~0U) >> (32 - width)))
|
|
+
|
|
+static void gen_sync(int stype)
|
|
+{
|
|
+ TCGBar tcg_mo = TCG_BAR_SC;
|
|
+
|
|
+ switch (stype) {
|
|
+ case 0x4: /* SYNC_WMB */
|
|
+ tcg_mo |= TCG_MO_ST_ST;
|
|
+ break;
|
|
+ case 0x10: /* SYNC_MB */
|
|
+ tcg_mo |= TCG_MO_ALL;
|
|
+ break;
|
|
+ case 0x11: /* SYNC_ACQUIRE */
|
|
+ tcg_mo |= TCG_MO_LD_LD | TCG_MO_LD_ST;
|
|
+ break;
|
|
+ case 0x12: /* SYNC_RELEASE */
|
|
+ tcg_mo |= TCG_MO_ST_ST | TCG_MO_LD_ST;
|
|
+ break;
|
|
+ case 0x13: /* SYNC_RMB */
|
|
+ tcg_mo |= TCG_MO_LD_LD;
|
|
+ break;
|
|
+ default:
|
|
+ tcg_mo |= TCG_MO_ALL;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ tcg_gen_mb(tcg_mo);
|
|
+}
|
|
+
|
|
+static void gen_crc32(DisasContext *ctx, int rd, int rs, int rt, int sz,
|
|
+ int crc32c)
|
|
+{
|
|
+ TCGv t0;
|
|
+ TCGv t1;
|
|
+ TCGv_i32 tsz = tcg_const_i32(1 << sz);
|
|
+ if (rd == 0) {
|
|
+ /* Treat as NOP. */
|
|
+ return;
|
|
+ }
|
|
+ t0 = tcg_temp_new();
|
|
+ t1 = tcg_temp_new();
|
|
+
|
|
+ gen_load_gpr(t0, rt);
|
|
+ gen_load_gpr(t1, rs);
|
|
+
|
|
+ if (crc32c) {
|
|
+ gen_helper_crc32c(cpu_gpr[rd], t0, t1, tsz);
|
|
+ } else {
|
|
+ gen_helper_crc32(cpu_gpr[rd], t0, t1, tsz);
|
|
+ }
|
|
+
|
|
+ tcg_temp_free(t0);
|
|
+ tcg_temp_free(t1);
|
|
+ tcg_temp_free_i32(tsz);
|
|
+}
|
|
+
|
|
+#include "cpu-csr.h"
|
|
+
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+
|
|
+/*
|
|
+ * 64-bit CSR read
|
|
+ *
|
|
+ * @arg : GPR to store the value of CSR register
|
|
+ * @csr : CSR register number
|
|
+ */
|
|
+static void gen_csr_rdq(DisasContext *ctx, TCGv rd, int64_t a1)
|
|
+{
|
|
+ TCGv_i64 csr = tcg_const_i64(a1);
|
|
+ gen_helper_csr_rdq(rd, cpu_env, csr);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * 64-bit CSR write
|
|
+ *
|
|
+ * @arg : GPR that stores the new value of CSR register
|
|
+ * @csr : CSR register number
|
|
+ */
|
|
+static void gen_csr_wrq(DisasContext *ctx, TCGv val, int64_t a1)
|
|
+{
|
|
+ TCGv_i64 csr = tcg_const_i64(a1);
|
|
+ gen_helper_csr_wrq(val, cpu_env, val, csr);
|
|
+}
|
|
+
|
|
+/*
|
|
+ * 64-bit CSR exchange
|
|
+ *
|
|
+ * @arg : GPR that stores the new value of CSR register
|
|
+ * @csr : CSR register number
|
|
+ */
|
|
+static void gen_csr_xchgq(DisasContext *ctx, TCGv val, TCGv mask, int64_t a1)
|
|
+{
|
|
+ TCGv_i64 csr = tcg_const_i64(a1);
|
|
+ gen_helper_csr_xchgq(val, cpu_env, val, mask, csr);
|
|
+}
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
+
|
|
/*
 * TranslatorOps hook: set up the per-TB DisasContext from the CPU state
 * and the flags stored in the TB.
 */
static void loongarch_tr_init_disas_context(DisasContextBase *dcbase,
                                            CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPULOONGARCHState *env = cs->env_ptr;

    ctx->page_start = ctx->base.pc_first & TARGET_PAGE_MASK;
    ctx->saved_pc = -1;     /* no PC has been spilled to env yet */
    ctx->insn_flags = env->insn_flags;
    ctx->btarget = 0;
    /* Restore state from the tb context. */
    ctx->hflags =
        (uint32_t)ctx->base.tb->flags; /* FIXME: maybe use 64 bits? */
    restore_cpu_state(env, ctx);
#ifdef CONFIG_USER_ONLY
    ctx->mem_idx = LARCH_HFLAG_UM;      /* user mode only */
#else
    ctx->mem_idx = hflags_mmu_index(ctx->hflags);
#endif
    ctx->default_tcg_memop_mask = MO_ALIGN;

    LOG_DISAS("\ntb %p idx %d hflags %04x\n", ctx->base.tb, ctx->mem_idx,
              ctx->hflags);
}
|
|
+
|
|
/* TranslatorOps hook: nothing to do at the start of a TB. */
static void loongarch_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
}
|
|
+
|
|
/*
 * TranslatorOps hook: record PC, pending-branch flags and branch target
 * for this instruction so restore_state_to_opc() can rebuild the state.
 */
static void loongarch_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->base.pc_next, ctx->hflags & LARCH_HFLAG_BMASK,
                       ctx->btarget);
}
|
|
+
|
|
/*
 * 128 and 256 lsx vector instructions are not supported yet.
 * Return true when 'opcode' falls in one of the encoding ranges set
 * aside for those vector instructions, so the caller can raise a
 * reserved-instruction exception instead of reporting an unknown
 * opcode.
 */
static bool decode_vector_lsx(uint32_t opcode)
{
    /* Top-nibble group 0x7. */
    if ((opcode & 0xf0000000) == 0x70000000) {
        return true;
    }
    /* 0x384xxxxx group. */
    if ((opcode & 0xfff00000) == 0x38400000) {
        return true;
    }
    /* Remaining groups, identified by the top byte. */
    switch (opcode & 0xff000000) {
    case 0x09000000:
    case 0x0a000000:
    case 0x0e000000:
    case 0x0f000000:
    case 0x2c000000:
    case 0x30000000:
    case 0x31000000:
    case 0x32000000:
    case 0x33000000:
        return true;
    default:
        return false;
    }
}
|
|
+
|
|
+static bool decode_insn(DisasContext *ctx, uint32_t insn);
|
|
+#include "decode-insn.c.inc"
|
|
+#include "trans.inc.c"
|
|
+
|
|
+static void loongarch_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
|
|
+{
|
|
+ CPULOONGARCHState *env = cs->env_ptr;
|
|
+ DisasContext *ctx = container_of(dcbase, DisasContext, base);
|
|
+ int insn_bytes = 4;
|
|
+
|
|
+ ctx->opcode = cpu_ldl_code(env, ctx->base.pc_next);
|
|
+
|
|
+ if (!decode_insn(ctx, ctx->opcode)) {
|
|
+ if (decode_vector_lsx(ctx->opcode)) {
|
|
+ generate_exception_end(ctx, EXCP_RI);
|
|
+ } else {
|
|
+ fprintf(stderr, "Error: unkown opcode. 0x%lx: 0x%x\n",
|
|
+ ctx->base.pc_next, ctx->opcode);
|
|
+ generate_exception_end(ctx, EXCP_RI);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ctx->hflags & LARCH_HFLAG_BMASK) {
|
|
+ gen_branch(ctx, insn_bytes);
|
|
+ }
|
|
+ ctx->base.pc_next += insn_bytes;
|
|
+}
|
|
+
|
|
/*
 * TranslatorOps hook: finish the TB according to how translation ended.
 * Under single-stepping every non-noreturn exit raises a debug
 * exception instead of chaining TBs.
 */
static void loongarch_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    if (ctx->base.singlestep_enabled && ctx->base.is_jmp != DISAS_NORETURN) {
        save_cpu_state(ctx, ctx->base.is_jmp != DISAS_EXIT);
        gen_helper_raise_exception_debug(cpu_env);
    } else {
        switch (ctx->base.is_jmp) {
        case DISAS_STOP:
            /* CPU state changed: re-lookup the next TB at runtime. */
            gen_save_pc(ctx->base.pc_next);
            tcg_gen_lookup_and_goto_ptr();
            break;
        case DISAS_NEXT:
        case DISAS_TOO_MANY:
            /* Plain fall-through: chain directly to the next TB. */
            save_cpu_state(ctx, 0);
            gen_goto_tb(ctx, 0, ctx->base.pc_next);
            break;
        case DISAS_EXIT:
            /* Return all the way to the main execution loop. */
            tcg_gen_exit_tb(NULL, 0);
            break;
        case DISAS_NORETURN:
            /* Exit sequence already emitted (exception or branch). */
            break;
        default:
            g_assert_not_reached();
        }
    }
}
|
|
+
|
|
/*
 * TranslatorOps hook: log the guest disassembly of the translated TB.
 */
static void loongarch_tr_disas_log(const DisasContextBase *dcbase,
                                   CPUState *cs)
{
    qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first));
    log_target_disas(cs, dcbase->pc_first, dcbase->tb->size);
}
|
|
+
|
|
/* Hook table consumed by the generic translator_loop(). */
static const TranslatorOps loongarch_tr_ops = {
    .init_disas_context = loongarch_tr_init_disas_context,
    .tb_start = loongarch_tr_tb_start,
    .insn_start = loongarch_tr_insn_start,
    .translate_insn = loongarch_tr_translate_insn,
    .tb_stop = loongarch_tr_tb_stop,
    .disas_log = loongarch_tr_disas_log,
};
|
|
+
|
|
/*
 * Target entry point for TB translation: drive the generic translator
 * loop with the LoongArch hooks.
 */
void gen_intermediate_code(CPUState *cs, struct TranslationBlock *tb,
                           int max_insns)
{
    DisasContext ctx;

    translator_loop(&loongarch_tr_ops, &ctx.base, cs, tb, max_insns);
}
|
|
+
|
|
+void loongarch_tcg_init(void)
|
|
+{
|
|
+ int i;
|
|
+
|
|
+ for (i = 0; i < 32; i++)
|
|
+ cpu_gpr[i] = tcg_global_mem_new(
|
|
+ cpu_env, offsetof(CPULOONGARCHState, active_tc.gpr[i]),
|
|
+ regnames[i]);
|
|
+
|
|
+ for (i = 0; i < 32; i++) {
|
|
+ int off = offsetof(CPULOONGARCHState, active_fpu.fpr[i].d);
|
|
+ fpu_f64[i] = tcg_global_mem_new_i64(cpu_env, off, fregnames[i]);
|
|
+ }
|
|
+
|
|
+ cpu_PC = tcg_global_mem_new(
|
|
+ cpu_env, offsetof(CPULOONGARCHState, active_tc.PC), "PC");
|
|
+ bcond = tcg_global_mem_new(cpu_env, offsetof(CPULOONGARCHState, bcond),
|
|
+ "bcond");
|
|
+ btarget = tcg_global_mem_new(cpu_env, offsetof(CPULOONGARCHState, btarget),
|
|
+ "btarget");
|
|
+ hflags = tcg_global_mem_new_i32(
|
|
+ cpu_env, offsetof(CPULOONGARCHState, hflags), "hflags");
|
|
+ fpu_fcsr0 = tcg_global_mem_new_i32(
|
|
+ cpu_env, offsetof(CPULOONGARCHState, active_fpu.fcsr0), "fcsr0");
|
|
+ cpu_lladdr = tcg_global_mem_new(
|
|
+ cpu_env, offsetof(CPULOONGARCHState, lladdr), "lladdr");
|
|
+ cpu_llval = tcg_global_mem_new(cpu_env, offsetof(CPULOONGARCHState, llval),
|
|
+ "llval");
|
|
+}
|
|
+
|
|
/*
 * Rebuild CPU state from the per-insn data recorded by
 * loongarch_tr_insn_start(): data[0] = PC, data[1] = pending-branch
 * hflags, data[2] = branch target (only meaningful for direct and
 * conditional branches).
 */
void restore_state_to_opc(CPULOONGARCHState *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->active_tc.PC = data[0];
    env->hflags &= ~LARCH_HFLAG_BMASK;
    env->hflags |= data[1];
    switch (env->hflags & LARCH_HFLAG_BMASK) {
    case LARCH_HFLAG_BR:
        /* Register branch: target lives in env->btarget already. */
        break;
    case LARCH_HFLAG_BC:
    case LARCH_HFLAG_B:
        env->btarget = data[2];
        break;
    }
}
|
|
diff --git a/target/meson.build b/target/meson.build
|
|
index ec6bc97331..a824a390f9 100644
|
|
--- a/target/meson.build
|
|
+++ b/target/meson.build
|
|
@@ -5,6 +5,7 @@ subdir('cris')
|
|
subdir('hexagon')
|
|
subdir('hppa')
|
|
subdir('i386')
|
|
+subdir('loongarch64')
|
|
subdir('m68k')
|
|
subdir('microblaze')
|
|
subdir('mips')
|
|
--
|
|
2.27.0
|
|
|