- sw_64: Added sw64 architecture related updates - virtio-crypto: verify src&dst buffer length for sym request - vhost-vdpa: do not cleanup the vdpa/vhost-net structures if peer nic is present - qga: Fix suspend on Linux guests without systemd - tests: vhost-user-test: release mutex on protocol violation - qapi: support updating expected test output via make - block: Fix misleading hexadecimal format - block/rbd: fix write zeroes with growing images - block/nbd.c: Fixed IO request coroutine not being wakeup when kill NBD server - block/nfs: Fix 32-bit Windows build - qapi/qdev: Tidy up device_add documentation - hw/xen/xen_pt: fix uninitialized variable - migration/ram: Fix error handling in ram_write_tracking_start() - docs/about/build-platforms: Refine the distro support policy - xen-block: Avoid leaks on new error path - QGA VSS: Add wrapper to send log to debugger and stderr - chardev/char-socket: set s->listener = NULL in char_socket_finalize - qapi/block: Tidy up block-latency-histogram-set documentation - disas/riscv Fix ctzw disassemble - vfio: Fix vfio_get_dev_region() trace event - migration/ram: Fix populate_read_range() - Check and report for incomplete 'global' option format Signed-off-by: Jiabo Feng <fengjiabo1@huawei.com>
6109 lines
211 KiB
Diff
6109 lines
211 KiB
Diff
From 91b0065ca578a6e494b39736f746fcddddfc2978 Mon Sep 17 00:00:00 2001
|
|
From: Lu Feifei <lufeifei@wxiat.com>
|
|
Date: Wed, 26 Jul 2023 14:19:42 +0800
|
|
Subject: [PATCH] sw_64: Added sw64 architecture related updates
|
|
|
|
Signed-off-by: Lu Feifei <lufeifei@wxiat.com>
|
|
---
|
|
configs/targets/sw64-linux-user.mak | 5 +
|
|
configs/targets/sw64-softmmu.mak | 1 +
|
|
disas/sw64.c | 41 +-
|
|
gdb-xml/sw64-core.xml | 43 +
|
|
hw/rtc/sun4v-rtc.c | 11 -
|
|
hw/sw64/Kconfig | 5 +-
|
|
hw/sw64/core3.c | 25 +-
|
|
hw/sw64/core3_board.c | 73 +-
|
|
hw/sw64/sw64_iommu.c | 11 +-
|
|
linux-headers/asm-sw64/kvm.h | 14 +
|
|
linux-user/elfload.c | 16 +
|
|
linux-user/host/sw64/host-signal.h | 46 +
|
|
linux-user/host/sw64/hostdep.h | 14 +
|
|
linux-user/sw64/cpu_loop.c | 5 +-
|
|
linux-user/sw64/meson.build | 5 +
|
|
linux-user/sw64/signal.c | 33 +-
|
|
linux-user/sw64/syscall.tbl | 488 +++++
|
|
linux-user/sw64/syscallhdr.sh | 32 +
|
|
linux-user/sw64/target_cpu.h | 15 +-
|
|
linux-user/sw64/target_errno_defs.h | 204 ++
|
|
linux-user/sw64/target_signal.h | 2 +
|
|
linux-user/sw64/target_syscall.h | 12 +-
|
|
linux-user/sw64/termbits.h | 1 +
|
|
linux-user/syscall_defs.h | 46 +-
|
|
pc-bios/core3-hmcode | Bin 225904 -> 227168 bytes
|
|
pc-bios/core3-reset | Bin 5032 -> 229200 bytes
|
|
pc-bios/uefi-bios-sw | Bin 3145728 -> 3145728 bytes
|
|
target/sw64/Makefile.objs | 1 +
|
|
target/sw64/cpu-param.h | 8 +-
|
|
target/sw64/cpu.c | 163 +-
|
|
target/sw64/cpu.h | 17 +-
|
|
target/sw64/gdbstub.c | 56 +
|
|
target/sw64/helper.c | 142 +-
|
|
target/sw64/kvm.c | 146 +-
|
|
target/sw64/kvm_sw64.h | 9 +
|
|
target/sw64/machine.c | 2 +-
|
|
target/sw64/meson.build | 1 +
|
|
target/sw64/translate.c | 2 +-
|
|
tcg/sw64/tcg-target.c.inc | 2839 +++++++++++++++------------
|
|
tcg/sw64/tcg-target.h | 3 +
|
|
40 files changed, 3043 insertions(+), 1494 deletions(-)
|
|
create mode 100644 configs/targets/sw64-linux-user.mak
|
|
create mode 100644 gdb-xml/sw64-core.xml
|
|
create mode 100644 linux-user/host/sw64/host-signal.h
|
|
create mode 100755 linux-user/host/sw64/hostdep.h
|
|
create mode 100644 linux-user/sw64/meson.build
|
|
create mode 100644 linux-user/sw64/syscall.tbl
|
|
create mode 100644 linux-user/sw64/syscallhdr.sh
|
|
create mode 100644 linux-user/sw64/target_errno_defs.h
|
|
mode change 100755 => 100644 pc-bios/core3-hmcode
|
|
create mode 100644 target/sw64/gdbstub.c
|
|
|
|
diff --git a/configs/targets/sw64-linux-user.mak b/configs/targets/sw64-linux-user.mak
|
|
new file mode 100644
|
|
index 0000000000..ae00665692
|
|
--- /dev/null
|
|
+++ b/configs/targets/sw64-linux-user.mak
|
|
@@ -0,0 +1,5 @@
|
|
+TARGET_ARCH=sw64
|
|
+TARGET_SYSTBL_ABI=common
|
|
+TARGET_SYSTBL=syscall.tbl
|
|
+TARGET_ALIGNED_ONLY=y
|
|
+TARGET_XML_FILES= gdb-xml/sw64-core.xml
|
|
diff --git a/configs/targets/sw64-softmmu.mak b/configs/targets/sw64-softmmu.mak
|
|
index 37cc2e05a6..9cf002df8c 100644
|
|
--- a/configs/targets/sw64-softmmu.mak
|
|
+++ b/configs/targets/sw64-softmmu.mak
|
|
@@ -6,3 +6,4 @@ TARGET_ARCH=sw64
|
|
TARGET_BASE_ARCH=sw64
|
|
TARGET_ABI_DIR=sw64
|
|
TARGET_SUPPORTS_MTTCG=y
|
|
+TARGET_XML_FILES= gdb-xml/sw64-core.xml
|
|
diff --git a/disas/sw64.c b/disas/sw64.c
|
|
index c5bd578e07..16504c673a 100755
|
|
--- a/disas/sw64.c
|
|
+++ b/disas/sw64.c
|
|
@@ -62,7 +62,7 @@ extern const unsigned sw_64_num_opcodes;
|
|
#define SW_OPCODE_CORE3 0x0002 /* Core3 private insns. */
|
|
#define SW_LITOP(i) (((i) >> 26) & 0x3D)
|
|
|
|
-#define SW_OPCODE_NOHM (~(SW_OPCODE_BASE|SW_OPCODE_CORE3))
|
|
+#define SW_OPCODE_NOHMCODE (~(SW_OPCODE_BASE|SW_OPCODE_CORE3))
|
|
|
|
/* A macro to extract the major opcode from an instruction. */
|
|
#define SW_OP(i) (((i) >> 26) & 0x3F)
|
|
@@ -328,18 +328,6 @@ static int extract_bdisp(unsigned insn, int *invalid ATTRIBUTE_UNUSED)
|
|
return 4 * (((insn & 0x1FFFFF) ^ 0x100000) - 0x100000);
|
|
}
|
|
|
|
-static unsigned insert_bdisp26(unsigned insn, int value, const char **errmsg)
|
|
-{
|
|
- if (errmsg != (const char **)NULL && (value & 3))
|
|
- *errmsg = "branch operand unaligned";
|
|
- return insn | ((value / 4) & 0x3FFFFFF);
|
|
-}
|
|
-
|
|
-static int extract_bdisp26(unsigned insn, int *invalid ATTRIBUTE_UNUSED)
|
|
-{
|
|
- return 4 * (((insn & 0x3FFFFFF) ^ 0x2000000) - 0x2000000);
|
|
-}
|
|
-
|
|
/* The hint field of a JMP/JSR insn. */
|
|
/* sw use 16 bits hint disp. */
|
|
static unsigned insert_jhint(unsigned insn, int value, const char **errmsg)
|
|
@@ -480,13 +468,9 @@ const struct sw_64_operand sw_64_operands[] = {
|
|
{ 16, 0, -HWINDEX, SW_OPERAND_UNSIGNED, 0, 0 },
|
|
|
|
/* The 13-bit branch hint for the core3 hw_jmp/jsr (pal1e) insn. */
|
|
-#define HWJMPHINT (HWINDEX + 1)
|
|
- { 8, 0, -HWJMPHINT,
|
|
- SW_OPERAND_RELATIVE | SW_OPERAND_DEFAULT_ZERO | SW_OPERAND_NOOVERFLOW,
|
|
- insert_sw4hwjhint, extract_sw4hwjhint },
|
|
|
|
/* for the third operand of ternary operands integer insn. */
|
|
-#define R3 (HWJMPHINT + 1)
|
|
+#define R3 (HWINDEX + 1)
|
|
{ 5, 5, 0, SW_OPERAND_IR, 0, 0 },
|
|
/* The plain fp register fields */
|
|
#define F3 (R3 + 1)
|
|
@@ -494,19 +478,10 @@ const struct sw_64_operand sw_64_operands[] = {
|
|
/* sw simd settle instruction lit */
|
|
#define FMALIT (F3 + 1)
|
|
{ 5, 5, -FMALIT, SW_OPERAND_UNSIGNED, 0, 0 }, //V1.1
|
|
-#define LMDISP (FMALIT + 1)
|
|
- { 15, 0, -LMDISP, SW_OPERAND_UNSIGNED, 0, 0 },
|
|
-#define RPIINDEX (LMDISP + 1)
|
|
+#define RPIINDEX (FMALIT + 1)
|
|
{ 8, 0, -RPIINDEX, SW_OPERAND_UNSIGNED, 0, 0 },
|
|
#define ATMDISP (RPIINDEX + 1)
|
|
{ 12, 0, -ATMDISP, SW_OPERAND_SIGNED, 0, 0 },
|
|
-#define DISP13 (ATMDISP + 1)
|
|
- { 13, 13, -DISP13, SW_OPERAND_SIGNED, 0, 0},
|
|
-#define BDISP26 (DISP13 + 1)
|
|
- { 26, 0, 222,
|
|
- SW_OPERAND_RELATIVE, insert_bdisp26, extract_bdisp26 },
|
|
-#define DPFTH (BDISP26 + 1)
|
|
- { 5, 21, -DPFTH, SW_OPERAND_UNSIGNED, 0, 0}
|
|
};
|
|
|
|
const unsigned sw_64_num_operands = sizeof(sw_64_operands) / sizeof(*sw_64_operands);
|
|
@@ -578,7 +553,7 @@ const unsigned sw_64_num_operands = sizeof(sw_64_operands) / sizeof(*sw_64_opera
|
|
#define PRIRET_MASK (OP_MASK | 0x100000)
|
|
#define PRIRET(oo,h) PRIRET_(oo,h), PRIRET_MASK
|
|
|
|
-/* sw rpi_rcsr,rpi_wcsr. */
|
|
+/* sw pri_rcsr,pri_wcsr. */
|
|
#define CSR_(oo,ff) (OP(oo) | (((ff) & 0xFF) << 8))
|
|
#define CSR_MASK (OP_MASK | 0xFF00)
|
|
#define CSR(oo,ff) CSR_(oo,ff), CSR_MASK
|
|
@@ -610,8 +585,6 @@ const unsigned sw_64_num_operands = sizeof(sw_64_operands) / sizeof(*sw_64_opera
|
|
#define ARG_FMEM { FA, MDISP, PRB }
|
|
#define ARG_OPR { RA, RB, DRC1 }
|
|
|
|
-#define ARG_OPRCAS { RA, RB, RC }
|
|
-
|
|
#define ARG_OPRL { RA, LIT, DRC1 }
|
|
#define ARG_OPRZ1 { ZA, RB, DRC1 }
|
|
#define ARG_OPRLZ1 { ZA, LIT, RC }
|
|
@@ -625,9 +598,6 @@ const unsigned sw_64_num_operands = sizeof(sw_64_operands) / sizeof(*sw_64_opera
|
|
#define ARG_FMAL { FA,FB,FMALIT, DFC1 }
|
|
#define ARG_ATMEM { RA, ATMDISP, PRB }
|
|
#define ARG_VUAMEM { FA, ATMDISP, PRB }
|
|
-#define ARG_OPRLZ3 { RA, LIT, ZC }
|
|
-
|
|
-#define ARG_DISP13 {DISP13, RC}
|
|
|
|
/* The opcode table.
|
|
|
|
@@ -662,6 +632,7 @@ const struct sw_64_opcode sw_64_opcodes[] = {
|
|
{ "jmp", MEM(0x03), BASE, { RA, CPRB, JMPHINT } },
|
|
{ "br", BRA(0x04), BASE, { ZA, BDISP } },
|
|
{ "br", BRA(0x04), BASE, ARG_BRA },
|
|
+ { "bsr", BRA(0x05), BASE, { ZA, BDISP } },
|
|
{ "bsr", BRA(0x05), BASE, ARG_BRA },
|
|
{ "memb", MFC(0x06,0x0000), BASE, ARG_NONE },
|
|
{ "imemb", MFC(0x06,0x0001), BASE, ARG_NONE },
|
|
@@ -1110,7 +1081,7 @@ int print_insn_sw_64(bfd_vma memaddr, struct disassemble_info *info)
|
|
regnames = vms_regnames;
|
|
else
|
|
regnames = osf_regnames;
|
|
- isa_mask = SW_OPCODE_NOHM;
|
|
+ isa_mask = SW_OPCODE_NOHMCODE;
|
|
switch (info->mach) {
|
|
case bfd_mach_sw_64_core3:
|
|
isa_mask |= SW_OPCODE_BASE | SW_OPCODE_CORE3;
|
|
diff --git a/gdb-xml/sw64-core.xml b/gdb-xml/sw64-core.xml
|
|
new file mode 100644
|
|
index 0000000000..24527c175b
|
|
--- /dev/null
|
|
+++ b/gdb-xml/sw64-core.xml
|
|
@@ -0,0 +1,43 @@
|
|
+<?xml version="1.0"?>
|
|
+<!-- Copyright (C) 2023 Free Software Foundation, Inc.
|
|
+
|
|
+ Copying and distribution of this file, with or without modification,
|
|
+ are permitted in any medium without royalty provided the copyright
|
|
+ notice and this notice are preserved. -->
|
|
+
|
|
+<!DOCTYPE feature SYSTEM "gdb-target.dtd">
|
|
+<feature name="org.gnu.gdb.sw_64.cpu">
|
|
+ <reg name="v0" bitsize="64" regnum="0"/>
|
|
+ <reg name="t0" bitsize="64"/>
|
|
+ <reg name="t1" bitsize="64"/>
|
|
+ <reg name="t2" bitsize="64"/>
|
|
+ <reg name="t3" bitsize="64"/>
|
|
+ <reg name="t4" bitsize="64"/>
|
|
+ <reg name="t5" bitsize="64"/>
|
|
+ <reg name="t6" bitsize="64"/>
|
|
+ <reg name="t7" bitsize="64"/>
|
|
+ <reg name="s0" bitsize="64"/>
|
|
+ <reg name="s1" bitsize="64"/>
|
|
+ <reg name="s2" bitsize="64"/>
|
|
+ <reg name="s3" bitsize="64"/>
|
|
+ <reg name="s4" bitsize="64"/>
|
|
+ <reg name="s5" bitsize="64"/>
|
|
+ <reg name="fp" bitsize="64"/>
|
|
+ <reg name="a0" bitsize="64"/>
|
|
+ <reg name="a1" bitsize="64"/>
|
|
+ <reg name="a2" bitsize="64"/>
|
|
+ <reg name="a3" bitsize="64"/>
|
|
+ <reg name="a4" bitsize="64"/>
|
|
+ <reg name="a5" bitsize="64"/>
|
|
+ <reg name="t8" bitsize="64"/>
|
|
+ <reg name="t9" bitsize="64"/>
|
|
+ <reg name="t10" bitsize="64"/>
|
|
+ <reg name="t11" bitsize="64"/>
|
|
+ <reg name="ra" bitsize="64"/>
|
|
+ <reg name="t12" bitsize="64"/>
|
|
+ <reg name="at" bitsize="64"/>
|
|
+ <reg name="gp" bitsize="64"/>
|
|
+ <reg name="sp" bitsize="64"/>
|
|
+ <reg name="zero" bitsize="64"/>
|
|
+ <reg name="pc" bitsize="64" regnum="64"/>
|
|
+</feature>
|
|
diff --git a/hw/rtc/sun4v-rtc.c b/hw/rtc/sun4v-rtc.c
|
|
index 58a0cff483..e037acd1b5 100644
|
|
--- a/hw/rtc/sun4v-rtc.c
|
|
+++ b/hw/rtc/sun4v-rtc.c
|
|
@@ -32,17 +32,10 @@ static uint64_t sun4v_rtc_read(void *opaque, hwaddr addr,
|
|
unsigned size)
|
|
{
|
|
uint64_t val = get_clock_realtime() / NANOSECONDS_PER_SECOND;
|
|
-#if defined(__sw_64__)
|
|
- if (addr & 4ULL) {
|
|
- /* accessing the high 32 bits */
|
|
- val >>= 32;
|
|
- }
|
|
-#else
|
|
if (!(addr & 4ULL)) {
|
|
/* accessing the high 32 bits */
|
|
val >>= 32;
|
|
}
|
|
-#endif
|
|
trace_sun4v_rtc_read(addr, val);
|
|
return val;
|
|
}
|
|
@@ -56,11 +49,7 @@ static void sun4v_rtc_write(void *opaque, hwaddr addr,
|
|
static const MemoryRegionOps sun4v_rtc_ops = {
|
|
.read = sun4v_rtc_read,
|
|
.write = sun4v_rtc_write,
|
|
-#if defined(__sw_64__)
|
|
- .endianness = DEVICE_LITTLE_ENDIAN,
|
|
-#else
|
|
.endianness = DEVICE_NATIVE_ENDIAN,
|
|
-#endif
|
|
};
|
|
|
|
void sun4v_rtc_init(hwaddr addr)
|
|
diff --git a/hw/sw64/Kconfig b/hw/sw64/Kconfig
|
|
index 2bf19e8234..0dc49576a5 100644
|
|
--- a/hw/sw64/Kconfig
|
|
+++ b/hw/sw64/Kconfig
|
|
@@ -7,5 +7,8 @@ config CORE3
|
|
select SUN4V_RTC
|
|
select VIRTIO_MMIO
|
|
select SERIAL
|
|
- select IDE_CMD646
|
|
select VIRTIO_VGA
|
|
+ select IDE_CMD646
|
|
+ select ISA_BUS
|
|
+ select PCKBD
|
|
+ select MSI_NONBROKEN
|
|
diff --git a/hw/sw64/core3.c b/hw/sw64/core3.c
|
|
index dbe4ed6fa1..eceeb3bec3 100644
|
|
--- a/hw/sw64/core3.c
|
|
+++ b/hw/sw64/core3.c
|
|
@@ -25,6 +25,10 @@
|
|
#include "core.h"
|
|
#include "hw/boards.h"
|
|
#include "sysemu/numa.h"
|
|
+#include "qemu/uuid.h"
|
|
+#include "qemu/bswap.h"
|
|
+
|
|
+#define VMUUID 0xFF40
|
|
|
|
static uint64_t cpu_sw64_virt_to_phys(void *opaque, uint64_t addr)
|
|
{
|
|
@@ -69,6 +73,7 @@ static const CPUArchIdList *sw64_possible_cpu_arch_ids(MachineState *ms)
|
|
ms->possible_cpus->cpus[i].vcpus_count = 1;
|
|
ms->possible_cpus->cpus[i].arch_id = i;
|
|
ms->possible_cpus->cpus[i].props.has_thread_id = true;
|
|
+ ms->possible_cpus->cpus[i].props.has_core_id = true;
|
|
ms->possible_cpus->cpus[i].props.core_id = i;
|
|
}
|
|
|
|
@@ -96,6 +101,7 @@ static void core3_init(MachineState *machine)
|
|
uint64_t kernel_entry, kernel_low, kernel_high;
|
|
BOOT_PARAMS *core3_boot_params = g_new0(BOOT_PARAMS, 1);
|
|
uint64_t param_offset;
|
|
+ QemuUUID uuid_out_put;
|
|
|
|
memset(cpus, 0, sizeof(cpus));
|
|
|
|
@@ -112,6 +118,9 @@ static void core3_init(MachineState *machine)
|
|
|
|
rom_add_blob_fixed("ram_size", (char *)&buf, 0x8, 0x2040);
|
|
|
|
+ uuid_out_put = qemu_uuid;
|
|
+ uuid_out_put = qemu_uuid_bswap(uuid_out_put);
|
|
+ pstrcpy_targphys("vm-uuid", VMUUID, 0x12, (char *)&(uuid_out_put));
|
|
param_offset = 0x90B000UL;
|
|
core3_boot_params->cmdline = param_offset | 0xfff0000000000000UL;
|
|
rom_add_blob_fixed("core3_boot_params", (core3_boot_params), 0x48, 0x90A100);
|
|
@@ -137,13 +146,24 @@ static void core3_init(MachineState *machine)
|
|
|
|
/* Start all cpus at the hmcode RESET entry point. */
|
|
for (i = 0; i < machine->smp.cpus; ++i) {
|
|
- cpus[i]->env.pc = hmcode_entry;
|
|
+ if (kvm_enabled())
|
|
+ cpus[i]->env.pc = init_pc;
|
|
+ else
|
|
+ cpus[i]->env.pc = hmcode_entry;
|
|
cpus[i]->env.hm_entry = hmcode_entry;
|
|
}
|
|
|
|
if (!kernel_filename) {
|
|
uefi_filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, "uefi-bios-sw");
|
|
- load_image_targphys(uefi_filename, 0x2f00000UL, -1);
|
|
+ if (uefi_filename == NULL) {
|
|
+ error_report("no virtual bios provided");
|
|
+ exit(1);
|
|
+ }
|
|
+ size = load_image_targphys(uefi_filename, 0x2f00000UL, -1);
|
|
+ if (size < 0) {
|
|
+ error_report("could not load virtual bios: '%s'", uefi_filename);
|
|
+ exit(1);
|
|
+ }
|
|
g_free(uefi_filename);
|
|
} else {
|
|
/* Load a kernel. */
|
|
@@ -170,6 +190,7 @@ static void core3_machine_init(MachineClass *mc)
|
|
mc->init = core3_init;
|
|
mc->block_default_type = IF_IDE;
|
|
mc->max_cpus = MAX_CPUS_CORE3;
|
|
+ mc->pci_allow_0_address = true;
|
|
mc->is_default = 0;
|
|
mc->reset = board_reset;
|
|
mc->possible_cpu_arch_ids = sw64_possible_cpu_arch_ids;
|
|
diff --git a/hw/sw64/core3_board.c b/hw/sw64/core3_board.c
|
|
index 7853e01edb..7f623cf773 100644
|
|
--- a/hw/sw64/core3_board.c
|
|
+++ b/hw/sw64/core3_board.c
|
|
@@ -16,17 +16,27 @@
|
|
#include "hw/ide/ahci.h"
|
|
#include "sysemu/numa.h"
|
|
#include "sysemu/kvm.h"
|
|
-#include "hw/rtc/sun4v-rtc.h"
|
|
+#include "sysemu/cpus.h"
|
|
#include "hw/pci/msi.h"
|
|
#include "hw/sw64/sw64_iommu.h"
|
|
+#include "hw/loader.h"
|
|
+#include "hw/nvram/fw_cfg.h"
|
|
|
|
#define TYPE_SWBOARD_PCI_HOST_BRIDGE "core_board-pcihost"
|
|
#define SWBOARD_PCI_HOST_BRIDGE(obj) \
|
|
OBJECT_CHECK(BoardState, (obj), TYPE_SWBOARD_PCI_HOST_BRIDGE)
|
|
|
|
+#define CORE3_MAX_CPUS_MASK 0x3ff
|
|
+#define CORE3_CORES_SHIFT 10
|
|
+#define CORE3_CORES_MASK 0x3ff
|
|
+#define CORE3_THREADS_SHIFT 20
|
|
+#define CORE3_THREADS_MASK 0xfff
|
|
+
|
|
#define MAX_IDE_BUS 2
|
|
#define SW_PIN_TO_IRQ 16
|
|
|
|
+#define SW_FW_CFG_P_BASE (0x804920000000ULL)
|
|
+
|
|
typedef struct SWBoard {
|
|
SW64CPU *cpu[MAX_CPUS_CORE3];
|
|
} SWBoard;
|
|
@@ -43,6 +53,16 @@ typedef struct TimerState {
|
|
int order;
|
|
} TimerState;
|
|
|
|
+static void sw_create_fw_cfg(hwaddr addr)
|
|
+{
|
|
+ MachineState *ms = MACHINE(qdev_get_machine());
|
|
+ uint16_t smp_cpus = ms->smp.cpus;
|
|
+ FWCfgState *fw_cfg;
|
|
+ fw_cfg = fw_cfg_init_mem_wide(addr + 8, addr, 8, addr + 16, &address_space_memory);
|
|
+ fw_cfg_add_i16(fw_cfg, FW_CFG_NB_CPUS, smp_cpus);
|
|
+ rom_set_fw(fw_cfg);
|
|
+}
|
|
+
|
|
#ifndef CONFIG_KVM
|
|
static void swboard_alarm_timer(void *opaque)
|
|
{
|
|
@@ -65,10 +85,13 @@ static PCIINTxRoute sw_route_intx_pin_to_irq(void *opaque, int pin)
|
|
|
|
static uint64_t convert_bit(int n)
|
|
{
|
|
- uint64_t ret = (1UL << n) - 1;
|
|
+ uint64_t ret;
|
|
|
|
if (n == 64)
|
|
ret = 0xffffffffffffffffUL;
|
|
+ else
|
|
+ ret = (1UL << n) - 1;
|
|
+
|
|
return ret;
|
|
}
|
|
|
|
@@ -76,6 +99,9 @@ static uint64_t mcu_read(void *opaque, hwaddr addr, unsigned size)
|
|
{
|
|
MachineState *ms = MACHINE(qdev_get_machine());
|
|
unsigned int smp_cpus = ms->smp.cpus;
|
|
+ unsigned int smp_threads = ms->smp.threads;
|
|
+ unsigned int smp_cores = ms->smp.cores;
|
|
+ unsigned int max_cpus = ms->smp.max_cpus;
|
|
uint64_t ret = 0;
|
|
switch (addr) {
|
|
case 0x0000:
|
|
@@ -86,6 +112,12 @@ static uint64_t mcu_read(void *opaque, hwaddr addr, unsigned size)
|
|
ret |= (1UL << i);
|
|
}
|
|
break;
|
|
+ case 0x0080:
|
|
+ /* SMP_INFO */
|
|
+ ret = (smp_threads & CORE3_THREADS_MASK) << CORE3_THREADS_SHIFT;
|
|
+ ret += (smp_cores & CORE3_CORES_MASK) << CORE3_CORES_SHIFT;
|
|
+ ret += max_cpus & CORE3_MAX_CPUS_MASK;
|
|
+ break;
|
|
/*IO_START*/
|
|
case 0x1300:
|
|
ret = 0x1;
|
|
@@ -186,7 +218,7 @@ static void intpu_write(void *opaque, hwaddr addr, uint64_t val,
|
|
val &= 0x1f;
|
|
cpu = bs->sboard.cpu[val];
|
|
cpu->env.csr[II_REQ] = 0x100000;
|
|
- cpu_interrupt(CPU(cpu),CPU_INTERRUPT_IIMAIL);
|
|
+ cpu_interrupt(CPU(cpu),CPU_INTERRUPT_II0);
|
|
break;
|
|
default:
|
|
fprintf(stderr, "Unsupported IPU addr: 0x%04lx\n", addr);
|
|
@@ -254,6 +286,33 @@ static const MemoryRegionOps msi_ops = {
|
|
},
|
|
};
|
|
|
|
+static uint64_t rtc_read(void *opaque, hwaddr addr, unsigned size)
|
|
+{
|
|
+ uint64_t val = get_clock_realtime() / NANOSECONDS_PER_SECOND;
|
|
+ return val;
|
|
+}
|
|
+
|
|
+static void rtc_write(void *opaque, hwaddr addr, uint64_t val,
|
|
+ unsigned size)
|
|
+{
|
|
+}
|
|
+
|
|
+static const MemoryRegionOps rtc_ops = {
|
|
+ .read = rtc_read,
|
|
+ .write = rtc_write,
|
|
+ .endianness = DEVICE_LITTLE_ENDIAN,
|
|
+ .valid =
|
|
+ {
|
|
+ .min_access_size = 1,
|
|
+ .max_access_size = 8,
|
|
+ },
|
|
+ .impl =
|
|
+ {
|
|
+ .min_access_size = 1,
|
|
+ .max_access_size = 8,
|
|
+ },
|
|
+};
|
|
+
|
|
static uint64_t ignore_read(void *opaque, hwaddr addr, unsigned size)
|
|
{
|
|
return 1;
|
|
@@ -392,7 +451,7 @@ void core3_board_init(SW64CPU *cpus[MAX_CPUS], MemoryRegion *ram)
|
|
MemoryRegion *mem_ep64 = g_new(MemoryRegion, 1);
|
|
MemoryRegion *conf_piu0 = g_new(MemoryRegion, 1);
|
|
MemoryRegion *io_ep = g_new(MemoryRegion, 1);
|
|
-
|
|
+ MemoryRegion *io_rtc = g_new(MemoryRegion, 1);
|
|
MachineState *ms = MACHINE(qdev_get_machine());
|
|
unsigned int smp_cpus = ms->smp.cpus;
|
|
|
|
@@ -452,6 +511,10 @@ void core3_board_init(SW64CPU *cpus[MAX_CPUS], MemoryRegion *ram)
|
|
"pci0-ep-conf-io", 4 * GB);
|
|
memory_region_add_subregion(get_system_memory(), 0x880600000000ULL,
|
|
conf_piu0);
|
|
+ memory_region_init_io(io_rtc, OBJECT(bs), &rtc_ops, b,
|
|
+ "sw64-rtc", 0x08ULL);
|
|
+ memory_region_add_subregion(get_system_memory(), 0x804910000000ULL,
|
|
+ io_rtc);
|
|
#ifdef SW64_VT_IOMMU
|
|
sw64_vt_iommu_init(b);
|
|
#endif
|
|
@@ -476,7 +539,7 @@ void core3_board_init(SW64CPU *cpus[MAX_CPUS], MemoryRegion *ram)
|
|
DEVICE_LITTLE_ENDIAN);
|
|
}
|
|
pci_create_simple(phb->bus, -1, "nec-usb-xhci");
|
|
- sun4v_rtc_init(0x804910000000ULL);
|
|
+ sw_create_fw_cfg(SW_FW_CFG_P_BASE);
|
|
}
|
|
|
|
static const TypeInfo swboard_pcihost_info = {
|
|
diff --git a/hw/sw64/sw64_iommu.c b/hw/sw64/sw64_iommu.c
|
|
index 8ded65f213..1ede2a2ce4 100644
|
|
--- a/hw/sw64/sw64_iommu.c
|
|
+++ b/hw/sw64/sw64_iommu.c
|
|
@@ -124,7 +124,7 @@ static int get_pte(dma_addr_t baseaddr, uint64_t *pte)
|
|
|
|
/* TODO: guarantee 64-bit single-copy atomicity */
|
|
ret = dma_memory_read(&address_space_memory, baseaddr,
|
|
- (uint8_t *)pte, sizeof(*pte));
|
|
+ (uint8_t *)pte, sizeof(*pte), MEMTXATTRS_UNSPECIFIED);
|
|
|
|
if (ret != MEMTX_OK)
|
|
return -EINVAL;
|
|
@@ -195,15 +195,18 @@ static void swvt_ptiotlb_inv_all(SW64IOMMUState *s)
|
|
g_hash_table_remove_all(s->ptiotlb);
|
|
}
|
|
|
|
-static void swvt_lookup_ptiotlb(SW64IOMMUState *s, uint16_t source_id,
|
|
- hwaddr addr, IOMMUTLBEntry *entry)
|
|
+static IOMMUTLBEntry *swvt_lookup_ptiotlb(SW64IOMMUState *s, uint16_t source_id,
|
|
+ hwaddr addr)
|
|
{
|
|
SW64PTIOTLBKey ptkey;
|
|
+ IOMMUTLBEntry *entry = NULL;
|
|
|
|
ptkey.source_id = source_id;
|
|
ptkey.iova = addr;
|
|
|
|
entry = g_hash_table_lookup(s->ptiotlb, &ptkey);
|
|
+
|
|
+ return entry;
|
|
}
|
|
|
|
static IOMMUTLBEntry sw64_translate_iommu(IOMMUMemoryRegion *iommu, hwaddr addr,
|
|
@@ -230,7 +233,7 @@ static IOMMUTLBEntry sw64_translate_iommu(IOMMUMemoryRegion *iommu, hwaddr addr,
|
|
|
|
aligned_addr = addr & IOMMU_PAGE_MASK_8K;
|
|
|
|
- swvt_lookup_ptiotlb(s, aligned_addr, source_id, cached_entry);
|
|
+ cached_entry = swvt_lookup_ptiotlb(s, source_id, aligned_addr);
|
|
|
|
if (cached_entry)
|
|
goto out;
|
|
diff --git a/linux-headers/asm-sw64/kvm.h b/linux-headers/asm-sw64/kvm.h
|
|
index b0ce2ca346..5de7014b52 100644
|
|
--- a/linux-headers/asm-sw64/kvm.h
|
|
+++ b/linux-headers/asm-sw64/kvm.h
|
|
@@ -2,6 +2,9 @@
|
|
#define __LINUX_KVM_SW64_H
|
|
|
|
#include <linux/types.h>
|
|
+
|
|
+#define __KVM_HAVE_GUEST_DEBUG
|
|
+
|
|
/*
|
|
* for KVM_GET_REGS and KVM_SET_REGS
|
|
*/
|
|
@@ -88,6 +91,16 @@ struct vcpucb {
|
|
unsigned long exit_reason;
|
|
unsigned long ipaddr;
|
|
unsigned long vcpu_irq_vector;
|
|
+ unsigned long pri_base;
|
|
+ unsigned long stack_pc_dfault;
|
|
+ unsigned long guest_p20;
|
|
+ unsigned long guest_dfault_double;
|
|
+ unsigned long guest_irqs_pending;
|
|
+ unsigned long guest_hm_r30;
|
|
+ unsigned long migration_mark;
|
|
+ unsigned long guest_longtime;
|
|
+ unsigned long guest_longtime_offset;
|
|
+ unsigned long reserved[3];
|
|
};
|
|
|
|
/*
|
|
@@ -100,6 +113,7 @@ struct kvm_fpu {
|
|
* KVM SW_64 specific structures and definitions
|
|
*/
|
|
struct kvm_debug_exit_arch {
|
|
+ unsigned long epc;
|
|
};
|
|
|
|
/* for KVM_SET_GUEST_DEBUG */
|
|
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
|
|
index 2625af99dd..e274c0bd33 100644
|
|
--- a/linux-user/elfload.c
|
|
+++ b/linux-user/elfload.c
|
|
@@ -1549,6 +1549,22 @@ static inline void init_thread(struct target_pt_regs *regs,
|
|
|
|
#endif /* TARGET_HPPA */
|
|
|
|
+#ifdef TARGET_SW64
|
|
+
|
|
+#define ELF_CLASS ELFCLASS64
|
|
+#define ELF_ARCH EM_SW64
|
|
+
|
|
+#define ELF_START_MMAP (0x30000000000ULL)
|
|
+
|
|
+static inline void init_thread(struct target_pt_regs *regs,
|
|
+ struct image_info *infop)
|
|
+{
|
|
+ regs->pc = infop->entry;
|
|
+ regs->usp = infop->start_stack;
|
|
+}
|
|
+
|
|
+#endif /* TARGET_SW64 */
|
|
+
|
|
#ifdef TARGET_XTENSA
|
|
|
|
#define ELF_START_MMAP 0x20000000
|
|
diff --git a/linux-user/host/sw64/host-signal.h b/linux-user/host/sw64/host-signal.h
|
|
new file mode 100644
|
|
index 0000000000..11d6e97605
|
|
--- /dev/null
|
|
+++ b/linux-user/host/sw64/host-signal.h
|
|
@@ -0,0 +1,46 @@
|
|
+/*
|
|
+ * host-signal.h: signal info dependent on the host architecture
|
|
+ *
|
|
+ * Copyright (c) 2023 wxiat
|
|
+ *
|
|
+ * This work is licensed under the terms of the GNU LGPL, version 2.1 or later.
|
|
+ * See the COPYING file in the top-level directory.
|
|
+ */
|
|
+
|
|
+#ifndef SW64_HOST_SIGNAL_H
|
|
+#define SW64_HOST_SIGNAL_H
|
|
+
|
|
+static inline uintptr_t host_signal_pc(ucontext_t *uc)
|
|
+{
|
|
+ return uc->uc_mcontext.sc_pc;
|
|
+}
|
|
+
|
|
+static inline void host_signal_set_pc(ucontext_t *uc, uintptr_t pc)
|
|
+{
|
|
+ uc->uc_mcontext.sc_pc = pc;
|
|
+}
|
|
+
|
|
+static inline bool host_signal_write(siginfo_t *info, ucontext_t *uc)
|
|
+{
|
|
+ uint32_t *pc = (uint32_t *)host_signal_pc(uc);
|
|
+ uint32_t insn = *pc;
|
|
+
|
|
+ /* XXX: need kernel patch to get write flag faster */
|
|
+ switch (insn >> 26) {
|
|
+ case 0x0d: /* stw */
|
|
+ case 0x0e: /* stb */
|
|
+ case 0x0f: /* stq_u */
|
|
+ case 0x24: /* stf */
|
|
+ case 0x25: /* stg */
|
|
+ case 0x26: /* sts */
|
|
+ case 0x27: /* stt */
|
|
+ case 0x2c: /* stl */
|
|
+ case 0x2d: /* stq */
|
|
+ case 0x2e: /* stl_c */
|
|
+ case 0x2f: /* stq_c */
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
+#endif
|
|
diff --git a/linux-user/host/sw64/hostdep.h b/linux-user/host/sw64/hostdep.h
|
|
new file mode 100755
|
|
index 0000000000..b30ac70100
|
|
--- /dev/null
|
|
+++ b/linux-user/host/sw64/hostdep.h
|
|
@@ -0,0 +1,14 @@
|
|
+/*
|
|
+ * hostdep.h : things which are dependent on the host architecture
|
|
+ *
|
|
+ * * Written by Wang Yuanheng
|
|
+ *
|
|
+ * Copyright (C) 2023 wxiat
|
|
+ *
|
|
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
|
|
+ * See the COPYING file in the top-level directory.
|
|
+ */
|
|
+
|
|
+#ifndef SW_64_HOSTDEP_H
|
|
+#define SW_64_HOSTDEP_H
|
|
+#endif
|
|
diff --git a/linux-user/sw64/cpu_loop.c b/linux-user/sw64/cpu_loop.c
|
|
index 3f2fde0fba..389b753401 100644
|
|
--- a/linux-user/sw64/cpu_loop.c
|
|
+++ b/linux-user/sw64/cpu_loop.c
|
|
@@ -18,8 +18,11 @@
|
|
*/
|
|
|
|
#include "qemu/osdep.h"
|
|
+#include "qemu-common.h"
|
|
#include "qemu.h"
|
|
+#include "user-internals.h"
|
|
#include "cpu_loop-common.h"
|
|
+#include "signal-common.h"
|
|
|
|
void cpu_loop(CPUSW64State *env)
|
|
{
|
|
@@ -89,7 +92,7 @@ void cpu_loop(CPUSW64State *env)
|
|
}
|
|
process_pending_signals (env);
|
|
|
|
- /* Most of the traps imply a transition through HMcode, which
|
|
+ /* Most of the traps imply a transition through hmcode, which
|
|
implies an REI instruction has been executed. Which means
|
|
that RX and LOCK_ADDR should be cleared. But there are a
|
|
few exceptions for traps internal to QEMU. */
|
|
diff --git a/linux-user/sw64/meson.build b/linux-user/sw64/meson.build
|
|
new file mode 100644
|
|
index 0000000000..eda0056782
|
|
--- /dev/null
|
|
+++ b/linux-user/sw64/meson.build
|
|
@@ -0,0 +1,5 @@
|
|
+syscall_nr_generators += {
|
|
+ 'sw64': generator(sh,
|
|
+ arguments: [ meson.current_source_dir() / 'syscallhdr.sh', '@INPUT@', '@OUTPUT@', '@EXTRA_ARGS@' ],
|
|
+ output: '@BASENAME@_nr.h')
|
|
+}
|
|
diff --git a/linux-user/sw64/signal.c b/linux-user/sw64/signal.c
|
|
index 5822e808d3..572e192a95 100644
|
|
--- a/linux-user/sw64/signal.c
|
|
+++ b/linux-user/sw64/signal.c
|
|
@@ -18,6 +18,7 @@
|
|
*/
|
|
#include "qemu/osdep.h"
|
|
#include "qemu.h"
|
|
+#include "user-internals.h"
|
|
#include "signal-common.h"
|
|
#include "linux-user/trace.h"
|
|
|
|
@@ -138,8 +139,8 @@ void setup_frame(int sig, struct target_sigaction *ka,
|
|
|
|
setup_sigcontext(&frame->sc, env, frame_addr, set);
|
|
|
|
- if (ka->sa_restorer) {
|
|
- r26 = ka->sa_restorer;
|
|
+ if (ka->ka_restorer) {
|
|
+ r26 = ka->ka_restorer;
|
|
} else {
|
|
__put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
|
|
__put_user(INSN_LDI_R0 + TARGET_NR_sigreturn,
|
|
@@ -192,8 +193,8 @@ void setup_rt_frame(int sig, struct target_sigaction *ka,
|
|
__put_user(set->sig[i], &frame->uc.tuc_sigmask.sig[i]);
|
|
}
|
|
|
|
- if (ka->sa_restorer) {
|
|
- r26 = ka->sa_restorer;
|
|
+ if (ka->ka_restorer) {
|
|
+ r26 = ka->ka_restorer;
|
|
} else {
|
|
__put_user(INSN_MOV_R30_R16, &frame->retcode[0]);
|
|
__put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn,
|
|
@@ -256,11 +257,7 @@ long do_rt_sigreturn(CPUSW64State *env)
|
|
set_sigmask(&set);
|
|
|
|
restore_sigcontext(env, &frame->uc.tuc_mcontext);
|
|
- if (do_sigaltstack(frame_addr + offsetof(struct target_rt_sigframe,
|
|
- uc.tuc_stack),
|
|
- 0, env->ir[IDX_SP]) == -EFAULT) {
|
|
- goto badframe;
|
|
- }
|
|
+ target_restore_altstack(&frame->uc.tuc_stack, env);
|
|
|
|
unlock_user_struct(frame, frame_addr, 0);
|
|
return -TARGET_QEMU_ESIGRETURN;
|
|
@@ -271,3 +268,21 @@ badframe:
|
|
force_sig(TARGET_SIGSEGV);
|
|
return -TARGET_QEMU_ESIGRETURN;
|
|
}
|
|
+
|
|
+void setup_sigtramp(abi_ulong sigtramp_page)
|
|
+{
|
|
+ uint32_t *tramp = lock_user(VERIFY_WRITE, sigtramp_page, 6 * 4, 0);
|
|
+ assert(tramp != NULL);
|
|
+
|
|
+ default_sigreturn = sigtramp_page;
|
|
+ __put_user(INSN_MOV_R30_R16, &tramp[0]);
|
|
+ __put_user(INSN_LDI_R0 + TARGET_NR_sigreturn, &tramp[1]);
|
|
+ __put_user(INSN_CALLSYS, &tramp[2]);
|
|
+
|
|
+ default_rt_sigreturn = sigtramp_page + 3 * 4;
|
|
+ __put_user(INSN_MOV_R30_R16, &tramp[3]);
|
|
+ __put_user(INSN_LDI_R0 + TARGET_NR_rt_sigreturn, &tramp[4]);
|
|
+ __put_user(INSN_CALLSYS, &tramp[5]);
|
|
+
|
|
+ unlock_user(tramp, sigtramp_page, 6 * 4);
|
|
+}
|
|
diff --git a/linux-user/sw64/syscall.tbl b/linux-user/sw64/syscall.tbl
|
|
new file mode 100644
|
|
index 0000000000..d007c7bb07
|
|
--- /dev/null
|
|
+++ b/linux-user/sw64/syscall.tbl
|
|
@@ -0,0 +1,488 @@
|
|
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
|
|
+#
|
|
+# system call numbers and entry vectors for sw64
|
|
+#
|
|
+# The format is:
|
|
+# <number> <abi> <name> <entry point>
|
|
+#
|
|
+# The <abi> is always "common" for this file
|
|
+#
|
|
+0 common osf_syscall sw64_syscall_zero
|
|
+1 common exit sys_exit
|
|
+2 common fork sw64_fork
|
|
+3 common read sys_read
|
|
+4 common write sys_write
|
|
+5 common osf_old_open sys_ni_syscall
|
|
+6 common close sys_close
|
|
+7 common osf_wait4 sys_osf_wait4
|
|
+8 common osf_old_creat sys_ni_syscall
|
|
+9 common link sys_link
|
|
+10 common unlink sys_unlink
|
|
+11 common osf_execve sys_ni_syscall
|
|
+12 common chdir sys_chdir
|
|
+13 common fchdir sys_fchdir
|
|
+14 common mknod sys_mknod
|
|
+15 common chmod sys_chmod
|
|
+16 common chown sys_chown
|
|
+17 common brk sys_osf_brk
|
|
+18 common osf_getfsstat sys_ni_syscall
|
|
+19 common lseek sys_lseek
|
|
+20 common getxpid sys_getxpid
|
|
+21 common osf_mount sys_osf_mount
|
|
+22 common umount2 sys_umount
|
|
+23 common setuid sys_setuid
|
|
+24 common getxuid sys_getxuid
|
|
+25 common exec_with_loader sys_ni_syscall
|
|
+26 common ptrace sys_ptrace
|
|
+27 common osf_nrecvmsg sys_ni_syscall
|
|
+28 common osf_nsendmsg sys_ni_syscall
|
|
+29 common osf_nrecvfrom sys_ni_syscall
|
|
+30 common osf_naccept sys_ni_syscall
|
|
+31 common osf_ngetpeername sys_ni_syscall
|
|
+32 common osf_ngetsockname sys_ni_syscall
|
|
+33 common access sys_access
|
|
+34 common osf_chflags sys_ni_syscall
|
|
+35 common osf_fchflags sys_ni_syscall
|
|
+36 common sync sys_sync
|
|
+37 common kill sys_kill
|
|
+38 common osf_old_stat sys_ni_syscall
|
|
+39 common setpgid sys_setpgid
|
|
+40 common osf_old_lstat sys_ni_syscall
|
|
+41 common dup sys_dup
|
|
+42 common pipe sys_sw64_pipe
|
|
+43 common osf_set_program_attributes sys_osf_set_program_attributes
|
|
+44 common osf_profil sys_ni_syscall
|
|
+45 common open sys_open
|
|
+46 common osf_old_sigaction sys_ni_syscall
|
|
+47 common getxgid sys_getxgid
|
|
+48 common osf_sigprocmask sys_osf_sigprocmask
|
|
+49 common osf_getlogin sys_ni_syscall
|
|
+50 common osf_setlogin sys_ni_syscall
|
|
+51 common acct sys_acct
|
|
+52 common sigpending sys_sigpending
|
|
+54 common ioctl sys_ioctl
|
|
+55 common osf_reboot sys_ni_syscall
|
|
+56 common osf_revoke sys_ni_syscall
|
|
+57 common symlink sys_symlink
|
|
+58 common readlink sys_readlink
|
|
+59 common execve sys_execve
|
|
+60 common umask sys_umask
|
|
+61 common chroot sys_chroot
|
|
+62 common osf_old_fstat sys_ni_syscall
|
|
+63 common getpgrp sys_getpgrp
|
|
+64 common getpagesize sys_getpagesize
|
|
+65 common osf_mremap sys_ni_syscall
|
|
+66 common vfork sw64_vfork
|
|
+67 common stat sys_newstat
|
|
+68 common lstat sys_newlstat
|
|
+69 common osf_sbrk sys_ni_syscall
|
|
+70 common osf_sstk sys_ni_syscall
|
|
+71 common mmap sys_osf_mmap
|
|
+72 common osf_old_vadvise sys_ni_syscall
|
|
+73 common munmap sys_munmap
|
|
+74 common mprotect sys_mprotect
|
|
+75 common madvise sys_madvise
|
|
+76 common vhangup sys_vhangup
|
|
+77 common osf_kmodcall sys_ni_syscall
|
|
+78 common osf_mincore sys_ni_syscall
|
|
+79 common getgroups sys_getgroups
|
|
+80 common setgroups sys_setgroups
|
|
+81 common osf_old_getpgrp sys_ni_syscall
|
|
+82 common setpgrp sys_setpgid
|
|
+83 common osf_setitimer compat_sys_setitimer
|
|
+84 common osf_old_wait sys_ni_syscall
|
|
+85 common osf_table sys_ni_syscall
|
|
+86 common osf_getitimer compat_sys_getitimer
|
|
+87 common gethostname sys_gethostname
|
|
+88 common sethostname sys_sethostname
|
|
+89 common getdtablesize sys_getdtablesize
|
|
+90 common dup2 sys_dup2
|
|
+91 common fstat sys_newfstat
|
|
+92 common fcntl sys_fcntl
|
|
+93 common osf_select sys_osf_select
|
|
+94 common poll sys_poll
|
|
+95 common fsync sys_fsync
|
|
+96 common setpriority sys_setpriority
|
|
+97 common socket sys_socket
|
|
+98 common connect sys_connect
|
|
+99 common accept sys_accept
|
|
+100 common getpriority sys_osf_getpriority
|
|
+101 common send sys_send
|
|
+102 common recv sys_recv
|
|
+103 common sigreturn sys_sigreturn
|
|
+104 common bind sys_bind
|
|
+105 common setsockopt sys_setsockopt
|
|
+106 common listen sys_listen
|
|
+107 common osf_plock sys_ni_syscall
|
|
+108 common osf_old_sigvec sys_ni_syscall
|
|
+109 common osf_old_sigblock sys_ni_syscall
|
|
+110 common osf_old_sigsetmask sys_ni_syscall
|
|
+111 common sigsuspend sys_sigsuspend
|
|
+112 common osf_sigstack sys_osf_sigstack
|
|
+113 common recvmsg sys_recvmsg
|
|
+114 common sendmsg sys_sendmsg
|
|
+115 common osf_old_vtrace sys_ni_syscall
|
|
+116 common osf_gettimeofday sys_osf_gettimeofday
|
|
+117 common osf_getrusage sys_osf_getrusage
|
|
+118 common getsockopt sys_getsockopt
|
|
+120 common readv sys_osf_readv
|
|
+121 common writev sys_osf_writev
|
|
+122 common osf_settimeofday sys_osf_settimeofday
|
|
+123 common fchown sys_fchown
|
|
+124 common fchmod sys_fchmod
|
|
+125 common recvfrom sys_recvfrom
|
|
+126 common setreuid sys_setreuid
|
|
+127 common setregid sys_setregid
|
|
+128 common rename sys_rename
|
|
+129 common truncate sys_truncate
|
|
+130 common ftruncate sys_ftruncate
|
|
+131 common flock sys_flock
|
|
+132 common setgid sys_setgid
|
|
+133 common sendto sys_sendto
|
|
+134 common shutdown sys_shutdown
|
|
+135 common socketpair sys_socketpair
|
|
+136 common mkdir sys_mkdir
|
|
+137 common rmdir sys_rmdir
|
|
+138 common osf_utimes sys_osf_utimes
|
|
+139 common osf_old_sigreturn sys_ni_syscall
|
|
+140 common osf_adjtime sys_ni_syscall
|
|
+141 common getpeername sys_getpeername
|
|
+142 common osf_gethostid sys_ni_syscall
|
|
+143 common osf_sethostid sys_ni_syscall
|
|
+144 common getrlimit sys_getrlimit
|
|
+145 common setrlimit sys_setrlimit
|
|
+146 common osf_old_killpg sys_ni_syscall
|
|
+147 common setsid sys_setsid
|
|
+148 common quotactl sys_quotactl
|
|
+149 common osf_oldquota sys_ni_syscall
|
|
+150 common getsockname sys_getsockname
|
|
+153 common osf_pid_block sys_ni_syscall
|
|
+154 common osf_pid_unblock sys_ni_syscall
|
|
+156 common sigaction sys_osf_sigaction
|
|
+157 common osf_sigwaitprim sys_ni_syscall
|
|
+158 common osf_nfssvc sys_ni_syscall
|
|
+159 common osf_getdirentries sys_osf_getdirentries
|
|
+160 common osf_statfs sys_osf_statfs
|
|
+161 common osf_fstatfs sys_osf_fstatfs
|
|
+163 common osf_asynch_daemon sys_ni_syscall
|
|
+164 common osf_getfh sys_ni_syscall
|
|
+165 common osf_getdomainname sys_osf_getdomainname
|
|
+166 common setdomainname sys_setdomainname
|
|
+169 common osf_exportfs sys_ni_syscall
|
|
+181 common osf_alt_plock sys_ni_syscall
|
|
+184 common osf_getmnt sys_ni_syscall
|
|
+187 common osf_alt_sigpending sys_ni_syscall
|
|
+188 common osf_alt_setsid sys_ni_syscall
|
|
+199 common osf_swapon sys_swapon
|
|
+200 common msgctl sys_old_msgctl
|
|
+201 common msgget sys_msgget
|
|
+202 common msgrcv sys_msgrcv
|
|
+203 common msgsnd sys_msgsnd
|
|
+204 common semctl sys_old_semctl
|
|
+205 common semget sys_semget
|
|
+206 common semop sys_semop
|
|
+207 common osf_utsname sys_osf_utsname
|
|
+208 common lchown sys_lchown
|
|
+209 common shmat sys_shmat
|
|
+210 common shmctl sys_old_shmctl
|
|
+211 common shmdt sys_shmdt
|
|
+212 common shmget sys_shmget
|
|
+213 common osf_mvalid sys_ni_syscall
|
|
+214 common osf_getaddressconf sys_ni_syscall
|
|
+215 common osf_msleep sys_ni_syscall
|
|
+216 common osf_mwakeup sys_ni_syscall
|
|
+217 common msync sys_msync
|
|
+218 common osf_signal sys_ni_syscall
|
|
+219 common osf_utc_gettime sys_ni_syscall
|
|
+220 common osf_utc_adjtime sys_ni_syscall
|
|
+222 common osf_security sys_ni_syscall
|
|
+223 common osf_kloadcall sys_ni_syscall
|
|
+224 common osf_stat sys_osf_stat
|
|
+225 common osf_lstat sys_osf_lstat
|
|
+226 common osf_fstat sys_osf_fstat
|
|
+227 common osf_statfs64 sys_osf_statfs64
|
|
+228 common osf_fstatfs64 sys_osf_fstatfs64
|
|
+233 common getpgid sys_getpgid
|
|
+234 common getsid sys_getsid
|
|
+235 common sigaltstack sys_sigaltstack
|
|
+236 common osf_waitid sys_ni_syscall
|
|
+237 common osf_priocntlset sys_ni_syscall
|
|
+238 common osf_sigsendset sys_ni_syscall
|
|
+239 common osf_set_speculative sys_ni_syscall
|
|
+240 common osf_msfs_syscall sys_ni_syscall
|
|
+241 common osf_sysinfo sys_osf_sysinfo
|
|
+242 common osf_uadmin sys_ni_syscall
|
|
+243 common osf_fuser sys_ni_syscall
|
|
+244 common osf_proplist_syscall sys_osf_proplist_syscall
|
|
+245 common osf_ntp_adjtime sys_ni_syscall
|
|
+246 common osf_ntp_gettime sys_ni_syscall
|
|
+247 common osf_pathconf sys_ni_syscall
|
|
+248 common osf_fpathconf sys_ni_syscall
|
|
+250 common osf_uswitch sys_ni_syscall
|
|
+251 common osf_usleep_thread sys_osf_usleep_thread
|
|
+252 common osf_audcntl sys_ni_syscall
|
|
+253 common osf_audgen sys_ni_syscall
|
|
+254 common sysfs sys_sysfs
|
|
+255 common osf_subsys_info sys_ni_syscall
|
|
+256 common osf_getsysinfo sys_osf_getsysinfo
|
|
+257 common osf_setsysinfo sys_osf_setsysinfo
|
|
+258 common osf_afs_syscall sys_ni_syscall
|
|
+259 common osf_swapctl sys_ni_syscall
|
|
+260 common osf_memcntl sys_ni_syscall
|
|
+261 common osf_fdatasync sys_ni_syscall
|
|
+300 common bdflush sys_bdflush
|
|
+301 common sethae sys_sethae
|
|
+302 common mount sys_mount
|
|
+303 common old_adjtimex sys_old_adjtimex
|
|
+304 common swapoff sys_swapoff
|
|
+305 common getdents sys_getdents
|
|
+306 common create_module sys_ni_syscall
|
|
+307 common init_module sys_init_module
|
|
+308 common delete_module sys_delete_module
|
|
+309 common get_kernel_syms sys_ni_syscall
|
|
+310 common syslog sys_syslog
|
|
+311 common reboot sys_reboot
|
|
+312 common clone sw64_clone
|
|
+313 common uselib sys_uselib
|
|
+314 common mlock sys_mlock
|
|
+315 common munlock sys_munlock
|
|
+316 common mlockall sys_mlockall
|
|
+317 common munlockall sys_munlockall
|
|
+318 common sysinfo sys_sysinfo
|
|
+319 common _sysctl sys_ni_syscall
|
|
+# 320 was sys_idle
|
|
+321 common oldumount sys_oldumount
|
|
+322 common swapon sys_swapon
|
|
+323 common times sys_times
|
|
+324 common personality sys_personality
|
|
+325 common setfsuid sys_setfsuid
|
|
+326 common setfsgid sys_setfsgid
|
|
+327 common ustat sys_ustat
|
|
+328 common statfs sys_statfs
|
|
+329 common fstatfs sys_fstatfs
|
|
+330 common sched_setparam sys_sched_setparam
|
|
+331 common sched_getparam sys_sched_getparam
|
|
+332 common sched_setscheduler sys_sched_setscheduler
|
|
+333 common sched_getscheduler sys_sched_getscheduler
|
|
+334 common sched_yield sys_sched_yield
|
|
+335 common sched_get_priority_max sys_sched_get_priority_max
|
|
+336 common sched_get_priority_min sys_sched_get_priority_min
|
|
+337 common sched_rr_get_interval sys_sched_rr_get_interval
|
|
+338 common afs_syscall sys_ni_syscall
|
|
+339 common uname sys_newuname
|
|
+340 common nanosleep sys_nanosleep
|
|
+341 common mremap sys_mremap
|
|
+342 common nfsservctl sys_ni_syscall
|
|
+343 common setresuid sys_setresuid
|
|
+344 common getresuid sys_getresuid
|
|
+345 common pciconfig_read sys_pciconfig_read
|
|
+346 common pciconfig_write sys_pciconfig_write
|
|
+347 common query_module sys_ni_syscall
|
|
+348 common prctl sys_prctl
|
|
+349 common pread64 sys_pread64
|
|
+350 common pwrite64 sys_pwrite64
|
|
+351 common rt_sigreturn sys_rt_sigreturn
|
|
+352 common rt_sigaction sys_rt_sigaction
|
|
+353 common rt_sigprocmask sys_rt_sigprocmask
|
|
+354 common rt_sigpending sys_rt_sigpending
|
|
+355 common rt_sigtimedwait sys_rt_sigtimedwait
|
|
+356 common rt_sigqueueinfo sys_rt_sigqueueinfo
|
|
+357 common rt_sigsuspend sys_rt_sigsuspend
|
|
+358 common select sys_select
|
|
+359 common gettimeofday sys_gettimeofday
|
|
+360 common settimeofday sys_settimeofday
|
|
+361 common getitimer sys_getitimer
|
|
+362 common setitimer sys_setitimer
|
|
+363 common utimes sys_utimes
|
|
+364 common getrusage sys_getrusage
|
|
+365 common wait4 sys_wait4
|
|
+366 common adjtimex sys_adjtimex
|
|
+367 common getcwd sys_getcwd
|
|
+368 common capget sys_capget
|
|
+369 common capset sys_capset
|
|
+370 common sendfile sys_sendfile64
|
|
+371 common setresgid sys_setresgid
|
|
+372 common getresgid sys_getresgid
|
|
+373 common dipc sys_ni_syscall
|
|
+374 common pivot_root sys_pivot_root
|
|
+375 common mincore sys_mincore
|
|
+376 common pciconfig_iobase sys_pciconfig_iobase
|
|
+377 common getdents64 sys_getdents64
|
|
+378 common gettid sys_gettid
|
|
+379 common readahead sys_readahead
|
|
+# 380 is unused
|
|
+381 common tkill sys_tkill
|
|
+382 common setxattr sys_setxattr
|
|
+383 common lsetxattr sys_lsetxattr
|
|
+384 common fsetxattr sys_fsetxattr
|
|
+385 common getxattr sys_getxattr
|
|
+386 common lgetxattr sys_lgetxattr
|
|
+387 common fgetxattr sys_fgetxattr
|
|
+388 common listxattr sys_listxattr
|
|
+389 common llistxattr sys_llistxattr
|
|
+390 common flistxattr sys_flistxattr
|
|
+391 common removexattr sys_removexattr
|
|
+392 common lremovexattr sys_lremovexattr
|
|
+393 common fremovexattr sys_fremovexattr
|
|
+394 common futex sys_futex
|
|
+395 common sched_setaffinity sys_sched_setaffinity
|
|
+396 common sched_getaffinity sys_sched_getaffinity
|
|
+397 common tuxcall sys_ni_syscall
|
|
+398 common io_setup sys_io_setup
|
|
+399 common io_destroy sys_io_destroy
|
|
+400 common io_getevents sys_io_getevents
|
|
+401 common io_submit sys_io_submit
|
|
+402 common io_cancel sys_io_cancel
|
|
+405 common exit_group sys_exit_group
|
|
+406 common lookup_dcookie sys_lookup_dcookie
|
|
+407 common epoll_create sys_epoll_create
|
|
+408 common epoll_ctl sys_epoll_ctl
|
|
+409 common epoll_wait sys_epoll_wait
|
|
+410 common remap_file_pages sys_remap_file_pages
|
|
+411 common set_tid_address sys_set_tid_address
|
|
+412 common restart_syscall sys_restart_syscall
|
|
+413 common fadvise64 sys_fadvise64
|
|
+414 common timer_create sys_timer_create
|
|
+415 common timer_settime sys_timer_settime
|
|
+416 common timer_gettime sys_timer_gettime
|
|
+417 common timer_getoverrun sys_timer_getoverrun
|
|
+418 common timer_delete sys_timer_delete
|
|
+419 common clock_settime sys_clock_settime
|
|
+420 common clock_gettime sys_clock_gettime
|
|
+421 common clock_getres sys_clock_getres
|
|
+422 common clock_nanosleep sys_clock_nanosleep
|
|
+423 common semtimedop sys_semtimedop
|
|
+424 common tgkill sys_tgkill
|
|
+425 common stat64 sys_stat64
|
|
+426 common lstat64 sys_lstat64
|
|
+427 common fstat64 sys_fstat64
|
|
+428 common vserver sys_ni_syscall
|
|
+429 common mbind sys_ni_syscall
|
|
+430 common get_mempolicy sys_ni_syscall
|
|
+431 common set_mempolicy sys_ni_syscall
|
|
+432 common mq_open sys_mq_open
|
|
+433 common mq_unlink sys_mq_unlink
|
|
+434 common mq_timedsend sys_mq_timedsend
|
|
+435 common mq_timedreceive sys_mq_timedreceive
|
|
+436 common mq_notify sys_mq_notify
|
|
+437 common mq_getsetattr sys_mq_getsetattr
|
|
+438 common waitid sys_waitid
|
|
+439 common add_key sys_add_key
|
|
+440 common request_key sys_request_key
|
|
+441 common keyctl sys_keyctl
|
|
+442 common ioprio_set sys_ioprio_set
|
|
+443 common ioprio_get sys_ioprio_get
|
|
+444 common inotify_init sys_inotify_init
|
|
+445 common inotify_add_watch sys_inotify_add_watch
|
|
+446 common inotify_rm_watch sys_inotify_rm_watch
|
|
+447 common fdatasync sys_fdatasync
|
|
+448 common kexec_load sys_kexec_load
|
|
+449 common migrate_pages sys_migrate_pages
|
|
+450 common openat sys_openat
|
|
+451 common mkdirat sys_mkdirat
|
|
+452 common mknodat sys_mknodat
|
|
+453 common fchownat sys_fchownat
|
|
+454 common futimesat sys_futimesat
|
|
+455 common fstatat64 sys_fstatat64
|
|
+456 common unlinkat sys_unlinkat
|
|
+457 common renameat sys_renameat
|
|
+458 common linkat sys_linkat
|
|
+459 common symlinkat sys_symlinkat
|
|
+460 common readlinkat sys_readlinkat
|
|
+461 common fchmodat sys_fchmodat
|
|
+462 common faccessat sys_faccessat
|
|
+463 common pselect6 sys_pselect6
|
|
+464 common ppoll sys_ppoll
|
|
+465 common unshare sys_unshare
|
|
+466 common set_robust_list sys_set_robust_list
|
|
+467 common get_robust_list sys_get_robust_list
|
|
+468 common splice sys_splice
|
|
+469 common sync_file_range sys_sync_file_range
|
|
+470 common tee sys_tee
|
|
+471 common vmsplice sys_vmsplice
|
|
+472 common move_pages sys_move_pages
|
|
+473 common getcpu sys_getcpu
|
|
+474 common epoll_pwait sys_epoll_pwait
|
|
+475 common utimensat sys_utimensat
|
|
+476 common signalfd sys_signalfd
|
|
+477 common timerfd sys_ni_syscall
|
|
+478 common eventfd sys_eventfd
|
|
+479 common recvmmsg sys_recvmmsg
|
|
+480 common fallocate sys_fallocate
|
|
+481 common timerfd_create sys_timerfd_create
|
|
+482 common timerfd_settime sys_timerfd_settime
|
|
+483 common timerfd_gettime sys_timerfd_gettime
|
|
+484 common signalfd4 sys_signalfd4
|
|
+485 common eventfd2 sys_eventfd2
|
|
+486 common epoll_create1 sys_epoll_create1
|
|
+487 common dup3 sys_dup3
|
|
+488 common pipe2 sys_pipe2
|
|
+489 common inotify_init1 sys_inotify_init1
|
|
+490 common preadv sys_preadv
|
|
+491 common pwritev sys_pwritev
|
|
+492 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
|
|
+493 common perf_event_open sys_perf_event_open
|
|
+494 common fanotify_init sys_fanotify_init
|
|
+495 common fanotify_mark sys_fanotify_mark
|
|
+496 common prlimit64 sys_prlimit64
|
|
+497 common name_to_handle_at sys_name_to_handle_at
|
|
+498 common open_by_handle_at sys_open_by_handle_at
|
|
+499 common clock_adjtime sys_clock_adjtime
|
|
+500 common syncfs sys_syncfs
|
|
+501 common setns sys_setns
|
|
+502 common accept4 sys_accept4
|
|
+503 common sendmmsg sys_sendmmsg
|
|
+504 common process_vm_readv sys_process_vm_readv
|
|
+505 common process_vm_writev sys_process_vm_writev
|
|
+506 common kcmp sys_kcmp
|
|
+507 common finit_module sys_finit_module
|
|
+508 common sched_setattr sys_sched_setattr
|
|
+509 common sched_getattr sys_sched_getattr
|
|
+510 common renameat2 sys_renameat2
|
|
+511 common getrandom sys_getrandom
|
|
+512 common memfd_create sys_memfd_create
|
|
+513 common execveat sys_execveat
|
|
+514 common seccomp sys_seccomp
|
|
+515 common bpf sys_bpf
|
|
+516 common userfaultfd sys_userfaultfd
|
|
+517 common membarrier sys_membarrier
|
|
+518 common mlock2 sys_mlock2
|
|
+519 common copy_file_range sys_copy_file_range
|
|
+520 common preadv2 sys_preadv2
|
|
+521 common pwritev2 sys_pwritev2
|
|
+522 common statx sys_statx
|
|
+523 common io_pgetevents sys_io_pgetevents
|
|
+524 common pkey_mprotect sys_pkey_mprotect
|
|
+525 common pkey_alloc sys_pkey_alloc
|
|
+526 common pkey_free sys_pkey_free
|
|
+527 common rseq sys_rseq
|
|
+528 common statfs64 sys_statfs64
|
|
+529 common fstatfs64 sys_fstatfs64
|
|
+530 common getegid sys_getegid
|
|
+531 common geteuid sys_geteuid
|
|
+532 common getppid sys_getppid
|
|
+# all other architectures have common numbers for new syscall, sw64
|
|
+# is the exception.
|
|
+534 common pidfd_send_signal sys_pidfd_send_signal
|
|
+535 common io_uring_setup sys_io_uring_setup
|
|
+536 common io_uring_enter sys_io_uring_enter
|
|
+537 common io_uring_register sys_io_uring_register
|
|
+538 common open_tree sys_open_tree
|
|
+539 common move_mount sys_move_mount
|
|
+540 common fsopen sys_fsopen
|
|
+541 common fsconfig sys_fsconfig
|
|
+542 common fsmount sys_fsmount
|
|
+543 common fspick sys_fspick
|
|
+544 common pidfd_open sys_pidfd_open
|
|
+# 545 reserved for clone3
|
|
+546 common close_range sys_close_range
|
|
+547 common openat2 sys_openat2
|
|
+548 common pidfd_getfd sys_pidfd_getfd
|
|
+549 common faccessat2 sys_faccessat2
|
|
+550 common process_madvise sys_process_madvise
|
|
+551 common epoll_pwait2 sys_epoll_pwait2
|
|
+552 common mount_setattr sys_mount_setattr
|
|
+# 553 reserved for quotactl_path
|
|
+554 common landlock_create_ruleset sys_landlock_create_ruleset
|
|
+555 common landlock_add_rule sys_landlock_add_rule
|
|
+556 common landlock_restrict_self sys_landlock_restrict_self
|
|
diff --git a/linux-user/sw64/syscallhdr.sh b/linux-user/sw64/syscallhdr.sh
|
|
new file mode 100644
|
|
index 0000000000..46c166d8ae
|
|
--- /dev/null
|
|
+++ b/linux-user/sw64/syscallhdr.sh
|
|
@@ -0,0 +1,32 @@
|
|
+#!/bin/sh
|
|
+# SPDX-License-Identifier: GPL-2.0
|
|
+
|
|
+in="$1"
|
|
+out="$2"
|
|
+my_abis=`echo "($3)" | tr ',' '|'`
|
|
+prefix="$4"
|
|
+offset="$5"
|
|
+
|
|
+fileguard=LINUX_USER_SW64_`basename "$out" | sed \
|
|
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
|
|
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
|
|
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
|
|
+ printf "#ifndef %s\n" "${fileguard}"
|
|
+ printf "#define %s\n" "${fileguard}"
|
|
+ printf "\n"
|
|
+
|
|
+ nxt=0
|
|
+ while read nr abi name entry ; do
|
|
+ if [ -z "$offset" ]; then
|
|
+ printf "#define TARGET_NR_%s%s\t%s\n" \
|
|
+ "${prefix}" "${name}" "${nr}"
|
|
+ else
|
|
+ printf "#define TARGET_NR_%s%s\t(%s + %s)\n" \
|
|
+ "${prefix}" "${name}" "${offset}" "${nr}"
|
|
+ fi
|
|
+ nxt=$((nr+1))
|
|
+ done
|
|
+
|
|
+ printf "\n"
|
|
+ printf "#endif /* %s */" "${fileguard}"
|
|
+) > "$out"
|
|
diff --git a/linux-user/sw64/target_cpu.h b/linux-user/sw64/target_cpu.h
|
|
index 1b87c8ba6d..63afa699c3 100644
|
|
--- a/linux-user/sw64/target_cpu.h
|
|
+++ b/linux-user/sw64/target_cpu.h
|
|
@@ -17,13 +17,26 @@
|
|
#ifndef SW64_TARGET_CPU_H
|
|
#define SW64_TARGET_CPU_H
|
|
|
|
-static inline void cpu_clone_regs(CPUSW64State *env, target_ulong newsp)
|
|
+static inline void cpu_clone_regs_child(CPUSW64State *env, target_ulong newsp, unsigned flags)
|
|
{
|
|
if (newsp) {
|
|
env->ir[IDX_SP] = newsp;
|
|
}
|
|
env->ir[IDX_V0] = 0;
|
|
env->ir[IDX_A3] = 0;
|
|
+ env->ir[IDX_A4] = 1; /* OSF/1 secondary return: child */
|
|
+}
|
|
+
|
|
+static inline void cpu_clone_regs_parent(CPUSW64State *env, unsigned flags)
|
|
+{
|
|
+ /*
|
|
+ * OSF/1 secondary return: parent
|
|
+ * Note that the kernel does not do this if SETTLS, because the
|
|
+ * settls argument register is still live after copy_thread.
|
|
+ */
|
|
+ if (!(flags & CLONE_SETTLS)) {
|
|
+ env->ir[IDX_A4] = 0;
|
|
+ }
|
|
}
|
|
|
|
static inline void cpu_set_tls(CPUSW64State *env, target_ulong newtls)
|
|
diff --git a/linux-user/sw64/target_errno_defs.h b/linux-user/sw64/target_errno_defs.h
|
|
new file mode 100644
|
|
index 0000000000..fd637f5bc9
|
|
--- /dev/null
|
|
+++ b/linux-user/sw64/target_errno_defs.h
|
|
@@ -0,0 +1,204 @@
|
|
+#ifndef sw64_TARGET_ERRNO_DEFS_H
|
|
+#define sw64_TARGET_ERRNO_DEFS_H
|
|
+
|
|
+#include "../generic/target_errno_defs.h"
|
|
+
|
|
+/*
|
|
+ * Generic target errno overridden with definitions taken
|
|
+ * from asm-sw64/errno.h
|
|
+ */
|
|
+#undef TARGET_EWOULDBLOCK
|
|
+#define TARGET_EWOULDBLOCK TARGET_EAGAIN
|
|
+#undef TARGET_EDEADLK
|
|
+#define TARGET_EDEADLK 11
|
|
+#undef TARGET_EAGAIN
|
|
+#define TARGET_EAGAIN 35
|
|
+#undef TARGET_EINPROGRESS
|
|
+#define TARGET_EINPROGRESS 36
|
|
+#undef TARGET_EALREADY
|
|
+#define TARGET_EALREADY 37
|
|
+#undef TARGET_ENOTSOCK
|
|
+#define TARGET_ENOTSOCK 38
|
|
+#undef TARGET_EDESTADDRREQ
|
|
+#define TARGET_EDESTADDRREQ 39
|
|
+#undef TARGET_EMSGSIZE
|
|
+#define TARGET_EMSGSIZE 40
|
|
+#undef TARGET_EPROTOTYPE
|
|
+#define TARGET_EPROTOTYPE 41
|
|
+#undef TARGET_ENOPROTOOPT
|
|
+#define TARGET_ENOPROTOOPT 42
|
|
+#undef TARGET_EPROTONOSUPPORT
|
|
+#define TARGET_EPROTONOSUPPORT 43
|
|
+#undef TARGET_ESOCKTNOSUPPORT
|
|
+#define TARGET_ESOCKTNOSUPPORT 44
|
|
+#undef TARGET_EOPNOTSUPP
|
|
+#define TARGET_EOPNOTSUPP 45
|
|
+#undef TARGET_EPFNOSUPPORT
|
|
+#define TARGET_EPFNOSUPPORT 46
|
|
+#undef TARGET_EAFNOSUPPORT
|
|
+#define TARGET_EAFNOSUPPORT 47
|
|
+#undef TARGET_EADDRINUSE
|
|
+#define TARGET_EADDRINUSE 48
|
|
+#undef TARGET_EADDRNOTAVAIL
|
|
+#define TARGET_EADDRNOTAVAIL 49
|
|
+#undef TARGET_ENETDOWN
|
|
+#define TARGET_ENETDOWN 50
|
|
+#undef TARGET_ENETUNREACH
|
|
+#define TARGET_ENETUNREACH 51
|
|
+#undef TARGET_ENETRESET
|
|
+#define TARGET_ENETRESET 52
|
|
+#undef TARGET_ECONNABORTED
|
|
+#define TARGET_ECONNABORTED 53
|
|
+#undef TARGET_ECONNRESET
|
|
+#define TARGET_ECONNRESET 54
|
|
+#undef TARGET_ENOBUFS
|
|
+#define TARGET_ENOBUFS 55
|
|
+#undef TARGET_EISCONN
|
|
+#define TARGET_EISCONN 56
|
|
+#undef TARGET_ENOTCONN
|
|
+#define TARGET_ENOTCONN 57
|
|
+#undef TARGET_ESHUTDOWN
|
|
+#define TARGET_ESHUTDOWN 58
|
|
+#undef TARGET_ETOOMANYREFS
|
|
+#define TARGET_ETOOMANYREFS 59
|
|
+#undef TARGET_ETIMEDOUT
|
|
+#define TARGET_ETIMEDOUT 60
|
|
+#undef TARGET_ECONNREFUSED
|
|
+#define TARGET_ECONNREFUSED 61
|
|
+#undef TARGET_ELOOP
|
|
+#define TARGET_ELOOP 62
|
|
+#undef TARGET_ENAMETOOLONG
|
|
+#define TARGET_ENAMETOOLONG 63
|
|
+#undef TARGET_EHOSTDOWN
|
|
+#define TARGET_EHOSTDOWN 64
|
|
+#undef TARGET_EHOSTUNREACH
|
|
+#define TARGET_EHOSTUNREACH 65
|
|
+#undef TARGET_ENOTEMPTY
|
|
+#define TARGET_ENOTEMPTY 66
|
|
+/* Unused 67 */
|
|
+#undef TARGET_EUSERS
|
|
+#define TARGET_EUSERS 68
|
|
+#undef TARGET_EDQUOT
|
|
+#define TARGET_EDQUOT 69
|
|
+#undef TARGET_ESTALE
|
|
+#define TARGET_ESTALE 70
|
|
+#undef TARGET_EREMOTE
|
|
+#define TARGET_EREMOTE 71
|
|
+/* Unused 72-76 */
|
|
+#undef TARGET_ENOLCK
|
|
+#define TARGET_ENOLCK 77
|
|
+#undef TARGET_ENOSYS
|
|
+#define TARGET_ENOSYS 78
|
|
+/* Unused 79 */
|
|
+#undef TARGET_ENOMSG
|
|
+#define TARGET_ENOMSG 80
|
|
+#undef TARGET_EIDRM
|
|
+#define TARGET_EIDRM 81
|
|
+#undef TARGET_ENOSR
|
|
+#define TARGET_ENOSR 82
|
|
+#undef TARGET_ETIME
|
|
+#define TARGET_ETIME 83
|
|
+#undef TARGET_EBADMSG
|
|
+#define TARGET_EBADMSG 84
|
|
+#undef TARGET_EPROTO
|
|
+#define TARGET_EPROTO 85
|
|
+#undef TARGET_ENODATA
|
|
+#define TARGET_ENODATA 86
|
|
+#undef TARGET_ENOSTR
|
|
+#define TARGET_ENOSTR 87
|
|
+#undef TARGET_ECHRNG
|
|
+#define TARGET_ECHRNG 88
|
|
+#undef TARGET_EL2NSYNC
|
|
+#define TARGET_EL2NSYNC 89
|
|
+#undef TARGET_EL3HLT
|
|
+#define TARGET_EL3HLT 90
|
|
+#undef TARGET_EL3RST
|
|
+#define TARGET_EL3RST 91
|
|
+#undef TARGET_ENOPKG
|
|
+#define TARGET_ENOPKG 92
|
|
+#undef TARGET_ELNRNG
|
|
+#define TARGET_ELNRNG 93
|
|
+#undef TARGET_EUNATCH
|
|
+#define TARGET_EUNATCH 94
|
|
+#undef TARGET_ENOCSI
|
|
+#define TARGET_ENOCSI 95
|
|
+#undef TARGET_EL2HLT
|
|
+#define TARGET_EL2HLT 96
|
|
+#undef TARGET_EBADE
|
|
+#define TARGET_EBADE 97
|
|
+#undef TARGET_EBADR
|
|
+#define TARGET_EBADR 98
|
|
+#undef TARGET_EXFULL
|
|
+#define TARGET_EXFULL 99
|
|
+#undef TARGET_ENOANO
|
|
+#define TARGET_ENOANO 100
|
|
+#undef TARGET_EBADRQC
|
|
+#define TARGET_EBADRQC 101
|
|
+#undef TARGET_EBADSLT
|
|
+#define TARGET_EBADSLT 102
|
|
+/* Unused 103 */
|
|
+#undef TARGET_EBFONT
|
|
+#define TARGET_EBFONT 104
|
|
+#undef TARGET_ENONET
|
|
+#define TARGET_ENONET 105
|
|
+#undef TARGET_ENOLINK
|
|
+#define TARGET_ENOLINK 106
|
|
+#undef TARGET_EADV
|
|
+#define TARGET_EADV 107
|
|
+#undef TARGET_ESRMNT
|
|
+#define TARGET_ESRMNT 108
|
|
+#undef TARGET_ECOMM
|
|
+#define TARGET_ECOMM 109
|
|
+#undef TARGET_EMULTIHOP
|
|
+#define TARGET_EMULTIHOP 110
|
|
+#undef TARGET_EDOTDOT
|
|
+#define TARGET_EDOTDOT 111
|
|
+#undef TARGET_EOVERFLOW
|
|
+#define TARGET_EOVERFLOW 112
|
|
+#undef TARGET_ENOTUNIQ
|
|
+#define TARGET_ENOTUNIQ 113
|
|
+#undef TARGET_EBADFD
|
|
+#define TARGET_EBADFD 114
|
|
+#undef TARGET_EREMCHG
|
|
+#define TARGET_EREMCHG 115
|
|
+#undef TARGET_EILSEQ
|
|
+#define TARGET_EILSEQ 116
|
|
+/* Same as default 117-121 */
|
|
+#undef TARGET_ELIBACC
|
|
+#define TARGET_ELIBACC 122
|
|
+#undef TARGET_ELIBBAD
|
|
+#define TARGET_ELIBBAD 123
|
|
+#undef TARGET_ELIBSCN
|
|
+#define TARGET_ELIBSCN 124
|
|
+#undef TARGET_ELIBMAX
|
|
+#define TARGET_ELIBMAX 125
|
|
+#undef TARGET_ELIBEXEC
|
|
+#define TARGET_ELIBEXEC 126
|
|
+#undef TARGET_ERESTART
|
|
+#define TARGET_ERESTART 127
|
|
+#undef TARGET_ESTRPIPE
|
|
+#define TARGET_ESTRPIPE 128
|
|
+#undef TARGET_ENOMEDIUM
|
|
+#define TARGET_ENOMEDIUM 129
|
|
+#undef TARGET_EMEDIUMTYPE
|
|
+#define TARGET_EMEDIUMTYPE 130
|
|
+#undef TARGET_ECANCELED
|
|
+#define TARGET_ECANCELED 131
|
|
+#undef TARGET_ENOKEY
|
|
+#define TARGET_ENOKEY 132
|
|
+#undef TARGET_EKEYEXPIRED
|
|
+#define TARGET_EKEYEXPIRED 133
|
|
+#undef TARGET_EKEYREVOKED
|
|
+#define TARGET_EKEYREVOKED 134
|
|
+#undef TARGET_EKEYREJECTED
|
|
+#define TARGET_EKEYREJECTED 135
|
|
+#undef TARGET_EOWNERDEAD
|
|
+#define TARGET_EOWNERDEAD 136
|
|
+#undef TARGET_ENOTRECOVERABLE
|
|
+#define TARGET_ENOTRECOVERABLE 137
|
|
+#undef TARGET_ERFKILL
|
|
+#define TARGET_ERFKILL 138
|
|
+#undef TARGET_EHWPOISON
|
|
+#define TARGET_EHWPOISON 139
|
|
+
|
|
+#endif
|
|
diff --git a/linux-user/sw64/target_signal.h b/linux-user/sw64/target_signal.h
|
|
index 6393a7542f..8cc1693b05 100644
|
|
--- a/linux-user/sw64/target_signal.h
|
|
+++ b/linux-user/sw64/target_signal.h
|
|
@@ -95,4 +95,6 @@ typedef struct target_sigaltstack {
|
|
#define TARGET_GEN_SUBRNG7 -25
|
|
|
|
#define TARGET_ARCH_HAS_SETUP_FRAME
|
|
+#define TARGET_ARCH_HAS_KA_RESTORER
|
|
+#define TARGET_ARCH_HAS_SIGTRAMP_PAGE 1
|
|
#endif /* SW64_TARGET_SIGNAL_H */
|
|
diff --git a/linux-user/sw64/target_syscall.h b/linux-user/sw64/target_syscall.h
|
|
index c901ae95d8..418905110c 100644
|
|
--- a/linux-user/sw64/target_syscall.h
|
|
+++ b/linux-user/sw64/target_syscall.h
|
|
@@ -23,22 +23,26 @@ struct target_pt_regs {
|
|
abi_ulong r27;
|
|
abi_ulong r28;
|
|
abi_ulong hae;
|
|
-/* JRP - These are the values provided to a0-a2 by HMcode */
|
|
+/* JRP - These are the values provided to a0-a2 by hmcode */
|
|
abi_ulong trap_a0;
|
|
abi_ulong trap_a1;
|
|
abi_ulong trap_a2;
|
|
-/* These are saved by HMcode: */
|
|
+/* These are saved by hmcode: */
|
|
abi_ulong ps;
|
|
abi_ulong pc;
|
|
abi_ulong gp;
|
|
abi_ulong r16;
|
|
abi_ulong r17;
|
|
abi_ulong r18;
|
|
+/* These are needed by qemu to temporarily store the user stack pointer */
|
|
+ abi_ulong usp;
|
|
+ abi_ulong unique;
|
|
};
|
|
|
|
-#define TARGET_MLOCKALL_MCL_CURRENT 0x2000
|
|
-#define TARGET_MLOCKALL_MCL_FUTURE 0x4000
|
|
|
|
+#define TARGET_MCL_CURRENT 0x2000
|
|
+#define TARGET_MCL_FUTURE 0x4000
|
|
+#define TARGET_MCL_ONFAULT 0x8000
|
|
|
|
#define UNAME_MACHINE "sw64"
|
|
#define UNAME_MINIMUM_RELEASE "2.6.32"
|
|
diff --git a/linux-user/sw64/termbits.h b/linux-user/sw64/termbits.h
|
|
index 37dd77120c..5c40efcb20 100644
|
|
--- a/linux-user/sw64/termbits.h
|
|
+++ b/linux-user/sw64/termbits.h
|
|
@@ -156,6 +156,7 @@ struct target_termios {
|
|
#define TARGET_FLUSHO 0x00800000
|
|
#define TARGET_PENDIN 0x20000000
|
|
#define TARGET_IEXTEN 0x00000400
|
|
+#define TARGET_EXTPROC 0x10000000
|
|
|
|
#define TARGET_FIOCLEX TARGET_IO('f', 1)
|
|
#define TARGET_FIONCLEX TARGET_IO('f', 2)
|
|
diff --git a/linux-user/syscall_defs.h b/linux-user/syscall_defs.h
|
|
index 04ca5fe7a0..a04f399278 100644
|
|
--- a/linux-user/syscall_defs.h
|
|
+++ b/linux-user/syscall_defs.h
|
|
@@ -85,7 +85,7 @@
|
|
|
|
#elif defined(TARGET_PPC) || defined(TARGET_ALPHA) || \
|
|
defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE) || \
|
|
- defined(TARGET_MIPS)
|
|
+ defined(TARGET_MIPS) || defined(TARGET_SW64)
|
|
|
|
#define TARGET_IOC_SIZEBITS 13
|
|
#define TARGET_IOC_DIRBITS 3
|
|
@@ -2270,6 +2270,50 @@ struct target_stat {
|
|
int __unused[2];
|
|
};
|
|
|
|
+#elif defined(TARGET_SW64)
|
|
+
|
|
+struct target_stat {
|
|
+ unsigned int st_dev;
|
|
+ unsigned int st_ino;
|
|
+ unsigned int st_mode;
|
|
+ unsigned int st_nlink;
|
|
+ unsigned int st_uid;
|
|
+ unsigned int st_gid;
|
|
+ unsigned int st_rdev;
|
|
+ abi_long st_size;
|
|
+ abi_ulong target_st_atime;
|
|
+ abi_ulong target_st_mtime;
|
|
+ abi_ulong target_st_ctime;
|
|
+ unsigned int st_blksize;
|
|
+ unsigned int st_blocks;
|
|
+ unsigned int st_flags;
|
|
+ unsigned int st_gen;
|
|
+};
|
|
+
|
|
+#define TARGET_HAS_STRUCT_STAT64
|
|
+struct target_stat64 {
|
|
+ abi_ulong st_dev;
|
|
+ abi_ulong st_ino;
|
|
+ abi_ulong st_rdev;
|
|
+ abi_long st_size;
|
|
+ abi_ulong st_blocks;
|
|
+
|
|
+ unsigned int st_mode;
|
|
+ unsigned int st_uid;
|
|
+ unsigned int st_gid;
|
|
+ unsigned int st_blksize;
|
|
+ unsigned int st_nlink;
|
|
+ unsigned int __pad0;
|
|
+
|
|
+ abi_ulong target_st_atime;
|
|
+ abi_ulong target_st_atime_nsec;
|
|
+ abi_ulong target_st_mtime;
|
|
+ abi_ulong target_st_mtime_nsec;
|
|
+ abi_ulong target_st_ctime;
|
|
+ abi_ulong target_st_ctime_nsec;
|
|
+ abi_long __unused[3];
|
|
+};
|
|
+
|
|
#else
|
|
#error unsupported CPU
|
|
#endif
|
|
diff --git a/target/sw64/Makefile.objs b/target/sw64/Makefile.objs
|
|
index 1e549d141c..c702eaa26d 100644
|
|
--- a/target/sw64/Makefile.objs
|
|
+++ b/target/sw64/Makefile.objs
|
|
@@ -2,3 +2,4 @@ obj-$(CONFIG_SOFTMMU) += machine.o
|
|
obj-y += cpu.o translate.o profile.o helper.o
|
|
obj-y += int_helper.o float_helper.o simd_helper.o helper.o exception.o
|
|
obj-$(CONFIG_KVM) += kvm.o
|
|
+obj-y += gdbstub.o
|
|
diff --git a/target/sw64/cpu-param.h b/target/sw64/cpu-param.h
|
|
index 978a3cd572..464cfb3dc1 100644
|
|
--- a/target/sw64/cpu-param.h
|
|
+++ b/target/sw64/cpu-param.h
|
|
@@ -7,18 +7,12 @@
|
|
#ifndef SW64_CPU_PARAM_H
|
|
#define SW64_CPU_PARAM_H 1
|
|
|
|
-#define TARGET_LONG_BITS 64 /* if use th-1 ,TARGET_PAGE_BITS is 12 */
|
|
+#define TARGET_LONG_BITS 64
|
|
#define TARGET_PAGE_BITS 13
|
|
|
|
-#ifdef CONFIG_USER_ONLY
|
|
#define TARGET_VIRT_ADDR_SPACE_BITS 64
|
|
-#else
|
|
#define TARGET_PHYS_ADDR_SPACE_BITS 48
|
|
-#define TARGET_VIRT_ADDR_SPACE_BITS 64
|
|
-#endif
|
|
|
|
-#ifndef CONFIG_USER_ONLY
|
|
#define NB_MMU_MODES 4
|
|
-#endif
|
|
|
|
#endif
|
|
diff --git a/target/sw64/cpu.c b/target/sw64/cpu.c
|
|
index 89c21850e1..8987361346 100644
|
|
--- a/target/sw64/cpu.c
|
|
+++ b/target/sw64/cpu.c
|
|
@@ -26,7 +26,6 @@
|
|
#include "sysemu/reset.h"
|
|
#include "hw/qdev-properties.h"
|
|
|
|
-
|
|
static void sw64_cpu_set_pc(CPUState *cs, vaddr value)
|
|
{
|
|
SW64CPU *cpu = SW64_CPU(cs);
|
|
@@ -36,7 +35,6 @@ static void sw64_cpu_set_pc(CPUState *cs, vaddr value)
|
|
|
|
static void sw64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|
{
|
|
-#ifndef CONFIG_KVM
|
|
SW64CPU *cpu = SW64_CPU(cs);
|
|
CPUSW64State *env = &cpu->env;
|
|
int i;
|
|
@@ -91,7 +89,6 @@ static void sw64_cpu_dump_state(CPUState *cs, FILE *f, int flags)
|
|
qemu_fprintf(f, "\n");
|
|
}
|
|
qemu_fprintf(f, "\n");
|
|
-#endif
|
|
}
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
@@ -137,7 +134,6 @@ static void core3_init(Object *obj)
|
|
CPUSW64State *env = cs->env_ptr;
|
|
#ifdef CONFIG_USER_ONLY
|
|
env->fpcr = 0x680e800000000000;
|
|
- parallel_cpus = true;
|
|
#endif
|
|
set_feature(env, SW64_FEATURE_CORE3);
|
|
}
|
|
@@ -168,7 +164,7 @@ bool sw64_cpu_has_work(CPUState *cs)
|
|
* wake up by hard interrupt, timer, ii, mail or mchk.
|
|
*/
|
|
return cs->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_TIMER |
|
|
- CPU_INTERRUPT_IIMAIL | CPU_INTERRUPT_MCHK);
|
|
+ CPU_INTERRUPT_II0 | CPU_INTERRUPT_MCHK);
|
|
}
|
|
|
|
static void sw64_cpu_initfn(Object *obj)
|
|
@@ -204,136 +200,6 @@ static void sw64_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr, vaddr
|
|
}
|
|
#endif
|
|
|
|
-#define a0(func) (((func & 0xFF) >> 6) & 0x1)
|
|
-#define a1(func) ((((func & 0xFF) >> 6) & 0x2) >> 1)
|
|
-
|
|
-#define t(func) ((a0(func) ^ a1(func)) & 0x1)
|
|
-#define b0(func) (t(func) | a0(func))
|
|
-#define b1(func) ((~t(func) & 1) | a1(func))
|
|
-
|
|
-#define START_SYS_CALL_ADDR(func) \
|
|
- (b1(func) << 14) | (b0(func) << 13) | ((func & 0x3F) << 7)
|
|
-
|
|
-static void sw64_cpu_do_interrupt(CPUState *cs)
|
|
-{
|
|
- int i = cs->exception_index;
|
|
-
|
|
- cs->exception_index = -1;
|
|
-#if !defined(CONFIG_USER_ONLY)
|
|
- SW64CPU *cpu = SW64_CPU(cs);
|
|
- CPUSW64State *env = &cpu->env;
|
|
- switch (i) {
|
|
- case EXCP_OPCDEC:
|
|
- cpu_abort(cs, "ILLEGAL INSN");
|
|
- break;
|
|
- case EXCP_CALL_SYS:
|
|
- i = START_SYS_CALL_ADDR(env->error_code);
|
|
- if (i <= 0x3F) {
|
|
- i += 0x4000;
|
|
- } else if (i >= 0x40 && i <= 0x7F) {
|
|
- i += 0x2000;
|
|
- } else if (i >= 0x80 && i <= 0x8F) {
|
|
- i += 0x6000;
|
|
- }
|
|
- break;
|
|
- case EXCP_ARITH:
|
|
- env->error_code = -1;
|
|
- env->csr[EXC_PC] = env->pc - 4;
|
|
- env->csr[EXC_SUM] = 1;
|
|
- i = 0xB80;
|
|
- break;
|
|
- case EXCP_UNALIGN:
|
|
- i = 0xB00;
|
|
- env->csr[EXC_PC] = env->pc - 4;
|
|
- break;
|
|
- case EXCP_CLK_INTERRUPT:
|
|
- case EXCP_DEV_INTERRUPT:
|
|
- i = 0xE80;
|
|
- break;
|
|
- case EXCP_MMFAULT:
|
|
- i = 0x980;
|
|
- env->csr[EXC_PC] = env->pc;
|
|
- break;
|
|
- case EXCP_IIMAIL:
|
|
- env->csr[EXC_PC] = env->pc;
|
|
- i = 0xE00;
|
|
- break;
|
|
- default:
|
|
- break;
|
|
- }
|
|
- env->pc = env->hm_entry + i;
|
|
- env->flags = ENV_FLAG_HM_MODE;
|
|
-#else
|
|
- switch (i) {
|
|
- case EXCP_OPCDEC:
|
|
- cpu_abort(cs, "ILLEGAL INSN");
|
|
- break;
|
|
- case EXCP_CALL_SYS:
|
|
- default:
|
|
- break;
|
|
- }
|
|
-#endif
|
|
-}
|
|
-
|
|
-#ifndef CONFIG_USER_ONLY
|
|
-static bool sw64_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
|
|
-{
|
|
- SW64CPU *cpu = SW64_CPU(cs);
|
|
- CPUSW64State *env = &cpu->env;
|
|
- int idx = -1;
|
|
- /* We never take interrupts while in Hardmode. */
|
|
- if (env->flags & ENV_FLAG_HM_MODE)
|
|
- return false;
|
|
-
|
|
- if (interrupt_request & CPU_INTERRUPT_IIMAIL) {
|
|
- idx = EXCP_IIMAIL;
|
|
- env->csr[INT_STAT] |= 1UL << 6;
|
|
- if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
|
|
- return false;
|
|
- cs->interrupt_request &= ~CPU_INTERRUPT_IIMAIL;
|
|
- goto done;
|
|
- }
|
|
-
|
|
- if (interrupt_request & CPU_INTERRUPT_TIMER) {
|
|
- idx = EXCP_CLK_INTERRUPT;
|
|
- env->csr[INT_STAT] |= 1UL << 4;
|
|
- if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
|
|
- return false;
|
|
- cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
|
|
- goto done;
|
|
- }
|
|
-
|
|
- if (interrupt_request & CPU_INTERRUPT_HARD) {
|
|
- idx = EXCP_DEV_INTERRUPT;
|
|
- env->csr[INT_STAT] |= 1UL << 12;
|
|
- if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
|
|
- return false;
|
|
- cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
|
|
- goto done;
|
|
- }
|
|
-
|
|
- if (interrupt_request & CPU_INTERRUPT_PCIE) {
|
|
- idx = EXCP_DEV_INTERRUPT;
|
|
- env->csr[INT_STAT] |= 1UL << 1;
|
|
- env->csr[INT_PCI_INT] = 0x10;
|
|
- if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
|
|
- return false;
|
|
- cs->interrupt_request &= ~CPU_INTERRUPT_PCIE;
|
|
- goto done;
|
|
- }
|
|
-
|
|
-done:
|
|
- if (idx >= 0) {
|
|
- cs->exception_index = idx;
|
|
- env->error_code = 0;
|
|
- env->csr[EXC_PC] = env->pc;
|
|
- sw64_cpu_do_interrupt(cs);
|
|
- return true;
|
|
- }
|
|
- return false;
|
|
-}
|
|
-#endif
|
|
-
|
|
static void sw64_cpu_reset(DeviceState *dev)
|
|
{
|
|
CPUState *s = CPU(dev);
|
|
@@ -370,17 +236,15 @@ static const struct SysemuCPUOps sw64_sysemu_ops = {
|
|
#include "hw/core/tcg-cpu-ops.h"
|
|
|
|
static const struct TCGCPUOps sw64_tcg_ops = {
|
|
-#ifdef CONFIG_TCG
|
|
.initialize = sw64_translate_init,
|
|
- .tlb_fill = sw64_cpu_tlb_fill,
|
|
-#endif /* CONFIG_TCG */
|
|
|
|
-#if !defined(CONFIG_USER_ONLY)
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+ .tlb_fill = sw64_cpu_tlb_fill,
|
|
.do_unaligned_access = sw64_cpu_do_unaligned_access,
|
|
.cpu_exec_interrupt = sw64_cpu_exec_interrupt,
|
|
.do_transaction_failed = sw64_cpu_do_transaction_failed,
|
|
-#endif /* !CONFIG_USER_ONLY */
|
|
.do_interrupt = sw64_cpu_do_interrupt,
|
|
+#endif /* !CONFIG_USER_ONLY */
|
|
};
|
|
|
|
static void sw64_cpu_class_init(ObjectClass *oc, void *data)
|
|
@@ -389,21 +253,26 @@ static void sw64_cpu_class_init(ObjectClass *oc, void *data)
|
|
CPUClass *cc = CPU_CLASS(oc);
|
|
SW64CPUClass *scc = SW64_CPU_CLASS(oc);
|
|
|
|
- device_class_set_parent_realize(dc, sw64_cpu_realizefn,
|
|
- &scc->parent_realize);
|
|
- device_class_set_parent_reset(dc, sw64_cpu_reset, &scc->parent_reset);
|
|
- device_class_set_props(dc, sw64_cpu_properties);
|
|
+ device_class_set_parent_realize(dc, sw64_cpu_realizefn, &scc->parent_realize);
|
|
+ device_class_set_parent_reset(dc, sw64_cpu_reset, &scc->parent_reset);
|
|
+ device_class_set_props(dc, sw64_cpu_properties);
|
|
|
|
cc->class_by_name = sw64_cpu_class_by_name;
|
|
+#ifndef CONFIG_USER_ONLY
|
|
dc->vmsd = &vmstate_sw64_cpu;
|
|
+ cc->sysemu_ops = &sw64_sysemu_ops;
|
|
+#endif
|
|
cc->has_work = sw64_cpu_has_work;
|
|
cc->set_pc = sw64_cpu_set_pc;
|
|
cc->disas_set_info = sw64_cpu_disas_set_info;
|
|
cc->dump_state = sw64_cpu_dump_state;
|
|
+
|
|
+ cc->gdb_read_register = sw64_cpu_gdb_read_register;
|
|
+ cc->gdb_write_register = sw64_cpu_gdb_write_register;
|
|
+ cc->gdb_num_core_regs = 67;
|
|
+ cc->gdb_core_xml_file = "sw64-core.xml";
|
|
+
|
|
cc->tcg_ops = &sw64_tcg_ops;
|
|
-#ifndef CONFIG_USER_ONLY
|
|
- cc->sysemu_ops = &sw64_sysemu_ops;
|
|
-#endif
|
|
}
|
|
|
|
static const SW64CPUInfo sw64_cpus[] =
|
|
diff --git a/target/sw64/cpu.h b/target/sw64/cpu.h
|
|
index 5a490e2b4a..4e14891e84 100644
|
|
--- a/target/sw64/cpu.h
|
|
+++ b/target/sw64/cpu.h
|
|
@@ -60,6 +60,8 @@
|
|
|
|
#define MCU_CLOCK 25000000
|
|
|
|
+#define init_pc 0xffffffff80011100
|
|
+
|
|
typedef struct CPUSW64State CPUSW64State;
|
|
typedef CPUSW64State CPUArchState;
|
|
typedef SW64CPU ArchCPU;
|
|
@@ -136,7 +138,7 @@ struct SW64CPU {
|
|
CPUSW64State env;
|
|
|
|
uint64_t k_regs[158];
|
|
- uint64_t k_vcb[36];
|
|
+ uint64_t k_vcb[48];
|
|
QEMUTimer *alarm_timer;
|
|
target_ulong irq;
|
|
uint32_t cid;
|
|
@@ -227,6 +229,8 @@ static inline SW64CPU *sw64_env_get_cpu(CPUSW64State *env)
|
|
#define SW64_CPU_TYPE_SUFFIX "-" TYPE_SW64_CPU
|
|
#define SW64_CPU_TYPE_NAME(name) (name SW64_CPU_TYPE_SUFFIX)
|
|
int cpu_sw64_signal_handler(int host_signum, void *pinfo, void *puc);
|
|
+int sw64_cpu_gdb_read_register(CPUState *cs, uint8_t *buf, int reg);
|
|
+int sw64_cpu_gdb_write_register(CPUState *cs, uint8_t *buf, int reg);
|
|
bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|
MMUAccessType access_type, int mmu_idx,
|
|
bool probe, uintptr_t retaddr);
|
|
@@ -236,6 +240,10 @@ void sw64_stl_phys(CPUState *cs, hwaddr addr, uint64_t val);
|
|
uint64_t sw64_ldw_phys(CPUState *cs, hwaddr addr);
|
|
void sw64_stw_phys(CPUState *cs, hwaddr addr, uint64_t val);
|
|
uint64_t cpu_sw64_load_fpcr(CPUSW64State *env);
|
|
+#ifndef CONFIG_USER_ONLY
|
|
+void sw64_cpu_do_interrupt(CPUState *cs);
|
|
+bool sw64_cpu_exec_interrupt(CPUState *cpu, int int_req);
|
|
+#endif
|
|
void cpu_sw64_store_fpcr(CPUSW64State *env, uint64_t val);
|
|
void sw64_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
|
|
MMUAccessType access_type, int mmu_idx,
|
|
@@ -245,7 +253,7 @@ extern struct VMStateDescription vmstate_sw64_cpu;
|
|
|
|
/* SW64-specific interrupt pending bits */
|
|
#define CPU_INTERRUPT_TIMER CPU_INTERRUPT_TGT_EXT_0
|
|
-#define CPU_INTERRUPT_IIMAIL CPU_INTERRUPT_TGT_EXT_1
|
|
+#define CPU_INTERRUPT_II0 CPU_INTERRUPT_TGT_EXT_1
|
|
#define CPU_INTERRUPT_MCHK CPU_INTERRUPT_TGT_EXT_2
|
|
#define CPU_INTERRUPT_PCIE CPU_INTERRUPT_TGT_EXT_3
|
|
#define CPU_INTERRUPT_WAKEUP CPU_INTERRUPT_TGT_EXT_3
|
|
@@ -281,11 +289,14 @@ enum {
|
|
SWCSR(PTBR, 0x8),
|
|
SWCSR(PRI_BASE, 0x10),
|
|
SWCSR(TIMER_CTL, 0x2a),
|
|
+ SWCSR(TIMER_TH, 0x2b),
|
|
SWCSR(INT_STAT, 0x30),
|
|
SWCSR(INT_CLR, 0x31),
|
|
SWCSR(IER, 0x32),
|
|
SWCSR(INT_PCI_INT, 0x33),
|
|
SWCSR(DVA, 0x4e),
|
|
+ SWCSR(SOFT_CID, 0xc9),
|
|
+ SWCSR(SHTCLOCK, 0xca),
|
|
};
|
|
|
|
#include "exec/cpu-all.h"
|
|
@@ -302,7 +313,7 @@ void sw64_translate_init(void);
|
|
enum {
|
|
EXCP_NONE,
|
|
EXCP_HALT,
|
|
- EXCP_IIMAIL,
|
|
+ EXCP_II0,
|
|
EXCP_OPCDEC,
|
|
EXCP_CALL_SYS,
|
|
EXCP_ARITH,
|
|
diff --git a/target/sw64/gdbstub.c b/target/sw64/gdbstub.c
|
|
new file mode 100644
|
|
index 0000000000..da4d39d215
|
|
--- /dev/null
|
|
+++ b/target/sw64/gdbstub.c
|
|
@@ -0,0 +1,56 @@
|
|
+/*
|
|
+ * SW64 gdb server stub
|
|
+ *
|
|
+ * Copyright (c) 2023 Lu Feifei
|
|
+ *
|
|
+ * This library is free software; you can redistribute it and/or
|
|
+ * modify it under the terms of the GNU Lesser General Public
|
|
+ * License as published by the Free Software Foundation; either
|
|
+ * version 2 of the License, or (at your option) any later version.
|
|
+ *
|
|
+ * This library is distributed in the hope that it will be useful,
|
|
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
+ * Lesser General Public License for more details.
|
|
+ *
|
|
+ * You should have received a copy of the GNU Lesser General Public
|
|
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
|
+ */
|
|
+#include "qemu/osdep.h"
|
|
+#include "qemu-common.h"
|
|
+#include "cpu.h"
|
|
+#include "exec/gdbstub.h"
|
|
+
|
|
+int sw64_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
|
|
+{
|
|
+ SW64CPU *cpu = SW64_CPU(cs);
|
|
+ CPUSW64State *env = &cpu->env;
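+ /* Only the general registers (r0-r30), the always-zero r31 and the PC are
+ handled here; the remaining gdb core registers are left unimplemented. */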
|
|
+
|
|
+ if (n < 31) {
|
|
+ return gdb_get_regl(mem_buf, env->ir[n]);
|
|
+ } else if (n == 31) {
|
|
+ return gdb_get_regl(mem_buf, 0);
|
|
+ } else if (n == 64) {
|
|
+ return gdb_get_regl(mem_buf, env->pc);
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int sw64_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
|
|
+{
|
|
+ SW64CPU *cpu = SW64_CPU(cs);
|
|
+ CPUSW64State *env = &cpu->env;
|
|
+
|
|
+ if (n < 31) {
|
|
+ env->ir[n] = ldtul_p(mem_buf);
|
|
+ return sizeof(target_ulong);
|
|
+ } else if (n == 31) {
|
|
+ /* discard writes to r31 */
|
|
+ return sizeof(target_ulong);
|
|
+ } else if (n == 64) {
|
|
+ env->pc = ldtul_p(mem_buf);
|
|
+ return sizeof(target_ulong);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
diff --git a/target/sw64/helper.c b/target/sw64/helper.c
|
|
index 0cc0af7087..e317c08f0a 100644
|
|
--- a/target/sw64/helper.c
|
|
+++ b/target/sw64/helper.c
|
|
@@ -23,18 +23,7 @@
|
|
#include "hw/core/cpu.h"
|
|
#include "exec/memattrs.h"
|
|
|
|
-#if defined(CONFIG_USER_ONLY)
|
|
-bool sw64_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
|
|
- MMUAccessType access_type, int mmu_idx,
|
|
- bool probe, uintptr_t retaddr)
|
|
-{
|
|
- SW64CPU *cpu = SW64_CPU(cs);
|
|
-
|
|
- cs->exception_index = EXCP_MMFAULT;
|
|
- cpu->env.trap_arg0 = address;
|
|
- cpu_loop_exit_restore(cs, retaddr);
|
|
-}
|
|
-#else
|
|
+#ifndef CONFIG_USER_ONLY
|
|
static target_ulong ldq_phys_clear(CPUState *cs, target_ulong phys)
|
|
{
|
|
return ldq_phys(cs->as, phys & ~(3UL));
|
|
@@ -49,7 +38,7 @@ static int get_sw64_physical_address(CPUSW64State *env, target_ulong addr,
|
|
int prot = 0;
|
|
int ret = MM_K_ACV;
|
|
target_ulong L1pte, L2pte, L3pte, L4pte;
|
|
- target_ulong pt, index, pte_pfn_s;
|
|
+ target_ulong pt = 0, index = 0, pte_pfn_s = 0;
|
|
|
|
if (((addr >> 28) & 0xffffffff8) == 0xffffffff8) {
|
|
phys = (~(0xffffffff80000000)) & addr;
|
|
@@ -217,6 +206,124 @@ do_pgmiss:
|
|
done:
|
|
return (fail >= 0 ? -1 : phys);
|
|
}
|
|
+
|
|
+#define a0(func) (((func & 0xFF) >> 6) & 0x1)
|
|
+#define a1(func) ((((func & 0xFF) >> 6) & 0x2) >> 1)
|
|
+
|
|
+#define t(func) ((a0(func) ^ a1(func)) & 0x1)
|
|
+#define b0(func) (t(func) | a0(func))
|
|
+#define b1(func) ((~t(func) & 1) | a1(func))
|
|
+
|
|
+#define START_SYS_CALL_ADDR(func) \
|
|
+ (b1(func) << 14) | (b0(func) << 13) | ((func & 0x3F) << 7)
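+/* START_SYS_CALL_ADDR() folds the syscall function number into the dispatch
+ offset that is added to hm_entry when EXCP_CALL_SYS is delivered below. */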
|
|
+
|
|
+void sw64_cpu_do_interrupt(CPUState *cs)
|
|
+{
|
|
+ int i = cs->exception_index;
|
|
+
|
|
+ cs->exception_index = -1;
|
|
+ SW64CPU *cpu = SW64_CPU(cs);
|
|
+ CPUSW64State *env = &cpu->env;
|
|
+ switch (i) {
|
|
+ case EXCP_OPCDEC:
|
|
+ cpu_abort(cs, "ILLEGAL INSN");
|
|
+ break;
|
|
+ case EXCP_CALL_SYS:
|
|
+ i = START_SYS_CALL_ADDR(env->error_code);
|
|
+ if (i <= 0x3F) {
|
|
+ i += 0x4000;
|
|
+ } else if (i >= 0x40 && i <= 0x7F) {
|
|
+ i += 0x2000;
|
|
+ } else if (i >= 0x80 && i <= 0x8F) {
|
|
+ i += 0x6000;
|
|
+ }
|
|
+ break;
|
|
+ case EXCP_ARITH:
|
|
+ env->error_code = -1;
|
|
+ env->csr[EXC_PC] = env->pc - 4;
|
|
+ env->csr[EXC_SUM] = 1;
|
|
+ i = 0xB80;
|
|
+ break;
|
|
+ case EXCP_UNALIGN:
|
|
+ i = 0xB00;
|
|
+ env->csr[EXC_PC] = env->pc - 4;
|
|
+ break;
|
|
+ case EXCP_CLK_INTERRUPT:
|
|
+ case EXCP_DEV_INTERRUPT:
|
|
+ i = 0xE80;
|
|
+ break;
|
|
+ case EXCP_MMFAULT:
|
|
+ i = 0x980;
|
|
+ env->csr[EXC_PC] = env->pc;
|
|
+ break;
|
|
+ case EXCP_II0:
|
|
+ env->csr[EXC_PC] = env->pc;
|
|
+ i = 0xE00;
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
+ }
|
|
+ env->pc = env->hm_entry + i;
|
|
+ env->flags = ENV_FLAG_HM_MODE;
|
|
+}
|
|
+
|
|
+bool sw64_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
|
|
+{
|
|
+ SW64CPU *cpu = SW64_CPU(cs);
|
|
+ CPUSW64State *env = &cpu->env;
|
|
+ int idx = -1;
|
|
+ /* We never take interrupts while in hardmode. */
|
|
+ if (env->flags & ENV_FLAG_HM_MODE)
|
|
+ return false;
|
|
+
|
|
+ if (interrupt_request & CPU_INTERRUPT_II0) {
|
|
+ idx = EXCP_II0;
|
|
+ env->csr[INT_STAT] |= 1UL << 6;
|
|
+ if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
|
|
+ return false;
|
|
+ cs->interrupt_request &= ~CPU_INTERRUPT_II0;
|
|
+ goto done;
|
|
+ }
|
|
+
|
|
+ if (interrupt_request & CPU_INTERRUPT_TIMER) {
|
|
+ idx = EXCP_CLK_INTERRUPT;
|
|
+ env->csr[INT_STAT] |= 1UL << 4;
|
|
+ if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
|
|
+ return false;
|
|
+ cs->interrupt_request &= ~CPU_INTERRUPT_TIMER;
|
|
+ goto done;
|
|
+ }
|
|
+
|
|
+ if (interrupt_request & CPU_INTERRUPT_HARD) {
|
|
+ idx = EXCP_DEV_INTERRUPT;
|
|
+ env->csr[INT_STAT] |= 1UL << 12;
|
|
+ if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
|
|
+ return false;
|
|
+ cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
|
|
+ goto done;
|
|
+ }
|
|
+
|
|
+ if (interrupt_request & CPU_INTERRUPT_PCIE) {
|
|
+ idx = EXCP_DEV_INTERRUPT;
|
|
+ env->csr[INT_STAT] |= 1UL << 1;
|
|
+ env->csr[INT_PCI_INT] = 0x10;
|
|
+ if ((env->csr[IER] & env->csr[INT_STAT]) == 0)
|
|
+ return false;
|
|
+ cs->interrupt_request &= ~CPU_INTERRUPT_PCIE;
|
|
+ goto done;
|
|
+ }
|
|
+
|
|
+done:
|
|
+ if (idx >= 0) {
|
|
+ cs->exception_index = idx;
|
|
+ env->error_code = 0;
|
|
+ env->csr[EXC_PC] = env->pc;
|
|
+ sw64_cpu_do_interrupt(cs);
|
|
+ return true;
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
#endif
|
|
|
|
static void update_fpcr_status_mask(CPUSW64State* env) {
|
|
@@ -286,7 +393,9 @@ void cpu_sw64_store_fpcr(CPUSW64State* env, uint64_t val) {
|
|
uint64_t helper_read_csr(CPUSW64State *env, uint64_t index)
|
|
{
|
|
if (index == PRI_BASE)
|
|
- return 0x10000;
|
|
+ env->csr[index] = 0x10000;
|
|
+ if (index == SHTCLOCK)
|
|
+ env->csr[index] = qemu_clock_get_ns(QEMU_CLOCK_HOST) / 40;
|
|
return env->csr[index];
|
|
}
|
|
|
|
@@ -311,9 +420,12 @@ void helper_write_csr(CPUSW64State *env, uint64_t index, uint64_t va)
|
|
(index == ITB_IS) || (index == PTBR)) {
|
|
tlb_flush(cs);
|
|
}
|
|
- if (index == INT_CLR || index == INT_PCI_INT) {
|
|
+ if (index == INT_CLR) {
|
|
env->csr[INT_STAT] &= ~va;
|
|
}
|
|
+ if ((index == TIMER_CTL) && (va == 1)) {
|
|
+ timer_mod(cpu->alarm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + env->csr[TIMER_TH]);
|
|
+ }
|
|
|
|
if (index == TIMER_CTL && env->csr[index] == 1) {
|
|
timer_mod(cpu->alarm_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + 1000000000 / 250);
|
|
diff --git a/target/sw64/kvm.c b/target/sw64/kvm.c
|
|
index fc134c83fb..c38db7cabe 100644
|
|
--- a/target/sw64/kvm.c
|
|
+++ b/target/sw64/kvm.c
|
|
@@ -25,7 +25,6 @@
|
|
#include "hw/boards.h"
|
|
#include "qemu/log.h"
|
|
|
|
-#define init_pc 0xffffffff80011000
|
|
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
|
|
KVM_CAP_LAST_INFO
|
|
};
|
|
@@ -71,6 +70,7 @@ void kvm_sw64_reset_vcpu(SW64CPU *cpu)
|
|
CPUState *cs = CPU(cpu);
|
|
struct kvm_regs *regs;
|
|
int ret;
|
|
+ struct vcpucb *vcb;
|
|
|
|
regs = (struct kvm_regs *)cpu->k_regs;
|
|
regs->pc = init_pc;
|
|
@@ -82,6 +82,9 @@ void kvm_sw64_reset_vcpu(SW64CPU *cpu)
|
|
abort();
|
|
}
|
|
|
|
+ vcb = (struct vcpucb *)cpu->k_vcb;
|
|
+ vcb->vcpu_irq_disabled = 1;
|
|
+
|
|
ret = kvm_vcpu_ioctl(cs, KVM_SW64_VCPU_INIT, NULL);
|
|
|
|
if (ret < 0) {
|
|
@@ -113,12 +116,38 @@ int kvm_arch_destroy_vcpu(CPUState *cs)
|
|
|
|
int kvm_arch_get_registers(CPUState *cs)
|
|
{
|
|
- int ret;
|
|
+ int ret, i;
|
|
SW64CPU *cpu = SW64_CPU(cs);
|
|
+ CPUSW64State *env = &cpu->env;
|
|
+
|
|
ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &cpu->k_regs);
|
|
if (ret < 0)
|
|
return ret;
|
|
- return kvm_vcpu_ioctl(cs, KVM_SW64_GET_VCB, &cpu->k_vcb);
|
|
+
|
|
+ ret = kvm_vcpu_ioctl(cs, KVM_SW64_GET_VCB, &cpu->k_vcb);
|
|
+ if (ret < 0)
|
|
+ return ret;
|
|
+
|
|
+ for (i = 0; i < 16; i++)
|
|
+ env->ir[i] = cpu->k_regs[i];
|
|
+
|
|
+ env->ir[16] = cpu->k_regs[155];
|
|
+ env->ir[17] = cpu->k_regs[156];
|
|
+ env->ir[18] = cpu->k_regs[157];
|
|
+
|
|
+ for (i = 19; i < 29; i++)
|
|
+ env->ir[i] = cpu->k_regs[i-3];
|
|
+
|
|
+ env->ir[29] = cpu->k_regs[154];
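+ /* k_regs[152] presumably holds the saved PS: a non-zero mode field selects
+ the user stack pointer, otherwise the kernel stack pointer. */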
|
|
+
|
|
+ if (cpu->k_regs[152] >> 3)
|
|
+ env->ir[30] = cpu->k_vcb[3]; /* usp */
|
|
+ else
|
|
+ env->ir[30] = cpu->k_vcb[2]; /* ksp */
|
|
+
|
|
+ env->pc = cpu->k_regs[153];
|
|
+
|
|
+ return 0;
|
|
}
|
|
|
|
int kvm_arch_put_registers(CPUState *cs, int level)
|
|
@@ -126,15 +155,88 @@ int kvm_arch_put_registers(CPUState *cs, int level)
|
|
int ret;
|
|
SW64CPU *cpu = SW64_CPU(cs);
|
|
struct vcpucb *vcb;
|
|
+
|
|
+ if (level == KVM_PUT_RUNTIME_STATE) {
|
|
+ int i;
|
|
+ CPUSW64State *env = &cpu->env;
|
|
+
|
|
+ for (i = 0; i < 16; i++)
|
|
+ cpu->k_regs[i] = env->ir[i];
|
|
+
|
|
+ for (i = 19; i < 29; i++)
|
|
+ cpu->k_regs[i-3] = env->ir[i];
|
|
+
|
|
+ cpu->k_regs[155] = env->ir[16];
|
|
+ cpu->k_regs[156] = env->ir[17];
|
|
+ cpu->k_regs[157] = env->ir[18];
|
|
+
|
|
+ cpu->k_regs[154] = env->ir[29];
|
|
+
|
|
+ if (cpu->k_regs[152] >> 3)
|
|
+ cpu->k_vcb[3] = env->ir[30]; /* usp */
|
|
+ else
|
|
+ cpu->k_vcb[2] = env->ir[30]; /* ksp */
|
|
+
|
|
+ cpu->k_regs[153] = env->pc;
|
|
+ }
|
|
+
|
|
ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &cpu->k_regs);
|
|
if (ret < 0)
|
|
return ret;
|
|
vcb = (struct vcpucb *)cpu->k_vcb;
|
|
vcb->whami = kvm_arch_vcpu_id(cs);
|
|
fprintf(stderr,"vcpu %ld init.\n", vcb->whami);
|
|
+
|
|
+ if (level == KVM_PUT_RESET_STATE)
|
|
+ vcb->pcbb = 0;
|
|
+
|
|
return kvm_vcpu_ioctl(cs, KVM_SW64_SET_VCB, &cpu->k_vcb);
|
|
}
|
|
|
|
+static const uint32_t brk_insn = 0x00000080;
|
|
+
|
|
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
|
|
+{
|
|
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
|
|
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
|
|
+{
|
|
+ static uint32_t brk;
|
|
+
|
|
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
|
|
+ brk != brk_insn ||
|
|
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int kvm_arch_insert_hw_breakpoint(target_ulong addr,
|
|
+ target_ulong len, int type)
|
|
+{
|
|
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+int kvm_arch_remove_hw_breakpoint(target_ulong addr,
|
|
+ target_ulong len, int type)
|
|
+{
|
|
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+void kvm_arch_remove_all_hw_breakpoints(void)
|
|
+{
|
|
+ qemu_log_mask(LOG_UNIMP, "%s: not implemented\n", __func__);
|
|
+}
|
|
+
|
|
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
|
|
int vector, PCIDevice *dev)
|
|
{
|
|
@@ -156,10 +258,42 @@ MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
|
|
return MEMTXATTRS_UNSPECIFIED;
|
|
}
|
|
|
|
+bool kvm_sw64_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
|
|
+{
|
|
+ SW64CPU *cpu = SW64_CPU(cs);
|
|
+ CPUSW64State *env = &cpu->env;
|
|
+
|
|
+ /* Ensure PC is synchronised */
|
|
+ kvm_cpu_synchronize_state(cs);
|
|
+
|
|
+ if (cs->singlestep_enabled) {
|
|
+ return true;
|
|
+ } else if (kvm_find_sw_breakpoint(cs, debug_exit->epc)) {
|
|
+ return true;
|
|
+ } else {
|
|
+ error_report("%s: unhandled debug exit (%"PRIx64", %"PRIx64")",
|
|
+ __func__, env->pc, debug_exit->epc);
|
|
+ }
|
|
+
|
|
+ return false;
|
|
+}
|
|
|
|
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
|
|
{
|
|
- return -1;
|
|
+ int ret = 0;
|
|
+
|
|
+ switch (run->exit_reason) {
|
|
+ case KVM_EXIT_DEBUG:
|
|
+ if (kvm_sw64_handle_debug(cs, &run->debug.arch)) {
|
|
+ ret = EXCP_DEBUG;
|
|
+ } /* otherwise return to guest */
|
|
+ break;
|
|
+ default:
|
|
+ qemu_log_mask(LOG_UNIMP, "%s: un-handled exit reason %d\n",
|
|
+ __func__, run->exit_reason);
|
|
+ break;
|
|
+ }
|
|
+ return ret;
|
|
}
|
|
|
|
bool kvm_arch_stop_on_emulation_error(CPUState *cs)
|
|
@@ -213,3 +347,7 @@ bool kvm_arch_cpu_check_are_resettable(void)
|
|
{
|
|
return true;
|
|
}
|
|
+
|
|
+void kvm_arch_accel_class_init(ObjectClass *oc)
|
|
+{
|
|
+}
|
|
diff --git a/target/sw64/kvm_sw64.h b/target/sw64/kvm_sw64.h
|
|
index 5ebd4ec6fd..81dd760008 100644
|
|
--- a/target/sw64/kvm_sw64.h
|
|
+++ b/target/sw64/kvm_sw64.h
|
|
@@ -44,4 +44,13 @@ typedef struct SW64HostCPUClass {
|
|
uint32_t target;
|
|
const char *dtb_compatible;
|
|
} SW64HostCPUClass;
|
|
+
|
|
+/**
|
|
+ * kvm_sw64_handle_debug:
|
|
+ * @cs: CPUState
|
|
+ * @debug_exit: debug part of the KVM exit structure
|
|
+ *
|
|
+ * Returns: TRUE if the debug exception was handled.
|
|
+ */
|
|
+bool kvm_sw64_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit);
|
|
#endif
|
|
diff --git a/target/sw64/machine.c b/target/sw64/machine.c
|
|
index df18d3faba..93b1968ad8 100644
|
|
--- a/target/sw64/machine.c
|
|
+++ b/target/sw64/machine.c
|
|
@@ -11,7 +11,7 @@ VMStateDescription vmstate_sw64_cpu = {
|
|
.fields = (VMStateField[]) {
|
|
#ifdef CONFIG_KVM
|
|
VMSTATE_UINTTL_ARRAY(k_regs, SW64CPU, 158),
|
|
- VMSTATE_UINTTL_ARRAY(k_vcb, SW64CPU, 36),
|
|
+ VMSTATE_UINTTL_ARRAY(k_vcb, SW64CPU, 48),
|
|
#endif
|
|
VMSTATE_END_OF_LIST()
|
|
}
|
|
diff --git a/target/sw64/meson.build b/target/sw64/meson.build
|
|
index ee49e45927..332f2c2ee6 100644
|
|
--- a/target/sw64/meson.build
|
|
+++ b/target/sw64/meson.build
|
|
@@ -4,6 +4,7 @@ sw64_ss.add(files(
|
|
'exception.c',
|
|
'float_helper.c',
|
|
'helper.c',
|
|
+ 'gdbstub.c',
|
|
'int_helper.c',
|
|
'profile.c',
|
|
'simd_helper.c',
|
|
diff --git a/target/sw64/translate.c b/target/sw64/translate.c
|
|
index 37b7e89077..1e725b9294 100644
|
|
--- a/target/sw64/translate.c
|
|
+++ b/target/sw64/translate.c
|
|
@@ -2298,7 +2298,7 @@ DisasJumpType translate_one(DisasContextBase *dcbase, uint32_t insn,
|
|
/* RCID */
|
|
if (disp16 && unlikely(ra == 31)) break;
|
|
va = load_gir(ctx, ra);
|
|
- read_csr(0xc4, va);
|
|
+ read_csr(0xc9, va);
|
|
break;
|
|
case 0x0080:
|
|
/* HALT */
|
|
diff --git a/tcg/sw64/tcg-target.c.inc b/tcg/sw64/tcg-target.c.inc
|
|
index 982f159e23..da938a7382 100755
|
|
--- a/tcg/sw64/tcg-target.c.inc
|
|
+++ b/tcg/sw64/tcg-target.c.inc
|
|
@@ -10,7 +10,6 @@
|
|
the size of the operation performed. If we know the values match, it
|
|
makes things much cleaner. */
|
|
QEMU_BUILD_BUG_ON(TCG_TYPE_I32 != 0 || TCG_TYPE_I64 != 1);
|
|
-static const tcg_insn_unit *tb_ret_addr;
|
|
|
|
#ifdef CONFIG_DEBUG_TCG
|
|
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
|
|
@@ -33,13 +32,14 @@ static const int tcg_target_reg_alloc_order[] = {
|
|
TCG_REG_X0, TCG_REG_X1, TCG_REG_X2, TCG_REG_X3, TCG_REG_X4,
|
|
TCG_REG_X5, TCG_REG_X6, TCG_REG_X7, TCG_REG_X8,
|
|
|
|
- TCG_REG_X22, TCG_REG_X23, TCG_REG_X24, /*TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, */
|
|
+ TCG_REG_X22, TCG_REG_X23, /* TCG_REG_X24, TCG_REG_X25, TCG_REG_X26, TCG_REG_X27, */
|
|
|
|
/* TCG_REG_SP=TCG_REG_X15 saved for system*/
|
|
TCG_REG_X16, TCG_REG_X17, TCG_REG_X18, TCG_REG_X19, TCG_REG_X20, TCG_REG_X21, TCG_REG_X28, /* TCG_REG_X29, TCG_REG_X30, TCG_REG_X31 */
|
|
|
|
/* TCG_REG_TMP=TCG_REG_X27 reserved as temporary register */
|
|
/* TCG_REG_TMP2=TCG_REG_X25 reserved as temporary register */
|
|
+ /* TCG_REG_TMP3=TCG_REG_X24 reserved as temporary register */
|
|
/* TCG_REG_RA=TCG_REG_X26 reserved as temporary */
|
|
/* TCG_REG_GP=TCG_REG_X29 gp saved for system*/
|
|
/* TCG_REG_SP=TCG_REG_X30 sp saved for system*/
|
|
@@ -66,27 +66,103 @@ static const int tcg_target_call_oarg_regs[1] = {
|
|
|
|
#define TCG_REG_TMP TCG_REG_X27
|
|
#define TCG_REG_TMP2 TCG_REG_X25
|
|
+#define TCG_REG_TMP3 TCG_REG_X24
|
|
#define TCG_FLOAT_TMP TCG_REG_F10
|
|
#define TCG_FLOAT_TMP2 TCG_REG_F11
|
|
|
|
+#define REG0(I) (const_args[I] ? TCG_REG_ZERO : (TCGReg)args[I])
|
|
+#define tcg_out_insn_jump tcg_out_insn_ldst
|
|
+#define tcg_out_insn_bitReg tcg_out_insn_simpleReg
|
|
+#define zeroExt 0
|
|
+#define sigExt 1
|
|
+#define noPara 0 /* indicates that this function parameter is not needed */
|
|
+
|
|
+#ifndef CONFIG_SOFTMMU
|
|
+#define USE_GUEST_BASE (guest_base != 0 || TARGET_LONG_BITS == 32)
|
|
+#define TCG_REG_GUEST_BASE TCG_REG_X14
|
|
+#endif
|
|
+
|
|
+static bool reloc_pc21(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
|
|
+{
|
|
+ const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
|
|
+ ptrdiff_t offset = target - src_rx -1;
|
|
+
|
|
+ if (offset == sextract64(offset, 0, 21)) {
|
|
+ /* read instruction, mask away previous PC_REL21 parameter contents,
|
|
+ set the proper offset, then write back the instruction. */
|
|
+ *src_rw = deposit32(*src_rw, 0, 21, offset);
|
|
+ return true;
|
|
+ }
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend)
|
|
+{
|
|
+ tcg_debug_assert(addend == 0);
|
|
+ switch (type) {
|
|
+ case R_SW_64_BRADDR:
|
|
+ value = value;
|
|
+ return reloc_pc21(code_ptr, (const tcg_insn_unit *)value);
|
|
+ default:
|
|
+ g_assert_not_reached();
|
|
+ }
|
|
+}
|
|
+
|
|
+/*
|
|
+* these constraint bits must stay consistent with "tcg-target-con-str.h"
|
|
+*/
|
|
+#define TCG_CT_CONST_ZERO 0x100
|
|
+#define TCG_CT_CONST_LONG 0x200
|
|
+#define TCG_CT_CONST_MONE 0x400
|
|
+#define TCG_CT_CONST_ORRI 0x800
|
|
+#define TCG_CT_CONST_WORD 0x1000
|
|
+#define TCG_CT_CONST_U8 0x2000
|
|
+#define TCG_CT_CONST_S8 0x4000
|
|
+
|
|
#define ALL_GENERAL_REGS 0xffffffffu
|
|
+#define ALL_VECTOR_REGS 0xffffffff00000000ull
|
|
+
|
|
+#ifdef CONFIG_SOFTMMU
|
|
+#define ALL_QLDST_REGS \
|
|
+ (ALL_GENERAL_REGS & ~((1 << TCG_REG_X0) | (1 << TCG_REG_X1) | \
|
|
+ (1 << TCG_REG_X2) | (1 << TCG_REG_X3)))
|
|
+#else
|
|
#define ALL_QLDST_REGS ALL_GENERAL_REGS
|
|
-#define PUSH_SIZE ((15-9+1+1) * 8)
|
|
-#define FRAME_SIZE \
|
|
- ((PUSH_SIZE \
|
|
- + TCG_STATIC_CALL_ARGS_SIZE \
|
|
- + CPU_TEMP_BUF_NLONGS * sizeof(long) \
|
|
- + TCG_TARGET_STACK_ALIGN - 1) \
|
|
- & ~(TCG_TARGET_STACK_ALIGN - 1))
|
|
-
|
|
-/* We encode the format of the insn into the beginning of the name, so that
|
|
- we can have the preprocessor help "typecheck" the insn vs the output
|
|
- function. We don't have nice names for the formats, so we use the section
|
|
- number of the architecture reference manual in which the instruction
|
|
- group is described. */
|
|
-#define OPC_OP(x) ((x & 0x3f) << 26)
|
|
-#define OPC_FUNC(x) ((x & 0xff) << 5)
|
|
-#define OPC_FUNC_COMPLEX(x) ((x & 0xff) << 10)
|
|
+#endif
|
|
+
|
|
+/* sw test if a constant matches the constraint */
|
|
+static bool tcg_target_const_match(tcg_target_long val, TCGType type, int ct)
|
|
+{
|
|
+ if (ct & TCG_CT_CONST) {
|
|
+ return 1;
|
|
+ }
|
|
+ if (type == TCG_TYPE_I32) {
|
|
+ val = (int32_t)val;
|
|
+ }
|
|
+ if ((ct & TCG_CT_CONST_U8) && 0 <= val && val <= 255) {
|
|
+ return 1;
|
|
+ }
|
|
+ if ((ct & TCG_CT_CONST_LONG)) {
|
|
+ return 1;
|
|
+ }
|
|
+ if ((ct & TCG_CT_CONST_MONE)) {
|
|
+ return 1;
|
|
+ }
|
|
+ if ((ct & TCG_CT_CONST_ORRI)) {
|
|
+ return 1;
|
|
+ }
|
|
+ if ((ct & TCG_CT_CONST_WORD)) {
|
|
+ return 1;
|
|
+ }
|
|
+ if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
|
|
+ return 1;
|
|
+ }
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+#define OPC_OP(x) (((x) & 0x3f) << 26)
|
|
+#define OPC_FUNC(x) (((x) & 0xff) << 5)
|
|
+#define OPC_FUNC_COMPLEX(x) (((x) & 0xff) << 10)
|
|
typedef enum {
|
|
OPC_NOP =0X43ff075f,
|
|
OPC_SYS_CALL =OPC_OP(0x00),
|
|
@@ -103,7 +179,7 @@ typedef enum {
|
|
OPC_VLDD =OPC_OP(0x0D),
|
|
OPC_VSTS =OPC_OP(0x0E),
|
|
OPC_VSTD =OPC_OP(0x0F),
|
|
-
|
|
+
|
|
OPC_LDBU =OPC_OP(0x20),
|
|
OPC_LDHU =OPC_OP(0x21),
|
|
OPC_LDW =OPC_OP(0x22),
|
|
@@ -120,7 +196,7 @@ typedef enum {
|
|
OPC_PRI_ST =OPC_OP(0x2D),
|
|
OPC_FSTS =OPC_OP(0x2E),
|
|
OPC_FSTD =OPC_OP(0x2F),
|
|
-
|
|
+
|
|
OPC_BEQ =OPC_OP(0x30),
|
|
OPC_BNE =OPC_OP(0x31),
|
|
OPC_BLT =OPC_OP(0x32),
|
|
@@ -129,7 +205,7 @@ typedef enum {
|
|
OPC_BGE =OPC_OP(0x35),
|
|
OPC_BLBC =OPC_OP(0x36),
|
|
OPC_BLBS =OPC_OP(0x37),
|
|
-
|
|
+
|
|
OPC_FBEQ =OPC_OP(0x38),
|
|
OPC_FBNE =OPC_OP(0x39),
|
|
OPC_FBLT =OPC_OP(0x3A),
|
|
@@ -138,7 +214,7 @@ typedef enum {
|
|
OPC_FBGE =OPC_OP(0x3D),
|
|
OPC_LDI =OPC_OP(0x3E),
|
|
OPC_LDIH =OPC_OP(0x3F),
|
|
-
|
|
+
|
|
OPC_ADDW =(OPC_OP(0x10) | OPC_FUNC(0x0)),
|
|
OPC_ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x0)),
|
|
OPC_SUBW =(OPC_OP(0x10) | OPC_FUNC(0x1)),
|
|
@@ -147,62 +223,62 @@ typedef enum {
|
|
OPC_S4ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x02)),
|
|
OPC_S4SUBW =(OPC_OP(0x10) | OPC_FUNC(0x03)),
|
|
OPC_S4SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x03)),
|
|
-
|
|
+
|
|
OPC_S8ADDW =(OPC_OP(0x10) | OPC_FUNC(0x04)),
|
|
OPC_S8ADDW_I =(OPC_OP(0x12) | OPC_FUNC(0x04)),
|
|
OPC_S8SUBW =(OPC_OP(0x10) | OPC_FUNC(0x05)),
|
|
OPC_S8SUBW_I =(OPC_OP(0x12) | OPC_FUNC(0x05)),
|
|
-
|
|
+
|
|
OPC_ADDL =(OPC_OP(0x10) | OPC_FUNC(0x8)),
|
|
OPC_ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0x8)),
|
|
OPC_SUBL =(OPC_OP(0x10) | OPC_FUNC(0x9)),
|
|
OPC_SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0x9)),
|
|
-
|
|
+
|
|
OPC_S4ADDL =(OPC_OP(0x10) | OPC_FUNC(0xA)),
|
|
OPC_S4ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0xA)),
|
|
OPC_S4SUBL =(OPC_OP(0x10) | OPC_FUNC(0xB)),
|
|
OPC_S4SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0xB)),
|
|
-
|
|
+
|
|
OPC_S8ADDL =(OPC_OP(0x10) | OPC_FUNC(0xC)),
|
|
OPC_S8ADDL_I =(OPC_OP(0x12) | OPC_FUNC(0xC)),
|
|
OPC_S8SUBL =(OPC_OP(0x10) | OPC_FUNC(0xD)),
|
|
OPC_S8SUBL_I =(OPC_OP(0x12) | OPC_FUNC(0xD)),
|
|
-
|
|
+
|
|
OPC_MULW =(OPC_OP(0x10) | OPC_FUNC(0x10)),
|
|
OPC_MULW_I =(OPC_OP(0x12) | OPC_FUNC(0x10)),
|
|
OPC_MULL =(OPC_OP(0x10) | OPC_FUNC(0x18)),
|
|
OPC_MULL_I =(OPC_OP(0x12) | OPC_FUNC(0x18)),
|
|
-
|
|
+
|
|
OPC_UMULH =(OPC_OP(0x10) | OPC_FUNC(0x19)),
|
|
OPC_UMULH_I =(OPC_OP(0x12) | OPC_FUNC(0x19)),
|
|
-
|
|
+
|
|
OPC_CTPOP =(OPC_OP(0x10) | OPC_FUNC(0x58)),
|
|
OPC_CTLZ =(OPC_OP(0x10) | OPC_FUNC(0x59)),
|
|
OPC_CTTZ =(OPC_OP(0x10) | OPC_FUNC(0x5A)),
|
|
-
|
|
+
|
|
OPC_ZAP =(OPC_OP(0x10) | OPC_FUNC(0x68)),
|
|
OPC_ZAP_I =(OPC_OP(0x12) | OPC_FUNC(0x68)),
|
|
OPC_ZAPNOT =(OPC_OP(0x10) | OPC_FUNC(0x69)),
|
|
OPC_ZAPNOT_I =(OPC_OP(0x12) | OPC_FUNC(0x69)),
|
|
-
|
|
+
|
|
OPC_SEXTB =(OPC_OP(0x10) | OPC_FUNC(0x6A)),
|
|
OPC_SEXTB_I =(OPC_OP(0x12) | OPC_FUNC(0x6A)),
|
|
OPC_SEXTH =(OPC_OP(0x10) | OPC_FUNC(0x6B)),
|
|
OPC_SEXTH_I =(OPC_OP(0x12) | OPC_FUNC(0x6B)),
|
|
-
|
|
+
|
|
OPC_CMPEQ =(OPC_OP(0x10) | OPC_FUNC(0x28)),
|
|
OPC_CMPEQ_I =(OPC_OP(0x12) | OPC_FUNC(0x28)),
|
|
-
|
|
+
|
|
OPC_CMPLT =(OPC_OP(0x10) | OPC_FUNC(0x29)),
|
|
OPC_CMPLT_I =(OPC_OP(0x12) | OPC_FUNC(0x29)),
|
|
OPC_CMPLE =(OPC_OP(0x10) | OPC_FUNC(0x2A)),
|
|
OPC_CMPLE_I =(OPC_OP(0x12) | OPC_FUNC(0x2A)),
|
|
-
|
|
+
|
|
OPC_CMPULT =(OPC_OP(0x10) | OPC_FUNC(0x2B)),
|
|
OPC_CMPULT_I =(OPC_OP(0x12) | OPC_FUNC(0x2B)),
|
|
OPC_CMPULE =(OPC_OP(0x10) | OPC_FUNC(0x2C)),
|
|
OPC_CMPULE_I =(OPC_OP(0x12) | OPC_FUNC(0x2C)),
|
|
-
|
|
+
|
|
OPC_AND =(OPC_OP(0x10) | OPC_FUNC(0x38)),
|
|
OPC_BIC =(OPC_OP(0x10) | OPC_FUNC(0x39)),
|
|
OPC_BIS =(OPC_OP(0x10) | OPC_FUNC(0x3A)),
|
|
@@ -216,14 +292,14 @@ typedef enum {
|
|
OPC_ORNOT_I =(OPC_OP(0x12) | OPC_FUNC(0x3B)),
|
|
OPC_XOR_I =(OPC_OP(0x12) | OPC_FUNC(0x3C)),
|
|
OPC_EQV_I =(OPC_OP(0x12) | OPC_FUNC(0x3D)),
|
|
-
|
|
+
|
|
OPC_SLL =(OPC_OP(0x10) | OPC_FUNC(0x48)),
|
|
OPC_SRL =(OPC_OP(0x10) | OPC_FUNC(0x49)),
|
|
OPC_SRA =(OPC_OP(0x10) | OPC_FUNC(0x4A)),
|
|
OPC_SLL_I =(OPC_OP(0x12) | OPC_FUNC(0x48)),
|
|
OPC_SRL_I =(OPC_OP(0x12) | OPC_FUNC(0x49)),
|
|
OPC_SRA_I =(OPC_OP(0x12) | OPC_FUNC(0x4A)),
|
|
-
|
|
+
|
|
OPC_SELEQ =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x00)),
|
|
OPC_SELGE =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x01)),
|
|
OPC_SELGT =(OPC_OP(0x11) | OPC_FUNC_COMPLEX(0x02)),
|
|
@@ -240,7 +316,7 @@ typedef enum {
|
|
OPC_SELNE_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x05)),
|
|
OPC_SELLBC_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x06)),
|
|
OPC_SELLBS_I =(OPC_OP(0x13) | OPC_FUNC_COMPLEX(0x07)),
|
|
-
|
|
+
|
|
OPC_INS0B =(OPC_OP(0x10) | OPC_FUNC(0x40)),
|
|
OPC_INS1B =(OPC_OP(0x10) | OPC_FUNC(0x41)),
|
|
OPC_INS2B =(OPC_OP(0x10) | OPC_FUNC(0x42)),
|
|
@@ -258,39 +334,39 @@ typedef enum {
|
|
OPC_INS6B_I =(OPC_OP(0x12) | OPC_FUNC(0x46)),
|
|
OPC_INS7B_I =(OPC_OP(0x12) | OPC_FUNC(0x47)),
|
|
|
|
- OPC_EXT0B =(OPC_OP(0x10) | OPC_FUNC(0x50)),
|
|
- OPC_EXT1B =(OPC_OP(0x10) | OPC_FUNC(0x51)),
|
|
- OPC_EXT2B =(OPC_OP(0x10) | OPC_FUNC(0x52)),
|
|
- OPC_EXT3B =(OPC_OP(0x10) | OPC_FUNC(0x53)),
|
|
- OPC_EXT4B =(OPC_OP(0x10) | OPC_FUNC(0x54)),
|
|
- OPC_EXT5B =(OPC_OP(0x10) | OPC_FUNC(0x55)),
|
|
- OPC_EXT6B =(OPC_OP(0x10) | OPC_FUNC(0x56)),
|
|
- OPC_EXT7B =(OPC_OP(0x10) | OPC_FUNC(0x57)),
|
|
- OPC_EXT0B_I =(OPC_OP(0x12) | OPC_FUNC(0x50)),
|
|
- OPC_EXT1B_I =(OPC_OP(0x12) | OPC_FUNC(0x51)),
|
|
- OPC_EXT2B_I =(OPC_OP(0x12) | OPC_FUNC(0x52)),
|
|
- OPC_EXT3B_I =(OPC_OP(0x12) | OPC_FUNC(0x53)),
|
|
- OPC_EXT4B_I =(OPC_OP(0x12) | OPC_FUNC(0x54)),
|
|
- OPC_EXT5B_I =(OPC_OP(0x12) | OPC_FUNC(0x55)),
|
|
- OPC_EXT6B_I =(OPC_OP(0x12) | OPC_FUNC(0x56)),
|
|
- OPC_EXT7B_I =(OPC_OP(0x12) | OPC_FUNC(0x57)),
|
|
+ OPC_EXTLB =(OPC_OP(0x10) | OPC_FUNC(0x50)),
|
|
+ OPC_EXTLH =(OPC_OP(0x10) | OPC_FUNC(0x51)),
|
|
+ OPC_EXTLW =(OPC_OP(0x10) | OPC_FUNC(0x52)),
|
|
+ OPC_EXTLL =(OPC_OP(0x10) | OPC_FUNC(0x53)),
|
|
+ OPC_EXTHB =(OPC_OP(0x10) | OPC_FUNC(0x54)),
|
|
+ OPC_EXTHH =(OPC_OP(0x10) | OPC_FUNC(0x55)),
|
|
+ OPC_EXTHW =(OPC_OP(0x10) | OPC_FUNC(0x56)),
|
|
+ OPC_EXTHL =(OPC_OP(0x10) | OPC_FUNC(0x57)),
|
|
+ OPC_EXTLB_I =(OPC_OP(0x12) | OPC_FUNC(0x50)),
|
|
+ OPC_EXTLH_I =(OPC_OP(0x12) | OPC_FUNC(0x51)),
|
|
+ OPC_EXTLW_I =(OPC_OP(0x12) | OPC_FUNC(0x52)),
|
|
+ OPC_EXTLL_I =(OPC_OP(0x12) | OPC_FUNC(0x53)),
|
|
+ OPC_EXTHB_I =(OPC_OP(0x12) | OPC_FUNC(0x54)),
|
|
+ OPC_EXTHH_I =(OPC_OP(0x12) | OPC_FUNC(0x55)),
|
|
+ OPC_EXTHW_I =(OPC_OP(0x12) | OPC_FUNC(0x56)),
|
|
+ OPC_EXTHL_I =(OPC_OP(0x12) | OPC_FUNC(0x57)),
|
|
|
|
- OPC_MASK0B =(OPC_OP(0x10) | OPC_FUNC(0x60)),
|
|
- OPC_MASK1B =(OPC_OP(0x10) | OPC_FUNC(0x61)),
|
|
- OPC_MASK2B =(OPC_OP(0x10) | OPC_FUNC(0x62)),
|
|
- OPC_MASK3B =(OPC_OP(0x10) | OPC_FUNC(0x63)),
|
|
- OPC_MASK4B =(OPC_OP(0x10) | OPC_FUNC(0x64)),
|
|
- OPC_MASK5B =(OPC_OP(0x10) | OPC_FUNC(0x65)),
|
|
- OPC_MASK6B =(OPC_OP(0x10) | OPC_FUNC(0x66)),
|
|
- OPC_MASK7B =(OPC_OP(0x10) | OPC_FUNC(0x67)),
|
|
- OPC_MASK0B_I =(OPC_OP(0x12) | OPC_FUNC(0x60)),
|
|
- OPC_MASK1B_I =(OPC_OP(0x12) | OPC_FUNC(0x61)),
|
|
- OPC_MASK2B_I =(OPC_OP(0x12) | OPC_FUNC(0x62)),
|
|
- OPC_MASK3B_I =(OPC_OP(0x12) | OPC_FUNC(0x63)),
|
|
- OPC_MASK4B_I =(OPC_OP(0x12) | OPC_FUNC(0x64)),
|
|
- OPC_MASK5B_I =(OPC_OP(0x12) | OPC_FUNC(0x65)),
|
|
- OPC_MASK6B_I =(OPC_OP(0x12) | OPC_FUNC(0x66)),
|
|
- OPC_MASK7B_I =(OPC_OP(0x12) | OPC_FUNC(0x67)),
|
|
+ OPC_MASKLB =(OPC_OP(0x10) | OPC_FUNC(0x60)),
|
|
+ OPC_MASKLH =(OPC_OP(0x10) | OPC_FUNC(0x61)),
|
|
+ OPC_MASKLW =(OPC_OP(0x10) | OPC_FUNC(0x62)),
|
|
+ OPC_MASKLL =(OPC_OP(0x10) | OPC_FUNC(0x63)),
|
|
+ OPC_MASKHB =(OPC_OP(0x10) | OPC_FUNC(0x64)),
|
|
+ OPC_MASKHH =(OPC_OP(0x10) | OPC_FUNC(0x65)),
|
|
+ OPC_MASKHW =(OPC_OP(0x10) | OPC_FUNC(0x66)),
|
|
+ OPC_MASKHL =(OPC_OP(0x10) | OPC_FUNC(0x67)),
|
|
+ OPC_MASKLB_I =(OPC_OP(0x12) | OPC_FUNC(0x60)),
|
|
+ OPC_MASKLH_I =(OPC_OP(0x12) | OPC_FUNC(0x61)),
|
|
+ OPC_MASKLW_I =(OPC_OP(0x12) | OPC_FUNC(0x62)),
|
|
+ OPC_MASKLL_I =(OPC_OP(0x12) | OPC_FUNC(0x63)),
|
|
+ OPC_MASKHB_I =(OPC_OP(0x12) | OPC_FUNC(0x64)),
|
|
+ OPC_MASKHH_I =(OPC_OP(0x12) | OPC_FUNC(0x65)),
|
|
+ OPC_MASKHW_I =(OPC_OP(0x12) | OPC_FUNC(0x66)),
|
|
+ OPC_MASKHL_I =(OPC_OP(0x12) | OPC_FUNC(0x67)),
|
|
|
|
OPC_CNPGEB =(OPC_OP(0x10) | OPC_FUNC(0x6C)),
|
|
OPC_CNPGEB_I =(OPC_OP(0x12) | OPC_FUNC(0x6C)),
|
|
@@ -337,168 +413,162 @@ typedef enum {
|
|
OPC_FSQRTD = (OPC_OP(0x18) | OPC_FUNC(0x09)),
|
|
}SW_64Insn;
|
|
|
|
-static void tcg_out_insn_br(TCGContext *s, SW_64Insn insn, TCGReg rd, intptr_t imm64);
|
|
-static void tcg_out_insn_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t imm16);
|
|
-static void tcg_out_insn_simpleReg(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, TCGReg rm);
|
|
-static void tcg_out_insn_simple(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, intptr_t imm64);
|
|
-static void tcg_out_insn_simpleImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, intptr_t imm64);
|
|
-static void tcg_out_insn_bitImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, unsigned long imm64);
|
|
-static void tcg_out_insn_bit(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, unsigned long imm64);
|
|
-static void tcg_out_insn_complexReg(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, TCGReg rn, TCGReg rm);
|
|
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,TCGReg a1,TCGReg a2, bool const_b, TCGReg v1, TCGReg v2);
|
|
-static bool reloc_pc21(tcg_insn_unit *src_rw, const tcg_insn_unit *target);
|
|
-static inline uint32_t tcg_in32(TCGContext *s);
|
|
-static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn);
|
|
-static void tcg_out_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset, bool sign);
|
|
-static void tcg_out_cond_cmp(TCGContext *s, TCGCond cond, TCGReg ret, TCGArg a, TCGArg b, bool const_b);
|
|
-static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, TCGReg rn, int64_t aimm);
|
|
-static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm, unsigned int m);
|
|
-static inline void tcg_out_rotl_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm);
|
|
-static inline void tcg_out_rotr_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm);
|
|
-static inline void tcg_out_rotl_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m);
|
|
-static inline void tcg_out_rotr_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m);
|
|
-static void tcg_out_cltz(TCGContext *s, SW_64Insn opc_clz, TCGType ext, TCGReg rd, TCGReg rn, TCGArg b, bool const_b);
|
|
-static inline void tcg_out_bswap16u(TCGContext *s, TCGReg rd, TCGReg rn);
|
|
-static inline void tcg_out_bswap16s(TCGContext *s, TCGReg rd, TCGReg rn);
|
|
-static inline void tcg_out_bswap32u(TCGContext *s, TCGReg rd, TCGReg rn);
|
|
-static inline void tcg_out_bswap32s(TCGContext *s, TCGReg rd, TCGReg rn);
|
|
-static inline void tcg_out_bswap64(TCGContext *s, TCGReg rd, TCGReg rn);
|
|
-static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, MemOpIdx oi);
|
|
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, MemOpIdx oi, TCGType ext);
|
|
-static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg arg1, TCGReg arg2);
|
|
-static void tcg_out_extract(TCGContext *s, TCGReg rd, TCGReg rn, int pos, int len);
|
|
-static void tcg_out_dep(TCGContext *s, TCGReg rd, TCGReg rn, int pos, int len);
|
|
-static void tcg_out_mulsh64(TCGContext *s, TCGReg rd, TCGReg rn, TCGReg rm);
|
|
-
|
|
-#define tcg_out_insn_jump tcg_out_insn_ldst
|
|
-#define tcg_out_insn_bitReg tcg_out_insn_simpleReg
|
|
+static inline uint32_t tcg_in32(TCGContext *s)
|
|
+{
|
|
+ uint32_t v = *(uint32_t *)s->code_ptr;
|
|
+ return v;
|
|
+}
|
|
|
|
-static void tcg_target_init(TCGContext *s)
|
|
+/*
|
|
+ * SW instruction format of br(alias jump)
|
|
+ * insn = opcode[31,26]:Rd[25,21]:disp[20,0],
|
|
+ */
|
|
+static void tcg_out_insn_br(TCGContext *s, SW_64Insn insn, TCGReg rd, intptr_t imm64)
|
|
{
|
|
- tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu;
|
|
- tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu;
|
|
- tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
|
|
- tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
|
|
- tcg_target_call_clobber_regs = -1ull;
|
|
-
|
|
- //sw_64 callee saved x9-x15
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X9);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X10);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X11);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X12);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X13);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X14);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X15);
|
|
-
|
|
- //sw_64 callee saved f2~f9
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F2);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F3);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F4);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F5);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F6);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F7);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F8);
|
|
- tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F9);
|
|
+ tcg_debug_assert(imm64 <= 0xfffff && imm64 >= -0x100000);
|
|
+ tcg_out32(s, insn | (rd & 0x1f) << 21 | (imm64 & 0x1fffff));
|
|
+}
|
|
|
|
- s->reserved_regs = 0;
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP); //TCG_REG_X27
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2); //TCG_REG_X25
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA); //TCG_REG_X26
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_X29); /*sw_64 platform register */
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP); /*sw_64 platform register */
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP2); /*sw_64 platform register */
|
|
+/*
|
|
+ * SW instruction format of (load and store)
|
|
+ * insn = opcode[31,26]:rd[25,21]:rn[20,16]:disp[15,0]
|
|
+ */
|
|
+static void tcg_out_insn_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t imm16)
|
|
+{
|
|
+ tcg_debug_assert(imm16 <= 0x7fff && imm16 >= -0x8000);
|
|
+ tcg_out32(s, insn | (rd & 0x1f) << 21 | (rn & 0x1f) << 16 | (imm16 & 0xffff));
|
|
}
|
|
|
|
+/*
|
|
+ * SW instruction format of simple operator for Register
|
|
+ * insn = opcode[31,26]:rn(ra)[25,21]:rn(rb)[20,16]:Zeors[15,13]:function[12,5]:rd(rc)[4,0]
|
|
+ */
|
|
+static void tcg_out_insn_simpleReg(TCGContext *s, SW_64Insn insn,TCGReg rd, TCGReg rn, TCGReg rm)
|
|
+{
|
|
+ tcg_out32(s, insn | (rn & 0x1f) << 21 | (rm & 0x1f) << 16 | (rd & 0x1f));
|
|
+}
|
|
|
|
-#ifndef CONFIG_SOFTMMU
|
|
- #define USE_GUEST_BASE guest_base != 0
|
|
- #define TCG_REG_GUEST_BASE TCG_REG_X14
|
|
-#endif
|
|
+/*
|
|
+ * SW instruction format of simple operator for imm
|
|
+ * insn = opcode[31,26]:rn(ra)[25,21]:disp[20,13]:function[12,5]:rd(rc)[4,0]
|
|
+ */
|
|
+static void tcg_out_simple(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, intptr_t imm64)
|
|
+{
|
|
+ if (imm64 <= 0x7f && imm64 >= -0x80) {
|
|
+ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
|
|
+ } else {
|
|
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP3, imm64);
|
|
+ tcg_out_insn_simpleReg(s, insn_Reg, rd, rn, TCG_REG_TMP3);
|
|
+ }
|
|
+}
|
|
|
|
+static void tcg_out_insn_simpleImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, unsigned long imm64)
|
|
+{
|
|
+ tcg_debug_assert(imm64 <= 255);
|
|
+ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
|
|
+}
|
|
|
|
-#define zeroExt 0
|
|
-#define sigExt 1
|
|
+/*
|
|
+ * sw bit operation: and bis etc
|
|
+ */
|
|
+static void tcg_out_bit(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, unsigned long imm64)
|
|
+{
|
|
+ if (imm64 <= 255) {
|
|
+ tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
|
|
+ } else {
|
|
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64);
|
|
+ tcg_out_insn_bitReg(s, insn_Reg, rd, rn, TCG_REG_TMP);
|
|
+ }
|
|
+}
|
|
|
|
+/*
|
|
+ * SW instruction format of complex operator
|
|
+ * insn = opcode[31,26]:rd[25,21]:rn[20,16],function[15,10]:rm[9,5]:rx[4,0]
|
|
+ */
|
|
+static void tcg_out_insn_complexReg(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, TCGReg rn, TCGReg rm)
|
|
+{
|
|
+ tcg_out32(s, insn | (cond & 0x1f) << 21 | (rn & 0x1f) << 16 | (rm & 0x1f) << 5 | (rd & 0x1f));
|
|
+}
|
|
|
|
-static void tcg_target_qemu_prologue(TCGContext *s)
|
|
+static void tcg_out_insn_complexImm(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, intptr_t imm8, TCGReg rm)
|
|
{
|
|
- TCGReg r;
|
|
- int ofs;
|
|
-
|
|
- /* allocate space for all saved registers */
|
|
- /* subl $sp,PUSH_SIZE,$sp */
|
|
- tcg_out_insn_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE);
|
|
-
|
|
- /* Push (FP, LR) */
|
|
- /* stl $fp,0($sp) */
|
|
- tcg_out_insn_ldst(s, OPC_STL, TCG_REG_FP, TCG_REG_SP, 0);
|
|
- /* stl $26,8($sp) */
|
|
- tcg_out_insn_ldst(s, OPC_STL, TCG_REG_RA, TCG_REG_SP, 8);
|
|
+ tcg_out32(s, insn | (cond & 0x1f) << 21 | (imm8 & 0xff) << 13 | (rm & 0x1f) << 5 | (rd & 0x1f));
|
|
+}
|
|
|
|
+static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
|
|
+{
|
|
+ if (ext == TCG_TYPE_I64) {
|
|
+ tcg_out_insn_simpleReg(s, OPC_BIS, rd, rn, TCG_REG_ZERO);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf);
|
|
+ }
|
|
+}
|
|
|
|
- /* Set up frame pointer for canonical unwinding. */
|
|
- /* TCG_REG_FP=TCG_REG_SP */
|
|
- tcg_out_movr(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP);
|
|
+static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long orig)
|
|
+{
|
|
+ tcg_target_long l0=0, l1=0, l2=0, l3=0, extra=0;
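+ /* Build the constant from sign-extended 16-bit chunks: small values take a
+ single LDI or BIS_I, wider ones are assembled with LDIH/LDI (the upper half
+ is shifted into place for 64-bit constants), with an extra LDIH fixup when
+ sign extension of a chunk would otherwise corrupt the value. */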
|
|
+ tcg_target_long val = orig;
|
|
+ TCGReg rs = TCG_REG_ZERO;
|
|
|
|
- /* Store callee-preserved regs x9..x14. */
|
|
- for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1){
|
|
- ofs = (r - TCG_REG_X9 + 2) * 8;
|
|
- tcg_out_insn_ldst(s, OPC_STL, r, TCG_REG_SP, ofs);
|
|
+ if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
|
|
+ val = (int32_t)val; /* work on the sign-extended 32-bit value */
|
|
}
|
|
|
|
- /* Make stack space for TCG locals. */
|
|
- /* subl $sp,FRAME_SIZE-PUSH_SIZE,$sp */
|
|
- tcg_out_insn_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE);
|
|
+ if (orig == (int16_t)orig) {
|
|
+ tcg_out_insn_ldst(s, OPC_LDI, rd, TCG_REG_ZERO, (int16_t)orig);
|
|
+ return;
|
|
+ }
|
|
|
|
- /* Inform TCG about how to find TCG locals with register, offset, size. */
|
|
- tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
|
|
- CPU_TEMP_BUF_NLONGS * sizeof(long));
|
|
+ if (orig == (uint8_t)orig) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_BIS_I, rd, TCG_REG_ZERO, (uint8_t)orig);
|
|
+ return;
|
|
+ }
|
|
|
|
-#if !defined(CONFIG_SOFTMMU)
|
|
- if (USE_GUEST_BASE) {
|
|
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
|
|
- tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
|
|
+ if (type == TCG_TYPE_I32) {
|
|
+ val = (int32_t)val;
|
|
}
|
|
-#endif
|
|
-
|
|
- /* TCG_AREG0=tcg_target_call_iarg_regs[0], on sw, we mov $16 to $9 */
|
|
- tcg_out_mov(s, TCG_TYPE_I64, TCG_AREG0, tcg_target_call_iarg_regs[0]);
|
|
- tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);
|
|
|
|
- /*
|
|
- * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
|
|
- * and fall through to the rest of the epilogue.
|
|
- */
|
|
- tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
|
|
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, 0);
|
|
+ l0 = (int16_t)val;
|
|
+ val = (val - l0) >> 16;
|
|
+ l1 = (int16_t)val;
|
|
|
|
- /* TB epilogue */
|
|
- tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
|
|
+ if (orig >> 31 == -1 || orig >> 31 == 0) {
|
|
+ if (l1 < 0 && orig >= 0) {
|
|
+ extra = 0x4000;
|
|
+ l1 = (int16_t)(val - 0x4000);
|
|
+ }
|
|
+ } else {
|
|
+ val = (val - l1) >> 16;
|
|
+ l2 = (int16_t)val;
|
|
+ val = (val - l2) >> 16;
|
|
+ l3 = (int16_t)val;
|
|
|
|
- /* Remove TCG locals stack space. */
|
|
- /* addl $sp,FRAME_SIZE-PUSH_SIZE,$sp */
|
|
- tcg_out_insn_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE);
|
|
+ if (l3) {
|
|
+ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l3);
|
|
+ rs = rd;
|
|
+ }
|
|
+ if (l2) {
|
|
+ tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l2);
|
|
+ rs = rd;
|
|
+ }
|
|
+ if (l3 || l2)
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, rd, rd, 32);
|
|
+ }
|
|
|
|
- /* Restore registers x9..x14. */
|
|
- for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1) {
|
|
- int ofs = (r - TCG_REG_X9 + 2) * 8;
|
|
- tcg_out_insn_ldst(s, OPC_LDL, r, TCG_REG_SP, ofs);
|
|
+ if (l1) {
|
|
+ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l1);
|
|
+ rs = rd;
|
|
}
|
|
|
|
-
|
|
- /* Pop (FP, LR) */
|
|
- /* ldl $fp,0($sp) */
|
|
- tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_FP, TCG_REG_SP, 0);
|
|
- /* ldl $26,8($sp) */
|
|
- tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_RA, TCG_REG_SP, 8);
|
|
-
|
|
- /* restore SP to previous frame. */
|
|
- /* addl $sp,PUSH_SIZE,$sp */
|
|
- tcg_out_insn_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE);
|
|
-
|
|
- tcg_out_insn_jump(s, OPC_RET, TCG_REG_ZERO, TCG_REG_RA, 0);
|
|
+ if (extra) {
|
|
+ tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, extra);
|
|
+ rs = rd;
|
|
+ }
|
|
+
|
|
+ tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l0);
|
|
+ if (type == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
}
|
|
|
|
static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
|
|
@@ -513,614 +583,541 @@ static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
|
|
tcg_out_movr(s, type, ret, arg);
|
|
break;
|
|
} else if (ret < 32) {
|
|
+ tcg_debug_assert(0);
|
|
break;
|
|
} else if (arg < 32) {
|
|
+ tcg_debug_assert(0);
|
|
break;
|
|
}
|
|
/* FALLTHRU */
|
|
+ case TCG_TYPE_V64:
|
|
+ case TCG_TYPE_V128:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
default:
|
|
g_assert_not_reached();
|
|
}
|
|
return true;
|
|
}
|
|
|
|
+static inline void tcg_out_sxt(TCGContext *s, TCGType ext, MemOp s_bits,
|
|
+ TCGReg rd, TCGReg rn)
|
|
+{
|
|
+ /*
|
|
+ * Using ALIASes SXTB, SXTH, SXTW, of SBFM Xd, Xn, #0, #7|15|31
|
|
+ * int bits = (8 << s_bits) - 1;
|
|
+ * tcg_out_sbfm(s, ext, rd, rn, 0, bits);
|
|
+ */
|
|
+ switch (s_bits) {
|
|
+ case MO_8:
|
|
+ tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rn);
|
|
+ break;
|
|
+ case MO_16:
|
|
+ tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rn);
|
|
+ break;
|
|
+ case MO_32:
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDW, rd, rn, TCG_REG_ZERO);
|
|
+ break;
|
|
+ default:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
+ }
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
+}
|
|
|
|
-static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
|
|
+/*
|
|
+ * counting leading/trailing zero bits
|
|
+ */
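+/*
+ * CTLZ/CTTZ produce the raw count; unless b equals the operand width (so
+ * the hardware result is already what TCG expects), the count goes into
+ * TCG_REG_TMP2 and SELNE substitutes the fallback value b when the input
+ * is zero.
+ */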
|
|
+static void tcg_out_ctz64(TCGContext *s, SW_64Insn opc, TCGReg rd, TCGReg rn, TCGArg b, bool const_b)
|
|
{
|
|
- switch (op) {
|
|
- case INDEX_op_goto_ptr:
|
|
- return C_O0_I1(r);
|
|
+ if (const_b && b == 64) {
|
|
+ if (opc == OPC_CTLZ) {
|
|
+ tcg_out_insn_simpleReg(s, OPC_CTLZ, rd, TCG_REG_ZERO, rn);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleReg(s, OPC_CTTZ, rd, TCG_REG_ZERO, rn);
|
|
+ }
|
|
+ } else {
|
|
+ if (opc == OPC_CTLZ) {
|
|
+ tcg_out_insn_simpleReg(s, OPC_CTLZ, TCG_REG_TMP2, TCG_REG_ZERO, rn);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleReg(s, OPC_CTTZ, TCG_REG_TMP2, TCG_REG_ZERO, rn);
|
|
+ }
|
|
+ if (const_b) {
|
|
+ if (b == -1) {
|
|
+ tcg_out_insn_bitReg(s, OPC_ORNOT, rd, TCG_REG_ZERO, TCG_REG_ZERO);
|
|
+ tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, rd);
|
|
+ } else if (b == 0) {
|
|
+ tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, TCG_REG_ZERO);
|
|
+ } else {
|
|
+ tcg_out_movi(s, TCG_TYPE_I64, rd, b);
|
|
+ tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, rd);
|
|
+ }
|
|
+ } else {
|
|
+ tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP2, b);
|
|
+ }
|
|
+ }
|
|
+}
|
|
|
|
- case INDEX_op_ld8u_i32:
|
|
- case INDEX_op_ld8s_i32:
|
|
- case INDEX_op_ld16u_i32:
|
|
- case INDEX_op_ld16s_i32:
|
|
- case INDEX_op_ld_i32:
|
|
- case INDEX_op_ld8u_i64:
|
|
- case INDEX_op_ld8s_i64:
|
|
- case INDEX_op_ld16u_i64:
|
|
- case INDEX_op_ld16s_i64:
|
|
- case INDEX_op_ld32u_i64:
|
|
- case INDEX_op_ld32s_i64:
|
|
- case INDEX_op_ld_i64:
|
|
- case INDEX_op_neg_i32:
|
|
- case INDEX_op_neg_i64:
|
|
- case INDEX_op_not_i32:
|
|
- case INDEX_op_not_i64:
|
|
- case INDEX_op_bswap16_i32:
|
|
- case INDEX_op_bswap32_i32:
|
|
- case INDEX_op_bswap16_i64:
|
|
- case INDEX_op_bswap32_i64:
|
|
- case INDEX_op_bswap64_i64:
|
|
- case INDEX_op_ext8s_i32:
|
|
- case INDEX_op_ext16s_i32:
|
|
- case INDEX_op_ext8u_i32:
|
|
- case INDEX_op_ext16u_i32:
|
|
- case INDEX_op_ext8s_i64:
|
|
- case INDEX_op_ext16s_i64:
|
|
- case INDEX_op_ext32s_i64:
|
|
- case INDEX_op_ext8u_i64:
|
|
- case INDEX_op_ext16u_i64:
|
|
- case INDEX_op_ext32u_i64:
|
|
- case INDEX_op_ext_i32_i64:
|
|
- case INDEX_op_extu_i32_i64:
|
|
- case INDEX_op_extract_i32:
|
|
- case INDEX_op_extract_i64:
|
|
- case INDEX_op_sextract_i32:
|
|
- case INDEX_op_sextract_i64:
|
|
- return C_O1_I1(r, r);
|
|
+/*
|
|
+ * counting leading/trailing zero bits
|
|
+ */
|
|
+static void tcg_out_ctz32(TCGContext *s, SW_64Insn opc, TCGReg rd, TCGReg rn, TCGArg b, bool const_b)
|
|
+{
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, rn, 0xf);
|
|
|
|
- case INDEX_op_st8_i32:
|
|
- case INDEX_op_st16_i32:
|
|
- case INDEX_op_st_i32:
|
|
- case INDEX_op_st8_i64:
|
|
- case INDEX_op_st16_i64:
|
|
- case INDEX_op_st32_i64:
|
|
- case INDEX_op_st_i64:
|
|
- return C_O0_I2(rZ, r);
|
|
+ if (const_b && b == 32) {
|
|
+ if (opc == OPC_CTLZ) {
|
|
+ tcg_out_insn_simpleReg(s, OPC_CTLZ, rd, TCG_REG_ZERO, TCG_REG_TMP);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SUBW_I, rd, rd, 32);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleReg(s, OPC_CTTZ, rd, TCG_REG_ZERO, TCG_REG_TMP);
|
|
+ tcg_out_insn_complexImm(s, OPC_SELEQ_I, TCG_REG_TMP, rd, 32, rd);
|
|
+ }
|
|
+ } else {
|
|
+ if (opc == OPC_CTLZ) {
|
|
+ tcg_out_insn_simpleReg(s, OPC_CTLZ, TCG_REG_TMP2, TCG_REG_ZERO, TCG_REG_TMP);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SUBW_I, TCG_REG_TMP2, TCG_REG_TMP2, 32);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleReg(s, OPC_CTTZ, TCG_REG_TMP2, TCG_REG_ZERO, TCG_REG_TMP);
|
|
+ tcg_out_insn_complexImm(s, OPC_SELEQ_I, TCG_REG_TMP, TCG_REG_TMP2, 32, TCG_REG_TMP2);
|
|
+ }
|
|
+ if (const_b) {
|
|
+ if (b == -1) {
|
|
+ tcg_out_insn_bitReg(s, OPC_ORNOT, rd, TCG_REG_ZERO, TCG_REG_ZERO);
|
|
+ tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, rd);
|
|
+ } else if (b == 0) {
|
|
+ tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, TCG_REG_ZERO);
|
|
+ } else {
|
|
+ tcg_out_movi(s, TCG_TYPE_I32, rd, b);
|
|
+ tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, rd);
|
|
+ }
|
|
+ } else {
|
|
+ tcg_out_insn_complexReg(s, OPC_SELNE, TCG_REG_TMP, rd, TCG_REG_TMP2, b);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
+ }
|
|
+}
|
|
|
|
- case INDEX_op_add_i32:
|
|
- case INDEX_op_add_i64:
|
|
- case INDEX_op_sub_i32:
|
|
- case INDEX_op_sub_i64:
|
|
- return C_O1_I2(r, r, rU);//rA
|
|
+/*
|
|
+ * memory barrier: enforce ordering of loads and stores
|
|
+ */
|
|
+static void tcg_out_mb(TCGContext *s)
|
|
+{
|
|
+ tcg_out32(s, OPC_MEMB);
|
|
+}
|
|
|
|
- case INDEX_op_setcond_i32:
|
|
- case INDEX_op_setcond_i64:
|
|
- return C_O1_I2(r, r, rU);//compare,rA
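+/*
+ * Byte swaps are open-coded: each source byte is picked out with EXTLB,
+ * shifted to its destination position with SLL and merged with BIS; the
+ * bswap16/32/64 helpers below differ only in which bytes they move and
+ * where.
+ */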
|
|
+static inline void tcg_out_bswap16(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
|
|
+{
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 1);
|
|
|
|
- case INDEX_op_mul_i32:
|
|
- case INDEX_op_mul_i64:
|
|
- case INDEX_op_div_i32:
|
|
- case INDEX_op_div_i64:
|
|
- case INDEX_op_divu_i32:
|
|
- case INDEX_op_divu_i64:
|
|
- case INDEX_op_rem_i32:
|
|
- case INDEX_op_rem_i64:
|
|
- case INDEX_op_remu_i32:
|
|
- case INDEX_op_remu_i64:
|
|
- case INDEX_op_muluh_i64:
|
|
- case INDEX_op_mulsh_i64:
|
|
- return C_O1_I2(r, r, r);
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- case INDEX_op_and_i32:
|
|
- case INDEX_op_and_i64:
|
|
- case INDEX_op_or_i32:
|
|
- case INDEX_op_or_i64:
|
|
- case INDEX_op_xor_i32:
|
|
- case INDEX_op_xor_i64:
|
|
- case INDEX_op_andc_i32:
|
|
- case INDEX_op_andc_i64:
|
|
- case INDEX_op_orc_i32:
|
|
- case INDEX_op_orc_i64:
|
|
- case INDEX_op_eqv_i32:
|
|
- case INDEX_op_eqv_i64:
|
|
- return C_O1_I2(r, r, rU);//rL
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 3);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- case INDEX_op_shl_i32:
|
|
- case INDEX_op_shr_i32:
|
|
- case INDEX_op_sar_i32:
|
|
- case INDEX_op_rotl_i32:
|
|
- case INDEX_op_rotr_i32:
|
|
- case INDEX_op_shl_i64:
|
|
- case INDEX_op_shr_i64:
|
|
- case INDEX_op_sar_i64:
|
|
- case INDEX_op_rotl_i64:
|
|
- case INDEX_op_rotr_i64:
|
|
- return C_O1_I2(r, r, ri);
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24);
|
|
|
|
- case INDEX_op_clz_i32:
|
|
- case INDEX_op_clz_i64:
|
|
- return C_O1_I2(r, r, r); //rAL
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP);
|
|
+ } else {
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- case INDEX_op_ctz_i32:
|
|
- case INDEX_op_ctz_i64:
|
|
- return C_O1_I2(r, r, r);//rAL
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- case INDEX_op_brcond_i32:
|
|
- case INDEX_op_brcond_i64:
|
|
- return C_O0_I2(r, rU);//rA
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- case INDEX_op_movcond_i32:
|
|
- case INDEX_op_movcond_i64:
|
|
- return C_O1_I4(r, r, rU, rZ, rZ);//rA->rU
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 7);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- case INDEX_op_qemu_ld_i32:
|
|
- case INDEX_op_qemu_ld_i64:
|
|
- return C_O1_I1(r, l);
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 6);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP);
|
|
+ }
|
|
+}
|
|
|
|
- case INDEX_op_qemu_st_i32:
|
|
- case INDEX_op_qemu_st_i64:
|
|
- return C_O0_I2(lZ, l);
|
|
+static void tcg_out_bswap32(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
|
|
+{
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 3);
|
|
|
|
- case INDEX_op_deposit_i32:
|
|
- case INDEX_op_deposit_i64:
|
|
- return C_O1_I2(r, 0, rZ);
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- case INDEX_op_extract2_i32:
|
|
- case INDEX_op_extract2_i64:
|
|
- return C_O1_I2(r, rZ, rZ);
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- case INDEX_op_add2_i32:
|
|
- case INDEX_op_add2_i64:
|
|
- case INDEX_op_sub2_i32:
|
|
- case INDEX_op_sub2_i64:
|
|
- return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24);
|
|
|
|
- case INDEX_op_add_vec:
|
|
- case INDEX_op_sub_vec:
|
|
- case INDEX_op_mul_vec:
|
|
- case INDEX_op_xor_vec:
|
|
- case INDEX_op_ssadd_vec:
|
|
- case INDEX_op_sssub_vec:
|
|
- case INDEX_op_usadd_vec:
|
|
- case INDEX_op_ussub_vec:
|
|
- case INDEX_op_smax_vec:
|
|
- case INDEX_op_smin_vec:
|
|
- case INDEX_op_umax_vec:
|
|
- case INDEX_op_umin_vec:
|
|
- case INDEX_op_shlv_vec:
|
|
- case INDEX_op_shrv_vec:
|
|
- case INDEX_op_sarv_vec:
|
|
- return C_O1_I2(w, w, w);
|
|
- case INDEX_op_not_vec:
|
|
- case INDEX_op_neg_vec:
|
|
- case INDEX_op_abs_vec:
|
|
- case INDEX_op_shli_vec:
|
|
- case INDEX_op_shri_vec:
|
|
- case INDEX_op_sari_vec:
|
|
- return C_O1_I1(w, w);
|
|
- case INDEX_op_ld_vec:
|
|
- case INDEX_op_dupm_vec:
|
|
- return C_O1_I1(w, r);
|
|
- case INDEX_op_st_vec:
|
|
- return C_O0_I2(w, r);
|
|
- case INDEX_op_dup_vec:
|
|
- return C_O1_I1(w, wr);
|
|
- case INDEX_op_or_vec:
|
|
- case INDEX_op_andc_vec:
|
|
- return C_O1_I2(w, w, wO);
|
|
- case INDEX_op_and_vec:
|
|
- case INDEX_op_orc_vec:
|
|
- return C_O1_I2(w, w, wN);
|
|
- case INDEX_op_cmp_vec:
|
|
- return C_O1_I2(w, w, wZ);
|
|
- case INDEX_op_bitsel_vec:
|
|
- return C_O1_I3(w, w, w, w);
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP);
|
|
+ } else {
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
- default:
|
|
- g_assert_not_reached();
|
|
- }
|
|
-}
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 7);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 6);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
-static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
|
|
-{
|
|
- int i;
|
|
- for (i = 0; i < count; ++i) {
|
|
- p[i] = OPC_NOP;
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
+
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP);
|
|
}
|
|
}
|
|
|
|
-/* SW instruction format of syscall
|
|
- * insn = opcode[31,26]:Function[25,0],
|
|
- */
|
|
-
|
|
-/* SW instruction format of br(alias jump)
|
|
- * insn = opcode[31,26]:Rd[25,21]:disp[20,0],
|
|
- */
|
|
-static void tcg_out_insn_br(TCGContext *s, SW_64Insn insn, TCGReg rd, intptr_t imm64)
|
|
+static void tcg_out_bswap64(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
|
|
{
|
|
- tcg_debug_assert(imm64 <= 0xfffff && imm64 >= -0x100000);
|
|
- tcg_out32(s, insn | (rd & 0x1f) << 21 | (imm64 & 0x1fffff));
|
|
-}
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP2, rn, 7);
|
|
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 6);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 8);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
-/* SW instruction format of (load and store)
|
|
- * insn = opcode[31,26]:rd[25,21]:rn[20,16]:disp[15,0]
|
|
- */
|
|
-static void tcg_out_insn_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t imm16)
|
|
-{
|
|
- tcg_debug_assert(imm16 <= 0x7fff && imm16 >= -0x8000);
|
|
- tcg_out32(s, insn | (rd & 0x1f) << 21 | (rn & 0x1f) << 16 | (imm16 & 0xffff));
|
|
-}
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 5);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 16);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 4);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 24);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
|
|
-/* SW instruction format of simple operator for Register
|
|
- * insn = opcode[31,26]:rn(ra)[25,21]:rn(rb)[20,16]:Zeors[15,13]:function[12,5]:rd(rc)[4,0]
|
|
- */
|
|
-static void tcg_out_insn_simpleReg(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, TCGReg rm)
|
|
-{
|
|
- tcg_out32(s, insn | (rn & 0x1f) << 21 | (rm & 0x1f) << 16 | (rd & 0x1f));
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 3);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 32);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
+
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 40);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
+
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 48);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP);
|
|
+
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, TCG_REG_TMP, rn, 0);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, 56);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP2, TCG_REG_TMP);
|
|
}
|
|
|
|
-/* SW instruction format of simple operator for imm
|
|
- * insn = opcode[31,26]:rn(ra)[25,21]:disp[20,13]:function[12,5]:rd(rc)[4,0]
|
|
- */
|
|
-static void tcg_out_insn_simple(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, intptr_t imm64)
|
|
+static void tcg_out_extract(TCGContext *s, TCGReg rd, TCGReg rn, int lsb, int len)
|
|
{
|
|
- if(imm64 <= 0x7f && imm64 >= -0x80) {
|
|
- tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
|
|
- }
|
|
- else {
|
|
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64);
|
|
- tcg_out_insn_simpleReg(s, insn_Reg, rd, rn, TCG_REG_TMP);
|
|
- }
|
|
-}
|
|
+ /* build a 0..0 1..1 0..0 mask: len ones starting at bit lsb */
|
|
+ tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb);
|
|
+ /* get rn[lsb, lsb+len-1]-->rd[lsb, lsb+len-1] */
|
|
+ tcg_out_insn_bitReg(s, OPC_AND, rd, rn, TCG_REG_TMP);
|
|
|
|
+ /* rd[lsb, lsb+len-1] --> rd[0, len-1] */
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, rd, rd, lsb);
|
|
+}
|
|
|
|
-static void tcg_out_insn_simpleImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, intptr_t imm64)
|
|
+static void tcg_out_dep(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, int lsb, int len)
|
|
{
|
|
- tcg_debug_assert(imm64 <= 0x7f && imm64 >= -0x80);
|
|
- tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
|
|
+ tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb);
|
|
+
|
|
+ /* TCG_REG_TMP2 = rn[msb,lsb] */
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, 64-len);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, TCG_REG_TMP2, 64-len-lsb);
|
|
|
|
+ /* clear rd[msb,lsb] */
|
|
+ tcg_out_insn_bitReg(s, OPC_BIC, rd, rd, TCG_REG_TMP);
|
|
+ /* rd = rd[63:msb+1]:rn[msb,lsb]:rd[lsb-1,0] */
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, rd, TCG_REG_TMP2);
|
|
+
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
}
|
|
|
|
-static void tcg_out_insn_bitImm(TCGContext *s, SW_64Insn insn_Imm, TCGReg rd, TCGReg rn, unsigned long imm64)
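+/*
+ * Signed high-part multiply built from the unsigned one:
+ * smulh(a, b) = umulh(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0),
+ * with the sign tests done via SRL #63 and SELEQ.
+ */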
|
|
+static void tcg_out_mulsh64(TCGContext *s, TCGReg rd, TCGReg rn, TCGReg rm)
|
|
{
|
|
- tcg_debug_assert(imm64 <= 255);
|
|
- tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
|
|
+ tcg_out_insn_simpleReg(s, OPC_UMULH, TCG_REG_TMP, rn, rm);
|
|
+
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, 63);
|
|
+ tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rm);
|
|
+ tcg_out_insn_simpleReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, 63);
|
|
+ tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rn);
|
|
+ tcg_out_insn_simpleReg(s, OPC_SUBL, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
}
|
|
-/* sw bit operation: and bis etc */
|
|
-static void tcg_out_insn_bit(TCGContext *s, SW_64Insn insn_Imm, SW_64Insn insn_Reg, TCGReg rd, TCGReg rn, unsigned long imm64)
|
|
+
|
|
+static void tcg_out_sar(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGArg a2, bool c2)
|
|
{
|
|
- if (imm64 <= 255) {
|
|
- tcg_out32(s, insn_Imm | (rn & 0x1f) << 21 | (imm64 & 0xff) << 13 | (rd & 0x1f));
|
|
- }
|
|
- else {
|
|
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64);
|
|
- tcg_out_insn_bitReg(s, insn_Reg, rd, rn, TCG_REG_TMP);
|
|
+ unsigned int bits = ext ? 64 : 32;
|
|
+ unsigned int max = bits - 1;
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDW, TCG_REG_TMP, rn, TCG_REG_ZERO);
|
|
+
|
|
+ if (c2) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRA_I, rd, TCG_REG_TMP, a2 & max);
|
|
+ } else {
|
|
+ tcg_out_insn_bitReg(s, OPC_SRA, rd, TCG_REG_TMP, a2);
|
|
+ }
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ } else {
|
|
+ if (c2) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRA_I, rd, rn, a2 & max);
|
|
+ } else {
|
|
+ tcg_out_insn_bitReg(s, OPC_SRA, rd, rn, a2);
|
|
+ }
|
|
}
|
|
}
|
|
|
|
-/* SW instruction format of complex operator
|
|
- * insn = opcode[31,26]:rd[25,21]:rn[20,16],function[15,10]:rm[9,5]:rx[4,0]
|
|
+/*
|
|
+ * memory <=> Reg in (B H W L) bytes
|
|
*/
|
|
-static void tcg_out_insn_complexReg(TCGContext *s, SW_64Insn insn, TCGReg cond, TCGReg rd, TCGReg rn, TCGReg rm)
|
|
-{
|
|
- tcg_out32(s, insn | (cond & 0x1f) << 21 | (rn & 0x1f) << 16 | (rm & 0x1f) << 5 | (rd & 0x1f));
|
|
-}
|
|
-
|
|
-static bool reloc_pc21(tcg_insn_unit *src_rw, const tcg_insn_unit *target)
|
|
+static void tcg_out_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset, bool sign)
|
|
{
|
|
- const tcg_insn_unit *src_rx = tcg_splitwx_to_rx(src_rw);
|
|
- ptrdiff_t offset = target - (src_rx + 1) ;
|
|
+ if (offset != sextract64(offset, 0, 15)) {
|
|
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP2, offset);
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP2, TCG_REG_TMP2, rn);
|
|
+ tcg_out_insn_ldst(s, insn, rd, TCG_REG_TMP2, 0);
|
|
+ } else {
|
|
+ tcg_out_insn_ldst(s, insn, rd, rn, offset);
|
|
+ }
|
|
|
|
- if (offset == sextract64(offset, 0, 21)) {
|
|
- /* read instruction, mask away previous PC_REL21 parameter contents,
|
|
- set the proper offset, then write back the instruction. */
|
|
- *src_rw = deposit32(*src_rw, 0, 21, offset);
|
|
- return true;
|
|
+ switch (insn) {
|
|
+ case OPC_LDBU:
|
|
+ if (sign)
|
|
+ tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rd);
|
|
+ break;
|
|
+ case OPC_LDHU:
|
|
+ if (sign)
|
|
+ tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rd);
|
|
+ break;
|
|
+ case OPC_LDW:
|
|
+ if (!sign)
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
}
|
|
- return false;
|
|
}
|
|
|
|
-/* sw*/
|
|
-static bool patch_reloc(tcg_insn_unit *code_ptr, int type, intptr_t value, intptr_t addend)
|
|
+static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, intptr_t ofs)
|
|
{
|
|
- tcg_debug_assert(addend == 0);
|
|
switch (type) {
|
|
- case R_SW_64_BRADDR:
|
|
- return reloc_pc21(code_ptr, (const tcg_insn_unit *)value);
|
|
+ case TCG_TYPE_I32:
|
|
+ tcg_out_ldst(s, OPC_LDW, rd, rn, ofs, zeroExt);
|
|
+ break;
|
|
+ case TCG_TYPE_I64:
|
|
+ tcg_out_ldst(s, OPC_LDL, rd, rn, ofs, sigExt);
|
|
+ break;
|
|
+ case TCG_TYPE_V64:
|
|
+ case TCG_TYPE_V128:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
default:
|
|
g_assert_not_reached();
|
|
}
|
|
}
|
|
|
|
-static inline uint32_t tcg_in32(TCGContext *s)
|
|
+static void tcg_out_st(TCGContext *s, TCGType type, TCGReg rd,TCGReg rn, intptr_t ofs)
|
|
{
|
|
- uint32_t v = *(uint32_t *)s->code_ptr;
|
|
- return v;
|
|
-}
|
|
-
|
|
-/*SW Register to register move using ADDL*/
|
|
-static void tcg_out_movr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn)
|
|
-{
|
|
- tcg_out_insn_simpleReg(s, OPC_BIS, rd, rn, TCG_REG_ZERO);
|
|
- if (ext == TCG_TYPE_I32){
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ switch (type) {
|
|
+ case TCG_TYPE_I32:
|
|
+ tcg_out_ldst(s, OPC_STW, rd, rn, ofs, noPara);
|
|
+ break;
|
|
+ case TCG_TYPE_I64:
|
|
+ tcg_out_ldst(s, OPC_STL, rd, rn, ofs, noPara);
|
|
+ break;
|
|
+ case TCG_TYPE_V64:
|
|
+ case TCG_TYPE_V128:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
+ default:
|
|
+ g_assert_not_reached();
|
|
}
|
|
}
|
|
|
|
-/*sw
|
|
- *put imm into rd
|
|
- */
|
|
-static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd, tcg_target_long orig)
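+/*
+ * Only the "positive" comparisons (EQ/LT/LE/LTU/LEU) exist as compare
+ * insns; their inverses reuse the same compare and flip the 0/1 result with
+ * XOR at the end.  32-bit operands are sign-extended in place with ADDW for
+ * the compare and re-truncated with ZAPNOT afterwards; immediates outside
+ * 0..255 are first loaded into TCG_REG_TMP2.
+ */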
|
|
+static void tcg_out_cond_cmp(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret, TCGArg a, tcg_target_long b, bool const_b)
|
|
{
|
|
- long l0, l1, l2=0, l3=0, extra=0;
|
|
- tcg_target_long val = orig;
|
|
- TCGReg rs = TCG_REG_ZERO;
|
|
-
|
|
- if (type == TCG_TYPE_I32)
|
|
- val = (int32_t)val;
|
|
-
|
|
- l0 = (int16_t)val;
|
|
- val = (val - l0) >> 16;
|
|
- l1 = (int16_t)val;
|
|
-
|
|
- if (orig >> 31 == -1 || orig >> 31 == 0) {
|
|
- if (l1 < 0 && orig >= 0) {
|
|
- extra = 0x4000;
|
|
- l1 = (int16_t)(val - 0x4000);
|
|
- }
|
|
- } else {
|
|
- val = (val - l1) >> 16;
|
|
- l2 = (int16_t)val;
|
|
- val = (val - l2) >> 16;
|
|
- l3 = (int16_t)val;
|
|
-
|
|
- if (l3) {
|
|
- tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l3);
|
|
- rs = rd;
|
|
- }
|
|
- if (l2) {
|
|
- tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l2);
|
|
- rs = rd;
|
|
- }
|
|
- if (l3 || l2)
|
|
- tcg_out_insn_simpleImm(s, OPC_SLL_I, rd, rd, 32);
|
|
- }
|
|
-
|
|
- if (l1) {
|
|
- tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, l1);
|
|
- rs = rd;
|
|
+ if (const_b && (b < 0 || b > 0xff)) {
|
|
+ tcg_out_movi(s, ext, TCG_REG_TMP2, b);
|
|
+ b = TCG_REG_TMP2;
|
|
+ const_b = 0;
|
|
}
|
|
-
|
|
- if (extra) {
|
|
- tcg_out_insn_ldst(s, OPC_LDIH, rd, rs, extra);
|
|
- rs = rd;
|
|
- }
|
|
-
|
|
- tcg_out_insn_ldst(s, OPC_LDI, rd, rs, l0);
|
|
-}
|
|
|
|
-
|
|
-/*sw
|
|
-* memory <=> Reg in (B H W L) bytes
|
|
-*/
|
|
-static void tcg_out_ldst(TCGContext *s, SW_64Insn insn, TCGReg rd, TCGReg rn, intptr_t offset, bool sign)
|
|
-{
|
|
- int16_t lo = offset;
|
|
- if (offset != lo) {
|
|
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset - lo);
|
|
- if (rn != TCG_REG_ZERO) {
|
|
- tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, TCG_REG_TMP, rn);
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDW, a, a, TCG_REG_ZERO);
|
|
+ if (!const_b) {
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDW, b, b, TCG_REG_ZERO);
|
|
+ } else {
|
|
+ b = (int32_t)b;
|
|
}
|
|
- tcg_out_insn_ldst(s, insn, rd, TCG_REG_TMP, lo);
|
|
- }
|
|
- else {
|
|
- tcg_out_insn_ldst(s, insn, rd, rn, lo);
|
|
}
|
|
|
|
- switch (insn) {
|
|
- case OPC_LDBU:
|
|
- if (sign)
|
|
- tcg_out_insn_simpleReg(s, OPC_SEXTB, rd, TCG_REG_ZERO, rd); //for micro-op:INDEX_op_ld8s_i32/64,set rd[63,8]=1
|
|
- break;
|
|
- case OPC_LDHU:
|
|
- if (sign)
|
|
- tcg_out_insn_simpleReg(s, OPC_SEXTH, rd, TCG_REG_ZERO, rd); //for micro-op:INDEX_op_ld16s_i32/64,set rd[63,16]=1
|
|
- break;
|
|
- case OPC_LDW:
|
|
- if (!sign)
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf); //for micro-op:INDEX_op_ld32u_i32/64,set rd[63,32]=0
|
|
- break;
|
|
- default:
|
|
- break;
|
|
- }
|
|
-}
|
|
-
|
|
-/* TCG_REG_TMP stores result_of_condition_compare */
|
|
-static void tcg_out_cond_cmp(TCGContext *s, TCGCond cond, TCGReg ret, TCGArg a, TCGArg b, bool const_b)
|
|
-{
|
|
if (const_b) {
|
|
- switch(cond) {
|
|
- case TCG_COND_ALWAYS:
|
|
- case TCG_COND_NEVER:
|
|
- break;
|
|
- case TCG_COND_EQ:
|
|
- case TCG_COND_NE:
|
|
- tcg_out_insn_simple(s, OPC_CMPEQ_I, OPC_CMPEQ, ret, a, b);
|
|
- break;
|
|
- case TCG_COND_LT:
|
|
- case TCG_COND_GE:
|
|
- tcg_out_insn_simple(s, OPC_CMPLT_I, OPC_CMPLT, ret, a, b);
|
|
- break;
|
|
- case TCG_COND_LE:
|
|
- case TCG_COND_GT:
|
|
- tcg_out_insn_simple(s, OPC_CMPLE_I, OPC_CMPLE, ret, a, b);
|
|
- break;
|
|
- case TCG_COND_LTU:
|
|
- case TCG_COND_GEU:
|
|
- tcg_out_insn_simple(s, OPC_CMPULT_I, OPC_CMPULT, ret, a, b);
|
|
- break;
|
|
- case TCG_COND_LEU:
|
|
- case TCG_COND_GTU:
|
|
- tcg_out_insn_simple(s, OPC_CMPULE_I, OPC_CMPULE, ret, a, b);
|
|
- break;
|
|
- }//cond
|
|
- }//if (const_b)
|
|
- else {
|
|
- switch(cond) {
|
|
- case TCG_COND_ALWAYS:
|
|
- case TCG_COND_NEVER:
|
|
- break;
|
|
- case TCG_COND_EQ:
|
|
- case TCG_COND_NE:
|
|
- tcg_out_insn_simpleReg(s, OPC_CMPEQ, ret, a, b);
|
|
- break;
|
|
- case TCG_COND_LT:
|
|
- case TCG_COND_GE:
|
|
- tcg_out_insn_simpleReg(s, OPC_CMPLT, ret, a, b);
|
|
- break;
|
|
- case TCG_COND_LE:
|
|
- case TCG_COND_GT:
|
|
- tcg_out_insn_simpleReg(s, OPC_CMPLE, ret, a, b);
|
|
- break;
|
|
- case TCG_COND_LTU:
|
|
- case TCG_COND_GEU:
|
|
- tcg_out_insn_simpleReg(s, OPC_CMPULT, ret, a, b);
|
|
- break;
|
|
- case TCG_COND_LEU:
|
|
- case TCG_COND_GTU:
|
|
- tcg_out_insn_simpleReg(s, OPC_CMPULE, ret, a, b);
|
|
- break;
|
|
- }//cond
|
|
- }//else
|
|
- switch(cond) {
|
|
- case TCG_COND_ALWAYS:
|
|
- case TCG_COND_NEVER:
|
|
+ switch (cond) {
|
|
case TCG_COND_EQ:
|
|
+ case TCG_COND_NE:
|
|
+ tcg_out_insn_simpleImm(s, OPC_CMPEQ_I, ret, a, b);
|
|
+ break;
|
|
case TCG_COND_LT:
|
|
+ case TCG_COND_GE:
|
|
+ tcg_out_insn_simpleImm(s, OPC_CMPLT_I, ret, a, b);
|
|
+ break;
|
|
case TCG_COND_LE:
|
|
+ case TCG_COND_GT:
|
|
+ tcg_out_insn_simpleImm(s, OPC_CMPLE_I, ret, a, b);
|
|
+ break;
|
|
case TCG_COND_LTU:
|
|
+ case TCG_COND_GEU:
|
|
+ tcg_out_insn_simpleImm(s, OPC_CMPULT_I, ret, a, b);
|
|
+ break;
|
|
case TCG_COND_LEU:
|
|
+ case TCG_COND_GTU:
|
|
+ tcg_out_insn_simpleImm(s, OPC_CMPULE_I, ret, a, b);
|
|
break;
|
|
- case TCG_COND_NE:
|
|
- case TCG_COND_GE:
|
|
- case TCG_COND_GT:
|
|
- case TCG_COND_GEU:
|
|
- case TCG_COND_GTU:
|
|
- tcg_out_insn_bitImm(s, OPC_XOR_I, ret, ret, 0x1);
|
|
+ default:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
+ }
|
|
+ } else {
|
|
+ switch (cond) {
|
|
+ case TCG_COND_EQ:
|
|
+ case TCG_COND_NE:
|
|
+ tcg_out_insn_simpleReg(s, OPC_CMPEQ, ret, a, b);
|
|
break;
|
|
+ case TCG_COND_LT:
|
|
+ case TCG_COND_GE:
|
|
+ tcg_out_insn_simpleReg(s, OPC_CMPLT, ret, a, b);
|
|
+ break;
|
|
+ case TCG_COND_LE:
|
|
+ case TCG_COND_GT:
|
|
+ tcg_out_insn_simpleReg(s, OPC_CMPLE, ret, a, b);
|
|
+ break;
|
|
+ case TCG_COND_LTU:
|
|
+ case TCG_COND_GEU:
|
|
+ tcg_out_insn_simpleReg(s, OPC_CMPULT, ret, a, b);
|
|
+ break;
|
|
+ case TCG_COND_LEU:
|
|
+ case TCG_COND_GTU:
|
|
+ tcg_out_insn_simpleReg(s, OPC_CMPULE, ret, a, b);
|
|
+ break;
|
|
+ default:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a, a, 0xf);
|
|
+ if (!const_b) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, b, b, 0xf);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ switch (cond) {
|
|
+ case TCG_COND_NE:
|
|
+ case TCG_COND_GE:
|
|
+ case TCG_COND_GT:
|
|
+ case TCG_COND_GEU:
|
|
+ case TCG_COND_GTU:
|
|
+ tcg_out_insn_simpleImm(s, OPC_XOR_I, ret, ret, 0x1);
|
|
+ break;
|
|
+ case TCG_COND_ALWAYS:
|
|
+ case TCG_COND_NEVER:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
+ default:
|
|
+ break;
|
|
}
|
|
}
|
|
|
|
-/* sw
|
|
+/*
|
|
* step 1: tcg_out_cmp(): "eq" and "ne" share the same case and the same insn;
|
|
* the compare result is stored in TCG_REG_TMP for step 2;
|
|
* step 2: branch on the compare result; in the last "switch" section we distinguish eq/ne by emitting different branch insns.
|
|
*/
|
|
-static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond cond, TCGArg a, TCGArg b, bool b_const, TCGLabel *l)
|
|
+static void tcg_out_brcond(TCGContext *s, TCGType ext, TCGCond cond, TCGArg a, tcg_target_long b, bool b_const, TCGLabel *l)
|
|
{
|
|
intptr_t offset;
|
|
bool need_cmp;
|
|
|
|
if (b_const && b == 0 && (cond == TCG_COND_EQ || cond == TCG_COND_NE)) {
|
|
need_cmp = false;
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, a, 0xf);
|
|
+ } else {
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, TCG_REG_TMP, a, TCG_REG_ZERO);
|
|
+ }
|
|
} else {
|
|
need_cmp = true;
|
|
- tcg_out_cond_cmp(s, cond, TCG_REG_TMP, a, b, b_const);
|
|
+ tcg_out_cond_cmp(s, ext, cond, TCG_REG_TMP, a, b, b_const);
|
|
}
|
|
|
|
if (!l->has_value) {
|
|
tcg_out_reloc(s, s->code_ptr, R_SW_64_BRADDR, l, 0);
|
|
- offset=0; //offset = tcg_in32(s) >> 5; br $31, 0, do not jump here!
|
|
+ offset=0; //offset = tcg_in32(s) >> 5; br $31, 0, do not jump here!
|
|
} else {
|
|
- offset = tcg_pcrel_diff(s, l->u.value_ptr) ;
|
|
- offset = offset >> 2;
|
|
+ offset = tcg_pcrel_diff(s, l->u.value_ptr);
|
|
+ offset = offset - 4;
|
|
+ offset = offset >> 2;
|
|
tcg_debug_assert(offset == sextract64(offset, 0, 21));
|
|
}
|
|
|
|
if (need_cmp) {
|
|
- tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, offset); //a cond b,jmp
|
|
+ tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, offset);
|
|
} else if (cond == TCG_COND_EQ) {
|
|
- tcg_out_insn_br(s, OPC_BEQ, a, offset);
|
|
+ tcg_out_insn_br(s, OPC_BEQ, TCG_REG_TMP, offset);
|
|
} else {
|
|
- tcg_out_insn_br(s, OPC_BNE, a, offset);
|
|
- }
|
|
-}
|
|
-
|
|
-/*sw
|
|
- * contact with "tcg-target-con-str.h"
|
|
- */
|
|
-#define TCG_CT_CONST_ZERO 0x100
|
|
-#define TCG_CT_CONST_LONG 0x200
|
|
-#define TCG_CT_CONST_MONE 0x400
|
|
-#define TCG_CT_CONST_ORRI 0x800
|
|
-#define TCG_CT_CONST_WORD 0X1000
|
|
-#define TCG_CT_CONST_U8 0x2000
|
|
-#define TCG_CT_CONST_S8 0X4000
|
|
-
|
|
-#define ALL_GENERAL_REGS 0xffffffffu
|
|
-#define ALL_VECTOR_REGS 0xffffffff00000000ull
|
|
-
|
|
-
|
|
-#ifdef CONFIG_SOFTMMU
|
|
-/*sw #define ALL_QLDST_REGS */
|
|
-#else
|
|
- #define ALL_QLDST_REGS ALL_GENERAL_REGS
|
|
-#endif
|
|
-
|
|
-/* sw test if a constant matches the constraint */
|
|
-static bool tcg_target_const_match(int64_t val, TCGType type, int ct)
|
|
-{
|
|
- if (ct & TCG_CT_CONST) {
|
|
- return 1;
|
|
- }
|
|
- if (type == TCG_TYPE_I32) {
|
|
- val = (int32_t)val;
|
|
- }
|
|
- if ((ct & TCG_CT_CONST_U8) && 0 <= val && val <= 255) {
|
|
- return 1;
|
|
- }
|
|
- if ((ct & TCG_CT_CONST_LONG)) {
|
|
- return 1;
|
|
- }
|
|
- if ((ct & TCG_CT_CONST_MONE)) {
|
|
- return 1;
|
|
- }
|
|
- if ((ct & TCG_CT_CONST_ORRI)) {
|
|
- return 1;
|
|
- }
|
|
- if ((ct & TCG_CT_CONST_WORD)) {
|
|
- return 1;
|
|
- }
|
|
- if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
|
|
- return 1;
|
|
+ tcg_out_insn_br(s, OPC_BNE, TCG_REG_TMP, offset);
|
|
}
|
|
- return 0;
|
|
}
|
|
|
|
-static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, intptr_t ofs)
|
|
+static void tcg_out_setcond(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret,
|
|
+ TCGReg a, tcg_target_long b, bool const_b)
|
|
{
|
|
- switch (type) {
|
|
- case TCG_TYPE_I32:
|
|
- tcg_out_ldst(s, OPC_LDW, rd, rn, ofs, sigExt);
|
|
- break;
|
|
- case TCG_TYPE_I64:
|
|
- tcg_out_ldst(s, OPC_LDL, rd, rn, ofs, sigExt);
|
|
+ switch (cond) {
|
|
+ case TCG_COND_EQ:
|
|
+ case TCG_COND_LT:
|
|
+ case TCG_COND_LE:
|
|
+ case TCG_COND_LTU:
|
|
+ case TCG_COND_LEU:
|
|
+ case TCG_COND_NE:
|
|
+ case TCG_COND_GE:
|
|
+ case TCG_COND_GT:
|
|
+ case TCG_COND_GEU:
|
|
+ case TCG_COND_GTU:
|
|
+ tcg_out_cond_cmp(s, ext, cond, ret, a, b, const_b);
|
|
break;
|
|
default:
|
|
- g_assert_not_reached();
|
|
+ tcg_abort();
|
|
+ break;
|
|
}
|
|
}
|
|
|
|
-static void tcg_out_st(TCGContext *s, TCGType type, TCGReg rd, TCGReg rn, intptr_t ofs)
|
|
+static void tcg_out_movcond(TCGContext *s, TCGType ext, TCGCond cond, TCGReg ret,
|
|
+ TCGReg a1, tcg_target_long a2, bool const_b, TCGReg v1, TCGReg v2)
|
|
{
|
|
- switch (type) {
|
|
- case TCG_TYPE_I32:
|
|
- tcg_out_insn_ldst(s, OPC_STW, rd, rn, ofs);
|
|
- break;
|
|
- case TCG_TYPE_I64:
|
|
- tcg_out_insn_ldst(s, OPC_STL, rd, rn, ofs);
|
|
- break;
|
|
- default:
|
|
- g_assert_not_reached();
|
|
- }
|
|
+ tcg_out_cond_cmp(s, ext, cond, TCG_REG_TMP, a1, a2, const_b);
|
|
+ tcg_out_insn_complexReg(s, OPC_SELLBS, TCG_REG_TMP, ret, v1, v2);
|
|
}
|
|
|
|
-static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg base, intptr_t ofs)
|
|
+static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,TCGReg base, intptr_t ofs)
|
|
{
|
|
if (type <= TCG_TYPE_I64 && val == 0) {
|
|
tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
|
|
@@ -1129,66 +1126,123 @@ static inline bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val, TCGReg b
|
|
return false;
|
|
}
|
|
|
|
-static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd, TCGReg rn, int64_t imm64)
|
|
+static void tcg_out_addsubi(TCGContext *s, int ext, TCGReg rd,TCGReg rn, int64_t imm64)
|
|
{
|
|
- if (imm64 >= 0) {
|
|
- if(0 <=imm64 && imm64 <= 255) {
|
|
- /* we use tcg_out_insn_bitImm because imm64 is between 0~255 */
|
|
- tcg_out_insn_bitImm(s, OPC_ADDL_I, rd, rn, imm64);
|
|
- }//aimm>0 && aimm == sextract64(aim, 0, 8)
|
|
- else {
|
|
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64);
|
|
- tcg_out_insn_simpleReg(s, OPC_ADDL, rd, rn, TCG_REG_TMP);
|
|
- }//aimm>0 && aimm != sextract64(aim, 0, 8)
|
|
+ if (ext == TCG_TYPE_I64) {
|
|
+ if (imm64 >= 0) {
|
|
+ if (0 <= imm64 && imm64 <= 255) {
|
|
+ /* we use tcg_out_insn_simpleImm because imm64 is between 0~255 */
|
|
+ tcg_out_insn_simpleImm(s, OPC_ADDL_I, rd, rn, imm64);
|
|
+ } else {
|
|
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, imm64);
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDL, rd, rn, TCG_REG_TMP);
|
|
+ }
|
|
+ } else {
|
|
+ if (0 < -imm64 && -imm64 <= 255) {
|
|
+ /* we use tcg_out_insn_simpleImm because -imm64 is between 0~255 */
|
|
+ tcg_out_insn_simpleImm(s, OPC_SUBL_I, rd, rn, -imm64);
|
|
+ } else {
|
|
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, -imm64);
|
|
+ tcg_out_insn_simpleReg(s, OPC_SUBL, rd, rn, TCG_REG_TMP);
|
|
+ }
|
|
+ }
|
|
} else {
|
|
- if(0 < -imm64 && -imm64 <= 255) {
|
|
- /* we use tcg_out_insn_bitImm because -imm64 is between 0~255 */
|
|
- tcg_out_insn_bitImm(s, OPC_SUBL_I, rd, rn, -imm64);
|
|
- }//aimm<0 && aimm == sextract64(aim, 0, 8)
|
|
- else {
|
|
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, -imm64);
|
|
- tcg_out_insn_simpleReg(s, OPC_SUBL, rd, rn, TCG_REG_TMP);
|
|
- }//aimm<0 && aimm != sextract64(aim, 0, 8)
|
|
+ if (imm64 >= 0) {
|
|
+ if (0 <= imm64 && imm64 <= 255) {
|
|
+ /* we use tcg_out_insn_simpleImm because imm64 is between 0~255 */
|
|
+ tcg_out_insn_simpleImm(s, OPC_ADDW_I, rd, rn, imm64);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ } else {
|
|
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, imm64);
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDW, rd, rn, TCG_REG_TMP);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
+ } else {
|
|
+ if (0 < -imm64 && -imm64 <= 255) {
|
|
+ /* we use tcg_out_insn_simpleImm because -imm64 is between 0~255 */
|
|
+ tcg_out_insn_simpleImm(s, OPC_SUBW_I, rd, rn, -imm64);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ } else {
|
|
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_TMP, -imm64);
|
|
+ tcg_out_insn_simpleReg(s, OPC_SUBW, rd, rn, TCG_REG_TMP);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
+ }
|
|
}
|
|
}
|
|
|
|
static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
|
|
{
|
|
- ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
|
|
+ ptrdiff_t offset = (tcg_pcrel_diff(s, target) - 4) >> 2;
|
|
tcg_debug_assert(offset == sextract64(offset, 0, 21));
|
|
tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, offset);
|
|
}
|
|
|
|
static void tcg_out_goto_long(TCGContext *s, const tcg_insn_unit *target)
|
|
{
|
|
- ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
|
|
- if (0 <= offset && offset <= 0x1fffff) {
|
|
+ ptrdiff_t offset = (tcg_pcrel_diff(s, target) - 4) >> 2;
|
|
+ if (offset == sextract64(offset, 0, 21)) {
|
|
tcg_out_insn_br(s, OPC_BR, TCG_REG_ZERO, offset);
|
|
} else {
|
|
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
|
|
- tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, 0);
|
|
+ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, noPara);
|
|
}
|
|
}
|
|
|
|
-
|
|
-/*sw
|
|
-* call subroutine
|
|
-*/
|
|
static void tcg_out_call(TCGContext *s, const tcg_insn_unit *target)
|
|
{
|
|
- ptrdiff_t offset = tcg_pcrel_diff(s, target) >> 2;
|
|
+ ptrdiff_t offset = (tcg_pcrel_diff(s, target) - 4) >> 2;
|
|
if (offset == sextract64(offset, 0, 21)) {
|
|
tcg_out_insn_br(s, OPC_BSR, TCG_REG_RA, offset);
|
|
} else {
|
|
tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, (intptr_t)target);
|
|
- tcg_out_insn_jump(s, OPC_CALL, TCG_REG_RA, TCG_REG_TMP, 0);
|
|
+ tcg_out_insn_jump(s, OPC_CALL, TCG_REG_RA, TCG_REG_TMP, noPara);
|
|
+ }
|
|
+}
|
|
+
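+/*
+ * Patch a direct-jump slot: the two replacement instructions (an LDIH/LDI
+ * pair forming the 32-bit-reachable target address in TCG_REG_TMP) are
+ * combined into one 64-bit value and stored atomically, so a concurrently
+ * executing thread never sees a half-patched pair; the icache range is then
+ * flushed.
+ */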
|
|
+static void modify_direct_addr(uintptr_t addr, uintptr_t jmp_rw, uintptr_t jmp_rx)
|
|
+{
|
|
+ tcg_target_long l0=0, l1=0;
|
|
+ tcg_target_long val = addr;
|
|
+ TCGReg rs = TCG_REG_ZERO;
|
|
+ TCGReg rd = TCG_REG_TMP;
|
|
+ tcg_insn_unit i_nop=0, i1=0, i2=0;
|
|
+ uint64_t pair = 0;
|
|
+ i_nop = OPC_NOP;
|
|
+ uintptr_t jmp = jmp_rw;
|
|
+
|
|
+ l0 = (int16_t)val;
|
|
+ val = (val - l0) >> 16;
|
|
+ l1 = (int16_t)val;
|
|
+ if (l1) {
|
|
+ i1 = OPC_LDIH | (rd & 0x1f) << 21 | (rs & 0x1f) << 16 | (l1 & 0xffff);
|
|
+ } else {
|
|
+ i1 = i_nop;
|
|
}
|
|
+ i2 = OPC_LDI | (rd & 0x1f) << 21 | (rs & 0x1f) << 16 | (l0 & 0xffff);
|
|
+ pair = (uint64_t)i1 << 32 | i2;
|
|
+ qatomic_set((uint64_t *)jmp, pair);
|
|
+ flush_idcache_range(jmp_rx, jmp_rw, 8);
|
|
}
|
|
|
|
void tb_target_set_jmp_target(uintptr_t tc_ptr, uintptr_t jmp_rx, uintptr_t jmp_rw, uintptr_t addr)
|
|
{
|
|
- tcg_debug_assert(0);
|
|
- //sw not support
|
|
+ tcg_insn_unit i1, i2;
|
|
+ uint64_t pair;
|
|
+
|
|
+ ptrdiff_t offset = addr - jmp_rx -4;
|
|
+
|
|
+ if (offset == sextract64(offset, 0, 21)) {
|
|
+ i1 = OPC_BR | (TCG_REG_ZERO & 0x1f) << 21| ((offset >> 2) & 0x1fffff);
|
|
+ i2 = OPC_NOP;
|
|
+ pair = (uint64_t)i2 << 32 | i1;
|
|
+ qatomic_set((uint64_t *)jmp_rw, pair);
|
|
+ flush_idcache_range(jmp_rx, jmp_rw, 8);
|
|
+ } else if (offset == sextract64(offset, 0, 32)) {
|
|
+ modify_direct_addr(addr, jmp_rw, jmp_rx);
|
|
+ } else {
|
|
+ tcg_debug_assert(0); /* target address not reachable from here */
|
|
+ }
|
|
}
|
|
|
|
static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
|
|
@@ -1201,8 +1255,8 @@ static inline void tcg_out_goto_label(TCGContext *s, TCGLabel *l)
|
|
}
|
|
}
|
|
|
|
-/* sw
|
|
- * resut: rd=rn(64,64-m]:rm(64-m,0]
|
|
+/*
|
|
+ * result: rd=rn(64,64-m]:rm(64-m,0]
|
|
* 1: rn(m,0]--->TCG_REG_TMP(64,64-m]
|
|
* 2: rm(64,64-m]--->rm(64-m,0]
|
|
* 3: rd=TCG_REG_TMP(64,64-m]:rm(64-m,0]
|
|
@@ -1211,84 +1265,442 @@ static inline void tcg_out_extr(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn
|
|
{
|
|
int bits = ext ? 64 : 32;
|
|
int max = bits - 1;
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max));
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, (m & max));
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max));
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, (m & max));
|
|
tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
}
|
|
|
|
-/* sw
|
|
- * loop right shift
|
|
- */
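+/*
+ * Rotates are open-coded as a pair of opposite shifts merged with BIS; the
+ * 32-bit variants zero-extend the source with ZAPNOT first and re-truncate
+ * the result.
+ */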
|
|
static inline void tcg_out_rotr_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m)
|
|
{
|
|
- int bits = ext ? 64 : 32;
|
|
- int max = bits - 1;
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max));
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, (m & max));
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ unsigned int bits = ext ? 64 : 32;
|
|
+ unsigned int max = bits - 1;
|
|
+ if (ext == TCG_TYPE_I64) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rn, bits - (m & max));
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, (m & max));
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP, rd, bits - (m & max));
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP2, rd, (m & max));
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
}
|
|
|
|
-/* sw loop right shift
|
|
- */
|
|
static inline void tcg_out_rotr_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm)
|
|
{
|
|
- int bits = ext ? 64 : 32;
|
|
- //get TCG_REG_TMP=64-[rm]
|
|
+ unsigned int bits = ext ? 64 : 32;
|
|
tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits);
|
|
- tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP);
|
|
+ tcg_out_insn_bitReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP);
|
|
|
|
- tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP2, rn, TCG_REG_TMP); //get rn right part to TCG_REG_TMP
|
|
- tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP, rn, rm); //get rn left part to TCG_REG_TMP
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ if (ext == TCG_TYPE_I64) {
|
|
+ tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP2, rn, TCG_REG_TMP);
|
|
+ tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP, rn, rm);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf);
|
|
+ tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP2, rd, TCG_REG_TMP);
|
|
+ tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP, rd, rm);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
}
|
|
|
|
-/* sw
|
|
- * loop left shift
|
|
- */
|
|
static inline void tcg_out_rotl_Imm(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, unsigned int m)
|
|
{
|
|
- int bits = ext ? 64 : 32;
|
|
- int max = bits - 1;
|
|
+ unsigned int bits = ext ? 64 : 32;
|
|
+ unsigned int max = bits - 1;
|
|
+
|
|
+ if (ext == TCG_TYPE_I64) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, rn, bits -(m & max));
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, (m & max));
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, rd, bits -(m & max));
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, TCG_REG_TMP2, rd, (m & max));
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
+}
|
|
+
|
|
+static inline void tcg_out_rotl_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm)
|
|
+{
|
|
+ unsigned int bits = ext ? 64 : 32;
|
|
+ tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits);
|
|
+ tcg_out_insn_bitReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP);
|
|
+
|
|
+ if (ext == TCG_TYPE_I64) {
|
|
+ tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP2, rn, TCG_REG_TMP);
|
|
+ tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP, rn, rm);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rn, 0xf);
|
|
+ tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP2, rd, TCG_REG_TMP);
|
|
+ tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP, rd, rm);
|
|
+ tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, rd, rd, 0xf);
|
|
+ }
|
|
+}
|
|
+
|
|
+#ifdef CONFIG_SOFTMMU
|
|
+#include "../tcg-ldst.c.inc"
|
|
+
|
|
+static void * const qemu_ld_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
|
|
+ [MO_UB] = helper_ret_ldub_mmu,
|
|
+ [MO_LEUW] = helper_le_lduw_mmu,
|
|
+ [MO_LEUL] = helper_le_ldul_mmu,
|
|
+ [MO_LEQ] = helper_le_ldq_mmu,
|
|
+ [MO_BEUW] = helper_be_lduw_mmu,
|
|
+ [MO_BEUL] = helper_be_ldul_mmu,
|
|
+ [MO_BEQ] = helper_be_ldq_mmu,
|
|
+};
|
|
+
|
|
+static void * const qemu_st_helpers[(MO_SIZE | MO_BSWAP) + 1] = {
|
|
+ [MO_UB] = helper_ret_stb_mmu,
|
|
+ [MO_LEUW] = helper_le_stw_mmu,
|
|
+ [MO_LEUL] = helper_le_stl_mmu,
|
|
+ [MO_LEQ] = helper_le_stq_mmu,
|
|
+ [MO_BEUW] = helper_be_stw_mmu,
|
|
+ [MO_BEUL] = helper_be_stl_mmu,
|
|
+ [MO_BEQ] = helper_be_stq_mmu,
|
|
+};
|
|
+
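+/*
+ * Materialize the address of "target" relative to the current code pointer:
+ * BR leaves the address of the following instruction in rd, SUBL #4 turns
+ * that into the address of the BR itself, and the precomputed pc-relative
+ * offset is then added (or subtracted when negative).
+ */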
|
|
+static inline void tcg_out_adr(TCGContext *s, TCGReg rd, const void *target)
|
|
+{
|
|
+ ptrdiff_t offset = tcg_pcrel_diff(s, target);
|
|
+ tcg_debug_assert(offset == sextract64(offset, 0, 21));
|
|
+ tcg_out_insn_br(s, OPC_BR, rd, 0);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SUBL_I, rd, rd, 4);
|
|
+ if (offset >= 0) {
|
|
+ tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, rd, rd, offset);
|
|
+ } else {
|
|
+ tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, rd, rd, -offset);
|
|
+ }
|
|
+}
|
|
+
|
|
+static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
|
|
+{
|
|
+ MemOpIdx oi = lb->oi;
|
|
+ MemOp opc = get_memop(oi);
|
|
+ MemOp size = opc & MO_SIZE;
|
|
+
|
|
+ if (!reloc_pc21(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X16, TCG_AREG0);
|
|
+ tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X17, lb->addrlo_reg);
|
|
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X18, oi);
|
|
+ tcg_out_adr(s, TCG_REG_X19, lb->raddr);
|
|
+ tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SIZE)]);
|
|
+ if (opc & MO_SIGN) {
|
|
+ tcg_out_sxt(s, lb->type, size, lb->datalo_reg, TCG_REG_X0);
|
|
+ } else {
|
|
+ tcg_out_mov(s, size == MO_64, lb->datalo_reg, TCG_REG_X0);
|
|
+ }
|
|
+
|
|
+ tcg_out_goto(s, lb->raddr);
|
|
+ return true;
|
|
+}
|
|
+
|
|
+static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
|
|
+{
|
|
+ MemOpIdx oi = lb->oi;
|
|
+ MemOp opc = get_memop(oi);
|
|
+ MemOp size = opc & MO_SIZE;
|
|
+
|
|
+ if (!reloc_pc21(lb->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
|
|
+ return false;
|
|
+ }
|
|
+
|
|
+ tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_X16, TCG_AREG0);
|
|
+ tcg_out_mov(s, TARGET_LONG_BITS == 64, TCG_REG_X17, lb->addrlo_reg);
|
|
+ tcg_out_mov(s, size == MO_64, TCG_REG_X18, lb->datalo_reg);
|
|
+ tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X19, oi);
|
|
+ tcg_out_adr(s, TCG_REG_X20, lb->raddr);
|
|
+ tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SIZE)]);
|
|
+ tcg_out_goto(s, lb->raddr);
|
|
+ return true;
|
|
+}
|
|
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP, rn, bits -(m & max));
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, (m & max)); //get rn left part to TCG_REG_TMP
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2); //get rn right part to left
|
|
+static void add_qemu_ldst_label(TCGContext *s, bool is_ld, MemOpIdx oi,
|
|
+ TCGType ext, TCGReg data_reg, TCGReg addr_reg,
|
|
+ tcg_insn_unit *raddr, tcg_insn_unit *label_ptr)
|
|
+{
|
|
+ TCGLabelQemuLdst *label = new_ldst_label(s);
|
|
+
|
|
+ label->is_ld = is_ld;
|
|
+ label->oi = oi;
|
|
+ label->type = ext;
|
|
+ label->datalo_reg = data_reg;
|
|
+ label->addrlo_reg = addr_reg;
|
|
+ label->raddr = tcg_splitwx_to_rx(raddr);
|
|
+ label->label_ptr[0] = label_ptr;
|
|
}
|
|
|
|
+/* We expect to use a 7-bit scaled negative offset from ENV. */
|
|
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
|
|
+QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -512);
|
|
+
|
|
+/* These offsets are built into the LDP below. */
|
|
+QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, mask) != 0);
|
|
+QEMU_BUILD_BUG_ON(offsetof(CPUTLBDescFast, table) != 8);
|
|
|
|
-/* sw loop left shift
|
|
+/*
|
|
+ * Load and compare a TLB entry, emitting the conditional jump to the
|
|
+ * slow path for the failure case, which will be patched later when finalizing
|
|
+ * the slow path. Generated code returns the host addend in X1,
|
|
+ * clobbers X0,X2,X3,TMP.
|
|
*/
|
|
-static inline void tcg_out_rotl_Reg(TCGContext *s, TCGType ext, TCGReg rd, TCGReg rn, TCGReg rm)
|
|
+static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
|
|
+ tcg_insn_unit **label_ptr, int mem_index,
|
|
+ bool is_read)
|
|
+{
|
|
+ unsigned a_bits = get_alignment_bits(opc);
|
|
+ unsigned s_bits = opc & MO_SIZE;
|
|
+ unsigned a_mask = (1u << a_bits) - 1;
|
|
+ unsigned s_mask = (1u << s_bits) - 1;
|
|
+ TCGReg x3;
|
|
+ TCGType mask_type;
|
|
+ uint64_t compare_mask;
|
|
+
|
|
+ mask_type = (TARGET_PAGE_BITS + CPU_TLB_DYN_MAX_BITS > 32
|
|
+ ? TCG_TYPE_I64 : TCG_TYPE_I32);
|
|
+
|
|
+ /* Load env_tlb(env)->f[mmu_idx].{mask,table} into {x0,x1}. */
|
|
+ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_X0, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index));
|
|
+ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_X1, TCG_AREG0, TLB_MASK_TABLE_OFS(mem_index)+8);
|
|
+
|
|
+ /* Extract the TLB index from the address into X0. */
|
|
+ if (mask_type == TCG_TYPE_I64) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, addr_reg, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
|
|
+ tcg_out_insn_bitReg(s, OPC_AND, TCG_REG_X0, TCG_REG_X0, TCG_REG_TMP);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, addr_reg, 0xf);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
|
|
+ tcg_out_insn_bitReg(s, OPC_AND, TCG_REG_X0, TCG_REG_X0, TCG_REG_TMP);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_X0, TCG_REG_X0, 0xf);
|
|
+ }
|
|
+ /* Add the tlb_table pointer, creating the CPUTLBEntry address into X1. */
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_X1, TCG_REG_X1, TCG_REG_X0);
|
|
+
|
|
+ /* Load the tlb comparator into X0, and the fast path addend into X1. */
|
|
+ tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_X0, TCG_REG_X1, is_read
|
|
+ ? offsetof(CPUTLBEntry, addr_read)
|
|
+ : offsetof(CPUTLBEntry, addr_write));
|
|
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_X1, TCG_REG_X1,
|
|
+ offsetof(CPUTLBEntry, addend));
|
|
+
|
|
+ /* For aligned accesses, we check the first byte and include the alignment
|
|
+ bits within the address. For unaligned access, we check that we don't
|
|
+ cross pages using the address of the last byte of the access. */
|
|
+ if (a_bits >= s_bits) {
|
|
+ x3 = addr_reg;
|
|
+ } else {
|
|
+ if (s_mask >= a_mask) {
|
|
+ tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_X3, addr_reg, s_mask - a_mask);
|
|
+ } else {
|
|
+ tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_X3, addr_reg, a_mask - s_mask);
|
|
+ }
|
|
+
|
|
+ if (TARGET_LONG_BITS != 64) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_X3, TCG_REG_X3, 0xf);
|
|
+ }
|
|
+ x3 = TCG_REG_X3;
|
|
+ }
|
|
+ compare_mask = (uint64_t)TARGET_PAGE_MASK | a_mask;
|
|
+
|
|
+ /* Store the page mask part of the address into X3. */
|
|
+ tcg_out_bit(s, OPC_AND_I, OPC_AND, TCG_REG_X3, x3, compare_mask);
|
|
+ if (TARGET_LONG_BITS != 64) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_X3, TCG_REG_X3, 0xf);
|
|
+ }
|
|
+
|
|
+ /* Perform the address comparison. */
|
|
+ tcg_out_cond_cmp(s, TARGET_LONG_BITS == 64, TCG_COND_NE, TCG_REG_TMP, TCG_REG_X0, TCG_REG_X3, 0);
|
|
+
|
|
+ /* If not equal, we jump to the slow path. */
|
|
+ *label_ptr = s->code_ptr;
|
|
+ tcg_out_insn_br(s, OPC_BGT, TCG_REG_TMP, 0);
|
|
+}
|
|
+
|
|
+#endif /* CONFIG_SOFTMMU */
|
|
+
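+/*
+ * Direct guest load: the address register is added to the base into
+ * TCG_REG_TMP (zero-extending it first for 32-bit guest addresses), the
+ * access is emitted with the sign/zero extension the MemOp asks for, and a
+ * big-endian MemOp is followed by an open-coded byte swap.
+ */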
|
|
+static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
|
|
+ TCGReg data_r, TCGReg addr_r,
|
|
+ TCGType otype, TCGReg off_r)
|
|
+{
|
|
+ if (otype == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, off_r, 0xf);
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, addr_r, TCG_REG_TMP);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, addr_r, off_r);
|
|
+ }
|
|
+
|
|
+ const MemOp bswap = memop & MO_BSWAP;
|
|
+
|
|
+ switch (memop & MO_SSIZE) {
|
|
+ case MO_UB:
|
|
+ tcg_out_ldst(s, OPC_LDBU, data_r, TCG_REG_TMP, 0, zeroExt);
|
|
+ break;
|
|
+ case MO_SB:
|
|
+ tcg_out_ldst(s, OPC_LDBU, data_r, TCG_REG_TMP, 0, sigExt);
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, data_r, data_r, 0xf);
|
|
+ }
|
|
+ break;
|
|
+ case MO_UW:
|
|
+ tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, zeroExt);
|
|
+ if (bswap) {
|
|
+ tcg_out_bswap16(s, ext, data_r, data_r);
|
|
+ }
|
|
+ break;
|
|
+ case MO_SW:
|
|
+ if (bswap) {
|
|
+ tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, zeroExt);
|
|
+ tcg_out_bswap16(s, ext, data_r, data_r);
|
|
+ tcg_out_insn_simpleReg(s, OPC_SEXTH, data_r, TCG_REG_ZERO, data_r);
|
|
+ } else {
|
|
+ tcg_out_ldst(s, OPC_LDHU, data_r, TCG_REG_TMP, 0, sigExt);
|
|
+ }
|
|
+
|
|
+ if (ext == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, data_r, data_r, 0xf);
|
|
+ }
|
|
+ break;
|
|
+ case MO_UL:
|
|
+ tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, zeroExt);
|
|
+ if (bswap) {
|
|
+ tcg_out_bswap32(s, ext, data_r, data_r);
|
|
+ }
|
|
+ break;
|
|
+ case MO_SL:
|
|
+ if (bswap) {
|
|
+ tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, zeroExt);
|
|
+ tcg_out_bswap32(s, ext, data_r, data_r);
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDW, data_r, data_r, TCG_REG_ZERO);
|
|
+ } else {
|
|
+ tcg_out_ldst(s, OPC_LDW, data_r, TCG_REG_TMP, 0, sigExt);
|
|
+ }
|
|
+ break;
|
|
+ case MO_Q:
|
|
+ tcg_out_ldst(s, OPC_LDL, data_r, TCG_REG_TMP, 0, zeroExt);
|
|
+ if (bswap) {
|
|
+ tcg_out_bswap64(s, ext, data_r, data_r);
|
|
+ }
|
|
+ break;
|
|
+ default:
|
|
+ tcg_abort();
|
|
+ }
|
|
+}
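
The loads above go through tcg_out_ldst with an explicit zero-/sign-extension flag (zeroExt or sigExt), and the byte-swapped variants then patch up byte order and signedness with separate instructions. As a plain-C illustration (load_sw_bswap is a made-up name, not part of the patch), the MO_SW-with-byteswap case decomposes into three steps:

    /* Illustrative only: 16-bit sign-extended, byte-swapped guest load. */
    #include <stdint.h>
    #include <string.h>

    static int64_t load_sw_bswap(const void *p)
    {
        uint16_t raw;
        memcpy(&raw, p, sizeof(raw));                  /* LDHU: zero-extending load */
        raw = (uint16_t)((raw >> 8) | (raw << 8));     /* tcg_out_bswap16           */
        return (int16_t)raw;                           /* SEXTH: sign extension     */
    }

For a 32-bit destination (ext == TCG_TYPE_I32) the emitted code additionally re-zero-extends the result with ZAPNOT 0xf, matching the backend's convention of keeping i32 values zero-extended in the 64-bit host register.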
|
|
+
|
|
+static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, MemOpIdx oi, TCGType ext)
|
|
+{
|
|
+ MemOp memop = get_memop(oi);
|
|
+ const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64: TCG_TYPE_I32;
|
|
+#ifdef CONFIG_SOFTMMU
|
|
+ unsigned mem_index = get_mmuidx(oi);
|
|
+ tcg_insn_unit *label_ptr;
|
|
+
|
|
+ tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
|
|
+ tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
|
|
+ TCG_REG_X1, otype, addr_reg);
|
|
+ add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
|
|
+ s->code_ptr, label_ptr);
|
|
+#else /* !CONFIG_SOFTMMU */
|
|
+ if (USE_GUEST_BASE) {
|
|
+ tcg_out_qemu_ld_direct(s, memop, ext, data_reg, TCG_REG_GUEST_BASE, otype, addr_reg);
|
|
+ } else {
|
|
+ tcg_out_qemu_ld_direct(s, memop, ext, data_reg, addr_reg, TCG_TYPE_I64, TCG_REG_ZERO);
|
|
+ }
|
|
+#endif /* CONFIG_SOFTMMU */
|
|
+}
|
|
+
|
|
+static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
|
|
+ TCGReg data_r, TCGReg addr_r,
|
|
+ TCGType otype, TCGReg off_r)
|
|
{
|
|
- int bits = ext ? 64 : 32;
|
|
- tcg_out_insn_simpleImm(s, OPC_SUBL_I, TCG_REG_TMP, rm, bits); //rm = 64-rm
|
|
- tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_TMP);
|
|
+ if (otype == TCG_TYPE_I32) {
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_REG_TMP, off_r, 0xf);
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, addr_r, TCG_REG_TMP);
|
|
+ } else {
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_TMP, addr_r, off_r);
|
|
+ }
|
|
+
|
|
+ const MemOp bswap = memop & MO_BSWAP;
|
|
|
|
- tcg_out_insn_bitReg(s, OPC_SRL, TCG_REG_TMP2, rn, TCG_REG_TMP); //get rn left part to TCG_REG_TMP
|
|
- tcg_out_insn_bitReg(s, OPC_SLL, TCG_REG_TMP, rn, rm); //get rn right part to left
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ switch (memop & MO_SIZE) {
|
|
+ case MO_8:
|
|
+ tcg_out_ldst(s, OPC_STB, data_r, TCG_REG_TMP, 0, 0);
|
|
+ break;
|
|
+ case MO_16:
|
|
+ if (bswap && data_r != TCG_REG_ZERO) {
|
|
+ tcg_out_bswap16(s, TCG_TYPE_I32, TCG_REG_TMP3, data_r);
|
|
+ data_r = TCG_REG_TMP3;
|
|
+ }
|
|
+ tcg_out_ldst(s, OPC_STH, data_r, TCG_REG_TMP, 0, 0);
|
|
+ break;
|
|
+ case MO_32:
|
|
+ if (bswap && data_r != TCG_REG_ZERO) {
|
|
+ tcg_out_bswap32(s, TCG_TYPE_I32, TCG_REG_TMP3, data_r);
|
|
+ data_r = TCG_REG_TMP3;
|
|
+ }
|
|
+ tcg_out_ldst(s, OPC_STW, data_r, TCG_REG_TMP, 0, 0);
|
|
+ break;
|
|
+ case MO_64:
|
|
+ if (bswap && data_r != TCG_REG_ZERO) {
|
|
+ tcg_out_bswap64(s, TCG_TYPE_I64, TCG_REG_TMP3, data_r);
|
|
+ data_r = TCG_REG_TMP3;
|
|
+ }
|
|
+ tcg_out_ldst(s, OPC_STL, data_r, TCG_REG_TMP, 0, 0);
|
|
+ break;
|
|
+ default:
|
|
+ tcg_abort();
|
|
+ }
|
|
}
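
One detail in the store path above: when a byte swap is needed, the swapped value goes through the scratch register TCG_REG_TMP3 rather than being swapped in place, so the register holding the original data stays valid for any later use (the data_r != TCG_REG_ZERO test also skips the swap entirely when storing a constant zero). A minimal C analogue, illustrative only and using a compiler builtin instead of the emitted instruction sequence:

    /* Illustrative only: swap into a scratch, store the scratch, keep the source. */
    #include <stdint.h>
    #include <string.h>

    static void store32_bswap(void *dst, uint32_t value)
    {
        uint32_t tmp = __builtin_bswap32(value);   /* bswap into a scratch */
        memcpy(dst, &tmp, sizeof(tmp));            /* STW of the scratch   */
        /* 'value' itself is untouched and can still be consumed later. */
    }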
|
|
|
|
+static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
|
|
+ MemOpIdx oi)
|
|
+{
|
|
+ MemOp memop = get_memop(oi);
|
|
+ const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64: TCG_TYPE_I32;
|
|
+#ifdef CONFIG_SOFTMMU
|
|
+ unsigned mem_index = get_mmuidx(oi);
|
|
+ tcg_insn_unit *label_ptr;
|
|
+
|
|
+ tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
|
|
+ tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_X1, otype, addr_reg);
|
|
+ add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64, data_reg, addr_reg, s->code_ptr, label_ptr);
|
|
+#else /* !CONFIG_SOFTMMU */
|
|
+ if (USE_GUEST_BASE) {
|
|
+ tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_GUEST_BASE, otype, addr_reg);
|
|
+ } else {
|
|
+ tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_TYPE_I64, TCG_REG_ZERO);
|
|
+ }
|
|
+#endif /* CONFIG_SOFTMMU */
|
|
+}
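
Without softmmu, guest addresses are turned into host addresses by adding guest_base, which the prologue further down loads into the reserved TCG_REG_GUEST_BASE register when USE_GUEST_BASE is set; for 32-bit guest addresses the offset is first zero-extended with ZAPNOT 0xf in the *_direct helpers above. A small runnable sketch of that mapping — g2h_sketch and the example addresses are made up for illustration:

    /* Illustrative only: user-mode guest-to-host address mapping. */
    #include <stdint.h>
    #include <stdio.h>

    static void *g2h_sketch(uint64_t guest_addr, uintptr_t guest_base)
    {
        return (void *)(uintptr_t)(guest_base + guest_addr);
    }

    int main(void)
    {
        char host_buf[16] = "hello";
        uintptr_t base = (uintptr_t)host_buf;             /* pretend guest 0 maps here */
        printf("%c\n", *(char *)g2h_sketch(4, base));     /* prints 'o' */
        return 0;
    }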
|
|
|
|
+static const tcg_insn_unit *tb_ret_addr;
|
|
|
|
-static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_OP_ARGS], const int const_args[TCG_MAX_OP_ARGS])
|
|
+static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_OP_ARGS],
|
|
+ const int const_args[TCG_MAX_OP_ARGS])
|
|
{
|
|
/* 99% of the time, we can signal the use of extension registers
|
|
- by looking to see if the opcode handles 64-bit data. */
|
|
+ * by looking to see if the opcode handles 64-bit data. */
|
|
TCGType ext = (tcg_op_defs[opc].flags & TCG_OPF_64BIT) != 0;
|
|
- /* Hoist the loads of the most common arguments. */
|
|
+ /* Hoist the loads of the most common arguments. */
|
|
TCGArg a0 = args[0];
|
|
TCGArg a1 = args[1];
|
|
TCGArg a2 = args[2];
|
|
int c2 = const_args[2];
|
|
|
|
/* Some operands are defined with "rZ" constraint, a register or
|
|
- the zero register. These need not actually test args[I] == 0. */
|
|
- #define REG0(I) (const_args[I] ? TCG_REG_ZERO : (TCGReg)args[I])
|
|
+ * the zero register. These need not actually test args[I] == 0. */
|
|
|
|
switch (opc) {
|
|
case INDEX_op_exit_tb:
|
|
- /* Reuse the zeroing that exists for goto_ptr. */
|
|
+ /* Reuse the zeroing that exists for goto_ptr. */
|
|
if (a0 == 0) {
|
|
tcg_out_goto_long(s, tcg_code_gen_epilogue);
|
|
} else {
|
|
@@ -1296,34 +1708,39 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_O
|
|
tcg_out_goto_long(s, tb_ret_addr);
|
|
}
|
|
break;
|
|
-
|
|
case INDEX_op_goto_tb:
|
|
if (s->tb_jmp_insn_offset != NULL) {
|
|
/* TCG_TARGET_HAS_direct_jump */
|
|
- tcg_debug_assert(0);
|
|
- /* not support here */
|
|
+ /* Ensure that ADRP+ADD are 8-byte aligned so that an atomic
|
|
+ write can be used to patch the target address. */
|
|
+ if ((uintptr_t)s->code_ptr & 7) {
|
|
+ tcg_out32(s, OPC_NOP);
|
|
+ }
|
|
+ s->tb_jmp_insn_offset[a0] = tcg_current_code_size(s);
|
|
+ tcg_out32(s, OPC_NOP);
|
|
+ tcg_out32(s, OPC_NOP);
|
|
} else {
|
|
/* !TCG_TARGET_HAS_direct_jump */
|
|
tcg_debug_assert(s->tb_jmp_target_addr != NULL);
|
|
tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP, TCG_REG_ZERO, (uintptr_t)(s->tb_jmp_target_addr + a0));
|
|
}
|
|
- tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, 0);
|
|
+ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, TCG_REG_TMP, noPara);
|
|
set_jmp_reset_offset(s, a0);
|
|
break;
|
|
-
|
|
case INDEX_op_goto_ptr:
|
|
- tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, a0, 0);
|
|
+ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, a0, noPara);
|
|
break;
|
|
-
|
|
case INDEX_op_br:
|
|
tcg_out_goto_label(s, arg_label(a0));
|
|
break;
|
|
-
|
|
case INDEX_op_ld8u_i32:
|
|
case INDEX_op_ld8u_i64:
|
|
tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 0);
|
|
break;
|
|
case INDEX_op_ld8s_i32:
|
|
+ tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ break;
|
|
case INDEX_op_ld8s_i64:
|
|
tcg_out_ldst(s, OPC_LDBU, a0, a1, a2, 1);
|
|
break;
|
|
@@ -1332,11 +1749,14 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_O
|
|
tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 0);
|
|
break;
|
|
case INDEX_op_ld16s_i32:
|
|
+ tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ break;
|
|
case INDEX_op_ld16s_i64:
|
|
tcg_out_ldst(s, OPC_LDHU, a0, a1, a2, 1);
|
|
break;
|
|
case INDEX_op_ld_i32:
|
|
- tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 1);
|
|
+ tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 0);
|
|
break;
|
|
case INDEX_op_ld32u_i64:
|
|
tcg_out_ldst(s, OPC_LDW, a0, a1, a2, 0);
|
|
@@ -1349,26 +1769,26 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_O
|
|
break;
|
|
case INDEX_op_st8_i32:
|
|
case INDEX_op_st8_i64:
|
|
- tcg_out_ldst(s, OPC_STB, a0, a1, a2, 0);
|
|
+ tcg_out_ldst(s, OPC_STB, REG0(0), a1, a2, 0);
|
|
break;
|
|
case INDEX_op_st16_i32:
|
|
case INDEX_op_st16_i64:
|
|
- tcg_out_ldst(s, OPC_STH, a0, a1, a2, 0);
|
|
+ tcg_out_ldst(s, OPC_STH, REG0(0), a1, a2, 0);
|
|
break;
|
|
case INDEX_op_st_i32:
|
|
case INDEX_op_st32_i64:
|
|
- tcg_out_ldst(s, OPC_STW, a0, a1, a2, 0);
|
|
+ tcg_out_ldst(s, OPC_STW, REG0(0), a1, a2, 0);
|
|
break;
|
|
case INDEX_op_st_i64:
|
|
- tcg_out_ldst(s, OPC_STL, a0, a1, a2, 0);
|
|
+ tcg_out_ldst(s, OPC_STL, REG0(0), a1, a2, 0);
|
|
break;
|
|
-
|
|
case INDEX_op_add_i32:
|
|
a2 = (int32_t)a2;
|
|
if (c2) {
|
|
tcg_out_addsubi(s, ext, a0, a1, a2);
|
|
} else {
|
|
- tcg_out_insn_simpleReg(s, OPC_ADDL, a0, a1, a2);
|
|
+ tcg_out_insn_simpleReg(s, OPC_ADDW, a0, a1, a2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
}
|
|
break;
|
|
case INDEX_op_add_i64:
|
|
@@ -1378,13 +1798,13 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_O
|
|
tcg_out_insn_simpleReg(s, OPC_ADDL, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
case INDEX_op_sub_i32:
|
|
a2 = (int32_t)a2;
|
|
if (c2) {
|
|
tcg_out_addsubi(s, ext, a0, a1, -a2);
|
|
} else {
|
|
- tcg_out_insn_simpleReg(s, OPC_SUBL, a0, a1, a2);
|
|
+ tcg_out_insn_simpleReg(s, OPC_SUBW, a0, a1, a2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
}
|
|
break;
|
|
case INDEX_op_sub_i64:
|
|
@@ -1394,230 +1814,207 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_O
|
|
tcg_out_insn_simpleReg(s, OPC_SUBL, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
- case INDEX_op_neg_i64:
|
|
case INDEX_op_neg_i32:
|
|
+ tcg_out_insn_bitReg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ break;
|
|
+ case INDEX_op_neg_i64:
|
|
tcg_out_insn_bitReg(s, OPC_SUBL, a0, TCG_REG_ZERO, a1);
|
|
break;
|
|
-
|
|
case INDEX_op_and_i32:
|
|
- a2 = (int32_t)a2;
|
|
if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2);
|
|
+ a2 = (int32_t)a2;
|
|
+ tcg_out_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_AND, a0, a1, a2);
|
|
}
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
break;
|
|
case INDEX_op_and_i64:
|
|
if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2);
|
|
+ tcg_out_bit(s, OPC_AND_I, OPC_AND, a0, a1, a2);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_AND, a0, a1, a2);
|
|
}
|
|
break;
|
|
case INDEX_op_andc_i32:
|
|
- a2 = (int32_t)a2;
|
|
- tcg_debug_assert(0);
|
|
- if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_AND_I, OPC_AND, a0, a1, ~a2);
|
|
- } else {
|
|
- tcg_out_insn_bitReg(s, OPC_BIC, a0, a1, a2);
|
|
- }
|
|
- break;
|
|
case INDEX_op_andc_i64:
|
|
tcg_debug_assert(0);
|
|
- if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_AND_I, OPC_AND, a0, a1, ~a2);
|
|
- } else {
|
|
- tcg_out_insn_bitReg(s, OPC_BIC, a0, a1, a2);
|
|
- }
|
|
break;
|
|
-
|
|
case INDEX_op_or_i32:
|
|
- a2 = (int32_t)a2;
|
|
if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2);
|
|
+ a2 = (int32_t)a2;
|
|
+ tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_BIS, a0, a1, a2);
|
|
}
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
break;
|
|
case INDEX_op_or_i64:
|
|
if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2);
|
|
+ tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, a2);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_BIS, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
case INDEX_op_orc_i32:
|
|
- a2 = (int32_t)a2;
|
|
- tcg_debug_assert(0);
|
|
if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2);
|
|
+ a2 = (int32_t)a2;
|
|
+ tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_ORNOT, a0, a1, a2);
|
|
}
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
break;
|
|
case INDEX_op_orc_i64:
|
|
- tcg_debug_assert(0);
|
|
if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2);
|
|
+ tcg_out_bit(s, OPC_BIS_I, OPC_BIS, a0, a1, ~a2);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_ORNOT, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
case INDEX_op_xor_i32:
|
|
- a2 = (int32_t)a2;
|
|
if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2);
|
|
+ a2 = (int32_t)a2;
|
|
+ tcg_out_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_XOR, a0, a1, a2);
|
|
}
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
break;
|
|
case INDEX_op_xor_i64:
|
|
if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2);
|
|
+ tcg_out_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, a2);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_XOR, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
case INDEX_op_eqv_i32:
|
|
- a2 = (int32_t)a2;
|
|
- tcg_debug_assert(0);
|
|
- if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, ~a2);
|
|
- } else {
|
|
- tcg_out_insn_bitReg(s, OPC_EQV, a0, a1, a2);
|
|
- }
|
|
- break;
|
|
-
|
|
case INDEX_op_eqv_i64:
|
|
tcg_debug_assert(0);
|
|
- if (c2) {
|
|
- tcg_out_insn_bit(s, OPC_XOR_I, OPC_XOR, a0, a1, ~a2);
|
|
- } else {
|
|
- tcg_out_insn_bitReg(s, OPC_EQV, a0, a1, a2);
|
|
- }
|
|
break;
|
|
-
|
|
- case INDEX_op_not_i64:
|
|
case INDEX_op_not_i32:
|
|
tcg_out_insn_bitReg(s, OPC_ORNOT, a0, TCG_REG_ZERO, a1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ break;
|
|
+ case INDEX_op_not_i64:
|
|
+ tcg_out_insn_bitReg(s, OPC_ORNOT, a0, TCG_REG_ZERO, a1);
|
|
break;
|
|
-
|
|
- case INDEX_op_mul_i64:
|
|
case INDEX_op_mul_i32:
|
|
- tcg_out_insn_simpleReg(s, OPC_MULL, a0, a1, a2);
|
|
+ tcg_out_insn_simpleReg(s, OPC_MULL, a0, a1, a2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ break;
|
|
+ case INDEX_op_mul_i64:
|
|
+ tcg_out_insn_simpleReg(s, OPC_MULL, a0, a1, a2);
|
|
break;
|
|
-
|
|
- case INDEX_op_div_i64: /* a0=a1/a2 singed divide*/
|
|
case INDEX_op_div_i32:
|
|
+ case INDEX_op_div_i64:
|
|
tcg_debug_assert(0);
|
|
break;
|
|
- case INDEX_op_divu_i64: /* a0=a1/a2 unsigned divide */
|
|
case INDEX_op_divu_i32:
|
|
+ case INDEX_op_divu_i64:
|
|
tcg_debug_assert(0);
|
|
break;
|
|
-
|
|
- case INDEX_op_rem_i64: /* if a1=17,a2=4, 17/4=4...1, a0=1 */
|
|
case INDEX_op_rem_i32:
|
|
+ case INDEX_op_rem_i64:
|
|
tcg_debug_assert(0);
|
|
break;
|
|
- case INDEX_op_remu_i64:
|
|
case INDEX_op_remu_i32:
|
|
+ case INDEX_op_remu_i64:
|
|
tcg_debug_assert(0);
|
|
break;
|
|
-
|
|
- case INDEX_op_shl_i64:
|
|
case INDEX_op_shl_i32: /* sw logical left*/
|
|
if (c2) {
|
|
- int bits = ext ? 64 : 32;
|
|
- int max = bits - 1;
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, a0, a1, a2&max);
|
|
+ unsigned int bits = ext ? 64 : 32;
|
|
+ unsigned int max = bits - 1;
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, a0, a1, a2&max);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ } else {
|
|
+ tcg_out_insn_bitReg(s, OPC_SLL, a0, a1, a2);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ }
|
|
+ break;
|
|
+ case INDEX_op_shl_i64:
|
|
+ if (c2) {
|
|
+ unsigned int bits = ext ? 64 : 32;
|
|
+ unsigned int max = bits - 1;
|
|
+ tcg_out_insn_simpleImm(s, OPC_SLL_I, a0, a1, a2&max);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_SLL, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
- case INDEX_op_shr_i64:
|
|
case INDEX_op_shr_i32: /* sw logical right */
|
|
+ a2 = (int32_t)a2;
|
|
if (c2) {
|
|
int bits = ext ? 64 : 32;
|
|
int max = bits - 1;
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, a0, a1, a2&max);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, a0, a1, a2&max);
|
|
} else {
|
|
tcg_out_insn_bitReg(s, OPC_SRL, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
- case INDEX_op_sar_i64:
|
|
- case INDEX_op_sar_i32: /* sw arithmetic right*/
|
|
+ case INDEX_op_shr_i64:
|
|
if (c2) {
|
|
int bits = ext ? 64 : 32;
|
|
int max = bits - 1;
|
|
- tcg_out_insn_bitImm(s, OPC_SRA_I, a0, a1, a2&max);
|
|
+ tcg_out_insn_simpleImm(s, OPC_SRL_I, a0, a1, a2&max);
|
|
} else {
|
|
- tcg_out_insn_bitReg(s, OPC_SRA, a0, a1, a2);
|
|
+ tcg_out_insn_bitReg(s, OPC_SRL, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
- case INDEX_op_rotr_i64:
|
|
+ case INDEX_op_sar_i32:
|
|
+ a2 = (int32_t)a2;
|
|
+ tcg_out_sar(s, ext, a0, a1, a2, c2);
|
|
+ break;
|
|
+ case INDEX_op_sar_i64: /* sw arithmetic right*/
|
|
+ tcg_out_sar(s, ext, a0, a1, a2, c2);
|
|
+ break;
|
|
case INDEX_op_rotr_i32: /* loop shift */
|
|
+ case INDEX_op_rotr_i64:
|
|
if (c2) {/* loop right shift a2*/
|
|
tcg_out_rotr_Imm(s, ext, a0, a1, a2);
|
|
} else {
|
|
tcg_out_rotr_Reg(s, ext, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
- case INDEX_op_rotl_i64:
|
|
case INDEX_op_rotl_i32: /* loop shift */
|
|
+ case INDEX_op_rotl_i64: /* sw */
|
|
if (c2) {/* loop left shift a2*/
|
|
tcg_out_rotl_Imm(s, ext, a0, a1, a2);
|
|
} else {
|
|
tcg_out_rotl_Reg(s, ext, a0, a1, a2);
|
|
}
|
|
break;
|
|
-
|
|
- case INDEX_op_clz_i64: /* counting leading zero numbers */
|
|
case INDEX_op_clz_i32:
|
|
- tcg_out_cltz(s, OPC_CTLZ, ext, a0, a1, a2, c2);
|
|
+ tcg_out_ctz32(s, OPC_CTLZ, a0, a1, a2, c2);
|
|
+ break;
|
|
+ case INDEX_op_clz_i64: /* counting leading zero numbers */
|
|
+ tcg_out_ctz64(s, OPC_CTLZ, a0, a1, a2, c2);
|
|
break;
|
|
- case INDEX_op_ctz_i64: /* counting tailing zero numbers */
|
|
case INDEX_op_ctz_i32:
|
|
- tcg_out_cltz(s, OPC_CTTZ, ext, a0, a1, a2, c2);
|
|
+ tcg_out_ctz32(s, OPC_CTTZ, a0, a1, a2, c2);
|
|
break;
|
|
-
|
|
- case INDEX_op_brcond_i32:
|
|
- a1 = (int32_t)a1;
|
|
- tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
|
|
+ case INDEX_op_ctz_i64: /* counting tailing zero numbers */
|
|
+ tcg_out_ctz64(s, OPC_CTTZ, a0, a1, a2, c2);
|
|
break;
|
|
-
|
|
+ case INDEX_op_brcond_i32:
|
|
case INDEX_op_brcond_i64:
|
|
tcg_out_brcond(s, ext, a2, a0, a1, const_args[1], arg_label(args[3]));
|
|
break;
|
|
-
|
|
case INDEX_op_setcond_i32:
|
|
a2 = (int32_t)a2;
|
|
- tcg_out_setcond(s, args[3], a0, a1, a2);
|
|
+ tcg_out_setcond(s, ext, args[3], a0, a1, a2, c2);
|
|
break;
|
|
-
|
|
case INDEX_op_setcond_i64:
|
|
- tcg_out_setcond(s, args[3], a0, a1, a2);
|
|
+ tcg_out_setcond(s, ext, args[3], a0, a1, a2, c2);
|
|
break;
|
|
-
|
|
case INDEX_op_movcond_i32:
|
|
a2 = (int32_t)a2;
|
|
- tcg_out_movcond(s, args[5], a0, a1, a2, c2, REG0(3), REG0(4));
|
|
+ tcg_out_movcond(s, ext, args[5], a0, a1, a2, c2, REG0(3), REG0(4));
|
|
break;
|
|
-
|
|
- /* FALLTHRU */
|
|
case INDEX_op_movcond_i64:
|
|
- tcg_out_movcond(s, args[5], a0, a1, a2, c2, REG0(3), REG0(4));
|
|
+ tcg_out_movcond(s, ext, args[5], a0, a1, a2, c2, REG0(3), REG0(4));
|
|
break;
|
|
-
|
|
case INDEX_op_qemu_ld_i32:
|
|
case INDEX_op_qemu_ld_i64:
|
|
tcg_out_qemu_ld(s, a0, a1, a2, ext);
|
|
@@ -1626,443 +2023,399 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc, const TCGArg args[TCG_MAX_O
|
|
case INDEX_op_qemu_st_i64:
|
|
tcg_out_qemu_st(s, REG0(0), a1, a2);
|
|
break;
|
|
-
|
|
- case INDEX_op_bswap64_i64: /* 0x123456789abcdef--->0xefcdab8967452301 */
|
|
- tcg_debug_assert(0);
|
|
- tcg_out_bswap64(s, a0, a1);
|
|
- break;
|
|
- case INDEX_op_bswap32_i64: /* 0x123456789abcdef--->0x67452301efcdab89 */
|
|
- tcg_debug_assert(0);
|
|
- tcg_out_bswap32u(s, a0, a1);
|
|
+ case INDEX_op_bswap64_i64:
|
|
+ tcg_out_bswap64(s, ext, a0, a1);
|
|
break;
|
|
case INDEX_op_bswap32_i32:
|
|
- tcg_debug_assert(0);
|
|
+ case INDEX_op_bswap32_i64:
|
|
+ tcg_out_bswap32(s, ext, a0, a1);
|
|
break;
|
|
- case INDEX_op_bswap16_i64: /* 0x123456789abcdef--->0x23016745ab89efcd */
|
|
case INDEX_op_bswap16_i32:
|
|
- tcg_debug_assert(0);
|
|
+ case INDEX_op_bswap16_i64:
|
|
+ tcg_out_bswap16(s, ext, a0, a1);
|
|
break;
|
|
-
|
|
- case INDEX_op_ext8s_i64:
|
|
case INDEX_op_ext8s_i32:
|
|
+ tcg_out_insn_simpleReg(s, OPC_SEXTB, a0, TCG_REG_ZERO, a1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ break;
|
|
+ case INDEX_op_ext8s_i64:
|
|
tcg_out_insn_simpleReg(s, OPC_SEXTB, a0, TCG_REG_ZERO, a1);
|
|
break;
|
|
- case INDEX_op_ext16s_i64:
|
|
case INDEX_op_ext16s_i32:
|
|
+ tcg_out_insn_simpleReg(s, OPC_SEXTH, a0, TCG_REG_ZERO, a1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a0, 0xf);
|
|
+ break;
|
|
+ case INDEX_op_ext16s_i64:
|
|
tcg_out_insn_simpleReg(s, OPC_SEXTH, a0, TCG_REG_ZERO, a1);
|
|
break;
|
|
case INDEX_op_ext_i32_i64:
|
|
case INDEX_op_ext32s_i64:
|
|
tcg_out_insn_simpleReg(s, OPC_ADDW, a0, TCG_REG_ZERO, a1);
|
|
break;
|
|
- case INDEX_op_ext8u_i64:
|
|
case INDEX_op_ext8u_i32:
|
|
- tcg_out_insn_simpleImm(s, OPC_EXT0B_I, a0, a1, 0x0);
|
|
+ case INDEX_op_ext8u_i64:
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLB_I, a0, a1, 0x0);
|
|
break;
|
|
- case INDEX_op_ext16u_i64:
|
|
case INDEX_op_ext16u_i32:
|
|
- tcg_out_insn_simpleImm(s, OPC_EXT1B_I, a0, a1, 0x0);
|
|
+ case INDEX_op_ext16u_i64:
|
|
+ tcg_out_insn_simpleImm(s, OPC_EXTLH_I, a0, a1, 0x0);
|
|
break;
|
|
case INDEX_op_extu_i32_i64:
|
|
case INDEX_op_ext32u_i64:
|
|
- tcg_out_movr(s, TCG_TYPE_I32, a0, a1);
|
|
+ tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, a0, a1, 0xf);
|
|
break;
|
|
-
|
|
- case INDEX_op_deposit_i64:
|
|
case INDEX_op_deposit_i32:
|
|
- tcg_out_dep(s, a0, a2, args[3], args[4]);
|
|
+ case INDEX_op_deposit_i64:
|
|
+ tcg_out_dep(s, ext, a0, REG0(2), args[3], args[4]);
|
|
break;
|
|
-
|
|
- case INDEX_op_extract_i64:
|
|
case INDEX_op_extract_i32:
|
|
+ case INDEX_op_extract_i64:
|
|
tcg_out_extract(s, a0, a1, a2, args[3]);
|
|
break;
|
|
-
|
|
- case INDEX_op_sextract_i64:
|
|
case INDEX_op_sextract_i32:
|
|
+ case INDEX_op_sextract_i64:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
+ case INDEX_op_extract2_i32: /* extract REG0(2) right args[3] bit to REG0(1) left ,save to a0*/
|
|
+ case INDEX_op_extract2_i64:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
+ case INDEX_op_add2_i32:
|
|
+ case INDEX_op_add2_i64:
|
|
+ tcg_debug_assert(0);
|
|
+ break;
|
|
+ case INDEX_op_sub2_i32:
|
|
+ case INDEX_op_sub2_i64:
|
|
tcg_debug_assert(0);
|
|
break;
|
|
+ case INDEX_op_muluh_i64:
|
|
+ tcg_out_insn_simpleReg(s, OPC_UMULH, a0, a1, a2);
|
|
+ break;
|
|
+ case INDEX_op_mulsh_i64:
|
|
+ tcg_out_mulsh64(s, a0, a1, a2);
|
|
+ break;
|
|
+ case INDEX_op_mb:
|
|
+ tcg_out_mb(s);
|
|
+ break;
|
|
+ case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
|
|
+ break;
|
|
+ case INDEX_op_mov_i64:
|
|
+ break;
|
|
+ case INDEX_op_call: /* Always emitted via tcg_out_call. */
|
|
+ default:
|
|
+ g_assert_not_reached();
|
|
+ }
|
|
+#undef REG0
|
|
+}
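
A pattern that repeats throughout tcg_out_op above: 32-bit operations are performed in 64-bit registers and the result is then re-normalised with ZAPNOT and byte mask 0xf, so i32 values are always kept zero-extended. ZAPNOT keeps the bytes selected by an 8-bit mask and clears the rest, which makes mask 0xf exactly a 32-to-64-bit zero extension. A self-contained sketch of that semantics (the zapnot helper below is an illustration, not the encoder used in the patch):

    /* Illustrative only: byte-select semantics behind ZAPNOT. */
    #include <stdint.h>
    #include <assert.h>

    static uint64_t zapnot(uint64_t x, unsigned byte_mask)
    {
        uint64_t r = 0;
        for (int i = 0; i < 8; i++) {
            if (byte_mask & (1u << i)) {
                r |= x & (0xffull << (8 * i));        /* keep byte i */
            }
        }
        return r;
    }

    int main(void)
    {
        assert(zapnot(0xdeadbeefcafef00dull, 0xf) == 0xcafef00dull);  /* 32->64 zext */
        assert(zapnot(0xdeadbeefcafef00dull, 0x3) == 0xf00dull);      /* 16->64 zext */
        return 0;
    }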
|
|
|
|
+static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
|
|
+{
|
|
+ switch (op) {
|
|
+ case INDEX_op_goto_ptr:
|
|
+ return C_O0_I1(r);
|
|
+ case INDEX_op_ld8u_i32:
|
|
+ case INDEX_op_ld8s_i32:
|
|
+ case INDEX_op_ld16u_i32:
|
|
+ case INDEX_op_ld16s_i32:
|
|
+ case INDEX_op_ld_i32:
|
|
+ case INDEX_op_ld8u_i64:
|
|
+ case INDEX_op_ld8s_i64:
|
|
+ case INDEX_op_ld16u_i64:
|
|
+ case INDEX_op_ld16s_i64:
|
|
+ case INDEX_op_ld32u_i64:
|
|
+ case INDEX_op_ld32s_i64:
|
|
+ case INDEX_op_ld_i64:
|
|
+ case INDEX_op_neg_i32:
|
|
+ case INDEX_op_neg_i64:
|
|
+ case INDEX_op_not_i32:
|
|
+ case INDEX_op_not_i64:
|
|
+ case INDEX_op_bswap16_i32:
|
|
+ case INDEX_op_bswap32_i32:
|
|
+ case INDEX_op_bswap16_i64:
|
|
+ case INDEX_op_bswap32_i64:
|
|
+ case INDEX_op_bswap64_i64:
|
|
+ case INDEX_op_ext8s_i32:
|
|
+ case INDEX_op_ext16s_i32:
|
|
+ case INDEX_op_ext8u_i32:
|
|
+ case INDEX_op_ext16u_i32:
|
|
+ case INDEX_op_ext8s_i64:
|
|
+ case INDEX_op_ext16s_i64:
|
|
+ case INDEX_op_ext32s_i64:
|
|
+ case INDEX_op_ext8u_i64:
|
|
+ case INDEX_op_ext16u_i64:
|
|
+ case INDEX_op_ext32u_i64:
|
|
+ case INDEX_op_ext_i32_i64:
|
|
+ case INDEX_op_extu_i32_i64:
|
|
+ case INDEX_op_extract_i32:
|
|
+ case INDEX_op_extract_i64:
|
|
+ case INDEX_op_sextract_i32:
|
|
+ case INDEX_op_sextract_i64:
|
|
+ return C_O1_I1(r, r);
|
|
+ case INDEX_op_st8_i32:
|
|
+ case INDEX_op_st16_i32:
|
|
+ case INDEX_op_st_i32:
|
|
+ case INDEX_op_st8_i64:
|
|
+ case INDEX_op_st16_i64:
|
|
+ case INDEX_op_st32_i64:
|
|
+ case INDEX_op_st_i64:
|
|
+ return C_O0_I2(rZ, r);
|
|
+ case INDEX_op_add_i32:
|
|
+ case INDEX_op_add_i64:
|
|
+ case INDEX_op_sub_i32:
|
|
+ case INDEX_op_sub_i64:
|
|
+ return C_O1_I2(r, r, rU);
|
|
+ case INDEX_op_setcond_i32:
|
|
+ case INDEX_op_setcond_i64:
|
|
+ return C_O1_I2(r, r, rU);
|
|
+ case INDEX_op_mul_i32:
|
|
+ case INDEX_op_mul_i64:
|
|
+ case INDEX_op_div_i32:
|
|
+ case INDEX_op_div_i64:
|
|
+ case INDEX_op_divu_i32:
|
|
+ case INDEX_op_divu_i64:
|
|
+ case INDEX_op_rem_i32:
|
|
+ case INDEX_op_rem_i64:
|
|
+ case INDEX_op_remu_i32:
|
|
+ case INDEX_op_remu_i64:
|
|
+ case INDEX_op_muluh_i64:
|
|
+ case INDEX_op_mulsh_i64:
|
|
+ return C_O1_I2(r, r, r);
|
|
+ case INDEX_op_and_i32:
|
|
+ case INDEX_op_and_i64:
|
|
+ case INDEX_op_or_i32:
|
|
+ case INDEX_op_or_i64:
|
|
+ case INDEX_op_xor_i32:
|
|
+ case INDEX_op_xor_i64:
|
|
+ case INDEX_op_andc_i32:
|
|
+ case INDEX_op_andc_i64:
|
|
+ case INDEX_op_orc_i32:
|
|
+ case INDEX_op_orc_i64:
|
|
+ case INDEX_op_eqv_i32:
|
|
+ case INDEX_op_eqv_i64:
|
|
+ return C_O1_I2(r, r, rU);
|
|
+ case INDEX_op_shl_i32:
|
|
+ case INDEX_op_shr_i32:
|
|
+ case INDEX_op_sar_i32:
|
|
+ case INDEX_op_rotl_i32:
|
|
+ case INDEX_op_rotr_i32:
|
|
+ case INDEX_op_shl_i64:
|
|
+ case INDEX_op_shr_i64:
|
|
+ case INDEX_op_sar_i64:
|
|
+ case INDEX_op_rotl_i64:
|
|
+ case INDEX_op_rotr_i64:
|
|
+ return C_O1_I2(r, r, ri);
|
|
+ case INDEX_op_clz_i32:
|
|
+ case INDEX_op_clz_i64:
|
|
+ return C_O1_I2(r, r, r);
|
|
+ case INDEX_op_ctz_i32:
|
|
+ case INDEX_op_ctz_i64:
|
|
+ return C_O1_I2(r, r, r);
|
|
+ case INDEX_op_brcond_i32:
|
|
+ case INDEX_op_brcond_i64:
|
|
+ return C_O0_I2(r, rU);
|
|
+ case INDEX_op_movcond_i32:
|
|
+ case INDEX_op_movcond_i64:
|
|
+ return C_O1_I4(r, r, rU, rZ, rZ);
|
|
+ case INDEX_op_qemu_ld_i32:
|
|
+ case INDEX_op_qemu_ld_i64:
|
|
+ return C_O1_I1(r, l);
|
|
+ case INDEX_op_qemu_st_i32:
|
|
+ case INDEX_op_qemu_st_i64:
|
|
+ return C_O0_I2(lZ, l);
|
|
+ case INDEX_op_deposit_i32:
|
|
+ case INDEX_op_deposit_i64:
|
|
+ return C_O1_I2(r, 0, rZ);
|
|
+ case INDEX_op_extract2_i32:
|
|
case INDEX_op_extract2_i64:
|
|
- case INDEX_op_extract2_i32: /* extract REG0(2) right args[3] bit to REG0(1) left ,save to a0*/
|
|
- tcg_debug_assert(0);
|
|
- break;
|
|
-
|
|
+ return C_O1_I2(r, rZ, rZ);
|
|
case INDEX_op_add2_i32:
|
|
- tcg_debug_assert(0);
|
|
- break;
|
|
case INDEX_op_add2_i64:
|
|
- tcg_debug_assert(0);
|
|
- break;
|
|
case INDEX_op_sub2_i32:
|
|
- tcg_debug_assert(0);
|
|
- break;
|
|
case INDEX_op_sub2_i64:
|
|
- tcg_debug_assert(0);
|
|
- break;
|
|
-
|
|
- case INDEX_op_muluh_i64:
|
|
- tcg_out_insn_simpleReg(s, OPC_UMULH, a0, a1, a2);
|
|
- break;
|
|
- case INDEX_op_mulsh_i64: /* sw not support */
|
|
- tcg_out_mulsh64(s, a0, a1, a2);
|
|
- break;
|
|
-
|
|
- case INDEX_op_mb:
|
|
- break;
|
|
-
|
|
- case INDEX_op_mov_i32: /* Always emitted via tcg_out_mov. */
|
|
- case INDEX_op_mov_i64:
|
|
- case INDEX_op_call: /* Always emitted via tcg_out_call. */
|
|
+ return C_O2_I4(r, r, rZ, rZ, rA, rMZ);
|
|
+ case INDEX_op_add_vec:
|
|
+ case INDEX_op_sub_vec:
|
|
+ case INDEX_op_mul_vec:
|
|
+ case INDEX_op_xor_vec:
|
|
+ case INDEX_op_ssadd_vec:
|
|
+ case INDEX_op_sssub_vec:
|
|
+ case INDEX_op_usadd_vec:
|
|
+ case INDEX_op_ussub_vec:
|
|
+ case INDEX_op_smax_vec:
|
|
+ case INDEX_op_smin_vec:
|
|
+ case INDEX_op_umax_vec:
|
|
+ case INDEX_op_umin_vec:
|
|
+ case INDEX_op_shlv_vec:
|
|
+ case INDEX_op_shrv_vec:
|
|
+ case INDEX_op_sarv_vec:
|
|
+ return C_O1_I2(w, w, w);
|
|
+ case INDEX_op_not_vec:
|
|
+ case INDEX_op_neg_vec:
|
|
+ case INDEX_op_abs_vec:
|
|
+ case INDEX_op_shli_vec:
|
|
+ case INDEX_op_shri_vec:
|
|
+ case INDEX_op_sari_vec:
|
|
+ return C_O1_I1(w, w);
|
|
+ case INDEX_op_ld_vec:
|
|
+ case INDEX_op_dupm_vec:
|
|
+ return C_O1_I1(w, r);
|
|
+ case INDEX_op_st_vec:
|
|
+ return C_O0_I2(w, r);
|
|
+ case INDEX_op_dup_vec:
|
|
+ return C_O1_I1(w, wr);
|
|
+ case INDEX_op_or_vec:
|
|
+ case INDEX_op_andc_vec:
|
|
+ return C_O1_I2(w, w, wO);
|
|
+ case INDEX_op_and_vec:
|
|
+ case INDEX_op_orc_vec:
|
|
+ return C_O1_I2(w, w, wN);
|
|
+ case INDEX_op_cmp_vec:
|
|
+ return C_O1_I2(w, w, wZ);
|
|
+ case INDEX_op_bitsel_vec:
|
|
+ return C_O1_I3(w, w, w, w);
|
|
default:
|
|
g_assert_not_reached();
|
|
}
|
|
-
|
|
-#undef REG0
|
|
}
|
|
|
|
|
|
-
|
|
-/*sw
|
|
-* counting heading/tailing zero numbers
|
|
-*/
|
|
-static void tcg_out_cltz(TCGContext *s, SW_64Insn opc_clz, TCGType ext, TCGReg rd,
|
|
- TCGReg rn, TCGArg b, bool const_b)
|
|
-{
|
|
- /* cond1. b is a const, and b=64 or b=32 */
|
|
- if (const_b && b == (ext ? 64 : 32)) {
|
|
- /* count rn zero numbers, and writes to rd */
|
|
- tcg_out_insn_simpleReg(s, opc_clz, rd, TCG_REG_ZERO, rn);
|
|
- }else {
|
|
- /* TCG_REG_TMP= counting rn heading/tailing zero numbers */
|
|
- tcg_out_insn_simpleReg(s, opc_clz, TCG_REG_TMP, TCG_REG_ZERO, rn);
|
|
-
|
|
- if (const_b) {
|
|
- if (b == -1) {
|
|
- /* cond2. b is const and b=-1 */
|
|
- /* if rn != 0 , rd= counting rn heading/tailing zero numbers, else rd = 0xffffffffffffffff*/
|
|
- tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP2, TCG_REG_ZERO, TCG_REG_ZERO);
|
|
- tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
- }
|
|
- else if (b == 0) {
|
|
- /* cond3. b is const and b=0 */
|
|
- /* if rn != 0 , rd=counting rn heading/tailing zero numbers , else rd = TCG_REG_ZERO */
|
|
- tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP, TCG_REG_ZERO);
|
|
- } else {
|
|
- /* cond4. b is const */
|
|
- tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP2, b);
|
|
- /* if rn != 0 , rd=counting rn heading/tailing zero numbers , else mov b to rd */
|
|
- tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
- }
|
|
- }
|
|
- else {
|
|
- /* if b is register */
|
|
- tcg_out_insn_complexReg(s, OPC_SELNE, rn, rd, TCG_REG_TMP, b);
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
-/*sw
|
|
- * unsigned 16bit, ab->ba
|
|
- */
|
|
-static inline void tcg_out_bswap16u(TCGContext *s, TCGReg rd, TCGReg rn)
|
|
+static void tcg_target_init(TCGContext *s)
|
|
{
|
|
- TCGReg TCG_TMP0 = rn;
|
|
- TCGReg TCG_TMP1 = rd;
|
|
- /*t1=00b0*/
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP0, 8);
|
|
- /*t1=(0000)000a*/
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP0, TCG_TMP0, 8);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP0, TCG_TMP0, 0x1);
|
|
- /*t1=ooba*/
|
|
- tcg_out_insn_simpleReg(s, OPC_BIS, TCG_TMP1, TCG_TMP1, TCG_TMP0);
|
|
-}
|
|
+ tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffffu;
|
|
+ tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffffu;
|
|
+ tcg_target_available_regs[TCG_TYPE_V64] = 0xffffffff00000000ull;
|
|
+ tcg_target_available_regs[TCG_TYPE_V128] = 0xffffffff00000000ull;
|
|
+ tcg_target_call_clobber_regs = -1ull;
|
|
|
|
-/*sw
|
|
- * signed 16bit, ab->ssba
|
|
- */
|
|
-static inline void tcg_out_bswap16s(TCGContext *s, TCGReg rd, TCGReg rn)
|
|
-{
|
|
- TCGReg TCG_TMP0 = rn;
|
|
- TCGReg TCG_TMP1 = TCG_REG_TMP;
|
|
- TCGReg TCG_TMP2 = rn;
|
|
- /*t1=(ssss)ssb0*/
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP0, 8);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAP_I, TCG_TMP1, TCG_TMP1, 0x2);
|
|
- tcg_out_insn_simpleReg(s, OPC_SEXTH, TCG_TMP1, TCG_REG_ZERO, TCG_TMP1);
|
|
- /*t2=(0000)000a*/
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP2, TCG_TMP0, 8);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP2, TCG_TMP2, 0x1);
|
|
- /*t2=(ssss)ssba*/
|
|
- tcg_out_insn_simpleReg(s, OPC_BIS, TCG_TMP1, TCG_TMP1, TCG_TMP2);
|
|
-}
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X9);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X10);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X11);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X12);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X13);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X14);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_X15);
|
|
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F2);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F3);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F4);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F5);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F6);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F7);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F8);
|
|
+ tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_F9);
|
|
|
|
-/*sw
|
|
- * signed 32bit, abcd -> ssdcba
|
|
- */
|
|
-static inline void tcg_out_bswap32s(TCGContext *s, TCGReg rd, TCGReg rn)
|
|
-{
|
|
- TCGReg TCG_TMP0 = rn;
|
|
- TCGReg TCG_TMP3 = rd;
|
|
- TCGReg TCG_TMP1 = TCG_REG_TMP;
|
|
- TCGReg TCG_TMP2 = TCG_REG_TMP2;
|
|
- /*swap32 -- 32-bit swap. a0 = abcd.*/
|
|
-
|
|
- /* t3 = (ssss)d000 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP3, TCG_TMP0, 24);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP3, TCG_TMP3, 0x0f);
|
|
- tcg_out_insn_simpleReg(s, OPC_SEXTB, TCG_TMP1, TCG_REG_ZERO, TCG_TMP0);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAP_I, TCG_TMP1, TCG_TMP1, 0x0f);
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
-
|
|
- /* t1 = 000a */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 24);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x1);
|
|
-
|
|
- /* t2 = 00c0 */
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP2, TCG_TMP0, 0x2);
|
|
-
|
|
- /* t3 = (ssss)d00a */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
-
|
|
- /* t1 = 0abc */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 8);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x7);
|
|
-
|
|
- /* t2 = 0c00 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 8);
|
|
- /* t1 = 00b0 */
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x2);
|
|
- /* t3 = (ssss)dc0a */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
|
|
- /* t3 = (ssss)dcba -- delay slot */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
+ s->reserved_regs = 0;
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_FP);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP3);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_RA);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_X29);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_FLOAT_TMP2);
|
|
}
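
tcg_target_init describes the register file to the allocator as 64-bit bitmasks: the low 32 bits are the integer registers (usable for I32/I64 values), the high 32 bits the floating/vector registers (V64/V128). Everything defaults to call-clobbered except X9-X15 and F2-F9, and sp, fp, the temporaries, ra, X29 and the float scratches are reserved outright. A tiny sketch of the bitmask convention; the register numbering used in the asserts is an assumption for illustration:

    /* Illustrative only: a register set as a 64-bit mask indexed by register number. */
    #include <stdint.h>
    #include <stdbool.h>
    #include <assert.h>

    static bool regset_test(uint64_t set, unsigned reg)
    {
        return (set >> reg) & 1;
    }

    int main(void)
    {
        uint64_t int_regs = 0xffffffffu;            /* regs 0..31: integer     */
        uint64_t vec_regs = 0xffffffff00000000ull;  /* regs 32..63: fp/vector  */
        assert(regset_test(int_regs, 3) && !regset_test(vec_regs, 3));
        assert(regset_test(vec_regs, 40) && !regset_test(int_regs, 40));
        return 0;
    }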
|
|
|
|
-/*sw
|
|
- * unsigned 32bit, abcd->dcba
|
|
- */
|
|
-static void tcg_out_bswap32u(TCGContext *s, TCGReg rd, TCGReg rn)
|
|
-{
|
|
- TCGReg TCG_TMP0 = rn;
|
|
- TCGReg TCG_TMP3 = rd;
|
|
- TCGReg TCG_TMP1 = TCG_REG_TMP;
|
|
- TCGReg TCG_TMP2 = TCG_REG_TMP2;
|
|
-
|
|
- /*bswap32u -- unsigned 32-bit swap. a0 = ....abcd.*/
|
|
- /* t1 = (0000)000d */
|
|
- tcg_out_insn_bitImm(s, OPC_AND_I, TCG_TMP1, TCG_TMP0, 0xff);
|
|
- /* t3 = 000a */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP3, TCG_TMP0, 24);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP3, TCG_TMP3, 0x1);
|
|
- /* t1 = (0000)d000 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP1, 24);
|
|
- /* t2 = 00c0 */
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP2, TCG_TMP0, 0x2);
|
|
- /* t3 = d00a */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
- /* t1 = 0abc */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 8);
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x7);
|
|
- /* t2 = 0c00 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 8);
|
|
- /* t1 = 00b0 */
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x2);
|
|
- /* t3 = dc0a */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
|
|
- /* t3 = dcba -- delay slot */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
-}
|
|
+
|
|
+#define PUSH_SIZE ((15-9+1+1) * 8)
|
|
+#define FRAME_SIZE \
|
|
+ ((PUSH_SIZE \
|
|
+ + TCG_STATIC_CALL_ARGS_SIZE \
|
|
+ + CPU_TEMP_BUF_NLONGS * sizeof(long) \
|
|
+ + TCG_TARGET_STACK_ALIGN - 1) \
|
|
+ & ~(TCG_TARGET_STACK_ALIGN - 1))
|
|
|
|
|
|
+/* We're expecting a 2 byte uleb128 encoded value. */
|
|
+QEMU_BUILD_BUG_ON(FRAME_SIZE >= (1 << 14));
|
|
|
|
-/*sw
|
|
- * swap 64bit, abcdefgh->hgfedcba
|
|
- */
|
|
-static void tcg_out_bswap64(TCGContext *s, TCGReg rd, TCGReg rn)
|
|
+/* We're expecting to use a single ADDI insn. */
|
|
+QEMU_BUILD_BUG_ON(FRAME_SIZE - PUSH_SIZE > 0xfff);
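
With the definitions above the saved-register area comes to eight 8-byte slots, enough for fp, ra and the callee-saved x9-x14 that the prologue below actually stores: (15 - 9 + 1 + 1) * 8 = 64. FRAME_SIZE then adds the static call-argument area and the temp buffer and rounds up to the stack alignment, and the two build-time checks keep the total encodable in the 2-byte uleb128 of the debug frame and in a single immediate add/sub. A throwaway check of the arithmetic; the 128-byte argument area, 128-long temp buffer and 16-byte alignment are assumed example values, not taken from the patch:

    /* Illustrative only: the frame-size arithmetic, with assumed example values. */
    #include <assert.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        int push_size = (15 - 9 + 1 + 1) * 8;          /* 8 slots of 8 bytes */
        assert(push_size == 64);

        int call_args  = 128;        /* assumed TCG_STATIC_CALL_ARGS_SIZE          */
        int temp_buf   = 128 * 8;    /* assumed CPU_TEMP_BUF_NLONGS * sizeof(long) */
        int frame_size = ALIGN_UP(push_size + call_args + temp_buf, 16);
        assert(frame_size == 1216 && frame_size < (1 << 14));
        return 0;
    }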
|
|
+
|
|
+static void tcg_target_qemu_prologue(TCGContext *s)
|
|
{
|
|
+ TCGReg r;
|
|
+ int ofs;
|
|
|
|
- TCGReg TCG_TMP0 = rn;
|
|
- TCGReg TCG_TMP3 = rd;
|
|
- TCGReg TCG_TMP1 = TCG_REG_TMP;
|
|
- TCGReg TCG_TMP2 = TCG_REG_TMP2;
|
|
-
|
|
- /* bswap64 -- 64-bit swap. a0 = abcdefgh*/
|
|
-
|
|
- /* t3 = h0000000 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP3, TCG_TMP0, 56);
|
|
- /* t1 = 0000000a */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 56);
|
|
- /* t2 = 000000g0 */
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP2, TCG_TMP0, 0x2);
|
|
- /* t3 = h000000a */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
- /* t1 = 00000abc */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 40);
|
|
- /* t2 = 0g000000 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 40);
|
|
- /* t1 = 000000b0 */
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x2);
|
|
- /* t3 = hg00000a */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
|
|
- /* t2 = 0000abcd */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP2, TCG_TMP0, 32);
|
|
- /* t3 = hg0000ba */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
- /* t1 = 000000c0 */
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP2, 0x2);
|
|
- /* t2 = 0000000d */
|
|
- tcg_out_insn_bitImm(s, OPC_AND_I, TCG_TMP2, TCG_TMP2, 0xff);
|
|
- /* t1 = 00000c00 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP1, 8);
|
|
- /* t2 = 0000d000 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 24);
|
|
- /* t3 = hg000cba */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
- /* t1 = 00abcdef */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_TMP1, TCG_TMP0, 16);
|
|
- /* t3 = hg00dcba */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
|
|
- /* t2 = 0000000f */
|
|
- tcg_out_insn_bitImm(s, OPC_AND_I, TCG_TMP2, TCG_TMP1, 0xff);
|
|
- /* t1 = 000000e0 */
|
|
- tcg_out_insn_simpleImm(s, OPC_ZAPNOT_I, TCG_TMP1, TCG_TMP1, 0x2);
|
|
- /* t2 = 00f00000 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP2, TCG_TMP2, 40);
|
|
- /* t1 = 000e0000 */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_TMP1, TCG_TMP1, 24);
|
|
- /* t3 = hgf0dcba */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP2);
|
|
- /* t3 = hgfedcba -- delay slot */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, TCG_TMP3, TCG_TMP3, TCG_TMP1);
|
|
+ /* allocate space for all saved registers */
|
|
+ /* subl $sp,PUSH_SIZE,$sp */
|
|
+ tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE);
|
|
|
|
-}
|
|
+ /* Push (FP, LR) */
|
|
+ /* stl $fp,0($sp) */
|
|
+ tcg_out_insn_ldst(s, OPC_STL, TCG_REG_FP, TCG_REG_SP, 0);
|
|
+ /* stl $26,8($sp) */
|
|
+ tcg_out_insn_ldst(s, OPC_STL, TCG_REG_RA, TCG_REG_SP, 8);
|
|
|
|
-static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg, MemOpIdx oi, TCGType ext)
|
|
-{
|
|
-#ifndef CONFIG_SOFTMMU
|
|
- MemOp memop = get_memop(oi);
|
|
- const TCGType otype = TCG_TYPE_I64;
|
|
|
|
- if (USE_GUEST_BASE) {
|
|
- tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_GUEST_BASE, TCG_REG_GUEST_BASE, addr_reg);
|
|
- tcg_out_qemu_ld_direct(s, memop, data_reg, TCG_REG_GUEST_BASE, otype, 0);
|
|
- } else {
|
|
- tcg_out_qemu_ld_direct(s, memop, data_reg, addr_reg, TCG_TYPE_I64, 0);
|
|
+ /* Set up frame pointer for canonical unwinding. */
|
|
+ /* TCG_REG_FP=TCG_REG_SP */
|
|
+ tcg_out_movr(s, TCG_TYPE_I64, TCG_REG_FP, TCG_REG_SP);
|
|
+
|
|
+ /* Store callee-preserved regs x9..x14. */
|
|
+ for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1){
|
|
+ ofs = (r - TCG_REG_X9 + 2) * 8;
|
|
+ tcg_out_insn_ldst(s, OPC_STL, r, TCG_REG_SP, ofs);
|
|
}
|
|
-#endif /* CONFIG_SOFTMMU */
|
|
|
|
-}
|
|
+ /* Make stack space for TCG locals. */
|
|
+ /* subl $sp,FRAME_SIZE-PUSH_SIZE,$sp */
|
|
+ tcg_out_simple(s, OPC_SUBL_I, OPC_SUBL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE);
|
|
|
|
-static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
|
|
- MemOpIdx oi)
|
|
-{
|
|
-#ifndef CONFIG_SOFTMMU
|
|
- MemOp memop = get_memop(oi);
|
|
- const TCGType otype = TCG_TYPE_I64;
|
|
+ /* Inform TCG about how to find TCG locals with register, offset, size. */
|
|
+ tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE,
|
|
+ CPU_TEMP_BUF_NLONGS * sizeof(long));
|
|
|
|
+#ifndef CONFIG_SOFTMMU
|
|
if (USE_GUEST_BASE) {
|
|
- tcg_out_insn_simpleReg(s, OPC_ADDL, TCG_REG_GUEST_BASE, TCG_REG_GUEST_BASE, addr_reg);
|
|
- tcg_out_qemu_st_direct(s, memop, data_reg, TCG_REG_GUEST_BASE, otype, 0);
|
|
- } else {
|
|
- tcg_out_qemu_st_direct(s, memop, data_reg, addr_reg, TCG_TYPE_I64, 0);
|
|
+ tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_GUEST_BASE, guest_base);
|
|
+ tcg_regset_set_reg(s->reserved_regs, TCG_REG_GUEST_BASE);
|
|
}
|
|
-#endif /* CONFIG_SOFTMMU */
|
|
-}
|
|
-
|
|
-
|
|
-/*sw
|
|
- * if cond is successful, ret=1, otherwise ret = 0
|
|
- */
|
|
-static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret,
|
|
- TCGReg arg1, TCGReg arg2)
|
|
-{
|
|
- switch(cond) {
|
|
- case TCG_COND_EQ:
|
|
- case TCG_COND_LT:
|
|
- case TCG_COND_LE:
|
|
- case TCG_COND_LTU:
|
|
- case TCG_COND_LEU:
|
|
- case TCG_COND_NE:
|
|
- case TCG_COND_GE:
|
|
- case TCG_COND_GT:
|
|
- case TCG_COND_GEU:
|
|
- case TCG_COND_GTU:
|
|
- tcg_out_cond_cmp(s, cond, ret, arg1, arg2, 0);
|
|
- break;
|
|
- default:
|
|
- tcg_abort();
|
|
- break;
|
|
- }
|
|
-}
|
|
-/*sw
|
|
- * cond(a1,a2), yes:v1->ret, no:v2->ret
|
|
- */
|
|
-static void tcg_out_movcond(TCGContext *s, TCGCond cond, TCGReg ret,
|
|
- TCGReg a1, TCGReg a2, bool const_b, TCGReg v1, TCGReg v2)
|
|
-{
|
|
- tcg_out_cond_cmp(s, cond, TCG_REG_TMP, a1, a2, const_b);
|
|
- tcg_out_insn_complexReg(s, OPC_SELLBS, TCG_REG_TMP, ret, v1, v2);
|
|
-}
|
|
-
|
|
-
|
|
+#endif
|
|
|
|
-/*sw
|
|
- * extract rn[lsb, lsb+len-1] -> rd[0, len-1]
|
|
- */
|
|
-static void tcg_out_extract(TCGContext *s, TCGReg rd, TCGReg rn, int lsb, int len)
|
|
-{
|
|
- //get 000..111..0000
|
|
- tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO);
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len);
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb);
|
|
- /* get rn[lsb, lsb+len-1]-->rd[lsb, lsb+len-1] */
|
|
- tcg_out_insn_bitReg(s, OPC_AND, rd, rn, TCG_REG_TMP);
|
|
+ /* TCG_AREG0=tcg_target_call_iarg_regs[0], on sw, we mov $16 to $9 */
|
|
+ tcg_out_mov(s, TCG_TYPE_I64, TCG_AREG0, tcg_target_call_iarg_regs[0]);
|
|
+ tcg_out_insn_jump(s, OPC_JMP, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], noPara);
|
|
|
|
- /* rd[lsb, lsb+len-1] --> rd[0, len-1] */
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, rd, rd, lsb);
|
|
-}
|
|
+ /*
|
|
+ * Return path for goto_ptr. Set return value to 0, a-la exit_tb,
|
|
+ * and fall through to the rest of the epilogue.
|
|
+ */
|
|
+ tcg_code_gen_epilogue = tcg_splitwx_to_rx(s->code_ptr);
|
|
+ tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_X0, 0);
|
|
|
|
+ /* TB epilogue */
|
|
+ tb_ret_addr = tcg_splitwx_to_rx(s->code_ptr);
|
|
|
|
-/*sw
|
|
- * depos: rd = rd[63:msb+1]:rn[msb,lsb]:rd[lsb-1,0]
|
|
- * len = msb -lsb + 1
|
|
- */
|
|
-static void tcg_out_dep(TCGContext *s, TCGReg rd, TCGReg rn, int lsb, int len)
|
|
-{
|
|
+ /* Remove TCG locals stack space. */
|
|
+ /* addl $sp,FRAME_SIZE-PUSH_SIZE,$sp */
|
|
+ tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE - PUSH_SIZE);
|
|
|
|
- //get 000..111..0000
|
|
- tcg_out_insn_bitReg(s, OPC_ORNOT, TCG_REG_TMP, TCG_REG_ZERO, TCG_REG_ZERO);
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP, TCG_REG_TMP, 64 - len);
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP, TCG_REG_TMP, lsb);
|
|
+ /* Restore registers x9..x14. */
|
|
+ for (r = TCG_REG_X9; r <= TCG_REG_X14; r += 1) {
|
|
+ int ofs = (r - TCG_REG_X9 + 2) * 8;
|
|
+ tcg_out_insn_ldst(s, OPC_LDL, r, TCG_REG_SP, ofs);
|
|
+ }
|
|
|
|
- /* TCG_REG_TMP2 = rn[msb,lsb] */
|
|
- tcg_out_insn_bitImm(s, OPC_SLL_I, TCG_REG_TMP2, rn, 64-len);
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, TCG_REG_TMP2, 64-len-lsb);
|
|
+ /* Pop (FP, LR) */
|
|
+ /* ldl $fp,0($sp) */
|
|
+ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_FP, TCG_REG_SP, 0);
|
|
+ /* ldl $26,8($sp) */
|
|
+ tcg_out_insn_ldst(s, OPC_LDL, TCG_REG_RA, TCG_REG_SP, 8);
|
|
|
|
- /* clear rd[msb,lsb] */
|
|
- tcg_out_insn_bitReg(s, OPC_BIC, rd, rd, TCG_REG_TMP);
|
|
- /* rd = rd[63:msb+1]:rn[msb,lsb]:rd[lsb-1,0] */
|
|
- tcg_out_insn_bitReg(s, OPC_BIS, rd, rd, TCG_REG_TMP2);
|
|
+ /* restore SP to previous frame. */
|
|
+ /* addl $sp,PUSH_SIZE,$sp */
|
|
+ tcg_out_simple(s, OPC_ADDL_I, OPC_ADDL, TCG_REG_SP, TCG_REG_SP, PUSH_SIZE);
|
|
+
|
|
+ tcg_out_insn_jump(s, OPC_RET, TCG_REG_ZERO, TCG_REG_RA, noPara);
|
|
}
|
|
|
|
-/*sw
|
|
- * get val_s64(rn) * val_s64(rm) -> res_128
|
|
- * res[127:64] -> rd
|
|
- * warn:maybe rd=rn or rm
|
|
- */
|
|
-static void tcg_out_mulsh64(TCGContext *s, TCGReg rd, TCGReg rn, TCGReg rm)
|
|
+static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
|
|
{
|
|
- tcg_out_insn_simpleReg(s, OPC_UMULH, TCG_REG_TMP, rn, rm);
|
|
-
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, rn, 63);
|
|
- tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rm);
|
|
- tcg_out_insn_simpleReg(s, OPC_SUBL, TCG_REG_TMP, TCG_REG_TMP, TCG_REG_TMP2);
|
|
-
|
|
- tcg_out_insn_bitImm(s, OPC_SRL_I, TCG_REG_TMP2, rm, 63);
|
|
- tcg_out_insn_complexReg(s, OPC_SELEQ, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_ZERO, rn);
|
|
- tcg_out_insn_simpleReg(s, OPC_SUBL, rd, TCG_REG_TMP, TCG_REG_TMP2);
|
|
+ int i;
|
|
+ for (i = 0; i < count; ++i) {
|
|
+ p[i] = OPC_NOP;
|
|
+ }
|
|
}
|
|
|
|
typedef struct {
|
|
@@ -2071,9 +2424,11 @@ typedef struct {
|
|
uint8_t fde_reg_ofs[8 * 2];
|
|
} DebugFrame;
|
|
|
|
+/*
|
|
+ * GDB doesn't appear to require proper setting of ELF_HOST_FLAGS,
|
|
+ * which is good because they're really quite complicated for SW64.
|
|
+ */
|
|
#define ELF_HOST_MACHINE EM_SW_64
|
|
-/* GDB doesn't appear to require proper setting of ELF_HOST_FLAGS,
|
|
- which is good because they're really quite complicated for SW_64. */
|
|
|
|
static const DebugFrame debug_frame = {
|
|
.h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
|
|
diff --git a/tcg/sw64/tcg-target.h b/tcg/sw64/tcg-target.h
|
|
index 3093e4fece..91681a0c75 100755
|
|
--- a/tcg/sw64/tcg-target.h
|
|
+++ b/tcg/sw64/tcg-target.h
|
|
@@ -119,5 +119,8 @@ typedef enum {
|
|
#define TCG_TARGET_HAS_MEMORY_BSWAP 0
|
|
/* optional instructions */
|
|
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t, uintptr_t);
|
|
+#ifdef CONFIG_SOFTMMU
|
|
+#define TCG_TARGET_NEED_LDST_LABELS
|
|
+#endif
|
|
#define TCG_TARGET_NEED_POOL_LABELS
|
|
#endif /* SW_64_TCG_TARGET_H */
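
Defining TCG_TARGET_NEED_LDST_LABELS for softmmu builds pulls in tcg-ldst.c.inc, the generic bookkeeping behind the add_qemu_ldst_label calls in the softmmu load/store paths earlier in the patch: each fast-path miss branch gets recorded so the out-of-line helper call can be emitted afterwards and the branch patched to reach it. Roughly the shape of such a record, sketched below with made-up field names (this is not QEMU's actual TCGLabelQemuLdst layout):

    /* Illustrative only: what gets remembered per fast-path miss branch. */
    #include <stdint.h>
    #include <stdbool.h>

    struct ldst_label_sketch {
        bool     is_ld;        /* load or store */
        uint32_t oi;           /* MemOpIdx: access size, sign, mmu index */
        int      data_reg;     /* value register */
        int      addr_reg;     /* guest address register */
        void    *raddr;        /* return point back in the fast path */
        void    *label_ptr;    /* the conditional branch to patch */
    };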
|
|
--
|
|
2.41.0.windows.1