sim-card
/
qemu
Archived
10
0
Fork 0

HPPA (PA-RISC) host support

(Stuart Brady)


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4199 c046a42c-6fe2-441c-8c8c-71466251a162
This commit is contained in:
aurel32 2008-04-12 20:14:54 +00:00
parent 339dea2774
commit f54b3f920f
17 changed files with 4505 additions and 13 deletions

View File

@ -128,6 +128,11 @@ ifeq ($(ARCH),alpha)
CFLAGS+=-msmall-data
endif
ifeq ($(ARCH),hppa)
OP_CFLAGS=-O1 -fno-delayed-branch
BASE_LDFLAGS+=-Wl,-T,$(SRC_PATH)/$(ARCH).ld
endif
ifeq ($(ARCH),ia64)
CFLAGS+=-mno-sdata
OP_CFLAGS+=-mno-sdata
@ -267,6 +272,9 @@ endif
ifeq ($(findstring sh4, $(TARGET_ARCH) $(ARCH)),sh4)
LIBOBJS+=sh4-dis.o
endif
ifeq ($(findstring hppa, $(TARGET_BASE_ARCH) $(ARCH)),hppa)
LIBOBJS+=hppa-dis.o
endif
ifeq ($(findstring s390, $(TARGET_ARCH) $(ARCH)),s390)
LIBOBJS+=s390-dis.o
endif

7
configure vendored
View File

@ -50,6 +50,9 @@ case "$cpu" in
cris)
cpu="cris"
;;
parisc|parisc64)
cpu="hppa"
;;
ia64)
cpu="ia64"
;;
@ -576,6 +579,7 @@ else
# if cross compiling, cannot launch a program, so make a static guess
if test "$cpu" = "armv4b" \
-o "$cpu" = "hppa" \
-o "$cpu" = "m68k" \
-o "$cpu" = "mips" \
-o "$cpu" = "mips64" \
@ -865,6 +869,9 @@ elif test "$cpu" = "armv4l" ; then
elif test "$cpu" = "cris" ; then
echo "ARCH=cris" >> $config_mak
echo "#define HOST_CRIS 1" >> $config_h
elif test "$cpu" = "hppa" ; then
echo "ARCH=hppa" >> $config_mak
echo "#define HOST_HPPA 1" >> $config_h
elif test "$cpu" = "ia64" ; then
echo "ARCH=ia64" >> $config_mak
echo "#define HOST_IA64 1" >> $config_h

View File

@ -20,7 +20,7 @@
#ifndef CPU_ALL_H
#define CPU_ALL_H
#if defined(__arm__) || defined(__sparc__) || defined(__mips__)
#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
#define WORDS_ALIGNED
#endif
@ -952,6 +952,15 @@ static inline int64_t cpu_get_real_ticks(void)
return val;
}
#elif defined(__hppa__)
/*
 * Read the host cycle counter on PA-RISC hosts.
 *
 * %cr16 is the interval timer control register; only its low 32 bits
 * are read here, so the returned value wraps and callers must treat
 * it as a free-running counter (as on the other 32-bit hosts in this
 * #elif chain).
 */
static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    /* mfctl: move from control register %cr16 */
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}
#elif defined(__ia64)
static inline int64_t cpu_get_real_ticks(void)

View File

@ -657,6 +657,17 @@ int cpu_exec(CPUState *env1)
"o0", "o1", "o2", "o3", "o4", "o5",
"l0", "l1", "l2", "l3", "l4", "l5",
"l6", "l7");
#elif defined(__hppa__)
asm volatile ("ble 0(%%sr4,%1)\n"
"copy %%r31,%%r18\n"
"copy %%r28,%0\n"
: "=r" (T0)
: "r" (gen_func)
: "r1", "r2", "r3", "r4", "r5", "r6", "r7",
"r8", "r9", "r10", "r11", "r12", "r13",
"r18", "r19", "r20", "r21", "r22", "r23",
"r24", "r25", "r26", "r27", "r28", "r29",
"r30", "r31");
#elif defined(__arm__)
asm volatile ("mov pc, %0\n\t"
".global exec_loop\n\t"
@ -1488,6 +1499,24 @@ int cpu_signal_handler(int host_signum, void *pinfo,
is_write, &uc->uc_sigmask, puc);
}
#elif defined(__hppa__)
/*
 * Host signal handler for HPPA: extract the faulting PC from the
 * signal context and forward everything to the generic
 * handle_cpu_signal().
 */
int cpu_signal_handler(int host_signum, void *pinfo,
                       void *puc)
{
    struct siginfo *info = pinfo;
    struct ucontext *uc = puc;
    unsigned long pc;
    int is_write;

    /* sc_iaoq[0] is the front element of the instruction address
       queue, i.e. the address of the faulting instruction.  */
    pc = uc->uc_mcontext.sc_iaoq[0];
    /* FIXME: compute is_write */
    is_write = 0;
    return handle_cpu_signal(pc, (unsigned long)info->si_addr,
                             is_write,
                             &uc->uc_sigmask, puc);
}
#else
#error host CPU specific signal handler needed

View File

@ -157,6 +157,10 @@ enum bfd_architecture
#define bfd_mach_ppc_7400 7400
bfd_arch_rs6000, /* IBM RS/6000 */
bfd_arch_hppa, /* HP PA RISC */
#define bfd_mach_hppa10 10
#define bfd_mach_hppa11 11
#define bfd_mach_hppa20 20
#define bfd_mach_hppa20w 25
bfd_arch_d10v, /* Mitsubishi D10V */
bfd_arch_z8k, /* Zilog Z8000 */
#define bfd_mach_z8001 1

View File

@ -279,6 +279,8 @@ void disas(FILE *out, void *code, unsigned long size)
print_insn = print_insn_m68k;
#elif defined(__s390__)
print_insn = print_insn_s390;
#elif defined(__hppa__)
print_insn = print_insn_hppa;
#else
fprintf(out, "0x%lx: Asm output not supported on this arch\n",
(long) code);

View File

@ -124,6 +124,11 @@ extern int printf(const char *, ...);
#define AREG1 "r4"
#define AREG2 "r5"
#define AREG3 "r6"
#elif defined(__hppa__)
#define AREG0 "r17"
#define AREG1 "r14"
#define AREG2 "r15"
#define AREG3 "r16"
#elif defined(__mips__)
#define AREG0 "fp"
#define AREG1 "s0"
@ -279,6 +284,8 @@ extern int __op_jmp0, __op_jmp1, __op_jmp2, __op_jmp3;
#elif defined(__mips__)
#define EXIT_TB() asm volatile ("jr $ra")
#define GOTO_LABEL_PARAM(n) asm volatile (".set noat; la $1, " ASM_NAME(__op_gen_label) #n "; jr $1; .set at")
#elif defined(__hppa__)
#define GOTO_LABEL_PARAM(n) asm volatile ("b,n " ASM_NAME(__op_gen_label) #n)
#else
#error unsupported CPU
#endif

129
dyngen.c
View File

@ -117,6 +117,13 @@
#define elf_check_arch(x) ((x) == EM_68K)
#define ELF_USES_RELOCA
#elif defined(HOST_HPPA)
#define ELF_CLASS ELFCLASS32
#define ELF_ARCH EM_PARISC
#define elf_check_arch(x) ((x) == EM_PARISC)
#define ELF_USES_RELOCA
#elif defined(HOST_MIPS)
#define ELF_CLASS ELFCLASS32
@ -1223,7 +1230,7 @@ int get_reloc_expr(char *name, int name_size, const char *sym_name)
snprintf(name, name_size, "param%s", p);
return 1;
} else {
#ifdef HOST_SPARC
#if defined(HOST_SPARC) || defined(HOST_HPPA)
if (sym_name[0] == '.')
snprintf(name, name_size,
"(long)(&__dot_%s)",
@ -1661,6 +1668,43 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
error("rts expected at the end of %s", name);
copy_size = p - p_start;
}
#elif defined(HOST_HPPA)
{
uint8_t *p;
p = p_start;
while (p < p_end) {
uint32_t insn = get32((uint32_t *)p);
if (insn == 0x6bc23fd9 || /* stw rp,-14(sp) */
insn == 0x08030241 || /* copy r3,r1 */
insn == 0x081e0243 || /* copy sp,r3 */
(insn & 0xffffc000) == 0x37de0000 || /* ldo x(sp),sp */
(insn & 0xffffc000) == 0x6fc10000) /* stwm r1,x(sp) */
p += 4;
else
break;
}
start_offset += p - p_start;
p_start = p;
p = p_end - 4;
while (p > p_start) {
uint32_t insn = get32((uint32_t *)p);
if ((insn & 0xffffc000) == 0x347e0000 || /* ldo x(r3),sp */
(insn & 0xffe0c000) == 0x4fc00000 || /* ldwm x(sp),rx */
(insn & 0xffffc000) == 0x37de0000 || /* ldo x(sp),sp */
insn == 0x48623fd9 || /* ldw -14(r3),rp */
insn == 0xe840c000 || /* bv r0(rp) */
insn == 0xe840c002) /* bv,n r0(rp) */
p -= 4;
else
break;
}
p += 4;
if (p <= p_start)
error("empty code for %s", name);
copy_size = p - p_start;
}
#elif defined(HOST_MIPS) || defined(HOST_MIPS64)
{
#define INSN_RETURN 0x03e00008
@ -1746,7 +1790,7 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
!strstart(sym_name, "__op_param", NULL) &&
!strstart(sym_name, "__op_jmp", NULL) &&
!strstart(sym_name, "__op_gen_label", NULL)) {
#if defined(HOST_SPARC)
#if defined(HOST_SPARC) || defined(HOST_HPPA)
if (sym_name[0] == '.') {
fprintf(outfile,
"extern char __dot_%s __asm__(\"%s\");\n",
@ -1774,8 +1818,13 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
}
}
#ifdef __hppa__
fprintf(outfile, " memcpy(gen_code_ptr, (void *)((char *)__canonicalize_funcptr_for_compare(%s)+%d), %d);\n",
name, (int)(start_offset - offset), copy_size);
#else
fprintf(outfile, " memcpy(gen_code_ptr, (void *)((char *)&%s+%d), %d);\n",
name, (int)(start_offset - offset), copy_size);
#endif
/* emit code offset information */
{
@ -2581,6 +2630,82 @@ void gen_code(const char *name, host_ulong offset, host_ulong size,
}
}
}
#elif defined(HOST_HPPA)
{
char relname[256];
int type, is_label;
int addend;
int reloc_offset;
for (i = 0, rel = relocs; i < nb_relocs; i++, rel++) {
if (rel->r_offset >= start_offset &&
rel->r_offset < start_offset + copy_size) {
sym_name = get_rel_sym_name(rel);
sym_name = strtab + symtab[ELF32_R_SYM(rel->r_info)].st_name;
is_label = get_reloc_expr(relname, sizeof(relname), sym_name);
type = ELF32_R_TYPE(rel->r_info);
addend = rel->r_addend;
reloc_offset = rel->r_offset - start_offset;
if (is_label) {
switch (type) {
case R_PARISC_PCREL17F:
fprintf(outfile,
" tcg_out_reloc(s, gen_code_ptr + %d, %d, %s, %d);\n",
reloc_offset, type, relname, addend);
break;
default:
error("unsupported hppa label relocation (%d)", type);
}
} else {
switch (type) {
case R_PARISC_DIR21L:
fprintf(outfile,
" hppa_patch21l((uint32_t *)(gen_code_ptr + %d), %s, %d);\n",
reloc_offset, relname, addend);
break;
case R_PARISC_DIR14R:
fprintf(outfile,
" hppa_patch14r((uint32_t *)(gen_code_ptr + %d), %s, %d);\n",
reloc_offset, relname, addend);
break;
case R_PARISC_PCREL17F:
if (strstart(sym_name, "__op_gen_label", NULL)) {
fprintf(outfile,
" hppa_patch17f((uint32_t *)(gen_code_ptr + %d), %s, %d);\n",
reloc_offset, relname, addend);
} else {
fprintf(outfile,
" HPPA_RECORD_BRANCH(hppa_stubs, (uint32_t *)(gen_code_ptr + %d), %s);\n",
reloc_offset, relname);
}
break;
case R_PARISC_DPREL21L:
if (strstart(sym_name, "__op_param", &p))
fprintf(outfile,
" hppa_load_imm21l((uint32_t *)(gen_code_ptr + %d), param%s, %d);\n",
reloc_offset, p, addend);
else
fprintf(outfile,
" hppa_patch21l_dprel((uint32_t *)(gen_code_ptr + %d), %s, %d);\n",
reloc_offset, relname, addend);
break;
case R_PARISC_DPREL14R:
if (strstart(sym_name, "__op_param", &p))
fprintf(outfile,
" hppa_load_imm14r((uint32_t *)(gen_code_ptr + %d), param%s, %d);\n",
reloc_offset, p, addend);
else
fprintf(outfile,
" hppa_patch14r_dprel((uint32_t *)(gen_code_ptr + %d), %s, %d);\n",
reloc_offset, relname, addend);
break;
default:
error("unsupported hppa relocation (%d)", type);
}
}
}
}
}
#elif defined(HOST_MIPS) || defined(HOST_MIPS64)
{
for (i = 0, rel = relocs; i < nb_relocs; i++, rel++) {

2834
hppa-dis.c Normal file

File diff suppressed because it is too large Load Diff

214
hppa.ld Normal file
View File

@ -0,0 +1,214 @@
/* Default linker script, for normal executables */
OUTPUT_FORMAT("elf32-hppa-linux", "elf32-hppa-linux",
"elf32-hppa-linux")
OUTPUT_ARCH(hppa:hppa1.1)
ENTRY(_start)
SEARCH_DIR("/usr/hppa-linux-gnu/lib"); SEARCH_DIR("/usr/local/lib"); SEARCH_DIR("/lib"); SEARCH_DIR("/usr/lib");
SECTIONS
{
/* Read-only sections, merged into text segment: */
PROVIDE (__executable_start = 0x60000000); . = 0x60000000 + SIZEOF_HEADERS;
.interp : { *(.interp) }
.hash : { *(.hash) }
.dynsym : { *(.dynsym) }
.dynstr : { *(.dynstr) }
.gnu.version : { *(.gnu.version) }
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
.rel.init : { *(.rel.init) }
.rela.init : { *(.rela.init) }
.rel.text : { *(.rel.text .rel.text.* .rel.gnu.linkonce.t.*) }
.rela.text : { *(.rela.text .rela.text.* .rela.gnu.linkonce.t.*) }
.rel.fini : { *(.rel.fini) }
.rela.fini : { *(.rela.fini) }
.rel.rodata : { *(.rel.rodata .rel.rodata.* .rel.gnu.linkonce.r.*) }
.rela.rodata : { *(.rela.rodata .rela.rodata.* .rela.gnu.linkonce.r.*) }
.rel.data.rel.ro : { *(.rel.data.rel.ro* .rel.gnu.linkonce.d.rel.ro.*) }
.rela.data.rel.ro : { *(.rela.data.rel.ro* .rela.gnu.linkonce.d.rel.ro.*) }
.rel.data : { *(.rel.data .rel.data.* .rel.gnu.linkonce.d.*) }
.rela.data : { *(.rela.data .rela.data.* .rela.gnu.linkonce.d.*) }
.rel.tdata : { *(.rel.tdata .rel.tdata.* .rel.gnu.linkonce.td.*) }
.rela.tdata : { *(.rela.tdata .rela.tdata.* .rela.gnu.linkonce.td.*) }
.rel.tbss : { *(.rel.tbss .rel.tbss.* .rel.gnu.linkonce.tb.*) }
.rela.tbss : { *(.rela.tbss .rela.tbss.* .rela.gnu.linkonce.tb.*) }
.rel.ctors : { *(.rel.ctors) }
.rela.ctors : { *(.rela.ctors) }
.rel.dtors : { *(.rel.dtors) }
.rela.dtors : { *(.rela.dtors) }
.rel.got : { *(.rel.got) }
.rela.got : { *(.rela.got) }
.rel.sdata : { *(.rel.sdata .rel.sdata.* .rel.gnu.linkonce.s.*) }
.rela.sdata : { *(.rela.sdata .rela.sdata.* .rela.gnu.linkonce.s.*) }
.rel.sbss : { *(.rel.sbss .rel.sbss.* .rel.gnu.linkonce.sb.*) }
.rela.sbss : { *(.rela.sbss .rela.sbss.* .rela.gnu.linkonce.sb.*) }
.rel.sdata2 : { *(.rel.sdata2 .rel.sdata2.* .rel.gnu.linkonce.s2.*) }
.rela.sdata2 : { *(.rela.sdata2 .rela.sdata2.* .rela.gnu.linkonce.s2.*) }
.rel.sbss2 : { *(.rel.sbss2 .rel.sbss2.* .rel.gnu.linkonce.sb2.*) }
.rela.sbss2 : { *(.rela.sbss2 .rela.sbss2.* .rela.gnu.linkonce.sb2.*) }
.rel.bss : { *(.rel.bss .rel.bss.* .rel.gnu.linkonce.b.*) }
.rela.bss : { *(.rela.bss .rela.bss.* .rela.gnu.linkonce.b.*) }
.rel.plt : { *(.rel.plt) }
.rela.plt : { *(.rela.plt) }
.init :
{
KEEP (*(.init))
} =0x08000240
.text :
{
*(.text .stub .text.* .gnu.linkonce.t.*)
KEEP (*(.text.*personality*))
/* .gnu.warning sections are handled specially by elf32.em. */
*(.gnu.warning)
} =0x08000240
.fini :
{
KEEP (*(.fini))
} =0x08000240
PROVIDE (__etext = .);
PROVIDE (_etext = .);
PROVIDE (etext = .);
.rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
.rodata1 : { *(.rodata1) }
.sdata2 :
{
*(.sdata2 .sdata2.* .gnu.linkonce.s2.*)
}
.sbss2 : { *(.sbss2 .sbss2.* .gnu.linkonce.sb2.*) }
.PARISC.unwind : { *(.PARISC.unwind) }
.eh_frame_hdr : { *(.eh_frame_hdr) }
.eh_frame : ONLY_IF_RO { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RO { *(.gcc_except_table .gcc_except_table.*) }
/* Adjust the address for the data segment. We want to adjust up to
the same address within the page on the next page up. */
. = ALIGN(0x10000) + (. & (0x10000 - 1));
/* Exception handling */
.eh_frame : ONLY_IF_RW { KEEP (*(.eh_frame)) }
.gcc_except_table : ONLY_IF_RW { *(.gcc_except_table .gcc_except_table.*) }
/* Thread Local Storage sections */
.tdata : { *(.tdata .tdata.* .gnu.linkonce.td.*) }
.tbss : { *(.tbss .tbss.* .gnu.linkonce.tb.*) *(.tcommon) }
.preinit_array :
{
PROVIDE_HIDDEN (__preinit_array_start = .);
KEEP (*(.preinit_array))
PROVIDE_HIDDEN (__preinit_array_end = .);
}
.init_array :
{
PROVIDE_HIDDEN (__init_array_start = .);
KEEP (*(SORT(.init_array.*)))
KEEP (*(.init_array))
PROVIDE_HIDDEN (__init_array_end = .);
}
.fini_array :
{
PROVIDE_HIDDEN (__fini_array_start = .);
KEEP (*(.fini_array))
KEEP (*(SORT(.fini_array.*)))
PROVIDE_HIDDEN (__fini_array_end = .);
}
.ctors :
{
/* gcc uses crtbegin.o to find the start of
the constructors, so we make sure it is
first. Because this is a wildcard, it
doesn't matter if the user does not
actually link against crtbegin.o; the
linker won't look for a file to match a
wildcard. The wildcard also means that it
doesn't matter which directory crtbegin.o
is in. */
KEEP (*crtbegin*.o(.ctors))
/* We don't want to include the .ctor section from
the crtend.o file until after the sorted ctors.
The .ctor section from the crtend file contains the
end of ctors marker and it must be last */
KEEP (*(EXCLUDE_FILE (*crtend*.o ) .ctors))
KEEP (*(SORT(.ctors.*)))
KEEP (*(.ctors))
}
.dtors :
{
KEEP (*crtbegin*.o(.dtors))
KEEP (*(EXCLUDE_FILE (*crtend*.o ) .dtors))
KEEP (*(SORT(.dtors.*)))
KEEP (*(.dtors))
}
.jcr : { KEEP (*(.jcr)) }
.data.rel.ro : { *(.data.rel.ro.local* .gnu.linkonce.d.rel.ro.local.*) *(.data.rel.ro* .gnu.linkonce.d.rel.ro.*) }
.dynamic : { *(.dynamic) }
.data :
{
PROVIDE ($global$ = .);
*(.data .data.* .gnu.linkonce.d.*)
KEEP (*(.gnu.linkonce.d.*personality*))
SORT(CONSTRUCTORS)
}
.data1 : { *(.data1) }
.plt : { *(.plt) }
.got : { *(.got.plt) *(.got) }
/* We want the small data sections together, so single-instruction offsets
can access them all, and initialized data all before uninitialized, so
we can shorten the on-disk segment size. */
.sdata :
{
*(.sdata .sdata.* .gnu.linkonce.s.*)
}
_edata = .; PROVIDE (edata = .);
__bss_start = .;
.sbss :
{
*(.dynsbss)
*(.sbss .sbss.* .gnu.linkonce.sb.*)
*(.scommon)
}
.bss :
{
*(.dynbss)
*(.bss .bss.* .gnu.linkonce.b.*)
*(COMMON)
/* Align here to ensure that the .bss section occupies space up to
_end. Align after .bss to ensure correct alignment even if the
.bss section disappears because there are no input sections.
FIXME: Why do we need it? When there is no .bss section, we don't
pad the .data section. */
. = ALIGN(. != 0 ? 32 / 8 : 1);
}
. = ALIGN(32 / 8);
. = ALIGN(32 / 8);
_end = .; PROVIDE (end = .);
/* Stabs debugging sections. */
.stab 0 : { *(.stab) }
.stabstr 0 : { *(.stabstr) }
.stab.excl 0 : { *(.stab.excl) }
.stab.exclstr 0 : { *(.stab.exclstr) }
.stab.index 0 : { *(.stab.index) }
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
/* DWARF debug sections.
Symbols in the DWARF debugging sections are relative to the beginning
of the section so we begin them at 0. */
/* DWARF 1 */
.debug 0 : { *(.debug) }
.line 0 : { *(.line) }
/* GNU DWARF 1 extensions */
.debug_srcinfo 0 : { *(.debug_srcinfo) }
.debug_sfnames 0 : { *(.debug_sfnames) }
/* DWARF 1.1 and DWARF 2 */
.debug_aranges 0 : { *(.debug_aranges) }
.debug_pubnames 0 : { *(.debug_pubnames) }
/* DWARF 2 */
.debug_info 0 : { *(.debug_info .gnu.linkonce.wi.*) }
.debug_abbrev 0 : { *(.debug_abbrev) }
.debug_line 0 : { *(.debug_line) }
.debug_frame 0 : { *(.debug_frame) }
.debug_str 0 : { *(.debug_str) }
.debug_loc 0 : { *(.debug_loc) }
.debug_macinfo 0 : { *(.debug_macinfo) }
/* SGI/MIPS DWARF 2 extensions */
.debug_weaknames 0 : { *(.debug_weaknames) }
.debug_funcnames 0 : { *(.debug_funcnames) }
.debug_typenames 0 : { *(.debug_typenames) }
.debug_varnames 0 : { *(.debug_varnames) }
/DISCARD/ : { *(.note.GNU-stack) }
}

980
tcg/hppa/tcg-target.c Normal file
View File

@ -0,0 +1,980 @@
/*
* Tiny Code Generator for QEMU
*
* Copyright (c) 2008 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/* Printable names for the 32 PA-RISC general registers, using the
   standard ABI aliases (%rp = r2, %dp = r27, %ret0/%ret1 = r28/r29,
   %sp = r30) where they apply.  */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "%r0",
    "%r1",
    "%rp",
    "%r3",
    "%r4",
    "%r5",
    "%r6",
    "%r7",
    "%r8",
    "%r9",
    "%r10",
    "%r11",
    "%r12",
    "%r13",
    "%r14",
    "%r15",
    "%r16",
    "%r17",
    "%r18",
    "%r19",
    "%r20",
    "%r21",
    "%r22",
    "%r23",
    "%r24",
    "%r25",
    "%r26",
    "%dp",
    "%ret0",
    "%ret1",
    "%sp",
    "%r31",
};
/* Register allocation preference order: callee-saved registers
   %r4..%r13 first, then %r17/%r14..%r16 last (those are reserved as
   AREG0..AREG3 by dyngen-exec.h on this host).  */
static const int tcg_target_reg_alloc_order[] = {
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,

    TCG_REG_R17,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
};

/* PA-RISC calling convention: first four word arguments go in
   %r26..%r23 (arg0 is %r26, i.e. descending register numbers).  */
static const int tcg_target_call_iarg_regs[4] = {
    TCG_REG_R26,
    TCG_REG_R25,
    TCG_REG_R24,
    TCG_REG_R23,
};

/* Return values come back in %ret0 (and %ret1 for 64-bit pairs).  */
static const int tcg_target_call_oarg_regs[2] = {
    TCG_REG_RET0,
    TCG_REG_RET1,
};
/*
 * Resolve a relocation recorded against generated code.  The only
 * relocation kind this backend ever emits is the 17-bit PC-relative
 * branch field; anything else is a backend bug.
 */
static void patch_reloc(uint8_t *code_ptr, int type,
                        tcg_target_long value, tcg_target_long addend)
{
    if (type != R_PARISC_PCREL17F)
        tcg_abort();
    hppa_patch17f((uint32_t *)code_ptr, value, addend);
}
/* Maximum number of registers used to pass function call arguments:
   PA-RISC passes the first four words in %r26..%r23 regardless of
   the call flags.  */
static inline int tcg_target_get_call_iarg_regs_count(int flags)
{
    return 4;
}
/* Parse one target-specific operand-constraint letter, advancing
   *pct_str past it.  Returns 0 on success, -1 on an unknown letter.  */
int target_parse_constraint(TCGArgConstraint *ct, const char **pct_str)
{
    const char *ct_str;

    ct_str = *pct_str;
    switch (ct_str[0]) {
    case 'r':
        /* any general register */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        break;
    case 'L': /* qemu_ld/st constraint */
        /* any general register except the argument registers
           %r26..%r23, which the softmmu helper-call sequence
           clobbers */
        ct->ct |= TCG_CT_REG;
        tcg_regset_set32(ct->u.regs, 0, 0xffffffff);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R26);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R25);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R24);
        tcg_regset_reset_reg(ct->u.regs, TCG_REG_R23);
        break;
    default:
        return -1;
    }
    ct_str++;
    *pct_str = ct_str;
    return 0;
}
/* Test whether constant 'val' satisfies the operand constraint.
   TODO: no constant constraints are accepted yet, so every constant
   is forced into a register by the register allocator.  (The previous
   version read arg_ct->ct into a local that was never used, which
   triggers a set-but-unused warning; the dead code is removed.)  */
static inline int tcg_target_const_match(tcg_target_long val,
                                         const TCGArgConstraint *arg_ct)
{
    return 0;
}
/* Field encoders for the 32-bit PA-RISC instruction word: the major
   opcode lives in bits 31..26, register fields t/r1/r2 and the
   various extension (sub-opcode) fields at the positions below.  */
#define INSN_OP(x)       ((x) << 26)
#define INSN_EXT3BR(x)   ((x) << 13)
#define INSN_EXT3SH(x)   ((x) << 10)
#define INSN_EXT4(x)     ((x) << 6)
#define INSN_EXT5(x)     (x)
#define INSN_EXT6(x)     ((x) << 6)
#define INSN_EXT7(x)     ((x) << 6)
#define INSN_EXT8A(x)    ((x) << 6)
#define INSN_EXT8B(x)    ((x) << 5)
#define INSN_T(x)        (x)
#define INSN_R1(x)       ((x) << 16)
#define INSN_R2(x)       ((x) << 21)
/* shift/deposit/extract helpers: lengths and positions are encoded
   complemented relative to bit 31 in the instruction word */
#define INSN_DEP_LEN(x)  (32 - (x))
#define INSN_SHDEP_CP(x) ((31 - (x)) << 5)
#define INSN_SHDEP_P(x)  ((x) << 5)
#define INSN_COND(x)     ((x) << 13)

/* compare/branch condition codes */
#define COND_NEVER 0
#define COND_EQUAL 1
#define COND_LT    2
#define COND_LTEQ  3
#define COND_LTU   4
#define COND_LTUEQ 5
#define COND_SV    6
#define COND_OD    7

/* Logical ADD */
#define ARITH_ADD  (INSN_OP(0x02) | INSN_EXT6(0x28))
#define ARITH_AND  (INSN_OP(0x02) | INSN_EXT6(0x08))
#define ARITH_OR   (INSN_OP(0x02) | INSN_EXT6(0x09))
#define ARITH_XOR  (INSN_OP(0x02) | INSN_EXT6(0x0a))
#define ARITH_SUB  (INSN_OP(0x02) | INSN_EXT6(0x10))

/* shift-double, deposit and extract instructions */
#define SHD        (INSN_OP(0x34) | INSN_EXT3SH(2))
#define VSHD       (INSN_OP(0x34) | INSN_EXT3SH(0))
#define DEP        (INSN_OP(0x35) | INSN_EXT3SH(3))
#define ZDEP       (INSN_OP(0x35) | INSN_EXT3SH(2))
#define ZVDEP      (INSN_OP(0x35) | INSN_EXT3SH(0))
#define EXTRU      (INSN_OP(0x34) | INSN_EXT3SH(6))
#define EXTRS      (INSN_OP(0x34) | INSN_EXT3SH(7))
#define VEXTRS     (INSN_OP(0x34) | INSN_EXT3SH(5))

#define SUBI       (INSN_OP(0x25))
#define MTCTL      (INSN_OP(0x00) | INSN_EXT8B(0xc2))

/* branches: BL (branch and link), BLE via %sr4, BV (branch vectored),
   BV_N with the nullify bit set */
#define BL         (INSN_OP(0x3a) | INSN_EXT3BR(0))
#define BLE_SR4    (INSN_OP(0x39) | (1 << 13))
#define BV         (INSN_OP(0x3a) | INSN_EXT3BR(6))
#define BV_N       (INSN_OP(0x3a) | INSN_EXT3BR(6) | 2)

/* loads and stores with immediate displacement */
#define LDIL       (INSN_OP(0x08))
#define LDO        (INSN_OP(0x0d))
#define LDB        (INSN_OP(0x10))
#define LDH        (INSN_OP(0x11))
#define LDW        (INSN_OP(0x12))
#define LDWM       (INSN_OP(0x13))
#define STB        (INSN_OP(0x18))
#define STH        (INSN_OP(0x19))
#define STW        (INSN_OP(0x1a))
#define STWM       (INSN_OP(0x1b))

/* compare and branch (if true / if false) */
#define COMBT      (INSN_OP(0x20))
#define COMBF      (INSN_OP(0x22))
/*
 * Assemble a PA-RISC "low sign extension" immediate field: the sign
 * bit of the 'length'-bit value is stored in the least-significant
 * bit of the field and the remaining bits are shifted up by one; the
 * whole field is then placed at bit position 'start'.
 *
 * Fix: the mask is built from ~0u.  The previous '~0 << length'
 * left-shifted the signed value -1, which is undefined behaviour in
 * C; the unsigned form produces the identical mask for length < 32.
 */
static int lowsignext(uint32_t val, int start, int length)
{
    return (((val << 1) & ~(~0u << length)) |
            ((val >> (length - 1)) & 1)) << start;
}
/* Register-to-register move: ret = arg.  */
static inline void tcg_out_mov(TCGContext *s, int ret, int arg)
{
    /* PA1.1 defines COPY as OR r,0,t */
    tcg_out32(s, ARITH_OR | INSN_T(ret) | INSN_R1(arg) | INSN_R2(TCG_REG_R0));
    /* PA2.0 defines COPY as LDO 0(r),t
     * but hppa-dis.c is unaware of this definition */
    /* tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(arg) | reassemble_14(0)); */
}
/*
 * Load a 32-bit constant into 'ret': a single LDO off %r0 when the
 * value is a small non-negative immediate (<= 0x1fff), otherwise
 * LDIL for the left (top 21) part followed, if needed, by LDO for
 * the right (low 11) part.
 */
static inline void tcg_out_movi(TCGContext *s, TCGType type,
                                int ret, tcg_target_long arg)
{
    if (arg == (arg & 0x1fff)) {
        tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(TCG_REG_R0) |
                  reassemble_14(arg));
    } else {
        tcg_out32(s, LDIL | INSN_R2(ret) |
                  reassemble_21(lrsel((uint32_t)arg, 0)));
        /* skip the LDO when the low bits are all zero */
        if (arg & 0x7ff)
            tcg_out32(s, LDO | INSN_R1(ret) | INSN_R2(ret) |
                      reassemble_14(rrsel((uint32_t)arg, 0)));
    }
}
/* Load the 32-bit word at absolute address 'arg' into 'ret':
   LDIL forms the high part of the address in 'ret', then LDW loads
   through it with the low-part displacement.  */
static inline void tcg_out_ld_raw(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    tcg_out32(s, LDIL | INSN_R2(ret) |
              reassemble_21(lrsel((uint32_t)arg, 0)));
    tcg_out32(s, LDW | INSN_R1(ret) | INSN_R2(ret) |
              reassemble_14(rrsel((uint32_t)arg, 0)));
}
/* Load a pointer from an absolute address; on this 32-bit host that
   is the same as a raw word load.  */
static inline void tcg_out_ld_ptr(TCGContext *s, int ret,
                                  tcg_target_long arg)
{
    tcg_out_ld_raw(s, ret, arg);
}
/*
 * Emit load/store instruction 'op' accessing offset(addr) with data
 * register 'ret'.  NOTE(review): the guard only accepts offsets whose
 * value survives masking with 0xfff, although the displacement field
 * written by reassemble_14 is 14 bits -- conservative; confirm before
 * relying on larger offsets.  Unsupported offsets abort.
 */
static inline void tcg_out_ldst(TCGContext *s, int ret, int addr, int offset,
                                int op)
{
    if (offset == (offset & 0xfff))
        tcg_out32(s, op | INSN_R1(ret) | INSN_R2(addr) |
                  reassemble_14(offset));
    else {
        fprintf(stderr, "unimplemented %s with offset %d\n", __func__, offset);
        tcg_abort();
    }
}
/* Generic TCG load (ret = *(arg1 + arg2)) -- not implemented yet for
   this backend; aborts if reached.  */
static inline void tcg_out_ld(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    fprintf(stderr, "unimplemented %s\n", __func__);
    tcg_abort();
}
/* Generic TCG store (*(arg1 + arg2) = ret) -- not implemented yet for
   this backend; aborts if reached.  */
static inline void tcg_out_st(TCGContext *s, TCGType type, int ret,
                              int arg1, tcg_target_long arg2)
{
    fprintf(stderr, "unimplemented %s\n", __func__);
    tcg_abort();
}
/* Three-register ALU operation: t = r1 <op> r2.  */
static inline void tcg_out_arith(TCGContext *s, int t, int r1, int r2, int op)
{
    tcg_out32(s, op | INSN_T(t) | INSN_R1(r1) | INSN_R2(r2));
}
/* ALU operation with an immediate second operand: the constant is
   first materialized in %r20, so %r20 is clobbered.  */
static inline void tcg_out_arithi(TCGContext *s, int t, int r1,
                                  tcg_target_long val, int op)
{
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R20, val);
    tcg_out_arith(s, t, r1, TCG_REG_R20, op);
}
/* reg += val (clobbers %r20 via tcg_out_arithi).  */
static inline void tcg_out_addi(TCGContext *s, int reg, tcg_target_long val)
{
    tcg_out_arithi(s, reg, reg, val, ARITH_ADD);
}
/* No-op: OR %r0,%r0,%r0 (writes to %r0 are discarded).  */
static inline void tcg_out_nop(TCGContext *s)
{
    tcg_out32(s, ARITH_OR | INSN_T(TCG_REG_R0) | INSN_R1(TCG_REG_R0) |
              INSN_R2(TCG_REG_R0));
}
/* Sign-extend the low 8 bits of 'arg' into 'ret' (EXTRS).  */
static inline void tcg_out_ext8s(TCGContext *s, int ret, int arg) {
    tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) |
              INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
}
/* Sign-extend the low 16 bits of 'arg' into 'ret' (EXTRS).  */
static inline void tcg_out_ext16s(TCGContext *s, int ret, int arg) {
    tcg_out32(s, EXTRS | INSN_R1(ret) | INSN_R2(arg) |
              INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
}
/* Byte-swap the low 16 bits of 'arg' into 'ret': deposit duplicates
   the low byte, then a shift-double recombines the two bytes in
   swapped order.  Operates in place when ret == arg.  */
static inline void tcg_out_bswap16(TCGContext *s, int ret, int arg) {
    if(ret != arg)
        tcg_out_mov(s, ret, arg);
    tcg_out32(s, DEP | INSN_R2(ret) | INSN_R1(ret) |
              INSN_SHDEP_CP(15) | INSN_DEP_LEN(8));
    tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(TCG_REG_R0) |
              INSN_R2(ret) | INSN_SHDEP_CP(8));
}
/* Byte-swap a full 32-bit word from 'arg' into 'ret' using the
   scratch register 'temp' (three-instruction SHD/DEP/SHD sequence;
   'arg' is left untouched).  */
static inline void tcg_out_bswap32(TCGContext *s, int ret, int arg, int temp) {
    tcg_out32(s, SHD | INSN_T(temp) | INSN_R1(arg) |
              INSN_R2(arg) | INSN_SHDEP_CP(16));
    tcg_out32(s, DEP | INSN_R2(temp) | INSN_R1(temp) |
              INSN_SHDEP_CP(15) | INSN_DEP_LEN(8));
    tcg_out32(s, SHD | INSN_T(ret) | INSN_R1(arg) |
              INSN_R2(temp) | INSN_SHDEP_CP(8));
}
/*
 * Emit a call to the C function 'func': load the (canonicalized)
 * function address high part into %r20 with LDIL, branch external
 * through %sr4 with BLE, then copy %r31 (where BLE leaves the return
 * address) into %rp.  Clobbers %r20 and %r31.
 */
static inline void tcg_out_call(TCGContext *s, void *func)
{
    uint32_t val = (uint32_t)__canonicalize_funcptr_for_compare(func);
    tcg_out32(s, LDIL | INSN_R2(TCG_REG_R20) |
              reassemble_21(lrsel(val, 0)));
    tcg_out32(s, BLE_SR4 | INSN_R2(TCG_REG_R20) |
              reassemble_17(rrsel(val, 0) >> 2));
    tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
}
#if defined(CONFIG_SOFTMMU)
/* Slow-path memory-access helpers provided by the softmmu code.
   The tables are indexed by access size log2: 0 = byte, 1 = halfword,
   2 = word, 3 = 64-bit.  */
extern void __ldb_mmu(void);
extern void __ldw_mmu(void);
extern void __ldl_mmu(void);
extern void __ldq_mmu(void);

extern void __stb_mmu(void);
extern void __stw_mmu(void);
extern void __stl_mmu(void);
extern void __stq_mmu(void);

static void *qemu_ld_helpers[4] = {
    __ldb_mmu,
    __ldw_mmu,
    __ldl_mmu,
    __ldq_mmu,
};

static void *qemu_st_helpers[4] = {
    __stb_mmu,
    __stw_mmu,
    __stl_mmu,
    __stq_mmu,
};
#endif
/*
 * Emit a guest memory load.  args = { data_reg [, data_reg2],
 * addr_reg [, addr_reg2], mem_index }; the low two bits of opc give
 * the access size log2, and bit 2 requests sign extension.  With
 * CONFIG_SOFTMMU an inline TLB compare is emitted with a slow-path
 * call to the __ld*_mmu helpers; 64-bit loads (opc == 3) are not
 * implemented yet and abort.
 */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint32_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint32_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc & 3;

    r0 = TCG_REG_R26;
    r1 = TCG_REG_R25;

#if defined(CONFIG_SOFTMMU)
    /* TLB lookup: r1 becomes a byte offset into tlb_table for this
       address' TLB entry, r0 the page-masked guest address to compare
       against the entry's addr_read tag.  */
    tcg_out_mov(s, r1, addr_reg);

    tcg_out_mov(s, r0, addr_reg);

    tcg_out32(s, SHD | INSN_T(r1) | INSN_R1(TCG_REG_R0) | INSN_R2(r1) |
              INSN_SHDEP_CP(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    tcg_out_arithi(s, r0, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
                   ARITH_AND);

    tcg_out_arithi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS,
                   ARITH_AND);

    tcg_out_arith(s, r1, r1, TCG_AREG0, ARITH_ADD);
    tcg_out_arithi(s, r1, r1,
                   offsetof(CPUState, tlb_table[mem_index][0].addr_read),
                   ARITH_ADD);

    tcg_out_ldst(s, TCG_REG_R20, r1, 0, LDW);

#if TARGET_LONG_BITS == 32
    /* if equal, jump to label1 */
    label1_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
              INSN_COND(COND_EQUAL));
    tcg_out_mov(s, r0, addr_reg); /* delay slot */
#else
    /* 64-bit guest address: compare both halves of the tag */
    /* if not equal, jump to label3 */
    label3_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, COMBF | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
              INSN_COND(COND_EQUAL));
    tcg_out_mov(s, r0, addr_reg); /* delay slot */

    tcg_out_ldst(s, TCG_REG_R20, r1, 4, LDW);

    /* if equal, jump to label1 */
    label1_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(addr_reg2) |
              INSN_COND(COND_EQUAL));
    tcg_out_nop(s); /* delay slot */

    /* label3: */
    *label3_ptr |= reassemble_12((uint32_t *)s->code_ptr - label3_ptr - 2);
#endif

    /* slow path: call the helper with (addr[, addr_hi], mem_index) in
       the argument registers */
#if TARGET_LONG_BITS == 32
    tcg_out_mov(s, TCG_REG_R26, addr_reg);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R25, mem_index);
#else
    tcg_out_mov(s, TCG_REG_R26, addr_reg);
    tcg_out_mov(s, TCG_REG_R25, addr_reg2);
    tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R24, mem_index);
#endif

    tcg_out_call(s, qemu_ld_helpers[s_bits]);

    /* move/extend the helper's return value into the data register */
    switch(opc) {
    case 0 | 4:
        tcg_out_ext8s(s, data_reg, TCG_REG_RET0);
        break;
    case 1 | 4:
        tcg_out_ext16s(s, data_reg, TCG_REG_RET0);
        break;
    case 0:
    case 1:
    case 2:
    default:
        tcg_out_mov(s, data_reg, TCG_REG_RET0);
        break;
    case 3:
        tcg_abort(); /* 64-bit loads not implemented */
        tcg_out_mov(s, data_reg, TCG_REG_RET0);
        tcg_out_mov(s, data_reg2, TCG_REG_RET1);
        break;
    }

    /* jump to label2 */
    label2_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, BL | INSN_R2(TCG_REG_R0) | 2);

    /* label1: fast path -- fetch the TLB addend and add it to the
       guest address to form the host address in r0 */
    *label1_ptr |= reassemble_12((uint32_t *)s->code_ptr - label1_ptr - 2);

    tcg_out_arithi(s, TCG_REG_R20, r1,
                   offsetof(CPUTLBEntry, addend) - offsetof(CPUTLBEntry, addr_read),
                   ARITH_ADD);
    tcg_out_ldst(s, TCG_REG_R20, TCG_REG_R20, 0, LDW);
    tcg_out_arith(s, r0, r0, TCG_REG_R20, ARITH_ADD);
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 0;
#else
    /* byte-swap after loading when guest and host endianness differ */
    bswap = 1;
#endif
    switch (opc) {
    case 0:
        tcg_out_ldst(s, data_reg, r0, 0, LDB);
        break;
    case 0 | 4:
        tcg_out_ldst(s, data_reg, r0, 0, LDB);
        tcg_out_ext8s(s, data_reg, data_reg);
        break;
    case 1:
        tcg_out_ldst(s, data_reg, r0, 0, LDH);
        if (bswap)
            tcg_out_bswap16(s, data_reg, data_reg);
        break;
    case 1 | 4:
        tcg_out_ldst(s, data_reg, r0, 0, LDH);
        if (bswap)
            tcg_out_bswap16(s, data_reg, data_reg);
        tcg_out_ext16s(s, data_reg, data_reg);
        break;
    case 2:
        tcg_out_ldst(s, data_reg, r0, 0, LDW);
        if (bswap)
            tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
        break;
    case 3:
        tcg_abort(); /* 64-bit loads not implemented */
        if (!bswap) {
            tcg_out_ldst(s, data_reg, r0, 0, LDW);
            tcg_out_ldst(s, data_reg2, r0, 4, LDW);
        } else {
            tcg_out_ldst(s, data_reg, r0, 4, LDW);
            tcg_out_bswap32(s, data_reg, data_reg, TCG_REG_R20);
            tcg_out_ldst(s, data_reg2, r0, 0, LDW);
            tcg_out_bswap32(s, data_reg2, data_reg2, TCG_REG_R20);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: */
    *label2_ptr |= reassemble_17((uint32_t *)s->code_ptr - label2_ptr - 2);
#endif
}
/* Emit code for a qemu_st (guest memory store) of size 2^opc bytes
 * (opc: 0 = byte, 1 = halfword, 2 = word, 3 = doubleword; the 64-bit
 * store paths below are unfinished and guarded by tcg_abort()).
 *
 * args layout: data_reg [data_reg2] addr_reg [addr_reg2] mem_index.
 *
 * With CONFIG_SOFTMMU an inline TLB probe is emitted: on a hit the
 * value is stored through the TLB addend, on a miss the store is
 * delegated to qemu_st_helpers[].  Without softmmu the guest address
 * is used directly as the host address.  The host is big-endian, so
 * little-endian targets need a byte swap before storing.
 */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, int opc)
{
    int addr_reg, data_reg, data_reg2, r0, r1, mem_index, s_bits, bswap;
#if defined(CONFIG_SOFTMMU)
    uint32_t *label1_ptr, *label2_ptr;
#endif
#if TARGET_LONG_BITS == 64
#if defined(CONFIG_SOFTMMU)
    uint32_t *label3_ptr;
#endif
    int addr_reg2;
#endif

    data_reg = *args++;
    if (opc == 3)
        data_reg2 = *args++;
    else
        data_reg2 = 0; /* suppress warning */
    addr_reg = *args++;
#if TARGET_LONG_BITS == 64
    addr_reg2 = *args++;
#endif
    mem_index = *args;
    s_bits = opc;

    /* r0/r1 double as scratch registers for the TLB probe and as the
       first two helper argument registers (%r26, %r25). */
    r0 = TCG_REG_R26;
    r1 = TCG_REG_R25;

#if defined(CONFIG_SOFTMMU)
    tcg_out_mov(s, r1, addr_reg);
    tcg_out_mov(s, r0, addr_reg);

    /* r1 = guest address shifted down to a (scaled) TLB index. */
    tcg_out32(s, SHD | INSN_T(r1) | INSN_R1(TCG_REG_R0) | INSN_R2(r1) |
              INSN_SHDEP_CP(TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS));

    /* r0 = page-aligned address; the low (1 << s_bits) - 1 bits are
       kept so an unaligned access fails the tag compare below. */
    tcg_out_arithi(s, r0, r0, TARGET_PAGE_MASK | ((1 << s_bits) - 1),
                   ARITH_AND);

    /* r1 = &env->tlb_table[mem_index][index].addr_write */
    tcg_out_arithi(s, r1, r1, (CPU_TLB_SIZE - 1) << CPU_TLB_ENTRY_BITS,
                   ARITH_AND);

    tcg_out_arith(s, r1, r1, TCG_AREG0, ARITH_ADD);
    tcg_out_arithi(s, r1, r1,
                   offsetof(CPUState, tlb_table[mem_index][0].addr_write),
                   ARITH_ADD);

    /* r20 = stored TLB tag, compared against the masked address. */
    tcg_out_ldst(s, TCG_REG_R20, r1, 0, LDW);

#if TARGET_LONG_BITS == 32
    /* if equal, jump to label1 (TLB hit) */
    label1_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
              INSN_COND(COND_EQUAL));
    tcg_out_mov(s, r0, addr_reg); /* delay slot: restore full address */
#else
    /* 64-bit guest address: both tag words must match. */
    /* if not equal, jump to label3 (TLB miss) */
    label3_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, COMBF | INSN_R1(TCG_REG_R20) | INSN_R2(r0) |
              INSN_COND(COND_EQUAL));
    tcg_out_mov(s, r0, addr_reg); /* delay slot */

    tcg_out_ldst(s, TCG_REG_R20, r1, 4, LDW);

    /* if equal, jump to label1 */
    label1_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, COMBT | INSN_R1(TCG_REG_R20) | INSN_R2(addr_reg2) |
              INSN_COND(COND_EQUAL));
    tcg_out_nop(s); /* delay slot */

    /* label3: */
    *label3_ptr |= reassemble_12((uint32_t *)s->code_ptr - label3_ptr - 2);
#endif

    /* TLB miss: marshal (addr[, addr_hi], data, mem_index) into the
       argument registers and call the store helper. */
    tcg_out_mov(s, TCG_REG_R26, addr_reg);
#if TARGET_LONG_BITS == 64
    tcg_out_mov(s, TCG_REG_R25, addr_reg2);
    if (opc == 3) {
        tcg_abort(); /* 64-bit helper call not implemented yet */
        tcg_out_mov(s, TCG_REG_R24, data_reg);
        tcg_out_mov(s, TCG_REG_R23, data_reg2);
        /* TODO: push mem_index */
        tcg_abort();
    } else {
        /* Zero-extend sub-word data into the argument register. */
        switch(opc) {
        case 0:
            tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R24) | INSN_R2(data_reg) |
                      INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
            break;
        case 1:
            tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R24) | INSN_R2(data_reg) |
                      INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_R24, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R23, mem_index);
    }
#else
    if (opc == 3) {
        tcg_abort(); /* 64-bit helper call not implemented yet */
        tcg_out_mov(s, TCG_REG_R25, data_reg);
        tcg_out_mov(s, TCG_REG_R24, data_reg2);
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R23, mem_index);
    } else {
        /* Zero-extend sub-word data into the argument register. */
        switch(opc) {
        case 0:
            tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R25) | INSN_R2(data_reg) |
                      INSN_SHDEP_P(31) | INSN_DEP_LEN(8));
            break;
        case 1:
            tcg_out32(s, EXTRU | INSN_R1(TCG_REG_R25) | INSN_R2(data_reg) |
                      INSN_SHDEP_P(31) | INSN_DEP_LEN(16));
            break;
        case 2:
            tcg_out_mov(s, TCG_REG_R25, data_reg);
            break;
        }
        tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_R24, mem_index);
    }
#endif
    tcg_out_call(s, qemu_st_helpers[s_bits]);

    /* jump to label2, skipping the inline fast-path store */
    label2_ptr = (uint32_t *)s->code_ptr;
    tcg_out32(s, BL | INSN_R2(TCG_REG_R0) | 2);

    /* label1: TLB hit — load the addend and form the host address. */
    *label1_ptr |= reassemble_12((uint32_t *)s->code_ptr - label1_ptr - 2);

    tcg_out_arithi(s, TCG_REG_R20, r1,
                   offsetof(CPUTLBEntry, addend) - offsetof(CPUTLBEntry, addr_write),
                   ARITH_ADD);
    tcg_out_ldst(s, TCG_REG_R20, TCG_REG_R20, 0, LDW);
    tcg_out_arith(s, r0, r0, TCG_REG_R20, ARITH_ADD);
#else
    r0 = addr_reg;
#endif

#ifdef TARGET_WORDS_BIGENDIAN
    bswap = 0;
#else
    bswap = 1; /* big-endian host storing for a little-endian target */
#endif
    switch (opc) {
    case 0:
        tcg_out_ldst(s, data_reg, r0, 0, STB);
        break;
    case 1:
        if (bswap) {
            tcg_out_bswap16(s, TCG_REG_R20, data_reg);
            data_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, data_reg, r0, 0, STH);
        break;
    case 2:
        if (bswap) {
            tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
            data_reg = TCG_REG_R20;
        }
        tcg_out_ldst(s, data_reg, r0, 0, STW);
        break;
    case 3:
        tcg_abort(); /* 64-bit fast-path store not implemented yet */
        if (!bswap) {
            tcg_out_ldst(s, data_reg, r0, 0, STW);
            tcg_out_ldst(s, data_reg2, r0, 4, STW);
        } else {
            tcg_out_bswap32(s, TCG_REG_R20, data_reg, TCG_REG_R20);
            tcg_out_ldst(s, TCG_REG_R20, r0, 4, STW);
            tcg_out_bswap32(s, TCG_REG_R20, data_reg2, TCG_REG_R20);
            tcg_out_ldst(s, TCG_REG_R20, r0, 0, STW);
        }
        break;
    default:
        tcg_abort();
    }

#if defined(CONFIG_SOFTMMU)
    /* label2: common fall-through point after the helper call. */
    *label2_ptr |= reassemble_17((uint32_t *)s->code_ptr - label2_ptr - 2);
#endif
}
/* Emit host code for a single TCG opcode.  Opcodes the backend does
 * not implement yet print a diagnostic and tcg_abort(). */
static inline void tcg_out_op(TCGContext *s, int opc, const TCGArg *args,
                              const int *const_args)
{
    int c;

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Return args[0] in RET0 and branch back through %r18. */
        tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RET0, args[0]);
        tcg_out32(s, BV_N | INSN_R2(TCG_REG_R18));
        break;
    case INDEX_op_goto_tb:
        if (s->tb_jmp_offset) {
            /* direct jump method — not implemented yet */
            fprintf(stderr, "goto_tb direct\n");
            tcg_abort();
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_R20, args[0]);
            tcg_out32(s, BV_N | INSN_R2(TCG_REG_R20));
            s->tb_jmp_offset[args[0]] = s->code_ptr - s->code_buf;
        } else {
            /* indirect jump method: load next-TB pointer, branch to it */
            tcg_out_ld_ptr(s, TCG_REG_R20,
                           (tcg_target_long)(s->tb_next + args[0]));
            tcg_out32(s, BV_N | INSN_R2(TCG_REG_R20));
        }
        s->tb_next_offset[args[0]] = s->code_ptr - s->code_buf;
        break;
    case INDEX_op_call:
        /* ble via %sr4; the link lands in %r31 and is moved to %rp. */
        tcg_out32(s, BLE_SR4 | INSN_R2(args[0]));
        tcg_out_mov(s, TCG_REG_RP, TCG_REG_R31);
        break;
    case INDEX_op_jmp:
        fprintf(stderr, "unimplemented jmp\n");
        tcg_abort();
        break;
    case INDEX_op_br:
        fprintf(stderr, "unimplemented br\n");
        tcg_abort();
        break;
    case INDEX_op_movi_i32:
        tcg_out_movi(s, TCG_TYPE_I32, args[0], (uint32_t)args[1]);
        break;

    /* Loads: sub-word signed variants load then sign-extend. */
    case INDEX_op_ld8u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], LDB);
        break;
    case INDEX_op_ld8s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], LDB);
        tcg_out_ext8s(s, args[0], args[0]);
        break;
    case INDEX_op_ld16u_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], LDH);
        break;
    case INDEX_op_ld16s_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], LDH);
        tcg_out_ext16s(s, args[0], args[0]);
        break;
    case INDEX_op_ld_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], LDW);
        break;

    case INDEX_op_st8_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], STB);
        break;
    case INDEX_op_st16_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], STH);
        break;
    case INDEX_op_st_i32:
        tcg_out_ldst(s, args[0], args[1], args[2], STW);
        break;

    /* Three-operand ALU ops share one emitter via gen_arith. */
    case INDEX_op_sub_i32:
        c = ARITH_SUB;
        goto gen_arith;
    case INDEX_op_and_i32:
        c = ARITH_AND;
        goto gen_arith;
    case INDEX_op_or_i32:
        c = ARITH_OR;
        goto gen_arith;
    case INDEX_op_xor_i32:
        c = ARITH_XOR;
        goto gen_arith;
    case INDEX_op_add_i32:
        c = ARITH_ADD;
        goto gen_arith;

    case INDEX_op_shl_i32:
        /* Variable left shift: load 31 - count into %cr11 (the shift
           amount register), then zvdep deposits args[1] shifted. */
        tcg_out32(s, SUBI | INSN_R1(TCG_REG_R20) | INSN_R2(args[2]) |
                  lowsignext(0x1f, 0, 11));
        tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(TCG_REG_R20));
        tcg_out32(s, ZVDEP | INSN_R2(args[0]) | INSN_R1(args[1]) |
                  INSN_DEP_LEN(32));
        break;
    case INDEX_op_shr_i32:
        /* Variable right shift: count goes to %cr11, vshd with %r0 as
           the high word performs a logical shift right. */
        tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(args[2]));
        tcg_out32(s, VSHD | INSN_T(args[0]) | INSN_R1(TCG_REG_R0) |
                  INSN_R2(args[1]));
        break;
    case INDEX_op_sar_i32:
        /* Arithmetic right shift via vextrs with 31 - count in %cr11. */
        tcg_out32(s, SUBI | INSN_R1(TCG_REG_R20) | INSN_R2(args[2]) |
                  lowsignext(0x1f, 0, 11));
        tcg_out32(s, MTCTL | INSN_R2(11) | INSN_R1(TCG_REG_R20));
        tcg_out32(s, VEXTRS | INSN_R1(args[0]) | INSN_R2(args[1]) |
                  INSN_DEP_LEN(32));
        break;

    case INDEX_op_mul_i32:
        fprintf(stderr, "unimplemented mul\n");
        tcg_abort();
        break;
    case INDEX_op_mulu2_i32:
        fprintf(stderr, "unimplemented mulu2\n");
        tcg_abort();
        break;
    case INDEX_op_div2_i32:
        fprintf(stderr, "unimplemented div2\n");
        tcg_abort();
        break;
    case INDEX_op_divu2_i32:
        fprintf(stderr, "unimplemented divu2\n");
        tcg_abort();
        break;
    case INDEX_op_brcond_i32:
        fprintf(stderr, "unimplemented brcond\n");
        tcg_abort();
        break;

    /* Guest memory access: bit 2 of the second argument selects
       sign extension for the sub-word loads. */
    case INDEX_op_qemu_ld8u:
        tcg_out_qemu_ld(s, args, 0);
        break;
    case INDEX_op_qemu_ld8s:
        tcg_out_qemu_ld(s, args, 0 | 4);
        break;
    case INDEX_op_qemu_ld16u:
        tcg_out_qemu_ld(s, args, 1);
        break;
    case INDEX_op_qemu_ld16s:
        tcg_out_qemu_ld(s, args, 1 | 4);
        break;
    case INDEX_op_qemu_ld32u:
        tcg_out_qemu_ld(s, args, 2);
        break;

    case INDEX_op_qemu_st8:
        tcg_out_qemu_st(s, args, 0);
        break;
    case INDEX_op_qemu_st16:
        tcg_out_qemu_st(s, args, 1);
        break;
    case INDEX_op_qemu_st32:
        tcg_out_qemu_st(s, args, 2);
        break;

    default:
        fprintf(stderr, "unknown opcode 0x%x\n", opc);
        tcg_abort();
    }
    return;

gen_arith:
    tcg_out_arith(s, args[0], args[1], args[2], c);
}
/* Operand constraints for every opcode this backend implements.
   "r" accepts any general register; "L" is the qemu_ld/st operand
   constraint (defined in this backend's constraint parser, not visible
   here — presumably it excludes registers clobbered by the inline TLB
   probe and the helper call; confirm against tcg-target.c).  With
   64-bit guest addresses each address takes two registers, hence the
   extra "L" entries in the second table. */
static const TCGTargetOpDef hppa_op_defs[] = {
    { INDEX_op_exit_tb, { } },
    { INDEX_op_goto_tb, { } },

    { INDEX_op_call, { "r" } },
    { INDEX_op_jmp, { "r" } },
    { INDEX_op_br, { } },

    { INDEX_op_mov_i32, { "r", "r" } },
    { INDEX_op_movi_i32, { "r" } },
    { INDEX_op_ld8u_i32, { "r", "r" } },
    { INDEX_op_ld8s_i32, { "r", "r" } },
    { INDEX_op_ld16u_i32, { "r", "r" } },
    { INDEX_op_ld16s_i32, { "r", "r" } },
    { INDEX_op_ld_i32, { "r", "r" } },
    { INDEX_op_st8_i32, { "r", "r" } },
    { INDEX_op_st16_i32, { "r", "r" } },
    { INDEX_op_st_i32, { "r", "r" } },

    { INDEX_op_add_i32, { "r", "r", "r" } },
    { INDEX_op_sub_i32, { "r", "r", "r" } },
    { INDEX_op_and_i32, { "r", "r", "r" } },
    { INDEX_op_or_i32, { "r", "r", "r" } },
    { INDEX_op_xor_i32, { "r", "r", "r" } },

    { INDEX_op_shl_i32, { "r", "r", "r" } },
    { INDEX_op_shr_i32, { "r", "r", "r" } },
    { INDEX_op_sar_i32, { "r", "r", "r" } },

    { INDEX_op_brcond_i32, { "r", "r" } },

#if TARGET_LONG_BITS == 32
    { INDEX_op_qemu_ld8u, { "r", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L" } },

    { INDEX_op_qemu_st8, { "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L" } },
#else
    { INDEX_op_qemu_ld8u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld8s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld16s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32u, { "r", "L", "L" } },
    { INDEX_op_qemu_ld32s, { "r", "L", "L" } },
    { INDEX_op_qemu_ld64, { "r", "r", "L", "L" } },

    { INDEX_op_qemu_st8, { "L", "L", "L" } },
    { INDEX_op_qemu_st16, { "L", "L", "L" } },
    { INDEX_op_qemu_st32, { "L", "L", "L" } },
    { INDEX_op_qemu_st64, { "L", "L", "L", "L" } },
#endif
    { -1 }, /* table terminator */
};
/* One-time backend initialization: describe which host registers are
 * available for 32-bit values, which are clobbered across calls, and
 * which are reserved for fixed roles, then register the opcode
 * constraint table with the core. */
void tcg_target_init(TCGContext *s)
{
    /* All 32 general registers can hold a 32-bit TCG value. */
    tcg_regset_set32(tcg_target_available_regs[TCG_TYPE_I32], 0, 0xffffffff);
    /* Caller-saved temporaries / argument registers %r20-%r26. */
    tcg_regset_set32(tcg_target_call_clobber_regs, 0,
                     (1 << TCG_REG_R20) |
                     (1 << TCG_REG_R21) |
                     (1 << TCG_REG_R22) |
                     (1 << TCG_REG_R23) |
                     (1 << TCG_REG_R24) |
                     (1 << TCG_REG_R25) |
                     (1 << TCG_REG_R26));

    tcg_regset_clear(s->reserved_regs);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R0);  /* hardwired to zero */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R1);  /* addil target */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_RP);  /* link register */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R3);  /* frame pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R18); /* return pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R19); /* clobbered w/o pic */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R20); /* reserved */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_DP);  /* data pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);  /* stack pointer */
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_R31); /* ble link reg */

    tcg_add_target_add_op_defs(hppa_op_defs);
}

204
tcg/hppa/tcg-target.h Normal file
View File

@ -0,0 +1,204 @@
/*
* Tiny Code Generator for QEMU
*
* Copyright (c) 2008 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#define TCG_TARGET_HPPA 1

/* Only 32-bit PA-RISC (1.1) hosts are supported. */
#if defined(_PA_RISC1_1)
#define TCG_TARGET_REG_BITS 32
#else
#error unsupported
#endif

/* PA-RISC is a big-endian host. */
#define TCG_TARGET_WORDS_BIGENDIAN

#define TCG_TARGET_NB_REGS 32
/* Host register encoding: the enumerator value equals the hardware
   register number (%r0..%r31).  Aliases name registers with fixed
   roles — see the reservation comments in tcg_target_init(). */
enum {
    TCG_REG_R0 = 0,   /* hardwired to zero */
    TCG_REG_R1,       /* addil target */
    TCG_REG_RP,       /* %r2: procedure return pointer */
    TCG_REG_R3,       /* frame pointer */
    TCG_REG_R4,
    TCG_REG_R5,
    TCG_REG_R6,
    TCG_REG_R7,
    TCG_REG_R8,
    TCG_REG_R9,
    TCG_REG_R10,
    TCG_REG_R11,
    TCG_REG_R12,
    TCG_REG_R13,
    TCG_REG_R14,
    TCG_REG_R15,
    TCG_REG_R16,
    TCG_REG_R17,
    TCG_REG_R18,
    TCG_REG_R19,
    TCG_REG_R20,
    TCG_REG_R21,
    TCG_REG_R22,
    TCG_REG_R23,
    TCG_REG_R24,
    TCG_REG_R25,
    TCG_REG_R26,
    TCG_REG_DP,       /* %r27: global data pointer */
    TCG_REG_RET0,     /* %r28: first return-value register */
    TCG_REG_RET1,     /* %r29: second return-value register */
    TCG_REG_SP,       /* %r30: stack pointer */
    TCG_REG_R31,      /* ble link register */
};
/* used for function call generation */
#define TCG_REG_CALL_STACK TCG_REG_SP
#define TCG_TARGET_STACK_ALIGN 16
/* The PA-RISC stack grows towards higher addresses. */
#define TCG_TARGET_STACK_GROWSUP

/* optional instructions (none enabled for this backend yet) */
//#define TCG_TARGET_HAS_ext8s_i32
//#define TCG_TARGET_HAS_ext16s_i32
//#define TCG_TARGET_HAS_bswap16_i32
//#define TCG_TARGET_HAS_bswap_i32

/* Registers holding the CPU state pointers.
   Note: must be synced with dyngen-exec.h */
#define TCG_AREG0 TCG_REG_R17
#define TCG_AREG1 TCG_REG_R14
#define TCG_AREG2 TCG_REG_R15
#define TCG_AREG3 TCG_REG_R16
/* Make freshly generated code in [start, stop] visible to instruction
 * fetch: for each 32-byte cache line, flush it from the data cache and
 * purge it from the instruction cache, with a sync after each step. */
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
    unsigned long line;

    for (line = start & ~31UL; line <= stop; line += 32) {
        asm volatile ("fdc 0(%0)\n"
                      "sync\n"
                      "fic 0(%%sr4, %0)\n"
                      "sync\n"
                      : : "r"(line) : "memory");
    }
}
/* supplied by libgcc */
extern void *__canonicalize_funcptr_for_compare(void *);

/* Field selection types defined by hppa */
/* rnd: round x to the nearest multiple of 8k */
#define rnd(x) (((x)+0x1000)&~0x1fff)
/* lsel: select left 21 bits */
#define lsel(v,a) (((v)+(a))>>11)
/* rsel: select right 11 bits */
#define rsel(v,a) (((v)+(a))&0x7ff)
/* lrsel with rounding of addend to nearest 8k */
#define lrsel(v,a) (((v)+rnd(a))>>11)
/* rrsel with rounding of addend to nearest 8k */
#define rrsel(v,a) ((((v)+rnd(a))&0x7ff)+((a)-rnd(a)))

/* mask: clear the low sz bits of x */
#define mask(x,sz) ((x) & ~((1<<(sz))-1))
/* Scramble a 12-bit displacement into the bit layout used by PA-RISC
 * conditional-branch instructions (sign bit lowest, then bit 10, then
 * the low ten bits shifted up by three). */
static inline int reassemble_12(int as12)
{
    int sign = (as12 >> 11) & 1;   /* displacement sign -> bit 0 */
    int w1 = (as12 >> 10) & 1;     /* bit 10 -> bit 2 */
    int low = as12 & 0x3ff;        /* bits 0..9 -> bits 3..12 */

    return sign | (w1 << 2) | (low << 3);
}
/* Scramble a 14-bit displacement into the instruction layout: the low
 * 13 bits move up one position and the sign bit lands in bit 0. */
static inline int reassemble_14(int as14)
{
    int sign = (as14 >> 13) & 1;
    int low = as14 & 0x1fff;

    return (low << 1) | sign;
}
/* Scramble a 17-bit displacement into the w/w1/w2 branch-instruction
 * field layout: sign to bit 0, bits 11..15 to bits 16..20, bit 10 to
 * bit 2, and the low ten bits to bits 3..12. */
static inline int reassemble_17(int as17)
{
    int sign = (as17 >> 16) & 1;
    int w1 = (as17 >> 11) & 0x1f;
    int w2_hi = (as17 >> 10) & 1;
    int w2_lo = as17 & 0x3ff;

    return sign | (w1 << 16) | (w2_hi << 2) | (w2_lo << 3);
}
/* Scramble a 21-bit immediate into the ldil/addil field layout. */
static inline int reassemble_21(int as21)
{
    int r = (as21 >> 20) & 1;           /* bit 20  -> bit 0 */
    r |= (as21 >> 8) & 0xffe;           /* 9..19   -> 1..11 */
    r |= ((as21 >> 7) & 0x3) << 14;     /* 7..8    -> 14..15 */
    r |= ((as21 >> 2) & 0x1f) << 16;    /* 2..6    -> 16..20 */
    r |= (as21 & 0x3) << 12;            /* 0..1    -> 12..13 */
    return r;
}
/* Patch the 21-bit immediate field of the instruction at *insn with
 * the left part of val+addend (DIR21L-style fixup). */
static inline void hppa_patch21l(uint32_t *insn, int val, int addend)
{
    val = lrsel(val, addend);
    *insn = mask(*insn, 21) | reassemble_21(val);
}
/* Patch the 14-bit displacement field of the instruction at *insn with
 * the right part of val+addend (DIR14R-style fixup). */
static inline void hppa_patch14r(uint32_t *insn, int val, int addend)
{
    val = rrsel(val, addend);
    *insn = mask(*insn, 14) | reassemble_14(val);
}
/* Patch the scattered 17-bit field of the instruction at *insn with
 * the right part of val+addend; ~0x1f1ffd clears exactly the bits that
 * reassemble_17() produces. */
static inline void hppa_patch17r(uint32_t *insn, int val, int addend)
{
    val = rrsel(val, addend);
    *insn = (*insn & ~0x1f1ffd) | reassemble_17(val);
}
/* As hppa_patch21l, but with val taken relative to the current global
 * data pointer (%r27). */
static inline void hppa_patch21l_dprel(uint32_t *insn, int val, int addend)
{
    register unsigned int dp asm("r27");
    hppa_patch21l(insn, val - dp, addend);
}
/* As hppa_patch14r, but with val taken relative to the current global
 * data pointer (%r27). */
static inline void hppa_patch14r_dprel(uint32_t *insn, int val, int addend)
{
    register unsigned int dp asm("r27");
    hppa_patch14r(insn, val - dp, addend);
}
/* Patch a 17-bit pc-relative branch at *insn to target val+addend.
 * The displacement is measured in words from the branch's pc+8.
 * Aborts if the displacement does not fit the signed 17-bit field. */
static inline void hppa_patch17f(uint32_t *insn, int val, int addend)
{
    int dot = (int)insn & ~0x3;
    int v = ((val + addend) - dot - 8) / 4;

    /* A signed 17-bit field holds -(1 << 16) .. (1 << 16) - 1.  The
       previous check used 'v > (1 << 16)', wrongly accepting the
       out-of-range value v == 1 << 16. */
    if (v >= (1 << 16) || v < -(1 << 16)) {
        printf("cannot fit branch to offset %d [%08x->%08x]\n", v, dot, val);
        abort();
    }
    *insn = (*insn & ~0x1f1ffd) | reassemble_17(v);
}
/* Rewrite the instruction at *insn so it loads val's left part as an
 * absolute immediate instead of a dp-relative one. */
static inline void hppa_load_imm21l(uint32_t *insn, int val, int addend)
{
    /* Transform addil L'sym(%dp) to ldil L'val, %r1 */
    /* NOTE(review): addend is ignored here, unlike the other patch
       helpers — presumably applied by the paired 14-bit fixup; confirm. */
    *insn = 0x20200000 | reassemble_21(lrsel(val, 0));
}
/* Rewrite the instruction at *insn so it computes an address from an
 * immediate instead of loading through %r1. */
static inline void hppa_load_imm14r(uint32_t *insn, int val, int addend)
{
    /* Transform ldw R'sym(%r1), %rN to ldo R'sym(%r1), %rN */
    hppa_patch14r(insn, val, addend);
    /* HACK: with a zero addend, replace the opcode (top six bits)
       with 0x0d (ldo). */
    if (addend == 0)
        *insn = (*insn & ~0xfc000000) | (0x0d << 26);
}

View File

@ -47,8 +47,9 @@ const int tcg_target_call_iarg_regs[3] = { TCG_REG_EAX, TCG_REG_EDX, TCG_REG_ECX
const int tcg_target_call_oarg_regs[2] = { TCG_REG_EAX, TCG_REG_EDX };
static void patch_reloc(uint8_t *code_ptr, int type,
tcg_target_long value)
tcg_target_long value, tcg_target_long addend)
{
value += addend;
switch(type) {
case R_386_32:
*(uint32_t *)code_ptr = value;

View File

@ -88,8 +88,9 @@ static const int tcg_target_call_oarg_regs[2] = {
};
static void patch_reloc(uint8_t *code_ptr, int type,
tcg_target_long value)
tcg_target_long value, tcg_target_long addend)
{
value += addend;
switch (type) {
case R_SPARC_32:
if (value != (uint32_t)value)

View File

@ -465,10 +465,55 @@ static inline void ia64_apply_fixes (uint8_t **gen_code_pp,
#endif
#ifndef CONFIG_NO_DYNGEN_OP
#if defined __hppa__
struct hppa_branch_stub {
uint32_t *location;
long target;
struct hppa_branch_stub *next;
};
#define HPPA_RECORD_BRANCH(LIST, LOC, TARGET) \
do { \
struct hppa_branch_stub *stub = alloca(sizeof(struct hppa_branch_stub)); \
stub->location = LOC; \
stub->target = TARGET; \
stub->next = LIST; \
LIST = stub; \
} while (0)
static inline void hppa_process_stubs(struct hppa_branch_stub *stub,
uint8_t **gen_code_pp)
{
uint32_t *s = (uint32_t *)*gen_code_pp;
uint32_t *p = s + 1;
if (!stub) return;
for (; stub != NULL; stub = stub->next) {
unsigned long l = (unsigned long)p;
/* stub:
* ldil L'target, %r1
* be,n R'target(%sr4,%r1)
*/
*p++ = 0x20200000 | reassemble_21(lrsel(stub->target, 0));
*p++ = 0xe0202002 | (reassemble_17(rrsel(stub->target, 0) >> 2));
hppa_patch17f(stub->location, l, 0);
}
/* b,l,n stub,%r0 */
*s = 0xe8000002 | reassemble_17((p - s) - 2);
*gen_code_pp = (uint8_t *)p;
}
#endif /* __hppa__ */
const TCGArg *dyngen_op(TCGContext *s, int opc, const TCGArg *opparam_ptr)
{
uint8_t *gen_code_ptr;
#ifdef __hppa__
struct hppa_branch_stub *hppa_stubs = NULL;
#endif
gen_code_ptr = s->code_ptr;
switch(opc) {
@ -478,6 +523,11 @@ const TCGArg *dyngen_op(TCGContext *s, int opc, const TCGArg *opparam_ptr)
default:
tcg_abort();
}
#ifdef __hppa__
hppa_process_stubs(hppa_stubs, &gen_code_ptr);
#endif
s->code_ptr = gen_code_ptr;
return opparam_ptr;
}

View File

@ -53,7 +53,7 @@
static void patch_reloc(uint8_t *code_ptr, int type,
tcg_target_long value);
tcg_target_long value, tcg_target_long addend);
TCGOpDef tcg_op_defs[] = {
#define DEF(s, n, copy_size) { #s, 0, 0, n, n, 0, copy_size },
@ -100,7 +100,7 @@ void tcg_out_reloc(TCGContext *s, uint8_t *code_ptr, int type,
/* FIXME: This may break relocations on RISC targets that
modify instruction fields in place. The caller may not have
written the initial value. */
patch_reloc(code_ptr, type, l->u.value + addend);
patch_reloc(code_ptr, type, l->u.value, addend);
} else {
/* add a new relocation entry */
r = tcg_malloc(sizeof(TCGRelocation));
@ -123,7 +123,7 @@ static void tcg_out_label(TCGContext *s, int label_index,
tcg_abort();
r = l->u.first_reloc;
while (r != NULL) {
patch_reloc(r->ptr, r->type, value + r->addend);
patch_reloc(r->ptr, r->type, value, r->addend);
r = r->next;
}
l->has_value = 1;
@ -1442,7 +1442,7 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
int nb_iargs, nb_oargs, flags, nb_regs, i, reg, nb_params;
TCGArg arg, func_arg;
TCGTemp *ts;
tcg_target_long stack_offset, call_stack_size;
tcg_target_long stack_offset, call_stack_size, func_addr;
int const_func_arg;
TCGRegSet allocated_regs;
const TCGArgConstraint *arg_ct;
@ -1464,7 +1464,11 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
call_stack_size = (nb_params - nb_regs) * sizeof(tcg_target_long);
call_stack_size = (call_stack_size + TCG_TARGET_STACK_ALIGN - 1) &
~(TCG_TARGET_STACK_ALIGN - 1);
#ifdef TCG_TARGET_STACK_GROWSUP
tcg_out_addi(s, TCG_REG_CALL_STACK, call_stack_size);
#else
tcg_out_addi(s, TCG_REG_CALL_STACK, -call_stack_size);
#endif
stack_offset = 0;
for(i = nb_regs; i < nb_params; i++) {
@ -1487,7 +1491,11 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
} else {
tcg_abort();
}
#ifdef TCG_TARGET_STACK_GROWSUP
stack_offset -= sizeof(tcg_target_long);
#else
stack_offset += sizeof(tcg_target_long);
#endif
}
/* assign input registers */
@ -1516,6 +1524,10 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
func_arg = args[nb_oargs + nb_iargs - 1];
arg_ct = &def->args_ct[0];
ts = &s->temps[func_arg];
func_addr = ts->val;
#ifdef HOST_HPPA
func_addr = (tcg_target_long)__canonicalize_funcptr_for_compare((void *)func_addr);
#endif
const_func_arg = 0;
if (ts->val_type == TEMP_VAL_MEM) {
reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
@ -1529,12 +1541,12 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
}
func_arg = reg;
} else if (ts->val_type == TEMP_VAL_CONST) {
if (tcg_target_const_match(ts->val, arg_ct)) {
if (tcg_target_const_match(func_addr, arg_ct)) {
const_func_arg = 1;
func_arg = ts->val;
func_arg = func_addr;
} else {
reg = tcg_reg_alloc(s, arg_ct->u.regs, allocated_regs);
tcg_out_movi(s, ts->type, reg, ts->val);
tcg_out_movi(s, ts->type, reg, func_addr);
func_arg = reg;
}
} else {
@ -1574,7 +1586,11 @@ static int tcg_reg_alloc_call(TCGContext *s, const TCGOpDef *def,
tcg_out_op(s, opc, &func_arg, &const_func_arg);
#ifdef TCG_TARGET_STACK_GROWSUP
tcg_out_addi(s, TCG_REG_CALL_STACK, -call_stack_size);
#else
tcg_out_addi(s, TCG_REG_CALL_STACK, call_stack_size);
#endif
/* assign output registers and emit moves if needed */
for(i = 0; i < nb_oargs; i++) {

View File

@ -74,8 +74,9 @@ const int tcg_target_call_oarg_regs[2] = {
};
static void patch_reloc(uint8_t *code_ptr, int type,
tcg_target_long value)
tcg_target_long value, tcg_target_long addend)
{
value += addend;
switch(type) {
case R_X86_64_32:
if (value != (uint32_t)value)