linux-user/loongarch64: Add vdso

Requires a relatively recent binutils version in order to avoid
spurious R_LARCH_NONE relocations.  The presence of these relocs
is diagnosed by our gen-vdso tool.
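
To illustrate the class of problem gen-vdso diagnoses: a scan of this shape
over the .rela.dyn entries of the built vdso.so would flag the offending
entries.  This is only a sketch, not the actual gen-vdso source; it assumes
<elf.h> provides R_LARCH_NONE (value 0), as recent glibc does.

    #include <elf.h>
    #include <stdio.h>

    /* Reject any R_LARCH_NONE (type 0) entries left in a .rela section. */
    static int check_rela(const Elf64_Rela *rela, size_t count)
    {
        for (size_t i = 0; i < count; i++) {
            if (ELF64_R_TYPE(rela[i].r_info) == R_LARCH_NONE) {
                fprintf(stderr, "spurious R_LARCH_NONE at entry %zu\n", i);
                return 1;
            }
        }
        return 0;
    }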

Tested-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Song Gao <gaosong@loongson.cn>
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2023-08-14 13:22:57 -07:00
Commit: 00cc2934b2
Parent: 468c1bb5ca

9 changed files with 245 additions and 1 deletion

linux-user/elfload.c

@@ -1197,6 +1197,8 @@ static void elf_core_copy_regs(target_elf_gregset_t *regs, const CPUPPCState *env)
#define elf_check_arch(x) ((x) == EM_LOONGARCH)

#define VDSO_HEADER "vdso.c.inc"

static inline void init_thread(struct target_pt_regs *regs,
                               struct image_info *infop)
{
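
VDSO_HEADER names the generated include (vdso.c.inc, produced by gen-vdso
below) that embeds the prebuilt vdso image.  As a rough sketch of how common
loader code can consume such a per-target header — the guard shape and the
vdso_image_info symbol are assumptions here, not the literal elfload.c code:

    #ifdef VDSO_HEADER
    #include VDSO_HEADER   /* embeds the vdso.so bytes plus metadata */
    static const void *vdso_info(void) { return &vdso_image_info; }
    #else
    static const void *vdso_info(void) { return NULL; }  /* target has no vdso */
    #endif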

linux-user/loongarch64/Makefile.vdso

@@ -0,0 +1,11 @@
include $(BUILD_DIR)/tests/tcg/loongarch64-linux-user/config-target.mak

SUBDIR = $(SRC_PATH)/linux-user/loongarch64
VPATH += $(SUBDIR)

all: $(SUBDIR)/vdso.so

$(SUBDIR)/vdso.so: vdso.S vdso.ld vdso-asmoffset.h
	$(CC) -o $@ -nostdlib -shared -fpic -Wl,-h,linux-vdso.so.1 \
		-Wl,--build-id=sha1 -Wl,--hash-style=both \
		-Wl,--no-warn-rwx-segments -Wl,-T,$(SUBDIR)/vdso.ld $<

linux-user/loongarch64/meson.build

@@ -0,0 +1,4 @@
vdso_inc = gen_vdso.process('vdso.so',
                            extra_args: ['-r', '__vdso_rt_sigreturn'])

linux_user_ss.add(when: 'TARGET_LOONGARCH64', if_true: vdso_inc)
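
gen_vdso.process() runs the gen-vdso tool over the prebuilt vdso.so and emits
vdso.c.inc, which elfload.c pulls in via VDSO_HEADER above.  The exact output
format belongs to gen-vdso; a plausible shape, with illustrative (assumed)
type and field names, would be:

    static const uint8_t vdso_image[] = {
        0x7f, 0x45, 0x4c, 0x46,  /* ELF magic; the rest of the .so follows */
        /* ... */
    };

    static const VdsoImageInfo vdso_image_info = {
        .image         = vdso_image,
        .image_size    = sizeof(vdso_image),
        /* offset of __vdso_rt_sigreturn, located via the -r flag above */
        .sigreturn_ofs = 0x123,  /* placeholder */
    };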

linux-user/loongarch64/signal.c

@@ -10,9 +10,9 @@
#include "user-internals.h"
#include "signal-common.h"
#include "linux-user/trace.h"
#include "target/loongarch/internals.h"
#include "target/loongarch/vec.h"
#include "vdso-asmoffset.h"

/* FP context was used */
#define SC_USED_FP (1 << 0)
@@ -24,6 +24,11 @@ struct target_sigcontext {
    uint64_t sc_extcontext[0] QEMU_ALIGNED(16);
};

QEMU_BUILD_BUG_ON(sizeof(struct target_sigcontext) != sizeof_sigcontext);
QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_pc)
                  != offsetof_sigcontext_pc);
QEMU_BUILD_BUG_ON(offsetof(struct target_sigcontext, sc_regs)
                  != offsetof_sigcontext_gr);

#define FPU_CTX_MAGIC 0x46505501
#define FPU_CTX_ALIGN 8
@@ -33,6 +38,9 @@ struct target_fpu_context {
    uint32_t fcsr;
} QEMU_ALIGNED(FPU_CTX_ALIGN);

QEMU_BUILD_BUG_ON(offsetof(struct target_fpu_context, regs)
                  != offsetof_fpucontext_fr);

#define CONTEXT_INFO_ALIGN 16
struct target_sctx_info {
    uint32_t magic;
@@ -40,6 +48,8 @@ struct target_sctx_info {
    uint64_t padding;
} QEMU_ALIGNED(CONTEXT_INFO_ALIGN);

QEMU_BUILD_BUG_ON(sizeof(struct target_sctx_info) != sizeof_sctx_info);

struct target_ucontext {
    abi_ulong tuc_flags;
    abi_ptr tuc_link;
@@ -54,6 +64,11 @@ struct target_rt_sigframe {
    struct target_ucontext rs_uc;
};

QEMU_BUILD_BUG_ON(sizeof(struct target_rt_sigframe)
                  != sizeof_rt_sigframe);
QEMU_BUILD_BUG_ON(offsetof(struct target_rt_sigframe, rs_uc.tuc_mcontext)
                  != offsetof_sigcontext);

/*
 * These two structures are not present in guest memory, are private
 * to the signal implementation, but are largely copied from the

linux-user/loongarch64/vdso-asmoffset.h

@@ -0,0 +1,8 @@
#define sizeof_rt_sigframe      0x240
#define sizeof_sigcontext       0x110
#define sizeof_sctx_info        0x10

#define offsetof_sigcontext     0x130
#define offsetof_sigcontext_pc  0
#define offsetof_sigcontext_gr  8
#define offsetof_fpucontext_fr  0
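
These constants must stay consistent both with the C structures in signal.c
(enforced by the QEMU_BUILD_BUG_ON checks above) and with each other: the
sigcontext is the last member of the 0x240-byte frame, and the FPU register
file lands 0x120 bytes past the start of the sigcontext (one 0x110-byte
sigcontext plus one 0x10-byte sctx_info header).  A standalone sketch of that
arithmetic, not part of the commit:

    #include <assert.h>

    /* sigcontext is the final member of the 0x240-byte rt_sigframe */
    static_assert(0x130 + 0x110 == 0x240, "offsetof + sizeof == frame size");

    /* FPU regs follow the sigcontext body plus one sctx_info header */
    static_assert(0x110 + 0x10 + 0x0 == 0x120, "base used by the vdso CFI");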

linux-user/loongarch64/vdso.S

@@ -0,0 +1,130 @@
/*
 * LoongArch64 linux replacement vdso.
 *
 * Copyright 2023 Linaro, Ltd.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <asm/unistd.h>
#include <asm/errno.h>
#include "vdso-asmoffset.h"

	.text

.macro endf name
	.globl	\name
	.type	\name, @function
	.size	\name, . - \name
.endm

.macro vdso_syscall name, nr
\name:
	li.w	$a7, \nr
	syscall	0
	jr	$ra
endf	\name
.endm
	.cfi_startproc
vdso_syscall __vdso_gettimeofday, __NR_gettimeofday
vdso_syscall __vdso_clock_gettime, __NR_clock_gettime
vdso_syscall __vdso_clock_getres, __NR_clock_getres
vdso_syscall __vdso_getcpu, __NR_getcpu
	.cfi_endproc
/*
 * Start the unwind info at least one instruction before the signal
 * trampoline, because the unwinder will assume we are returning
 * after a call site.
 */
	.cfi_startproc simple
	.cfi_signal_frame

#define B_GR	offsetof_sigcontext_gr
#define B_FR	sizeof_sigcontext + sizeof_sctx_info + offsetof_fpucontext_fr

	.cfi_def_cfa	3, offsetof_sigcontext

	/* Return address */
	.cfi_return_column 64
	.cfi_offset	64, offsetof_sigcontext_pc	/* pc */
	/* Integer registers */
	.cfi_offset	1, B_GR + 1 * 8
	.cfi_offset	2, B_GR + 2 * 8
	.cfi_offset	3, B_GR + 3 * 8
	.cfi_offset	4, B_GR + 4 * 8
	.cfi_offset	5, B_GR + 5 * 8
	.cfi_offset	6, B_GR + 6 * 8
	.cfi_offset	7, B_GR + 7 * 8
	.cfi_offset	8, B_GR + 8 * 8
	.cfi_offset	9, B_GR + 9 * 8
	.cfi_offset	10, B_GR + 10 * 8
	.cfi_offset	11, B_GR + 11 * 8
	.cfi_offset	12, B_GR + 12 * 8
	.cfi_offset	13, B_GR + 13 * 8
	.cfi_offset	14, B_GR + 14 * 8
	.cfi_offset	15, B_GR + 15 * 8
	.cfi_offset	16, B_GR + 16 * 8
	.cfi_offset	17, B_GR + 17 * 8
	.cfi_offset	18, B_GR + 18 * 8
	.cfi_offset	19, B_GR + 19 * 8
	.cfi_offset	20, B_GR + 20 * 8
	.cfi_offset	21, B_GR + 21 * 8
	.cfi_offset	22, B_GR + 22 * 8
	.cfi_offset	23, B_GR + 23 * 8
	.cfi_offset	24, B_GR + 24 * 8
	.cfi_offset	25, B_GR + 25 * 8
	.cfi_offset	26, B_GR + 26 * 8
	.cfi_offset	27, B_GR + 27 * 8
	.cfi_offset	28, B_GR + 28 * 8
	.cfi_offset	29, B_GR + 29 * 8
	.cfi_offset	30, B_GR + 30 * 8
	.cfi_offset	31, B_GR + 31 * 8
	/* Floating point registers */
	.cfi_offset	32, B_FR + 0
	.cfi_offset	33, B_FR + 1 * 8
	.cfi_offset	34, B_FR + 2 * 8
	.cfi_offset	35, B_FR + 3 * 8
	.cfi_offset	36, B_FR + 4 * 8
	.cfi_offset	37, B_FR + 5 * 8
	.cfi_offset	38, B_FR + 6 * 8
	.cfi_offset	39, B_FR + 7 * 8
	.cfi_offset	40, B_FR + 8 * 8
	.cfi_offset	41, B_FR + 9 * 8
	.cfi_offset	42, B_FR + 10 * 8
	.cfi_offset	43, B_FR + 11 * 8
	.cfi_offset	44, B_FR + 12 * 8
	.cfi_offset	45, B_FR + 13 * 8
	.cfi_offset	46, B_FR + 14 * 8
	.cfi_offset	47, B_FR + 15 * 8
	.cfi_offset	48, B_FR + 16 * 8
	.cfi_offset	49, B_FR + 17 * 8
	.cfi_offset	50, B_FR + 18 * 8
	.cfi_offset	51, B_FR + 19 * 8
	.cfi_offset	52, B_FR + 20 * 8
	.cfi_offset	53, B_FR + 21 * 8
	.cfi_offset	54, B_FR + 22 * 8
	.cfi_offset	55, B_FR + 23 * 8
	.cfi_offset	56, B_FR + 24 * 8
	.cfi_offset	57, B_FR + 25 * 8
	.cfi_offset	58, B_FR + 26 * 8
	.cfi_offset	59, B_FR + 27 * 8
	.cfi_offset	60, B_FR + 28 * 8
	.cfi_offset	61, B_FR + 29 * 8
	.cfi_offset	62, B_FR + 30 * 8
	.cfi_offset	63, B_FR + 31 * 8
	nop
__vdso_rt_sigreturn:
	li.w	$a7, __NR_rt_sigreturn
	syscall	0
	.cfi_endproc
endf	__vdso_rt_sigreturn
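
Read together, this CFI tells an unwinder: the CFA is $sp + 0x130 (the start
of the sigcontext), the interrupted pc is saved at CFA + 0, integer register
i at CFA + 8 + i * 8, and FP register i at CFA + 0x120 + i * 8.  A sketch of
that address arithmetic in C, with an assumed helper name:

    #include <stdint.h>

    /* Hypothetical helper: address where DWARF register `regno` was
       saved, given the guest $sp at the sigreturn trampoline. */
    static uint64_t save_addr(uint64_t sp, unsigned regno)
    {
        uint64_t cfa = sp + 0x130;        /* .cfi_def_cfa 3, offsetof_sigcontext */

        if (regno == 64) {                /* pc, via .cfi_return_column */
            return cfa + 0;               /* offsetof_sigcontext_pc */
        }
        if (regno < 32) {                 /* integer registers $r1..$r31 */
            return cfa + 8 + regno * 8;   /* B_GR + regno * 8 */
        }
        return cfa + 0x120 + (regno - 32) * 8;  /* $f0..$f31 at B_FR */
    }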

linux-user/loongarch64/vdso.ld

@@ -0,0 +1,73 @@
/*
 * Linker script for linux loongarch64 replacement vdso.
 *
 * Copyright 2023 Linaro, Ltd.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

VERSION {
        LINUX_5.10 {
        global:
                __vdso_getcpu;
                __vdso_clock_getres;
                __vdso_clock_gettime;
                __vdso_gettimeofday;
                __vdso_rt_sigreturn;
        local: *;
        };
}
PHDRS {
        phdr            PT_PHDR         FLAGS(4) PHDRS;
        load            PT_LOAD         FLAGS(7) FILEHDR PHDRS;
        dynamic         PT_DYNAMIC      FLAGS(4);
        eh_frame_hdr    PT_GNU_EH_FRAME;
        note            PT_NOTE         FLAGS(4);
}
SECTIONS {
        /*
         * We can't prelink to any address without knowing something about
         * the virtual memory space of the host, since that leaks over into
         * the available memory space of the guest.
         */
        . = SIZEOF_HEADERS;

        /*
         * The following, including the FILEHDRS and PHDRS, are modified
         * when we relocate the binary.  We want them to be initially
         * writable for the relocation; we'll force them read-only after.
         */
        .note           : { *(.note*) }                 :load :note
        .dynamic        : { *(.dynamic) }               :load :dynamic
        .dynsym         : { *(.dynsym) }                :load

        /*
         * There ought not be any real read-write data.
         * But since we manipulated the segment layout,
         * we have to put these sections somewhere.
         */
        .data : {
                *(.data*)
                *(.sdata*)
                *(.got.plt) *(.got)
                *(.gnu.linkonce.d.*)
                *(.bss*)
                *(.dynbss*)
                *(.gnu.linkonce.b.*)
        }

        .rodata         : { *(.rodata*) }
        .hash           : { *(.hash) }
        .gnu.hash       : { *(.gnu.hash) }
        .dynstr         : { *(.dynstr) }
        .gnu.version    : { *(.gnu.version) }
        .gnu.version_d  : { *(.gnu.version_d) }
        .gnu.version_r  : { *(.gnu.version_r) }
        .eh_frame_hdr   : { *(.eh_frame_hdr) }          :load :eh_frame_hdr
        .eh_frame       : { *(.eh_frame) }              :load

        .text           : { *(.text*) }                 :load =0x03400000
}

linux-user/loongarch64/vdso.so (new executable file; binary not shown)

linux-user/meson.build

@@ -40,6 +40,7 @@ subdir('alpha')
subdir('arm')
subdir('hppa')
subdir('i386')
subdir('loongarch64')
subdir('m68k')
subdir('microblaze')
subdir('mips64')