mmap emulation

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@158 c046a42c-6fe2-441c-8c8c-71466251a162
Author: bellard
Date:   2003-05-13 00:25:15 +0000
Commit: 54936004fd (parent 74c95119f2)

9 changed files with 646 additions and 101 deletions

View File

@@ -58,11 +58,11 @@ LDFLAGS+=-p
 main.o: CFLAGS+=-p
 endif
-OBJS= elfload.o main.o syscall.o signal.o vm86.o path.o
+OBJS= elfload.o main.o syscall.o mmap.o signal.o vm86.o path.o
 SRCS:= $(OBJS:.o=.c)
 OBJS+= libqemu.a
-LIBOBJS+=thunk.o translate-i386.o op-i386.o exec-i386.o
+LIBOBJS+=thunk.o translate-i386.o op-i386.o exec-i386.o exec.o
 # NOTE: the disassembler code is only needed for debugging
 LIBOBJS+=disas.o ppc-dis.o i386-dis.o alpha-dis.o dis-buf.o

View File

@@ -431,6 +431,30 @@ int cpu_x86_signal_handler(int host_signum, struct siginfo *info,
 #define X86_DUMP_CCOP 0x0002 /* dump qemu flag cache */
 void cpu_x86_dump_state(CPUX86State *env, FILE *f, int flags);
+/* page related stuff */
+#define TARGET_PAGE_BITS 12
+#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
+#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
+#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
+extern unsigned long real_host_page_size;
+extern unsigned long host_page_bits;
+extern unsigned long host_page_size;
+extern unsigned long host_page_mask;
+#define HOST_PAGE_ALIGN(addr) (((addr) + host_page_size - 1) & host_page_mask)
+/* same as PROT_xxx */
+#define PAGE_READ 0x0001
+#define PAGE_WRITE 0x0002
+#define PAGE_EXEC 0x0004
+#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
+#define PAGE_VALID 0x0008
+void page_dump(FILE *f);
+int page_get_flags(unsigned long address);
+void page_set_flags(unsigned long start, unsigned long end, int flags);
 /* internal functions */
 #define GEN_FLAG_CODE32_SHIFT 0
@@ -446,5 +470,6 @@ int cpu_x86_gen_code(uint8_t *gen_code_buf, int max_code_size,
 int *gen_code_size_ptr,
 uint8_t *pc_start, uint8_t *cs_base, int flags);
 void cpu_x86_tblocks_init(void);
+void page_init(void);
 #endif /* CPU_I386_H */
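The new page macros do ordinary round-up-and-mask arithmetic. A minimal standalone sketch (illustration only, assuming the 4 KiB target page above and a hypothetical 16 KiB host page such as '-p 16384' would set) shows the difference between target- and host-page alignment:

    #include <stdio.h>

    #define TARGET_PAGE_SIZE 4096UL
    #define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))
    #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

    int main(void)
    {
        unsigned long host_page_size = 16384;                 /* assumed '-p 16384' */
        unsigned long host_page_mask = ~(host_page_size - 1);
        unsigned long addr = 0x8049f21;

        printf("target-aligned: 0x%lx\n", TARGET_PAGE_ALIGN(addr));    /* 0x804a000 */
        printf("host-aligned:   0x%lx\n",
               (addr + host_page_size - 1) & host_page_mask);          /* 0x804c000 */
        return 0;
    }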

exec.c (new file, 148 lines added)
View File

@@ -0,0 +1,148 @@
/*
* virtual page mapping
*
* Copyright (c) 2003 Fabrice Bellard
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <inttypes.h>
#include "cpu-i386.h"
/* XXX: pack the flags in the low bits of the pointer ? */
typedef struct PageDesc {
struct TranslationBlock *first_tb;
unsigned long flags;
} PageDesc;
#define L2_BITS 10
#define L1_BITS (32 - L2_BITS - TARGET_PAGE_BITS)
#define L1_SIZE (1 << L1_BITS)
#define L2_SIZE (1 << L2_BITS)
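/* two-level page table: the top L1_BITS of a target page number index
   l1_map[], the low L2_BITS index the PageDesc array it points to */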
unsigned long real_host_page_size;
unsigned long host_page_bits;
unsigned long host_page_size;
unsigned long host_page_mask;
static PageDesc *l1_map[L1_SIZE];
void page_init(void)
{
/* NOTE: we can always suppose that host_page_size >=
TARGET_PAGE_SIZE */
real_host_page_size = getpagesize();
if (host_page_size == 0)
host_page_size = real_host_page_size;
if (host_page_size < TARGET_PAGE_SIZE)
host_page_size = TARGET_PAGE_SIZE;
host_page_bits = 0;
while ((1 << host_page_bits) < host_page_size)
host_page_bits++;
host_page_mask = ~(host_page_size - 1);
}
/* dump memory mappings */
void page_dump(FILE *f)
{
unsigned long start, end;
int i, j, prot, prot1;
PageDesc *p;
fprintf(f, "%-8s %-8s %-8s %s\n",
"start", "end", "size", "prot");
start = -1;
end = -1;
prot = 0;
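/* the loop deliberately runs one slot past L1_SIZE so that the last open
   mapping range is flushed and printed */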
for(i = 0; i <= L1_SIZE; i++) {
if (i < L1_SIZE)
p = l1_map[i];
else
p = NULL;
for(j = 0;j < L2_SIZE; j++) {
if (!p)
prot1 = 0;
else
prot1 = p[j].flags;
if (prot1 != prot) {
end = (i << (32 - L1_BITS)) | (j << TARGET_PAGE_BITS);
if (start != -1) {
fprintf(f, "%08lx-%08lx %08lx %c%c%c\n",
start, end, end - start,
prot & PAGE_READ ? 'r' : '-',
prot & PAGE_WRITE ? 'w' : '-',
prot & PAGE_EXEC ? 'x' : '-');
}
if (prot1 != 0)
start = end;
else
start = -1;
prot = prot1;
}
if (!p)
break;
}
}
}
static inline PageDesc *page_find_alloc(unsigned long address)
{
unsigned int index;
PageDesc **lp, *p;
index = address >> TARGET_PAGE_BITS;
lp = &l1_map[index >> L2_BITS];
p = *lp;
if (!p) {
/* allocate if not found */
p = malloc(sizeof(PageDesc) * L2_SIZE);
memset(p, 0, sizeof(PageDesc) * L2_SIZE); /* zero the whole level-2 array */
*lp = p;
}
return p + (index & (L2_SIZE - 1));
}
int page_get_flags(unsigned long address)
{
unsigned int index;
PageDesc *p;
index = address >> TARGET_PAGE_BITS;
p = l1_map[index >> L2_BITS];
if (!p)
return 0;
return p[index & (L2_SIZE - 1)].flags;
}
void page_set_flags(unsigned long start, unsigned long end, int flags)
{
PageDesc *p;
unsigned long addr;
start = start & TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
for(addr = start; addr < end; addr += TARGET_PAGE_SIZE) {
p = page_find_alloc(addr);
p->flags = flags;
}
}
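To make the two-level table in exec.c concrete, here is a small standalone sketch (illustration only, reusing the same L2_BITS and TARGET_PAGE_BITS values) of how a 32-bit target address is split into the two indices used by page_find_alloc() and page_get_flags():

    #include <stdio.h>

    #define TARGET_PAGE_BITS 12
    #define L2_BITS 10
    #define L2_SIZE (1 << L2_BITS)

    int main(void)
    {
        unsigned long address = 0x40123456;
        unsigned int index = address >> TARGET_PAGE_BITS;   /* target page number */
        unsigned int l1 = index >> L2_BITS;                 /* slot in l1_map[]   */
        unsigned int l2 = index & (L2_SIZE - 1);            /* slot in PageDesc[] */

        printf("page=0x%x l1=%u l2=%u\n", index, l1, l2);   /* page=0x40123 l1=256 l2=291 */
        return 0;
    }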

View File

@@ -95,8 +95,6 @@ struct exec
 #define ZMAGIC 0413
 #define QMAGIC 0314
-#define X86_STACK_TOP 0x7d000000
 /* max code+data+bss space allocated to elf interpreter */
 #define INTERP_MAP_SIZE (32 * 1024 * 1024)
@@ -123,23 +121,11 @@ struct exec
 #define PER_XENIX (0x0007 | STICKY_TIMEOUTS)
 /* Necessary parameters */
-#define ALPHA_PAGE_SIZE 4096
-#define X86_PAGE_SIZE 4096
-#define ALPHA_PAGE_MASK (~(ALPHA_PAGE_SIZE-1))
-#define X86_PAGE_MASK (~(X86_PAGE_SIZE-1))
-#define ALPHA_PAGE_ALIGN(addr) ((((addr)+ALPHA_PAGE_SIZE)-1)&ALPHA_PAGE_MASK)
-#define X86_PAGE_ALIGN(addr) ((((addr)+X86_PAGE_SIZE)-1)&X86_PAGE_MASK)
 #define NGROUPS 32
-#define X86_ELF_EXEC_PAGESIZE X86_PAGE_SIZE
+#define TARGET_ELF_EXEC_PAGESIZE TARGET_PAGE_SIZE
-#define X86_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(X86_ELF_EXEC_PAGESIZE-1))
+#define TARGET_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(TARGET_ELF_EXEC_PAGESIZE-1))
-#define X86_ELF_PAGEOFFSET(_v) ((_v) & (X86_ELF_EXEC_PAGESIZE-1))
+#define TARGET_ELF_PAGEOFFSET(_v) ((_v) & (TARGET_ELF_EXEC_PAGESIZE-1))
-#define ALPHA_ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ALPHA_PAGE_SIZE-1))
-#define ALPHA_ELF_PAGEOFFSET(_v) ((_v) & (ALPHA_PAGE_SIZE-1))
 #define INTERPRETER_NONE 0
 #define INTERPRETER_AOUT 1
@@ -160,9 +146,6 @@ static inline void memcpy_tofs(void * to, const void * from, unsigned long n)
 memcpy(to, from, n);
 }
-//extern void * mmap4k();
-#define mmap4k(a, b, c, d, e, f) mmap((void *)(a), b, c, d, e, f)
 extern unsigned long x86_stack_size;
 static int load_aout_interp(void * exptr, int interp_fd);
@@ -227,7 +210,7 @@ static void * get_free_page(void)
 /* User-space version of kernel get_free_page. Returns a page-aligned
 * page-sized chunk of memory.
 */
-retval = mmap4k(0, ALPHA_PAGE_SIZE, PROT_READ|PROT_WRITE,
+retval = (void *)target_mmap(0, host_page_size, PROT_READ|PROT_WRITE,
 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 if((long)retval == -1) {
@@ -241,7 +224,7 @@ static void * get_free_page(void)
 static void free_page(void * pageaddr)
 {
-(void)munmap(pageaddr, ALPHA_PAGE_SIZE);
+target_munmap((unsigned long)pageaddr, host_page_size);
 }
 /*
@@ -272,9 +255,9 @@ static unsigned long copy_strings(int argc,char ** argv,unsigned long *page,
 while (len) {
 --p; --tmp; --len;
 if (--offset < 0) {
-offset = p % X86_PAGE_SIZE;
+offset = p % TARGET_PAGE_SIZE;
-if (!(pag = (char *) page[p/X86_PAGE_SIZE]) &&
+if (!(pag = (char *) page[p/TARGET_PAGE_SIZE]) &&
-!(pag = (char *) page[p/X86_PAGE_SIZE] =
+!(pag = (char *) page[p/TARGET_PAGE_SIZE] =
 (unsigned long *) get_free_page())) {
 return 0;
 }
@@ -390,10 +373,10 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
 * it for args, we'll use it for something else...
 */
 size = x86_stack_size;
-if (size < MAX_ARG_PAGES*X86_PAGE_SIZE)
+if (size < MAX_ARG_PAGES*TARGET_PAGE_SIZE)
-size = MAX_ARG_PAGES*X86_PAGE_SIZE;
+size = MAX_ARG_PAGES*TARGET_PAGE_SIZE;
-error = (unsigned long)mmap4k(NULL,
+error = target_mmap(0,
-size + X86_PAGE_SIZE,
+size + host_page_size,
 PROT_READ | PROT_WRITE,
 MAP_PRIVATE | MAP_ANONYMOUS,
 -1, 0);
@@ -402,9 +385,9 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
 exit(-1);
 }
 /* we reserve one extra page at the top of the stack as guard */
-mprotect((void *)(error + size), X86_PAGE_SIZE, PROT_NONE);
+target_mprotect(error + size, host_page_size, PROT_NONE);
-stack_base = error + size - MAX_ARG_PAGES*X86_PAGE_SIZE;
+stack_base = error + size - MAX_ARG_PAGES*TARGET_PAGE_SIZE;
 p += stack_base;
 if (bprm->loader) {
@@ -416,10 +399,10 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
 if (bprm->page[i]) {
 info->rss++;
-memcpy((void *)stack_base, (void *)bprm->page[i], X86_PAGE_SIZE);
+memcpy((void *)stack_base, (void *)bprm->page[i], TARGET_PAGE_SIZE);
 free_page((void *)bprm->page[i]);
 }
-stack_base += X86_PAGE_SIZE;
+stack_base += TARGET_PAGE_SIZE;
 }
 return p;
 }
@@ -427,11 +410,11 @@ unsigned long setup_arg_pages(unsigned long p, struct linux_binprm * bprm,
 static void set_brk(unsigned long start, unsigned long end)
 {
 /* page-align the start and end addresses... */
-start = ALPHA_PAGE_ALIGN(start);
+start = HOST_PAGE_ALIGN(start);
-end = ALPHA_PAGE_ALIGN(end);
+end = HOST_PAGE_ALIGN(end);
 if (end <= start)
 return;
-if((long)mmap4k(start, end - start,
+if(target_mmap(start, end - start,
 PROT_READ | PROT_WRITE | PROT_EXEC,
 MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0) == -1) {
 perror("cannot mmap brk");
@@ -451,9 +434,9 @@ static void padzero(unsigned long elf_bss)
 unsigned long nbyte;
 char * fpnt;
-nbyte = elf_bss & (ALPHA_PAGE_SIZE-1); /* was X86_PAGE_SIZE - JRP */
+nbyte = elf_bss & (host_page_size-1); /* was TARGET_PAGE_SIZE - JRP */
 if (nbyte) {
-nbyte = ALPHA_PAGE_SIZE - nbyte;
+nbyte = host_page_size - nbyte;
 fpnt = (char *) elf_bss;
 do {
 *fpnt++ = 0;
@@ -494,7 +477,7 @@ static unsigned int * create_elf_tables(char *p, int argc, int envc,
 NEW_AUX_ENT (AT_PHDR, (target_ulong)(load_addr + exec->e_phoff));
 NEW_AUX_ENT (AT_PHENT, (target_ulong)(sizeof (struct elf_phdr)));
 NEW_AUX_ENT (AT_PHNUM, (target_ulong)(exec->e_phnum));
-NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(ALPHA_PAGE_SIZE));
+NEW_AUX_ENT (AT_PAGESZ, (target_ulong)(TARGET_PAGE_SIZE));
 NEW_AUX_ENT (AT_BASE, (target_ulong)(interp_load_addr));
 NEW_AUX_ENT (AT_FLAGS, (target_ulong)0);
 NEW_AUX_ENT (AT_ENTRY, load_bias + exec->e_entry);
@@ -554,7 +537,7 @@ static unsigned long load_elf_interp(struct elfhdr * interp_elf_ex,
 /* Now read in all of the header information */
-if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > X86_PAGE_SIZE)
+if (sizeof(struct elf_phdr) * interp_elf_ex->e_phnum > TARGET_PAGE_SIZE)
 return ~0UL;
 elf_phdata = (struct elf_phdr *)
@@ -594,7 +577,7 @@
 if (interp_elf_ex->e_type == ET_DYN) {
 /* in order to avoid harcoding the interpreter load
 address in qemu, we allocate a big enough memory zone */
-error = (unsigned long)mmap4k(NULL, INTERP_MAP_SIZE,
+error = target_mmap(0, INTERP_MAP_SIZE,
 PROT_NONE, MAP_PRIVATE | MAP_ANON,
 -1, 0);
 if (error == -1) {
@@ -620,12 +603,12 @@
 elf_type |= MAP_FIXED;
 vaddr = eppnt->p_vaddr;
 }
-error = (unsigned long)mmap4k(load_addr+X86_ELF_PAGESTART(vaddr),
+error = target_mmap(load_addr+TARGET_ELF_PAGESTART(vaddr),
-eppnt->p_filesz + X86_ELF_PAGEOFFSET(eppnt->p_vaddr),
+eppnt->p_filesz + TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr),
 elf_prot,
 elf_type,
 interpreter_fd,
-eppnt->p_offset - X86_ELF_PAGEOFFSET(eppnt->p_vaddr));
+eppnt->p_offset - TARGET_ELF_PAGEOFFSET(eppnt->p_vaddr));
 if (error > -1024UL) {
 /* Real error */
@@ -665,11 +648,11 @@
 * bss page.
 */
 padzero(elf_bss);
-elf_bss = X86_ELF_PAGESTART(elf_bss + ALPHA_PAGE_SIZE - 1); /* What we have mapped so far */
+elf_bss = TARGET_ELF_PAGESTART(elf_bss + host_page_size - 1); /* What we have mapped so far */
 /* Map the last of the bss segment */
 if (last_bss > elf_bss) {
-mmap4k(elf_bss, last_bss-elf_bss,
+target_mmap(elf_bss, last_bss-elf_bss,
 PROT_READ|PROT_WRITE|PROT_EXEC,
 MAP_FIXED|MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
 }
@@ -742,7 +725,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r
 unsigned int interpreter_type = INTERPRETER_NONE;
 unsigned char ibcs2_interpreter;
 int i;
-void * mapped_addr;
+unsigned long mapped_addr;
 struct elf_phdr * elf_ppnt;
 struct elf_phdr *elf_phdata;
 unsigned long elf_bss, k, elf_brk;
@@ -979,33 +962,32 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r
 is because the brk will follow the loader, and is not movable. */
 /* NOTE: for qemu, we do a big mmap to get enough space
 without harcoding any address */
-error = (unsigned long)mmap4k(NULL, ET_DYN_MAP_SIZE,
+error = target_mmap(0, ET_DYN_MAP_SIZE,
 PROT_NONE, MAP_PRIVATE | MAP_ANON,
 -1, 0);
 if (error == -1) {
 perror("mmap");
 exit(-1);
 }
-load_bias = X86_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
+load_bias = TARGET_ELF_PAGESTART(error - elf_ppnt->p_vaddr);
 }
-error = (unsigned long)mmap4k(
-X86_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
+error = target_mmap(TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr),
 (elf_ppnt->p_filesz +
-X86_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
+TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)),
 elf_prot,
 (MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE),
 bprm->fd,
 (elf_ppnt->p_offset -
-X86_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
+TARGET_ELF_PAGEOFFSET(elf_ppnt->p_vaddr)));
 if (error == -1) {
 perror("mmap");
 exit(-1);
 }
 #ifdef LOW_ELF_STACK
-if (X86_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
+if (TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr) < elf_stack)
-elf_stack = X86_ELF_PAGESTART(elf_ppnt->p_vaddr);
+elf_stack = TARGET_ELF_PAGESTART(elf_ppnt->p_vaddr);
 #endif
 if (!load_addr_set) {
@@ -1013,7 +995,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r
 load_addr = elf_ppnt->p_vaddr - elf_ppnt->p_offset;
 if (elf_ex.e_type == ET_DYN) {
 load_bias += error -
-X86_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
+TARGET_ELF_PAGESTART(load_bias + elf_ppnt->p_vaddr);
 load_addr += load_bias;
 }
 }
@@ -1108,7 +1090,7 @@ static int load_elf_binary(struct linux_binprm * bprm, struct target_pt_regs * r
 and some applications "depend" upon this behavior.
 Since we do not have the power to recompile these, we
 emulate the SVr4 behavior. Sigh. */
-mapped_addr = mmap4k(NULL, ALPHA_PAGE_SIZE, PROT_READ | PROT_EXEC,
+mapped_addr = target_mmap(0, host_page_size, PROT_READ | PROT_EXEC,
 MAP_FIXED | MAP_PRIVATE, -1, 0);
 }
@@ -1137,7 +1119,7 @@ int elf_exec(const char * filename, char ** argv, char ** envp,
 int retval;
 int i;
-bprm.p = X86_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
+bprm.p = TARGET_PAGE_SIZE*MAX_ARG_PAGES-sizeof(unsigned int);
 for (i=0 ; i<MAX_ARG_PAGES ; i++) /* clear page-table */
 bprm.page[i] = 0;
 retval = open(filename, O_RDONLY);

View File

@@ -232,12 +232,15 @@ void usage(void)
 "Linux x86 emulator\n"
 "\n"
 "-h print this help\n"
-"-d activate log (logfile=%s)\n"
 "-L path set the x86 elf interpreter prefix (default=%s)\n"
-"-s size set the x86 stack size in bytes (default=%ld)\n",
+"-s size set the x86 stack size in bytes (default=%ld)\n"
-DEBUG_LOGFILE,
+"\n"
+"debug options:\n"
+"-d activate log (logfile=%s)\n"
+"-p pagesize set the host page size to 'pagesize'\n",
 interp_prefix,
-x86_stack_size);
+x86_stack_size,
+DEBUG_LOGFILE);
 _exit(1);
 }
@@ -284,6 +287,13 @@ int main(int argc, char **argv)
 x86_stack_size *= 1024;
 } else if (!strcmp(r, "L")) {
 interp_prefix = argv[optind++];
+} else if (!strcmp(r, "p")) {
+host_page_size = atoi(argv[optind++]);
+if (host_page_size == 0 ||
+(host_page_size & (host_page_size - 1)) != 0) {
+fprintf(stderr, "page size must be a power of two\n");
+exit(1);
+}
 } else {
 usage();
 }
@@ -311,12 +321,18 @@ int main(int argc, char **argv)
 /* Scan interp_prefix dir for replacement files. */
 init_paths(interp_prefix);
+/* NOTE: we need to init the CPU at this stage to get the
+host_page_size */
+env = cpu_x86_init();
 if (elf_exec(filename, argv+optind, environ, regs, info) != 0) {
 printf("Error loading %s\n", filename);
 _exit(1);
 }
 if (loglevel) {
+page_dump(logfile);
 fprintf(logfile, "start_brk 0x%08lx\n" , info->start_brk);
 fprintf(logfile, "end_code 0x%08lx\n" , info->end_code);
 fprintf(logfile, "start_code 0x%08lx\n" , info->start_code);
@@ -331,7 +347,6 @@ int main(int argc, char **argv)
 syscall_init();
 signal_init();
-env = cpu_x86_init();
 global_env = env;
 /* build Task State */
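Two details in main.c are easy to miss: cpu_x86_init() now runs before elf_exec() so that page_init() has filled in host_page_size before the ELF loader starts using HOST_PAGE_ALIGN(), and '-p' only accepts powers of two because the page mask arithmetic depends on it. A minimal sketch of that power-of-two test (illustration only; is_power_of_two is a hypothetical helper, not part of the patch):

    #include <stdio.h>

    /* a power of two has a single bit set, so clearing the lowest set bit
       with x & (x - 1) must leave zero */
    static int is_power_of_two(unsigned long x)
    {
        return x != 0 && (x & (x - 1)) == 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               is_power_of_two(4096),    /* 1 */
               is_power_of_two(16384),   /* 1 */
               is_power_of_two(12288));  /* 0 */
        return 0;
    }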

linux-user/mmap.c (new file, 370 lines added)
View File

@@ -0,0 +1,370 @@
/*
* mmap support for qemu
*
* Copyright (c) 2003 Fabrice Bellard
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include <sys/mman.h>
#include "qemu.h"
//#define DEBUG_MMAP
/* NOTE: all the constants are the HOST ones */
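/* When host pages are bigger than target pages, one host page can back
   several target pages with different target-level protections.  The head
   and tail host pages therefore get the union of the flags of every target
   page they contain, and only the fully covered middle range gets exactly
   the requested protection. */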
int target_mprotect(unsigned long start, unsigned long len, int prot)
{
unsigned long end, host_start, host_end, addr;
int prot1, ret;
#ifdef DEBUG_MMAP
printf("mprotect: start=0x%lx len=0x%lx prot=%c%c%c\n", start, len,
prot & PROT_READ ? 'r' : '-',
prot & PROT_WRITE ? 'w' : '-',
prot & PROT_EXEC ? 'x' : '-');
#endif
if ((start & ~TARGET_PAGE_MASK) != 0)
return -EINVAL;
len = TARGET_PAGE_ALIGN(len);
end = start + len;
if (end < start)
return -EINVAL;
if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
return -EINVAL;
if (len == 0)
return 0;
host_start = start & host_page_mask;
host_end = HOST_PAGE_ALIGN(end);
if (start > host_start) {
/* handle host page containing start */
prot1 = prot;
for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
ret = mprotect((void *)host_start, host_page_size, prot1 & PAGE_BITS);
if (ret != 0)
return ret;
host_start += host_page_size;
}
if (end < host_end) {
/* handle host page containing end (can be the same as first page) */
prot1 = prot;
for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot1 |= page_get_flags(addr);
}
ret = mprotect((void *)(host_end - host_page_size), host_page_size,
prot1 & PAGE_BITS);
if (ret != 0)
return ret;
host_end -= host_page_size;
}
/* handle the pages in the middle */
if (host_start < host_end) {
ret = mprotect((void *)host_start, host_end - host_start, prot);
if (ret != 0)
return ret;
}
page_set_flags(start, start + len, prot | PAGE_VALID);
return 0;
}
/* map an incomplete host page */
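/* Here "incomplete" means a host page only partially covered by the new
   target mapping.  If the file data cannot simply be mapped, it is read
   into place with pread(), temporarily widening the protection to
   PROT_WRITE when necessary so the read can succeed. */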
int mmap_frag(unsigned long host_start,
unsigned long start, unsigned long end,
int prot, int flags, int fd, unsigned long offset)
{
unsigned long host_end, ret, addr;
int prot1, prot_new;
host_end = host_start + host_page_size;
/* get the protection of the target pages outside the mapping */
prot1 = 0;
for(addr = host_start; addr < host_end; addr++) {
if (addr < start || addr >= end)
prot1 |= page_get_flags(addr);
}
if (prot1 == 0) {
/* no page was there, so we allocate one */
ret = (long)mmap((void *)host_start, host_page_size, prot,
flags | MAP_ANONYMOUS, -1, 0);
if (ret == -1)
return ret;
}
prot1 &= PAGE_BITS;
prot_new = prot | prot1;
if (!(flags & MAP_ANONYMOUS)) {
/* msync() won't work here, so we return an error if write is
possible while it is a shared mapping */
if ((flags & MAP_TYPE) == MAP_SHARED &&
(prot & PROT_WRITE))
return -EINVAL;
/* adjust protection to be able to read */
if (!(prot1 & PROT_WRITE))
mprotect((void *)host_start, host_page_size, prot1 | PROT_WRITE);
/* read the corresponding file data */
pread(fd, (void *)start, end - start, offset);
/* put final protection */
if (prot_new != (prot1 | PROT_WRITE))
mprotect((void *)host_start, host_page_size, prot_new);
} else {
/* just update the protection */
if (prot_new != prot1) {
mprotect((void *)host_start, host_page_size, prot_new);
}
}
return 0;
}
/* NOTE: all the constants are the HOST ones */
long target_mmap(unsigned long start, unsigned long len, int prot,
int flags, int fd, unsigned long offset)
{
unsigned long ret, end, host_start, host_end, retaddr, host_offset, host_len;
#ifdef DEBUG_MMAP
{
printf("mmap: start=0x%lx len=0x%lx prot=%c%c%c flags=",
start, len,
prot & PROT_READ ? 'r' : '-',
prot & PROT_WRITE ? 'w' : '-',
prot & PROT_EXEC ? 'x' : '-');
if (flags & MAP_FIXED)
printf("MAP_FIXED ");
if (flags & MAP_ANONYMOUS)
printf("MAP_ANON ");
switch(flags & MAP_TYPE) {
case MAP_PRIVATE:
printf("MAP_PRIVATE ");
break;
case MAP_SHARED:
printf("MAP_SHARED ");
break;
default:
printf("[MAP_TYPE=0x%x] ", flags & MAP_TYPE);
break;
}
printf("fd=%d offset=%lx\n", fd, offset);
}
#endif
if (offset & ~TARGET_PAGE_MASK)
return -EINVAL;
len = TARGET_PAGE_ALIGN(len);
if (len == 0)
return start;
host_start = start & host_page_mask;
if (!(flags & MAP_FIXED)) {
if (host_page_size != real_host_page_size) {
/* NOTE: this code is only for debugging with '-p' option */
/* reserve a memory area */
host_len = HOST_PAGE_ALIGN(len) + host_page_size - TARGET_PAGE_SIZE;
host_start = (long)mmap((void *)host_start, host_len, PROT_NONE,
MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
if (host_start == -1)
return host_start;
host_end = host_start + host_len;
start = HOST_PAGE_ALIGN(host_start);
end = start + HOST_PAGE_ALIGN(len);
if (start > host_start)
munmap((void *)host_start, start - host_start);
if (end < host_end)
munmap((void *)end, host_end - end);
/* use it as a fixed mapping */
flags |= MAP_FIXED;
} else {
/* if not fixed, no need to do anything */
host_offset = offset & host_page_mask;
host_len = len + offset - host_offset;
start = (long)mmap((void *)host_start, host_len,
prot, flags, fd, host_offset);
if (start == -1)
return start;
/* update start so that it points to the file position at 'offset' */
if (!(flags & MAP_ANONYMOUS))
start += offset - host_offset;
goto the_end1;
}
}
if (start & ~TARGET_PAGE_MASK)
return -EINVAL;
end = start + len;
host_end = HOST_PAGE_ALIGN(end);
/* worst case: we cannot map the file because the offset is not
aligned, so we read it */
if (!(flags & MAP_ANONYMOUS) &&
(offset & ~host_page_mask) != (start & ~host_page_mask)) {
/* msync() won't work here, so we return an error if write is
possible while it is a shared mapping */
if ((flags & MAP_TYPE) == MAP_SHARED &&
(prot & PROT_WRITE))
return -EINVAL;
retaddr = target_mmap(start, len, prot | PROT_WRITE,
MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
-1, 0);
if (retaddr == -1)
return retaddr;
pread(fd, (void *)start, len, offset);
if (!(prot & PROT_WRITE)) {
ret = target_mprotect(start, len, prot);
if (ret != 0)
return ret;
}
goto the_end;
}
/* handle the start of the mapping */
if (start > host_start) {
if (host_end == host_start + host_page_size) {
/* one single host page */
ret = mmap_frag(host_start, start, end,
prot, flags, fd, offset);
if (ret == -1)
return ret;
goto the_end1;
}
ret = mmap_frag(host_start, start, host_start + host_page_size,
prot, flags, fd, offset);
if (ret == -1)
return ret;
host_start += host_page_size;
}
/* handle the end of the mapping */
if (end < host_end) {
ret = mmap_frag(host_end - host_page_size,
host_end - host_page_size, host_end,
prot, flags, fd,
offset + host_end - host_page_size - start);
if (ret == -1)
return ret;
host_end -= host_page_size;
}
/* map the middle (easier) */
if (host_start < host_end) {
ret = (long)mmap((void *)host_start, host_end - host_start,
prot, flags, fd, offset + host_start - start);
if (ret == -1)
return ret;
}
the_end1:
page_set_flags(start, start + len, prot | PAGE_VALID);
the_end:
#ifdef DEBUG_MMAP
page_dump(stdout);
printf("\n");
#endif
return start;
}
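/* Like target_mprotect(), munmap must be careful with partially covered
   host pages: a head or tail host page is only really unmapped when no
   other target page inside it is still mapped. */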
int target_munmap(unsigned long start, unsigned long len)
{
unsigned long end, host_start, host_end, addr;
int prot, ret;
#ifdef DEBUG_MMAP
printf("munmap: start=0x%lx len=0x%lx\n", start, len);
#endif
if (start & ~TARGET_PAGE_MASK)
return -EINVAL;
len = TARGET_PAGE_ALIGN(len);
if (len == 0)
return -EINVAL;
end = start + len;
host_start = start & host_page_mask;
host_end = HOST_PAGE_ALIGN(end);
if (start > host_start) {
/* handle host page containing start */
prot = 0;
for(addr = host_start; addr < start; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
if (prot != 0)
host_start += host_page_size;
}
if (end < host_end) {
/* handle host page containing end (can be the same as first page) */
prot = 0;
for(addr = end; addr < host_end; addr += TARGET_PAGE_SIZE) {
prot |= page_get_flags(addr);
}
if (prot != 0)
host_end -= host_page_size;
}
/* unmap what we can */
if (host_start < host_end) {
ret = munmap((void *)host_start, host_end - host_start);
if (ret != 0)
return ret;
}
page_set_flags(start, start + len, 0);
return 0;
}
/* XXX: currently, we only handle MAP_ANONYMOUS and not MAP_FIXED
blocks which have been allocated starting on a host page */
long target_mremap(unsigned long old_addr, unsigned long old_size,
unsigned long new_size, unsigned long flags,
unsigned long new_addr)
{
int prot;
/* XXX: use 5 args syscall */
new_addr = (long)mremap((void *)old_addr, old_size, new_size, flags);
if (new_addr == -1)
return new_addr;
prot = page_get_flags(old_addr);
page_set_flags(old_addr, old_addr + old_size, 0);
page_set_flags(new_addr, new_addr + new_size, prot | PAGE_VALID);
return new_addr;
}
int target_msync(unsigned long start, unsigned long len, int flags)
{
unsigned long end;
if (start & ~TARGET_PAGE_MASK)
return -EINVAL;
len = TARGET_PAGE_ALIGN(len);
if (len == 0)
return 0;
end = start + len;
start &= host_page_mask;
return msync((void *)start, len, flags);
}
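The start/end rounding in target_mmap() and target_munmap() is the heart of the emulation. Below is a small standalone sketch of it (illustration only, assuming 4 KiB target pages and a hypothetical 16 KiB host page size, as with '-p 16384'); it prints which host pages become the head fragment, the directly mapped middle, and the tail fragment for a sample request:

    #include <stdio.h>

    #define TARGET_PAGE_SIZE 4096UL
    #define TARGET_PAGE_MASK (~(TARGET_PAGE_SIZE - 1))
    #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)

    int main(void)
    {
        unsigned long host_page_size = 16384;               /* assumed '-p 16384' */
        unsigned long host_page_mask = ~(host_page_size - 1);
        unsigned long start = 0x40003000, len = 0x6000;     /* sample request */
        unsigned long end, host_start, host_end;

        len = TARGET_PAGE_ALIGN(len);
        end = start + len;                                         /* 0x40009000 */
        host_start = start & host_page_mask;                       /* 0x40000000 */
        host_end = (end + host_page_size - 1) & host_page_mask;    /* 0x4000c000 */

        if (start > host_start) {                   /* head host page is shared */
            printf("head fragment:   0x%lx-0x%lx\n", host_start, host_start + host_page_size);
            host_start += host_page_size;
        }
        if (end < host_end) {                       /* tail host page is shared */
            printf("tail fragment:   0x%lx-0x%lx\n", host_end - host_page_size, host_end);
            host_end -= host_page_size;
        }
        if (host_start < host_end)
            printf("mapped directly: 0x%lx-0x%lx\n", host_start, host_end);
        return 0;
    }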

View File

@@ -88,4 +88,14 @@ void handle_vm86_fault(CPUX86State *env);
 int do_vm86(CPUX86State *env, long subfunction,
 struct target_vm86plus_struct * target_v86);
+/* mmap.c */
+int target_mprotect(unsigned long start, unsigned long len, int prot);
+long target_mmap(unsigned long start, unsigned long len, int prot,
+int flags, int fd, unsigned long offset);
+int target_munmap(unsigned long start, unsigned long len);
+long target_mremap(unsigned long old_addr, unsigned long old_size,
+unsigned long new_size, unsigned long flags,
+unsigned long new_addr);
+int target_msync(unsigned long start, unsigned long len, int flags);
 #endif

View File

@@ -64,11 +64,6 @@
 //#define DEBUG
-#ifndef PAGE_SIZE
-#define PAGE_SIZE 4096
-#define PAGE_MASK ~(PAGE_SIZE - 1)
-#endif
 //#include <linux/msdos_fs.h>
 #define VFAT_IOCTL_READDIR_BOTH _IOR('r', 1, struct dirent [2])
 #define VFAT_IOCTL_READDIR_SHORT _IOR('r', 2, struct dirent [2])
@@ -153,7 +148,7 @@ static long do_brk(char *new_brk)
 if (new_brk < target_original_brk)
 return -ENOMEM;
-brk_page = (char *)(((unsigned long)target_brk + PAGE_SIZE - 1) & PAGE_MASK);
+brk_page = (char *)HOST_PAGE_ALIGN((unsigned long)target_brk);
 /* If the new brk is less than this, set it and we're done... */
 if (new_brk < brk_page) {
@@ -162,11 +157,10 @@ static long do_brk(char *new_brk)
 }
 /* We need to allocate more memory after the brk... */
-new_alloc_size = ((new_brk - brk_page + 1)+(PAGE_SIZE-1)) & PAGE_MASK;
+new_alloc_size = HOST_PAGE_ALIGN(new_brk - brk_page + 1);
-mapped_addr = get_errno((long)mmap((caddr_t)brk_page, new_alloc_size,
+mapped_addr = get_errno(target_mmap((unsigned long)brk_page, new_alloc_size,
 PROT_READ|PROT_WRITE,
 MAP_ANON|MAP_FIXED|MAP_PRIVATE, 0, 0));
 if (is_error(mapped_addr)) {
 return mapped_addr;
 } else {
@@ -1709,7 +1703,7 @@ long do_syscall(void *cpu_env, int num, long arg1, long arg2, long arg3,
 v4 = tswap32(vptr[3]);
 v5 = tswap32(vptr[4]);
 v6 = tswap32(vptr[5]);
-ret = get_errno((long)mmap((void *)v1, v2, v3, v4, v5, v6));
+ret = get_errno(target_mmap(v1, v2, v3, v4, v5, v6));
 }
 break;
 #endif
@@ -1718,16 +1712,16 @@
 #else
 case TARGET_NR_mmap:
 #endif
-ret = get_errno((long)mmap((void *)arg1, arg2, arg3, arg4, arg5, arg6));
+ret = get_errno(target_mmap(arg1, arg2, arg3, arg4, arg5, arg6));
 break;
 case TARGET_NR_munmap:
-ret = get_errno(munmap((void *)arg1, arg2));
+ret = get_errno(target_munmap(arg1, arg2));
 break;
 case TARGET_NR_mprotect:
-ret = get_errno(mprotect((void *)arg1, arg2, arg3));
+ret = get_errno(target_mprotect(arg1, arg2, arg3));
 break;
 case TARGET_NR_mremap:
-ret = get_errno((long)mremap((void *)arg1, arg2, arg3, arg4));
+ret = get_errno(target_mremap(arg1, arg2, arg3, arg4, arg5));
 break;
 case TARGET_NR_msync:
 ret = get_errno(msync((void *)arg1, arg2, arg3));
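The syscall dispatch above keeps the kernel convention of returning either a result or a negative errno. get_errno() and is_error() are defined earlier in syscall.c and are not part of this hunk; a plausible minimal form, shown only as an assumption, is:

    /* hedged sketch of the helpers used above; the real definitions live
       elsewhere in syscall.c and may differ in detail */
    #include <errno.h>

    static inline long get_errno(long ret)
    {
        return (ret == -1) ? -errno : ret;
    }

    static inline int is_error(long ret)
    {
        return (unsigned long)ret >= (unsigned long)(-4096);
    }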

View File

@@ -3845,6 +3845,7 @@ CPUX86State *cpu_x86_init(void)
 if (!inited) {
 inited = 1;
 optimize_flags_init();
+page_init();
 }
 return env;
 }