lguest: per-cpu run guest

This patch makes the run_guest() routine use the lg_cpu struct.
This is required since, in an SMP guest environment, there is no
longer a notion of "running the guest", but rather of "running a vcpu".

Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Author:    Glauber de Oliveira Costa
Date:      2008-01-07 11:05:25 -02:00
Committer: Rusty Russell
Parent:    4dcc53da49
Commit:    d0953d42c3

4 changed files with 25 additions and 11 deletions
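
For orientation: the change relies on each guest carrying an array of vcpus, with
every vcpu holding a back-pointer to its owning guest. The real definitions live in
lg.h and are added earlier in this series; the sketch below only approximates the
fields this patch touches (lg->nr_cpus, lg->cpus[], cpu->lg, lg->dead), so the vcpu
limit and exact field layout here are placeholders, not the actual declarations.

/* Illustrative sketch only; NR_LGUEST_CPUS and the precise field names
 * are approximations of what this series adds to lg.h. */
struct lguest;				/* the whole guest */

struct lg_cpu {
	unsigned int id;		/* index of this vcpu */
	struct lguest *lg;		/* back-pointer: cpu->lg */
	/* ... per-vcpu state: registers, pending traps, ... */
};

#define NR_LGUEST_CPUS 2		/* hypothetical limit */

struct task_struct;

struct lguest {
	struct task_struct *tsk;	/* the Launcher task that owns us */
	unsigned int nr_cpus;		/* how many vcpus are in use */
	struct lg_cpu cpus[NR_LGUEST_CPUS];
	const char *dead;		/* non-NULL once the Guest dies */
	unsigned long pending_notify;	/* notification address for read() */
	unsigned long pfn_limit;	/* guest-wide memory limit */
	/* ... other guest-wide state: page tables, ... */
};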

--- a/drivers/lguest/core.c
+++ b/drivers/lguest/core.c

@@ -174,8 +174,10 @@ void __lgwrite(struct lguest *lg, unsigned long addr, const void *b,
 /*H:030 Let's jump straight to the main loop which runs the Guest.
  * Remember, this is called by the Launcher reading /dev/lguest, and we keep
  * going around and around until something interesting happens. */
-int run_guest(struct lguest *lg, unsigned long __user *user)
+int run_guest(struct lg_cpu *cpu, unsigned long __user *user)
 {
+	struct lguest *lg = cpu->lg;
+
 	/* We stop running once the Guest is dead. */
 	while (!lg->dead) {
 		/* First we run any hypercalls the Guest wants done. */
@@ -226,7 +228,7 @@ int run_guest(struct lguest *lg, unsigned long __user *user)
 		local_irq_disable();
 
 		/* Actually run the Guest until something happens. */
-		lguest_arch_run_guest(lg);
+		lguest_arch_run_guest(cpu);
 
 		/* Now we're ready to be interrupted or moved to other CPUs */
 		local_irq_enable();

--- a/drivers/lguest/lg.h
+++ b/drivers/lguest/lg.h

@@ -126,7 +126,7 @@ void __lgwrite(struct lguest *, unsigned long, const void *, unsigned);
 } while(0)
 /* (end of memory access helper routines) :*/
 
-int run_guest(struct lguest *lg, unsigned long __user *user);
+int run_guest(struct lg_cpu *cpu, unsigned long __user *user);
 
 /* Helper macros to obtain the first 12 or the last 20 bits, this is only the
  * first step in the migration to the kernel types. pte_pfn is already defined
@@ -177,7 +177,7 @@ void page_table_guest_data_init(struct lguest *lg);
 /* <arch>/core.c: */
 void lguest_arch_host_init(void);
 void lguest_arch_host_fini(void);
-void lguest_arch_run_guest(struct lguest *lg);
+void lguest_arch_run_guest(struct lg_cpu *cpu);
 void lguest_arch_handle_trap(struct lguest *lg);
 int lguest_arch_init_hypercalls(struct lguest *lg);
 int lguest_arch_do_hcall(struct lguest *lg, struct hcall_args *args);
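
These prototype changes ripple through the whole run path; after this commit the
call chain looks roughly as follows (a sketch, with file placement inferred from
the hunks in this commit):

/* Sketch of the per-vcpu run path after this patch:
 *
 *   read()                                  (lguest_user.c)
 *     -> run_guest(cpu, user)               (core.c)
 *          -> lguest_arch_run_guest(cpu)    (<arch>/core.c)
 *               -> run_guest_once(cpu, pages)
 *                    -> copy_in_guest_info(cpu, pages)
 *
 * struct lguest is recovered via cpu->lg wherever it is still needed.
 */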

--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c

@@ -55,11 +55,19 @@ static int user_send_irq(struct lguest *lg, const unsigned long __user *input)
 static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
 {
 	struct lguest *lg = file->private_data;
+	struct lg_cpu *cpu;
+	unsigned int cpu_id = *o;
 
 	/* You must write LHREQ_INITIALIZE first! */
 	if (!lg)
 		return -EINVAL;
 
+	/* Watch out for arbitrary vcpu indexes! */
+	if (cpu_id >= lg->nr_cpus)
+		return -EINVAL;
+
+	cpu = &lg->cpus[cpu_id];
+
 	/* If you're not the task which owns the Guest, go away. */
 	if (current != lg->tsk)
 		return -EPERM;
@@ -85,7 +93,7 @@ static ssize_t read(struct file *file, char __user *user, size_t size,loff_t*o)
 		lg->pending_notify = 0;
 
 	/* Run the Guest until something interesting happens. */
-	return run_guest(lg, (unsigned long __user *)user);
+	return run_guest(cpu, (unsigned long __user *)user);
 }
 
 static int lg_cpu_start(struct lg_cpu *cpu, unsigned id, unsigned long start_ip)
@@ -147,7 +155,7 @@ static int initialize(struct file *file, const unsigned long __user *input)
 	lg->pfn_limit = args[1];
 
 	/* This is the first cpu */
-	err = cpu_start(&lg->cpus[0], 0, args[3]);
+	err = lg_cpu_start(&lg->cpus[0], 0, args[3]);
 	if (err)
 		goto release_guest;
 
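
The new bounds check above implies that the vcpu to run is selected by the offset
the Launcher passes when reading /dev/lguest. Below is a minimal, hypothetical
Launcher-side sketch, not the real Launcher code: the file descriptor setup via
LHREQ_INITIALIZE is assumed to have already happened, and run_vcpu_once() is an
invented helper name used purely for illustration.

#define _GNU_SOURCE
#include <unistd.h>

/* Run one iteration of the given vcpu: pread()'s offset argument becomes
 * *o in the kernel's read() above, i.e. the vcpu index to run.  The buffer
 * receives the Guest's notification address, if any. */
static ssize_t run_vcpu_once(int lguest_fd, unsigned int vcpu_id)
{
	unsigned long notify_addr = 0;

	return pread(lguest_fd, &notify_addr, sizeof(notify_addr),
		     (off_t)vcpu_id);
}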

--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c

@@ -73,8 +73,9 @@ static DEFINE_PER_CPU(struct lguest *, last_guest);
  * since it last ran. We saw this set in interrupts_and_traps.c and
  * segments.c.
  */
-static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
+static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
+	struct lguest *lg = cpu->lg;
 	/* Copying all this data can be quite expensive. We usually run the
 	 * same Guest we ran last time (and that Guest hasn't run anywhere else
 	 * meanwhile). If that's not the case, we pretend everything in the
@@ -113,14 +114,15 @@ static void copy_in_guest_info(struct lguest *lg, struct lguest_pages *pages)
 }
 
 /* Finally: the code to actually call into the Switcher to run the Guest. */
-static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
+static void run_guest_once(struct lg_cpu *cpu, struct lguest_pages *pages)
 {
 	/* This is a dummy value we need for GCC's sake. */
 	unsigned int clobber;
+	struct lguest *lg = cpu->lg;
 
 	/* Copy the guest-specific information into this CPU's "struct
 	 * lguest_pages". */
-	copy_in_guest_info(lg, pages);
+	copy_in_guest_info(cpu, pages);
 
 	/* Set the trap number to 256 (impossible value). If we fault while
 	 * switching to the Guest (bad segment registers or bug), this will
@@ -161,8 +163,10 @@ static void run_guest_once(struct lguest *lg, struct lguest_pages *pages)
 
 /*H:040 This is the i386-specific code to setup and run the Guest. Interrupts
  * are disabled: we own the CPU. */
-void lguest_arch_run_guest(struct lguest *lg)
+void lguest_arch_run_guest(struct lg_cpu *cpu)
 {
+	struct lguest *lg = cpu->lg;
+
 	/* Remember the awfully-named TS bit? If the Guest has asked to set it
 	 * we set it now, so we can trap and pass that trap to the Guest if it
 	 * uses the FPU. */
@@ -180,7 +184,7 @@ void lguest_arch_run_guest(struct lguest *lg)
 	/* Now we actually run the Guest. It will return when something
 	 * interesting happens, and we can examine its registers to see what it
 	 * was doing. */
-	run_guest_once(lg, lguest_pages(raw_smp_processor_id()));
+	run_guest_once(cpu, lguest_pages(raw_smp_processor_id()));
 
 	/* Note that the "regs" pointer contains two extra entries which are
 	 * not really registers: a trap number which says what interrupt or