/*P:500 Just as userspace programs request kernel operations through a system
 * call, the Guest requests Host operations through a "hypercall".  You might
 * notice this nomenclature doesn't really follow any logic, but the name has
 * been around for long enough that we're stuck with it.  As you'd expect, this
 * code is basically one big switch statement. :*/
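
/* For context, the Guest triggers a hypercall by loading the call number and
 * arguments into registers and trapping into the Host with an "int"
 * instruction on a reserved vector.  This is only an illustrative sketch of
 * the Guest side (the real definition lives in the Guest headers, e.g.
 * asm/lguest_hcall.h, and the exact trap number and argument count vary
 * between versions):
 *
 *	static inline unsigned long
 *	hcall(unsigned long call, unsigned long arg1,
 *	      unsigned long arg2, unsigned long arg3)
 *	{
 *		// "int" is the Intel instruction to trigger a trap.
 *		asm volatile("int $" __stringify(LGUEST_TRAP_ENTRY)
 *			     : "=a"(call)
 *			     : "a"(call), "b"(arg1), "c"(arg2), "d"(arg3)
 *			     : "memory");
 *		return call;
 *	}
 */
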
/*  Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
*/
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/ktime.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "lg.h"

/*H:120 This is the core hypercall routine: where the Guest gets what it wants.
 * Or gets killed.  Or, in the case of LHCALL_SHUTDOWN, both. */
static void do_hcall(struct lg_cpu *cpu, struct hcall_args *args)
{
        switch (args->arg0) {
        case LHCALL_FLUSH_ASYNC:
                /* This call does nothing, except by breaking out of the Guest
                 * it makes us process all the asynchronous hypercalls. */
                break;
        case LHCALL_SEND_INTERRUPTS:
                /* This call does nothing too, but by breaking out of the Guest
                 * it makes us process any pending interrupts. */
                break;
        case LHCALL_LGUEST_INIT:
                /* You can't get here unless you're already initialized.  Don't
                 * do that. */
                kill_guest(cpu, "already have lguest_data");
                break;
        case LHCALL_SHUTDOWN: {
                /* Shutdown is such a trivial hypercall that we do it in four
                 * lines right here. */
                char msg[128];
                /* If the lgread fails, it will call kill_guest() itself; the
                 * kill_guest() with the message will be ignored. */
                __lgread(cpu, msg, args->arg1, sizeof(msg));
                msg[sizeof(msg)-1] = '\0';
                kill_guest(cpu, "CRASH: %s", msg);
                if (args->arg2 == LGUEST_SHUTDOWN_RESTART)
                        cpu->lg->dead = ERR_PTR(-ERESTART);
                break;
        }
        case LHCALL_FLUSH_TLB:
                /* FLUSH_TLB comes in two flavors, depending on the
                 * argument: */
                if (args->arg1)
                        guest_pagetable_clear_all(cpu);
                else
                        guest_pagetable_flush_user(cpu);
                break;

        /* All these calls simply pass the arguments through to the right
         * routines. */
        case LHCALL_NEW_PGTABLE:
                guest_new_pagetable(cpu, args->arg1);
                break;
        case LHCALL_SET_STACK:
                guest_set_stack(cpu, args->arg1, args->arg2, args->arg3);
                break;
        case LHCALL_SET_PTE:
#ifdef CONFIG_X86_PAE
                guest_set_pte(cpu, args->arg1, args->arg2,
                              __pte(args->arg3 | (u64)args->arg4 << 32));
#else
                guest_set_pte(cpu, args->arg1, args->arg2, __pte(args->arg3));
#endif
                break;
        case LHCALL_SET_PGD:
                guest_set_pgd(cpu->lg, args->arg1, args->arg2);
                break;
#ifdef CONFIG_X86_PAE
        case LHCALL_SET_PMD:
                guest_set_pmd(cpu->lg, args->arg1, args->arg2);
                break;
#endif
        case LHCALL_SET_CLOCKEVENT:
                guest_set_clockevent(cpu, args->arg1);
                break;
        case LHCALL_TS:
                /* This sets the TS flag, as we saw used in run_guest(). */
                cpu->ts = args->arg1;
                break;
        case LHCALL_HALT:
                /* Similarly, this sets the halted flag for run_guest(). */
                cpu->halted = 1;
                break;
        case LHCALL_NOTIFY:
                cpu->pending_notify = args->arg1;
                break;
        default:
                /* It should be an architecture-specific hypercall. */
                if (lguest_arch_do_hcall(cpu, args))
                        kill_guest(cpu, "Bad hypercall %li\n", args->arg0);
        }
}
/*:*/

/*H:124 Asynchronous hypercalls are easy: we just look in the array in the
 * Guest's "struct lguest_data" to see if any new ones are marked "ready".
 *
 * We are careful to do these in order: obviously we respect the order the
 * Guest put them in the ring, but we also promise the Guest that they will
 * happen before any normal hypercall (which is why we check this before
 * checking for a normal hcall). */
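
/* For context, the Guest side of this ring works roughly as sketched below:
 * it finds a free slot (status 0xFF), fills in the arguments, and only then
 * marks the slot "ready", which is what keeps the ordering promise above
 * honest.  This is only an illustrative sketch of the Guest code (the helper
 * name async_hcall(), the argument count and the interrupt handling are
 * paraphrased from the Guest side and vary between versions):
 *
 *	static void async_hcall(unsigned long call, unsigned long arg1,
 *				unsigned long arg2, unsigned long arg3)
 *	{
 *		static unsigned int next_call;
 *
 *		// The real code also disables interrupts around this.
 *		if (lguest_data.hcall_status[next_call] != 0xFF) {
 *			// Ring full: fall back to a synchronous hypercall,
 *			// which makes the Host drain the ring first.
 *			hcall(call, arg1, arg2, arg3);
 *		} else {
 *			lguest_data.hcalls[next_call].arg0 = call;
 *			lguest_data.hcalls[next_call].arg1 = arg1;
 *			lguest_data.hcalls[next_call].arg2 = arg2;
 *			lguest_data.hcalls[next_call].arg3 = arg3;
 *			// Arguments must land before the slot is marked
 *			// ready, hence the write barrier.
 *			wmb();
 *			lguest_data.hcall_status[next_call] = 0;
 *			if (++next_call == LHCALL_RING_SIZE)
 *				next_call = 0;
 *		}
 *	}
 */
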
static void do_async_hcalls(struct lg_cpu *cpu)
{
        unsigned int i;
        u8 st[LHCALL_RING_SIZE];

        /* For simplicity, we copy the entire call status array in at once. */
        if (copy_from_user(&st, &cpu->lg->lguest_data->hcall_status, sizeof(st)))
                return;

        /* We process "struct lguest_data"'s hcalls[] ring once. */
        for (i = 0; i < ARRAY_SIZE(st); i++) {
                struct hcall_args args;
                /* We remember where we were up to from last time.  This makes
                 * sure that the hypercalls are done in the order the Guest
                 * places them in the ring. */
                unsigned int n = cpu->next_hcall;

                /* 0xFF means there's no call here (yet). */
                if (st[n] == 0xFF)
                        break;

                /* OK, we have a hypercall.  Increment the "next_hcall" cursor,
                 * and wrap back to 0 if we reach the end. */
                if (++cpu->next_hcall == LHCALL_RING_SIZE)
                        cpu->next_hcall = 0;

                /* Copy the hypercall arguments into a local copy of the
                 * hcall_args struct. */
                if (copy_from_user(&args, &cpu->lg->lguest_data->hcalls[n],
                                   sizeof(struct hcall_args))) {
                        kill_guest(cpu, "Fetching async hypercalls");
                        break;
                }

                /* Do the hypercall, same as a normal one. */
                do_hcall(cpu, &args);

                /* Mark the hypercall done. */
                if (put_user(0xFF, &cpu->lg->lguest_data->hcall_status[n])) {
                        kill_guest(cpu, "Writing result for async hypercall");
                        break;
                }

                /* Stop doing hypercalls if they want to notify the Launcher:
                 * it needs to service this first. */
                if (cpu->pending_notify)
                        break;
        }
}

/* Last of all, we look at what happens first of all.  The very first time the
 * Guest makes a hypercall, we end up here to set things up: */
static void initialize(struct lg_cpu *cpu)
{
        /* You can't do anything until you're initialized.  The Guest knows the
         * rules, so we're unforgiving here. */
        if (cpu->hcall->arg0 != LHCALL_LGUEST_INIT) {
                kill_guest(cpu, "hypercall %li before INIT", cpu->hcall->arg0);
                return;
        }

        if (lguest_arch_init_hypercalls(cpu))
                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

        /* The Guest tells us where we're not to deliver interrupts by putting
         * the range of addresses into "struct lguest_data". */
        if (get_user(cpu->lg->noirq_start, &cpu->lg->lguest_data->noirq_start)
            || get_user(cpu->lg->noirq_end, &cpu->lg->lguest_data->noirq_end))
                kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data);

        /* We write the current time into the Guest's data page once so it can
         * set its clock. */
        write_timestamp(cpu);

        /* page_tables.c will also do some setup. */
        page_table_guest_data_init(cpu);

        /* This is the one case where the above accesses might have been the
         * first write to a Guest page.  This may have caused a copy-on-write
         * fault, but the old page might be (read-only) in the Guest
         * pagetable. */
        guest_pagetable_clear_all(cpu);
}
/*:*/

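/* For context, the Guest kicks all of this off with a single hypercall made
 * very early in its boot, handing us the physical address of its
 * "struct lguest_data".  This is only an illustrative sketch of the Guest
 * side (the call lives in the Guest's boot code and the number of trailing
 * zero arguments varies by version):
 *
 *	hcall(LHCALL_LGUEST_INIT, __pa(&lguest_data), 0, 0);
 */
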
/*M:013 If a Guest reads from a page (so creates a mapping) that it has never
 * written to, and then the Launcher writes to it (ie. the output of a virtual
 * device), the Guest will still see the old page.  In practice, this never
 * happens: why would the Guest read a page which it has never written to?  But
 * a similar scenario might one day bite us, so it's worth mentioning. :*/

/*H:100
 * Hypercalls
 *
 * Remember from the Guest, hypercalls come in two flavors: normal and
 * asynchronous.  This file handles both types.
 */
void do_hypercalls(struct lg_cpu *cpu)
{
        /* Not initialized yet?  This hypercall must do it. */
        if (unlikely(!cpu->lg->lguest_data)) {
                /* Set up the "struct lguest_data". */
                initialize(cpu);
                /* Hcall is done. */
                cpu->hcall = NULL;
                return;
        }

        /* The Guest has initialized.
         *
         * Look in the hypercall ring for the async hypercalls: */
        do_async_hcalls(cpu);

        /* If we stopped reading the hypercall ring because the Guest did a
         * NOTIFY to the Launcher, we want to return now.  Otherwise we do
         * the hypercall. */
        if (!cpu->pending_notify) {
                do_hcall(cpu, cpu->hcall);
                /* Tricky point: we reset the hcall pointer to mark the
                 * hypercall as "done".  We use the hcall pointer rather than
                 * the trap number to indicate a hypercall is pending.
                 * Normally it doesn't matter: the Guest will run again and
                 * update the trap number before we come back here.
                 *
                 * However, if we are signalled or the Guest sends I/O to the
                 * Launcher, the run_guest() loop will exit without running the
                 * Guest.  When it comes back it would try to re-run the
                 * hypercall.  Finding that bug sucked. */
                cpu->hcall = NULL;
        }
}

/* This routine supplies the Guest with time: it's used for wallclock time at
 * initial boot and as a rough time source if the TSC isn't available. */
void write_timestamp(struct lg_cpu *cpu)
{
        struct timespec now;
        ktime_get_real_ts(&now);
        if (copy_to_user(&cpu->lg->lguest_data->time,
                         &now, sizeof(struct timespec)))
                kill_guest(cpu, "Writing timestamp");
}