/*  Actual hypercalls, which allow guests to actually do something.
    Copyright (C) 2006 Rusty Russell IBM Corporation

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
*/
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <irq_vectors.h>
#include "lg.h"

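/* This is the core hypercall dispatcher: the Guest's eax holds the hypercall
 * number and edx, ebx and ecx hold the arguments.  For example, a Guest crash
 * report arrives with eax == LHCALL_CRASH and edx pointing at the message
 * string inside the Guest. */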
static void do_hcall(struct lguest *lg, struct lguest_regs *regs)
{
        switch (regs->eax) {
        case LHCALL_FLUSH_ASYNC:
                /* Nothing to do: the async hypercall ring was already run by
                 * do_async_hcalls() before we got here. */
                break;
        case LHCALL_LGUEST_INIT:
                /* We only get here after initialize() has run, so a second
                 * LGUEST_INIT is an error. */
                kill_guest(lg, "already have lguest_data");
                break;
        case LHCALL_CRASH: {
                /* The Guest tells us it has hit a fatal error: fetch its
                 * message (edx points at it) and kill it. */
                char msg[128];
                lgread(lg, msg, regs->edx, sizeof(msg));
                msg[sizeof(msg)-1] = '\0';
                kill_guest(lg, "CRASH: %s", msg);
                break;
        }
        case LHCALL_FLUSH_TLB:
                /* edx says whether to drop the whole pagetable or just the
                 * userspace mappings. */
                if (regs->edx)
                        guest_pagetable_clear_all(lg);
                else
                        guest_pagetable_flush_user(lg);
                break;
        case LHCALL_GET_WALLCLOCK: {
                struct timespec ts;
                ktime_get_real_ts(&ts);
                regs->eax = ts.tv_sec;
                break;
        }
        case LHCALL_BIND_DMA:
                regs->eax = bind_dma(lg, regs->edx, regs->ebx,
                                     regs->ecx >> 8, regs->ecx & 0xFF);
                break;
        case LHCALL_SEND_DMA:
                send_dma(lg, regs->edx, regs->ebx);
                break;
        case LHCALL_LOAD_GDT:
                load_guest_gdt(lg, regs->edx, regs->ebx);
                break;
        case LHCALL_LOAD_IDT_ENTRY:
                load_guest_idt_entry(lg, regs->edx, regs->ebx, regs->ecx);
                break;
        case LHCALL_NEW_PGTABLE:
                guest_new_pagetable(lg, regs->edx);
                break;
        case LHCALL_SET_STACK:
                guest_set_stack(lg, regs->edx, regs->ebx, regs->ecx);
                break;
        case LHCALL_SET_PTE:
                guest_set_pte(lg, regs->edx, regs->ebx, mkgpte(regs->ecx));
                break;
        case LHCALL_SET_PMD:
                guest_set_pmd(lg, regs->edx, regs->ebx);
                break;
        case LHCALL_LOAD_TLS:
                guest_load_tls(lg, regs->edx);
                break;
        case LHCALL_SET_CLOCKEVENT:
                guest_set_clockevent(lg, regs->edx);
                break;
        case LHCALL_TS:
                lg->ts = regs->edx;
                break;
        case LHCALL_HALT:
                /* The Guest has nothing to do: remember that it is halted. */
                lg->halted = 1;
                break;
        default:
                kill_guest(lg, "Bad hypercall %li\n", regs->eax);
        }
}

/* We always do the queued calls before the actual hypercall. */
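/* The Guest queues asynchronous hypercalls in a ring inside its "struct
 * lguest_data": "hcalls" holds the register values for each call and
 * "hcall_status" holds one status byte per slot.  A status of 0xFF means the
 * slot is empty; anything else means a call is waiting.  Once we have run a
 * call we write 0xFF back so the Guest can reuse the slot. */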
static void do_async_hcalls(struct lguest *lg)
{
        unsigned int i;
        u8 st[LHCALL_RING_SIZE];

        /* Copy the Guest's ring of status bytes in one go. */
        if (copy_from_user(&st, &lg->lguest_data->hcall_status, sizeof(st)))
                return;

        for (i = 0; i < ARRAY_SIZE(st); i++) {
                struct lguest_regs regs;
                /* We remember where we were up to from last time. */
                unsigned int n = lg->next_hcall;

                /* 0xFF means this slot is empty: no more calls to do. */
                if (st[n] == 0xFF)
                        break;

                /* Wrap around when we reach the end of the ring. */
                if (++lg->next_hcall == LHCALL_RING_SIZE)
                        lg->next_hcall = 0;

                /* Fetch the registers the Guest stored for this call. */
                if (get_user(regs.eax, &lg->lguest_data->hcalls[n].eax)
                    || get_user(regs.edx, &lg->lguest_data->hcalls[n].edx)
                    || get_user(regs.ecx, &lg->lguest_data->hcalls[n].ecx)
                    || get_user(regs.ebx, &lg->lguest_data->hcalls[n].ebx)) {
                        kill_guest(lg, "Fetching async hypercalls");
                        break;
                }

                do_hcall(lg, &regs);
                /* Mark the slot empty again so the Guest can reuse it. */
                if (put_user(0xFF, &lg->lguest_data->hcall_status[n])) {
                        kill_guest(lg, "Writing result for async hypercall");
                        break;
                }

                /* If that hypercall left a DMA pending, stop: it has to be
                 * dealt with before we run any more calls. */
                if (lg->dma_is_pending)
                        break;
        }
}

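/* The Guest's very first hypercall must be LHCALL_LGUEST_INIT, which tells us
 * (in edx) where its "struct lguest_data" lives.  We check that address, read
 * the Guest's noirq_start/noirq_end values, and fill in the fields the Guest
 * needs: how much memory we reserve, the TSC speed it may use, and its
 * guest id. */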
static void initialize(struct lguest *lg)
{
        u32 tsc_speed;

        if (lg->regs->eax != LHCALL_LGUEST_INIT) {
                kill_guest(lg, "hypercall %li before LGUEST_INIT",
                           lg->regs->eax);
                return;
        }

        /* We only tell the guest to use the TSC if it's reliable. */
        if (boot_cpu_has(X86_FEATURE_CONSTANT_TSC) && !check_tsc_unstable())
                tsc_speed = tsc_khz;
        else
                tsc_speed = 0;

        lg->lguest_data = (struct lguest_data __user *)lg->regs->edx;
        /* We check here so we can simply copy_to_user/from_user */
        if (!lguest_address_ok(lg, lg->regs->edx, sizeof(*lg->lguest_data))) {
                kill_guest(lg, "bad guest page %p", lg->lguest_data);
                return;
        }
        if (get_user(lg->noirq_start, &lg->lguest_data->noirq_start)
            || get_user(lg->noirq_end, &lg->lguest_data->noirq_end)
            /* We reserve the top pgd entry. */
            || put_user(4U*1024*1024, &lg->lguest_data->reserve_mem)
            || put_user(tsc_speed, &lg->lguest_data->tsc_khz)
            || put_user(lg->guestid, &lg->lguest_data->guestid))
                kill_guest(lg, "bad guest page %p", lg->lguest_data);

        /* This is the one case where the above accesses might have
         * been the first write to a Guest page.  This may have caused
         * a copy-on-write fault, but the Guest might be referring to
         * the old (read-only) page. */
        guest_pagetable_clear_all(lg);
}

/* Even if we go out to userspace and come back, we don't want to do
 * the hypercall again: overwriting the trap number means the
 * LGUEST_TRAP_ENTRY checks in do_hypercalls() no longer match. */
static void clear_hcall(struct lguest *lg)
{
        lg->regs->trapnum = 255;
}

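/* This is the top-level hypercall entry point.  Until the Guest has
 * registered its "struct lguest_data", the only hypercall we accept is the
 * LHCALL_LGUEST_INIT handled by initialize().  After that, we first run any
 * queued asynchronous calls, then the hypercall which caused this trap,
 * unless an earlier call left a DMA pending. */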
void do_hypercalls(struct lguest *lg)
{
        if (unlikely(!lg->lguest_data)) {
                if (lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
                        initialize(lg);
                        clear_hcall(lg);
                }
                return;
        }

        do_async_hcalls(lg);
        if (!lg->dma_is_pending && lg->regs->trapnum == LGUEST_TRAP_ENTRY) {
                do_hcall(lg, lg->regs);
                clear_hcall(lg);
        }
}