/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "asm/uaccess.h"
#include "asm/ptrace.h"
#include "asm/segment.h"
#include "asm/smp.h"
#include "asm/desc.h"
#include "choose-mode.h"
#include "kern.h"
#include "kern_util.h"
#include "mode_kern.h"
#include "os.h"

#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif

/*
 * If needed we can detect when it's uninitialized.
 *
 * These are initialized in an initcall and unchanged thereafter.
 */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min;

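/* In SKAS mode a guest thread's TLS descriptors live in the GDT of the host
 * process that runs its userspace, so reads and writes are forwarded to that
 * process (userspace_pid[cpu]) via os_set_thread_area()/os_get_thread_area().
 */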
#ifdef CONFIG_MODE_SKAS
int do_set_thread_area_skas(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	return ret;
}

int do_get_thread_area_skas(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	return ret;
}
#endif

/*
 * get_free_idx: get a yet unused TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct *task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;

	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc *info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/* Check the LDT_empty or the i386 sys_get_thread_area code - we indeed
	 * obtain an empty user_desc.
	 */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}

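/* Flush this task's TLS descriptors to its host process. With O_FORCE every
 * descriptor is reloaded on the host; without it, descriptors already marked
 * flushed are skipped.
 */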
#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct *curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/* Actually, now if it wasn't flushed it gets cleared and
		 * flushed to the host, which will clear it. */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}

out:
	return ret;
}

/* Verify whether we need to do a flush for the new process, i.e. whether any
 * descriptors are present but have not been flushed yet.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/* Can't test curr->present, we may need to clear a descriptor
		 * which had a value. */
		if (curr->flushed)
			continue;
		ret = 1;
		break;
	}

	return ret;
}

/* On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct *curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/* Still correct to do this, if it wasn't present on the host
		 * it will remain as flushed as it was. */
		if (!curr->present)
			continue;

		curr->flushed = 0;
	}
}

/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have a
 * common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this won't be needed.
 *
 * And this will not need to be used when (and if) we add support for the host
 * SKAS patch. */
int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/* We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, that would also result in us calling os_set_thread_area with
	 * userspace_pid[cpu] == 0, which gives an error. */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}

int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	if (needs_TLS_update(to))
		return load_TLS(0, to);

	return 0;
}

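/* Record a descriptor in the task's arch state; "flushed" says whether the
 * host already has this value (1) or a later load_TLS() must still push it (0).
 */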
static int set_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

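/* Called for clone() with CLONE_SETTLS: on i386 the new thread's user_desc
 * pointer is the fourth syscall argument, i.e. %esi, hence the UPT_ESI() read
 * below. The entry is recorded as not flushed; the first switch to the child
 * pushes it to the host.
 */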
int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_ESI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct *task, struct user_desc *info,
			 int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/* Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.\n", current->pid);
	}

	return 0;

clear:
	/* When the TLS entry has not been set, the values read back from
	 * tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

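/* As on i386, passing entry_number == -1 asks the kernel to pick a free slot
 * and report the chosen index back to userspace.
 */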
asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;

		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas,
			       &info);
	if (ret)
		return ret;

	return set_tls_entry(current, &info, idx, 1);
}

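/* For reference, a minimal sketch of the userspace side of the call above
 * (i386 ABI, glibc-independent; tls_block is a hypothetical per-thread
 * buffer):
 *
 *	struct user_desc desc = {
 *		.entry_number	= -1,	// let the kernel pick a slot
 *		.base_addr	= (unsigned long) tls_block,
 *		.limit		= 0xfffff,
 *		.seg_32bit	= 1,
 *		.limit_in_pages	= 1,
 *		.useable	= 1,
 *	};
 *	syscall(__NR_set_thread_area, &desc);
 *	// desc.entry_number now holds the GDT slot the kernel chose.
 */
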
/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs
 * from i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}

asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
			   struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/* XXX: This part is probably common to i386 and x86-64. Don't create a common
 * file for now, do that when implementing x86-64 support. */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk("i386\n");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk("x86_64\n");
			break;
		}
	} else
		printk(KERN_ERR "  Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");

	return 0;
}

__initcall(__setup_host_supports_tls);