/*
 * Copyright (C) 2005 Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
 * Licensed under the GPL
 */

#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/sched.h"
#include "linux/slab.h"
#include "linux/types.h"
#include "asm/uaccess.h"
#include "asm/ptrace.h"
#include "asm/segment.h"
#include "asm/smp.h"
#include "asm/desc.h"
#include "choose-mode.h"
#include "kern.h"
#include "kern_util.h"
#include "mode_kern.h"
#include "os.h"
#include "mode.h"

#ifdef CONFIG_MODE_SKAS
#include "skas.h"
#endif

/* If needed we can detect when it's uninitialized. */
static int host_supports_tls = -1;
int host_gdt_entry_tls_min = -1;
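
/* Both are filled in at boot by __setup_host_supports_tls() below:
 * host_supports_tls becomes 0 or 1, and host_gdt_entry_tls_min records the
 * first GDT index the host reserves for TLS, which differs between i386 and
 * x86_64 hosts and therefore can't be hardcoded here. */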

#ifdef CONFIG_MODE_SKAS
int do_set_thread_area_skas(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_set_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	return ret;
}

int do_get_thread_area_skas(struct user_desc *info)
{
	int ret;
	u32 cpu;

	cpu = get_cpu();
	ret = os_get_thread_area(info, userspace_pid[cpu]);
	put_cpu();
	return ret;
}
#endif
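
/* In SKAS mode the kernel cannot touch its processes' GDT directly, so the
 * two helpers above forward the operation to the host process running this
 * CPU's userspace (userspace_pid[cpu]) through the os_*_thread_area() calls
 * implemented on the os-Linux side; get_cpu()/put_cpu() keep us pinned while
 * that pid is looked up. */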

/*
 * get_free_idx: get a yet-unused TLS descriptor index.
 * XXX: Consider leaving one free slot for glibc usage in the first place. This
 * must be done here (and by changing GDT_ENTRY_TLS_* macros) and nowhere else.
 *
 * Also, this must be tested when compiling in SKAS mode with dynamic linking
 * and running against NPTL.
 */
static int get_free_idx(struct task_struct* task)
{
	struct thread_struct *t = &task->thread;
	int idx;

	if (!t->arch.tls_array)
		return GDT_ENTRY_TLS_MIN;

	for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
		if (!t->arch.tls_array[idx].present)
			return idx + GDT_ENTRY_TLS_MIN;

	return -ESRCH;
}

static inline void clear_user_desc(struct user_desc* info)
{
	/* Postcondition: LDT_empty(info) returns true. */
	memset(info, 0, sizeof(*info));

	/* See LDT_empty() or the i386 sys_get_thread_area() code: this is
	 * indeed an empty user_desc. */
	info->read_exec_only = 1;
	info->seg_not_present = 1;
}
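
/* For reference, "empty" in the LDT_empty() sense means all fields zero
 * except read_exec_only == 1 and seg_not_present == 1, which is also what a
 * never-set GDT TLS slot reads back as on native i386. */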

#define O_FORCE 1

static int load_TLS(int flags, struct task_struct *to)
{
	int ret = 0;
	int idx;

	for (idx = GDT_ENTRY_TLS_MIN; idx < GDT_ENTRY_TLS_MAX; idx++) {
		struct uml_tls_struct* curr =
			&to->thread.arch.tls_array[idx - GDT_ENTRY_TLS_MIN];

		/* Actually, now if it wasn't flushed it gets cleared and
		 * flushed to the host, which will clear it. */
		if (!curr->present) {
			if (!curr->flushed) {
				clear_user_desc(&curr->tls);
				curr->tls.entry_number = idx;
			} else {
				WARN_ON(!LDT_empty(&curr->tls));
				continue;
			}
		}

		if (!(flags & O_FORCE) && curr->flushed)
			continue;

		ret = do_set_thread_area(&curr->tls);
		if (ret)
			goto out;

		curr->flushed = 1;
	}

out:
	return ret;
}
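
/* To summarize the "flushed" flag: an entry is flushed once the value cached
 * in tls_array has actually been written to the host. Without O_FORCE,
 * load_TLS() skips flushed entries; with O_FORCE it rewrites them
 * unconditionally, for when the host process may hold another thread's TLS
 * (see arch_switch_tls_skas below). */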

/* Check whether we need to flush TLS for the incoming process, i.e. whether
 * any descriptors are set but haven't been flushed to the host yet.
 */
static inline int needs_TLS_update(struct task_struct *task)
{
	int i;
	int ret = 0;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct* curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/* Can't test curr->present, we may need to clear a descriptor
		 * which had a value. */
		if (!curr->flushed) {
			ret = 1;
			break;
		}
	}

	return ret;
}

/* On a newly forked process, the TLS descriptors haven't yet been flushed. So
 * we mark them as such and the first switch_to will do the job.
 */
void clear_flushed_tls(struct task_struct *task)
{
	int i;

	for (i = GDT_ENTRY_TLS_MIN; i < GDT_ENTRY_TLS_MAX; i++) {
		struct uml_tls_struct* curr =
			&task->thread.arch.tls_array[i - GDT_ENTRY_TLS_MIN];

		/* Still correct to do this even if the entry wasn't present on
		 * the host: it will simply stay as flushed as it was. */
		curr->flushed = 0;
	}
}

/* In SKAS0 mode, currently, multiple guest threads sharing the same ->mm have
 * a common host process. So this is needed in SKAS0 too.
 *
 * However, if each thread had a different host process (and this was discussed
 * for SMP support) this wouldn't be needed.
 *
 * And this will not need to be used when (and if) we add support for the host
 * SKAS patch. */
int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/* We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, that would also result in us calling os_set_thread_area with
	 * userspace_pid[cpu] == 0, which gives an error. */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}
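
/* O_FORCE is needed above because in SKAS0 all threads sharing an ->mm share
 * one host process: even entries already flushed for "to" may have been
 * overwritten on the host while "from" was running. */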

int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	if (needs_TLS_update(to))
		return load_TLS(0, to);

	return 0;
}
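
/* No O_FORCE here: in TT mode each guest process has a host process of its
 * own, so anything already flushed to the host is still valid and only
 * pending (unflushed) entries need loading. */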

static int set_tls_entry(struct task_struct* task, struct user_desc *info,
			 int idx, int flushed)
{
	struct thread_struct *t = &task->thread;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls = *info;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present = 1;
	t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed = flushed;

	return 0;
}

int arch_copy_tls(struct task_struct *new)
{
	struct user_desc info;
	int idx, ret = -EFAULT;

	if (copy_from_user(&info,
			   (void __user *) UPT_ESI(&new->thread.regs.regs),
			   sizeof(info)))
		goto out;

	ret = -EINVAL;
	if (LDT_empty(&info))
		goto out;

	idx = info.entry_number;

	ret = set_tls_entry(new, &info, idx, 0);
out:
	return ret;
}
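
/* arch_copy_tls() runs for clone(CLONE_SETTLS): on i386 the pointer to the
 * new thread's user_desc arrives in %esi, hence UPT_ESI above. The entry is
 * recorded with flushed == 0, so it reaches the host the first time the child
 * is switched in. */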

/* XXX: use do_get_thread_area to read the host value? I'm not at all sure! */
static int get_tls_entry(struct task_struct* task, struct user_desc *info, int idx)
{
	struct thread_struct *t = &task->thread;

	if (!t->arch.tls_array)
		goto clear;

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	if (!t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].present)
		goto clear;

	*info = t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].tls;

out:
	/* Temporary debugging check, to make sure that things have been
	 * flushed. This could be triggered if load_TLS() failed.
	 */
	if (unlikely(task == current &&
		     !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
		printk(KERN_ERR "get_tls_entry: task with pid %d got here "
				"without flushed TLS.\n", current->pid);
	}

	return 0;

clear:
	/* When the TLS entry has not been set, the values read back from the
	 * tls_array are 0 (because it's cleared at boot, see
	 * arch/i386/kernel/head.S:cpu_gdt_table). Emulate that.
	 */
	clear_user_desc(info);
	info->entry_number = idx;
	goto out;
}

asmlinkage int sys_set_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	if (idx == -1) {
		idx = get_free_idx(current);
		if (idx < 0)
			return idx;
		info.entry_number = idx;
		/* Tell the user which slot we chose for him. */
		if (put_user(idx, &user_desc->entry_number))
			return -EFAULT;
	}

	ret = CHOOSE_MODE_PROC(do_set_thread_area_tt, do_set_thread_area_skas,
			       &info);
	if (ret)
		return ret;

	/* The host now holds this value, so mark the entry as flushed. */
	return set_tls_entry(current, &info, idx, 1);
}
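
/* For illustration, this is roughly how userspace (e.g. glibc's TLS setup)
 * uses the syscall above - a sketch, not code from this file:
 *
 *	struct user_desc desc = {
 *		.entry_number = -1,	(ask the kernel to pick a free slot)
 *		.base_addr = (unsigned long) tcb,
 *		.limit = 0xfffff,
 *		.seg_32bit = 1,
 *		.limit_in_pages = 1,
 *		.useable = 1,
 *	};
 *	syscall(__NR_set_thread_area, &desc);
 *
 * On success desc.entry_number holds the chosen slot, and the thread register
 * is enabled by loading %gs with the selector desc.entry_number * 8 + 3. */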

/*
 * Perform set_thread_area on behalf of the traced child.
 * Note: error handling is not done on the deferred load, and this differs
 * from i386. However, the only possible errors are caused by bugs.
 */
int ptrace_set_thread_area(struct task_struct *child, int idx,
		struct user_desc __user *user_desc)
{
	struct user_desc info;

	if (!host_supports_tls)
		return -EIO;

	if (copy_from_user(&info, user_desc, sizeof(info)))
		return -EFAULT;

	return set_tls_entry(child, &info, idx, 0);
}
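
/* Passing flushed == 0 is the deferred load mentioned above: the traced child
 * isn't running, so the entry is only recorded here and pushed to the host by
 * load_TLS() when the child is next scheduled. */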

asmlinkage int sys_get_thread_area(struct user_desc __user *user_desc)
{
	struct user_desc info;
	int idx, ret;

	if (!host_supports_tls)
		return -ENOSYS;

	if (get_user(idx, &user_desc->entry_number))
		return -EFAULT;

	ret = get_tls_entry(current, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/*
 * Perform get_thread_area on behalf of the traced child.
 */
int ptrace_get_thread_area(struct task_struct *child, int idx,
		struct user_desc __user *user_desc)
{
	struct user_desc info;
	int ret;

	if (!host_supports_tls)
		return -EIO;

	ret = get_tls_entry(child, &info, idx);
	if (ret < 0)
		goto out;

	if (copy_to_user(user_desc, &info, sizeof(info)))
		ret = -EFAULT;

out:
	return ret;
}

/* XXX: This part is probably common to i386 and x86-64. Don't create a common
 * file for now, do that when implementing x86-64 support. */
static int __init __setup_host_supports_tls(void)
{
	check_host_supports_tls(&host_supports_tls, &host_gdt_entry_tls_min);
	if (host_supports_tls) {
		printk(KERN_INFO "Host TLS support detected\n");
		printk(KERN_INFO "Detected host type: ");
		switch (host_gdt_entry_tls_min) {
		case GDT_ENTRY_TLS_MIN_I386:
			printk("i386\n");
			break;
		case GDT_ENTRY_TLS_MIN_X86_64:
			printk("x86_64\n");
			break;
		}
	} else
		printk(KERN_ERR "  Host TLS support NOT detected! "
				"TLS support inside UML will not work\n");

	return 0;
}

__initcall(__setup_host_supports_tls);