/*
 * File:         arch/blackfin/kernel/process.c
 * Based on:
 * Author:
 *
 * Created:
 * Description:  Blackfin architecture-dependent process handling.
 *
 * Modified:
 *               Copyright 2004-2006 Analog Devices Inc.
 *
 * Bugs:         Enter bugs at http://blackfin.uclinux.org/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see the file COPYING, or write
 * to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */

#include <linux/module.h>
#include <linux/smp_lock.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>

asmlinkage void ret_from_fork(void);

/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;

/* The number of tasks currently using an L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for process stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;
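
/* These variables are shared with the stack-switching code outside this
 * file: when a task's stack lives in the L1 scratchpad area described by
 * l1_stack_base/l1_stack_len, current_l1_stack_save points at the SDRAM
 * copy that is kept in sync across context switches.
 */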

/*
 * Power management idle function, if any.
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);

/*
 * The idle loop on BFIN
 */
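/* With CONFIG_IDLE_L1 the idle routines below are placed in on-chip L1
 * instruction SRAM via the l1_text attribute, presumably so the idle loop
 * can run without fetching code from external memory.
 */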
#ifdef CONFIG_IDLE_L1
static void default_idle(void)__attribute__((l1_text));
void cpu_idle(void)__attribute__((l1_text));
#endif

/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
	ipipe_suspend_domain();
#endif
	local_irq_disable_hw();
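	/* With hardware interrupts masked, re-check need_resched() and only
	 * then enter the low-power IDLE state.  idle_with_irq_disabled() is
	 * expected to re-enable interrupts and issue the IDLE instruction
	 * back to back, so a wakeup interrupt arriving after the check
	 * cannot be lost.
	 */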
	if (!need_resched())
		idle_with_irq_disabled();

	local_irq_enable_hw();
}

/*
 * The idle thread.  We try to conserve power while keeping overall
 * latency low.  If a platform supplies a pm_idle routine it is used,
 * otherwise we fall back to default_idle().
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(smp_processor_id()))
			cpu_die();
#endif
		if (!idle)
			idle = default_idle;
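		/* Stop the periodic timer tick while idle (NO_HZ) and spin
		 * in the chosen idle routine until a reschedule is needed;
		 * the tick is restarted before handing over to schedule().
		 */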
		tick_nohz_stop_sched_tick(1);
		while (!need_resched())
			idle();
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}

/* Fill in the fpu structure for a core dump. */

int dump_fpu(struct pt_regs *regs, elf_fpregset_t * fpregs)
{
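	/* Blackfin has no hardware FPU, so there is no FPU state to record
	 * here and fpregs is left untouched.
	 */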
	return 1;
}

/*
 * This gets run with P1 containing the
 * function to call, and R1 containing
 * the "args".  Note P0 is clobbered on the way here.
 */
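/* The helper appears to reserve the 12 bytes of outgoing-argument space
 * required by the Blackfin C ABI (sp += -12), moves the argument from R1
 * into R0 (the first argument register), calls the thread function through
 * P1, and finally calls do_exit() with the function's return value still
 * in R0.
 */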
void kernel_thread_helper(void);
__asm__(".section .text\n"
	".align 4\n"
	"_kernel_thread_helper:\n\t"
	"\tsp += -12;\n\t"
	"\tr0 = r1;\n\t" "\tcall (p1);\n\t" "\tcall _do_exit;\n" ".previous");

/*
 * Create a kernel thread.
 */
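/* A pt_regs frame is built by hand: P1 holds the thread function, R1 its
 * argument and the PC points at kernel_thread_helper above.  do_fork() is
 * then called with CLONE_VM | CLONE_UNTRACED, so the new thread shares its
 * creator's address space and cannot be force-traced.
 */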
pid_t kernel_thread(int (*fn) (void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.r1 = (unsigned long)arg;
	regs.p1 = (unsigned long)fn;
	regs.pc = (unsigned long)kernel_thread_helper;
	regs.orig_p0 = -1;
	/* Set bit 2 to tell ret_from_fork we should be returning to kernel
	   mode.  */
	regs.ipend = 0x8002;
	__asm__ __volatile__("%0 = syscfg;":"=da"(regs.syscfg):);
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL,
		       NULL);
}
EXPORT_SYMBOL(kernel_thread);

void flush_thread(void)
{
}

asmlinkage int bfin_vfork(struct pt_regs *regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0, NULL,
		       NULL);
}

asmlinkage int bfin_clone(struct pt_regs *regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

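	/* On SMP parts the data caches are not kept coherent by hardware,
	 * so a task that was allowed to run on any CPU is pinned to the
	 * core it is cloning on, presumably so that threads sharing memory
	 * never see stale data.  Note also that a caller-supplied stack
	 * pointer has 12 bytes reserved below it further down, which
	 * appears to match the ABI's outgoing-argument area.
	 */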
#ifdef __ARCH_SYNC_CORE_DCACHE
	if (current->rt.nr_cpus_allowed == num_possible_cpus()) {
		current->cpus_allowed = cpumask_of_cpu(smp_processor_id());
		current->rt.nr_cpus_allowed = 1;
	}
#endif

	/* syscall2 puts clone_flags in r0 and usp in r1 */
	clone_flags = regs->r0;
	newsp = regs->r1;
	if (!newsp)
		newsp = rdusp();
	else
		newsp -= 12;
	return do_fork(clone_flags, newsp, regs, 0, NULL, NULL);
}

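/* Set up the thread state of a newly created task: a pt_regs frame is
 * placed at the top of the child's kernel stack and copied from the
 * parent's, with R0 cleared so the child sees a return value of zero from
 * fork/clone.  thread.pc is set so the child resumes in ret_from_fork.
 */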
int
copy_thread(unsigned long clone_flags,
	    unsigned long usp, unsigned long topstk,
	    struct task_struct *p, struct pt_regs *regs)
{
	struct pt_regs *childregs;

	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
	*childregs = *regs;
	childregs->r0 = 0;

	p->thread.usp = usp;
	p->thread.ksp = (unsigned long)childregs;
	p->thread.pc = (unsigned long)ret_from_fork;

	return 0;
}

/*
 * sys_execve() executes a new program.
 */

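/* The user-mode register frame is recovered from the location of the
 * syscall arguments themselves: the saved pt_regs appear to sit six
 * pointer-sized slots above the first argument on the kernel stack, hence
 * the (&name) + 6 arithmetic.  The big kernel lock is held around
 * getname()/do_execve().
 */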
asmlinkage int sys_execve(char __user *name, char __user * __user *argv, char __user * __user *envp)
{
	int error;
	char *filename;
	struct pt_regs *regs = (struct pt_regs *)((&name) + 6);

	lock_kernel();
	filename = getname(name);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, argv, envp, regs);
	putname(filename);
 out:
	unlock_kernel();
	return error;
}

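/* get_wchan(): walk the sleeping task's frame-pointer chain (at most 16
 * frames) and return the first PC that lies outside the scheduler, i.e.
 * the place where the task went to sleep.  The walk stays within the
 * task's stack area; the 8184 bound appears to be THREAD_SIZE (8 KiB)
 * minus the size of one frame record.
 */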
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;
	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	fp = p->thread.usp;
	do {
		if (fp < stack_page + sizeof(struct thread_info) ||
		    fp >= 8184 + stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *)fp;
	} while (count++ < 16);
	return 0;
}

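/* User space obtains atomic operations from small "fixed code" sequences
 * that the kernel maps at well-known addresses (ATOMIC_XCHG32,
 * ATOMIC_CAS32, ...).  If a task is interrupted part-way through one of
 * these sequences, the kernel completes the operation on its behalf here
 * and advances the PC past the sequence, so the update still appears
 * atomic to user space.  The case labels are the possible PC offsets
 * within each sequence.
 */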
void finish_atomic_sections (struct pt_regs *regs)
{
	int __user *up0 = (int __user *)regs->p0;

	if (regs->pc < ATOMIC_SEQS_START || regs->pc >= ATOMIC_SEQS_END)
		return;

	switch (regs->pc) {
	case ATOMIC_XCHG32 + 2:
		put_user(regs->r1, up0);
		regs->pc += 2;
		break;

	case ATOMIC_CAS32 + 2:
	case ATOMIC_CAS32 + 4:
		if (regs->r0 == regs->r1)
			put_user(regs->r2, up0);
		regs->pc = ATOMIC_CAS32 + 8;
		break;
	case ATOMIC_CAS32 + 6:
		put_user(regs->r2, up0);
		regs->pc += 2;
		break;

	case ATOMIC_ADD32 + 2:
		regs->r0 = regs->r1 + regs->r0;
		/* fall through */
	case ATOMIC_ADD32 + 4:
		put_user(regs->r0, up0);
		regs->pc = ATOMIC_ADD32 + 6;
		break;

	case ATOMIC_SUB32 + 2:
		regs->r0 = regs->r1 - regs->r0;
		/* fall through */
	case ATOMIC_SUB32 + 4:
		put_user(regs->r0, up0);
		regs->pc = ATOMIC_SUB32 + 6;
		break;

	case ATOMIC_IOR32 + 2:
		regs->r0 = regs->r1 | regs->r0;
		/* fall through */
	case ATOMIC_IOR32 + 4:
		put_user(regs->r0, up0);
		regs->pc = ATOMIC_IOR32 + 6;
		break;

	case ATOMIC_AND32 + 2:
		regs->r0 = regs->r1 & regs->r0;
		/* fall through */
	case ATOMIC_AND32 + 4:
		put_user(regs->r0, up0);
		regs->pc = ATOMIC_AND32 + 6;
		break;

	case ATOMIC_XOR32 + 2:
		regs->r0 = regs->r1 ^ regs->r0;
		/* fall through */
	case ATOMIC_XOR32 + 4:
		put_user(regs->r0, up0);
		regs->pc = ATOMIC_XOR32 + 6;
		break;
	}
}

#if defined(CONFIG_ACCESS_CHECK)
/* Return 1 if access to memory range is OK, 0 otherwise */
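/* There is no MMU to catch bad user pointers, so the user-access helpers
 * validate address ranges explicitly: the kernel segment, external SDRAM
 * (with the MTD filesystem image excluded unless ROMFS XIP is in use),
 * the init sections and the on-chip L1/L2 memories are the regions
 * accepted below.
 */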
int _access_ok(unsigned long addr, unsigned long size)
{
	if (size == 0)
		return 1;
	if (addr > (addr + size))
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return 1;
#ifdef CONFIG_MTD_UCLINUX
	if (addr >= memory_start && (addr + size) <= memory_end)
		return 1;
	if (addr >= memory_mtd_end && (addr + size) <= physical_mem_end)
		return 1;

#ifdef CONFIG_ROMFS_ON_MTD
	/* For XIP, allow user space to use pointers within the ROMFS.  */
	if (addr >= memory_mtd_start && (addr + size) <= memory_mtd_end)
		return 1;
#endif
#else
	if (addr >= memory_start && (addr + size) <= physical_mem_end)
		return 1;
#endif
	if (addr >= (unsigned long)__init_begin &&
	    addr + size <= (unsigned long)__init_end)
		return 1;
	if (addr >= get_l1_scratch_start()
	    && addr + size <= get_l1_scratch_start() + L1_SCRATCH_LENGTH)
		return 1;
#if L1_CODE_LENGTH != 0
	if (addr >= get_l1_code_start() + (_etext_l1 - _stext_l1)
	    && addr + size <= get_l1_code_start() + L1_CODE_LENGTH)
		return 1;
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= get_l1_data_a_start() + (_ebss_l1 - _sdata_l1)
	    && addr + size <= get_l1_data_a_start() + L1_DATA_A_LENGTH)
		return 1;
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= get_l1_data_b_start() + (_ebss_b_l1 - _sdata_b_l1)
	    && addr + size <= get_l1_data_b_start() + L1_DATA_B_LENGTH)
		return 1;
#endif
#if L2_LENGTH != 0
	if (addr >= L2_START + (_ebss_l2 - _stext_l2)
	    && addr + size <= L2_START + L2_LENGTH)
		return 1;
#endif
	return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */