process.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778
  1. /*
  2. * linux/arch/arm/kernel/process.c
  3. *
  4. * Copyright (C) 1996-2000 Russell King - Converted to ARM.
  5. * Original Copyright (C) 1995 Linus Torvalds
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. */
  11. #include <stdarg.h>
  12. #include <linux/export.h>
  13. #include <linux/sched.h>
  14. #include <linux/kernel.h>
  15. #include <linux/mm.h>
  16. #include <linux/vmalloc.h>
  17. #include <linux/stddef.h>
  18. #include <linux/unistd.h>
  19. #include <linux/user.h>
  20. #include <linux/delay.h>
  21. #include <linux/reboot.h>
  22. #include <linux/interrupt.h>
  23. #include <linux/kallsyms.h>
  24. #include <linux/init.h>
  25. #include <linux/cpu.h>
  26. #include <linux/elfcore.h>
  27. #include <linux/pm.h>
  28. #include <linux/tick.h>
  29. #include <linux/utsname.h>
  30. #include <linux/uaccess.h>
  31. #include <linux/random.h>
  32. #include <linux/hw_breakpoint.h>
  33. #include <linux/cpuidle.h>
  34. #include <linux/console.h>
  35. #include <asm/cacheflush.h>
  36. #include <asm/processor.h>
  37. #include <asm/thread_notify.h>
  38. #include <asm/stacktrace.h>
  39. #include <asm/mach/time.h>
  40. #include <asm/tls.h>
#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
/*
 * Canary value for GCC's -fstack-protector.  Exported because modules
 * built with stack protection also reference this symbol from their
 * function prologues/epilogues.
 */
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
/*
 * Human-readable names for the 32 possible CPSR mode-field values,
 * indexed by processor_mode(regs).  "UK*" entries are reserved/unknown
 * encodings; the _26 block covers legacy 26-bit modes.
 */
static const char *processor_modes[] = {
  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" ,
  "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

/* Instruction-set state names, indexed by isa_mode(regs). */
static const char *isa_modes[] = {
  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
};
/* Implemented in arch/arm/mm: install a flat 1:1 mapping for reboot. */
extern void setup_mm_for_reboot(void);

/*
 * Non-zero disables the low-power idle ("hlt") path in cpu_idle();
 * adjusted by disable_hlt()/enable_hlt() and the "nohlt"/"hlt" boot
 * parameters below.
 */
static volatile int hlt_counter;

#ifdef CONFIG_SMP
/* Ask every online CPU to dump its backtrace. */
void arch_trigger_all_cpu_backtrace(void)
{
	smp_send_all_cpu_backtrace();
}
#else
/* UP fallback: only the current CPU's stack exists to dump. */
void arch_trigger_all_cpu_backtrace(void)
{
	dump_stack();
}
#endif
  68. void disable_hlt(void)
  69. {
  70. hlt_counter++;
  71. }
  72. EXPORT_SYMBOL(disable_hlt);
  73. void enable_hlt(void)
  74. {
  75. hlt_counter--;
  76. }
  77. EXPORT_SYMBOL(enable_hlt);
  78. int get_hlt(void)
  79. {
  80. return hlt_counter;
  81. }
  82. EXPORT_SYMBOL(get_hlt);
  83. static int __init nohlt_setup(char *__unused)
  84. {
  85. hlt_counter = 1;
  86. return 1;
  87. }
  88. static int __init hlt_setup(char *__unused)
  89. {
  90. hlt_counter = 0;
  91. return 1;
  92. }
  93. __setup("nohlt", nohlt_setup);
  94. __setup("hlt", hlt_setup);
/* arch/arm/lib/call_with_stack.S: call fn(arg) running on stack sp. */
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);

/* Signature of cpu_reset once converted to its identity-mapped address. */
typedef void (*phys_reset_t)(unsigned long);

#ifdef CONFIG_ARM_FLUSH_CONSOLE_ON_RESTART
/*
 * Push any buffered kernel messages out to the console drivers before
 * restart.  If the console lock is held, wait briefly for the holder to
 * finish, then bust the lock with interrupts off so the final messages
 * are not lost.
 */
void arm_machine_flush_console(void)
{
	printk("\n");
	pr_emerg("Restarting %s\n", linux_banner);
	if (console_trylock()) {
		console_unlock();
		return;
	}

	/* Give the lock holder a moment to release it. */
	mdelay(50);

	local_irq_disable();
	if (!console_trylock())
		pr_emerg("arm_restart: Console was locked! Busting\n");
	else
		pr_emerg("arm_restart: Console was locked!\n");
	/* Unlock regardless: we are about to reset, drain the buffer. */
	console_unlock();
}
#else
/* Flushing on restart not configured: nothing to do. */
void arm_machine_flush_console(void)
{
}
#endif
/*
 * A temporary stack to use for CPU reset. This is static so that we
 * don't clobber it with the identity mapping. When running with this
 * stack, any references to the current task *will not work* so you
 * should really do as little as possible before jumping to your reset
 * code.
 */
static u64 soft_restart_stack[16];

/*
 * Final stage of soft_restart(): runs on soft_restart_stack with IRQs
 * and FIQs already disabled.  Tears down caches and the MMU mapping,
 * then jumps to @addr via the physical alias of cpu_reset.
 * Never returns.
 */
static void __soft_restart(void *addr)
{
	phys_reset_t phys_reset;

	/* Take out a flat memory mapping. */
	setup_mm_for_reboot();

	/* Clean and invalidate caches */
	flush_cache_all();

	/* Turn off caching */
	cpu_proc_fin();

	/* Push out any further dirty data, and ensure cache is empty */
	flush_cache_all();

	/* Push out the dirty data from external caches */
	outer_disable();

	/* Switch to the identity mapping. */
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset((unsigned long)addr);

	/* Should never get here. */
	BUG();
}
/*
 * Soft-reset the CPU by jumping to @addr (typically the reset vector's
 * physical address).  Execution moves to a private static stack so the
 * identity mapping installed by __soft_restart() cannot clobber state
 * we still need.  Never returns.
 */
void soft_restart(unsigned long addr)
{
	/* Stacks grow down: start at the top of the reserved area. */
	u64 *stack = soft_restart_stack + ARRAY_SIZE(soft_restart_stack);

	/* Disable interrupts first */
	local_irq_disable();
	local_fiq_disable();

	/* Disable the L2 if we're the last man standing. */
	if (num_online_cpus() == 1)
		outer_disable();

	/* Change to the new stack and continue with the reset. */
	call_with_stack(__soft_restart, (void *)addr, (void *)stack);

	/* Should never get here. */
	BUG();
}
/* Default restart hook used until a platform registers its own. */
static void null_restart(char mode, const char *cmd)
{
}

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

/* Platform restart hook; mode comes from the "reboot=" parameter. */
void (*arm_pm_restart)(char str, const char *cmd) = null_restart;
EXPORT_SYMBOL_GPL(arm_pm_restart);
/* Empty IPI payload: its only purpose is to kick CPUs out of idle. */
static void do_nothing(void *unused)
{
}

/*
 * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
 * pm_idle and update to new pm_idle value. Required while changing pm_idle
 * handler on SMP systems.
 *
 * Caller must have changed pm_idle to the new value before the call. Old
 * pm_idle value will not be used by any CPU after the return of this function.
 */
void cpu_idle_wait(void)
{
	/* Make the pm_idle store visible before waking the other CPUs. */
	smp_mb();
	/* kick all the CPUs so that they exit out of pm_idle */
	smp_call_function(do_nothing, NULL, 1);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
/*
 * This is our default idle handler.
 */
extern void arch_idle(void);
void (*arm_pm_idle)(void) = arch_idle;

/*
 * Enter low power via the platform hook, falling back to cpu_do_idle().
 * Called with IRQs disabled; re-enables them before returning, as the
 * idle loop requires.
 */
static void default_idle(void)
{
	if (arm_pm_idle)
		arm_pm_idle();
	else
		cpu_do_idle();
	local_irq_enable();
}

void (*pm_idle)(void) = default_idle;
EXPORT_SYMBOL(pm_idle);
/*
 * The idle thread, has rather strange semantics for calling pm_idle,
 * but this is what x86 does and we need to do the same, so that
 * things like cpuidle get called in the same way. The only difference
 * is that we always respect 'hlt_counter' to prevent low power idle.
 */
void cpu_idle(void)
{
	local_fiq_enable();

	/* endless idle loop with no priority at all */
	while (1) {
		idle_notifier_call_chain(IDLE_START);
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched()) {
			/*
			 * We need to disable interrupts here
			 * to ensure we don't miss a wakeup call.
			 */
			local_irq_disable();
#ifdef CONFIG_PL310_ERRATA_769419
			/* PL310 erratum workaround: drain the write buffer. */
			wmb();
#endif
			if (hlt_counter) {
				/* Low power idle forbidden: busy-wait instead. */
				local_irq_enable();
				cpu_relax();
			} else if (!need_resched()) {
				stop_critical_timings();
				/* cpuidle returns non-zero if it did not idle. */
				if (cpuidle_idle_call())
					pm_idle();
				start_critical_timings();
				/*
				 * pm_idle functions must always
				 * return with IRQs enabled.
				 */
				WARN_ON(irqs_disabled());
			} else
				local_irq_enable();
		}
		rcu_idle_exit();
		tick_nohz_idle_exit();
		idle_notifier_call_chain(IDLE_END);
		schedule_preempt_disabled();
#ifdef CONFIG_HOTPLUG_CPU
		/* This CPU was unplugged while we slept: take it down. */
		if (cpu_is_offline(smp_processor_id()))
			cpu_die();
#endif
	}
}
  252. static char reboot_mode = 'h';
  253. int __init reboot_setup(char *str)
  254. {
  255. reboot_mode = str[0];
  256. return 1;
  257. }
  258. __setup("reboot=", reboot_setup);
/*
 * Common preparation for halt/power-off/restart: pin ourselves to this
 * CPU and stop all the others.
 */
void machine_shutdown(void)
{
	preempt_disable();
#ifdef CONFIG_SMP
	smp_send_stop();
#endif
}
  266. void machine_halt(void)
  267. {
  268. machine_shutdown();
  269. local_irq_disable();
  270. while (1);
  271. }
  272. void machine_power_off(void)
  273. {
  274. machine_shutdown();
  275. if (pm_power_off)
  276. pm_power_off();
  277. }
/*
 * Restart the machine: stop other CPUs, flush the console, then hand
 * control to the platform restart hook.  Halts if the hook fails.
 */
void machine_restart(char *cmd)
{
	machine_shutdown();

	/* Flush the console to make sure all the relevant messages make it
	 * out to the console drivers */
	arm_machine_flush_console();

	arm_pm_restart(reboot_mode, cmd);

	/* Give a grace period for failure to restart of 1s */
	mdelay(1000);

	/* Whoops - the platform was unable to reboot. Tell the user! */
	printk("Reboot failed -- System halted\n");
	local_irq_disable();
	while (1);
}
/*
 * dump a block of kernel memory from around the given address
 */
static void show_data(unsigned long addr, int nbytes, const char *name)
{
	int i, j;
	int nlines;
	u32 *p;

	/*
	 * don't attempt to dump non-kernel addresses or
	 * values that are probably just small negative numbers
	 */
	if (addr < PAGE_OFFSET || addr > -256UL)
		return;

	if (is_vmalloc_addr((void *)addr)) {
		struct vm_struct *area = find_vm_area((void *)addr);

		/* Reading ioremapped device memory can have side effects. */
		if (area && area->flags & VM_IOREMAP) {
			pr_err("%s: not dumping ioremapped address\n", __func__);
			return;
		}
	}

	printk("\n%s: %#lx:\n", name, addr);

	/*
	 * round address down to a 32 bit boundary
	 * and always dump a multiple of 32 bytes
	 */
	p = (u32 *)(addr & ~(sizeof(u32) - 1));
	nbytes += (addr & (sizeof(u32) - 1));
	nlines = (nbytes + 31) / 32;

	for (i = 0; i < nlines; i++) {
		/*
		 * just display low 16 bits of address to keep
		 * each line of the dump < 80 characters
		 */
		printk("%04lx ", (unsigned long)p & 0xffff);
		for (j = 0; j < 8; j++) {
			u32 data;
			/*
			 * vmalloc addresses may point to
			 * memory-mapped peripherals
			 */
			if (is_vmalloc_addr(p) ||
			    probe_kernel_address(p, data)) {
				/* unreadable word: print a placeholder */
				printk(" ********");
			} else {
				printk(" %08x", data);
			}
			++p;
		}
		printk("\n");
	}
}
  346. static void show_extra_register_data(struct pt_regs *regs, int nbytes)
  347. {
  348. mm_segment_t fs;
  349. unsigned long is_user;
  350. fs = get_fs();
  351. is_user = user_mode(regs);
  352. set_fs(KERNEL_DS);
  353. if (!is_user || regs->ARM_pc < TASK_SIZE)
  354. show_data(regs->ARM_pc - nbytes, nbytes * 2, "PC");
  355. if (!is_user || regs->ARM_lr < TASK_SIZE)
  356. show_data(regs->ARM_lr - nbytes, nbytes * 2, "LR");
  357. if (!is_user || regs->ARM_sp < TASK_SIZE)
  358. show_data(regs->ARM_sp - nbytes, nbytes * 2, "SP");
  359. if (!is_user || regs->ARM_ip < TASK_SIZE)
  360. show_data(regs->ARM_ip - nbytes, nbytes * 2, "IP");
  361. if (!is_user || regs->ARM_fp < TASK_SIZE)
  362. show_data(regs->ARM_fp - nbytes, nbytes * 2, "FP");
  363. if (!is_user || regs->ARM_r0 < TASK_SIZE)
  364. show_data(regs->ARM_r0 - nbytes, nbytes * 2, "R0");
  365. if (!is_user || regs->ARM_r1 < TASK_SIZE)
  366. show_data(regs->ARM_r1 - nbytes, nbytes * 2, "R1");
  367. if (!is_user || regs->ARM_r2 < TASK_SIZE)
  368. show_data(regs->ARM_r2 - nbytes, nbytes * 2, "R2");
  369. if (!is_user || regs->ARM_r3 < TASK_SIZE)
  370. show_data(regs->ARM_r3 - nbytes, nbytes * 2, "R3");
  371. if (!is_user || regs->ARM_r4 < TASK_SIZE)
  372. show_data(regs->ARM_r4 - nbytes, nbytes * 2, "R4");
  373. if (!is_user || regs->ARM_r5 < TASK_SIZE)
  374. show_data(regs->ARM_r5 - nbytes, nbytes * 2, "R5");
  375. if (!is_user || regs->ARM_r6 < TASK_SIZE)
  376. show_data(regs->ARM_r6 - nbytes, nbytes * 2, "R6");
  377. if (!is_user || regs->ARM_r7 < TASK_SIZE)
  378. show_data(regs->ARM_r7 - nbytes, nbytes * 2, "R7");
  379. if (!is_user || regs->ARM_r8 < TASK_SIZE)
  380. show_data(regs->ARM_r8 - nbytes, nbytes * 2, "R8");
  381. if (!is_user || regs->ARM_r9 < TASK_SIZE)
  382. show_data(regs->ARM_r9 - nbytes, nbytes * 2, "R9");
  383. if (!is_user || regs->ARM_r10 < TASK_SIZE)
  384. show_data(regs->ARM_r10 - nbytes, nbytes * 2, "R10");
  385. set_fs(fs);
  386. }
/*
 * Print the full register state: general registers, decoded CPSR flags,
 * and (on CP15 systems) the MMU control/translation registers, followed
 * by memory dumps around each register value.
 */
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	char buf[64];

	printk("CPU: %d %s (%s %.*s)\n",
		raw_smp_processor_id(), print_tainted(),
		init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", regs->ARM_lr);
	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
	       "sp : %08lx ip : %08lx fp : %08lx\n",
		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
		regs->ARM_r10, regs->ARM_r9,
		regs->ARM_r8);
	printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
		regs->ARM_r7, regs->ARM_r6,
		regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
		regs->ARM_r3, regs->ARM_r2,
		regs->ARM_r1, regs->ARM_r0);

	/* Decode the CPSR condition flags: uppercase letter = flag set. */
	flags = regs->ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

	printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
		buf, interrupts_enabled(regs) ? "n" : "ff",
		fast_interrupts_enabled(regs) ? "n" : "ff",
		processor_modes[processor_mode(regs)],
		isa_modes[isa_mode(regs)],
		get_fs() == get_ds() ? "kernel" : "user");
#ifdef CONFIG_CPU_CP15
	{
		unsigned int ctrl;

		buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
		{
			unsigned int transbase, dac;
			/* Read TTBR (CP15 c2) and domain access control (c3). */
			asm("mrc p15, 0, %0, c2, c0\n\t"
			    "mrc p15, 0, %1, c3, c0\n"
			    : "=r" (transbase), "=r" (dac));
			snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
				transbase, dac);
		}
#endif
		/* Read the system control register (CP15 c1). */
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

		printk("Control: %08x%s\n", ctrl, buf);
	}
#endif

	show_extra_register_data(regs, 128);
}
/*
 * Banner with the current task's pid/comm, then the full register dump
 * and a backtrace of the current stack.
 */
void show_regs(struct pt_regs * regs)
{
	printk("\n");
	printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
	__show_regs(regs);
	dump_stack();
}
/* Notifier chain fired on thread exit/flush/copy (VFP, TLS, etc.). */
ATOMIC_NOTIFIER_HEAD(thread_notify_head);
EXPORT_SYMBOL_GPL(thread_notify_head);

/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
}

/*
 * Reset per-thread state on exec: clear hardware breakpoints,
 * coprocessor usage flags, debug state and the FP state, then let the
 * notifier chain reset whatever the coprocessor owners track.
 */
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	thread_notify(THREAD_NOTIFY_FLUSH, thread);
}
/* Nothing ARM-specific to release when a dead task's resources go. */
void release_thread(struct task_struct *dead_task)
{
}
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

/*
 * Set up the child's saved register state and kernel context for
 * fork/clone: the child resumes in ret_from_fork with r0 = 0 (the
 * child's fork() return value) on the given user stack.
 * Returns 0 on success.
 */
int
copy_thread(unsigned long clone_flags, unsigned long stack_start,
	    unsigned long stk_sz, struct task_struct *p, struct pt_regs *regs)
{
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	*childregs = *regs;
	childregs->ARM_r0 = 0;		/* child sees 0 from fork() */
	childregs->ARM_sp = stack_start;

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
	thread->cpu_context.sp = (unsigned long)childregs;
	thread->cpu_context.pc = (unsigned long)ret_from_fork;

	clear_ptrace_hw_breakpoint(p);

	/* CLONE_SETTLS passes the new TLS value in r3. */
	if (clone_flags & CLONE_SETTLS)
		thread->tp_value[0] = childregs->ARM_r3;
	thread->tp_value[1] = get_tpuser();

	thread_notify(THREAD_NOTIFY_COPY, thread);

	return 0;
}
/*
 * Fill in the task's elfregs structure for a core dump.
 */
int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
{
	elf_core_copy_regs(elfregs, task_pt_regs(t));
	return 1;
}

/*
 * fill in the fpe structure for a core dump...
 */
int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
{
	struct thread_info *thread = current_thread_info();
	/* used_cp[1]/[2] are set once the FP coprocessors get used. */
	int used_math = thread->used_cp[1] | thread->used_cp[2];

	if (used_math)
		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));

	/* non-zero means the fp struct was filled in */
	return used_math != 0;
}
EXPORT_SYMBOL(dump_fpu);
/*
 * Shuffle the argument into the correct register before calling the
 * thread function. r4 is the thread argument, r5 is the pointer to
 * the thread function, and r6 points to the exit function.
 */
extern void kernel_thread_helper(void);
asm(	".pushsection .text\n"
"	.align\n"
"	.type	kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
#ifdef CONFIG_TRACE_IRQFLAGS
"	bl	trace_hardirqs_on\n"
#endif
	/* install the final CPSR (r7), then fn(arg) with lr = exit fn */
"	msr	cpsr_c, r7\n"
"	mov	r0, r4\n"
"	mov	lr, r6\n"
"	mov	pc, r5\n"
"	.size	kernel_thread_helper, . - kernel_thread_helper\n"
"	.popsection");

#ifdef CONFIG_ARM_UNWIND
/*
 * Terminal frame for kernel threads: marked .cantunwind so the unwinder
 * stops here instead of walking off the end of the stack.
 */
extern void kernel_thread_exit(long code);
asm(	".pushsection .text\n"
"	.align\n"
"	.type	kernel_thread_exit, #function\n"
"kernel_thread_exit:\n"
"	.fnstart\n"
"	.cantunwind\n"
"	bl	do_exit\n"
"	nop\n"
"	.fnend\n"
"	.size	kernel_thread_exit, . - kernel_thread_exit\n"
"	.popsection");
#else
#define kernel_thread_exit	do_exit
#endif
/*
 * Create a kernel thread.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	/* Registers kernel_thread_helper expects: r4=arg, r5=fn, r6=exit. */
	regs.ARM_r4 = (unsigned long)arg;
	regs.ARM_r5 = (unsigned long)fn;
	regs.ARM_r6 = (unsigned long)kernel_thread_exit;
	/* r7 holds the CPSR the helper installs before calling fn. */
	regs.ARM_r7 = SVC_MODE | PSR_ENDSTATE | PSR_ISETSTATE;
	regs.ARM_pc = (unsigned long)kernel_thread_helper;
	/* Start with IRQs masked; the helper's msr re-enables them. */
	regs.ARM_cpsr = regs.ARM_r7 | PSR_I_BIT;

	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
/*
 * Return the address where @p is sleeping: the first PC on its stack
 * that is not inside the scheduler.  Returns 0 if it cannot be
 * determined (task is running, is us, or the unwind fails).
 */
unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.lr = 0;			/* recovered from the stack */
	frame.pc = thread_saved_pc(p);
	stack_page = (unsigned long)task_stack_page(p);
	do {
		/* Give up if the frame walks off the task's stack. */
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(&frame) < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count ++ < 16);	/* bound the walk to 16 frames */
	return 0;
}
  585. unsigned long arch_randomize_brk(struct mm_struct *mm)
  586. {
  587. unsigned long range_end = mm->brk + 0x02000000;
  588. return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
  589. }
#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	/* The vectors page sits at the fixed high-vectors address. */
	gate_vma.vm_start	= 0xffff0000;
	gate_vma.vm_end		= 0xffff0000 + PAGE_SIZE;
	gate_vma.vm_page_prot	= PAGE_READONLY_EXEC;
	gate_vma.vm_flags	= VM_READ | VM_EXEC |
				  VM_MAYREAD | VM_MAYEXEC;
	return 0;
}
arch_initcall(gate_vma_init);

/* One global gate VMA is shared by every mm. */
struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}

int in_gate_area_no_mm(unsigned long addr)
{
	return in_gate_area(NULL, addr);
}

#define is_gate_vma(vma)	((vma) == &gate_vma)
#else
#define is_gate_vma(vma)	0
#endif
  624. const char *arch_vma_name(struct vm_area_struct *vma)
  625. {
  626. if (is_gate_vma(vma))
  627. return "[vectors]";
  628. else if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage)
  629. return "[sigpage]";
  630. else if (vma == get_user_timers_vma(NULL))
  631. return "[timers]";
  632. else
  633. return NULL;
  634. }
/* Page holding the signal return trampoline, mapped into each new mm. */
static struct page *signal_page;
extern struct page *get_signal_page(void);

/*
 * Called at exec time: map the signal trampoline page ("[sigpage]") at
 * an unmapped address in the new process.  Returns 0 on success or a
 * negative errno.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr;
	int ret;

	/* Allocate the trampoline page lazily, on first exec. */
	if (!signal_page)
		signal_page = get_signal_page();
	if (!signal_page)
		return -ENOMEM;

	down_write(&mm->mmap_sem);
	addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	ret = install_special_mapping(mm, addr, PAGE_SIZE,
		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
		&signal_page);

	if (ret == 0)
		mm->context.sigpage = addr;

 up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}
#endif