/* hardwall.c - Tilera UDN "hardwall" rectangle management driver. */
  1. /*
  2. * Copyright 2010 Tilera Corporation. All Rights Reserved.
  3. *
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation, version 2.
  7. *
  8. * This program is distributed in the hope that it will be useful, but
  9. * WITHOUT ANY WARRANTY; without even the implied warranty of
  10. * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  11. * NON INFRINGEMENT. See the GNU General Public License for
  12. * more details.
  13. */
  14. #include <linux/fs.h>
  15. #include <linux/proc_fs.h>
  16. #include <linux/seq_file.h>
  17. #include <linux/rwsem.h>
  18. #include <linux/kprobes.h>
  19. #include <linux/sched.h>
  20. #include <linux/hardirq.h>
  21. #include <linux/uaccess.h>
  22. #include <linux/smp.h>
  23. #include <linux/cdev.h>
  24. #include <linux/compat.h>
  25. #include <asm/hardwall.h>
  26. #include <asm/traps.h>
  27. #include <asm/siginfo.h>
  28. #include <asm/irq_regs.h>
  29. #include <arch/interrupts.h>
  30. #include <arch/spr_def.h>
/*
 * This data structure tracks the rectangle data, etc., associated
 * one-to-one with a "struct file *" from opening HARDWALL_FILE.
 * Note that the file's private data points back to this structure.
 *
 * All mutable fields and list membership are guarded by the global
 * hardwall_lock (see the comment above its definition below).
 */
struct hardwall_info {
	struct list_head list;             /* entry on the global "rectangles" list */
	struct list_head task_head;        /* head of tasks activated in this hardwall */
	struct cpumask cpumask;            /* cpus in the rectangle */
	int ulhc_x;                        /* upper left hand corner x coord */
	int ulhc_y;                        /* upper left hand corner y coord */
	int width;                         /* rectangle width */
	int height;                        /* rectangle height */
	int id;                            /* integer id for this hardwall (ULHC cpu id) */
	int teardown_in_progress;          /* are we tearing this one down? */
};
/* Currently allocated hardwall rectangles. */
static LIST_HEAD(rectangles);

/* /proc/tile/hardwall directory (created in proc_tile_hardwall_init). */
static struct proc_dir_entry *hardwall_proc_dir;

/* Functions to manage files in /proc/tile/hardwall. */
static void hardwall_add_proc(struct hardwall_info *rect);
static void hardwall_remove_proc(struct hardwall_info *rect);

/*
 * Guard changes to the hardwall data structures.
 * This could be finer grained (e.g. one lock for the list of hardwall
 * rectangles, then separate embedded locks for each one's list of tasks),
 * but there are subtle correctness issues when trying to start with
 * a task's "hardwall" pointer and lock the correct rectangle's embedded
 * lock in the presence of a simultaneous deactivation, so it seems
 * easier to have a single lock, given that none of these data
 * structures are touched very frequently during normal operation.
 */
static DEFINE_SPINLOCK(hardwall_lock);
  65. /* Allow disabling UDN access. */
  66. static int udn_disabled;
  67. static int __init noudn(char *str)
  68. {
  69. pr_info("User-space UDN access is disabled\n");
  70. udn_disabled = 1;
  71. return 0;
  72. }
  73. early_param("noudn", noudn);
/*
 * Low-level primitives
 */

/*
 * Set a CPU bit in "dst", but only if that CPU is online; offline cpus
 * in the rectangle are silently skipped.
 */
#define cpu_online_set(cpu, dst) do { \
	if (cpu_online(cpu))          \
		cpumask_set_cpu(cpu, dst); \
} while (0)
  82. /* Does the given rectangle contain the given x,y coordinate? */
  83. static int contains(struct hardwall_info *r, int x, int y)
  84. {
  85. return (x >= r->ulhc_x && x < r->ulhc_x + r->width) &&
  86. (y >= r->ulhc_y && y < r->ulhc_y + r->height);
  87. }
  88. /* Compute the rectangle parameters and validate the cpumask. */
  89. static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
  90. {
  91. int x, y, cpu, ulhc, lrhc;
  92. /* The first cpu is the ULHC, the last the LRHC. */
  93. ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits);
  94. lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits);
  95. /* Compute the rectangle attributes from the cpus. */
  96. r->ulhc_x = cpu_x(ulhc);
  97. r->ulhc_y = cpu_y(ulhc);
  98. r->width = cpu_x(lrhc) - r->ulhc_x + 1;
  99. r->height = cpu_y(lrhc) - r->ulhc_y + 1;
  100. cpumask_copy(&r->cpumask, mask);
  101. r->id = ulhc; /* The ulhc cpu id can be the hardwall id. */
  102. /* Width and height must be positive */
  103. if (r->width <= 0 || r->height <= 0)
  104. return -EINVAL;
  105. /* Confirm that the cpumask is exactly the rectangle. */
  106. for (y = 0, cpu = 0; y < smp_height; ++y)
  107. for (x = 0; x < smp_width; ++x, ++cpu)
  108. if (cpumask_test_cpu(cpu, mask) != contains(r, x, y))
  109. return -EINVAL;
  110. /*
  111. * Note that offline cpus can't be drained when this UDN
  112. * rectangle eventually closes. We used to detect this
  113. * situation and print a warning, but it annoyed users and
  114. * they ignored it anyway, so now we just return without a
  115. * warning.
  116. */
  117. return 0;
  118. }
  119. /* Do the two given rectangles overlap on any cpu? */
  120. static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
  121. {
  122. return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */
  123. b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */
  124. a->ulhc_y + a->height > b->ulhc_y && /* A not above */
  125. b->ulhc_y + b->height > a->ulhc_y; /* B not above */
  126. }
/*
 * Hardware management of hardwall setup, teardown, trapping,
 * and enabling/disabling PL0 access to the networks.
 */

/*
 * Bit field values to mask together for writes to
 * SPR_XDN_DIRECTION_PROTECT; one bit per compass direction in which
 * traffic is blocked at this tile.
 */
enum direction_protect {
	N_PROTECT = (1 << 0),
	E_PROTECT = (1 << 1),
	S_PROTECT = (1 << 2),
	W_PROTECT = (1 << 3)
};
/*
 * Unmask the UDN firewall interrupt on the local cpu, so that a
 * firewall violation traps to do_hardwall_trap().
 */
static void enable_firewall_interrupts(void)
{
	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
}

/* Mask the UDN firewall interrupt on the local cpu. */
static void disable_firewall_interrupts(void)
{
	arch_local_irq_mask_now(INT_UDN_FIREWALL);
}
  146. /* Set up hardwall on this cpu based on the passed hardwall_info. */
  147. static void hardwall_setup_ipi_func(void *info)
  148. {
  149. struct hardwall_info *r = info;
  150. int cpu = smp_processor_id();
  151. int x = cpu % smp_width;
  152. int y = cpu / smp_width;
  153. int bits = 0;
  154. if (x == r->ulhc_x)
  155. bits |= W_PROTECT;
  156. if (x == r->ulhc_x + r->width - 1)
  157. bits |= E_PROTECT;
  158. if (y == r->ulhc_y)
  159. bits |= N_PROTECT;
  160. if (y == r->ulhc_y + r->height - 1)
  161. bits |= S_PROTECT;
  162. BUG_ON(bits == 0);
  163. __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits);
  164. enable_firewall_interrupts();
  165. }
  166. /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
  167. static void hardwall_setup(struct hardwall_info *r)
  168. {
  169. int x, y, cpu, delta;
  170. struct cpumask rect_cpus;
  171. cpumask_clear(&rect_cpus);
  172. /* First include the top and bottom edges */
  173. cpu = r->ulhc_y * smp_width + r->ulhc_x;
  174. delta = (r->height - 1) * smp_width;
  175. for (x = 0; x < r->width; ++x, ++cpu) {
  176. cpu_online_set(cpu, &rect_cpus);
  177. cpu_online_set(cpu + delta, &rect_cpus);
  178. }
  179. /* Then the left and right edges */
  180. cpu -= r->width;
  181. delta = r->width - 1;
  182. for (y = 0; y < r->height; ++y, cpu += smp_width) {
  183. cpu_online_set(cpu, &rect_cpus);
  184. cpu_online_set(cpu + delta, &rect_cpus);
  185. }
  186. /* Then tell all the cpus to set up their protection SPR */
  187. on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1);
  188. }
/*
 * Handle a UDN firewall violation trap on this cpu: find the hardwall
 * rectangle containing this tile, mark it as being torn down, and
 * SIGILL every process activated in it.  Runs in interrupt context.
 */
void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
{
	struct hardwall_info *rect;
	struct task_struct *p;
	struct siginfo info;
	int x, y;
	int cpu = smp_processor_id();
	int found_processes;
	unsigned long flags;
	struct pt_regs *old_regs = set_irq_regs(regs);
	irq_enter();

	/* This tile trapped a network access; find the rectangle. */
	x = cpu % smp_width;
	y = cpu / smp_width;
	spin_lock_irqsave(&hardwall_lock, flags);
	list_for_each_entry(rect, &rectangles, list) {
		if (contains(rect, x, y))
			break;
	}

	/*
	 * It shouldn't be possible not to find this cpu on the
	 * rectangle list, since only cpus in rectangles get hardwalled.
	 * The hardwall is only removed after the UDN is drained.
	 */
	BUG_ON(&rect->list == &rectangles);

	/*
	 * If we already started teardown on this hardwall, don't worry;
	 * the abort signal has been sent and we are just waiting for things
	 * to quiesce.
	 */
	if (rect->teardown_in_progress) {
		pr_notice("cpu %d: detected hardwall violation %#lx"
			  " while teardown already in progress\n",
			  cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
		goto done;
	}

	/*
	 * Kill off any process that is activated in this rectangle.
	 * We bypass security to deliver the signal, since it must be
	 * one of the activated processes that generated the UDN
	 * message that caused this trap, and all the activated
	 * processes shared a single open file so are pretty tightly
	 * bound together from a security point of view to begin with.
	 */
	rect->teardown_in_progress = 1;
	wmb(); /* Ensure visibility of rectangle before notifying processes. */
	pr_notice("cpu %d: detected hardwall violation %#lx...\n",
		  cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT));
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_HARDWALL;
	found_processes = 0;
	list_for_each_entry(p, &rect->task_head, thread.hardwall_list) {
		BUG_ON(p->thread.hardwall != rect);
		if (!(p->flags & PF_EXITING)) {
			found_processes = 1;
			pr_notice("hardwall: killing %d\n", p->pid);
			do_send_sig_info(info.si_signo, &info, p, false);
		}
	}
	if (!found_processes)
		pr_notice("hardwall: no associated processes!\n");

 done:
	spin_unlock_irqrestore(&hardwall_lock, flags);

	/*
	 * We have to disable firewall interrupts now, or else when we
	 * return from this handler, we will simply re-interrupt back to
	 * it.  However, we can't clear the protection bits, since we
	 * haven't yet drained the network, and that would allow packets
	 * to cross out of the hardwall region.
	 */
	disable_firewall_interrupts();

	irq_exit();
	set_irq_regs(old_regs);
}
/*
 * Allow access from user space (PL0) to the UDN by setting the "_0"
 * (user-level) variants of the UDN MPL SPRs.
 */
void grant_network_mpls(void)
{
	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1);
#if !CHIP_HAS_REV1_XDN()
	/* Pre-rev1 chips have additional refill/catch-all MPLs. */
	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1);
	__insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1);
#endif
}
/*
 * Deny access from user space to the UDN by raising the MPLs back to
 * the "_1" (kernel-level) variants, the inverse of grant_network_mpls().
 */
void restrict_network_mpls(void)
{
	__insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1);
#if !CHIP_HAS_REV1_XDN()
	/* Pre-rev1 chips have additional refill/catch-all MPLs. */
	__insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1);
	__insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1);
#endif
}
  288. /*
  289. * Code to create, activate, deactivate, and destroy hardwall rectangles.
  290. */
  291. /* Create a hardwall for the given rectangle */
  292. static struct hardwall_info *hardwall_create(
  293. size_t size, const unsigned char __user *bits)
  294. {
  295. struct hardwall_info *iter, *rect;
  296. struct cpumask mask;
  297. unsigned long flags;
  298. int rc;
  299. /* Reject crazy sizes out of hand, a la sys_mbind(). */
  300. if (size > PAGE_SIZE)
  301. return ERR_PTR(-EINVAL);
  302. /* Copy whatever fits into a cpumask. */
  303. if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size)))
  304. return ERR_PTR(-EFAULT);
  305. /*
  306. * If the size was short, clear the rest of the mask;
  307. * otherwise validate that the rest of the user mask was zero
  308. * (we don't try hard to be efficient when validating huge masks).
  309. */
  310. if (size < sizeof(struct cpumask)) {
  311. memset((char *)&mask + size, 0, sizeof(struct cpumask) - size);
  312. } else if (size > sizeof(struct cpumask)) {
  313. size_t i;
  314. for (i = sizeof(struct cpumask); i < size; ++i) {
  315. char c;
  316. if (get_user(c, &bits[i]))
  317. return ERR_PTR(-EFAULT);
  318. if (c)
  319. return ERR_PTR(-EINVAL);
  320. }
  321. }
  322. /* Allocate a new rectangle optimistically. */
  323. rect = kmalloc(sizeof(struct hardwall_info),
  324. GFP_KERNEL | __GFP_ZERO);
  325. if (rect == NULL)
  326. return ERR_PTR(-ENOMEM);
  327. INIT_LIST_HEAD(&rect->task_head);
  328. /* Compute the rectangle size and validate that it's plausible. */
  329. rc = setup_rectangle(rect, &mask);
  330. if (rc != 0) {
  331. kfree(rect);
  332. return ERR_PTR(rc);
  333. }
  334. /* Confirm it doesn't overlap and add it to the list. */
  335. spin_lock_irqsave(&hardwall_lock, flags);
  336. list_for_each_entry(iter, &rectangles, list) {
  337. if (overlaps(iter, rect)) {
  338. spin_unlock_irqrestore(&hardwall_lock, flags);
  339. kfree(rect);
  340. return ERR_PTR(-EBUSY);
  341. }
  342. }
  343. list_add_tail(&rect->list, &rectangles);
  344. spin_unlock_irqrestore(&hardwall_lock, flags);
  345. /* Set up appropriate hardwalling on all affected cpus. */
  346. hardwall_setup(rect);
  347. /* Create a /proc/tile/hardwall entry. */
  348. hardwall_add_proc(rect);
  349. return rect;
  350. }
/*
 * Activate a given hardwall on this cpu for the current process.
 *
 * Requires that the process be bound to exactly the one cpu it is
 * running on and that this cpu lie inside the rectangle.  On success
 * the task is linked onto the rectangle's task list and granted
 * user-level UDN access via grant_network_mpls().
 *
 * Returns 0 on success (including the no-op re-activation case),
 * -ENODATA if no rectangle was created yet, -EINVAL if the rectangle
 * is being torn down or this cpu is outside it, -EPERM if the task's
 * affinity is not a single cpu.
 */
static int hardwall_activate(struct hardwall_info *rect)
{
	int cpu, x, y;
	unsigned long flags;
	struct task_struct *p = current;
	struct thread_struct *ts = &p->thread;

	/* Require a rectangle. */
	if (rect == NULL)
		return -ENODATA;

	/* Not allowed to activate a rectangle that is being torn down. */
	if (rect->teardown_in_progress)
		return -EINVAL;

	/*
	 * Get our affinity; if we're not bound to this tile uniquely,
	 * we can't access the network registers.
	 */
	if (cpumask_weight(&p->cpus_allowed) != 1)
		return -EPERM;

	/* Make sure we are bound to a cpu in this rectangle. */
	cpu = smp_processor_id();
	BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
	x = cpu_x(cpu);
	y = cpu_y(cpu);
	if (!contains(rect, x, y))
		return -EINVAL;

	/* If we are already bound to this hardwall, it's a no-op. */
	if (ts->hardwall) {
		BUG_ON(ts->hardwall != rect);
		return 0;
	}

	/* Success!  This process gets to use the user networks on this cpu. */
	ts->hardwall = rect;
	spin_lock_irqsave(&hardwall_lock, flags);
	list_add(&ts->hardwall_list, &rect->task_head);
	spin_unlock_irqrestore(&hardwall_lock, flags);
	grant_network_mpls();
	printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n",
	       p->pid, p->comm, cpu);
	return 0;
}
/*
 * Deactivate a task's hardwall.  Must hold hardwall_lock.
 * This method may be called from free_task(), so we don't want to
 * rely on too many fields of struct task_struct still being valid.
 * We assume the cpus_allowed, pid, and comm fields are still valid.
 */
static void _hardwall_deactivate(struct task_struct *task)
{
	struct thread_struct *ts = &task->thread;

	/* A task with networks granted must be bound to a single cpu. */
	if (cpumask_weight(&task->cpus_allowed) != 1) {
		pr_err("pid %d (%s) releasing networks with"
		       " an affinity mask containing %d cpus!\n",
		       task->pid, task->comm,
		       cpumask_weight(&task->cpus_allowed));
		BUG();
	}

	BUG_ON(ts->hardwall == NULL);
	ts->hardwall = NULL;
	/* Unlink from the rectangle's task list (caller holds the lock). */
	list_del(&ts->hardwall_list);
	/* Only revoke the MPLs if we are running on the task's own cpu. */
	if (task == current)
		restrict_network_mpls();
}
  414. /* Deactivate a task's hardwall. */
  415. int hardwall_deactivate(struct task_struct *task)
  416. {
  417. unsigned long flags;
  418. int activated;
  419. spin_lock_irqsave(&hardwall_lock, flags);
  420. activated = (task->thread.hardwall != NULL);
  421. if (activated)
  422. _hardwall_deactivate(task);
  423. spin_unlock_irqrestore(&hardwall_lock, flags);
  424. if (!activated)
  425. return -EINVAL;
  426. printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n",
  427. task->pid, task->comm, smp_processor_id());
  428. return 0;
  429. }
/*
 * Stop a UDN switch before draining the network (IPI callback).
 * On rev1 chips there is nothing to freeze here, hence the #if.
 */
static void stop_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
	/* Freeze the switch and the demux. */
	__insn_mtspr(SPR_UDN_SP_FREEZE,
		     SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
		     SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
#endif
}
/*
 * Drain all the state from a stopped switch (IPI callback).
 * The hardware drain sequence below is order-sensitive: fifos, demux
 * write fifo, demux queues, catch-all, then reset the switch points.
 */
static void drain_udn_switch(void *ignored)
{
#if !CHIP_HAS_REV1_XDN()
	int i;
	int from_tile_words, ca_count;

	/* Empty out the 5 switch point fifos. */
	for (i = 0; i < 5; i++) {
		int words, j;
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		/* Low 4 bits of SP_STATE hold the fifo occupancy count. */
		words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF;
		for (j = 0; j < words; j++)
			(void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA);
		BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0);
	}

	/* Dump out the 3 word fifo at top. */
	from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3;
	for (i = 0; i < from_tile_words; i++)
		(void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);

	/* Empty out demuxes (one DATA_AVAIL bit per demux queue). */
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
		(void) __tile_udn0_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
		(void) __tile_udn1_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
		(void) __tile_udn2_receive();
	while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
		(void) __tile_udn3_receive();
	BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);

	/* Empty out catch all. */
	ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
	for (i = 0; i < ca_count; i++)
		(void) __insn_mfspr(SPR_UDN_CA_DATA);
	BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0);

	/* Clear demux logic. */
	__insn_mtspr(SPR_UDN_DEMUX_CTL, 1);

	/*
	 * Write switch state; experimentation indicates that 0xc3000
	 * is an idle switch point.
	 */
	for (i = 0; i < 5; i++) {
		__insn_mtspr(SPR_UDN_SP_FIFO_SEL, i);
		__insn_mtspr(SPR_UDN_SP_STATE, 0xc3000);
	}
#endif
}
/*
 * Reset random UDN state registers at boot up and during hardwall
 * teardown.  Runs on the local cpu; a no-op when "noudn" was given.
 */
void reset_network_state(void)
{
#if !CHIP_HAS_REV1_XDN()
	/* Reset UDN coordinates to their standard value */
	unsigned int cpu = smp_processor_id();
	unsigned int x = cpu % smp_width;
	unsigned int y = cpu / smp_width;
#endif

	if (udn_disabled)
		return;

#if !CHIP_HAS_REV1_XDN()
	/* TILE_COORD packs x at bit 18 and y at bit 7. */
	__insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));

	/* Set demux tags to predefined values and enable them. */
	__insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
	__insn_mtspr(SPR_UDN_TAG_0, (1 << 0));
	__insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
	__insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
	__insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
#endif

	/* Clear out other random registers so we have a clean slate. */
	__insn_mtspr(SPR_UDN_AVAIL_EN, 0);
	__insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_UDN_REFILL_EN, 0);
	__insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
	__insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
#endif

	/* Start the switch and demux. */
#if !CHIP_HAS_REV1_XDN()
	__insn_mtspr(SPR_UDN_SP_FREEZE, 0);
#endif
}
/*
 * Restart a UDN switch after draining (IPI callback): re-initialize
 * network state, clear the protection SPR, and mask the firewall
 * interrupt that do_hardwall_trap() left pending-able.
 */
static void restart_udn_switch(void *ignored)
{
	reset_network_state();

	/* Disable firewall interrupts. */
	__insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0);
	disable_firewall_interrupts();
}
  528. /* Build a struct cpumask containing all valid tiles in bounding rectangle. */
  529. static void fill_mask(struct hardwall_info *r, struct cpumask *result)
  530. {
  531. int x, y, cpu;
  532. cpumask_clear(result);
  533. cpu = r->ulhc_y * smp_width + r->ulhc_x;
  534. for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) {
  535. for (x = 0; x < r->width; ++x, ++cpu)
  536. cpu_online_set(cpu, result);
  537. }
  538. }
  539. /* Last reference to a hardwall is gone, so clear the network. */
  540. static void hardwall_destroy(struct hardwall_info *rect)
  541. {
  542. struct task_struct *task;
  543. unsigned long flags;
  544. struct cpumask mask;
  545. /* Make sure this file actually represents a rectangle. */
  546. if (rect == NULL)
  547. return;
  548. /*
  549. * Deactivate any remaining tasks. It's possible to race with
  550. * some other thread that is exiting and hasn't yet called
  551. * deactivate (when freeing its thread_info), so we carefully
  552. * deactivate any remaining tasks before freeing the
  553. * hardwall_info object itself.
  554. */
  555. spin_lock_irqsave(&hardwall_lock, flags);
  556. list_for_each_entry(task, &rect->task_head, thread.hardwall_list)
  557. _hardwall_deactivate(task);
  558. spin_unlock_irqrestore(&hardwall_lock, flags);
  559. /* Drain the UDN. */
  560. printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n",
  561. rect->width, rect->height, rect->ulhc_x, rect->ulhc_y);
  562. fill_mask(rect, &mask);
  563. on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1);
  564. on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1);
  565. /* Restart switch and disable firewall. */
  566. on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1);
  567. /* Remove the /proc/tile/hardwall entry. */
  568. hardwall_remove_proc(rect);
  569. /* Now free the rectangle from the list. */
  570. spin_lock_irqsave(&hardwall_lock, flags);
  571. BUG_ON(!list_empty(&rect->task_head));
  572. list_del(&rect->list);
  573. spin_unlock_irqrestore(&hardwall_lock, flags);
  574. kfree(rect);
  575. }
  576. static int hardwall_proc_show(struct seq_file *sf, void *v)
  577. {
  578. struct hardwall_info *rect = sf->private;
  579. char buf[256];
  580. int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask);
  581. buf[rc++] = '\n';
  582. seq_write(sf, buf, rc);
  583. return 0;
  584. }
/* seq_file open hook; the proc entry's data is the hardwall_info. */
static int hardwall_proc_open(struct inode *inode,
			      struct file *file)
{
	return single_open(file, hardwall_proc_show, PDE(inode)->data);
}

/* File operations for /proc/tile/hardwall/<id>. */
static const struct file_operations hardwall_proc_fops = {
	.open		= hardwall_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
  596. static void hardwall_add_proc(struct hardwall_info *rect)
  597. {
  598. char buf[64];
  599. snprintf(buf, sizeof(buf), "%d", rect->id);
  600. proc_create_data(buf, 0444, hardwall_proc_dir,
  601. &hardwall_proc_fops, rect);
  602. }
  603. static void hardwall_remove_proc(struct hardwall_info *rect)
  604. {
  605. char buf[64];
  606. snprintf(buf, sizeof(buf), "%d", rect->id);
  607. remove_proc_entry(buf, hardwall_proc_dir);
  608. }
  609. int proc_pid_hardwall(struct task_struct *task, char *buffer)
  610. {
  611. struct hardwall_info *rect = task->thread.hardwall;
  612. return rect ? sprintf(buffer, "%d\n", rect->id) : 0;
  613. }
  614. void proc_tile_hardwall_init(struct proc_dir_entry *root)
  615. {
  616. if (!udn_disabled)
  617. hardwall_proc_dir = proc_mkdir("hardwall", root);
  618. }
  619. /*
  620. * Character device support via ioctl/close.
  621. */
  622. static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
  623. {
  624. struct hardwall_info *rect = file->private_data;
  625. if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
  626. return -EINVAL;
  627. switch (_IOC_NR(a)) {
  628. case _HARDWALL_CREATE:
  629. if (udn_disabled)
  630. return -ENOSYS;
  631. if (rect != NULL)
  632. return -EALREADY;
  633. rect = hardwall_create(_IOC_SIZE(a),
  634. (const unsigned char __user *)b);
  635. if (IS_ERR(rect))
  636. return PTR_ERR(rect);
  637. file->private_data = rect;
  638. return 0;
  639. case _HARDWALL_ACTIVATE:
  640. return hardwall_activate(rect);
  641. case _HARDWALL_DEACTIVATE:
  642. if (current->thread.hardwall != rect)
  643. return -EINVAL;
  644. return hardwall_deactivate(current);
  645. case _HARDWALL_GET_ID:
  646. return rect ? rect->id : -EINVAL;
  647. default:
  648. return -EINVAL;
  649. }
  650. }
#ifdef CONFIG_COMPAT
/* 32-bit compat entry point; forwards to the native ioctl handler. */
static long hardwall_compat_ioctl(struct file *file,
				  unsigned int a, unsigned long b)
{
	/* Sign-extend the argument so it can be used as a pointer. */
	return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b));
}
#endif
  659. /* The user process closed the file; revoke access to user networks. */
  660. static int hardwall_flush(struct file *file, fl_owner_t owner)
  661. {
  662. struct hardwall_info *rect = file->private_data;
  663. struct task_struct *task, *tmp;
  664. unsigned long flags;
  665. if (rect) {
  666. /*
  667. * NOTE: if multiple threads are activated on this hardwall
  668. * file, the other threads will continue having access to the
  669. * UDN until they are context-switched out and back in again.
  670. *
  671. * NOTE: A NULL files pointer means the task is being torn
  672. * down, so in that case we also deactivate it.
  673. */
  674. spin_lock_irqsave(&hardwall_lock, flags);
  675. list_for_each_entry_safe(task, tmp, &rect->task_head,
  676. thread.hardwall_list) {
  677. if (task->files == owner || task->files == NULL)
  678. _hardwall_deactivate(task);
  679. }
  680. spin_unlock_irqrestore(&hardwall_lock, flags);
  681. }
  682. return 0;
  683. }
/*
 * Last close of the file: this hardwall is gone, so destroy it
 * (drain the network, free the rectangle).
 */
static int hardwall_release(struct inode *inode, struct file *file)
{
	hardwall_destroy(file->private_data);
	return 0;
}
/* File operations for the hardwall character device. */
static const struct file_operations dev_hardwall_fops = {
	.open		= nonseekable_open,
	.unlocked_ioctl	= hardwall_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= hardwall_compat_ioctl,
#endif
	.flush		= hardwall_flush,
	.release	= hardwall_release,
};

/* The single hardwall character device. */
static struct cdev hardwall_dev;
  700. static int __init dev_hardwall_init(void)
  701. {
  702. int rc;
  703. dev_t dev;
  704. rc = alloc_chrdev_region(&dev, 0, 1, "hardwall");
  705. if (rc < 0)
  706. return rc;
  707. cdev_init(&hardwall_dev, &dev_hardwall_fops);
  708. rc = cdev_add(&hardwall_dev, dev, 1);
  709. if (rc < 0)
  710. return rc;
  711. return 0;
  712. }
  713. late_initcall(dev_hardwall_init);