irq_xen.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445
  1. /******************************************************************************
  2. * arch/ia64/xen/irq_xen.c
  3. *
  4. * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
  5. * VA Linux Systems Japan K.K.
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU General Public License
  18. * along with this program; if not, write to the Free Software
  19. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  20. *
  21. */
  22. #include <linux/cpu.h>
  23. #include <xen/interface/xen.h>
  24. #include <xen/interface/callback.h>
  25. #include <xen/events.h>
  26. #include <asm/xen/privop.h>
  27. #include "irq_xen.h"
  28. /***************************************************************************
  29. * pv_irq_ops
  30. * irq operations
  31. */
  32. static int
  33. xen_assign_irq_vector(int irq)
  34. {
  35. struct physdev_irq irq_op;
  36. irq_op.irq = irq;
  37. if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
  38. return -ENOSPC;
  39. return irq_op.vector;
  40. }
  41. static void
  42. xen_free_irq_vector(int vector)
  43. {
  44. struct physdev_irq irq_op;
  45. if (vector < IA64_FIRST_DEVICE_VECTOR ||
  46. vector > IA64_LAST_DEVICE_VECTOR)
  47. return;
  48. irq_op.vector = vector;
  49. if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
  50. printk(KERN_WARNING "%s: xen_free_irq_vector fail vector=%d\n",
  51. __func__, vector);
  52. }
/* Per-cpu irq numbers returned by the evtchn binding calls in
 * __xen_register_percpu_irq(); -1 means "not currently bound". */
static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;

/* Per-cpu storage for the "<action name><cpu>" strings built by
 * __xen_register_percpu_irq() and passed as irq names to the evtchn
 * binding functions (which keep the pointer, so the buffers must
 * outlive the binding). */
#define NAME_SIZE	15
static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE
/* One percpu irq registration cached before the slab allocator was
 * ready, to be replayed later (see __xen_register_percpu_irq()). */
struct saved_irq {
	unsigned int irq;
	struct irqaction *action;
};

/* 16 should be far optimistic value, since only several percpu irqs
 * are registered early.
 */
#define MAX_LATE_IRQ	16
static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
/* Entries cached before slab was ready (rebound in
 * xen_bind_early_percpu_irq()). */
static unsigned short late_irq_cnt;
/* Total entries cached on the BSP (replayed for APs in
 * xen_smp_intr_init_early()). */
static unsigned short saved_irq_cnt;
/* Set in xen_bind_early_percpu_irq() once slab is assumed usable. */
static int xen_slab_ready;
  79. #ifdef CONFIG_SMP
  80. #include <linux/sched.h>
  81. /* Dummy stub. Though we may check XEN_RESCHEDULE_VECTOR before __do_IRQ,
  82. * it ends up to issue several memory accesses upon percpu data and
  83. * thus adds unnecessary traffic to other paths.
  84. */
/* No-op handler: does nothing and always reports the interrupt as
 * handled (see the comment above on why a real handler is avoided). */
static irqreturn_t
xen_dummy_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}
/* Handler for the reschedule IPI: hand off to the scheduler and
 * report the interrupt as handled. */
static irqreturn_t
xen_resched_handler(int irq, void *dev_id)
{
	scheduler_ipi();
	return IRQ_HANDLED;
}
/* irqaction tables for the SMP percpu vectors, registered via
 * xen_register_ipi(). */
static struct irqaction xen_ipi_irqaction = {
	.handler =	handle_IPI,
	.flags =	IRQF_DISABLED,
	.name =		"IPI"
};

static struct irqaction xen_resched_irqaction = {
	.handler =	xen_resched_handler,
	.flags =	IRQF_DISABLED,
	.name =		"resched"
};

/* TLB-flush vector gets the dummy handler (see comment above). */
static struct irqaction xen_tlb_irqaction = {
	.handler =	xen_dummy_handler,
	.flags =	IRQF_DISABLED,
	.name =		"tlb_flush"
};
  111. #endif
  112. /*
  113. * This is xen version percpu irq registration, which needs bind
  114. * to xen specific evtchn sub-system. One trick here is that xen
  115. * evtchn binding interface depends on kmalloc because related
  116. * port needs to be freed at device/cpu down. So we cache the
  117. * registration on BSP before slab is ready and then deal them
  118. * at later point. For rest instances happening after slab ready,
  119. * we hook them to xen evtchn immediately.
  120. *
* FIXME: MCA is not supported so far, and thus the "nomca" boot param is
  122. * required.
  123. */
/*
 * Bind one percpu ia64 vector for @cpu to the xen evtchn sub-system.
 *
 * @cpu:    cpu the binding is for
 * @vec:    ia64 percpu vector being registered
 * @action: irqaction supplying handler, flags, name and dev_id
 * @save:   non-zero to cache the registration (BSP only) so it can be
 *          replayed for APs and after slab becomes ready
 *
 * The name buffer handed to the binding call is the per-cpu
 * "<action name><cpu>" string, and the resulting irq is recorded in
 * the matching per-cpu *_irq variable for teardown on cpu-down.
 */
static void
__xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
			  struct irqaction *action, int save)
{
	int irq = 0;

	if (xen_slab_ready) {
		switch (vec) {
		case IA64_TIMER_VECTOR:
			/* Timer tick is backed by the VIRQ_ITC virq. */
			snprintf(per_cpu(xen_timer_name, cpu),
				 sizeof(per_cpu(xen_timer_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
				action->handler, action->flags,
				per_cpu(xen_timer_name, cpu), action->dev_id);
			per_cpu(xen_timer_irq, cpu) = irq;
			break;
		case IA64_IPI_RESCHEDULE:
			snprintf(per_cpu(xen_resched_name, cpu),
				 sizeof(per_cpu(xen_resched_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_resched_name, cpu), action->dev_id);
			per_cpu(xen_resched_irq, cpu) = irq;
			break;
		case IA64_IPI_VECTOR:
			snprintf(per_cpu(xen_ipi_name, cpu),
				 sizeof(per_cpu(xen_ipi_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
				action->handler, action->flags,
				per_cpu(xen_ipi_name, cpu), action->dev_id);
			per_cpu(xen_ipi_irq, cpu) = irq;
			break;
		case IA64_CMC_VECTOR:
			/* CMC is delivered as a virq rather than an IPI. */
			snprintf(per_cpu(xen_cmc_name, cpu),
				 sizeof(per_cpu(xen_cmc_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
						      action->handler,
						      action->flags,
						      per_cpu(xen_cmc_name, cpu),
						      action->dev_id);
			per_cpu(xen_cmc_irq, cpu) = irq;
			break;
		case IA64_CMCP_VECTOR:
			snprintf(per_cpu(xen_cmcp_name, cpu),
				 sizeof(per_cpu(xen_cmcp_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
						     action->handler,
						     action->flags,
						     per_cpu(xen_cmcp_name, cpu),
						     action->dev_id);
			per_cpu(xen_cmcp_irq, cpu) = irq;
			break;
		case IA64_CPEP_VECTOR:
			snprintf(per_cpu(xen_cpep_name, cpu),
				 sizeof(per_cpu(xen_cpep_name, cpu)),
				 "%s%d", action->name, cpu);
			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
						     action->handler,
						     action->flags,
						     per_cpu(xen_cpep_name, cpu),
						     action->dev_id);
			per_cpu(xen_cpep_irq, cpu) = irq;
			break;
		case IA64_CPE_VECTOR:
		case IA64_MCA_RENDEZ_VECTOR:
		case IA64_PERFMON_VECTOR:
		case IA64_MCA_WAKEUP_VECTOR:
		case IA64_SPURIOUS_INT_VECTOR:
			/* No need to complain, these aren't supported. */
			break;
		default:
			printk(KERN_WARNING "Percpu irq %d is unsupported "
			       "by xen!\n", vec);
			break;
		}
		/* A negative irq means the evtchn binding itself failed. */
		BUG_ON(irq < 0);

		if (irq > 0) {
			/*
			 * Mark percpu.  Without this, migrate_irqs() will
			 * mark the interrupt for migrations and trigger it
			 * on cpu hotplug.
			 */
			irq_set_status_flags(irq, IRQ_PER_CPU);
		}
	}

	/* For BSP, we cache registered percpu irqs, and then re-walk
	 * them when initializing APs
	 */
	if (!cpu && save) {
		BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
		saved_percpu_irqs[saved_irq_cnt].irq = vec;
		saved_percpu_irqs[saved_irq_cnt].action = action;
		saved_irq_cnt++;
		if (!xen_slab_ready)
			late_irq_cnt++;
	}
}
/* pv_irq_ops hook: register a percpu irq on the current cpu and cache
 * it (save=1) so it can be replayed for other cpus later. */
static void
xen_register_percpu_irq(ia64_vector vec, struct irqaction *action)
{
	__xen_register_percpu_irq(smp_processor_id(), vec, action, 1);
}
  230. static void
  231. xen_bind_early_percpu_irq(void)
  232. {
  233. int i;
  234. xen_slab_ready = 1;
  235. /* There's no race when accessing this cached array, since only
  236. * BSP will face with such step shortly
  237. */
  238. for (i = 0; i < late_irq_cnt; i++)
  239. __xen_register_percpu_irq(smp_processor_id(),
  240. saved_percpu_irqs[i].irq,
  241. saved_percpu_irqs[i].action, 0);
  242. }
  243. /* FIXME: There's no obvious point to check whether slab is ready. So
  244. * a hack is used here by utilizing a late time hook.
  245. */
  246. #ifdef CONFIG_HOTPLUG_CPU
  247. static int __devinit
  248. unbind_evtchn_callback(struct notifier_block *nfb,
  249. unsigned long action, void *hcpu)
  250. {
  251. unsigned int cpu = (unsigned long)hcpu;
  252. if (action == CPU_DEAD) {
  253. /* Unregister evtchn. */
  254. if (per_cpu(xen_cpep_irq, cpu) >= 0) {
  255. unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
  256. NULL);
  257. per_cpu(xen_cpep_irq, cpu) = -1;
  258. }
  259. if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
  260. unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
  261. NULL);
  262. per_cpu(xen_cmcp_irq, cpu) = -1;
  263. }
  264. if (per_cpu(xen_cmc_irq, cpu) >= 0) {
  265. unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
  266. per_cpu(xen_cmc_irq, cpu) = -1;
  267. }
  268. if (per_cpu(xen_ipi_irq, cpu) >= 0) {
  269. unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
  270. per_cpu(xen_ipi_irq, cpu) = -1;
  271. }
  272. if (per_cpu(xen_resched_irq, cpu) >= 0) {
  273. unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
  274. NULL);
  275. per_cpu(xen_resched_irq, cpu) = -1;
  276. }
  277. if (per_cpu(xen_timer_irq, cpu) >= 0) {
  278. unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
  279. NULL);
  280. per_cpu(xen_timer_irq, cpu) = -1;
  281. }
  282. }
  283. return NOTIFY_OK;
  284. }
/* Hotplug notifier block; registered once by the boot cpu in
 * xen_smp_intr_init(). */
static struct notifier_block unbind_evtchn_notifier = {
	.notifier_call = unbind_evtchn_callback,
	.priority = 0
};
  289. #endif
  290. void xen_smp_intr_init_early(unsigned int cpu)
  291. {
  292. #ifdef CONFIG_SMP
  293. unsigned int i;
  294. for (i = 0; i < saved_irq_cnt; i++)
  295. __xen_register_percpu_irq(cpu, saved_percpu_irqs[i].irq,
  296. saved_percpu_irqs[i].action, 0);
  297. #endif
  298. }
/*
 * Per-cpu interrupt setup, run on each cpu.  APs register the xen
 * event callback here; the boot cpu's callback was already registered
 * in xen_irq_init(), so cpu 0 only installs the hotplug notifier (once)
 * and returns.  No-op on !SMP.
 */
void xen_smp_intr_init(void)
{
#ifdef CONFIG_SMP
	unsigned int cpu = smp_processor_id();
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = { .ip = (unsigned long)&xen_event_callback },
	};

	if (cpu == 0) {
		/* Initialization was already done for boot cpu. */
#ifdef CONFIG_HOTPLUG_CPU
		/* Register the notifier only once. */
		register_cpu_notifier(&unbind_evtchn_notifier);
#endif
		return;
	}

	/* This should be piggyback when setup vcpu guest context */
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
#endif /* CONFIG_SMP */
}
/*
 * Boot-time irq setup: initialize the evtchn layer, register the event
 * callback for the boot cpu, and hook xen_bind_early_percpu_irq onto
 * late_time_init so the early-cached percpu irqs are bound once slab
 * is usable (see the FIXME comment above about this hack).
 */
void __init
xen_irq_init(void)
{
	struct callback_register event = {
		.type = CALLBACKTYPE_event,
		.address = { .ip = (unsigned long)&xen_event_callback },
	};

	xen_init_IRQ();
	BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
	late_time_init = xen_bind_early_percpu_irq;
}
/*
 * Send a platform IPI to @cpu by translating the ia64 vector into the
 * corresponding xen IPI event channel.  @delivery_mode and @redirect
 * are not used by this xen implementation.  AP wakeup is special-cased:
 * the target cpu's percpu irqs are set up first, then a raw xen ipi is
 * sent.
 */
void
xen_platform_send_ipi(int cpu, int vector, int delivery_mode, int redirect)
{
#ifdef CONFIG_SMP
	/* TODO: we need to call vcpu_up here */
	if (unlikely(vector == ap_wakeup_vector)) {
		/* XXX
		 * This should be in __cpu_up(cpu) in ia64 smpboot.c
		 * like x86. But don't want to modify it,
		 * keep it untouched.
		 */
		xen_smp_intr_init_early(cpu);

		xen_send_ipi(cpu, vector);
		/* vcpu_prepare_and_up(cpu); */
		return;
	}
#endif

	switch (vector) {
	case IA64_IPI_VECTOR:
		xen_send_IPI_one(cpu, XEN_IPI_VECTOR);
		break;
	case IA64_IPI_RESCHEDULE:
		xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
		break;
	case IA64_CMCP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CMCP_VECTOR);
		break;
	case IA64_CPEP_VECTOR:
		xen_send_IPI_one(cpu, XEN_CPEP_VECTOR);
		break;
	case IA64_TIMER_VECTOR: {
		/* this is used only once by check_sal_cache_flush()
		   at boot time */
		static int used = 0;

		/* First use goes out as a raw xen ipi; any later use
		 * falls through to the unsupported-IPI path below. */
		if (!used) {
			xen_send_ipi(cpu, IA64_TIMER_VECTOR);
			used = 1;
			break;
		}
		/* fallthrough */
	}
	default:
		printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
		       vector);
		notify_remote_via_irq(0); /* defaults to 0 irq */
		break;
	}
}
/* pv_irq_ops hook: register the SMP percpu IPI vectors with their
 * xen irqaction tables (defined above).  No-op on !SMP. */
static void __init
xen_register_ipi(void)
{
#ifdef CONFIG_SMP
	register_percpu_irq(IA64_IPI_VECTOR, &xen_ipi_irqaction);
	register_percpu_irq(IA64_IPI_RESCHEDULE, &xen_resched_irqaction);
	register_percpu_irq(IA64_IPI_LOCAL_TLB_FLUSH, &xen_tlb_irqaction);
#endif
}
/* pv_irq_ops hook: re-trigger @vector through its event channel.
 * The return value of resend_irq_on_evtchn() is deliberately ignored. */
static void
xen_resend_irq(unsigned int vector)
{
	(void)resend_irq_on_evtchn(vector);
}
/* Paravirtualized irq operation table installed when running on xen. */
const struct pv_irq_ops xen_irq_ops __initdata = {
	.register_ipi = xen_register_ipi,

	.assign_irq_vector = xen_assign_irq_vector,
	.free_irq_vector = xen_free_irq_vector,
	.register_percpu_irq = xen_register_percpu_irq,
	.resend_irq = xen_resend_irq,
};