msi_ia64.c 4.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215
/*
 * MSI hooks for IA64 (adapted from the standard x86 APIC MSI code)
 */
  4. #include <linux/pci.h>
  5. #include <linux/irq.h>
  6. #include <linux/msi.h>
  7. #include <linux/dmar.h>
  8. #include <asm/smp.h>
  9. #include <asm/msidef.h>
  10. static struct irq_chip ia64_msi_chip;
#ifdef CONFIG_SMP
/*
 * Retarget an MSI interrupt at a new CPU.
 *
 * Rewrites the cached MSI message so that its destination-ID field holds
 * the physical ID of the first CPU in @cpu_mask and its vector field holds
 * the vector currently bound to @idata->irq, then writes the message back
 * to the device.
 *
 * Returns 0 on success, -1 if the chosen CPU is offline or the pending
 * vector move could not be prepared.  @force is not consulted.
 */
static int ia64_set_msi_irq_affinity(struct irq_data *idata,
const cpumask_t *cpu_mask, bool force)
{
struct msi_msg msg;
u32 addr, data;
int cpu = first_cpu(*cpu_mask); /* only the first requested CPU is used */
unsigned int irq = idata->irq;
if (!cpu_online(cpu))
return -1;
if (irq_prepare_move(irq, cpu))
return -1;
/* Start from the message last programmed into the device. */
get_cached_msi_msg(irq, &msg);
/*
 * NOTE(review): this function clears fields with "&= MASK" while
 * dmar_msi_set_affinity() below uses "&= ~MASK".  Both can only be
 * right if the asm/msidef.h masks are keep-masks here -- confirm
 * the mask polarity against asm/msidef.h.
 */
addr = msg.address_lo;
addr &= MSI_ADDR_DEST_ID_MASK;
addr |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
msg.address_lo = addr;
data = msg.data;
data &= MSI_DATA_VECTOR_MASK;
data |= MSI_DATA_VECTOR(irq_to_vector(irq));
msg.data = data;
write_msi_msg(irq, &msg);
/* Record the effective affinity: the single CPU actually targeted. */
cpumask_copy(idata->affinity, cpumask_of(cpu));
return 0;
}
#endif /* CONFIG_SMP */
  37. int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
  38. {
  39. struct msi_msg msg;
  40. unsigned long dest_phys_id;
  41. int irq, vector;
  42. cpumask_t mask;
  43. irq = create_irq();
  44. if (irq < 0)
  45. return irq;
  46. irq_set_msi_desc(irq, desc);
  47. cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
  48. dest_phys_id = cpu_physical_id(first_cpu(mask));
  49. vector = irq_to_vector(irq);
  50. msg.address_hi = 0;
  51. msg.address_lo =
  52. MSI_ADDR_HEADER |
  53. MSI_ADDR_DEST_MODE_PHYS |
  54. MSI_ADDR_REDIRECTION_CPU |
  55. MSI_ADDR_DEST_ID_CPU(dest_phys_id);
  56. msg.data =
  57. MSI_DATA_TRIGGER_EDGE |
  58. MSI_DATA_LEVEL_ASSERT |
  59. MSI_DATA_DELIVERY_FIXED |
  60. MSI_DATA_VECTOR(vector);
  61. write_msi_msg(irq, &msg);
  62. irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq);
  63. return 0;
  64. }
/*
 * Release the irq previously allocated by ia64_setup_msi_irq().
 */
void ia64_teardown_msi_irq(unsigned int irq)
{
destroy_irq(irq);
}
/*
 * Acknowledge an MSI: complete any in-flight vector migration for this
 * irq, perform a deferred irq move if one is queued, then issue the
 * processor EOI.  The ordering of these three steps is significant.
 */
static void ia64_ack_msi_irq(struct irq_data *data)
{
irq_complete_move(data->irq);
irq_move_irq(data);
ia64_eoi();
}
  75. static int ia64_msi_retrigger_irq(struct irq_data *data)
  76. {
  77. unsigned int vector = irq_to_vector(data->irq);
  78. ia64_resend_irq(vector);
  79. return 1;
  80. }
/*
 * Generic ops used on most IA64 platforms.
 */
static struct irq_chip ia64_msi_chip = {
.name = "PCI-MSI",
.irq_mask = mask_msi_irq,	/* generic PCI-MSI mask/unmask */
.irq_unmask = unmask_msi_irq,
.irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = ia64_set_msi_irq_affinity,
#endif
.irq_retrigger = ia64_msi_retrigger_irq,
};
  94. int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
  95. {
  96. if (platform_setup_msi_irq)
  97. return platform_setup_msi_irq(pdev, desc);
  98. return ia64_setup_msi_irq(pdev, desc);
  99. }
  100. void arch_teardown_msi_irq(unsigned int irq)
  101. {
  102. if (platform_teardown_msi_irq)
  103. return platform_teardown_msi_irq(irq);
  104. return ia64_teardown_msi_irq(irq);
  105. }
  106. #ifdef CONFIG_INTEL_IOMMU
#ifdef CONFIG_SMP
/*
 * Retarget the DMAR fault-reporting MSI at a new CPU.
 *
 * Mirrors ia64_set_msi_irq_affinity() but reads and writes the message
 * through the DMAR register interface (dmar_msi_read/dmar_msi_write)
 * rather than PCI config space, and takes the vector from irq_cfg.
 *
 * Returns 0 on success, -1 if the chosen CPU is offline or the pending
 * vector move could not be prepared.  @force is not consulted.
 */
static int dmar_msi_set_affinity(struct irq_data *data,
const struct cpumask *mask, bool force)
{
unsigned int irq = data->irq;
struct irq_cfg *cfg = irq_cfg + irq;
struct msi_msg msg;
int cpu = cpumask_first(mask); /* only the first requested CPU is used */
if (!cpu_online(cpu))
return -1;
if (irq_prepare_move(irq, cpu))
return -1;
dmar_msi_read(irq, &msg);
/*
 * NOTE(review): "&= ~MASK" here vs. "&= MASK" in
 * ia64_set_msi_irq_affinity() above -- the two functions disagree on
 * mask polarity; confirm which form matches asm/msidef.h.
 */
msg.data &= ~MSI_DATA_VECTOR_MASK;
msg.data |= MSI_DATA_VECTOR(cfg->vector);
msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
msg.address_lo |= MSI_ADDR_DEST_ID_CPU(cpu_physical_id(cpu));
dmar_msi_write(irq, &msg);
/* Unlike the PCI-MSI path above, the full requested mask is recorded. */
cpumask_copy(data->affinity, mask);
return 0;
}
#endif /* CONFIG_SMP */
/*
 * irq_chip for the DMAR fault-reporting MSI; shares the ack and
 * retrigger callbacks with the generic IA64 PCI-MSI chip above.
 */
static struct irq_chip dmar_msi_type = {
.name = "DMAR_MSI",
.irq_unmask = dmar_msi_unmask,
.irq_mask = dmar_msi_mask,
.irq_ack = ia64_ack_msi_irq,
#ifdef CONFIG_SMP
.irq_set_affinity = dmar_msi_set_affinity,
#endif
.irq_retrigger = ia64_msi_retrigger_irq,
};
  139. static int
  140. msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
  141. {
  142. struct irq_cfg *cfg = irq_cfg + irq;
  143. unsigned dest;
  144. cpumask_t mask;
  145. cpumask_and(&mask, &(irq_to_domain(irq)), cpu_online_mask);
  146. dest = cpu_physical_id(first_cpu(mask));
  147. msg->address_hi = 0;
  148. msg->address_lo =
  149. MSI_ADDR_HEADER |
  150. MSI_ADDR_DEST_MODE_PHYS |
  151. MSI_ADDR_REDIRECTION_CPU |
  152. MSI_ADDR_DEST_ID_CPU(dest);
  153. msg->data =
  154. MSI_DATA_TRIGGER_EDGE |
  155. MSI_DATA_LEVEL_ASSERT |
  156. MSI_DATA_DELIVERY_FIXED |
  157. MSI_DATA_VECTOR(cfg->vector);
  158. return 0;
  159. }
  160. int arch_setup_dmar_msi(unsigned int irq)
  161. {
  162. int ret;
  163. struct msi_msg msg;
  164. ret = msi_compose_msg(NULL, irq, &msg);
  165. if (ret < 0)
  166. return ret;
  167. dmar_msi_write(irq, &msg);
  168. irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
  169. "edge");
  170. return 0;
  171. }
  172. #endif /* CONFIG_INTEL_IOMMU */