/*
 * File:    htirq.c
 * Purpose: Hypertransport Interrupt Capability
 *
 * Copyright (C) 2006 Linux Networx
 * Copyright (C) Eric Biederman <ebiederman@lnxi.com>
 */

#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/htirq.h>

/* Global ht irq lock.
 *
 * This is needed to serialize access to the data port of the hypertransport
 * irq capability.
 *
 * With multiple simultaneous hypertransport irq devices it might pay
 * to make this more fine grained.  But start with simple, stupid, and correct.
 */
static DEFINE_SPINLOCK(ht_irq_lock);

struct ht_irq_cfg {
        struct pci_dev *dev;
        /* Update callback used to cope with buggy hardware */
        ht_irq_update_t *update;
        unsigned pos;
        unsigned idx;
        struct ht_irq_msg msg;
};
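
/*
 * Register access model used below: the irq capability exposes an index
 * register at pos + 2 and a 32-bit data port at pos + 4.  Interrupt slot
 * idx is reached through data-port indices 0x10 + 2 * idx (low address
 * word) and 0x10 + 2 * idx + 1 (high address word); see __ht_create_irq()
 * and write_ht_irq_msg().
 */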

void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
        struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
        unsigned long flags;

        spin_lock_irqsave(&ht_irq_lock, flags);
        if (cfg->msg.address_lo != msg->address_lo) {
                pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
                pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_lo);
        }
        if (cfg->msg.address_hi != msg->address_hi) {
                pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx + 1);
                pci_write_config_dword(cfg->dev, cfg->pos + 4, msg->address_hi);
        }
        if (cfg->update)
                cfg->update(cfg->dev, irq, msg);
        spin_unlock_irqrestore(&ht_irq_lock, flags);
        cfg->msg = *msg;
}

void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
{
        struct ht_irq_cfg *cfg = irq_get_handler_data(irq);

        *msg = cfg->msg;
}

void mask_ht_irq(struct irq_data *data)
{
        struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
        struct ht_irq_msg msg = cfg->msg;

        /* Bit 0 of the low address word is the mask bit; set it to mask. */
        msg.address_lo |= 1;
        write_ht_irq_msg(data->irq, &msg);
}

void unmask_ht_irq(struct irq_data *data)
{
        struct ht_irq_cfg *cfg = irq_data_get_irq_handler_data(data);
        struct ht_irq_msg msg = cfg->msg;

        /* Clear the mask bit so the interrupt is delivered again. */
        msg.address_lo &= ~1;
        write_ht_irq_msg(data->irq, &msg);
}

/**
 * __ht_create_irq - create an irq and attach it to a device.
 * @dev: The hypertransport device to find the irq capability on.
 * @idx: Which of the possible irqs to attach to.
 * @update: Function to be called when changing the htirq message
 *
 * The irq number of the new irq or a negative error value is returned.
 */
int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
{
        struct ht_irq_cfg *cfg;
        unsigned long flags;
        u32 data;
        int max_irq;
        int pos;
        int irq;
        int node;

        pos = pci_find_ht_capability(dev, HT_CAPTYPE_IRQ);
        if (!pos)
                return -EINVAL;

        /* Verify the idx I want to use is in range */
        spin_lock_irqsave(&ht_irq_lock, flags);
        pci_write_config_byte(dev, pos + 2, 1);
        pci_read_config_dword(dev, pos + 4, &data);
        spin_unlock_irqrestore(&ht_irq_lock, flags);

        max_irq = (data >> 16) & 0xff;
        if (idx > max_irq)
                return -EINVAL;

        cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
        if (!cfg)
                return -ENOMEM;

        cfg->dev = dev;
        cfg->update = update;
        cfg->pos = pos;
        cfg->idx = 0x10 + (idx * 2);
        /* Initialize msg to a value that will never match the first write. */
        cfg->msg.address_lo = 0xffffffff;
        cfg->msg.address_hi = 0xffffffff;

        node = dev_to_node(&dev->dev);
        irq = create_irq_nr(0, node);
        if (irq <= 0) {
                kfree(cfg);
                return -EBUSY;
        }
        irq_set_handler_data(irq, cfg);

        if (arch_setup_ht_irq(irq, dev) < 0) {
                ht_destroy_irq(irq);
                return -EBUSY;
        }

        return irq;
}
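
/*
 * Example (illustrative sketch; the callback name and the device-specific
 * behaviour are hypothetical): a driver for hardware that also latches the
 * irq message in its own registers can pass an update callback, which
 * write_ht_irq_msg() invokes as cfg->update(cfg->dev, irq, msg):
 *
 *      static void my_ht_irq_update(struct pci_dev *dev, int irq,
 *                                   struct ht_irq_msg *msg)
 *      {
 *              // mirror msg->address_lo/msg->address_hi into the
 *              // device-specific shadow registers here
 *      }
 *
 *      irq = __ht_create_irq(dev, 0, my_ht_irq_update);
 */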

/**
 * ht_create_irq - create an irq and attach it to a device.
 * @dev: The hypertransport device to find the irq capability on.
 * @idx: Which of the possible irqs to attach to.
 *
 * ht_create_irq needs to be called for all hypertransport devices
 * that generate irqs.
 *
 * The irq number of the new irq or a negative error value is returned.
 */
int ht_create_irq(struct pci_dev *dev, int idx)
{
        return __ht_create_irq(dev, idx, NULL);
}
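
/*
 * Example (illustrative sketch; the device pointer, handler, name and
 * private data below are hypothetical): a typical caller creates the ht
 * irq, hooks a handler with request_irq(), and tears both down again on
 * the error/remove path:
 *
 *      irq = ht_create_irq(pdev, 0);
 *      if (irq < 0)
 *              return irq;
 *      err = request_irq(irq, my_handler, 0, "my_ht_device", my_priv);
 *      if (err) {
 *              ht_destroy_irq(irq);
 *              return err;
 *      }
 *      ...
 *      free_irq(irq, my_priv);
 *      ht_destroy_irq(irq);
 */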

/**
 * ht_destroy_irq - destroy an irq created with ht_create_irq
 * @irq: irq to be destroyed
 *
 * This reverses ht_create_irq, removing the specified irq from
 * existence.  The irq should be free (no handler still requested)
 * before this is called.
 */
void ht_destroy_irq(unsigned int irq)
{
        struct ht_irq_cfg *cfg;

        cfg = irq_get_handler_data(irq);
        irq_set_chip(irq, NULL);
        irq_set_handler_data(irq, NULL);
        destroy_irq(irq);

        kfree(cfg);
}

EXPORT_SYMBOL(__ht_create_irq);
EXPORT_SYMBOL(ht_create_irq);
EXPORT_SYMBOL(ht_destroy_irq);