/*
 * Allwinner A20/A31 SoCs NMI IRQ chip driver.
 *
 * Carlo Caione <carlo.caione@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
  10. #define DRV_NAME "sunxi-nmi"
  11. #define pr_fmt(fmt) DRV_NAME ": " fmt
  12. #include <linux/bitops.h>
  13. #include <linux/device.h>
  14. #include <linux/io.h>
  15. #include <linux/irq.h>
  16. #include <linux/interrupt.h>
  17. #include <linux/irqdomain.h>
  18. #include <linux/of_irq.h>
  19. #include <linux/of_address.h>
  20. #include <linux/of_platform.h>
  21. #include <linux/irqchip.h>
  22. #include <linux/irqchip/chained_irq.h>
  23. #define SUNXI_NMI_SRC_TYPE_MASK 0x00000003
  24. enum {
  25. SUNXI_SRC_TYPE_LEVEL_LOW = 0,
  26. SUNXI_SRC_TYPE_EDGE_FALLING,
  27. SUNXI_SRC_TYPE_LEVEL_HIGH,
  28. SUNXI_SRC_TYPE_EDGE_RISING,
  29. };
/*
 * Per-SoC register layout: byte offsets (from the mapped reg_base) of
 * the NMI controller's control, pending-status and enable registers.
 * The layout differs between SoC generations, see the instances below.
 */
struct sunxi_sc_nmi_reg_offs {
	u32 ctrl;	/* trigger-type control register */
	u32 pend;	/* pending/ack register */
	u32 enable;	/* interrupt enable (mask) register */
};
/* A20 (sun7i) layout: ctrl, pend and enable are consecutive. */
static struct sunxi_sc_nmi_reg_offs sun7i_reg_offs = {
	.ctrl = 0x00,
	.pend = 0x04,
	.enable = 0x08,
};
/* A31 (sun6i) layout: enable register sits apart at 0x34. */
static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = {
	.ctrl = 0x00,
	.pend = 0x04,
	.enable = 0x34,
};
/* A80 (sun9i) layout: enable and pend are swapped relative to sun7i. */
static struct sunxi_sc_nmi_reg_offs sun9i_reg_offs = {
	.ctrl = 0x00,
	.pend = 0x08,
	.enable = 0x04,
};
/*
 * Write @val to the register at byte offset @off, using the generic
 * chip's MMIO accessor (honours gc->reg_base and any reg_writel hook).
 */
static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off,
				      u32 val)
{
	irq_reg_writel(gc, val, off);
}
/* Read the register at byte offset @off via the generic chip's accessor. */
static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}
  59. static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
  60. {
  61. struct irq_domain *domain = irq_desc_get_handler_data(desc);
  62. struct irq_chip *chip = irq_desc_get_chip(desc);
  63. unsigned int virq = irq_find_mapping(domain, 0);
  64. chained_irq_enter(chip, desc);
  65. generic_handle_irq(virq);
  66. chained_irq_exit(chip, desc);
  67. }
/*
 * irq_set_type callback: program the SRC_TYPE field of the control
 * register and switch between the level/edge chip_types (and their
 * flow handlers) to match the requested trigger.
 *
 * Returns IRQ_SET_MASK_OK on success, -EBADR for trigger combinations
 * that don't map to a single hardware mode (e.g. IRQ_TYPE_EDGE_BOTH).
 */
static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	struct irq_chip_type *ct = gc->chip_types;
	u32 src_type_reg;
	u32 ctrl_off = ct->regs.type;	/* default: first chip_type's type reg */
	unsigned int src_type;
	unsigned int i;

	irq_gc_lock(gc);

	/* Map the requested trigger to the hardware SRC_TYPE encoding. */
	switch (flow_type & IRQF_TRIGGER_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
		break;
	case IRQ_TYPE_EDGE_RISING:
		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
		break;
	case IRQ_TYPE_NONE:
	case IRQ_TYPE_LEVEL_LOW:
		/* IRQ_TYPE_NONE defaults to level-low (hardware value 0). */
		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
		break;
	default:
		irq_gc_unlock(gc);
		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
		       data->irq);
		return -EBADR;
	}

	/* Record the type and select the matching (level/edge) alt chip. */
	irqd_set_trigger_type(data, flow_type);
	irq_setup_alt_chip(data, flow_type);

	/*
	 * Pick the type-register offset of the chip_type matching the new
	 * trigger. NOTE(review): both chip_types are initialised with the
	 * same regs.type (reg_offs->ctrl) in sunxi_sc_nmi_irq_init, so in
	 * this driver the loop keeps ctrl_off unchanged in practice.
	 */
	for (i = 0; i < gc->num_ct; i++, ct++)
		if (ct->type & flow_type)
			ctrl_off = ct->regs.type;

	/* Read-modify-write the SRC_TYPE field, preserving other bits. */
	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
	src_type_reg |= src_type;
	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);

	irq_gc_unlock(gc);

	return IRQ_SET_MASK_OK;
}
  109. static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
  110. struct sunxi_sc_nmi_reg_offs *reg_offs)
  111. {
  112. struct irq_domain *domain;
  113. struct irq_chip_generic *gc;
  114. unsigned int irq;
  115. unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
  116. int ret;
  117. domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
  118. if (!domain) {
  119. pr_err("Could not register interrupt domain.\n");
  120. return -ENOMEM;
  121. }
  122. ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
  123. handle_fasteoi_irq, clr, 0,
  124. IRQ_GC_INIT_MASK_CACHE);
  125. if (ret) {
  126. pr_err("Could not allocate generic interrupt chip.\n");
  127. goto fail_irqd_remove;
  128. }
  129. irq = irq_of_parse_and_map(node, 0);
  130. if (irq <= 0) {
  131. pr_err("unable to parse irq\n");
  132. ret = -EINVAL;
  133. goto fail_irqd_remove;
  134. }
  135. gc = irq_get_domain_generic_chip(domain, 0);
  136. gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
  137. if (IS_ERR(gc->reg_base)) {
  138. pr_err("unable to map resource\n");
  139. ret = PTR_ERR(gc->reg_base);
  140. goto fail_irqd_remove;
  141. }
  142. gc->chip_types[0].type = IRQ_TYPE_LEVEL_MASK;
  143. gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
  144. gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
  145. gc->chip_types[0].chip.irq_eoi = irq_gc_ack_set_bit;
  146. gc->chip_types[0].chip.irq_set_type = sunxi_sc_nmi_set_type;
  147. gc->chip_types[0].chip.flags = IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
  148. gc->chip_types[0].regs.ack = reg_offs->pend;
  149. gc->chip_types[0].regs.mask = reg_offs->enable;
  150. gc->chip_types[0].regs.type = reg_offs->ctrl;
  151. gc->chip_types[1].type = IRQ_TYPE_EDGE_BOTH;
  152. gc->chip_types[1].chip.name = gc->chip_types[0].chip.name;
  153. gc->chip_types[1].chip.irq_ack = irq_gc_ack_set_bit;
  154. gc->chip_types[1].chip.irq_mask = irq_gc_mask_clr_bit;
  155. gc->chip_types[1].chip.irq_unmask = irq_gc_mask_set_bit;
  156. gc->chip_types[1].chip.irq_set_type = sunxi_sc_nmi_set_type;
  157. gc->chip_types[1].regs.ack = reg_offs->pend;
  158. gc->chip_types[1].regs.mask = reg_offs->enable;
  159. gc->chip_types[1].regs.type = reg_offs->ctrl;
  160. gc->chip_types[1].handler = handle_edge_irq;
  161. sunxi_sc_nmi_write(gc, reg_offs->enable, 0);
  162. sunxi_sc_nmi_write(gc, reg_offs->pend, 0x1);
  163. irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
  164. return 0;
  165. fail_irqd_remove:
  166. irq_domain_remove(domain);
  167. return ret;
  168. }
/* A31: OF init hook registered via IRQCHIP_DECLARE for the sun6i NMI. */
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_reg_offs);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
/* A20: OF init hook registered via IRQCHIP_DECLARE for the sun7i NMI. */
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_reg_offs);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
/* A80: OF init hook registered via IRQCHIP_DECLARE for the sun9i NMI. */
static int __init sun9i_nmi_irq_init(struct device_node *node,
				     struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun9i_reg_offs);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);