/*
 * irqdomain.c - interrupt domain support: translation between hardware
 * interrupt numbers (hwirq) and linux virtual irq numbers (virq).
 */
  1. #define pr_fmt(fmt) "irq: " fmt
  2. #include <linux/acpi.h>
  3. #include <linux/debugfs.h>
  4. #include <linux/hardirq.h>
  5. #include <linux/interrupt.h>
  6. #include <linux/irq.h>
  7. #include <linux/irqdesc.h>
  8. #include <linux/irqdomain.h>
  9. #include <linux/module.h>
  10. #include <linux/mutex.h>
  11. #include <linux/of.h>
  12. #include <linux/of_address.h>
  13. #include <linux/of_irq.h>
  14. #include <linux/topology.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/slab.h>
  17. #include <linux/smp.h>
  18. #include <linux/fs.h>
/* All registered irq domains; additions/removals guarded by irq_domain_mutex. */
static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);

/* Serializes updates to the radix-tree reverse mappings of every domain. */
static DEFINE_MUTEX(revmap_trees_mutex);

/* Fallback domain used when NULL is passed to irq_create_mapping() and co. */
static struct irq_domain *irq_default_domain;

static void irq_domain_check_hierarchy(struct irq_domain *domain);
/*
 * Carrier for a software-constructed fwnode_handle, created by
 * __irq_domain_alloc_fwnode() and freed by irq_domain_free_fwnode().
 */
struct irqchip_fwid {
	struct fwnode_handle fwnode;	/* embedded handle handed out to callers */
	unsigned int type;		/* IRQCHIP_FWNODE_* allocation type */
	char *name;			/* kasprintf()'d identifier, freed with the fwid */
	void *data;			/* opaque user-provided data */
};
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
static void debugfs_add_domain_dir(struct irq_domain *d);
static void debugfs_remove_domain_dir(struct irq_domain *d);
#else
/* debugfs disabled: no-op stubs so callers need no #ifdef guards. */
static inline void debugfs_add_domain_dir(struct irq_domain *d) { }
static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }
#endif

/*
 * Empty ops structure whose address tags fwnodes allocated here;
 * presumably what is_fwnode_irqchip() compares against — see its definition.
 */
const struct fwnode_operations irqchip_fwnode_ops;
EXPORT_SYMBOL_GPL(irqchip_fwnode_ops);
  39. /**
  40. * irq_domain_alloc_fwnode - Allocate a fwnode_handle suitable for
  41. * identifying an irq domain
  42. * @type: Type of irqchip_fwnode. See linux/irqdomain.h
  43. * @name: Optional user provided domain name
  44. * @id: Optional user provided id if name != NULL
  45. * @data: Optional user-provided data
  46. *
  47. * Allocate a struct irqchip_fwid, and return a poiner to the embedded
  48. * fwnode_handle (or NULL on failure).
  49. *
  50. * Note: The types IRQCHIP_FWNODE_NAMED and IRQCHIP_FWNODE_NAMED_ID are
  51. * solely to transport name information to irqdomain creation code. The
  52. * node is not stored. For other types the pointer is kept in the irq
  53. * domain struct.
  54. */
  55. struct fwnode_handle *__irq_domain_alloc_fwnode(unsigned int type, int id,
  56. const char *name, void *data)
  57. {
  58. struct irqchip_fwid *fwid;
  59. char *n;
  60. fwid = kzalloc(sizeof(*fwid), GFP_KERNEL);
  61. switch (type) {
  62. case IRQCHIP_FWNODE_NAMED:
  63. n = kasprintf(GFP_KERNEL, "%s", name);
  64. break;
  65. case IRQCHIP_FWNODE_NAMED_ID:
  66. n = kasprintf(GFP_KERNEL, "%s-%d", name, id);
  67. break;
  68. default:
  69. n = kasprintf(GFP_KERNEL, "irqchip@%p", data);
  70. break;
  71. }
  72. if (!fwid || !n) {
  73. kfree(fwid);
  74. kfree(n);
  75. return NULL;
  76. }
  77. fwid->type = type;
  78. fwid->name = n;
  79. fwid->data = data;
  80. fwid->fwnode.ops = &irqchip_fwnode_ops;
  81. return &fwid->fwnode;
  82. }
  83. EXPORT_SYMBOL_GPL(__irq_domain_alloc_fwnode);
  84. /**
  85. * irq_domain_free_fwnode - Free a non-OF-backed fwnode_handle
  86. *
  87. * Free a fwnode_handle allocated with irq_domain_alloc_fwnode.
  88. */
  89. void irq_domain_free_fwnode(struct fwnode_handle *fwnode)
  90. {
  91. struct irqchip_fwid *fwid;
  92. if (WARN_ON(!is_fwnode_irqchip(fwnode)))
  93. return;
  94. fwid = container_of(fwnode, struct irqchip_fwid, fwnode);
  95. kfree(fwid->name);
  96. kfree(fwid);
  97. }
  98. EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
/**
 * __irq_domain_add() - Allocate a new irq_domain data structure
 * @fwnode: firmware node for the interrupt controller
 * @size: Size of linear map; 0 for radix mapping only
 * @hwirq_max: Maximum number of interrupts supported by controller
 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
 *              direct mapping
 * @ops: domain callbacks
 * @host_data: Controller private data pointer
 *
 * Allocates and initializes an irq_domain structure.
 * Returns pointer to IRQ domain, or NULL on failure.
 */
struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
				    irq_hw_number_t hwirq_max, int direct_max,
				    const struct irq_domain_ops *ops,
				    void *host_data)
{
	struct device_node *of_node = to_of_node(fwnode);
	struct irqchip_fwid *fwid;
	struct irq_domain *domain;

	/* Counter used to name domains whose fwnode yields no usable name. */
	static atomic_t unknown_domains;

	/* The linear revmap array is allocated as a tail of the domain. */
	domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
			      GFP_KERNEL, of_node_to_nid(of_node));
	if (WARN_ON(!domain))
		return NULL;

	if (fwnode && is_fwnode_irqchip(fwnode)) {
		/* Software-allocated fwnode: see __irq_domain_alloc_fwnode(). */
		fwid = container_of(fwnode, struct irqchip_fwid, fwnode);

		switch (fwid->type) {
		case IRQCHIP_FWNODE_NAMED:
		case IRQCHIP_FWNODE_NAMED_ID:
			/*
			 * Named fwnodes only transport the name; duplicate it
			 * so the domain owns (and later frees) its own copy.
			 */
			domain->fwnode = fwnode;
			domain->name = kstrdup(fwid->name, GFP_KERNEL);
			if (!domain->name) {
				kfree(domain);
				return NULL;
			}
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
			break;
		default:
			/*
			 * Share the fwid's name. NAME_ALLOCATED stays clear so
			 * irq_domain_remove() won't free it; it is released
			 * with the fwid in irq_domain_free_fwnode().
			 */
			domain->fwnode = fwnode;
			domain->name = fwid->name;
			break;
		}
#ifdef CONFIG_ACPI
	} else if (is_acpi_device_node(fwnode)) {
		struct acpi_buffer buf = {
			.length = ACPI_ALLOCATE_BUFFER,
		};
		acpi_handle handle;

		handle = acpi_device_handle(to_acpi_device_node(fwnode));
		if (acpi_get_name(handle, ACPI_FULL_PATHNAME, &buf) == AE_OK) {
			/* Use the ACPI full pathname as the domain name. */
			domain->name = buf.pointer;
			domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
		}

		domain->fwnode = fwnode;
#endif
	} else if (of_node) {
		char *name;

		/*
		 * DT paths contain '/', which debugfs is legitimately
		 * unhappy about. Replace them with ':', which does
		 * the trick and is not as offensive as '\'...
		 */
		name = kstrdup(of_node_full_name(of_node), GFP_KERNEL);
		if (!name) {
			kfree(domain);
			return NULL;
		}

		strreplace(name, '/', ':');

		domain->name = name;
		domain->fwnode = fwnode;
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	if (!domain->name) {
		if (fwnode)
			pr_err("Invalid fwnode type for irqdomain\n");
		/* Last resort: a unique "unknown-N" name. */
		domain->name = kasprintf(GFP_KERNEL, "unknown-%d",
					 atomic_inc_return(&unknown_domains));
		if (!domain->name) {
			kfree(domain);
			return NULL;
		}
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;
	}

	of_node_get(of_node);	/* reference dropped in irq_domain_remove() */

	/* Fill structure */
	INIT_RADIX_TREE(&domain->revmap_tree, GFP_KERNEL);
	domain->ops = ops;
	domain->host_data = host_data;
	domain->hwirq_max = hwirq_max;
	domain->revmap_size = size;
	domain->revmap_direct_max_irq = direct_max;
	irq_domain_check_hierarchy(domain);

	mutex_lock(&irq_domain_mutex);
	debugfs_add_domain_dir(domain);
	list_add(&domain->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	pr_debug("Added domain %s\n", domain->name);
	return domain;
}
EXPORT_SYMBOL_GPL(__irq_domain_add);
/**
 * irq_domain_remove() - Remove an irq domain.
 * @domain: domain to remove
 *
 * This routine is used to remove an irq domain. The caller must ensure
 * that all mappings within the domain have been disposed of prior to
 * use, depending on the revmap type.
 */
void irq_domain_remove(struct irq_domain *domain)
{
	mutex_lock(&irq_domain_mutex);
	debugfs_remove_domain_dir(domain);

	/* Catch callers that left radix-tree reverse mappings behind. */
	WARN_ON(!radix_tree_empty(&domain->revmap_tree));

	list_del(&domain->link);

	/*
	 * If the going away domain is the default one, reset it.
	 */
	if (unlikely(irq_default_domain == domain))
		irq_set_default_host(NULL);

	mutex_unlock(&irq_domain_mutex);

	pr_debug("Removed domain %s\n", domain->name);

	/* Drop the of_node reference taken by __irq_domain_add(). */
	of_node_put(irq_domain_get_of_node(domain));

	/* The name is only ours to free if this domain allocated it. */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	kfree(domain);
}
EXPORT_SYMBOL_GPL(irq_domain_remove);
/**
 * irq_domain_update_bus_token() - Update a domain's bus token, renaming it
 * @domain: domain to update
 * @bus_token: new bus token
 *
 * Renames the domain to "<name>-<bus_token>" and re-registers its debugfs
 * directory under the new name. A no-op if the token is already set. If the
 * new name cannot be allocated, the token update sticks but the old name
 * (and debugfs entry) is kept.
 */
void irq_domain_update_bus_token(struct irq_domain *domain,
				 enum irq_domain_bus_token bus_token)
{
	char *name;

	if (domain->bus_token == bus_token)
		return;

	mutex_lock(&irq_domain_mutex);

	domain->bus_token = bus_token;

	name = kasprintf(GFP_KERNEL, "%s-%d", domain->name, bus_token);
	if (!name) {
		mutex_unlock(&irq_domain_mutex);
		return;
	}

	debugfs_remove_domain_dir(domain);

	/*
	 * Free the old name only if the domain owned it; from here on the
	 * name is always dynamically allocated, so set the flag.
	 */
	if (domain->flags & IRQ_DOMAIN_NAME_ALLOCATED)
		kfree(domain->name);
	else
		domain->flags |= IRQ_DOMAIN_NAME_ALLOCATED;

	domain->name = name;
	debugfs_add_domain_dir(domain);

	mutex_unlock(&irq_domain_mutex);
}
  250. /**
  251. * irq_domain_add_simple() - Register an irq_domain and optionally map a range of irqs
  252. * @of_node: pointer to interrupt controller's device tree node.
  253. * @size: total number of irqs in mapping
  254. * @first_irq: first number of irq block assigned to the domain,
  255. * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then
  256. * pre-map all of the irqs in the domain to virqs starting at first_irq.
  257. * @ops: domain callbacks
  258. * @host_data: Controller private data pointer
  259. *
  260. * Allocates an irq_domain, and optionally if first_irq is positive then also
  261. * allocate irq_descs and map all of the hwirqs to virqs starting at first_irq.
  262. *
  263. * This is intended to implement the expected behaviour for most
  264. * interrupt controllers. If device tree is used, then first_irq will be 0 and
  265. * irqs get mapped dynamically on the fly. However, if the controller requires
  266. * static virq assignments (non-DT boot) then it will set that up correctly.
  267. */
  268. struct irq_domain *irq_domain_add_simple(struct device_node *of_node,
  269. unsigned int size,
  270. unsigned int first_irq,
  271. const struct irq_domain_ops *ops,
  272. void *host_data)
  273. {
  274. struct irq_domain *domain;
  275. domain = __irq_domain_add(of_node_to_fwnode(of_node), size, size, 0, ops, host_data);
  276. if (!domain)
  277. return NULL;
  278. if (first_irq > 0) {
  279. if (IS_ENABLED(CONFIG_SPARSE_IRQ)) {
  280. /* attempt to allocated irq_descs */
  281. int rc = irq_alloc_descs(first_irq, first_irq, size,
  282. of_node_to_nid(of_node));
  283. if (rc < 0)
  284. pr_info("Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
  285. first_irq);
  286. }
  287. irq_domain_associate_many(domain, first_irq, 0, size);
  288. }
  289. return domain;
  290. }
  291. EXPORT_SYMBOL_GPL(irq_domain_add_simple);
  292. /**
  293. * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
  294. * @of_node: pointer to interrupt controller's device tree node.
  295. * @size: total number of irqs in legacy mapping
  296. * @first_irq: first number of irq block assigned to the domain
  297. * @first_hwirq: first hwirq number to use for the translation. Should normally
  298. * be '0', but a positive integer can be used if the effective
  299. * hwirqs numbering does not begin at zero.
  300. * @ops: map/unmap domain callbacks
  301. * @host_data: Controller private data pointer
  302. *
  303. * Note: the map() callback will be called before this function returns
  304. * for all legacy interrupts except 0 (which is always the invalid irq for
  305. * a legacy controller).
  306. */
  307. struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
  308. unsigned int size,
  309. unsigned int first_irq,
  310. irq_hw_number_t first_hwirq,
  311. const struct irq_domain_ops *ops,
  312. void *host_data)
  313. {
  314. struct irq_domain *domain;
  315. domain = __irq_domain_add(of_node_to_fwnode(of_node), first_hwirq + size,
  316. first_hwirq + size, 0, ops, host_data);
  317. if (domain)
  318. irq_domain_associate_many(domain, first_irq, first_hwirq, size);
  319. return domain;
  320. }
  321. EXPORT_SYMBOL_GPL(irq_domain_add_legacy);
  322. /**
  323. * irq_find_matching_fwspec() - Locates a domain for a given fwspec
  324. * @fwspec: FW specifier for an interrupt
  325. * @bus_token: domain-specific data
  326. */
  327. struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
  328. enum irq_domain_bus_token bus_token)
  329. {
  330. struct irq_domain *h, *found = NULL;
  331. struct fwnode_handle *fwnode = fwspec->fwnode;
  332. int rc;
  333. /* We might want to match the legacy controller last since
  334. * it might potentially be set to match all interrupts in
  335. * the absence of a device node. This isn't a problem so far
  336. * yet though...
  337. *
  338. * bus_token == DOMAIN_BUS_ANY matches any domain, any other
  339. * values must generate an exact match for the domain to be
  340. * selected.
  341. */
  342. mutex_lock(&irq_domain_mutex);
  343. list_for_each_entry(h, &irq_domain_list, link) {
  344. if (h->ops->select && fwspec->param_count)
  345. rc = h->ops->select(h, fwspec, bus_token);
  346. else if (h->ops->match)
  347. rc = h->ops->match(h, to_of_node(fwnode), bus_token);
  348. else
  349. rc = ((fwnode != NULL) && (h->fwnode == fwnode) &&
  350. ((bus_token == DOMAIN_BUS_ANY) ||
  351. (h->bus_token == bus_token)));
  352. if (rc) {
  353. found = h;
  354. break;
  355. }
  356. }
  357. mutex_unlock(&irq_domain_mutex);
  358. return found;
  359. }
  360. EXPORT_SYMBOL_GPL(irq_find_matching_fwspec);
  361. /**
  362. * irq_domain_check_msi_remap - Check whether all MSI irq domains implement
  363. * IRQ remapping
  364. *
  365. * Return: false if any MSI irq domain does not support IRQ remapping,
  366. * true otherwise (including if there is no MSI irq domain)
  367. */
  368. bool irq_domain_check_msi_remap(void)
  369. {
  370. struct irq_domain *h;
  371. bool ret = true;
  372. mutex_lock(&irq_domain_mutex);
  373. list_for_each_entry(h, &irq_domain_list, link) {
  374. if (irq_domain_is_msi(h) &&
  375. !irq_domain_hierarchical_is_msi_remap(h)) {
  376. ret = false;
  377. break;
  378. }
  379. }
  380. mutex_unlock(&irq_domain_mutex);
  381. return ret;
  382. }
  383. EXPORT_SYMBOL_GPL(irq_domain_check_msi_remap);
  384. /**
  385. * irq_set_default_host() - Set a "default" irq domain
  386. * @domain: default domain pointer
  387. *
  388. * For convenience, it's possible to set a "default" domain that will be used
  389. * whenever NULL is passed to irq_create_mapping(). It makes life easier for
  390. * platforms that want to manipulate a few hard coded interrupt numbers that
  391. * aren't properly represented in the device-tree.
  392. */
  393. void irq_set_default_host(struct irq_domain *domain)
  394. {
  395. pr_debug("Default domain set to @0x%p\n", domain);
  396. irq_default_domain = domain;
  397. }
  398. EXPORT_SYMBOL_GPL(irq_set_default_host);
  399. static void irq_domain_clear_mapping(struct irq_domain *domain,
  400. irq_hw_number_t hwirq)
  401. {
  402. if (hwirq < domain->revmap_size) {
  403. domain->linear_revmap[hwirq] = 0;
  404. } else {
  405. mutex_lock(&revmap_trees_mutex);
  406. radix_tree_delete(&domain->revmap_tree, hwirq);
  407. mutex_unlock(&revmap_trees_mutex);
  408. }
  409. }
  410. static void irq_domain_set_mapping(struct irq_domain *domain,
  411. irq_hw_number_t hwirq,
  412. struct irq_data *irq_data)
  413. {
  414. if (hwirq < domain->revmap_size) {
  415. domain->linear_revmap[hwirq] = irq_data->irq;
  416. } else {
  417. mutex_lock(&revmap_trees_mutex);
  418. radix_tree_insert(&domain->revmap_tree, hwirq, irq_data);
  419. mutex_unlock(&revmap_trees_mutex);
  420. }
  421. }
/**
 * irq_domain_disassociate() - Break the @irq <-> hwirq link in @domain
 * @domain: domain the mapping belongs to
 * @irq: linux irq number to disassociate
 *
 * Reverses irq_domain_associate(): blocks new requests, removes the chip
 * and handler, waits for in-flight handlers, notifies the controller via
 * ->unmap() and finally clears the reverse mapping.
 */
void irq_domain_disassociate(struct irq_domain *domain, unsigned int irq)
{
	struct irq_data *irq_data = irq_get_irq_data(irq);
	irq_hw_number_t hwirq;

	if (WARN(!irq_data || irq_data->domain != domain,
		 "virq%i doesn't exist; cannot disassociate\n", irq))
		return;

	hwirq = irq_data->hwirq;

	/* Prevent new request_irq() users while tearing down. */
	irq_set_status_flags(irq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(irq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(irq);

	/* Tell the PIC about it */
	if (domain->ops->unmap)
		domain->ops->unmap(domain, irq);
	smp_mb();	/* order ->unmap() before clearing domain/hwirq */

	irq_data->domain = NULL;
	irq_data->hwirq = 0;

	domain->mapcount--;

	/* Clear reverse map for this hwirq */
	irq_domain_clear_mapping(domain, hwirq);
}
/**
 * irq_domain_associate() - Associate linux irq @virq with @hwirq in @domain
 * @domain: domain owning the mapping
 * @virq: already-allocated linux irq number
 * @hwirq: hardware irq number within @domain
 *
 * Establishes the virq <-> hwirq link, calls the domain's ->map() hook and
 * publishes the reverse mapping. Returns 0 on success, negative errno on
 * failure (invalid arguments or a ->map() error).
 */
int irq_domain_associate(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	int ret;

	if (WARN(hwirq >= domain->hwirq_max,
		 "error: hwirq 0x%x is too large for %s\n", (int)hwirq, domain->name))
		return -EINVAL;
	if (WARN(!irq_data, "error: virq%i is not allocated", virq))
		return -EINVAL;
	if (WARN(irq_data->domain, "error: virq%i is already associated", virq))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);
	irq_data->hwirq = hwirq;
	irq_data->domain = domain;
	if (domain->ops->map) {
		ret = domain->ops->map(domain, virq, hwirq);
		if (ret != 0) {
			/*
			 * If map() returns -EPERM, this interrupt is protected
			 * by the firmware or some other service and shall not
			 * be mapped. Don't bother telling the user about it.
			 */
			if (ret != -EPERM) {
				pr_info("%s didn't like hwirq-0x%lx to VIRQ%i mapping (rc=%d)\n",
				       domain->name, hwirq, virq, ret);
			}
			/* Roll back the partial association. */
			irq_data->domain = NULL;
			irq_data->hwirq = 0;
			mutex_unlock(&irq_domain_mutex);
			return ret;
		}

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && irq_data->chip)
			domain->name = irq_data->chip->name;
	}

	domain->mapcount++;
	irq_domain_set_mapping(domain, hwirq, irq_data);
	mutex_unlock(&irq_domain_mutex);

	/* Mapping is live: allow request_irq() on it. */
	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_associate);
  488. void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base,
  489. irq_hw_number_t hwirq_base, int count)
  490. {
  491. struct device_node *of_node;
  492. int i;
  493. of_node = irq_domain_get_of_node(domain);
  494. pr_debug("%s(%s, irqbase=%i, hwbase=%i, count=%i)\n", __func__,
  495. of_node_full_name(of_node), irq_base, (int)hwirq_base, count);
  496. for (i = 0; i < count; i++) {
  497. irq_domain_associate(domain, irq_base + i, hwirq_base + i);
  498. }
  499. }
  500. EXPORT_SYMBOL_GPL(irq_domain_associate_many);
  501. /**
  502. * irq_create_direct_mapping() - Allocate an irq for direct mapping
  503. * @domain: domain to allocate the irq for or NULL for default domain
  504. *
  505. * This routine is used for irq controllers which can choose the hardware
  506. * interrupt numbers they generate. In such a case it's simplest to use
  507. * the linux irq as the hardware interrupt number. It still uses the linear
  508. * or radix tree to store the mapping, but the irq controller can optimize
  509. * the revmap path by using the hwirq directly.
  510. */
  511. unsigned int irq_create_direct_mapping(struct irq_domain *domain)
  512. {
  513. struct device_node *of_node;
  514. unsigned int virq;
  515. if (domain == NULL)
  516. domain = irq_default_domain;
  517. of_node = irq_domain_get_of_node(domain);
  518. virq = irq_alloc_desc_from(1, of_node_to_nid(of_node));
  519. if (!virq) {
  520. pr_debug("create_direct virq allocation failed\n");
  521. return 0;
  522. }
  523. if (virq >= domain->revmap_direct_max_irq) {
  524. pr_err("ERROR: no free irqs available below %i maximum\n",
  525. domain->revmap_direct_max_irq);
  526. irq_free_desc(virq);
  527. return 0;
  528. }
  529. pr_debug("create_direct obtained virq %d\n", virq);
  530. if (irq_domain_associate(domain, virq, virq)) {
  531. irq_free_desc(virq);
  532. return 0;
  533. }
  534. return virq;
  535. }
  536. EXPORT_SYMBOL_GPL(irq_create_direct_mapping);
/**
 * irq_create_mapping() - Map a hardware interrupt into linux irq space
 * @domain: domain owning this hardware interrupt or NULL for default domain
 * @hwirq: hardware irq number in that domain space
 *
 * Only one mapping per hardware interrupt is permitted. Returns a linux
 * irq number.
 * If the sense/trigger is to be specified, set_irq_type() should be called
 * on the number returned from that call.
 */
unsigned int irq_create_mapping(struct irq_domain *domain,
				irq_hw_number_t hwirq)
{
	struct device_node *of_node;
	int virq;

	pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL) {
		WARN(1, "%s(, %lx) called with NULL domain\n", __func__, hwirq);
		return 0;
	}
	pr_debug("-> using domain @%p\n", domain);

	of_node = irq_domain_get_of_node(domain);

	/* Check if mapping already exists */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		pr_debug("-> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Allocate a virtual interrupt number */
	virq = irq_domain_alloc_descs(-1, 1, hwirq, of_node_to_nid(of_node), NULL);
	if (virq <= 0) {
		pr_debug("-> virq allocation failed\n");
		return 0;
	}

	/* Undo the descriptor allocation if the association fails. */
	if (irq_domain_associate(domain, virq, hwirq)) {
		irq_free_desc(virq);
		return 0;
	}

	pr_debug("irq %lu on domain %s mapped to virtual irq %u\n",
		hwirq, of_node_full_name(of_node), virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
  583. /**
  584. * irq_create_strict_mappings() - Map a range of hw irqs to fixed linux irqs
  585. * @domain: domain owning the interrupt range
  586. * @irq_base: beginning of linux IRQ range
  587. * @hwirq_base: beginning of hardware IRQ range
  588. * @count: Number of interrupts to map
  589. *
  590. * This routine is used for allocating and mapping a range of hardware
  591. * irqs to linux irqs where the linux irq numbers are at pre-defined
  592. * locations. For use by controllers that already have static mappings
  593. * to insert in to the domain.
  594. *
  595. * Non-linear users can use irq_create_identity_mapping() for IRQ-at-a-time
  596. * domain insertion.
  597. *
  598. * 0 is returned upon success, while any failure to establish a static
  599. * mapping is treated as an error.
  600. */
  601. int irq_create_strict_mappings(struct irq_domain *domain, unsigned int irq_base,
  602. irq_hw_number_t hwirq_base, int count)
  603. {
  604. struct device_node *of_node;
  605. int ret;
  606. of_node = irq_domain_get_of_node(domain);
  607. ret = irq_alloc_descs(irq_base, irq_base, count,
  608. of_node_to_nid(of_node));
  609. if (unlikely(ret < 0))
  610. return ret;
  611. irq_domain_associate_many(domain, irq_base, hwirq_base, count);
  612. return 0;
  613. }
  614. EXPORT_SYMBOL_GPL(irq_create_strict_mappings);
  615. static int irq_domain_translate(struct irq_domain *d,
  616. struct irq_fwspec *fwspec,
  617. irq_hw_number_t *hwirq, unsigned int *type)
  618. {
  619. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  620. if (d->ops->translate)
  621. return d->ops->translate(d, fwspec, hwirq, type);
  622. #endif
  623. if (d->ops->xlate)
  624. return d->ops->xlate(d, to_of_node(fwspec->fwnode),
  625. fwspec->param, fwspec->param_count,
  626. hwirq, type);
  627. /* If domain has no translation, then we assume interrupt line */
  628. *hwirq = fwspec->param[0];
  629. return 0;
  630. }
  631. static void of_phandle_args_to_fwspec(struct of_phandle_args *irq_data,
  632. struct irq_fwspec *fwspec)
  633. {
  634. int i;
  635. fwspec->fwnode = irq_data->np ? &irq_data->np->fwnode : NULL;
  636. fwspec->param_count = irq_data->args_count;
  637. for (i = 0; i < irq_data->args_count; i++)
  638. fwspec->param[i] = irq_data->args[i];
  639. }
/**
 * irq_create_fwspec_mapping() - Map an interrupt described by a fwspec
 * @fwspec: firmware interrupt specifier (fwnode + parameters)
 *
 * Finds the matching domain, translates @fwspec to a (hwirq, type) pair,
 * reuses a trigger-compatible existing mapping if one exists, otherwise
 * creates a new mapping. Returns the linux irq number, or 0 on failure.
 */
unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
{
	struct irq_domain *domain;
	struct irq_data *irq_data;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	int virq;

	if (fwspec->fwnode) {
		/* Prefer a wired domain, fall back to any bus type. */
		domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_WIRED);
		if (!domain)
			domain = irq_find_matching_fwspec(fwspec, DOMAIN_BUS_ANY);
	} else {
		domain = irq_default_domain;
	}

	if (!domain) {
		pr_warn("no irq domain found for %s !\n",
			of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_translate(domain, fwspec, &hwirq, &type))
		return 0;

	/*
	 * WARN if the irqchip returns a type with bits
	 * outside the sense mask set and clear these bits.
	 */
	if (WARN_ON(type & ~IRQ_TYPE_SENSE_MASK))
		type &= IRQ_TYPE_SENSE_MASK;

	/*
	 * If we've already configured this interrupt,
	 * don't do it again, or hell will break loose.
	 */
	virq = irq_find_mapping(domain, hwirq);
	if (virq) {
		/*
		 * If the trigger type is not specified or matches the
		 * current trigger type then we are done so return the
		 * interrupt number.
		 */
		if (type == IRQ_TYPE_NONE || type == irq_get_trigger_type(virq))
			return virq;

		/*
		 * If the trigger type has not been set yet, then set
		 * it now and return the interrupt number.
		 */
		if (irq_get_trigger_type(virq) == IRQ_TYPE_NONE) {
			irq_data = irq_get_irq_data(virq);
			if (!irq_data)
				return 0;

			irqd_set_trigger_type(irq_data, type);
			return virq;
		}

		pr_warn("type mismatch, failed to map hwirq-%lu for %s!\n",
			hwirq, of_node_full_name(to_of_node(fwspec->fwnode)));
		return 0;
	}

	if (irq_domain_is_hierarchy(domain)) {
		/* Hierarchical domains allocate through the domain stack. */
		virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, fwspec);
		if (virq <= 0)
			return 0;
	} else {
		/* Create mapping */
		virq = irq_create_mapping(domain, hwirq);
		if (!virq)
			return virq;
	}

	irq_data = irq_get_irq_data(virq);
	if (!irq_data) {
		/* Unwind whichever allocation path was taken above. */
		if (irq_domain_is_hierarchy(domain))
			irq_domain_free_irqs(virq, 1);
		else
			irq_dispose_mapping(virq);
		return 0;
	}

	/* Store trigger type */
	irqd_set_trigger_type(irq_data, type);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_fwspec_mapping);
  718. unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data)
  719. {
  720. struct irq_fwspec fwspec;
  721. of_phandle_args_to_fwspec(irq_data, &fwspec);
  722. return irq_create_fwspec_mapping(&fwspec);
  723. }
  724. EXPORT_SYMBOL_GPL(irq_create_of_mapping);
/**
 * irq_dispose_mapping() - Unmap an interrupt
 * @virq: linux irq number of the interrupt to unmap
 */
void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *domain;

	/* Nothing to do for irq 0 or an unmapped virq */
	if (!virq || !irq_data)
		return;

	domain = irq_data->domain;
	if (WARN_ON(domain == NULL))
		return;

	if (irq_domain_is_hierarchy(domain)) {
		/* Hierarchical domains tear down the whole parent chain */
		irq_domain_free_irqs(virq, 1);
	} else {
		/* Drop the hwirq association, then release the descriptor */
		irq_domain_disassociate(domain, virq);
		irq_free_desc(virq);
	}
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);
/**
 * irq_find_mapping() - Find a linux irq from an hw irq number.
 * @domain: domain owning this hardware interrupt
 * @hwirq: hardware irq number in that domain space
 *
 * Returns the linux irq number mapped to @hwirq, or 0 if no mapping
 * exists.
 */
unsigned int irq_find_mapping(struct irq_domain *domain,
			      irq_hw_number_t hwirq)
{
	struct irq_data *data;

	/* Look for default domain if necessary */
	if (domain == NULL)
		domain = irq_default_domain;
	if (domain == NULL)
		return 0;

	/* Direct (1:1) mappings: virq equals hwirq by construction */
	if (hwirq < domain->revmap_direct_max_irq) {
		data = irq_domain_get_irq_data(domain, hwirq);
		if (data && data->hwirq == hwirq)
			return hwirq;
	}

	/* Check if the hwirq is in the linear revmap. */
	if (hwirq < domain->revmap_size)
		return domain->linear_revmap[hwirq];

	/* Fall back to the radix tree for sparse/large hwirq numbers */
	rcu_read_lock();
	data = radix_tree_lookup(&domain->revmap_tree, hwirq);
	rcu_read_unlock();
	return data ? data->irq : 0;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);
  774. #ifdef CONFIG_IRQ_DOMAIN_DEBUG
/*
 * Print one debugfs line per irq_data level of @desc's hierarchy:
 * virq, hwirq, chip name, chip data, action marker, revmap type and
 * owning domain name.  Caller holds desc->lock.
 */
static void virq_debug_show_one(struct seq_file *m, struct irq_desc *desc)
{
	struct irq_domain *domain;
	struct irq_data *data;

	domain = desc->irq_data.domain;
	data = &desc->irq_data;

	/* Walk from the outermost irq_data down through the parents */
	while (domain) {
		unsigned int irq = data->irq;
		unsigned long hwirq = data->hwirq;
		struct irq_chip *chip;
		bool direct;

		/* '+' marks levels below the top of the hierarchy */
		if (data == &desc->irq_data)
			seq_printf(m, "%5d ", irq);
		else
			seq_printf(m, "%5d+ ", irq);
		seq_printf(m, "0x%05lx ", hwirq);

		chip = irq_data_get_irq_chip(data);
		seq_printf(m, "%-15s ", (chip && chip->name) ? chip->name : "none");

		/*
		 * NOTE(review): data is dereferenced above, so it is
		 * non-NULL here and the " %p " arm looks unreachable —
		 * confirm before relying on it.
		 */
		seq_printf(m, data ? "0x%p " : " %p ",
			   irq_data_get_irq_chip_data(data));

		/* '*' if an action with a handler is installed */
		seq_printf(m, " %c ", (desc->action && desc->action->handler) ? '*' : ' ');
		direct = (irq == hwirq) && (irq < domain->revmap_direct_max_irq);
		seq_printf(m, "%6s%-8s ",
			   (hwirq < domain->revmap_size) ? "LINEAR" : "RADIX",
			   direct ? "(DIRECT)" : "");
		seq_printf(m, "%s\n", domain->name);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/* Descend one level towards the root interrupt controller */
		domain = domain->parent;
		data = data->parent_data;
#else
		domain = NULL;
#endif
	}
}
/* seq_file show: dump all registered domains, then every mapped irq */
static int virq_debug_show(struct seq_file *m, void *private)
{
	unsigned long flags;
	struct irq_desc *desc;
	struct irq_domain *domain;
	struct radix_tree_iter iter;
	void __rcu **slot;
	int i;

	/* Part 1: one summary line per registered irq domain */
	seq_printf(m, " %-16s %-6s %-10s %-10s %s\n",
		   "name", "mapped", "linear-max", "direct-max", "devtree-node");
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(domain, &irq_domain_list, link) {
		struct device_node *of_node;
		const char *name;
		int count = 0;

		of_node = irq_domain_get_of_node(domain);
		if (of_node)
			name = of_node_full_name(of_node);
		else if (is_fwnode_irqchip(domain->fwnode))
			name = container_of(domain->fwnode, struct irqchip_fwid,
					    fwnode)->name;
		else
			name = "";

		/* Count the sparse (radix tree) mappings */
		radix_tree_for_each_slot(slot, &domain->revmap_tree, &iter, 0)
			count++;
		seq_printf(m, "%c%-16s %6u %10u %10u %s\n",
			   domain == irq_default_domain ? '*' : ' ', domain->name,
			   domain->revmap_size + count, domain->revmap_size,
			   domain->revmap_direct_max_irq,
			   name);
	}
	mutex_unlock(&irq_domain_mutex);

	/* Part 2: one line per mapped irq (plus its parent levels) */
	seq_printf(m, "%-5s %-7s %-15s %-*s %6s %-14s %s\n", "irq", "hwirq",
		   "chip name", (int)(2 * sizeof(void *) + 2), "chip data",
		   "active", "type", "domain");
	for (i = 1; i < nr_irqs; i++) {
		desc = irq_to_desc(i);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);
		virq_debug_show_one(m, desc);
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}

	return 0;
}
/* debugfs open: hook virq_debug_show() into the seq_file single_open API */
static int virq_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, virq_debug_show, inode->i_private);
}
/* File operations for the irq_domain_mapping debugfs file */
static const struct file_operations virq_debug_fops = {
	.open = virq_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
/* Create the read-only "irq_domain_mapping" debugfs file at boot */
static int __init irq_debugfs_init(void)
{
	if (debugfs_create_file("irq_domain_mapping", S_IRUGO, NULL,
				NULL, &virq_debug_fops) == NULL)
		return -ENOMEM;

	return 0;
}
__initcall(irq_debugfs_init);
  872. #endif /* CONFIG_IRQ_DOMAIN_DEBUG */
  873. /**
  874. * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
  875. *
  876. * Device Tree IRQ specifier translation function which works with one cell
  877. * bindings where the cell value maps directly to the hwirq number.
  878. */
  879. int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
  880. const u32 *intspec, unsigned int intsize,
  881. unsigned long *out_hwirq, unsigned int *out_type)
  882. {
  883. if (WARN_ON(intsize < 1))
  884. return -EINVAL;
  885. *out_hwirq = intspec[0];
  886. *out_type = IRQ_TYPE_NONE;
  887. return 0;
  888. }
  889. EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
  890. /**
  891. * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
  892. *
  893. * Device Tree IRQ specifier translation function which works with two cell
  894. * bindings where the cell values map directly to the hwirq number
  895. * and linux irq flags.
  896. */
  897. int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
  898. const u32 *intspec, unsigned int intsize,
  899. irq_hw_number_t *out_hwirq, unsigned int *out_type)
  900. {
  901. if (WARN_ON(intsize < 2))
  902. return -EINVAL;
  903. *out_hwirq = intspec[0];
  904. *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
  905. return 0;
  906. }
  907. EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
  908. /**
  909. * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
  910. *
  911. * Device Tree IRQ specifier translation function which works with either one
  912. * or two cell bindings where the cell values map directly to the hwirq number
  913. * and linux irq flags.
  914. *
  915. * Note: don't use this function unless your interrupt controller explicitly
  916. * supports both one and two cell bindings. For the majority of controllers
  917. * the _onecell() or _twocell() variants above should be used.
  918. */
  919. int irq_domain_xlate_onetwocell(struct irq_domain *d,
  920. struct device_node *ctrlr,
  921. const u32 *intspec, unsigned int intsize,
  922. unsigned long *out_hwirq, unsigned int *out_type)
  923. {
  924. if (WARN_ON(intsize < 1))
  925. return -EINVAL;
  926. *out_hwirq = intspec[0];
  927. if (intsize > 1)
  928. *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
  929. else
  930. *out_type = IRQ_TYPE_NONE;
  931. return 0;
  932. }
  933. EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
/*
 * Default domain ops for controllers whose DT bindings are one or two
 * cells translated directly to hwirq/type.
 */
const struct irq_domain_ops irq_domain_simple_ops = {
	.xlate = irq_domain_xlate_onetwocell,
};
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
/*
 * Allocate irq descriptors for @cnt consecutive irqs.
 *
 * If @virq >= 0 the caller requests that exact linux irq range.
 * Otherwise @hwirq is used as an allocation hint so that, where
 * possible, virq == hwirq; hint 0 is bumped to 1 because irq 0 is
 * treated as invalid, and a failed hinted allocation retries from 1.
 *
 * Returns the first allocated irq number, or a negative error code.
 */
int irq_domain_alloc_descs(int virq, unsigned int cnt, irq_hw_number_t hwirq,
			   int node, const struct cpumask *affinity)
{
	unsigned int hint;

	if (virq >= 0) {
		/* Caller asked for this exact irq range */
		virq = __irq_alloc_descs(virq, virq, cnt, node, THIS_MODULE,
					 affinity);
	} else {
		/* Prefer virq == hwirq (modulo nr_irqs), avoiding irq 0 */
		hint = hwirq % nr_irqs;
		if (hint == 0)
			hint++;
		virq = __irq_alloc_descs(-1, hint, cnt, node, THIS_MODULE,
					 affinity);
		if (virq <= 0 && hint > 1) {
			/* Hinted allocation failed: retry anywhere above 0 */
			virq = __irq_alloc_descs(-1, 1, cnt, node, THIS_MODULE,
						 affinity);
		}
	}

	return virq;
}
  958. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  959. /**
  960. * irq_domain_create_hierarchy - Add a irqdomain into the hierarchy
  961. * @parent: Parent irq domain to associate with the new domain
  962. * @flags: Irq domain flags associated to the domain
  963. * @size: Size of the domain. See below
  964. * @fwnode: Optional fwnode of the interrupt controller
  965. * @ops: Pointer to the interrupt domain callbacks
  966. * @host_data: Controller private data pointer
  967. *
  968. * If @size is 0 a tree domain is created, otherwise a linear domain.
  969. *
  970. * If successful the parent is associated to the new domain and the
  971. * domain flags are set.
  972. * Returns pointer to IRQ domain, or NULL on failure.
  973. */
  974. struct irq_domain *irq_domain_create_hierarchy(struct irq_domain *parent,
  975. unsigned int flags,
  976. unsigned int size,
  977. struct fwnode_handle *fwnode,
  978. const struct irq_domain_ops *ops,
  979. void *host_data)
  980. {
  981. struct irq_domain *domain;
  982. if (size)
  983. domain = irq_domain_create_linear(fwnode, size, ops, host_data);
  984. else
  985. domain = irq_domain_create_tree(fwnode, ops, host_data);
  986. if (domain) {
  987. domain->parent = parent;
  988. domain->flags |= flags;
  989. }
  990. return domain;
  991. }
  992. EXPORT_SYMBOL_GPL(irq_domain_create_hierarchy);
/*
 * Register @virq in the revmap of every domain in its hierarchy and
 * make it requestable.  Called under irq_domain_mutex from
 * __irq_domain_alloc_irqs().
 */
static void irq_domain_insert_irq(int virq)
{
	struct irq_data *data;

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;

		domain->mapcount++;
		irq_domain_set_mapping(domain, data->hwirq, data);

		/* If not already assigned, give the domain the chip's name */
		if (!domain->name && data->chip)
			domain->name = data->chip->name;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);
}
/*
 * Undo irq_domain_insert_irq(): mark the irq unrequestable, detach the
 * chip and handler, wait for in-flight handlers, then drop the revmap
 * entry at every level of the hierarchy.
 */
static void irq_domain_remove_irq(int virq)
{
	struct irq_data *data;

	irq_set_status_flags(virq, IRQ_NOREQUEST);
	irq_set_chip_and_handler(virq, NULL, NULL);
	synchronize_irq(virq);
	/*
	 * NOTE(review): barrier presumably orders the teardown above
	 * against the revmap clearing below — confirm intent.
	 */
	smp_mb();

	for (data = irq_get_irq_data(virq); data; data = data->parent_data) {
		struct irq_domain *domain = data->domain;
		irq_hw_number_t hwirq = data->hwirq;

		domain->mapcount--;
		irq_domain_clear_mapping(domain, hwirq);
	}
}
  1020. static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain,
  1021. struct irq_data *child)
  1022. {
  1023. struct irq_data *irq_data;
  1024. irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL,
  1025. irq_data_get_node(child));
  1026. if (irq_data) {
  1027. child->parent_data = irq_data;
  1028. irq_data->irq = child->irq;
  1029. irq_data->common = child->common;
  1030. irq_data->domain = domain;
  1031. }
  1032. return irq_data;
  1033. }
/*
 * Free the irq_data structures allocated for the parent domains of
 * irqs @virq..@virq+@nr_irqs-1.  The outermost irq_data is embedded in
 * struct irq_desc, so it is only unlinked, never freed here.
 */
static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data, *tmp;
	int i;

	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		tmp = irq_data->parent_data;
		irq_data->parent_data = NULL;
		irq_data->domain = NULL;

		/* Walk the detached parent chain and free every node */
		while (tmp) {
			irq_data = tmp;
			tmp = tmp->parent_data;
			kfree(irq_data);
		}
	}
}
/*
 * Build the per-level irq_data chain for irqs @virq..@virq+@nr_irqs-1,
 * one node per parent domain of @domain.  On allocation failure the
 * chains built so far (including the partial one) are torn down.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int irq_domain_alloc_irq_data(struct irq_domain *domain,
				     unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irq_data;
	struct irq_domain *parent;
	int i;

	/* The outermost irq_data is embedded in struct irq_desc */
	for (i = 0; i < nr_irqs; i++) {
		irq_data = irq_get_irq_data(virq + i);
		irq_data->domain = domain;

		for (parent = domain->parent; parent; parent = parent->parent) {
			irq_data = irq_domain_insert_irq_data(parent, irq_data);
			if (!irq_data) {
				/* i + 1 covers the partially built chain */
				irq_domain_free_irq_data(virq, i + 1);
				return -ENOMEM;
			}
		}
	}

	return 0;
}
  1070. /**
  1071. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  1072. * @domain: domain to match
  1073. * @virq: IRQ number to get irq_data
  1074. */
  1075. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  1076. unsigned int virq)
  1077. {
  1078. struct irq_data *irq_data;
  1079. for (irq_data = irq_get_irq_data(virq); irq_data;
  1080. irq_data = irq_data->parent_data)
  1081. if (irq_data->domain == domain)
  1082. return irq_data;
  1083. return NULL;
  1084. }
  1085. EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
  1086. /**
  1087. * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain
  1088. * @domain: Interrupt domain to match
  1089. * @virq: IRQ number
  1090. * @hwirq: The hwirq number
  1091. * @chip: The associated interrupt chip
  1092. * @chip_data: The associated chip data
  1093. */
  1094. int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq,
  1095. irq_hw_number_t hwirq, struct irq_chip *chip,
  1096. void *chip_data)
  1097. {
  1098. struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
  1099. if (!irq_data)
  1100. return -ENOENT;
  1101. irq_data->hwirq = hwirq;
  1102. irq_data->chip = chip ? chip : &no_irq_chip;
  1103. irq_data->chip_data = chip_data;
  1104. return 0;
  1105. }
  1106. EXPORT_SYMBOL_GPL(irq_domain_set_hwirq_and_chip);
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain:		Interrupt domain to match
 * @virq:		IRQ number
 * @hwirq:		The hardware interrupt number
 * @chip:		The associated interrupt chip
 * @chip_data:		The associated interrupt chip data
 * @handler:		The interrupt flow handler
 * @handler_data:	The interrupt flow handler data
 * @handler_name:	The interrupt handler name
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
			 irq_hw_number_t hwirq, struct irq_chip *chip,
			 void *chip_data, irq_flow_handler_t handler,
			 void *handler_data, const char *handler_name)
{
	/* NOTE(review): the return value is ignored here — confirm that
	 * callers guarantee @domain owns a level of @virq's hierarchy. */
	irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data);
	__irq_set_handler(virq, handler, 0, handler_name);
	irq_set_handler_data(virq, handler_data);
}
EXPORT_SYMBOL(irq_domain_set_info);
/**
 * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data
 * @irq_data:	The pointer to irq_data
 *
 * Restores the fields set by irq_domain_set_hwirq_and_chip() to their
 * unconfigured defaults.
 */
void irq_domain_reset_irq_data(struct irq_data *irq_data)
{
	irq_data->hwirq = 0;
	irq_data->chip = &no_irq_chip;
	irq_data->chip_data = NULL;
}
EXPORT_SYMBOL_GPL(irq_domain_reset_irq_data);
  1139. /**
  1140. * irq_domain_free_irqs_common - Clear irq_data and free the parent
  1141. * @domain: Interrupt domain to match
  1142. * @virq: IRQ number to start with
  1143. * @nr_irqs: The number of irqs to free
  1144. */
  1145. void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq,
  1146. unsigned int nr_irqs)
  1147. {
  1148. struct irq_data *irq_data;
  1149. int i;
  1150. for (i = 0; i < nr_irqs; i++) {
  1151. irq_data = irq_domain_get_irq_data(domain, virq + i);
  1152. if (irq_data)
  1153. irq_domain_reset_irq_data(irq_data);
  1154. }
  1155. irq_domain_free_irqs_parent(domain, virq, nr_irqs);
  1156. }
  1157. EXPORT_SYMBOL_GPL(irq_domain_free_irqs_common);
  1158. /**
  1159. * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent
  1160. * @domain: Interrupt domain to match
  1161. * @virq: IRQ number to start with
  1162. * @nr_irqs: The number of irqs to free
  1163. */
  1164. void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq,
  1165. unsigned int nr_irqs)
  1166. {
  1167. int i;
  1168. for (i = 0; i < nr_irqs; i++) {
  1169. irq_set_handler_data(virq + i, NULL);
  1170. irq_set_handler(virq + i, NULL);
  1171. }
  1172. irq_domain_free_irqs_common(domain, virq, nr_irqs);
  1173. }
  1174. static void irq_domain_free_irqs_hierarchy(struct irq_domain *domain,
  1175. unsigned int irq_base,
  1176. unsigned int nr_irqs)
  1177. {
  1178. unsigned int i;
  1179. if (!domain->ops->free)
  1180. return;
  1181. for (i = 0; i < nr_irqs; i++) {
  1182. if (irq_domain_get_irq_data(domain, irq_base + i))
  1183. domain->ops->free(domain, irq_base + i, 1);
  1184. }
  1185. }
  1186. int irq_domain_alloc_irqs_hierarchy(struct irq_domain *domain,
  1187. unsigned int irq_base,
  1188. unsigned int nr_irqs, void *arg)
  1189. {
  1190. if (!domain->ops->alloc) {
  1191. pr_debug("domain->ops->alloc() is NULL\n");
  1192. return -ENOSYS;
  1193. }
  1194. return domain->ops->alloc(domain, irq_base, nr_irqs, arg);
  1195. }
/**
 * __irq_domain_alloc_irqs - Allocate IRQs from domain
 * @domain:	domain to allocate from
 * @irq_base:	allocate specified IRQ number if irq_base >= 0
 * @nr_irqs:	number of IRQs to allocate
 * @node:	NUMA node id for memory allocation
 * @arg:	domain specific argument
 * @realloc:	IRQ descriptors have already been allocated if true
 * @affinity:	Optional irq affinity mask for multiqueue devices
 *
 * Allocate IRQ numbers and initialize all data structures to support
 * hierarchy IRQ domains.
 * Parameter @realloc is mainly to support legacy IRQs.
 * Returns error code or allocated IRQ number
 *
 * The whole process to setup an IRQ has been split into two steps.
 * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ
 * descriptor and required hardware resources. The second step,
 * irq_domain_activate_irq(), is to program hardwares with preallocated
 * resources. In this way, it's easier to rollback when failing to
 * allocate resources.
 */
int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base,
			    unsigned int nr_irqs, int node, void *arg,
			    bool realloc, const struct cpumask *affinity)
{
	int i, ret, virq;

	if (domain == NULL) {
		domain = irq_default_domain;
		if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n"))
			return -EINVAL;
	}

	if (realloc && irq_base >= 0) {
		/* Descriptors already exist (legacy irqs); reuse them */
		virq = irq_base;
	} else {
		virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node,
					      affinity);
		if (virq < 0) {
			pr_debug("cannot allocate IRQ(base %d, count %d)\n",
				 irq_base, nr_irqs);
			return virq;
		}
	}

	/* Build the per-level irq_data chains for the whole hierarchy */
	if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) {
		pr_debug("cannot allocate memory for IRQ%d\n", virq);
		ret = -ENOMEM;
		goto out_free_desc;
	}

	mutex_lock(&irq_domain_mutex);
	ret = irq_domain_alloc_irqs_hierarchy(domain, virq, nr_irqs, arg);
	if (ret < 0) {
		mutex_unlock(&irq_domain_mutex);
		goto out_free_irq_data;
	}
	/* Publish the new irqs in the revmaps while holding the mutex */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_insert_irq(virq + i);
	mutex_unlock(&irq_domain_mutex);

	return virq;

out_free_irq_data:
	irq_domain_free_irq_data(virq, nr_irqs);
out_free_desc:
	irq_free_descs(virq, nr_irqs);
	return ret;
}
/* The irq_data was moved, fix the revmap to refer to the new location */
static void irq_domain_fix_revmap(struct irq_data *d)
{
	void __rcu **slot;

	/*
	 * Linear revmap entries store the virq number, not the irq_data
	 * pointer, so only radix tree entries need fixing.
	 */
	if (d->hwirq < d->domain->revmap_size)
		return; /* Not using radix tree. */

	/* Fix up the revmap. */
	mutex_lock(&revmap_trees_mutex);
	slot = radix_tree_lookup_slot(&d->domain->revmap_tree, d->hwirq);
	if (slot)
		radix_tree_replace_slot(&d->domain->revmap_tree, slot, d);
	mutex_unlock(&revmap_trees_mutex);
}
/**
 * irq_domain_push_irq() - Push a domain in to the top of a hierarchy.
 * @domain:	Domain to push.
 * @virq:	Irq to push the domain in to.
 * @arg:	Passed to the irq_domain_ops alloc() function.
 *
 * For an already existing irqdomain hierarchy, as might be obtained
 * via a call to pci_enable_msix(), add an additional domain to the
 * head of the processing chain.  Must be called before request_irq()
 * has been called.
 */
int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
{
	struct irq_data *child_irq_data;
	struct irq_data *root_irq_data = irq_get_irq_data(virq);
	struct irq_desc *desc;
	int rv = 0;

	/*
	 * Check that no action has been set, which indicates the virq
	 * is in a state where this function doesn't have to deal with
	 * races between interrupt handling and maintaining the
	 * hierarchy.  This will catch gross misuse.  Attempting to
	 * make the check race free would require holding locks across
	 * calls to struct irq_domain_ops->alloc(), which could lead
	 * to deadlock, so we just do a simple check before starting.
	 */
	desc = irq_to_desc(virq);
	if (!desc)
		return -EINVAL;
	if (WARN_ON(desc->action))
		return -EBUSY;

	if (domain == NULL)
		return -EINVAL;

	if (WARN_ON(!irq_domain_is_hierarchy(domain)))
		return -EINVAL;

	if (!root_irq_data)
		return -EINVAL;

	/* The pushed domain must become the new outermost level */
	if (domain->parent != root_irq_data->domain)
		return -EINVAL;

	child_irq_data = kzalloc_node(sizeof(*child_irq_data), GFP_KERNEL,
				      irq_data_get_node(root_irq_data));
	if (!child_irq_data)
		return -ENOMEM;

	mutex_lock(&irq_domain_mutex);

	/* Copy the original irq_data. */
	*child_irq_data = *root_irq_data;

	/*
	 * Overwrite the root_irq_data, which is embedded in struct
	 * irq_desc, with values for this domain.
	 */
	root_irq_data->parent_data = child_irq_data;
	root_irq_data->domain = domain;
	root_irq_data->mask = 0;
	root_irq_data->hwirq = 0;
	root_irq_data->chip = NULL;
	root_irq_data->chip_data = NULL;

	/* May (probably does) set hwirq, chip, etc. */
	rv = irq_domain_alloc_irqs_hierarchy(domain, virq, 1, arg);
	if (rv) {
		/* Restore the original irq_data. */
		*root_irq_data = *child_irq_data;
		kfree(child_irq_data);
		goto error;
	}

	/* The former outermost irq_data now lives at a new address */
	irq_domain_fix_revmap(child_irq_data);
	irq_domain_set_mapping(domain, root_irq_data->hwirq, root_irq_data);

error:
	mutex_unlock(&irq_domain_mutex);

	return rv;
}
EXPORT_SYMBOL_GPL(irq_domain_push_irq);
/**
 * irq_domain_pop_irq() - Remove a domain from the top of a hierarchy.
 * @domain:	Domain to remove.
 * @virq:	Irq to remove the domain from.
 *
 * Undo the effects of a call to irq_domain_push_irq().  Must be
 * called either before request_irq() or after free_irq().
 */
int irq_domain_pop_irq(struct irq_domain *domain, int virq)
{
	struct irq_data *root_irq_data = irq_get_irq_data(virq);
	struct irq_data *child_irq_data;
	struct irq_data *tmp_irq_data;
	struct irq_desc *desc;

	/*
	 * Check that no action is set, which indicates the virq is in
	 * a state where this function doesn't have to deal with races
	 * between interrupt handling and maintaining the hierarchy.
	 * This will catch gross misuse.  Attempting to make the check
	 * race free would require holding locks across calls to
	 * struct irq_domain_ops->free(), which could lead to
	 * deadlock, so we just do a simple check before starting.
	 */
	desc = irq_to_desc(virq);
	if (!desc)
		return -EINVAL;
	if (WARN_ON(desc->action))
		return -EBUSY;

	if (domain == NULL)
		return -EINVAL;

	if (!root_irq_data)
		return -EINVAL;

	tmp_irq_data = irq_domain_get_irq_data(domain, virq);

	/* We can only "pop" if this domain is at the top of the list */
	if (WARN_ON(root_irq_data != tmp_irq_data))
		return -EINVAL;

	if (WARN_ON(root_irq_data->domain != domain))
		return -EINVAL;

	child_irq_data = root_irq_data->parent_data;
	if (WARN_ON(!child_irq_data))
		return -EINVAL;

	mutex_lock(&irq_domain_mutex);

	/* Unlink and unmap the outermost level before freeing it */
	root_irq_data->parent_data = NULL;

	irq_domain_clear_mapping(domain, root_irq_data->hwirq);
	irq_domain_free_irqs_hierarchy(domain, virq, 1);

	/* Restore the original irq_data. */
	*root_irq_data = *child_irq_data;

	/* The restored level moved back into the irq_desc-embedded slot */
	irq_domain_fix_revmap(root_irq_data);

	mutex_unlock(&irq_domain_mutex);

	kfree(child_irq_data);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_domain_pop_irq);
/**
 * irq_domain_free_irqs - Free IRQ number and associated data structures
 * @virq:	base IRQ number
 * @nr_irqs:	number of IRQs to free
 */
void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *data = irq_get_irq_data(virq);
	int i;

	if (WARN(!data || !data->domain || !data->domain->ops->free,
		 "NULL pointer, cannot free irq\n"))
		return;

	mutex_lock(&irq_domain_mutex);
	/* Unpublish first, then let the domains release their resources */
	for (i = 0; i < nr_irqs; i++)
		irq_domain_remove_irq(virq + i);
	irq_domain_free_irqs_hierarchy(data->domain, virq, nr_irqs);
	mutex_unlock(&irq_domain_mutex);

	irq_domain_free_irq_data(virq, nr_irqs);
	irq_free_descs(virq, nr_irqs);
}
  1417. /**
  1418. * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
  1419. * @irq_base: Base IRQ number
  1420. * @nr_irqs: Number of IRQs to allocate
  1421. * @arg: Allocation data (arch/domain specific)
  1422. *
  1423. * Check whether the domain has been setup recursive. If not allocate
  1424. * through the parent domain.
  1425. */
  1426. int irq_domain_alloc_irqs_parent(struct irq_domain *domain,
  1427. unsigned int irq_base, unsigned int nr_irqs,
  1428. void *arg)
  1429. {
  1430. if (!domain->parent)
  1431. return -ENOSYS;
  1432. return irq_domain_alloc_irqs_hierarchy(domain->parent, irq_base,
  1433. nr_irqs, arg);
  1434. }
  1435. EXPORT_SYMBOL_GPL(irq_domain_alloc_irqs_parent);
  1436. /**
  1437. * irq_domain_free_irqs_parent - Free interrupts from parent domain
  1438. * @irq_base: Base IRQ number
  1439. * @nr_irqs: Number of IRQs to free
  1440. *
  1441. * Check whether the domain has been setup recursive. If not free
  1442. * through the parent domain.
  1443. */
  1444. void irq_domain_free_irqs_parent(struct irq_domain *domain,
  1445. unsigned int irq_base, unsigned int nr_irqs)
  1446. {
  1447. if (!domain->parent)
  1448. return;
  1449. irq_domain_free_irqs_hierarchy(domain->parent, irq_base, nr_irqs);
  1450. }
  1451. EXPORT_SYMBOL_GPL(irq_domain_free_irqs_parent);
/*
 * Recursively activate from the innermost (parent) level outwards, so
 * each level's activate() runs after its parent has been programmed.
 */
static void __irq_domain_activate_irq(struct irq_data *irq_data)
{
	if (irq_data && irq_data->domain) {
		struct irq_domain *domain = irq_data->domain;

		if (irq_data->parent_data)
			__irq_domain_activate_irq(irq_data->parent_data);
		if (domain->ops->activate)
			domain->ops->activate(domain, irq_data);
	}
}
/*
 * Mirror of __irq_domain_activate_irq(): deactivate this level first,
 * then recurse towards the parents.
 */
static void __irq_domain_deactivate_irq(struct irq_data *irq_data)
{
	if (irq_data && irq_data->domain) {
		struct irq_domain *domain = irq_data->domain;

		if (domain->ops->deactivate)
			domain->ops->deactivate(domain, irq_data);
		if (irq_data->parent_data)
			__irq_domain_deactivate_irq(irq_data->parent_data);
	}
}
/**
 * irq_domain_activate_irq - Call domain_ops->activate recursively to activate
 *			     interrupt
 * @irq_data:	outermost irq_data associated with interrupt
 *
 * This is the second step to call domain_ops->activate to program interrupt
 * controllers, so the interrupt could actually get delivered.
 */
void irq_domain_activate_irq(struct irq_data *irq_data)
{
	/* The activated flag makes repeated activation a no-op */
	if (!irqd_is_activated(irq_data)) {
		__irq_domain_activate_irq(irq_data);
		irqd_set_activated(irq_data);
	}
}
/**
 * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to
 *			       deactivate interrupt
 * @irq_data:	outermost irq_data associated with interrupt
 *
 * It calls domain_ops->deactivate to program interrupt controllers to disable
 * interrupt delivery.
 */
void irq_domain_deactivate_irq(struct irq_data *irq_data)
{
	/* Only deactivate interrupts that are currently activated */
	if (irqd_is_activated(irq_data)) {
		__irq_domain_deactivate_irq(irq_data);
		irqd_clr_activated(irq_data);
	}
}
  1502. static void irq_domain_check_hierarchy(struct irq_domain *domain)
  1503. {
  1504. /* Hierarchy irq_domains must implement callback alloc() */
  1505. if (domain->ops->alloc)
  1506. domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY;
  1507. }
  1508. /**
  1509. * irq_domain_hierarchical_is_msi_remap - Check if the domain or any
  1510. * parent has MSI remapping support
  1511. * @domain: domain pointer
  1512. */
  1513. bool irq_domain_hierarchical_is_msi_remap(struct irq_domain *domain)
  1514. {
  1515. for (; domain; domain = domain->parent) {
  1516. if (irq_domain_is_msi_remap(domain))
  1517. return true;
  1518. }
  1519. return false;
  1520. }
  1521. #else /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1522. /**
  1523. * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain
  1524. * @domain: domain to match
  1525. * @virq: IRQ number to get irq_data
  1526. */
  1527. struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
  1528. unsigned int virq)
  1529. {
  1530. struct irq_data *irq_data = irq_get_irq_data(virq);
  1531. return (irq_data && irq_data->domain == domain) ? irq_data : NULL;
  1532. }
  1533. EXPORT_SYMBOL_GPL(irq_domain_get_irq_data);
/**
 * irq_domain_set_info - Set the complete data for a @virq in @domain
 * @domain: Interrupt domain to match
 * @virq: IRQ number
 * @hwirq: The hardware interrupt number
 * @chip: The associated interrupt chip
 * @chip_data: The associated interrupt chip data
 * @handler: The interrupt flow handler
 * @handler_data: The interrupt flow handler data
 * @handler_name: The interrupt handler name
 *
 * Non-hierarchy variant (!CONFIG_IRQ_DOMAIN_HIERARCHY): @domain and @hwirq
 * are accepted for interface compatibility with the hierarchical version but
 * are not consumed here; only the chip, chip data and flow handler of @virq
 * are programmed.
 */
void irq_domain_set_info(struct irq_domain *domain, unsigned int virq,
irq_hw_number_t hwirq, struct irq_chip *chip,
void *chip_data, irq_flow_handler_t handler,
void *handler_data, const char *handler_name)
{
irq_set_chip_and_handler_name(virq, chip, handler, handler_name);
irq_set_chip_data(virq, chip_data);
irq_set_handler_data(virq, handler_data);
}
/* No-op stub: hierarchy flagging only applies with CONFIG_IRQ_DOMAIN_HIERARCHY */
static void irq_domain_check_hierarchy(struct irq_domain *domain)
{
}
  1557. #endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
  1558. #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/* debugfs "domains" directory; parent of the per-domain files below */
static struct dentry *domain_dir;
  1560. static void
  1561. irq_domain_debug_show_one(struct seq_file *m, struct irq_domain *d, int ind)
  1562. {
  1563. seq_printf(m, "%*sname: %s\n", ind, "", d->name);
  1564. seq_printf(m, "%*ssize: %u\n", ind + 1, "",
  1565. d->revmap_size + d->revmap_direct_max_irq);
  1566. seq_printf(m, "%*smapped: %u\n", ind + 1, "", d->mapcount);
  1567. seq_printf(m, "%*sflags: 0x%08x\n", ind +1 , "", d->flags);
  1568. #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
  1569. if (!d->parent)
  1570. return;
  1571. seq_printf(m, "%*sparent: %s\n", ind + 1, "", d->parent->name);
  1572. irq_domain_debug_show_one(m, d->parent, ind + 4);
  1573. #endif
  1574. }
  1575. static int irq_domain_debug_show(struct seq_file *m, void *p)
  1576. {
  1577. struct irq_domain *d = m->private;
  1578. /* Default domain? Might be NULL */
  1579. if (!d) {
  1580. if (!irq_default_domain)
  1581. return 0;
  1582. d = irq_default_domain;
  1583. }
  1584. irq_domain_debug_show_one(m, d, 0);
  1585. return 0;
  1586. }
/* seq_file open hook: bind irq_domain_debug_show to the inode's domain */
static int irq_domain_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, irq_domain_debug_show, inode->i_private);
}
/* file_operations for the per-domain debugfs files (single-shot seq_file) */
static const struct file_operations dfs_domain_ops = {
.open = irq_domain_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
  1597. static void debugfs_add_domain_dir(struct irq_domain *d)
  1598. {
  1599. if (!d->name || !domain_dir || d->debugfs_file)
  1600. return;
  1601. d->debugfs_file = debugfs_create_file(d->name, 0444, domain_dir, d,
  1602. &dfs_domain_ops);
  1603. }
/*
 * Drop the domain's debugfs file. NOTE(review): relies on debugfs_remove()
 * tolerating a NULL/never-created dentry — confirm against the debugfs API.
 */
static void debugfs_remove_domain_dir(struct irq_domain *d)
{
debugfs_remove(d->debugfs_file);
}
/*
 * Boot-time setup: create the "domains" debugfs directory under @root,
 * a "default" file for the default domain, and one file per already
 * registered domain (walked under irq_domain_mutex so the list is stable).
 */
void __init irq_domain_debugfs_init(struct dentry *root)
{
struct irq_domain *d;
domain_dir = debugfs_create_dir("domains", root);
if (!domain_dir)
return;
/* NULL i_private makes the "default" file show irq_default_domain */
debugfs_create_file("default", 0444, domain_dir, NULL, &dfs_domain_ops);
mutex_lock(&irq_domain_mutex);
list_for_each_entry(d, &irq_domain_list, link)
debugfs_add_domain_dir(d);
mutex_unlock(&irq_domain_mutex);
}
  1620. #endif