wsp_pci.c

/*
 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define DEBUG

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>

#include <asm/sections.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/iommu.h>
#include <asm/io-workarounds.h>
#include <asm/debug.h>

#include "wsp.h"
#include "wsp_pci.h"
#include "msi.h"

/* Max number of TVTs for one table. Only 32-bit tables can use
 * multiple TVTs, so the max currently supported is 8 since only
 * 2G of DMA space is supported.
 */
#define MAX_TABLE_TVT_COUNT	8

struct wsp_dma_table {
	struct list_head	link;
	struct iommu_table	table;
	struct wsp_phb		*phb;
	struct page		*tces[MAX_TABLE_TVT_COUNT];
};

/* We support DMA regions from 0...2G in 32-bit space (no support for
 * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
 * entry) with validation enabled (though not supported by Simics
 * just yet).
 *
 * To simplify things, we divide this 2G space into N regions based
 * on the constant below, which could be turned into a tunable eventually.
 *
 * We then dynamically assign those regions to devices as they show up.
 *
 * We use a bitmap as an allocator for these.
 *
 * Tables are allocated/created dynamically as devices are discovered,
 * multiple TVT entries are used if needed.
 *
 * When 64-bit DMA support is added we should simply use a separate set
 * of larger regions (the HW supports 64 TVT entries). We can
 * additionally create a bypass region in 64-bit space for performance,
 * though that would have a cost in terms of security.
 *
 * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
 * for all devices and bus/dev/fn validation is disabled.
 *
 * Note that a DMA32 region cannot be smaller than 256M so the max
 * supported here for now is 8. We don't yet support sharing regions
 * between multiple devices so the max number of devices supported
 * is MAX_TABLE_TVT_COUNT.
 */
#define NUM_DMA32_REGIONS	1

struct wsp_phb {
	struct pci_controller	*hose;

	/* Lock controlling access to the list of dma tables.
	 * It does -not- protect against dma_* operations on
	 * those tables, those should be stopped before an entry
	 * is removed from the list.
	 *
	 * The lock is also used for error handling operations
	 */
	spinlock_t		lock;
	struct list_head	dma_tables;
	unsigned long		dma32_map;
	unsigned long		dma32_base;
	unsigned int		dma32_num_regions;
	unsigned long		dma32_region_size;

	/* Debugfs stuff */
	struct dentry		*ddir;

	struct list_head	all;
};
static LIST_HEAD(wsp_phbs);

//#define cfg_debug(fmt...)	pr_debug(fmt)
#define cfg_debug(fmt...)
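
/*
 * The config space accessors below use the PHB's indirect mechanism:
 * a config address register (bus, devfn, register number and byte
 * enables) is programmed first, then the data is transferred through
 * the adjacent config data register. Sub-word accesses select the
 * bytes via the byte-enable field and shift the value accordingly.
 */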
static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
				int offset, int len, u32 *val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xff;
		cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
			>> (suboff << 3)) & 0xffff;
		cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		*val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
		cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, *val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	struct pci_controller *hose;
	int suboff;
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	case 2:
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	default:
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops wsp_pcie_pci_ops =
{
	.read = wsp_pcie_read_config,
	.write = wsp_pcie_write_config,
};

#define TCE_SHIFT		12
#define TCE_PAGE_SIZE		(1 << TCE_SHIFT)
#define TCE_PCI_WRITE		0x2		/* write from PCI allowed */
#define TCE_PCI_READ		0x1		/* read from PCI allowed */
#define TCE_RPN_MASK		0x3fffffffffful	/* 42-bit RPN (4K pages) */
#define TCE_RPN_SHIFT		12

//#define dma_debug(fmt...)	pr_debug(fmt)
#define dma_debug(fmt...)
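
/*
 * The TCE tables backing a DMA region are kept as an array of page
 * blocks in wsp_dma_table.tces[]: each 512KB chunk holds 64K (0x10000)
 * 8-byte TCEs, i.e. 256MB worth of 4K IOMMU pages, which is why the
 * build/free helpers below select the chunk with (index >> 16) and the
 * entry within it with (index & 0xffff).
 */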
static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
			 unsigned long uaddr, enum dma_data_direction direction,
			 struct dma_attrs *attrs)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
	u64 proto_tce;
	u64 *tcep;
	u64 rpn;

	proto_tce = TCE_PCI_READ;
#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	proto_tce |= TCE_PCI_WRITE;
#else
	if (direction != DMA_TO_DEVICE)
		proto_tce |= TCE_PCI_WRITE;
#endif

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table
	 */
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);

		/* can't move this out since we might cross LMB boundary */
		rpn = __pa(uaddr) >> TCE_SHIFT;
		*tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

		dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
			  tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT);
		uaddr += TCE_PAGE_SIZE;
		index++;
	}
	return 0;
}
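
/*
 * Clearing a TCE also requires invalidating the PHB's TCE cache through
 * PCIE_REG_TCE_KILL. That step is skipped when the DD1 workaround is
 * enabled, since that configuration disables TCE caching at init time.
 */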
static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	struct pci_controller *hose = ptbl->phb->hose;
#endif
	u64 *tcep;

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table. Also use line-kill option to kill multiple
	 * TCEs at once
	 */
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);

		dma_debug("[DMA] TCE %p cleared\n", tcep);
		*tcep = 0;
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
		/* Don't write there since it would pollute other MMIO accesses */
		out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
			 PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
			 (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
#endif
		index++;
	}
}
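
/*
 * Create the TCE table(s) backing one 32-bit DMA region: each 256MB of
 * region space gets its own 512KB table and its own TVT entry, programmed
 * through the indirect IODA address/data registers. When a device is
 * passed in, the TVT is set up to validate bus/dev/fn so that only this
 * device can use the region.
 */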
static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
							unsigned int region,
							struct pci_dev *validate)
{
	struct pci_controller *hose = phb->hose;
	unsigned long size = phb->dma32_region_size;
	unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
	struct wsp_dma_table *tbl;
	int tvts_per_table, i, tvt, nid;
	unsigned long flags;

	nid = of_node_to_nid(phb->hose->dn);

	/* Calculate how many TVTs are needed */
	tvts_per_table = size / 0x10000000;
	if (tvts_per_table == 0)
		tvts_per_table = 1;

	/* Calculate the base TVT index. We know all tables have the same
	 * size so we just do a simple multiply here
	 */
	tvt = region * tvts_per_table;

	pr_debug(" Region : %d\n", region);
	pr_debug(" DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
	pr_debug(" Number of TVTs : %d\n", tvts_per_table);
	pr_debug(" Base TVT : %d\n", tvt);
	pr_debug(" Node : %d\n", nid);

	tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
	if (!tbl)
		return ERR_PTR(-ENOMEM);
	tbl->phb = phb;

	/* Create as many TVTs as needed, each represents 256M at most */
	for (i = 0; i < tvts_per_table; i++) {
		u64 tvt_data1, tvt_data0;

		/* Allocate table. We use a 4K TCE size for now always so
		 * one table is always 8 * (256M / 4K) == 512K
		 */
		tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
		if (tbl->tces[i] == NULL)
			goto fail;
		memset(page_address(tbl->tces[i]), 0, 0x80000);

		pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));

		/* Table size. We currently set it to be the whole 256M region */
		tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
		/* IO page size set to 4K */
		tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;

		/* Shift in the address */
		tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;

		/* Validation stuff. We only validate fully bus/dev/fn for now.
		 * One day maybe we can group devices but that isn't the case
		 * at the moment
		 */
		if (validate) {
			tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
			tvt_data0 |= validate->bus->number;
			tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
			tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
				<< IODA_TVT1_DEVNUM_VALUE_SHIFT;
			tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
			tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
				<< IODA_TVT1_FUNCNUM_VALUE_SHIFT;
		}

		/* XX PE number is always 0 for now */

		/* Program the values using the PHB lock */
		spin_lock_irqsave(&phb->lock, flags);
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
		spin_unlock_irqrestore(&phb->lock, flags);
	}

	/* Init bits and pieces */
	tbl->table.it_blocksize = 16;
	tbl->table.it_offset = addr >> IOMMU_PAGE_SHIFT;
	tbl->table.it_size = size >> IOMMU_PAGE_SHIFT;

	/*
	 * It's already blank but we clear it anyway.
	 * Consider an additional interface that makes clearing optional
	 */
	iommu_init_table(&tbl->table, nid);

	list_add(&tbl->link, &phb->dma_tables);
	return tbl;

fail:
	pr_debug(" Failed to allocate a 256M TCE table !\n");
	for (i = 0; i < tvts_per_table; i++)
		if (tbl->tces[i])
			__free_pages(tbl->tces[i], get_order(0x80000));
	kfree(tbl);
	return ERR_PTR(-ENOMEM);
}

static void __devinit wsp_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct dev_archdata *archdata = &pdev->dev.archdata;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct wsp_phb *phb = hose->private_data;
	struct wsp_dma_table *table = NULL;
	unsigned long flags;
	int i;

	/* Don't assign an iommu table to a bridge */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		return;

	pr_debug("%s: Setting up DMA...\n", pci_name(pdev));

	spin_lock_irqsave(&phb->lock, flags);

	/* If only one region, check if it already exists */
	if (phb->dma32_num_regions == 1) {
		spin_unlock_irqrestore(&phb->lock, flags);
		if (list_empty(&phb->dma_tables))
			table = wsp_pci_create_dma32_table(phb, 0, NULL);
		else
			table = list_first_entry(&phb->dma_tables,
						 struct wsp_dma_table,
						 link);
	} else {
		/* else find a free region */
		for (i = 0; i < phb->dma32_num_regions && !table; i++) {
			if (__test_and_set_bit(i, &phb->dma32_map))
				continue;
			spin_unlock_irqrestore(&phb->lock, flags);
			table = wsp_pci_create_dma32_table(phb, i, pdev);
		}
	}

	/* Check if we got an error */
	if (IS_ERR(table)) {
		pr_err("%s: Failed to create DMA table, err %ld !\n",
		       pci_name(pdev), PTR_ERR(table));
		return;
	}

	/* Or a valid table */
	if (table) {
		pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
			pci_name(pdev),
			table->table.it_offset << IOMMU_PAGE_SHIFT,
			(table->table.it_offset << IOMMU_PAGE_SHIFT)
			+ phb->dma32_region_size - 1);
		archdata->dma_data.iommu_table_base = &table->table;
		return;
	}

	/* Or no room */
	spin_unlock_irqrestore(&phb->lock, flags);
	pr_err("%s: Out of DMA space !\n", pci_name(pdev));
}

static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
{
	u64 val;
	int i;

#define DUMP_REG(x) \
	pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))

	/*
	 * Some WSP variants have a bogus class code by default in the PCI-E
	 * root complex's built-in P2P bridge
	 */
	val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
		 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));

#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	/* XXX Disable TCE caching, it doesn't work on DD1 */
	out_be64(hose->cfg_data + 0xe50,
		 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
	printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
#endif

	/* Configure M32A and IO. IO is hard wired to be 1M for now */
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
		 (~(hose->io_resource.end - hose->io_resource.start)) &
		 0x3fffffff000ul);
	out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);

	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
		 hose->mem_resources[0].start);
	printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
	       (~(hose->mem_resources[0].end -
		  hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
		 (~(hose->mem_resources[0].end -
		    hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
		 (hose->mem_resources[0].start - hose->pci_mem_offset) | 1);

	/* Clear all TVT entries
	 *
	 * XX Might get TVT count from device-tree
	 */
	for (i = 0; i < IODA_TVT_COUNT; i++) {
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 PCIE_REG_IODA_AD_TBL_TVT | i);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
	}

	/* Kill the TCE cache */
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
		 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
		 PCIE_REG_PHBC_64B_TCE_EN);

	/* Enable 32 & 64-bit MSIs, IO space and M32A */
	val = PCIE_REG_PHBC_32BIT_MSI_EN |
	      PCIE_REG_PHBC_IO_EN |
	      PCIE_REG_PHBC_64BIT_MSI_EN |
	      PCIE_REG_PHBC_M32A_EN;
	if (iommu_is_off)
		val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
	pr_debug("Will write config: 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);

	/* Enable error reporting */
	out_be64(hose->cfg_data + 0xe00,
		 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);

	/* Mask an error that's generated when doing config space probe
	 *
	 * XXX Maybe we should only mask it around config space cycles... that or
	 * ignore it when we know we had a config space cycle recently ?
	 */
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);

	/* Enable UTL errors, for now, all of them go to UTL irq 1
	 *
	 * We similarly mask one UTL error caused apparently during normal
	 * probing. We also mask the link up error
	 */
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);

	DUMP_REG(PCIE_REG_IO_BASE_ADDR);
	DUMP_REG(PCIE_REG_IO_BASE_MASK);
	DUMP_REG(PCIE_REG_IO_START_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_MASK);
	DUMP_REG(PCIE_REG_M32A_START_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_MASK);
	DUMP_REG(PCIE_REG_M32B_START_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_MASK);
	DUMP_REG(PCIE_REG_M64_START_ADDR);
	DUMP_REG(PCIE_REG_PHB_CONFIG);
}

static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
{
	u64 val;
	int i;

	for (i = 0; i < 10000; i++) {
		val = in_be64(phb->hose->cfg_data + 0xe08);
		if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
			return;
		udelay(1);
	}
	pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
		   phb->hose->global_number, port);
}
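
/*
 * The DEF_PCI_AC_* macros below, combined with <asm/io-defs.h>, generate
 * one wsp_pci_* wrapper per PIO accessor (inb/outb/insw/...): each wrapper
 * looks up the owning PHB through the io-workarounds framework, takes its
 * lock, waits for the PHB to go idle, then performs the real access. The
 * *_mem variants expand to nothing so memory-mapped accessors are left
 * untouched.
 */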
#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa)	\
static ret wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	ret rval;					\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	rval = __do_##name al;				\
	spin_unlock_irqrestore(&phb->lock, flags);	\
	return rval;					\
}

#define DEF_PCI_AC_NORET_pio(name, at, al, aa)		\
static void wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	__do_##name al;					\
	spin_unlock_irqrestore(&phb->lock, flags);	\
}

#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET_mem(name, at, al, aa)

#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	\
	DEF_PCI_AC_RET_##space(name, ret, at, al, aa)

#define DEF_PCI_AC_NORET(name, at, al, space, aa)	\
	DEF_PCI_AC_NORET_##space(name, at, al, aa)	\

#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET

static struct ppc_pci_io wsp_pci_iops = {
	.inb = wsp_pci_inb,
	.inw = wsp_pci_inw,
	.inl = wsp_pci_inl,
	.outb = wsp_pci_outb,
	.outw = wsp_pci_outw,
	.outl = wsp_pci_outl,
	.insb = wsp_pci_insb,
	.insw = wsp_pci_insw,
	.insl = wsp_pci_insl,
	.outsb = wsp_pci_outsb,
	.outsw = wsp_pci_outsw,
	.outsl = wsp_pci_outsl,
};
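
/*
 * Bring up a single PHB: allocate the wsp_phb and pci_controller
 * structures, map the bridge register space, parse the device-tree
 * ranges, carve up the 2G DMA32 window into regions, configure the
 * hardware, register the PIO workarounds and set up MSI support.
 */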
static int __init wsp_setup_one_phb(struct device_node *np)
{
	struct pci_controller *hose;
	struct wsp_phb *phb;

	pr_info("PCI: Setting up PCIe host bridge %s\n", np->full_name);

	phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
	if (!phb)
		return -ENOMEM;
	hose = pcibios_alloc_controller(np);
	if (!hose) {
		/* Can't really free the phb */
		return -ENOMEM;
	}
	hose->private_data = phb;
	phb->hose = hose;

	INIT_LIST_HEAD(&phb->dma_tables);
	spin_lock_init(&phb->lock);

	/* XXX Use bus-range property ? */
	hose->first_busno = 0;
	hose->last_busno = 0xff;

	/* We use cfg_data as the address for the whole bridge MMIO space
	 */
	hose->cfg_data = of_iomap(hose->dn, 0);

	pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);

	/* Get the ranges of the device-tree */
	pci_process_bridge_OF_ranges(hose, np, 0);

	/* XXX Force re-assigning of everything for now */
	pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
		      PCI_ENABLE_PROC_DOMAINS);

	/* Calculate how the TCE space is divided */
	phb->dma32_base = 0;
	phb->dma32_num_regions = NUM_DMA32_REGIONS;
	if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
		pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
			   MAX_TABLE_TVT_COUNT);
		phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
	}
	phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions;

	BUG_ON(!is_power_of_2(phb->dma32_region_size));

	/* Setup config ops */
	hose->ops = &wsp_pcie_pci_ops;

	/* Configure the HW */
	wsp_pcie_configure_hw(hose);

	/* Instantiate IO workarounds */
	iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
#ifdef CONFIG_PCI_MSI
	wsp_setup_phb_msi(hose);
#endif

	/* Add to global list */
	list_add(&phb->all, &wsp_phbs);

	return 0;
}

void __init wsp_setup_pci(void)
{
	struct device_node *np;
	int rc;

	/* Find host bridges */
	for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
		rc = wsp_setup_one_phb(np);
		if (rc)
			pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
			       np->full_name, rc);
	}

	/* Establish device-tree linkage */
	pci_devs_phb_init();

	/* Set DMA ops to use TCEs */
	if (iommu_is_off) {
		pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
		set_pci_dma_ops(&dma_direct_ops);
	} else {
		ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
		ppc_md.tce_build = tce_build_wsp;
		ppc_md.tce_free = tce_free_wsp;
		set_pci_dma_ops(&dma_iommu_ops);
	}
}

#define err_debug(fmt...)	pr_debug(fmt)
//#define err_debug(fmt...)

static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
{
	const u32 *prop;
	int hw_irq;

	/* Ok, no interrupts property, let's try to find our child P2P */
	np = of_get_next_child(np, NULL);
	if (np == NULL)
		return 0;

	/* Grab its interrupt map */
	prop = of_get_property(np, "interrupt-map", NULL);
	if (prop == NULL)
		return 0;

	/* Grab one of the interrupts in there, keep the low 4 bits */
	hw_irq = prop[5] & 0xf;

	/* 0..4 for PHB 0 and 5..9 for PHB 1 */
	if (hw_irq < 5)
		hw_irq = 4;
	else
		hw_irq = 9;
	hw_irq |= prop[5] & ~0xf;

	err_debug("PCI: Using 0x%x as error IRQ for %s\n",
		  hw_irq, np->parent->full_name);
	return irq_create_mapping(NULL, hw_irq);
}

static const struct {
	u32 offset;
	const char *name;
} wsp_pci_regs[] = {
#define DREG(x) { PCIE_REG_##x, #x }
#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }
	/* Architected registers except CONFIG_ and IODA
	 * to avoid side effects
	 */
	DREG(DMA_CHAN_STATUS),
	DREG(CPU_LOADSTORE_STATUS),
	DREG(LOCK0),
	DREG(LOCK1),
	DREG(PHB_CONFIG),
	DREG(IO_BASE_ADDR),
	DREG(IO_BASE_MASK),
	DREG(IO_START_ADDR),
	DREG(M32A_BASE_ADDR),
	DREG(M32A_BASE_MASK),
	DREG(M32A_START_ADDR),
	DREG(M32B_BASE_ADDR),
	DREG(M32B_BASE_MASK),
	DREG(M32B_START_ADDR),
	DREG(M64_BASE_ADDR),
	DREG(M64_BASE_MASK),
	DREG(M64_START_ADDR),
	DREG(TCE_KILL),
	DREG(LOCK2),
	DREG(PHB_GEN_CAP),
	DREG(PHB_TCE_CAP),
	DREG(PHB_IRQ_CAP),
	DREG(PHB_EEH_CAP),
	DREG(PAPR_ERR_INJ_CONTROL),
	DREG(PAPR_ERR_INJ_ADDR),
	DREG(PAPR_ERR_INJ_MASK),

	/* UTL core regs */
	DUTL(SYS_BUS_CONTROL),
	DUTL(STATUS),
	DUTL(SYS_BUS_AGENT_STATUS),
	DUTL(SYS_BUS_AGENT_ERR_SEV),
	DUTL(SYS_BUS_AGENT_IRQ_EN),
	DUTL(SYS_BUS_BURST_SZ_CONF),
	DUTL(REVISION_ID),
	DUTL(OUT_POST_HDR_BUF_ALLOC),
	DUTL(OUT_POST_DAT_BUF_ALLOC),
	DUTL(IN_POST_HDR_BUF_ALLOC),
	DUTL(IN_POST_DAT_BUF_ALLOC),
	DUTL(OUT_NP_BUF_ALLOC),
	DUTL(IN_NP_BUF_ALLOC),
	DUTL(PCIE_TAGS_ALLOC),
	DUTL(GBIF_READ_TAGS_ALLOC),
	DUTL(PCIE_PORT_CONTROL),
	DUTL(PCIE_PORT_STATUS),
	DUTL(PCIE_PORT_ERROR_SEV),
	DUTL(PCIE_PORT_IRQ_EN),
	DUTL(RC_STATUS),
	DUTL(RC_ERR_SEVERITY),
	DUTL(RC_IRQ_EN),
	DUTL(EP_STATUS),
	DUTL(EP_ERR_SEVERITY),
	DUTL(EP_ERR_IRQ_EN),
	DUTL(PCI_PM_CTRL1),
	DUTL(PCI_PM_CTRL2),

	/* PCIe stack regs */
	DREG(SYSTEM_CONFIG1),
	DREG(SYSTEM_CONFIG2),
	DREG(EP_SYSTEM_CONFIG),
	DREG(EP_FLR),
	DREG(EP_BAR_CONFIG),
	DREG(LINK_CONFIG),
	DREG(PM_CONFIG),
	DREG(DLP_CONTROL),
	DREG(DLP_STATUS),
	DREG(ERR_REPORT_CONTROL),
	DREG(SLOT_CONTROL1),
	DREG(SLOT_CONTROL2),
	DREG(UTL_CONFIG),
	DREG(BUFFERS_CONFIG),
	DREG(ERROR_INJECT),
	DREG(SRIOV_CONFIG),
	DREG(PF0_SRIOV_STATUS),
	DREG(PF1_SRIOV_STATUS),
	DREG(PORT_NUMBER),
	DREG(POR_SYSTEM_CONFIG),

	/* Internal logic regs */
	DREG(PHB_VERSION),
	DREG(RESET),
	DREG(PHB_CONTROL),
	DREG(PHB_TIMEOUT_CONTROL1),
	DREG(PHB_QUIESCE_DMA),
	DREG(PHB_DMA_READ_TAG_ACTV),
	DREG(PHB_TCE_READ_TAG_ACTV),

	/* FIR registers */
	DREG(LEM_FIR_ACCUM),
	DREG(LEM_FIR_AND_MASK),
	DREG(LEM_FIR_OR_MASK),
	DREG(LEM_ACTION0),
	DREG(LEM_ACTION1),
	DREG(LEM_ERROR_MASK),
	DREG(LEM_ERROR_AND_MASK),
	DREG(LEM_ERROR_OR_MASK),

	/* Error traps registers */
	DREG(PHB_ERR_STATUS),
	DREG(PHB_ERR1_STATUS),
	DREG(PHB_ERR_INJECT),
	DREG(PHB_ERR_LEM_ENABLE),
	DREG(PHB_ERR_IRQ_ENABLE),
	DREG(PHB_ERR_FREEZE_ENABLE),
	DREG(PHB_ERR_SIDE_ENABLE),
	DREG(PHB_ERR_LOG_0),
	DREG(PHB_ERR_LOG_1),
	DREG(PHB_ERR_STATUS_MASK),
	DREG(PHB_ERR1_STATUS_MASK),
	DREG(MMIO_ERR_STATUS),
	DREG(MMIO_ERR1_STATUS),
	DREG(MMIO_ERR_INJECT),
	DREG(MMIO_ERR_LEM_ENABLE),
	DREG(MMIO_ERR_IRQ_ENABLE),
	DREG(MMIO_ERR_FREEZE_ENABLE),
	DREG(MMIO_ERR_SIDE_ENABLE),
	DREG(MMIO_ERR_LOG_0),
	DREG(MMIO_ERR_LOG_1),
	DREG(MMIO_ERR_STATUS_MASK),
	DREG(MMIO_ERR1_STATUS_MASK),
	DREG(DMA_ERR_STATUS),
	DREG(DMA_ERR1_STATUS),
	DREG(DMA_ERR_INJECT),
	DREG(DMA_ERR_LEM_ENABLE),
	DREG(DMA_ERR_IRQ_ENABLE),
	DREG(DMA_ERR_FREEZE_ENABLE),
	DREG(DMA_ERR_SIDE_ENABLE),
	DREG(DMA_ERR_LOG_0),
	DREG(DMA_ERR_LOG_1),
	DREG(DMA_ERR_STATUS_MASK),
	DREG(DMA_ERR1_STATUS_MASK),

	/* Debug and Trace registers */
	DREG(PHB_DEBUG_CONTROL0),
	DREG(PHB_DEBUG_STATUS0),
	DREG(PHB_DEBUG_CONTROL1),
	DREG(PHB_DEBUG_STATUS1),
	DREG(PHB_DEBUG_CONTROL2),
	DREG(PHB_DEBUG_STATUS2),
	DREG(PHB_DEBUG_CONTROL3),
	DREG(PHB_DEBUG_STATUS3),
	DREG(PHB_DEBUG_CONTROL4),
	DREG(PHB_DEBUG_STATUS4),
	DREG(PHB_DEBUG_CONTROL5),
	DREG(PHB_DEBUG_STATUS5),

	/* Don't seem to exist ...
	DREG(PHB_DEBUG_CONTROL6),
	DREG(PHB_DEBUG_STATUS6),
	*/
};
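
/*
 * This register list is used twice: wsp_pci_regs_show() dumps every entry
 * through the "all_regs" debugfs file, and wsp_setup_pci_err_reporting()
 * also creates one read/write debugfs file per entry under phbN/regs/.
 */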
static int wsp_pci_regs_show(struct seq_file *m, void *private)
{
	struct wsp_phb *phb = m->private;
	struct pci_controller *hose = phb->hose;
	int i;

	for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
		/* Skip write-only regs */
		if (wsp_pci_regs[i].offset == 0xc08 ||
		    wsp_pci_regs[i].offset == 0xc10 ||
		    wsp_pci_regs[i].offset == 0xc38 ||
		    wsp_pci_regs[i].offset == 0xc40)
			continue;
		seq_printf(m, "0x%03x: 0x%016llx %s\n",
			   wsp_pci_regs[i].offset,
			   in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
			   wsp_pci_regs[i].name);
	}
	return 0;
}

static int wsp_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, wsp_pci_regs_show, inode->i_private);
}

static const struct file_operations wsp_pci_regs_fops = {
	.open = wsp_pci_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int wsp_pci_reg_set(void *data, u64 val)
{
	out_be64((void __iomem *)data, val);
	return 0;
}

static int wsp_pci_reg_get(void *data, u64 *val)
{
	*val = in_be64((void __iomem *)data);
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");

static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
{
	struct wsp_phb *phb = dev_id;
	struct pci_controller *hose = phb->hose;
	irqreturn_t handled = IRQ_NONE;
	struct wsp_pcie_err_log_data ed;

	pr_err("PCI: Error interrupt on %s (PHB %d)\n",
	       hose->dn->full_name, hose->global_number);
again:
	memset(&ed, 0, sizeof(ed));

	/* Read and clear UTL errors */
	ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
	if (ed.utl_sys_err)
		out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
	ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
	if (ed.utl_port_err)
		out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
	ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
	if (ed.utl_rc_err)
		out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);

	/* Read and clear main trap errors */
	ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
	if (ed.phb_err) {
		ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
		ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
		ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
	}
	ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
	if (ed.mmio_err) {
		ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
		ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
		ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
	}
	ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
	if (ed.dma_err) {
		ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
		ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
		ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
	}

	/* Now print things out */
	if (ed.phb_err) {
		pr_err(" PHB Error Status : 0x%016llx\n", ed.phb_err);
		pr_err(" PHB First Error Status: 0x%016llx\n", ed.phb_err1);
		pr_err(" PHB Error Log 0 : 0x%016llx\n", ed.phb_log0);
		pr_err(" PHB Error Log 1 : 0x%016llx\n", ed.phb_log1);
	}
	if (ed.mmio_err) {
		pr_err(" MMIO Error Status : 0x%016llx\n", ed.mmio_err);
		pr_err(" MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
		pr_err(" MMIO Error Log 0 : 0x%016llx\n", ed.mmio_log0);
		pr_err(" MMIO Error Log 1 : 0x%016llx\n", ed.mmio_log1);
	}
	if (ed.dma_err) {
		pr_err(" DMA Error Status : 0x%016llx\n", ed.dma_err);
		pr_err(" DMA First Error Status: 0x%016llx\n", ed.dma_err1);
		pr_err(" DMA Error Log 0 : 0x%016llx\n", ed.dma_log0);
		pr_err(" DMA Error Log 1 : 0x%016llx\n", ed.dma_log1);
	}
	if (ed.utl_sys_err)
		pr_err(" UTL Sys Error Status : 0x%016llx\n", ed.utl_sys_err);
	if (ed.utl_port_err)
		pr_err(" UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
	if (ed.utl_rc_err)
		pr_err(" UTL RC Error Status : 0x%016llx\n", ed.utl_rc_err);

	/* Interrupts are caused by the error traps. If we had any error there
	 * we loop again in case the UTL buffered some new stuff between
	 * going there and going to the traps
	 */
	if (ed.dma_err || ed.mmio_err || ed.phb_err) {
		handled = IRQ_HANDLED;
		goto again;
	}
	return handled;
}

static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
{
	struct pci_controller *hose = phb->hose;
	int err_irq, i, rc;
	char fname[16];

	/* Create a debugfs file for that PHB */
	sprintf(fname, "phb%d", phb->hose->global_number);
	phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);

	/* Some useful debug output */
	if (phb->ddir) {
		struct dentry *d = debugfs_create_dir("regs", phb->ddir);
		char tmp[64];

		for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
			sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
				wsp_pci_regs[i].name);
			debugfs_create_file(tmp, 0600, d,
					    hose->cfg_data + wsp_pci_regs[i].offset,
					    &wsp_pci_reg_fops);
		}
		debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
	}

	/* Find the IRQ number for that PHB */
	err_irq = irq_of_parse_and_map(hose->dn, 0);
	if (err_irq == 0)
		/* XXX Error IRQ lacking from device-tree */
		err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
	if (err_irq == 0) {
		pr_err("PCI: Failed to fetch error interrupt for %s\n",
		       hose->dn->full_name);
		return;
	}

	/* Request it */
	rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
	if (rc) {
		pr_err("PCI: Failed to request interrupt for %s\n",
		       hose->dn->full_name);
	}

	/* Enable interrupts for all errors for now */
	out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
	out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
}

/*
 * This is called later to hookup with the error interrupt
 */
static int __init wsp_setup_pci_late(void)
{
	struct wsp_phb *phb;

	list_for_each_entry(phb, &wsp_phbs, all)
		wsp_setup_pci_err_reporting(phb);

	return 0;
}
arch_initcall(wsp_setup_pci_late);