pci.c 24 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948
  1. /*
  2. * Support PCI/PCIe on PowerNV platforms
  3. *
  4. * Copyright 2011 Benjamin Herrenschmidt, IBM Corp.
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. */
  11. #include <linux/kernel.h>
  12. #include <linux/pci.h>
  13. #include <linux/delay.h>
  14. #include <linux/string.h>
  15. #include <linux/init.h>
  16. #include <linux/irq.h>
  17. #include <linux/io.h>
  18. #include <linux/msi.h>
  19. #include <linux/iommu.h>
  20. #include <asm/sections.h>
  21. #include <asm/io.h>
  22. #include <asm/prom.h>
  23. #include <asm/pci-bridge.h>
  24. #include <asm/machdep.h>
  25. #include <asm/msi_bitmap.h>
  26. #include <asm/ppc-pci.h>
  27. #include <asm/pnv-pci.h>
  28. #include <asm/opal.h>
  29. #include <asm/iommu.h>
  30. #include <asm/tce.h>
  31. #include <asm/firmware.h>
  32. #include <asm/eeh_event.h>
  33. #include <asm/eeh.h>
  34. #include "powernv.h"
  35. #include "pci.h"
/*
 * Find the OPAL slot identifier for device-tree node @np.
 *
 * The BDFN comes from the node's "reg" property; we then walk up the
 * parents until an "ibm,ioda2-phb" node supplies the PHB component via
 * its "ibm,opal-phbid" property.
 *
 * Returns 0 with *@id filled in, -ENXIO if a required property is
 * missing, or -ENODEV if no IODA2 PHB ancestor exists.
 */
int pnv_pci_get_slot_id(struct device_node *np, uint64_t *id)
{
	struct device_node *parent = np;
	u32 bdfn;
	u64 phbid;
	int ret;

	ret = of_property_read_u32(np, "reg", &bdfn);
	if (ret)
		return -ENXIO;

	/* "reg" holds the config address; extract the bus/dev/fn bits */
	bdfn = ((bdfn & 0x00ffff00) >> 8);

	/* of_get_parent() takes a reference on each node it returns */
	while ((parent = of_get_parent(parent))) {
		/* Walked out of the PCI portion of the tree: give up */
		if (!PCI_DN(parent)) {
			of_node_put(parent);
			break;
		}

		/* Keep climbing until the IODA2 PHB node is reached */
		if (!of_device_is_compatible(parent, "ibm,ioda2-phb")) {
			of_node_put(parent);
			continue;
		}

		ret = of_property_read_u64(parent, "ibm,opal-phbid", &phbid);
		if (ret) {
			of_node_put(parent);
			return -ENXIO;
		}

		/*
		 * NOTE(review): the reference taken on @parent by
		 * of_get_parent() is not dropped on this success path —
		 * looks like a leaked node refcount; confirm intent.
		 */
		*id = PCI_SLOT_ID(phbid, bdfn);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL_GPL(pnv_pci_get_slot_id);
  66. int pnv_pci_get_device_tree(uint32_t phandle, void *buf, uint64_t len)
  67. {
  68. int64_t rc;
  69. if (!opal_check_token(OPAL_GET_DEVICE_TREE))
  70. return -ENXIO;
  71. rc = opal_get_device_tree(phandle, (uint64_t)buf, len);
  72. if (rc < OPAL_SUCCESS)
  73. return -EIO;
  74. return rc;
  75. }
  76. EXPORT_SYMBOL_GPL(pnv_pci_get_device_tree);
  77. int pnv_pci_get_presence_state(uint64_t id, uint8_t *state)
  78. {
  79. int64_t rc;
  80. if (!opal_check_token(OPAL_PCI_GET_PRESENCE_STATE))
  81. return -ENXIO;
  82. rc = opal_pci_get_presence_state(id, (uint64_t)state);
  83. if (rc != OPAL_SUCCESS)
  84. return -EIO;
  85. return 0;
  86. }
  87. EXPORT_SYMBOL_GPL(pnv_pci_get_presence_state);
  88. int pnv_pci_get_power_state(uint64_t id, uint8_t *state)
  89. {
  90. int64_t rc;
  91. if (!opal_check_token(OPAL_PCI_GET_POWER_STATE))
  92. return -ENXIO;
  93. rc = opal_pci_get_power_state(id, (uint64_t)state);
  94. if (rc != OPAL_SUCCESS)
  95. return -EIO;
  96. return 0;
  97. }
  98. EXPORT_SYMBOL_GPL(pnv_pci_get_power_state);
/*
 * Set the power state of PCI slot @id. The OPAL call may complete
 * synchronously or asynchronously; in the async case we block for the
 * completion message and, if the caller supplied @msg, copy the
 * message out and return 1 so the caller can inspect the result.
 *
 * Returns 0 on immediate success, 1 when @msg was filled in from an
 * async completion, or a negative errno on failure.
 */
int pnv_pci_set_power_state(uint64_t id, uint8_t state, struct opal_msg *msg)
{
	struct opal_msg m;
	int token, ret;
	int64_t rc;

	if (!opal_check_token(OPAL_PCI_SET_POWER_STATE))
		return -ENXIO;

	/* Async token pairs this request with its completion message */
	token = opal_async_get_token_interruptible();
	if (unlikely(token < 0))
		return token;

	rc = opal_pci_set_power_state(token, id, (uint64_t)&state);
	if (rc == OPAL_SUCCESS) {
		ret = 0;
		goto exit;
	} else if (rc != OPAL_ASYNC_COMPLETION) {
		ret = -EIO;
		goto exit;
	}

	/* Firmware went async: wait for the completion message */
	ret = opal_async_wait_response(token, &m);
	if (ret < 0)
		goto exit;

	if (msg) {
		ret = 1;
		memcpy(msg, &m, sizeof(m));
	}
exit:
	/* Token must be released on every path once acquired */
	opal_async_release_token(token);
	return ret;
}
EXPORT_SYMBOL_GPL(pnv_pci_set_power_state);
  129. #ifdef CONFIG_PCI_MSI
/*
 * Arch hook for MSI/MSI-X allocation. For each requested vector:
 * allocate a hardware MSI from the PHB's bitmap, map it to a linux
 * irq, have the PHB-specific msi_setup compose the MSI message, then
 * bind the descriptor and write the message. On failure the current
 * vector's resources are released (earlier vectors are left for the
 * caller to tear down via pnv_teardown_msi_irqs).
 */
int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
	struct msi_desc *entry;
	struct msi_msg msg;
	int hwirq;
	unsigned int virq;
	int rc;

	if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
		return -ENODEV;

	/* Devices limited to 32-bit MSI need PHB 32-bit MSI support */
	if (pdev->no_64bit_msi && !phb->msi32_support)
		return -ENODEV;

	for_each_pci_msi_entry(entry, pdev) {
		if (!entry->msi_attrib.is_64 && !phb->msi32_support) {
			pr_warn("%s: Supports only 64-bit MSIs\n",
				pci_name(pdev));
			return -ENXIO;
		}
		hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
		if (hwirq < 0) {
			pr_warn("%s: Failed to find a free MSI\n",
				pci_name(pdev));
			return -ENOSPC;
		}
		virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
		if (!virq) {
			pr_warn("%s: Failed to map MSI to linux irq\n",
				pci_name(pdev));
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return -ENOMEM;
		}
		rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
				    virq, entry->msi_attrib.is_64, &msg);
		if (rc) {
			pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
			irq_dispose_mapping(virq);
			msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
			return rc;
		}
		irq_set_msi_desc(virq, entry);
		pci_write_msi_msg(virq, &msg);
	}
	return 0;
}
  175. void pnv_teardown_msi_irqs(struct pci_dev *pdev)
  176. {
  177. struct pci_controller *hose = pci_bus_to_host(pdev->bus);
  178. struct pnv_phb *phb = hose->private_data;
  179. struct msi_desc *entry;
  180. irq_hw_number_t hwirq;
  181. if (WARN_ON(!phb))
  182. return;
  183. for_each_pci_msi_entry(entry, pdev) {
  184. if (!entry->irq)
  185. continue;
  186. hwirq = virq_to_hw(entry->irq);
  187. irq_set_msi_desc(entry->irq, NULL);
  188. irq_dispose_mapping(entry->irq);
  189. msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq - phb->msi_base, 1);
  190. }
  191. }
  192. #endif /* CONFIG_PCI_MSI */
/*
 * Pretty-print a P7IOC-format PHB diag-data blob. Each register group
 * is printed only when at least one of its fields is non-zero, to keep
 * the log readable; all fields in the blob are big-endian.
 */
static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
					 struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoP7IOCPhbErrorData *data;
	int i;

	data = (struct OpalIoP7IOCPhbErrorData *)common;
	pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	/* Bridge control and UTL status */
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));

	/* Root-port config-space status and AER registers */
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));

	/* P7IOC-specific PHB state and LEM (local error macro) */
	if (data->p7iocPlssr || data->p7iocCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->p7iocPlssr),
			be64_to_cpu(data->p7iocCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));

	/* PHB error banks: generic, outbound MMIO, inbound DMA A/B */
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* PEST entries: only print PEs whose valid (MSB) bit is set */
	for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
/*
 * Pretty-print a PHB3-format diag-data blob. Same scheme as the P7IOC
 * dumper: each register group is printed only when non-zero, and all
 * fields in the blob are big-endian.
 */
static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
					struct OpalIoPhbErrorCommon *common)
{
	struct OpalIoPhb3ErrorData *data;
	int i;

	data = (struct OpalIoPhb3ErrorData*)common;
	pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
		hose->global_number, be32_to_cpu(common->version));

	/* Bridge control and UTL status */
	if (data->brdgCtl)
		pr_info("brdgCtl: %08x\n",
			be32_to_cpu(data->brdgCtl));
	if (data->portStatusReg || data->rootCmplxStatus ||
	    data->busAgentStatus)
		pr_info("UtlSts: %08x %08x %08x\n",
			be32_to_cpu(data->portStatusReg),
			be32_to_cpu(data->rootCmplxStatus),
			be32_to_cpu(data->busAgentStatus));

	/* Root-port config-space status and AER registers */
	if (data->deviceStatus || data->slotStatus ||
	    data->linkStatus || data->devCmdStatus ||
	    data->devSecStatus)
		pr_info("RootSts: %08x %08x %08x %08x %08x\n",
			be32_to_cpu(data->deviceStatus),
			be32_to_cpu(data->slotStatus),
			be32_to_cpu(data->linkStatus),
			be32_to_cpu(data->devCmdStatus),
			be32_to_cpu(data->devSecStatus));
	if (data->rootErrorStatus || data->uncorrErrorStatus ||
	    data->corrErrorStatus)
		pr_info("RootErrSts: %08x %08x %08x\n",
			be32_to_cpu(data->rootErrorStatus),
			be32_to_cpu(data->uncorrErrorStatus),
			be32_to_cpu(data->corrErrorStatus));
	if (data->tlpHdr1 || data->tlpHdr2 ||
	    data->tlpHdr3 || data->tlpHdr4)
		pr_info("RootErrLog: %08x %08x %08x %08x\n",
			be32_to_cpu(data->tlpHdr1),
			be32_to_cpu(data->tlpHdr2),
			be32_to_cpu(data->tlpHdr3),
			be32_to_cpu(data->tlpHdr4));
	if (data->sourceId || data->errorClass ||
	    data->correlator)
		pr_info("RootErrLog1: %08x %016llx %016llx\n",
			be32_to_cpu(data->sourceId),
			be64_to_cpu(data->errorClass),
			be64_to_cpu(data->correlator));

	/* PHB3 nest FIR, PHB state and LEM (local error macro) */
	if (data->nFir)
		pr_info("nFir: %016llx %016llx %016llx\n",
			be64_to_cpu(data->nFir),
			be64_to_cpu(data->nFirMask),
			be64_to_cpu(data->nFirWOF));
	if (data->phbPlssr || data->phbCsr)
		pr_info("PhbSts: %016llx %016llx\n",
			be64_to_cpu(data->phbPlssr),
			be64_to_cpu(data->phbCsr));
	if (data->lemFir)
		pr_info("Lem: %016llx %016llx %016llx\n",
			be64_to_cpu(data->lemFir),
			be64_to_cpu(data->lemErrorMask),
			be64_to_cpu(data->lemWOF));

	/* PHB error banks: generic, outbound MMIO, inbound DMA A/B */
	if (data->phbErrorStatus)
		pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->phbErrorStatus),
			be64_to_cpu(data->phbFirstErrorStatus),
			be64_to_cpu(data->phbErrorLog0),
			be64_to_cpu(data->phbErrorLog1));
	if (data->mmioErrorStatus)
		pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->mmioErrorStatus),
			be64_to_cpu(data->mmioFirstErrorStatus),
			be64_to_cpu(data->mmioErrorLog0),
			be64_to_cpu(data->mmioErrorLog1));
	if (data->dma0ErrorStatus)
		pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma0ErrorStatus),
			be64_to_cpu(data->dma0FirstErrorStatus),
			be64_to_cpu(data->dma0ErrorLog0),
			be64_to_cpu(data->dma0ErrorLog1));
	if (data->dma1ErrorStatus)
		pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
			be64_to_cpu(data->dma1ErrorStatus),
			be64_to_cpu(data->dma1FirstErrorStatus),
			be64_to_cpu(data->dma1ErrorLog0),
			be64_to_cpu(data->dma1ErrorLog1));

	/* PEST entries: only print PEs whose valid (MSB) bit is set */
	for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
		if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
		    (be64_to_cpu(data->pestB[i]) >> 63) == 0)
			continue;

		pr_info("PE[%3d] A/B: %016llx %016llx\n",
			i, be64_to_cpu(data->pestA[i]),
			be64_to_cpu(data->pestB[i]));
	}
}
  372. void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
  373. unsigned char *log_buff)
  374. {
  375. struct OpalIoPhbErrorCommon *common;
  376. if (!hose || !log_buff)
  377. return;
  378. common = (struct OpalIoPhbErrorCommon *)log_buff;
  379. switch (be32_to_cpu(common->ioType)) {
  380. case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
  381. pnv_pci_dump_p7ioc_diag_data(hose, common);
  382. break;
  383. case OPAL_PHB_ERROR_DATA_TYPE_PHB3:
  384. pnv_pci_dump_phb3_diag_data(hose, common);
  385. break;
  386. default:
  387. pr_warn("%s: Unrecognized ioType %d\n",
  388. __func__, be32_to_cpu(common->ioType));
  389. }
  390. }
/*
 * Clear the frozen state of @pe_no after a config access detected a
 * freeze, optionally dumping the PHB diag-data. Runs under the PHB
 * lock because phb->diag.blob is a shared buffer.
 */
static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
{
	unsigned long flags, rc;
	int has_diag, ret = 0;

	spin_lock_irqsave(&phb->lock, flags);

	/* Fetch PHB diag-data */
	rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
					 PNV_PCI_DIAG_BUF_SIZE);
	has_diag = (rc == OPAL_SUCCESS);

	/* If PHB supports compound PE, to handle it */
	if (phb->unfreeze_pe) {
		ret = phb->unfreeze_pe(phb,
				       pe_no,
				       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
	} else {
		rc = opal_pci_eeh_freeze_clear(phb->opal_id,
					       pe_no,
					       OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
		if (rc) {
			pr_warn("%s: Failure %ld clearing frozen "
				"PHB#%x-PE#%x\n",
				__func__, rc, phb->hose->global_number,
				pe_no);
			ret = -EIO;
		}
	}

	/*
	 * For now, let's only display the diag buffer when we fail to clear
	 * the EEH status. We'll do more sensible things later when we have
	 * proper EEH support. We need to make sure we don't pollute ourselves
	 * with the normal errors generated when probing empty slots
	 */
	if (has_diag && ret)
		pnv_pci_dump_phb_diag_data(phb->hose, phb->diag.blob);

	spin_unlock_irqrestore(&phb->lock, flags);
}
/*
 * Poll the EEH freeze state of @pdn's PE after a config access (used
 * when full EEH is not handling the device). If the PE is frozen in
 * any MMIO/DMA combination, freeze it explicitly on compound-PE PHBs
 * and hand off to pnv_pci_handle_eeh_config() to clear it.
 */
static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u8 fstate;
	__be16 pcierr;
	unsigned int pe_no;
	s64 rc;

	/*
	 * Get the PE#. During the PCI probe stage, we might not
	 * setup that yet. So all ER errors should be mapped to
	 * reserved PE.
	 */
	pe_no = pdn->pe_number;
	if (pe_no == IODA_INVALID_PE) {
		pe_no = phb->ioda.reserved_pe_idx;
	}

	/*
	 * Fetch frozen state. If the PHB support compound PE,
	 * we need handle that case.
	 */
	if (phb->get_pe_state) {
		fstate = phb->get_pe_state(phb, pe_no);
	} else {
		rc = opal_pci_eeh_freeze_status(phb->opal_id,
						pe_no,
						&fstate,
						&pcierr,
						NULL);
		if (rc) {
			pr_warn("%s: Failure %lld getting PHB#%x-PE#%x state\n",
				__func__, rc, phb->hose->global_number, pe_no);
			return;
		}
	}

	pr_devel(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
		 (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);

	/* Clear the frozen state if applicable */
	if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_DMA_FREEZE ||
	    fstate == OPAL_EEH_STOPPED_MMIO_DMA_FREEZE) {
		/*
		 * If PHB supports compound PE, freeze it for
		 * consistency.
		 */
		if (phb->freeze_pe)
			phb->freeze_pe(phb, pe_no);

		pnv_pci_handle_eeh_config(phb, pe_no);
	}
}
/*
 * Read @size bytes of config space at offset @where via OPAL.
 *
 * On OPAL failure the all-ones pattern appropriate for @size is stored
 * in *@val (standard "device absent" semantics) and the function still
 * reports PCIBIOS_SUCCESSFUL; only an unsupported @size fails with
 * PCIBIOS_FUNC_NOT_SUPPORTED.
 */
int pnv_pci_cfg_read(struct pci_dn *pdn,
		     int where, int size, u32 *val)
{
	struct pnv_phb *phb = pdn->phb->private_data;
	u32 bdfn = (pdn->busno << 8) | pdn->devfn;
	s64 rc;

	switch (size) {
	case 1: {
		u8 v8;

		rc = opal_pci_config_read_byte(phb->opal_id, bdfn, where, &v8);
		*val = (rc == OPAL_SUCCESS) ? v8 : 0xff;
		break;
	}
	case 2: {
		__be16 v16;

		/* OPAL returns big-endian data for wider reads */
		rc = opal_pci_config_read_half_word(phb->opal_id, bdfn, where,
						    &v16);
		*val = (rc == OPAL_SUCCESS) ? be16_to_cpu(v16) : 0xffff;
		break;
	}
	case 4: {
		__be32 v32;

		rc = opal_pci_config_read_word(phb->opal_id, bdfn, where, &v32);
		*val = (rc == OPAL_SUCCESS) ? be32_to_cpu(v32) : 0xffffffff;
		break;
	}
	default:
		return PCIBIOS_FUNC_NOT_SUPPORTED;
	}

	pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
		 __func__, pdn->busno, pdn->devfn, where, size, *val);
	return PCIBIOS_SUCCESSFUL;
}
  509. int pnv_pci_cfg_write(struct pci_dn *pdn,
  510. int where, int size, u32 val)
  511. {
  512. struct pnv_phb *phb = pdn->phb->private_data;
  513. u32 bdfn = (pdn->busno << 8) | pdn->devfn;
  514. pr_devel("%s: bus: %x devfn: %x +%x/%x -> %08x\n",
  515. __func__, pdn->busno, pdn->devfn, where, size, val);
  516. switch (size) {
  517. case 1:
  518. opal_pci_config_write_byte(phb->opal_id, bdfn, where, val);
  519. break;
  520. case 2:
  521. opal_pci_config_write_half_word(phb->opal_id, bdfn, where, val);
  522. break;
  523. case 4:
  524. opal_pci_config_write_word(phb->opal_id, bdfn, where, val);
  525. break;
  526. default:
  527. return PCIBIOS_FUNC_NOT_SUPPORTED;
  528. }
  529. return PCIBIOS_SUCCESSFUL;
  530. }
  531. #if CONFIG_EEH
  532. static bool pnv_pci_cfg_check(struct pci_dn *pdn)
  533. {
  534. struct eeh_dev *edev = NULL;
  535. struct pnv_phb *phb = pdn->phb->private_data;
  536. /* EEH not enabled ? */
  537. if (!(phb->flags & PNV_PHB_FLAG_EEH))
  538. return true;
  539. /* PE reset or device removed ? */
  540. edev = pdn->edev;
  541. if (edev) {
  542. if (edev->pe &&
  543. (edev->pe->state & EEH_PE_CFG_BLOCKED))
  544. return false;
  545. if (edev->mode & EEH_DEV_REMOVED)
  546. return false;
  547. }
  548. return true;
  549. }
  550. #else
  551. static inline pnv_pci_cfg_check(struct pci_dn *pdn)
  552. {
  553. return true;
  554. }
  555. #endif /* CONFIG_EEH */
/*
 * pci_ops.read hook. Refuses the access when the device's PE is
 * blocked for reset or the device was removed. With EEH active on the
 * PHB, an all-ones read value is cross-checked with
 * eeh_dev_check_failure(); otherwise the freeze state is polled by
 * hand after the access.
 */
static int pnv_pci_read_config(struct pci_bus *bus,
			       unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct pci_dn *pdn;
	struct pnv_phb *phb;
	int ret;

	/* Default to "no device" pattern in case we bail out early */
	*val = 0xFFFFFFFF;
	pdn = pci_get_pdn_by_devfn(bus, devfn);
	if (!pdn)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (!pnv_pci_cfg_check(pdn))
		return PCIBIOS_DEVICE_NOT_FOUND;

	ret = pnv_pci_cfg_read(pdn, where, size, val);
	phb = pdn->phb->private_data;
	if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
		/* All-ones may mean a frozen PE: let EEH decide */
		if (*val == EEH_IO_ERROR_VALUE(size) &&
		    eeh_dev_check_failure(pdn->edev))
			return PCIBIOS_DEVICE_NOT_FOUND;
	} else {
		pnv_pci_config_check_eeh(pdn);
	}

	return ret;
}
  580. static int pnv_pci_write_config(struct pci_bus *bus,
  581. unsigned int devfn,
  582. int where, int size, u32 val)
  583. {
  584. struct pci_dn *pdn;
  585. struct pnv_phb *phb;
  586. int ret;
  587. pdn = pci_get_pdn_by_devfn(bus, devfn);
  588. if (!pdn)
  589. return PCIBIOS_DEVICE_NOT_FOUND;
  590. if (!pnv_pci_cfg_check(pdn))
  591. return PCIBIOS_DEVICE_NOT_FOUND;
  592. ret = pnv_pci_cfg_write(pdn, where, size, val);
  593. phb = pdn->phb->private_data;
  594. if (!(phb->flags & PNV_PHB_FLAG_EEH))
  595. pnv_pci_config_check_eeh(pdn);
  596. return ret;
  597. }
/* Config-space accessors hooked into the generic PCI core */
struct pci_ops pnv_pci_ops = {
	.read = pnv_pci_read_config,
	.write = pnv_pci_write_config,
};
/*
 * Return a pointer to the TCE for @idx in @tbl, walking any indirect
 * TCE levels: with N indirect levels the upper-level entries hold the
 * addresses of next-level tables and only the last level holds the
 * real TCEs. @idx is relative to the start of the table.
 */
static __be64 *pnv_tce(struct iommu_table *tbl, long idx)
{
	__be64 *tmp = ((__be64 *)tbl->it_base);
	int level = tbl->it_indirect_levels;
	const long shift = ilog2(tbl->it_level_size);
	/* Mask selecting the current level's index bits within idx */
	unsigned long mask = (tbl->it_level_size - 1) << (level * shift);

	while (level) {
		int n = (idx & mask) >> (level * shift);
		unsigned long tce = be64_to_cpu(tmp[n]);

		/*
		 * Entry holds the physical address of the next-level
		 * table; strip the R/W permission bits before mapping.
		 */
		tmp = __va(tce & ~(TCE_PCI_READ | TCE_PCI_WRITE));
		idx &= ~mask;
		mask >>= shift;
		--level;
	}

	return tmp + idx;
}
/*
 * Populate @npages TCEs starting at table index @index, mapping the
 * contiguous kernel buffer at @uaddr with permissions derived from
 * @direction. Always returns 0.
 */
int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
		  unsigned long uaddr, enum dma_data_direction direction,
		  unsigned long attrs)
{
	u64 proto_tce = iommu_direction_to_tce_perm(direction);
	u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
	long i;

	/* Write permission implies read permission for PCI TCEs */
	if (proto_tce & TCE_PCI_WRITE)
		proto_tce |= TCE_PCI_READ;

	for (i = 0; i < npages; i++) {
		unsigned long newtce = proto_tce |
			((rpn + i) << tbl->it_page_shift);
		unsigned long idx = index - tbl->it_offset + i;

		/* TCEs are stored big-endian in the table */
		*(pnv_tce(tbl, idx)) = cpu_to_be64(newtce);
	}

	return 0;
}
#ifdef CONFIG_IOMMU_API
/*
 * Atomically replace the TCE at @index with a mapping for *@hpa and
 * *@direction, returning the previous host address and direction
 * through the same pointers. Always returns 0.
 */
int pnv_tce_xchg(struct iommu_table *tbl, long index,
		 unsigned long *hpa, enum dma_data_direction *direction)
{
	u64 proto_tce = iommu_direction_to_tce_perm(*direction);
	unsigned long newtce = *hpa | proto_tce, oldtce;
	unsigned long idx = index - tbl->it_offset;

	/* Caller must supply an IOMMU-page-aligned host address */
	BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));

	/* Write permission implies read permission for PCI TCEs */
	if (newtce & TCE_PCI_WRITE)
		newtce |= TCE_PCI_READ;

	oldtce = be64_to_cpu(xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce)));
	*hpa = oldtce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	*direction = iommu_tce_direction(oldtce);

	return 0;
}
#endif
  651. void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
  652. {
  653. long i;
  654. for (i = 0; i < npages; i++) {
  655. unsigned long idx = index - tbl->it_offset + i;
  656. *(pnv_tce(tbl, idx)) = cpu_to_be64(0);
  657. }
  658. }
/*
 * Read back the raw TCE at table index @index.
 *
 * NOTE(review): entries are stored big-endian (see pnv_tce_build) but
 * are returned here without be64_to_cpu(), so on little-endian hosts
 * the caller gets the byte-swapped on-table value — confirm whether
 * callers expect the raw format.
 */
unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
{
	return *(pnv_tce(tbl, index - tbl->it_offset));
}
  663. struct iommu_table *pnv_pci_table_alloc(int nid)
  664. {
  665. struct iommu_table *tbl;
  666. tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, nid);
  667. INIT_LIST_HEAD_RCU(&tbl->it_group_list);
  668. return tbl;
  669. }
  670. long pnv_pci_link_table_and_group(int node, int num,
  671. struct iommu_table *tbl,
  672. struct iommu_table_group *table_group)
  673. {
  674. struct iommu_table_group_link *tgl = NULL;
  675. if (WARN_ON(!tbl || !table_group))
  676. return -EINVAL;
  677. tgl = kzalloc_node(sizeof(struct iommu_table_group_link), GFP_KERNEL,
  678. node);
  679. if (!tgl)
  680. return -ENOMEM;
  681. tgl->table_group = table_group;
  682. list_add_rcu(&tgl->next, &tbl->it_group_list);
  683. table_group->tables[num] = tbl;
  684. return 0;
  685. }
  686. static void pnv_iommu_table_group_link_free(struct rcu_head *head)
  687. {
  688. struct iommu_table_group_link *tgl = container_of(head,
  689. struct iommu_table_group_link, rcu);
  690. kfree(tgl);
  691. }
/*
 * Detach @tbl from @table_group, undoing pnv_pci_link_table_and_group():
 * the matching link is removed from the table's RCU list (and freed
 * after a grace period) and the table pointer is cleared from the
 * group's tables[] array. WARNs if either side of the linkage is
 * missing.
 */
void pnv_pci_unlink_table_and_group(struct iommu_table *tbl,
		struct iommu_table_group *table_group)
{
	long i;
	bool found;
	struct iommu_table_group_link *tgl;

	if (!tbl || !table_group)
		return;

	/* Remove link to a group from table's list of attached groups */
	found = false;
	list_for_each_entry_rcu(tgl, &tbl->it_group_list, next) {
		if (tgl->table_group == table_group) {
			list_del_rcu(&tgl->next);
			/* Defer the kfree until readers have drained */
			call_rcu(&tgl->rcu, pnv_iommu_table_group_link_free);
			found = true;
			break;
		}
	}
	if (WARN_ON(!found))
		return;

	/* Clean a pointer to iommu_table in iommu_table_group::tables[] */
	found = false;
	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		if (table_group->tables[i] == tbl) {
			table_group->tables[i] = NULL;
			found = true;
			break;
		}
	}
	WARN_ON(!found);
}
  723. void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
  724. void *tce_mem, u64 tce_size,
  725. u64 dma_offset, unsigned page_shift)
  726. {
  727. tbl->it_blocksize = 16;
  728. tbl->it_base = (unsigned long)tce_mem;
  729. tbl->it_page_shift = page_shift;
  730. tbl->it_offset = dma_offset >> tbl->it_page_shift;
  731. tbl->it_index = 0;
  732. tbl->it_size = tce_size >> 3;
  733. tbl->it_busno = 0;
  734. tbl->it_type = TCE_PCI;
  735. }
/*
 * Per-device DMA setup hook. For virtual functions the pci_dn PE
 * number is fixed up here (VF pdns are created before their PE is
 * configured); then the PHB-specific dma_dev_setup callback, if any,
 * is invoked.
 */
void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct pnv_phb *phb = hose->private_data;
#ifdef CONFIG_PCI_IOV
	struct pnv_ioda_pe *pe;
	struct pci_dn *pdn;

	/* Fix the VF pdn PE number */
	if (pdev->is_virtfn) {
		pdn = pci_get_pdn(pdev);
		WARN_ON(pdn->pe_number != IODA_INVALID_PE);
		/* Find the PE whose RID matches this VF's bus/devfn */
		list_for_each_entry(pe, &phb->ioda.pe_list, list) {
			if (pe->rid == ((pdev->bus->number << 8) |
			    (pdev->devfn & 0xff))) {
				pdn->pe_number = pe->pe_number;
				pe->pdev = pdev;
				break;
			}
		}
	}
#endif /* CONFIG_PCI_IOV */

	if (phb && phb->dma_dev_setup)
		phb->dma_dev_setup(phb, pdev);
}
  760. void pnv_pci_dma_bus_setup(struct pci_bus *bus)
  761. {
  762. struct pci_controller *hose = bus->sysdata;
  763. struct pnv_phb *phb = hose->private_data;
  764. struct pnv_ioda_pe *pe;
  765. list_for_each_entry(pe, &phb->ioda.pe_list, list) {
  766. if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
  767. continue;
  768. if (!pe->pbus)
  769. continue;
  770. if (bus->number == ((pe->rid >> 8) & 0xFF)) {
  771. pe->pbus = bus;
  772. break;
  773. }
  774. }
  775. }
  776. void pnv_pci_shutdown(void)
  777. {
  778. struct pci_controller *hose;
  779. list_for_each_entry(hose, &hose_list, list_node)
  780. if (hose->controller_ops.shutdown)
  781. hose->controller_ops.shutdown(hose);
  782. }
/*
 * Fixup wrong class code in p7ioc and p8 root complex (IBM device
 * 0x3b9): force PCI-to-PCI bridge class so the core treats the root
 * port as a bridge.
 */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
  789. void __init pnv_pci_init(void)
  790. {
  791. struct device_node *np;
  792. pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
  793. /* If we don't have OPAL, eg. in sim, just skip PCI probe */
  794. if (!firmware_has_feature(FW_FEATURE_OPAL))
  795. return;
  796. /* Look for IODA IO-Hubs. */
  797. for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
  798. pnv_pci_init_ioda_hub(np);
  799. }
  800. /* Look for ioda2 built-in PHB3's */
  801. for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
  802. pnv_pci_init_ioda2_phb(np);
  803. /* Look for ioda3 built-in PHB4's, we treat them as IODA2 */
  804. for_each_compatible_node(np, NULL, "ibm,ioda3-phb")
  805. pnv_pci_init_ioda2_phb(np);
  806. /* Look for NPU PHBs */
  807. for_each_compatible_node(np, NULL, "ibm,ioda2-npu-phb")
  808. pnv_pci_init_npu_phb(np);
  809. /* Configure IOMMU DMA hooks */
  810. set_pci_dma_ops(&dma_iommu_ops);
  811. }
/* Register the TCE IOMMU bus notifier once core subsystems are up */
machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);