/* pci_sun4v.c: SUN4V specific PCI controller support.
 *
 * Copyright (C) 2006, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/log2.h>
#include <linux/of_device.h>
#include <linux/iommu-common.h>

#include <asm/iommu.h>
#include <asm/irq.h>
#include <asm/hypervisor.h>
#include <asm/prom.h>

#include "pci_impl.h"
#include "iommu_common.h"

#include "pci_sun4v.h"

#define DRIVER_NAME	"pci_sun4v"
#define PFX		DRIVER_NAME ": "

static unsigned long vpci_major;
static unsigned long vpci_minor;

struct vpci_version {
	unsigned long major;
	unsigned long minor;
};

/* Ordered from largest major to lowest */
static struct vpci_version vpci_versions[] = {
	{ .major = 2, .minor = 0 },
	{ .major = 1, .minor = 1 },
};

static unsigned long vatu_major = 1;
static unsigned long vatu_minor = 1;

#define PGLIST_NENTS	(PAGE_SIZE / sizeof(u64))
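
/* DMA mappings are established in batches to reduce the number of
 * hypervisor calls: each CPU keeps a page-sized list of physical page
 * addresses (PGLIST_NENTS entries) that is handed to the hypervisor in
 * a single map call when it fills up or the batch is ended.  The batch
 * helpers below therefore require interrupts to be disabled by the
 * caller, since the state is per-CPU.
 */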
struct iommu_batch {
	struct device	*dev;		/* Device mapping is for.	*/
	unsigned long	prot;		/* IOMMU page protections	*/
	unsigned long	entry;		/* Index into IOTSB.		*/
	u64		*pglist;	/* List of physical pages	*/
	unsigned long	npages;		/* Number of pages in list.	*/
};

static DEFINE_PER_CPU(struct iommu_batch, iommu_batch);
static int iommu_batch_initialized;

/* Interrupts must be disabled.  */
static inline void iommu_batch_start(struct device *dev, unsigned long prot, unsigned long entry)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	p->dev		= dev;
	p->prot		= prot;
	p->entry	= entry;
	p->npages	= 0;
}

/* Interrupts must be disabled.  */
static long iommu_batch_flush(struct iommu_batch *p, u64 mask)
{
	struct pci_pbm_info *pbm = p->dev->archdata.host_controller;
	u64 *pglist = p->pglist;
	u64 index_count;
	unsigned long devhandle = pbm->devhandle;
	unsigned long prot = p->prot;
	unsigned long entry = p->entry;
	unsigned long npages = p->npages;
	unsigned long iotsb_num;
	unsigned long ret;
	long num;

	/* VPCI maj=1, min=[0,1] only supports read and write */
	if (vpci_major < 2)
		prot &= (HV_PCI_MAP_ATTR_READ | HV_PCI_MAP_ATTR_WRITE);

	while (npages != 0) {
		if (mask <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_map(devhandle,
						  HV_PCI_TSBID(0, entry),
						  npages,
						  prot,
						  __pa(pglist));
			if (unlikely(num < 0)) {
				pr_err_ratelimited("%s: IOMMU map of [%08lx:%08llx:%lx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle,
						   HV_PCI_TSBID(0, entry),
						   npages, prot, __pa(pglist),
						   num);
				return -1;
			}
		} else {
			index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry);
			iotsb_num = pbm->iommu->atu->iotsb->iotsb_num;
			ret = pci_sun4v_iotsb_map(devhandle,
						  iotsb_num,
						  index_count,
						  prot,
						  __pa(pglist),
						  &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("%s: ATU map of [%08lx:%lx:%llx:%lx:%lx] failed with status %ld\n",
						   __func__,
						   devhandle, iotsb_num,
						   index_count, prot,
						   __pa(pglist), ret);
				return -1;
			}
		}
		entry += num;
		npages -= num;
		pglist += num;
	}

	p->entry = entry;
	p->npages = 0;

	return 0;
}

static inline void iommu_batch_new_entry(unsigned long entry, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	if (p->entry + p->npages == entry)
		return;
	if (p->entry != ~0UL)
		iommu_batch_flush(p, mask);
	p->entry = entry;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_add(u64 phys_page, u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	p->pglist[p->npages++] = phys_page;
	if (p->npages == PGLIST_NENTS)
		return iommu_batch_flush(p, mask);

	return 0;
}

/* Interrupts must be disabled.  */
static inline long iommu_batch_end(u64 mask)
{
	struct iommu_batch *p = this_cpu_ptr(&iommu_batch);

	BUG_ON(p->npages >= PGLIST_NENTS);

	return iommu_batch_flush(p, mask);
}
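
/* Coherent allocations come from the page allocator and are then mapped
 * through either the legacy IOMMU table (coherent DMA mask of 32 bits or
 * less) or the ATU table (larger masks), using the batching helpers
 * above with both read and write access enabled.
 */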
static void *dma_4v_alloc_coherent(struct device *dev, size_t size,
				   dma_addr_t *dma_addrp, gfp_t gfp,
				   unsigned long attrs)
{
	u64 mask;
	unsigned long flags, order, first_page, npages, n;
	unsigned long prot = 0;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	struct page *page;
	void *ret;
	long entry;
	int nid;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	npages = size >> IO_PAGE_SHIFT;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot = HV_PCI_MAP_ATTR_RELAXED_ORDER;

	nid = dev->archdata.numa_node;
	page = alloc_pages_node(nid, gfp, order);
	if (unlikely(!page))
		return NULL;

	first_page = (unsigned long) page_address(page);
	memset((char *)first_page, 0, PAGE_SIZE << order);

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	mask = dev->coherent_dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto range_alloc_fail;

	*dma_addrp = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	first_page = __pa(first_page);

	local_irq_save(flags);

	iommu_batch_start(dev,
			  (HV_PCI_MAP_ATTR_READ | prot |
			   HV_PCI_MAP_ATTR_WRITE),
			  entry);

	for (n = 0; n < npages; n++) {
		long err = iommu_batch_add(first_page + (n * PAGE_SIZE), mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}

	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, *dma_addrp, npages, IOMMU_ERROR_CODE);

range_alloc_fail:
	free_pages(first_page, order);
	return NULL;
}
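
/* Bind every device below @bus_dev to the shared IOTSB identified by
 * @iotsb_num.  Bridges themselves are not bound; the walk recurses into
 * their subordinate buses instead.  A single failure aborts the walk,
 * since all devices share the same IOTSB.
 */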
unsigned long dma_4v_iotsb_bind(unsigned long devhandle,
				unsigned long iotsb_num,
				struct pci_bus *bus_dev)
{
	struct pci_dev *pdev;
	unsigned long err;
	unsigned int bus;
	unsigned int device;
	unsigned int fun;

	list_for_each_entry(pdev, &bus_dev->devices, bus_list) {
		if (pdev->subordinate) {
			/* No need to bind pci bridge */
			dma_4v_iotsb_bind(devhandle, iotsb_num,
					  pdev->subordinate);
		} else {
			bus = bus_dev->number;
			device = PCI_SLOT(pdev->devfn);
			fun = PCI_FUNC(pdev->devfn);
			err = pci_sun4v_iotsb_bind(devhandle, iotsb_num,
						   HV_PCI_DEVICE_BUILD(bus,
								       device,
								       fun));

			/* If the bind fails for one device it is going to
			 * fail for the rest of the devices too, because we
			 * are sharing the IOTSB.  So in case of failure
			 * simply return the error.
			 */
			if (err)
				return err;
		}
	}

	return 0;
}
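
/* Tear down @npages mappings starting at @entry.  DMA addresses at or
 * below the 32-bit boundary go through the legacy IOMMU demap call,
 * anything above it through the ATU IOTSB demap call.
 */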
static void dma_4v_iommu_demap(struct device *dev, unsigned long devhandle,
			       dma_addr_t dvma, unsigned long iotsb_num,
			       unsigned long entry, unsigned long npages)
{
	unsigned long num, flags;
	unsigned long ret;

	local_irq_save(flags);
	do {
		if (dvma <= DMA_BIT_MASK(32)) {
			num = pci_sun4v_iommu_demap(devhandle,
						    HV_PCI_TSBID(0, entry),
						    npages);
		} else {
			ret = pci_sun4v_iotsb_demap(devhandle, iotsb_num,
						    entry, npages, &num);
			if (unlikely(ret != HV_EOK)) {
				pr_err_ratelimited("pci_iotsb_demap() failed with error: %ld\n",
						   ret);
			}
		}
		entry += num;
		npages -= num;
	} while (npages != 0);
	local_irq_restore(flags);
}

static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu,
				 dma_addr_t dvma, unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long order, npages, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	if (dvma <= DMA_BIT_MASK(32)) {
		tbl = &iommu->tbl;
		iotsb_num = 0; /* we don't care for legacy iommu */
	} else {
		tbl = &atu->tbl;
		iotsb_num = atu->iotsb->iotsb_num;
	}
	entry = ((dvma - tbl->table_map_base) >> IO_PAGE_SHIFT);
	dma_4v_iommu_demap(dev, devhandle, dvma, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, dvma, npages, IOMMU_ERROR_CODE);
	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
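
/* Streaming mapping of a single page range.  The translation table is
 * chosen from the device's DMA mask; read access is always granted and
 * write access is added unless the transfer is DMA_TO_DEVICE only.
 */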
static dma_addr_t dma_4v_map_page(struct device *dev, struct page *page,
				  unsigned long offset, size_t sz,
				  enum dma_data_direction direction,
				  unsigned long attrs)
{
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr;
	unsigned long prot;
	dma_addr_t bus_addr, ret;
	long entry;

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (unlikely(direction == DMA_NONE))
		goto bad;

	oaddr = (unsigned long)(page_address(page) + offset);
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	entry = iommu_tbl_range_alloc(dev, tbl, npages, NULL,
				      (unsigned long)(-1), 0);

	if (unlikely(entry == IOMMU_ERROR_CODE))
		goto bad;

	bus_addr = (tbl->table_map_base + (entry << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, entry);

	for (i = 0; i < npages; i++, base_paddr += IO_PAGE_SIZE) {
		long err = iommu_batch_add(base_paddr, mask);
		if (unlikely(err < 0L))
			goto iommu_map_fail;
	}
	if (unlikely(iommu_batch_end(mask) < 0L))
		goto iommu_map_fail;

	local_irq_restore(flags);

	return ret;

bad:
	if (printk_ratelimit())
		WARN_ON(1);
	return DMA_ERROR_CODE;

iommu_map_fail:
	local_irq_restore(flags);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
	return DMA_ERROR_CODE;
}

static void dma_4v_unmap_page(struct device *dev, dma_addr_t bus_addr,
			      size_t sz, enum dma_data_direction direction,
			      unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	unsigned long npages;
	unsigned long iotsb_num;
	long entry;
	u32 devhandle;

	if (unlikely(direction == DMA_NONE)) {
		if (printk_ratelimit())
			WARN_ON(1);
		return;
	}

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	if (bus_addr <= DMA_BIT_MASK(32)) {
		iotsb_num = 0; /* we don't care for legacy iommu */
		tbl = &iommu->tbl;
	} else {
		iotsb_num = atu->iotsb->iotsb_num;
		tbl = &atu->tbl;
	}
	entry = (bus_addr - tbl->table_map_base) >> IO_PAGE_SHIFT;
	dma_4v_iommu_demap(dev, devhandle, bus_addr, iotsb_num, entry, npages);
	iommu_tbl_range_free(tbl, bus_addr, npages, IOMMU_ERROR_CODE);
}
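
/* Map a scatterlist.  Entries are allocated from the table selected by
 * the device's DMA mask, and adjacent list elements are merged into one
 * DMA segment whenever the allocated addresses are contiguous and
 * neither the device's maximum segment size nor its segment boundary
 * would be exceeded.
 */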
static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
			 int nelems, enum dma_data_direction direction,
			 unsigned long attrs)
{
	struct scatterlist *s, *outs, *segstart;
	unsigned long flags, handle, prot;
	dma_addr_t dma_next = 0, dma_addr;
	unsigned int max_seg_size;
	unsigned long seg_boundary_size;
	int outcount, incount, i;
	struct iommu *iommu;
	struct atu *atu;
	struct iommu_map_table *tbl;
	u64 mask;
	unsigned long base_shift;
	long err;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	atu = iommu->atu;

	if (nelems == 0 || !iommu)
		return 0;

	prot = HV_PCI_MAP_ATTR_READ;
	if (direction != DMA_TO_DEVICE)
		prot |= HV_PCI_MAP_ATTR_WRITE;

	if (attrs & DMA_ATTR_WEAK_ORDERING)
		prot |= HV_PCI_MAP_ATTR_RELAXED_ORDER;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	local_irq_save(flags);

	iommu_batch_start(dev, prot, ~0UL);

	max_seg_size = dma_get_max_seg_size(dev);
	seg_boundary_size = ALIGN(dma_get_seg_boundary(dev) + 1,
				  IO_PAGE_SIZE) >> IO_PAGE_SHIFT;

	mask = *dev->dma_mask;
	if (mask <= DMA_BIT_MASK(32))
		tbl = &iommu->tbl;
	else
		tbl = &atu->tbl;

	base_shift = tbl->table_map_base >> IO_PAGE_SHIFT;

	for_each_sg(sglist, s, nelems, i) {
		unsigned long paddr, npages, entry, out_entry = 0, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		paddr = (unsigned long) SG_ENT_PHYS_ADDRESS(s);
		npages = iommu_num_pages(paddr, slen, IO_PAGE_SIZE);
		entry = iommu_tbl_range_alloc(dev, tbl, npages,
					      &handle, (unsigned long)(-1), 0);

		/* Handle failure */
		if (unlikely(entry == IOMMU_ERROR_CODE)) {
			pr_err_ratelimited("iommu_alloc failed, iommu %p paddr %lx npages %lx\n",
					   tbl, paddr, npages);
			goto iommu_map_failed;
		}

		iommu_batch_new_entry(entry, mask);

		/* Convert entry to a dma_addr_t */
		dma_addr = tbl->table_map_base + (entry << IO_PAGE_SHIFT);
		dma_addr |= (s->offset & ~IO_PAGE_MASK);

		/* Insert into HW table */
		paddr &= IO_PAGE_MASK;
		while (npages--) {
			err = iommu_batch_add(paddr, mask);
			if (unlikely(err < 0L))
				goto iommu_map_failed;
			paddr += IO_PAGE_SIZE;
		}

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if ((dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size) ||
			    (is_span_boundary(out_entry, base_shift,
					      seg_boundary_size, outs, s))) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
			} else {
				outs->dma_length += s->length;
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
			out_entry = entry;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;
	}

	err = iommu_batch_end(mask);

	if (unlikely(err < 0L))
		goto iommu_map_failed;

	local_irq_restore(flags);

	if (outcount < incount) {
		outs = sg_next(outs);
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	return outcount;

iommu_map_failed:
	for_each_sg(sglist, s, nelems, i) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IO_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length,
						 IO_PAGE_SIZE);
			iommu_tbl_range_free(tbl, vaddr, npages,
					     IOMMU_ERROR_CODE);
			/* XXX demap? XXX */
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
		if (s == outs)
			break;
	}
	local_irq_restore(flags);

	return 0;
}

static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
			    int nelems, enum dma_data_direction direction,
			    unsigned long attrs)
{
	struct pci_pbm_info *pbm;
	struct scatterlist *sg;
	struct iommu *iommu;
	struct atu *atu;
	unsigned long flags, entry;
	unsigned long iotsb_num;
	u32 devhandle;

	BUG_ON(direction == DMA_NONE);

	iommu = dev->archdata.iommu;
	pbm = dev->archdata.host_controller;
	atu = iommu->atu;
	devhandle = pbm->devhandle;

	local_irq_save(flags);

	sg = sglist;
	while (nelems--) {
		dma_addr_t dma_handle = sg->dma_address;
		unsigned int len = sg->dma_length;
		unsigned long npages;
		struct iommu_map_table *tbl;
		unsigned long shift = IO_PAGE_SHIFT;

		if (!len)
			break;
		npages = iommu_num_pages(dma_handle, len, IO_PAGE_SIZE);

		if (dma_handle <= DMA_BIT_MASK(32)) {
			iotsb_num = 0; /* we don't care for legacy iommu */
			tbl = &iommu->tbl;
		} else {
			iotsb_num = atu->iotsb->iotsb_num;
			tbl = &atu->tbl;
		}
		entry = ((dma_handle - tbl->table_map_base) >> shift);
		dma_4v_iommu_demap(dev, devhandle, dma_handle, iotsb_num,
				   entry, npages);
		iommu_tbl_range_free(tbl, dma_handle, npages,
				     IOMMU_ERROR_CODE);
		sg = sg_next(sg);
	}

	local_irq_restore(flags);
}
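
/* DMA API entry points for sun4v PCI.  This table is installed as the
 * global dma_ops from pci_sun4v_probe(), so dma_map_page(), dma_map_sg()
 * and friends called by drivers end up in the routines above.
 */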
static struct dma_map_ops sun4v_dma_ops = {
	.alloc				= dma_4v_alloc_coherent,
	.free				= dma_4v_free_coherent,
	.map_page			= dma_4v_map_page,
	.unmap_page			= dma_4v_unmap_page,
	.map_sg				= dma_4v_map_sg,
	.unmap_sg			= dma_4v_unmap_sg,
};

static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent)
{
	struct property *prop;
	struct device_node *dp;

	dp = pbm->op->dev.of_node;
	prop = of_find_property(dp, "66mhz-capable", NULL);
	pbm->is_66mhz_capable = (prop != NULL);
	pbm->pci_bus = pci_scan_one_pbm(pbm, parent);

	/* XXX register error interrupt handlers XXX */
}

static unsigned long probe_existing_entries(struct pci_pbm_info *pbm,
					    struct iommu_map_table *iommu)
{
	struct iommu_pool *pool;
	unsigned long i, pool_nr, cnt = 0;
	u32 devhandle;

	devhandle = pbm->devhandle;
	for (pool_nr = 0; pool_nr < iommu->nr_pools; pool_nr++) {
		pool = &(iommu->pools[pool_nr]);
		for (i = pool->start; i <= pool->end; i++) {
			unsigned long ret, io_attrs, ra;

			ret = pci_sun4v_iommu_getmap(devhandle,
						     HV_PCI_TSBID(0, i),
						     &io_attrs, &ra);
			if (ret == HV_EOK) {
				if (page_in_phys_avail(ra)) {
					pci_sun4v_iommu_demap(devhandle,
							      HV_PCI_TSBID(0, i),
							      1);
				} else {
					cnt++;
					__set_bit(i, iommu->map);
				}
			}
		}
	}
	return cnt;
}
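
/* The ATU IOTSB is a flat table with one 8-byte IOTTE per IO page of the
 * mapped range.  For example (assuming the usual 8K sparc64 IO page
 * size), the fixed 32G ATU_64_SPACE_SIZE needs 32G / 8K = 4M entries,
 * i.e. a 32MB physically contiguous table.
 */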
static int pci_sun4v_atu_alloc_iotsb(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	struct atu_iotsb *iotsb;
	void *table;
	u64 table_size;
	u64 iotsb_num;
	unsigned long order;
	unsigned long err;

	iotsb = kzalloc(sizeof(*iotsb), GFP_KERNEL);
	if (!iotsb) {
		err = -ENOMEM;
		goto out_err;
	}
	atu->iotsb = iotsb;

	/* calculate size of IOTSB */
	table_size = (atu->size / IO_PAGE_SIZE) * 8;
	order = get_order(table_size);
	table = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!table) {
		err = -ENOMEM;
		goto table_failed;
	}
	iotsb->table = table;
	iotsb->ra = __pa(table);
	iotsb->dvma_size = atu->size;
	iotsb->dvma_base = atu->base;
	iotsb->table_size = table_size;
	iotsb->page_size = IO_PAGE_SIZE;

	/* configure and register IOTSB with HV */
	err = pci_sun4v_iotsb_conf(pbm->devhandle,
				   iotsb->ra,
				   iotsb->table_size,
				   iotsb->page_size,
				   iotsb->dvma_base,
				   &iotsb_num);
	if (err) {
		pr_err(PFX "pci_iotsb_conf failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}
	iotsb->iotsb_num = iotsb_num;

	err = dma_4v_iotsb_bind(pbm->devhandle, iotsb_num, pbm->pci_bus);
	if (err) {
		pr_err(PFX "pci_iotsb_bind failed error: %ld\n", err);
		goto iotsb_conf_failed;
	}

	return 0;

iotsb_conf_failed:
	free_pages((unsigned long)table, order);
table_failed:
	kfree(iotsb);
out_err:
	return err;
}

static int pci_sun4v_atu_init(struct pci_pbm_info *pbm)
{
	struct atu *atu = pbm->iommu->atu;
	unsigned long err;
	const u64 *ranges;
	u64 map_size, num_iotte;
	u64 dma_mask;
	const u32 *page_size;
	int len;

	ranges = of_get_property(pbm->op->dev.of_node, "iommu-address-ranges",
				 &len);
	if (!ranges) {
		pr_err(PFX "No iommu-address-ranges\n");
		return -EINVAL;
	}

	page_size = of_get_property(pbm->op->dev.of_node, "iommu-pagesizes",
				    NULL);
	if (!page_size) {
		pr_err(PFX "No iommu-pagesizes\n");
		return -EINVAL;
	}

	/* There are 4 iommu-address-ranges supported.  Each range is a pair
	 * of {base, size}.  The ranges[0] and ranges[1] are 32bit address
	 * space while ranges[2] and ranges[3] are 64bit space.  We want to
	 * use the 64bit address ranges for 64bit addressing.  Because the
	 * 'size' of ranges[2] and ranges[3] is the same, we can select
	 * either of them for mapping.  However, since that 'size' is too
	 * large for the OS to allocate an IOTSB for, we use a fixed 32G
	 * size (ATU_64_SPACE_SIZE), which is more than enough for all PCIe
	 * devices to share.
	 */
	atu->ranges = (struct atu_ranges *)ranges;
	atu->base = atu->ranges[3].base;
	atu->size = ATU_64_SPACE_SIZE;

	/* Create IOTSB */
	err = pci_sun4v_atu_alloc_iotsb(pbm);
	if (err) {
		pr_err(PFX "Error creating ATU IOTSB\n");
		return err;
	}

	/* Create ATU iommu map.
	 * One bit represents one iotte in IOTSB table.
	 */
	dma_mask = (roundup_pow_of_two(atu->size) - 1UL);
	num_iotte = atu->size / IO_PAGE_SIZE;
	map_size = num_iotte / 8;
	atu->tbl.table_map_base = atu->base;
	atu->dma_addr_mask = dma_mask;
	atu->tbl.map = kzalloc(map_size, GFP_KERNEL);
	if (!atu->tbl.map)
		return -ENOMEM;

	iommu_tbl_pool_init(&atu->tbl, num_iotte, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);

	return 0;
}
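
/* Set up the legacy (32-bit) IOMMU from the "virtual-dma" OF property,
 * a {base, size} pair describing the DVMA window (the default is a 2GB
 * window at 0x80000000).  Entries already programmed by OBP that do not
 * point at kernel-usable memory are imported by probe_existing_entries()
 * and marked busy so they are never handed out again.
 */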
static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm)
{
	static const u32 vdma_default[] = { 0x80000000, 0x80000000 };
	struct iommu *iommu = pbm->iommu;
	unsigned long num_tsb_entries, sz;
	u32 dma_mask, dma_offset;
	const u32 *vdma;

	vdma = of_get_property(pbm->op->dev.of_node, "virtual-dma", NULL);
	if (!vdma)
		vdma = vdma_default;

	if ((vdma[0] | vdma[1]) & ~IO_PAGE_MASK) {
		printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n",
		       vdma[0], vdma[1]);
		return -EINVAL;
	}

	dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL);
	num_tsb_entries = vdma[1] / IO_PAGE_SIZE;
	dma_offset = vdma[0];

	/* Setup initial software IOMMU state. */
	spin_lock_init(&iommu->lock);
	iommu->ctx_lowest_free = 1;
	iommu->tbl.table_map_base = dma_offset;
	iommu->dma_addr_mask = dma_mask;

	/* Allocate and initialize the free area map. */
	sz = (num_tsb_entries + 7) / 8;
	sz = (sz + 7UL) & ~7UL;
	iommu->tbl.map = kzalloc(sz, GFP_KERNEL);
	if (!iommu->tbl.map) {
		printk(KERN_ERR PFX "Error, kmalloc(arena.map) failed.\n");
		return -ENOMEM;
	}
	iommu_tbl_pool_init(&iommu->tbl, num_tsb_entries, IO_PAGE_SHIFT,
			    NULL, false /* no large_pool */,
			    0 /* default npools */,
			    false /* want span boundary checking */);
	sz = probe_existing_entries(pbm, &iommu->tbl);
	if (sz)
		printk("%s: Imported %lu TSB entries from OBP\n",
		       pbm->name, sz);

	return 0;
}

#ifdef CONFIG_PCI_MSI
struct pci_sun4v_msiq_entry {
	u64		version_type;
#define MSIQ_VERSION_MASK		0xffffffff00000000UL
#define MSIQ_VERSION_SHIFT		32
#define MSIQ_TYPE_MASK			0x00000000000000ffUL
#define MSIQ_TYPE_SHIFT			0
#define MSIQ_TYPE_NONE			0x00
#define MSIQ_TYPE_MSG			0x01
#define MSIQ_TYPE_MSI32			0x02
#define MSIQ_TYPE_MSI64			0x03
#define MSIQ_TYPE_INTX			0x08
#define MSIQ_TYPE_NONE2			0xff

	u64		intx_sysino;
	u64		reserved1;
	u64		stick;
	u64		req_id;  /* bus/device/func */
#define MSIQ_REQID_BUS_MASK		0xff00UL
#define MSIQ_REQID_BUS_SHIFT		8
#define MSIQ_REQID_DEVICE_MASK		0x00f8UL
#define MSIQ_REQID_DEVICE_SHIFT		3
#define MSIQ_REQID_FUNC_MASK		0x0007UL
#define MSIQ_REQID_FUNC_SHIFT		0

	u64		msi_address;

	/* The format of this value is message type dependent.
	 * For MSI bits 15:0 are the data from the MSI packet.
	 * For MSI-X bits 31:0 are the data from the MSI packet.
	 * For MSG, the message code and message routing code where:
	 *	bits 39:32 is the bus/device/fn of the msg target-id
	 *	bits 18:16 is the message routing code
	 *	bits 7:0 is the message code
	 * For INTx the low order 2-bits are:
	 *	00 - INTA
	 *	01 - INTB
	 *	10 - INTC
	 *	11 - INTD
	 */
	u64		msi_data;

	u64		reserved2;
};
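
/* MSI event queue helpers used by the generic sparc64 MSI layer.  Each
 * queue is an array of pci_sun4v_msiq_entry records written by the
 * hypervisor; the head is fetched with pci_sun4v_msiq_gethead(), MSI
 * entries are consumed and cleared one at a time, and the updated head
 * is written back with pci_sun4v_msiq_sethead() to acknowledge them.
 */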
static int pci_sun4v_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long *head)
{
	unsigned long err, limit;

	err = pci_sun4v_msiq_gethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -ENXIO;

	limit = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	if (unlikely(*head >= limit))
		return -EFBIG;

	return 0;
}

static int pci_sun4v_dequeue_msi(struct pci_pbm_info *pbm,
				 unsigned long msiqid, unsigned long *head,
				 unsigned long *msi)
{
	struct pci_sun4v_msiq_entry *ep;
	unsigned long err, type;

	/* Note: void pointer arithmetic, 'head' is a byte offset  */
	ep = (pbm->msi_queues + ((msiqid - pbm->msiq_first) *
				 (pbm->msiq_ent_count *
				  sizeof(struct pci_sun4v_msiq_entry))) +
	      *head);

	if ((ep->version_type & MSIQ_TYPE_MASK) == 0)
		return 0;

	type = (ep->version_type & MSIQ_TYPE_MASK) >> MSIQ_TYPE_SHIFT;
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = ep->msi_data;

	err = pci_sun4v_msi_setstate(pbm->devhandle,
				     ep->msi_data /* msi_num */,
				     HV_MSISTATE_IDLE);
	if (unlikely(err))
		return -ENXIO;

	/* Clear the entry.  */
	ep->version_type &= ~MSIQ_TYPE_MASK;

	(*head) += sizeof(struct pci_sun4v_msiq_entry);
	if (*head >=
	    (pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry)))
		*head = 0;

	return 1;
}

static int pci_sun4v_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long head)
{
	unsigned long err;

	err = pci_sun4v_msiq_sethead(pbm->devhandle, msiqid, head);
	if (unlikely(err))
		return -EINVAL;

	return 0;
}

static int pci_sun4v_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			       unsigned long msi, int is_msi64)
{
	if (pci_sun4v_msi_setmsiq(pbm->devhandle, msi, msiqid,
				  (is_msi64 ?
				   HV_MSITYPE_MSI64 : HV_MSITYPE_MSI32)))
		return -ENXIO;
	if (pci_sun4v_msi_setstate(pbm->devhandle, msi, HV_MSISTATE_IDLE))
		return -ENXIO;
	if (pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_VALID))
		return -ENXIO;
	return 0;
}

static int pci_sun4v_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	unsigned long err, msiqid;

	err = pci_sun4v_msi_getmsiq(pbm->devhandle, msi, &msiqid);
	if (err)
		return -ENXIO;

	pci_sun4v_msi_setvalid(pbm->devhandle, msi, HV_MSIVALID_INVALID);

	return 0;
}

static int pci_sun4v_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long err, base = __pa(pages + (i * q_size));
		unsigned long ret1, ret2;

		err = pci_sun4v_msiq_conf(pbm->devhandle,
					  pbm->msiq_first + i,
					  base, pbm->msiq_ent_count);
		if (err) {
			printk(KERN_ERR "MSI: msiq register fails (err=%lu)\n",
			       err);
			goto h_error;
		}

		err = pci_sun4v_msiq_info(pbm->devhandle,
					  pbm->msiq_first + i,
					  &ret1, &ret2);
		if (err) {
			printk(KERN_ERR "MSI: Cannot read msiq (err=%lu)\n",
			       err);
			goto h_error;
		}
		if (ret1 != base || ret2 != pbm->msiq_ent_count) {
			printk(KERN_ERR "MSI: Bogus qconf "
			       "expected[%lx:%x] got[%lx:%lx]\n",
			       base, pbm->msiq_ent_count,
			       ret1, ret2);
			goto h_error;
		}
	}

	return 0;

h_error:
	free_pages(pages, order);
	return -EINVAL;
}

static void pci_sun4v_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long q_size, alloc_size, pages, order;
	int i;

	for (i = 0; i < pbm->msiq_num; i++) {
		unsigned long msiqid = pbm->msiq_first + i;

		(void) pci_sun4v_msiq_conf(pbm->devhandle, msiqid, 0UL, 0);
	}

	q_size = pbm->msiq_ent_count * sizeof(struct pci_sun4v_msiq_entry);
	alloc_size = (pbm->msiq_num * q_size);
	order = get_order(alloc_size);

	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm,
				    unsigned long msiqid,
				    unsigned long devino)
{
	unsigned int irq = sun4v_build_irq(pbm->devhandle, devino);

	if (!irq)
		return -ENOMEM;

	if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID))
		return -EINVAL;
	if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE))
		return -EINVAL;

	return irq;
}

static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = {
	.get_head	= pci_sun4v_get_head,
	.dequeue_msi	= pci_sun4v_dequeue_msi,
	.set_head	= pci_sun4v_set_head,
	.msi_setup	= pci_sun4v_msi_setup,
	.msi_teardown	= pci_sun4v_msi_teardown,
	.msiq_alloc	= pci_sun4v_msiq_alloc,
	.msiq_free	= pci_sun4v_msiq_free,
	.msiq_build_irq	= pci_sun4v_msiq_build_irq,
};

static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_sun4v_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
static void pci_sun4v_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm,
			      struct platform_device *op, u32 devhandle)
{
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = of_node_to_nid(dp);

	pbm->pci_ops = &sun4v_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->op = op;

	pbm->devhandle = devhandle;

	pbm->name = dp->full_name;

	printk("%s: SUN4V PCI Bus Module\n", pbm->name);
	printk("%s: On NUMA node %d\n", pbm->name, pbm->numa_node);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	err = pci_sun4v_iommu_init(pbm);
	if (err)
		return err;

	pci_sun4v_msi_init(pbm);

	pci_sun4v_scan_bus(pbm, &op->dev);

	/* If atu_init fails it's not a complete failure; we can still
	 * continue using the legacy iommu.
	 */
	if (pbm->iommu->atu) {
		err = pci_sun4v_atu_init(pbm);
		if (err) {
			kfree(pbm->iommu->atu);
			pbm->iommu->atu = NULL;
			pr_err(PFX "ATU init failed, err=%d\n", err);
		}
	}

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}
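
/* Probe one sun4v PCI host bridge node.  On the first call the
 * hypervisor PCI and ATU API groups are negotiated (trying the highest
 * supported VPCI version first) and sun4v_dma_ops is installed; then
 * per-node state (pbm, iommu and, if ATU hcalls are available, atu) is
 * allocated and initialised.
 */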
static int pci_sun4v_probe(struct platform_device *op)
{
	const struct linux_prom64_registers *regs;
	static int hvapi_negotiated = 0;
	struct pci_pbm_info *pbm;
	struct device_node *dp;
	struct iommu *iommu;
	struct atu *atu;
	u32 devhandle;
	int i, err = -ENODEV;
	static bool hv_atu = true;

	dp = op->dev.of_node;

	if (!hvapi_negotiated++) {
		for (i = 0; i < ARRAY_SIZE(vpci_versions); i++) {
			vpci_major = vpci_versions[i].major;
			vpci_minor = vpci_versions[i].minor;

			err = sun4v_hvapi_register(HV_GRP_PCI, vpci_major,
						   &vpci_minor);
			if (!err)
				break;
		}

		if (err) {
			pr_err(PFX "Could not register hvapi, err=%d\n", err);
			return err;
		}
		pr_info(PFX "Registered hvapi major[%lu] minor[%lu]\n",
			vpci_major, vpci_minor);

		err = sun4v_hvapi_register(HV_GRP_ATU, vatu_major, &vatu_minor);
		if (err) {
			/* don't return an error if we fail to register the
			 * ATU group, but ATU hcalls won't be available.
			 */
			hv_atu = false;
		} else {
			pr_info(PFX "Registered hvapi ATU major[%lu] minor[%lu]\n",
				vatu_major, vatu_minor);
		}

		dma_ops = &sun4v_dma_ops;
	}

	regs = of_get_property(dp, "reg", NULL);
	err = -ENODEV;
	if (!regs) {
		printk(KERN_ERR PFX "Could not find config registers\n");
		goto out_err;
	}
	devhandle = (regs->phys_addr >> 32UL) & 0x0fffffff;

	err = -ENOMEM;
	if (!iommu_batch_initialized) {
		for_each_possible_cpu(i) {
			unsigned long page = get_zeroed_page(GFP_KERNEL);

			if (!page)
				goto out_err;

			per_cpu(iommu_batch, i).pglist = (u64 *) page;
		}
		iommu_batch_initialized = 1;
	}

	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Could not allocate pci_pbm_info\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Could not allocate pbm iommu\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;
	iommu->atu = NULL;
	if (hv_atu) {
		atu = kzalloc(sizeof(*atu), GFP_KERNEL);
		if (!atu)
			pr_err(PFX "Could not allocate atu\n");
		else
			iommu->atu = atu;
	}

	err = pci_sun4v_pbm_init(pbm, op, devhandle);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(iommu->atu);
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id pci_sun4v_match[] = {
	{
		.name = "pci",
		.compatible = "SUNW,sun4v-pci",
	},
	{},
};

static struct platform_driver pci_sun4v_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = pci_sun4v_match,
	},
	.probe		= pci_sun4v_probe,
};

static int __init pci_sun4v_init(void)
{
	return platform_driver_register(&pci_sun4v_driver);
}

subsys_initcall(pci_sun4v_init);