vme.c 37 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655
  1. /*
  2. * VME Bridge Framework
  3. *
  4. * Author: Martyn Welch <martyn.welch@ge.com>
  5. * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
  6. *
  7. * Based on work by Tom Armistead and Ajit Prem
  8. * Copyright 2004 Motorola Inc.
  9. *
  10. * This program is free software; you can redistribute it and/or modify it
  11. * under the terms of the GNU General Public License as published by the
  12. * Free Software Foundation; either version 2 of the License, or (at your
  13. * option) any later version.
  14. */
  15. #include <linux/init.h>
  16. #include <linux/export.h>
  17. #include <linux/mm.h>
  18. #include <linux/types.h>
  19. #include <linux/kernel.h>
  20. #include <linux/errno.h>
  21. #include <linux/pci.h>
  22. #include <linux/poll.h>
  23. #include <linux/highmem.h>
  24. #include <linux/interrupt.h>
  25. #include <linux/pagemap.h>
  26. #include <linux/device.h>
  27. #include <linux/dma-mapping.h>
  28. #include <linux/syscalls.h>
  29. #include <linux/mutex.h>
  30. #include <linux/spinlock.h>
  31. #include <linux/slab.h>
  32. #include <linux/vme.h>
  33. #include "vme_bridge.h"
/* Bitmask and list of registered buses both protected by common mutex */
static unsigned int vme_bus_numbers;	/* bitmask of bus numbers in use */
static LIST_HEAD(vme_bus_list);		/* registered bridges */
static DEFINE_MUTEX(vme_buses_lock);	/* guards the two objects above */

/* Forward declaration: vme_init() is referenced before its definition. */
static int __init vme_init(void);
/* Convert an embedded struct device back to its containing vme_dev. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
  43. /*
  44. * Find the bridge that the resource is associated with.
  45. */
  46. static struct vme_bridge *find_bridge(struct vme_resource *resource)
  47. {
  48. /* Get list to search */
  49. switch (resource->type) {
  50. case VME_MASTER:
  51. return list_entry(resource->entry, struct vme_master_resource,
  52. list)->parent;
  53. break;
  54. case VME_SLAVE:
  55. return list_entry(resource->entry, struct vme_slave_resource,
  56. list)->parent;
  57. break;
  58. case VME_DMA:
  59. return list_entry(resource->entry, struct vme_dma_resource,
  60. list)->parent;
  61. break;
  62. case VME_LM:
  63. return list_entry(resource->entry, struct vme_lm_resource,
  64. list)->parent;
  65. break;
  66. default:
  67. printk(KERN_ERR "Unknown resource type\n");
  68. return NULL;
  69. break;
  70. }
  71. }
  72. /*
  73. * Allocate a contiguous block of memory for use by the driver. This is used to
  74. * create the buffers for the slave windows.
  75. */
  76. void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  77. dma_addr_t *dma)
  78. {
  79. struct vme_bridge *bridge;
  80. if (resource == NULL) {
  81. printk(KERN_ERR "No resource\n");
  82. return NULL;
  83. }
  84. bridge = find_bridge(resource);
  85. if (bridge == NULL) {
  86. printk(KERN_ERR "Can't find bridge\n");
  87. return NULL;
  88. }
  89. if (bridge->parent == NULL) {
  90. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  91. return NULL;
  92. }
  93. if (bridge->alloc_consistent == NULL) {
  94. printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
  95. bridge->name);
  96. return NULL;
  97. }
  98. return bridge->alloc_consistent(bridge->parent, size, dma);
  99. }
  100. EXPORT_SYMBOL(vme_alloc_consistent);
  101. /*
  102. * Free previously allocated contiguous block of memory.
  103. */
  104. void vme_free_consistent(struct vme_resource *resource, size_t size,
  105. void *vaddr, dma_addr_t dma)
  106. {
  107. struct vme_bridge *bridge;
  108. if (resource == NULL) {
  109. printk(KERN_ERR "No resource\n");
  110. return;
  111. }
  112. bridge = find_bridge(resource);
  113. if (bridge == NULL) {
  114. printk(KERN_ERR "Can't find bridge\n");
  115. return;
  116. }
  117. if (bridge->parent == NULL) {
  118. printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
  119. return;
  120. }
  121. if (bridge->free_consistent == NULL) {
  122. printk(KERN_ERR "free_consistent not supported by bridge %s\n",
  123. bridge->name);
  124. return;
  125. }
  126. bridge->free_consistent(bridge->parent, size, vaddr, dma);
  127. }
  128. EXPORT_SYMBOL(vme_free_consistent);
  129. size_t vme_get_size(struct vme_resource *resource)
  130. {
  131. int enabled, retval;
  132. unsigned long long base, size;
  133. dma_addr_t buf_base;
  134. u32 aspace, cycle, dwidth;
  135. switch (resource->type) {
  136. case VME_MASTER:
  137. retval = vme_master_get(resource, &enabled, &base, &size,
  138. &aspace, &cycle, &dwidth);
  139. if (retval)
  140. return 0;
  141. return size;
  142. break;
  143. case VME_SLAVE:
  144. retval = vme_slave_get(resource, &enabled, &base, &size,
  145. &buf_base, &aspace, &cycle);
  146. if (retval)
  147. return 0;
  148. return size;
  149. break;
  150. case VME_DMA:
  151. return 0;
  152. break;
  153. default:
  154. printk(KERN_ERR "Unknown resource type\n");
  155. return 0;
  156. break;
  157. }
  158. }
  159. EXPORT_SYMBOL(vme_get_size);
  160. int vme_check_window(u32 aspace, unsigned long long vme_base,
  161. unsigned long long size)
  162. {
  163. int retval = 0;
  164. switch (aspace) {
  165. case VME_A16:
  166. if (((vme_base + size) > VME_A16_MAX) ||
  167. (vme_base > VME_A16_MAX))
  168. retval = -EFAULT;
  169. break;
  170. case VME_A24:
  171. if (((vme_base + size) > VME_A24_MAX) ||
  172. (vme_base > VME_A24_MAX))
  173. retval = -EFAULT;
  174. break;
  175. case VME_A32:
  176. if (((vme_base + size) > VME_A32_MAX) ||
  177. (vme_base > VME_A32_MAX))
  178. retval = -EFAULT;
  179. break;
  180. case VME_A64:
  181. if ((size != 0) && (vme_base > U64_MAX + 1 - size))
  182. retval = -EFAULT;
  183. break;
  184. case VME_CRCSR:
  185. if (((vme_base + size) > VME_CRCSR_MAX) ||
  186. (vme_base > VME_CRCSR_MAX))
  187. retval = -EFAULT;
  188. break;
  189. case VME_USER1:
  190. case VME_USER2:
  191. case VME_USER3:
  192. case VME_USER4:
  193. /* User Defined */
  194. break;
  195. default:
  196. printk(KERN_ERR "Invalid address space\n");
  197. retval = -EINVAL;
  198. break;
  199. }
  200. return retval;
  201. }
  202. EXPORT_SYMBOL(vme_check_window);
  203. static u32 vme_get_aspace(int am)
  204. {
  205. switch (am) {
  206. case 0x29:
  207. case 0x2D:
  208. return VME_A16;
  209. case 0x38:
  210. case 0x39:
  211. case 0x3A:
  212. case 0x3B:
  213. case 0x3C:
  214. case 0x3D:
  215. case 0x3E:
  216. case 0x3F:
  217. return VME_A24;
  218. case 0x8:
  219. case 0x9:
  220. case 0xA:
  221. case 0xB:
  222. case 0xC:
  223. case 0xD:
  224. case 0xE:
  225. case 0xF:
  226. return VME_A32;
  227. case 0x0:
  228. case 0x1:
  229. case 0x3:
  230. return VME_A64;
  231. }
  232. return 0;
  233. }
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * Scans the bridge's slave resources for one that is unlocked and whose
 * address/cycle capabilities cover the requested @address and @cycle
 * masks, claims it, and wraps it in a freshly allocated vme_resource.
 * Returns NULL when no bridge, no suitable image, or no memory.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		/* The locked flag is only read/written under mtx, so the
		 * claim (test + set) below is atomic w.r.t. other callers. */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {
			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	/* slave_image == allocated_image here: release the image we
	 * claimed above since we cannot hand it back to the caller. */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
  292. int vme_slave_set(struct vme_resource *resource, int enabled,
  293. unsigned long long vme_base, unsigned long long size,
  294. dma_addr_t buf_base, u32 aspace, u32 cycle)
  295. {
  296. struct vme_bridge *bridge = find_bridge(resource);
  297. struct vme_slave_resource *image;
  298. int retval;
  299. if (resource->type != VME_SLAVE) {
  300. printk(KERN_ERR "Not a slave resource\n");
  301. return -EINVAL;
  302. }
  303. image = list_entry(resource->entry, struct vme_slave_resource, list);
  304. if (bridge->slave_set == NULL) {
  305. printk(KERN_ERR "Function not supported\n");
  306. return -ENOSYS;
  307. }
  308. if (!(((image->address_attr & aspace) == aspace) &&
  309. ((image->cycle_attr & cycle) == cycle))) {
  310. printk(KERN_ERR "Invalid attributes\n");
  311. return -EINVAL;
  312. }
  313. retval = vme_check_window(aspace, vme_base, size);
  314. if (retval)
  315. return retval;
  316. return bridge->slave_set(image, enabled, vme_base, size, buf_base,
  317. aspace, cycle);
  318. }
  319. EXPORT_SYMBOL(vme_slave_set);
  320. int vme_slave_get(struct vme_resource *resource, int *enabled,
  321. unsigned long long *vme_base, unsigned long long *size,
  322. dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
  323. {
  324. struct vme_bridge *bridge = find_bridge(resource);
  325. struct vme_slave_resource *image;
  326. if (resource->type != VME_SLAVE) {
  327. printk(KERN_ERR "Not a slave resource\n");
  328. return -EINVAL;
  329. }
  330. image = list_entry(resource->entry, struct vme_slave_resource, list);
  331. if (bridge->slave_get == NULL) {
  332. printk(KERN_ERR "vme_slave_get not supported\n");
  333. return -EINVAL;
  334. }
  335. return bridge->slave_get(image, enabled, vme_base, size, buf_base,
  336. aspace, cycle);
  337. }
  338. EXPORT_SYMBOL(vme_slave_get);
  339. void vme_slave_free(struct vme_resource *resource)
  340. {
  341. struct vme_slave_resource *slave_image;
  342. if (resource->type != VME_SLAVE) {
  343. printk(KERN_ERR "Not a slave resource\n");
  344. return;
  345. }
  346. slave_image = list_entry(resource->entry, struct vme_slave_resource,
  347. list);
  348. if (slave_image == NULL) {
  349. printk(KERN_ERR "Can't find slave resource\n");
  350. return;
  351. }
  352. /* Unlock image */
  353. mutex_lock(&slave_image->mtx);
  354. if (slave_image->locked == 0)
  355. printk(KERN_ERR "Image is already free\n");
  356. slave_image->locked = 0;
  357. mutex_unlock(&slave_image->mtx);
  358. /* Free up resource memory */
  359. kfree(resource);
  360. }
  361. EXPORT_SYMBOL(vme_slave_free);
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Scans the bridge's master resources for one that is unlocked and whose
 * address/cycle/width capabilities cover the requested masks, claims it,
 * and wraps it in a freshly allocated vme_resource. Returns NULL when no
 * bridge, no suitable image, or no memory.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		/* locked is only accessed under the spinlock, making the
		 * test-and-claim below atomic w.r.t. other callers. */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {
			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	/* master_image == allocated_image here: release the image we
	 * claimed above since we cannot hand it back to the caller. */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
  423. int vme_master_set(struct vme_resource *resource, int enabled,
  424. unsigned long long vme_base, unsigned long long size, u32 aspace,
  425. u32 cycle, u32 dwidth)
  426. {
  427. struct vme_bridge *bridge = find_bridge(resource);
  428. struct vme_master_resource *image;
  429. int retval;
  430. if (resource->type != VME_MASTER) {
  431. printk(KERN_ERR "Not a master resource\n");
  432. return -EINVAL;
  433. }
  434. image = list_entry(resource->entry, struct vme_master_resource, list);
  435. if (bridge->master_set == NULL) {
  436. printk(KERN_WARNING "vme_master_set not supported\n");
  437. return -EINVAL;
  438. }
  439. if (!(((image->address_attr & aspace) == aspace) &&
  440. ((image->cycle_attr & cycle) == cycle) &&
  441. ((image->width_attr & dwidth) == dwidth))) {
  442. printk(KERN_WARNING "Invalid attributes\n");
  443. return -EINVAL;
  444. }
  445. retval = vme_check_window(aspace, vme_base, size);
  446. if (retval)
  447. return retval;
  448. return bridge->master_set(image, enabled, vme_base, size, aspace,
  449. cycle, dwidth);
  450. }
  451. EXPORT_SYMBOL(vme_master_set);
  452. int vme_master_get(struct vme_resource *resource, int *enabled,
  453. unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
  454. u32 *cycle, u32 *dwidth)
  455. {
  456. struct vme_bridge *bridge = find_bridge(resource);
  457. struct vme_master_resource *image;
  458. if (resource->type != VME_MASTER) {
  459. printk(KERN_ERR "Not a master resource\n");
  460. return -EINVAL;
  461. }
  462. image = list_entry(resource->entry, struct vme_master_resource, list);
  463. if (bridge->master_get == NULL) {
  464. printk(KERN_WARNING "%s not supported\n", __func__);
  465. return -EINVAL;
  466. }
  467. return bridge->master_get(image, enabled, vme_base, size, aspace,
  468. cycle, dwidth);
  469. }
  470. EXPORT_SYMBOL(vme_master_get);
  471. /*
  472. * Read data out of VME space into a buffer.
  473. */
  474. ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
  475. loff_t offset)
  476. {
  477. struct vme_bridge *bridge = find_bridge(resource);
  478. struct vme_master_resource *image;
  479. size_t length;
  480. if (bridge->master_read == NULL) {
  481. printk(KERN_WARNING "Reading from resource not supported\n");
  482. return -EINVAL;
  483. }
  484. if (resource->type != VME_MASTER) {
  485. printk(KERN_ERR "Not a master resource\n");
  486. return -EINVAL;
  487. }
  488. image = list_entry(resource->entry, struct vme_master_resource, list);
  489. length = vme_get_size(resource);
  490. if (offset > length) {
  491. printk(KERN_WARNING "Invalid Offset\n");
  492. return -EFAULT;
  493. }
  494. if ((offset + count) > length)
  495. count = length - offset;
  496. return bridge->master_read(image, buf, count, offset);
  497. }
  498. EXPORT_SYMBOL(vme_master_read);
  499. /*
  500. * Write data out to VME space from a buffer.
  501. */
  502. ssize_t vme_master_write(struct vme_resource *resource, void *buf,
  503. size_t count, loff_t offset)
  504. {
  505. struct vme_bridge *bridge = find_bridge(resource);
  506. struct vme_master_resource *image;
  507. size_t length;
  508. if (bridge->master_write == NULL) {
  509. printk(KERN_WARNING "Writing to resource not supported\n");
  510. return -EINVAL;
  511. }
  512. if (resource->type != VME_MASTER) {
  513. printk(KERN_ERR "Not a master resource\n");
  514. return -EINVAL;
  515. }
  516. image = list_entry(resource->entry, struct vme_master_resource, list);
  517. length = vme_get_size(resource);
  518. if (offset > length) {
  519. printk(KERN_WARNING "Invalid Offset\n");
  520. return -EFAULT;
  521. }
  522. if ((offset + count) > length)
  523. count = length - offset;
  524. return bridge->master_write(image, buf, count, offset);
  525. }
  526. EXPORT_SYMBOL(vme_master_write);
  527. /*
  528. * Perform RMW cycle to provided location.
  529. */
  530. unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
  531. unsigned int compare, unsigned int swap, loff_t offset)
  532. {
  533. struct vme_bridge *bridge = find_bridge(resource);
  534. struct vme_master_resource *image;
  535. if (bridge->master_rmw == NULL) {
  536. printk(KERN_WARNING "Writing to resource not supported\n");
  537. return -EINVAL;
  538. }
  539. if (resource->type != VME_MASTER) {
  540. printk(KERN_ERR "Not a master resource\n");
  541. return -EINVAL;
  542. }
  543. image = list_entry(resource->entry, struct vme_master_resource, list);
  544. return bridge->master_rmw(image, mask, compare, swap, offset);
  545. }
  546. EXPORT_SYMBOL(vme_master_rmw);
  547. int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
  548. {
  549. struct vme_master_resource *image;
  550. phys_addr_t phys_addr;
  551. unsigned long vma_size;
  552. if (resource->type != VME_MASTER) {
  553. pr_err("Not a master resource\n");
  554. return -EINVAL;
  555. }
  556. image = list_entry(resource->entry, struct vme_master_resource, list);
  557. phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
  558. vma_size = vma->vm_end - vma->vm_start;
  559. if (phys_addr + vma_size > image->bus_resource.end + 1) {
  560. pr_err("Map size cannot exceed the window size\n");
  561. return -EFAULT;
  562. }
  563. vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
  564. return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
  565. }
  566. EXPORT_SYMBOL(vme_master_mmap);
  567. void vme_master_free(struct vme_resource *resource)
  568. {
  569. struct vme_master_resource *master_image;
  570. if (resource->type != VME_MASTER) {
  571. printk(KERN_ERR "Not a master resource\n");
  572. return;
  573. }
  574. master_image = list_entry(resource->entry, struct vme_master_resource,
  575. list);
  576. if (master_image == NULL) {
  577. printk(KERN_ERR "Can't find master resource\n");
  578. return;
  579. }
  580. /* Unlock image */
  581. spin_lock(&master_image->lock);
  582. if (master_image->locked == 0)
  583. printk(KERN_ERR "Image is already free\n");
  584. master_image->locked = 0;
  585. spin_unlock(&master_image->lock);
  586. /* Free up resource memory */
  587. kfree(resource);
  588. }
  589. EXPORT_SYMBOL(vme_master_free);
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Scans the bridge's DMA resources for an unlocked controller whose
 * route capabilities cover @route, claims it, and wraps it in a freshly
 * allocated vme_resource. Returns NULL when no bridge, no suitable
 * controller, or no memory.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		/* locked is only accessed under mtx, making the
		 * test-and-claim below atomic w.r.t. other callers. */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {
			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	/* dma_ctrlr == allocated_ctrlr here: release the controller we
	 * claimed above since we cannot hand it back to the caller. */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
  648. /*
  649. * Start new list
  650. */
  651. struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
  652. {
  653. struct vme_dma_resource *ctrlr;
  654. struct vme_dma_list *dma_list;
  655. if (resource->type != VME_DMA) {
  656. printk(KERN_ERR "Not a DMA resource\n");
  657. return NULL;
  658. }
  659. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  660. dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
  661. if (dma_list == NULL) {
  662. printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
  663. return NULL;
  664. }
  665. INIT_LIST_HEAD(&dma_list->entries);
  666. dma_list->parent = ctrlr;
  667. mutex_init(&dma_list->mtx);
  668. return dma_list;
  669. }
  670. EXPORT_SYMBOL(vme_new_dma_list);
  671. /*
  672. * Create "Pattern" type attributes
  673. */
  674. struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
  675. {
  676. struct vme_dma_attr *attributes;
  677. struct vme_dma_pattern *pattern_attr;
  678. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  679. if (attributes == NULL) {
  680. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  681. goto err_attr;
  682. }
  683. pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
  684. if (pattern_attr == NULL) {
  685. printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
  686. goto err_pat;
  687. }
  688. attributes->type = VME_DMA_PATTERN;
  689. attributes->private = (void *)pattern_attr;
  690. pattern_attr->pattern = pattern;
  691. pattern_attr->type = type;
  692. return attributes;
  693. err_pat:
  694. kfree(attributes);
  695. err_attr:
  696. return NULL;
  697. }
  698. EXPORT_SYMBOL(vme_dma_pattern_attribute);
  699. /*
  700. * Create "PCI" type attributes
  701. */
  702. struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
  703. {
  704. struct vme_dma_attr *attributes;
  705. struct vme_dma_pci *pci_attr;
  706. /* XXX Run some sanity checks here */
  707. attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
  708. if (attributes == NULL) {
  709. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  710. goto err_attr;
  711. }
  712. pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
  713. if (pci_attr == NULL) {
  714. printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
  715. goto err_pci;
  716. }
  717. attributes->type = VME_DMA_PCI;
  718. attributes->private = (void *)pci_attr;
  719. pci_attr->address = address;
  720. return attributes;
  721. err_pci:
  722. kfree(attributes);
  723. err_attr:
  724. return NULL;
  725. }
  726. EXPORT_SYMBOL(vme_dma_pci_attribute);
  727. /*
  728. * Create "VME" type attributes
  729. */
  730. struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
  731. u32 aspace, u32 cycle, u32 dwidth)
  732. {
  733. struct vme_dma_attr *attributes;
  734. struct vme_dma_vme *vme_attr;
  735. attributes = kmalloc(
  736. sizeof(struct vme_dma_attr), GFP_KERNEL);
  737. if (attributes == NULL) {
  738. printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
  739. goto err_attr;
  740. }
  741. vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
  742. if (vme_attr == NULL) {
  743. printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
  744. goto err_vme;
  745. }
  746. attributes->type = VME_DMA_VME;
  747. attributes->private = (void *)vme_attr;
  748. vme_attr->address = address;
  749. vme_attr->aspace = aspace;
  750. vme_attr->cycle = cycle;
  751. vme_attr->dwidth = dwidth;
  752. return attributes;
  753. err_vme:
  754. kfree(attributes);
  755. err_attr:
  756. return NULL;
  757. }
  758. EXPORT_SYMBOL(vme_dma_vme_attribute);
/*
 * Free attribute
 *
 * Releases both the type-specific payload and the wrapper allocated by
 * one of the vme_dma_*_attribute() constructors.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	/* Payload first, then the wrapper that points at it. */
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
/*
 * Append a transfer described by @src, @dest and @count to the DMA list
 * via the bridge driver. Returns 0 or a negative errno.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	/* trylock: refuse to modify a list that another caller currently
	 * holds (e.g. one being executed) rather than blocking on it. */
	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
  786. int vme_dma_list_exec(struct vme_dma_list *list)
  787. {
  788. struct vme_bridge *bridge = list->parent->parent;
  789. int retval;
  790. if (bridge->dma_list_exec == NULL) {
  791. printk(KERN_ERR "Link List DMA execution not supported\n");
  792. return -EINVAL;
  793. }
  794. mutex_lock(&list->mtx);
  795. retval = bridge->dma_list_exec(list);
  796. mutex_unlock(&list->mtx);
  797. return retval;
  798. }
  799. EXPORT_SYMBOL(vme_dma_list_exec);
/*
 * Free a DMA link list.
 *
 * The individual entries are driver specific, so emptying is delegated
 * to the bridge driver; the list structure itself is freed only after
 * that succeeds. mutex_trylock ensures a list currently being built or
 * executed is not torn down. Returns 0 on success or a negative errno.
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
struct vme_bridge *bridge = list->parent->parent;
int retval;
if (bridge->dma_list_empty == NULL) {
printk(KERN_WARNING "Emptying of Link Lists not supported\n");
return -EINVAL;
}
if (!mutex_trylock(&list->mtx)) {
printk(KERN_ERR "Link List in use\n");
return -EINVAL;
}
/*
 * Empty out all of the entries from the DMA list. We need to go to the
 * low level driver as DMA entries are driver specific.
 */
retval = bridge->dma_list_empty(list);
if (retval) {
/* Entries could not be released; keep the list alive for the caller */
printk(KERN_ERR "Unable to empty link-list entries\n");
mutex_unlock(&list->mtx);
return retval;
}
mutex_unlock(&list->mtx);
kfree(list);
return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
  827. int vme_dma_free(struct vme_resource *resource)
  828. {
  829. struct vme_dma_resource *ctrlr;
  830. if (resource->type != VME_DMA) {
  831. printk(KERN_ERR "Not a DMA resource\n");
  832. return -EINVAL;
  833. }
  834. ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
  835. if (!mutex_trylock(&ctrlr->mtx)) {
  836. printk(KERN_ERR "Resource busy, can't free\n");
  837. return -EBUSY;
  838. }
  839. if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
  840. printk(KERN_WARNING "Resource still processing transfers\n");
  841. mutex_unlock(&ctrlr->mtx);
  842. return -EBUSY;
  843. }
  844. ctrlr->locked = 0;
  845. mutex_unlock(&ctrlr->mtx);
  846. kfree(resource);
  847. return 0;
  848. }
  849. EXPORT_SYMBOL(vme_dma_free);
  850. void vme_bus_error_handler(struct vme_bridge *bridge,
  851. unsigned long long address, int am)
  852. {
  853. struct list_head *handler_pos = NULL;
  854. struct vme_error_handler *handler;
  855. int handler_triggered = 0;
  856. u32 aspace = vme_get_aspace(am);
  857. list_for_each(handler_pos, &bridge->vme_error_handlers) {
  858. handler = list_entry(handler_pos, struct vme_error_handler,
  859. list);
  860. if ((aspace == handler->aspace) &&
  861. (address >= handler->start) &&
  862. (address < handler->end)) {
  863. if (!handler->num_errors)
  864. handler->first_error = address;
  865. if (handler->num_errors != UINT_MAX)
  866. handler->num_errors++;
  867. handler_triggered = 1;
  868. }
  869. }
  870. if (!handler_triggered)
  871. dev_err(bridge->parent,
  872. "Unhandled VME access error at address 0x%llx\n",
  873. address);
  874. }
  875. EXPORT_SYMBOL(vme_bus_error_handler);
  876. struct vme_error_handler *vme_register_error_handler(
  877. struct vme_bridge *bridge, u32 aspace,
  878. unsigned long long address, size_t len)
  879. {
  880. struct vme_error_handler *handler;
  881. handler = kmalloc(sizeof(*handler), GFP_KERNEL);
  882. if (!handler)
  883. return NULL;
  884. handler->aspace = aspace;
  885. handler->start = address;
  886. handler->end = address + len;
  887. handler->num_errors = 0;
  888. handler->first_error = 0;
  889. list_add_tail(&handler->list, &bridge->vme_error_handlers);
  890. return handler;
  891. }
  892. EXPORT_SYMBOL(vme_register_error_handler);
/*
 * Remove a handler installed by vme_register_error_handler() and free it.
 */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
list_del(&handler->list);
kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
  899. void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
  900. {
  901. void (*call)(int, int, void *);
  902. void *priv_data;
  903. call = bridge->irq[level - 1].callback[statid].func;
  904. priv_data = bridge->irq[level - 1].callback[statid].priv_data;
  905. if (call != NULL)
  906. call(level, statid, priv_data);
  907. else
  908. printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
  909. level, statid);
  910. }
  911. EXPORT_SYMBOL(vme_irq_handler);
/*
 * Attach a callback to a VME interrupt (level 1-7, status/ID vector
 * statid) and enable that interrupt level on the bridge.
 *
 * Returns 0 on success, -EINVAL for bad arguments or missing bridge
 * support, -EBUSY if the (level, statid) slot is already taken.
 * NOTE(review): statid is not range-checked before indexing the
 * callback array - callers are trusted to pass a valid vector.
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
void (*callback)(int, int, void *),
void *priv_data)
{
struct vme_bridge *bridge;
bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return -EINVAL;
}
if ((level < 1) || (level > 7)) {
printk(KERN_ERR "Invalid interrupt level\n");
return -EINVAL;
}
if (bridge->irq_set == NULL) {
printk(KERN_ERR "Configuring interrupts not supported\n");
return -EINVAL;
}
/* irq_mtx serialises callback-table updates against vme_irq_free() */
mutex_lock(&bridge->irq_mtx);
if (bridge->irq[level - 1].callback[statid].func) {
mutex_unlock(&bridge->irq_mtx);
printk(KERN_WARNING "VME Interrupt already taken\n");
return -EBUSY;
}
/* count tracks how many handlers share this level (see vme_irq_free) */
bridge->irq[level - 1].count++;
bridge->irq[level - 1].callback[statid].priv_data = priv_data;
bridge->irq[level - 1].callback[statid].func = callback;
/* Enable IRQ level */
bridge->irq_set(bridge, level, 1, 1);
mutex_unlock(&bridge->irq_mtx);
return 0;
}
EXPORT_SYMBOL(vme_irq_request);
/*
 * Detach the callback registered for (level, statid) and disable the
 * interrupt level on the bridge once no handlers remain at that level.
 * Errors are logged but not reported to the caller (void return).
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
struct vme_bridge *bridge;
bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
return;
}
if ((level < 1) || (level > 7)) {
printk(KERN_ERR "Invalid interrupt level\n");
return;
}
if (bridge->irq_set == NULL) {
printk(KERN_ERR "Configuring interrupts not supported\n");
return;
}
/* irq_mtx serialises against vme_irq_request() */
mutex_lock(&bridge->irq_mtx);
bridge->irq[level - 1].count--;
/* Disable IRQ level if no more interrupts attached at this level*/
if (bridge->irq[level - 1].count == 0)
bridge->irq_set(bridge, level, 0, 1);
bridge->irq[level - 1].callback[statid].func = NULL;
bridge->irq[level - 1].callback[statid].priv_data = NULL;
mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
  971. int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
  972. {
  973. struct vme_bridge *bridge;
  974. bridge = vdev->bridge;
  975. if (bridge == NULL) {
  976. printk(KERN_ERR "Can't find VME bus\n");
  977. return -EINVAL;
  978. }
  979. if ((level < 1) || (level > 7)) {
  980. printk(KERN_WARNING "Invalid interrupt level\n");
  981. return -EINVAL;
  982. }
  983. if (bridge->irq_generate == NULL) {
  984. printk(KERN_WARNING "Interrupt generation not supported\n");
  985. return -EINVAL;
  986. }
  987. return bridge->irq_generate(bridge, level, statid);
  988. }
  989. EXPORT_SYMBOL(vme_irq_generate);
/*
 * Request the location monitor, return resource or NULL
 *
 * Scans the bridge's location monitor resources for an unlocked one,
 * claims it under its mutex, and wraps it in a freshly allocated
 * vme_resource. Returns NULL if no monitor is free or on allocation
 * failure; the caller releases the resource with vme_lm_free().
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
struct vme_bridge *bridge;
struct list_head *lm_pos = NULL;
struct vme_lm_resource *allocated_lm = NULL;
struct vme_lm_resource *lm = NULL;
struct vme_resource *resource = NULL;
bridge = vdev->bridge;
if (bridge == NULL) {
printk(KERN_ERR "Can't find VME bus\n");
goto err_bus;
}
/* Loop through location monitor resources */
list_for_each(lm_pos, &bridge->lm_resources) {
lm = list_entry(lm_pos,
struct vme_lm_resource, list);
/* Defensive: a valid list node should never yield NULL here */
if (lm == NULL) {
printk(KERN_ERR "Registered NULL Location Monitor resource\n");
continue;
}
/* Find an unlocked controller */
mutex_lock(&lm->mtx);
if (lm->locked == 0) {
lm->locked = 1;
mutex_unlock(&lm->mtx);
allocated_lm = lm;
break;
}
mutex_unlock(&lm->mtx);
}
/* Check to see if we found a resource */
if (allocated_lm == NULL)
goto err_lm;
resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
if (resource == NULL) {
printk(KERN_ERR "Unable to allocate resource structure\n");
goto err_alloc;
}
resource->type = VME_LM;
resource->entry = &allocated_lm->list;
return resource;
err_alloc:
/* Unlock image */
mutex_lock(&lm->mtx);
lm->locked = 0;
mutex_unlock(&lm->mtx);
err_lm:
err_bus:
return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
  1044. int vme_lm_count(struct vme_resource *resource)
  1045. {
  1046. struct vme_lm_resource *lm;
  1047. if (resource->type != VME_LM) {
  1048. printk(KERN_ERR "Not a Location Monitor resource\n");
  1049. return -EINVAL;
  1050. }
  1051. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1052. return lm->monitors;
  1053. }
  1054. EXPORT_SYMBOL(vme_lm_count);
  1055. int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
  1056. u32 aspace, u32 cycle)
  1057. {
  1058. struct vme_bridge *bridge = find_bridge(resource);
  1059. struct vme_lm_resource *lm;
  1060. if (resource->type != VME_LM) {
  1061. printk(KERN_ERR "Not a Location Monitor resource\n");
  1062. return -EINVAL;
  1063. }
  1064. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1065. if (bridge->lm_set == NULL) {
  1066. printk(KERN_ERR "vme_lm_set not supported\n");
  1067. return -EINVAL;
  1068. }
  1069. return bridge->lm_set(lm, lm_base, aspace, cycle);
  1070. }
  1071. EXPORT_SYMBOL(vme_lm_set);
  1072. int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
  1073. u32 *aspace, u32 *cycle)
  1074. {
  1075. struct vme_bridge *bridge = find_bridge(resource);
  1076. struct vme_lm_resource *lm;
  1077. if (resource->type != VME_LM) {
  1078. printk(KERN_ERR "Not a Location Monitor resource\n");
  1079. return -EINVAL;
  1080. }
  1081. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1082. if (bridge->lm_get == NULL) {
  1083. printk(KERN_ERR "vme_lm_get not supported\n");
  1084. return -EINVAL;
  1085. }
  1086. return bridge->lm_get(lm, lm_base, aspace, cycle);
  1087. }
  1088. EXPORT_SYMBOL(vme_lm_get);
  1089. int vme_lm_attach(struct vme_resource *resource, int monitor,
  1090. void (*callback)(void *), void *data)
  1091. {
  1092. struct vme_bridge *bridge = find_bridge(resource);
  1093. struct vme_lm_resource *lm;
  1094. if (resource->type != VME_LM) {
  1095. printk(KERN_ERR "Not a Location Monitor resource\n");
  1096. return -EINVAL;
  1097. }
  1098. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1099. if (bridge->lm_attach == NULL) {
  1100. printk(KERN_ERR "vme_lm_attach not supported\n");
  1101. return -EINVAL;
  1102. }
  1103. return bridge->lm_attach(lm, monitor, callback, data);
  1104. }
  1105. EXPORT_SYMBOL(vme_lm_attach);
  1106. int vme_lm_detach(struct vme_resource *resource, int monitor)
  1107. {
  1108. struct vme_bridge *bridge = find_bridge(resource);
  1109. struct vme_lm_resource *lm;
  1110. if (resource->type != VME_LM) {
  1111. printk(KERN_ERR "Not a Location Monitor resource\n");
  1112. return -EINVAL;
  1113. }
  1114. lm = list_entry(resource->entry, struct vme_lm_resource, list);
  1115. if (bridge->lm_detach == NULL) {
  1116. printk(KERN_ERR "vme_lm_detach not supported\n");
  1117. return -EINVAL;
  1118. }
  1119. return bridge->lm_detach(lm, monitor);
  1120. }
  1121. EXPORT_SYMBOL(vme_lm_detach);
/*
 * Release a location monitor resource back to the bridge.
 *
 * Only the vme_resource wrapper is freed; the underlying lm resource is
 * merely unlocked, ready for the next vme_lm_request().
 */
void vme_lm_free(struct vme_resource *resource)
{
struct vme_lm_resource *lm;
if (resource->type != VME_LM) {
printk(KERN_ERR "Not a Location Monitor resource\n");
return;
}
lm = list_entry(resource->entry, struct vme_lm_resource, list);
mutex_lock(&lm->mtx);
/* XXX
 * Check to see that there aren't any callbacks still attached, if
 * there are we should probably be detaching them!
 */
lm->locked = 0;
mutex_unlock(&lm->mtx);
kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
  1140. int vme_slot_num(struct vme_dev *vdev)
  1141. {
  1142. struct vme_bridge *bridge;
  1143. bridge = vdev->bridge;
  1144. if (bridge == NULL) {
  1145. printk(KERN_ERR "Can't find VME bus\n");
  1146. return -EINVAL;
  1147. }
  1148. if (bridge->slot_get == NULL) {
  1149. printk(KERN_WARNING "vme_slot_num not supported\n");
  1150. return -EINVAL;
  1151. }
  1152. return bridge->slot_get(bridge);
  1153. }
  1154. EXPORT_SYMBOL(vme_slot_num);
  1155. int vme_bus_num(struct vme_dev *vdev)
  1156. {
  1157. struct vme_bridge *bridge;
  1158. bridge = vdev->bridge;
  1159. if (bridge == NULL) {
  1160. pr_err("Can't find VME bus\n");
  1161. return -EINVAL;
  1162. }
  1163. return bridge->num;
  1164. }
  1165. EXPORT_SYMBOL(vme_bus_num);
/* - Bridge Registration --------------------------------------------------- */
/*
 * Release callback for the embedded struct device: frees the containing
 * vme_dev once the driver core drops the last reference.
 */
static void vme_dev_release(struct device *dev)
{
kfree(dev_to_vme_dev(dev));
}
  1171. /* Common bridge initialization */
  1172. struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
  1173. {
  1174. INIT_LIST_HEAD(&bridge->vme_error_handlers);
  1175. INIT_LIST_HEAD(&bridge->master_resources);
  1176. INIT_LIST_HEAD(&bridge->slave_resources);
  1177. INIT_LIST_HEAD(&bridge->dma_resources);
  1178. INIT_LIST_HEAD(&bridge->lm_resources);
  1179. mutex_init(&bridge->irq_mtx);
  1180. return bridge;
  1181. }
  1182. EXPORT_SYMBOL(vme_init_bridge);
  1183. int vme_register_bridge(struct vme_bridge *bridge)
  1184. {
  1185. int i;
  1186. int ret = -1;
  1187. mutex_lock(&vme_buses_lock);
  1188. for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
  1189. if ((vme_bus_numbers & (1 << i)) == 0) {
  1190. vme_bus_numbers |= (1 << i);
  1191. bridge->num = i;
  1192. INIT_LIST_HEAD(&bridge->devices);
  1193. list_add_tail(&bridge->bus_list, &vme_bus_list);
  1194. ret = 0;
  1195. break;
  1196. }
  1197. }
  1198. mutex_unlock(&vme_buses_lock);
  1199. return ret;
  1200. }
  1201. EXPORT_SYMBOL(vme_register_bridge);
/*
 * Remove a bridge: release its bus number, unregister every child
 * device still on the bridge, and drop it from the global bus list -
 * all under vme_buses_lock so driver registration cannot race.
 */
void vme_unregister_bridge(struct vme_bridge *bridge)
{
struct vme_dev *vdev;
struct vme_dev *tmp;
mutex_lock(&vme_buses_lock);
vme_bus_numbers &= ~(1 << bridge->num);
list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
list_del(&vdev->drv_list);
list_del(&vdev->bridge_list);
device_unregister(&vdev->dev);
}
list_del(&bridge->bus_list);
mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
  1217. /* - Driver Registration --------------------------------------------------- */
  1218. static int __vme_register_driver_bus(struct vme_driver *drv,
  1219. struct vme_bridge *bridge, unsigned int ndevs)
  1220. {
  1221. int err;
  1222. unsigned int i;
  1223. struct vme_dev *vdev;
  1224. struct vme_dev *tmp;
  1225. for (i = 0; i < ndevs; i++) {
  1226. vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
  1227. if (!vdev) {
  1228. err = -ENOMEM;
  1229. goto err_devalloc;
  1230. }
  1231. vdev->num = i;
  1232. vdev->bridge = bridge;
  1233. vdev->dev.platform_data = drv;
  1234. vdev->dev.release = vme_dev_release;
  1235. vdev->dev.parent = bridge->parent;
  1236. vdev->dev.bus = &vme_bus_type;
  1237. dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
  1238. vdev->num);
  1239. err = device_register(&vdev->dev);
  1240. if (err)
  1241. goto err_reg;
  1242. if (vdev->dev.platform_data) {
  1243. list_add_tail(&vdev->drv_list, &drv->devices);
  1244. list_add_tail(&vdev->bridge_list, &bridge->devices);
  1245. } else
  1246. device_unregister(&vdev->dev);
  1247. }
  1248. return 0;
  1249. err_reg:
  1250. put_device(&vdev->dev);
  1251. kfree(vdev);
  1252. err_devalloc:
  1253. list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
  1254. list_del(&vdev->drv_list);
  1255. list_del(&vdev->bridge_list);
  1256. device_unregister(&vdev->dev);
  1257. }
  1258. return err;
  1259. }
/*
 * Create ndevs devices for drv on every registered bridge. Stops at
 * the first error; vme_register_driver() then rolls the driver back.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
struct vme_bridge *bridge;
int err = 0;
mutex_lock(&vme_buses_lock);
list_for_each_entry(bridge, &vme_bus_list, bus_list) {
/*
 * This cannot cause trouble as we already have vme_buses_lock
 * and if the bridge is removed, it will have to go through
 * vme_unregister_bridge() to do it (which calls remove() on
 * the bridge which in turn tries to acquire vme_buses_lock and
 * will have to wait).
 */
err = __vme_register_driver_bus(drv, bridge, ndevs);
if (err)
break;
}
mutex_unlock(&vme_buses_lock);
return err;
}
  1280. int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
  1281. {
  1282. int err;
  1283. drv->driver.name = drv->name;
  1284. drv->driver.bus = &vme_bus_type;
  1285. INIT_LIST_HEAD(&drv->devices);
  1286. err = driver_register(&drv->driver);
  1287. if (err)
  1288. return err;
  1289. err = __vme_register_driver(drv, ndevs);
  1290. if (err)
  1291. driver_unregister(&drv->driver);
  1292. return err;
  1293. }
  1294. EXPORT_SYMBOL(vme_register_driver);
  1295. void vme_unregister_driver(struct vme_driver *drv)
  1296. {
  1297. struct vme_dev *dev, *dev_tmp;
  1298. mutex_lock(&vme_buses_lock);
  1299. list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
  1300. list_del(&dev->drv_list);
  1301. list_del(&dev->bridge_list);
  1302. device_unregister(&dev->dev);
  1303. }
  1304. mutex_unlock(&vme_buses_lock);
  1305. driver_unregister(&drv->driver);
  1306. }
  1307. EXPORT_SYMBOL(vme_unregister_driver);
/* - Bus Registration ------------------------------------------------------ */
/*
 * Driver-core match callback. A vme_dev matches only the vme_driver it
 * was created for (stashed in platform_data), and only if that driver's
 * match() accepts it. On rejection platform_data is cleared so
 * __vme_register_driver_bus() knows to drop the device after
 * device_register() returns.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
struct vme_driver *vme_drv;
vme_drv = container_of(drv, struct vme_driver, driver);
if (dev->platform_data == vme_drv) {
struct vme_dev *vdev = dev_to_vme_dev(dev);
if (vme_drv->match && vme_drv->match(vdev))
return 1;
dev->platform_data = NULL;
}
return 0;
}
  1321. static int vme_bus_probe(struct device *dev)
  1322. {
  1323. int retval = -ENODEV;
  1324. struct vme_driver *driver;
  1325. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1326. driver = dev->platform_data;
  1327. if (driver->probe != NULL)
  1328. retval = driver->probe(vdev);
  1329. return retval;
  1330. }
  1331. static int vme_bus_remove(struct device *dev)
  1332. {
  1333. int retval = -ENODEV;
  1334. struct vme_driver *driver;
  1335. struct vme_dev *vdev = dev_to_vme_dev(dev);
  1336. driver = dev->platform_data;
  1337. if (driver->remove != NULL)
  1338. retval = driver->remove(vdev);
  1339. return retval;
  1340. }
/*
 * The VME bus type: hooks the match/probe/remove callbacks above into
 * the driver core. Exported so bridge modules can reference the bus.
 */
struct bus_type vme_bus_type = {
.name = "vme",
.match = vme_bus_match,
.probe = vme_bus_probe,
.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
/*
 * Register the VME bus with the driver core at subsys_initcall time so
 * bridge and device drivers can attach later in boot.
 */
static int __init vme_init(void)
{
return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);