/*
 * Parallel-port resource manager code.
 *
 * Authors: David Campbell <campbell@tirian.che.curtin.edu.au>
 *          Tim Waugh <tim@cyberelk.demon.co.uk>
 *          Jose Renau <renau@acm.org>
 *          Philip Blundell <philb@gnu.org>
 *          Andrea Arcangeli
 *
 * based on work by Grant Guenther <grant@torque.net>
 * and Philip Blundell
 *
 * Any part of this program may be used in documents licensed under
 * the GNU Free Documentation License, Version 1.1 or any later version
 * published by the Free Software Foundation.
 */
#undef PARPORT_DEBUG_SHARING		/* undef for production */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/threads.h>
#include <linux/parport.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <asm/irq.h>

#undef PARPORT_PARANOID

/* Default time a device may keep the port before being preempted: HZ/5
 * jiffies, i.e. one fifth of a second. */
#define PARPORT_DEFAULT_TIMESLICE	(HZ/5)

unsigned long parport_default_timeslice = PARPORT_DEFAULT_TIMESLICE;
int parport_default_spintime =  DEFAULT_SPIN_TIME;

/* Ports that have been announced to device drivers; walked under
 * parportlist_lock (see parport_announce_port/parport_remove_port). */
static LIST_HEAD(portlist);
static DEFINE_SPINLOCK(parportlist_lock);

/* list of all allocated ports, sorted by ->number */
static LIST_HEAD(all_ports);
static DEFINE_SPINLOCK(full_list_lock);

/* Registered parport_driver structures; traversed with the
 * registration_lock held (see attach/detach_driver_chain). */
static LIST_HEAD(drivers);

/* Serialises driver registration against port announce/remove. */
static DEFINE_MUTEX(registration_lock);
  44. /* What you can do to a port that's gone away.. */
  45. static void dead_write_lines (struct parport *p, unsigned char b){}
  46. static unsigned char dead_read_lines (struct parport *p) { return 0; }
  47. static unsigned char dead_frob_lines (struct parport *p, unsigned char b,
  48. unsigned char c) { return 0; }
  49. static void dead_onearg (struct parport *p){}
  50. static void dead_initstate (struct pardevice *d, struct parport_state *s) { }
  51. static void dead_state (struct parport *p, struct parport_state *s) { }
  52. static size_t dead_write (struct parport *p, const void *b, size_t l, int f)
  53. { return 0; }
  54. static size_t dead_read (struct parport *p, void *b, size_t l, int f)
  55. { return 0; }
/*
 * Operations table installed by parport_remove_port(): every hook points
 * at a no-op stub above, so device drivers still holding the port get
 * harmless failures rather than accesses to departed hardware.
 * .owner is NULL: no module backs these stubs, so no refcounting needed.
 */
static struct parport_operations dead_ops = {
	.write_data	= dead_write_lines,	/* data */
	.read_data	= dead_read_lines,

	.write_control	= dead_write_lines,	/* control */
	.read_control	= dead_read_lines,
	.frob_control	= dead_frob_lines,

	.read_status	= dead_read_lines,	/* status */

	.enable_irq	= dead_onearg,		/* enable_irq */
	.disable_irq	= dead_onearg,		/* disable_irq */

	.data_forward	= dead_onearg,		/* data_forward */
	.data_reverse	= dead_onearg,		/* data_reverse */

	.init_state	= dead_initstate,	/* init_state */
	.save_state	= dead_state,
	.restore_state	= dead_state,

	.epp_write_data	= dead_write,		/* epp */
	.epp_read_data	= dead_read,
	.epp_write_addr	= dead_write,
	.epp_read_addr	= dead_read,

	.ecp_write_data	= dead_write,		/* ecp */
	.ecp_read_data	= dead_read,
	.ecp_write_addr	= dead_write,

	.compat_write_data	= dead_write,	/* compat */
	.nibble_read_data	= dead_read,	/* nibble */
	.byte_read_data		= dead_read,	/* byte */

	.owner		= NULL,
};
  82. /* Call attach(port) for each registered driver. */
  83. static void attach_driver_chain(struct parport *port)
  84. {
  85. /* caller has exclusive registration_lock */
  86. struct parport_driver *drv;
  87. list_for_each_entry(drv, &drivers, list)
  88. drv->attach(port);
  89. }
  90. /* Call detach(port) for each registered driver. */
  91. static void detach_driver_chain(struct parport *port)
  92. {
  93. struct parport_driver *drv;
  94. /* caller has exclusive registration_lock */
  95. list_for_each_entry(drv, &drivers, list)
  96. drv->detach (port);
  97. }
/* Ask kmod for some lowlevel drivers. */
static void get_lowlevel_driver (void)
{
	/* There is no actual module called this: you should set
	 * up an alias for modutils. */
	request_module ("parport_lowlevel");
}
/**
 *	parport_register_driver - register a parallel port device driver
 *	@drv: structure describing the driver
 *
 *	This can be called by a parallel port device driver in order
 *	to receive notifications about ports being found in the
 *	system, as well as ports no longer available.
 *
 *	The @drv structure is allocated by the caller and must not be
 *	deallocated until after calling parport_unregister_driver().
 *
 *	The driver's attach() function may block.  The port that
 *	attach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.  Calling
 *	parport_register_device() on that port will do this for you.
 *
 *	The driver's detach() function may block.  The port that
 *	detach() is given will be valid for the duration of the
 *	callback, but if the driver wants to take a copy of the
 *	pointer it must call parport_get_port() to do so.
 *
 *	Returns 0 on success.  Currently it always succeeds.
 **/
int parport_register_driver (struct parport_driver *drv)
{
	struct parport *port;

	/* If no ports have been found yet, prod kmod for a low-level
	 * driver before taking the lock.  (Unlocked peek; a racing
	 * announce is handled by the attach loop below.) */
	if (list_empty(&portlist))
		get_lowlevel_driver ();

	mutex_lock(&registration_lock);
	/* Tell the new driver about every port already announced... */
	list_for_each_entry(port, &portlist, list)
		drv->attach(port);
	/* ...then make it visible for future announcements.  Both steps
	 * under registration_lock so no port is missed or seen twice. */
	list_add(&drv->list, &drivers);
	mutex_unlock(&registration_lock);

	return 0;
}
/**
 *	parport_unregister_driver - deregister a parallel port device driver
 *	@drv: structure describing the driver that was given to
 *	      parport_register_driver()
 *
 *	This should be called by a parallel port device driver that
 *	has registered itself using parport_register_driver() when it
 *	is about to be unloaded.
 *
 *	When it returns, the driver's attach() routine will no longer
 *	be called, and for each port that attach() was called for, the
 *	detach() routine will have been called.
 *
 *	All the driver's attach() and detach() calls are guaranteed to have
 *	finished by the time this function returns.
 **/
void parport_unregister_driver (struct parport_driver *drv)
{
	struct parport *port;

	mutex_lock(&registration_lock);
	/* Unhook first so concurrent announcements stop calling attach(),
	 * then detach from every currently-known port — all under the
	 * registration_lock, which guarantees no callback is in flight
	 * when we return. */
	list_del_init(&drv->list);
	list_for_each_entry(port, &portlist, list)
		drv->detach(port);
	mutex_unlock(&registration_lock);
}
  166. static void free_port (struct parport *port)
  167. {
  168. int d;
  169. spin_lock(&full_list_lock);
  170. list_del(&port->full_list);
  171. spin_unlock(&full_list_lock);
  172. for (d = 0; d < 5; d++) {
  173. kfree(port->probe_info[d].class_name);
  174. kfree(port->probe_info[d].mfr);
  175. kfree(port->probe_info[d].model);
  176. kfree(port->probe_info[d].cmdset);
  177. kfree(port->probe_info[d].description);
  178. }
  179. kfree(port->name);
  180. kfree(port);
  181. }
/**
 *	parport_get_port - increment a port's reference count
 *	@port: the port
 *
 *	This ensures that a struct parport pointer remains valid
 *	until the matching parport_put_port() call.
 **/
struct parport *parport_get_port (struct parport *port)
{
	atomic_inc (&port->ref_count);
	return port;	/* returned so callers can chain the call */
}
  194. /**
  195. * parport_put_port - decrement a port's reference count
  196. * @port: the port
  197. *
  198. * This should be called once for each call to parport_get_port(),
  199. * once the port is no longer needed.
  200. **/
  201. void parport_put_port (struct parport *port)
  202. {
  203. if (atomic_dec_and_test (&port->ref_count))
  204. /* Can destroy it now. */
  205. free_port (port);
  206. return;
  207. }
  208. /**
  209. * parport_register_port - register a parallel port
  210. * @base: base I/O address
  211. * @irq: IRQ line
  212. * @dma: DMA channel
  213. * @ops: pointer to the port driver's port operations structure
  214. *
  215. * When a parallel port (lowlevel) driver finds a port that
  216. * should be made available to parallel port device drivers, it
  217. * should call parport_register_port(). The @base, @irq, and
  218. * @dma parameters are for the convenience of port drivers, and
  219. * for ports where they aren't meaningful needn't be set to
  220. * anything special. They can be altered afterwards by adjusting
  221. * the relevant members of the parport structure that is returned
  222. * and represents the port. They should not be tampered with
  223. * after calling parport_announce_port, however.
  224. *
  225. * If there are parallel port device drivers in the system that
  226. * have registered themselves using parport_register_driver(),
  227. * they are not told about the port at this time; that is done by
  228. * parport_announce_port().
  229. *
  230. * The @ops structure is allocated by the caller, and must not be
  231. * deallocated before calling parport_remove_port().
  232. *
  233. * If there is no memory to allocate a new parport structure,
  234. * this function will return %NULL.
  235. **/
  236. struct parport *parport_register_port(unsigned long base, int irq, int dma,
  237. struct parport_operations *ops)
  238. {
  239. struct list_head *l;
  240. struct parport *tmp;
  241. int num;
  242. int device;
  243. char *name;
  244. tmp = kmalloc(sizeof(struct parport), GFP_KERNEL);
  245. if (!tmp) {
  246. printk(KERN_WARNING "parport: memory squeeze\n");
  247. return NULL;
  248. }
  249. /* Init our structure */
  250. memset(tmp, 0, sizeof(struct parport));
  251. tmp->base = base;
  252. tmp->irq = irq;
  253. tmp->dma = dma;
  254. tmp->muxport = tmp->daisy = tmp->muxsel = -1;
  255. tmp->modes = 0;
  256. INIT_LIST_HEAD(&tmp->list);
  257. tmp->devices = tmp->cad = NULL;
  258. tmp->flags = 0;
  259. tmp->ops = ops;
  260. tmp->physport = tmp;
  261. memset (tmp->probe_info, 0, 5 * sizeof (struct parport_device_info));
  262. rwlock_init(&tmp->cad_lock);
  263. spin_lock_init(&tmp->waitlist_lock);
  264. spin_lock_init(&tmp->pardevice_lock);
  265. tmp->ieee1284.mode = IEEE1284_MODE_COMPAT;
  266. tmp->ieee1284.phase = IEEE1284_PH_FWD_IDLE;
  267. sema_init(&tmp->ieee1284.irq, 0);
  268. tmp->spintime = parport_default_spintime;
  269. atomic_set (&tmp->ref_count, 1);
  270. INIT_LIST_HEAD(&tmp->full_list);
  271. name = kmalloc(15, GFP_KERNEL);
  272. if (!name) {
  273. printk(KERN_ERR "parport: memory squeeze\n");
  274. kfree(tmp);
  275. return NULL;
  276. }
  277. /* Search for the lowest free parport number. */
  278. spin_lock(&full_list_lock);
  279. for (l = all_ports.next, num = 0; l != &all_ports; l = l->next, num++) {
  280. struct parport *p = list_entry(l, struct parport, full_list);
  281. if (p->number != num)
  282. break;
  283. }
  284. tmp->portnum = tmp->number = num;
  285. list_add_tail(&tmp->full_list, l);
  286. spin_unlock(&full_list_lock);
  287. /*
  288. * Now that the portnum is known finish doing the Init.
  289. */
  290. sprintf(name, "parport%d", tmp->portnum = tmp->number);
  291. tmp->name = name;
  292. for (device = 0; device < 5; device++)
  293. /* assume the worst */
  294. tmp->probe_info[device].class = PARPORT_CLASS_LEGACY;
  295. tmp->waithead = tmp->waittail = NULL;
  296. return tmp;
  297. }
/**
 *	parport_announce_port - tell device drivers about a parallel port
 *	@port: parallel port to announce
 *
 *	After a port driver has registered a parallel port with
 *	parport_register_port, and performed any necessary
 *	initialisation or adjustments, it should call
 *	parport_announce_port() in order to notify all device drivers
 *	that have called parport_register_driver().  Their attach()
 *	functions will be called, with @port as the parameter.
 **/
void parport_announce_port (struct parport *port)
{
	int i;

#ifdef CONFIG_PARPORT_1284
	/* Analyse the IEEE1284.3 topology of the port. */
	parport_daisy_init(port);
#endif

	if (!port->dev)
		printk(KERN_WARNING "%s: fix this legacy "
				"no-device port driver!\n",
				port->name);

	parport_proc_register(port);

	/* Lock order: registration_lock outside parportlist_lock —
	 * the same order parport_remove_port() uses. */
	mutex_lock(&registration_lock);
	spin_lock_irq(&parportlist_lock);
	list_add_tail(&port->list, &portlist);
	/* slaves[0..1] appear to be extra mux/daisy ports hanging off
	 * this one (see also parport_remove_port) — announce them too. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_add_tail(&slave->list, &portlist);
	}
	spin_unlock_irq(&parportlist_lock);

	/* Let drivers know that new port(s) has arrived.  attach() may
	 * block, so this happens after the spinlock is dropped but still
	 * under registration_lock. */
	attach_driver_chain (port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			attach_driver_chain(slave);
	}
	mutex_unlock(&registration_lock);
}
/**
 *	parport_remove_port - deregister a parallel port
 *	@port: parallel port to deregister
 *
 *	When a parallel port driver is forcibly unloaded, or a
 *	parallel port becomes inaccessible, the port driver must call
 *	this function in order to deal with device drivers that still
 *	want to use it.
 *
 *	The parport structure associated with the port has its
 *	operations structure replaced with one containing 'null'
 *	operations that return errors or just don't do anything.
 *
 *	Any drivers that have registered themselves using
 *	parport_register_driver() are notified that the port is no
 *	longer accessible by having their detach() routines called
 *	with @port as the parameter.
 **/
void parport_remove_port(struct parport *port)
{
	int i;

	mutex_lock(&registration_lock);

	/* Spread the word. */
	detach_driver_chain (port);

#ifdef CONFIG_PARPORT_1284
	/* Forget the IEEE1284.3 topology of the port. */
	parport_daisy_fini(port);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (!slave)
			continue;
		detach_driver_chain(slave);
		parport_daisy_fini(slave);
	}
#endif

	/* From here on, stale users hit harmless stubs instead of the
	 * departed low-level driver's operations. */
	port->ops = &dead_ops;
	spin_lock(&parportlist_lock);
	list_del_init(&port->list);
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			list_del_init(&slave->list);
	}
	spin_unlock(&parportlist_lock);

	mutex_unlock(&registration_lock);

	parport_proc_unregister(port);

	/* Drop our references to the slave ports; the caller presumably
	 * still owns the reference to @port itself. */
	for (i = 1; i < 3; i++) {
		struct parport *slave = port->slaves[i-1];
		if (slave)
			parport_put_port(slave);
	}
}
/**
 *	parport_register_device - register a device on a parallel port
 *	@port: port to which the device is attached
 *	@name: a name to refer to the device
 *	@pf: preemption callback
 *	@kf: kick callback (wake-up)
 *	@irq_func: interrupt handler
 *	@flags: registration flags
 *	@handle: data for callback functions
 *
 *	This function, called by parallel port device drivers,
 *	declares that a device is connected to a port, and tells the
 *	system all it needs to know.
 *
 *	The @name is allocated by the caller and must not be
 *	deallocated until the caller calls @parport_unregister_device
 *	for that device.
 *
 *	The preemption callback function, @pf, is called when this
 *	device driver has claimed access to the port but another
 *	device driver wants to use it.  It is given @handle as its
 *	parameter, and should return zero if it is willing for the
 *	system to release the port to another driver on its behalf.
 *	If it wants to keep control of the port it should return
 *	non-zero, and no action will be taken.  It is good manners for
 *	the driver to try to release the port at the earliest
 *	opportunity after its preemption callback rejects a preemption
 *	attempt.  Note that if a preemption callback is happy for
 *	preemption to go ahead, there is no need to release the port;
 *	it is done automatically.  This function may not block, as it
 *	may be called from interrupt context.  If the device driver
 *	does not support preemption, @pf can be %NULL.
 *
 *	The wake-up ("kick") callback function, @kf, is called when
 *	the port is available to be claimed for exclusive access; that
 *	is, parport_claim() is guaranteed to succeed when called from
 *	inside the wake-up callback function.  If the driver wants to
 *	claim the port it should do so; otherwise, it need not take
 *	any action.  This function may not block, as it may be called
 *	from interrupt context.  If the device driver does not want to
 *	be explicitly invited to claim the port in this way, @kf can
 *	be %NULL.
 *
 *	The interrupt handler, @irq_func, is called when an interrupt
 *	arrives from the parallel port.  Note that if a device driver
 *	wants to use interrupts it should use parport_enable_irq(),
 *	and can also check the irq member of the parport structure
 *	representing the port.
 *
 *	The parallel port (lowlevel) driver is the one that has called
 *	request_irq() and whose interrupt handler is called first.
 *	This handler does whatever needs to be done to the hardware to
 *	acknowledge the interrupt (for PC-style ports there is nothing
 *	special to be done).  It then tells the IEEE 1284 code about
 *	the interrupt, which may involve reacting to an IEEE 1284
 *	event depending on the current IEEE 1284 phase.  After this,
 *	it calls @irq_func.  Needless to say, @irq_func will be called
 *	from interrupt context, and may not block.
 *
 *	The %PARPORT_DEV_EXCL flag is for preventing port sharing, and
 *	so should only be used when sharing the port with other device
 *	drivers is impossible and would lead to incorrect behaviour.
 *	Use it sparingly!  Normally, @flags will be zero.
 *
 *	This function returns a pointer to a structure that represents
 *	the device on the port, or %NULL if there is not enough memory
 *	to allocate space for that structure.
 **/
struct pardevice *
parport_register_device(struct parport *port, const char *name,
			int (*pf)(void *), void (*kf)(void *),
			void (*irq_func)(void *),
			int flags, void *handle)
{
	struct pardevice *tmp;

	if (port->physport->flags & PARPORT_FLAG_EXCL) {
		/* An exclusive device is registered. */
		printk (KERN_DEBUG "%s: no more devices allowed\n",
			port->name);
		return NULL;
	}

	if (flags & PARPORT_DEV_LURK) {
		/* A lurking device is pointless unless it can both yield
		 * (pf) and be invited back (kf). */
		if (!pf || !kf) {
			printk(KERN_INFO "%s: refused to register lurking device (%s) without callbacks\n", port->name, name);
			return NULL;
		}
	}

	/* We up our own module reference count, and that of the port
	   on which a device is to be registered, to ensure that
	   neither of us gets unloaded while we sleep in (e.g.)
	   kmalloc.
	 */
	if (!try_module_get(port->ops->owner)) {
		return NULL;
	}
	parport_get_port (port);

	tmp = kmalloc(sizeof(struct pardevice), GFP_KERNEL);
	if (tmp == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out;
	}

	tmp->state = kmalloc(sizeof(struct parport_state), GFP_KERNEL);
	if (tmp->state == NULL) {
		printk(KERN_WARNING "%s: memory squeeze, couldn't register %s.\n", port->name, name);
		goto out_free_pardevice;
	}

	tmp->name = name;
	tmp->port = port;
	tmp->daisy = -1;	/* not (yet) a daisy-chain device */
	tmp->preempt = pf;
	tmp->wakeup = kf;
	tmp->private = handle;
	tmp->flags = flags;
	tmp->irq_func = irq_func;
	tmp->waiting = 0;
	tmp->timeout = 5 * HZ;

	/* Chain this onto the list */
	tmp->prev = NULL;
	/*
	 * This function must not run from an irq handler so we don' t need
	 * to clear irq on the local CPU. -arca
	 */
	spin_lock(&port->physport->pardevice_lock);

	if (flags & PARPORT_DEV_EXCL) {
		/* Exclusive access only possible if nobody else is
		 * registered on this (physical) port yet. */
		if (port->physport->devices) {
			spin_unlock (&port->physport->pardevice_lock);
			printk (KERN_DEBUG
				"%s: cannot grant exclusive access for "
				"device %s\n", port->name, name);
			goto out_free_all;
		}
		port->flags |= PARPORT_FLAG_EXCL;
	}

	tmp->next = port->physport->devices;
	wmb(); /* Make sure that tmp->next is written before it's
                  added to the list; see comments marked 'no locking
                  required' */
	if (port->physport->devices)
		port->physport->devices->prev = tmp;
	port->physport->devices = tmp;
	spin_unlock(&port->physport->pardevice_lock);

	init_waitqueue_head(&tmp->wait_q);
	tmp->timeslice = parport_default_timeslice;
	tmp->waitnext = tmp->waitprev = NULL;

	/*
	 * This has to be run as last thing since init_state may need other
	 * pardevice fields. -arca
	 */
	port->ops->init_state(tmp, tmp->state);

	/* First device on the port also becomes its /proc representative. */
	if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
		port->proc_device = tmp;
		parport_device_proc_register(tmp);
	}
	return tmp;

 out_free_all:
	kfree(tmp->state);
 out_free_pardevice:
	kfree(tmp);
 out:
	/* Undo the reference counts taken above. */
	parport_put_port (port);
	module_put(port->ops->owner);

	return NULL;
}
/**
 *	parport_unregister_device - deregister a device on a parallel port
 *	@dev: pointer to structure representing device
 *
 *	This undoes the effect of parport_register_device().
 **/
void parport_unregister_device(struct pardevice *dev)
{
	struct parport *port;

#ifdef PARPORT_PARANOID
	if (dev == NULL) {
		printk(KERN_ERR "parport_unregister_device: passed NULL\n");
		return;
	}
#endif

	port = dev->port->physport;

	/* If this device was the port's /proc representative, retire it. */
	if (port->proc_device == dev) {
		port->proc_device = NULL;
		clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
		parport_device_proc_unregister(dev);
	}

	/* Force-release the port if the driver forgot; scold it. */
	if (port->cad == dev) {
		printk(KERN_DEBUG "%s: %s forgot to release port\n",
		       port->name, dev->name);
		parport_release (dev);
	}

	/* Unlink from the port's doubly-linked device list. */
	spin_lock(&port->pardevice_lock);
	if (dev->next)
		dev->next->prev = dev->prev;
	if (dev->prev)
		dev->prev->next = dev->next;
	else
		port->devices = dev->next;

	if (dev->flags & PARPORT_DEV_EXCL)
		port->flags &= ~PARPORT_FLAG_EXCL;

	spin_unlock(&port->pardevice_lock);

	/* Make sure we haven't left any pointers around in the wait
	 * list. */
	spin_lock_irq(&port->waitlist_lock);
	if (dev->waitprev || dev->waitnext || port->waithead == dev) {
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
	}
	spin_unlock_irq(&port->waitlist_lock);

	kfree(dev->state);
	kfree(dev);

	/* Release the references taken in parport_register_device(). */
	module_put(port->ops->owner);
	parport_put_port (port);
}
  609. /**
  610. * parport_find_number - find a parallel port by number
  611. * @number: parallel port number
  612. *
  613. * This returns the parallel port with the specified number, or
  614. * %NULL if there is none.
  615. *
  616. * There is an implicit parport_get_port() done already; to throw
  617. * away the reference to the port that parport_find_number()
  618. * gives you, use parport_put_port().
  619. */
  620. struct parport *parport_find_number (int number)
  621. {
  622. struct parport *port, *result = NULL;
  623. if (list_empty(&portlist))
  624. get_lowlevel_driver ();
  625. spin_lock (&parportlist_lock);
  626. list_for_each_entry(port, &portlist, list) {
  627. if (port->number == number) {
  628. result = parport_get_port (port);
  629. break;
  630. }
  631. }
  632. spin_unlock (&parportlist_lock);
  633. return result;
  634. }
  635. /**
  636. * parport_find_base - find a parallel port by base address
  637. * @base: base I/O address
  638. *
  639. * This returns the parallel port with the specified base
  640. * address, or %NULL if there is none.
  641. *
  642. * There is an implicit parport_get_port() done already; to throw
  643. * away the reference to the port that parport_find_base()
  644. * gives you, use parport_put_port().
  645. */
  646. struct parport *parport_find_base (unsigned long base)
  647. {
  648. struct parport *port, *result = NULL;
  649. if (list_empty(&portlist))
  650. get_lowlevel_driver ();
  651. spin_lock (&parportlist_lock);
  652. list_for_each_entry(port, &portlist, list) {
  653. if (port->base == base) {
  654. result = parport_get_port (port);
  655. break;
  656. }
  657. }
  658. spin_unlock (&parportlist_lock);
  659. return result;
  660. }
/**
 *	parport_claim - claim access to a parallel port device
 *	@dev: pointer to structure representing a device on the port
 *
 *	This function will not block and so can be used from interrupt
 *	context.  If parport_claim() succeeds in claiming access to
 *	the port it returns zero and the port is available to use.  It
 *	may fail (returning non-zero) if the port is in use by another
 *	driver and that driver is not willing to relinquish control of
 *	the port.
 **/
int parport_claim(struct pardevice *dev)
{
	struct pardevice *oldcad;
	struct parport *port = dev->port->physport;
	unsigned long flags;

	if (port->cad == dev) {
		printk(KERN_INFO "%s: %s already owner\n",
		       dev->port->name,dev->name);
		return 0;
	}

	/* Preempt any current device */
	write_lock_irqsave (&port->cad_lock, flags);
	if ((oldcad = port->cad) != NULL) {
		if (oldcad->preempt) {
			if (oldcad->preempt(oldcad->private))
				goto blocked;
			port->ops->save_state(port, dev->state);
		} else
			goto blocked;

		if (port->cad != oldcad) {
			/* I think we'll actually deadlock rather than
                           get here, but just in case.. */
			printk(KERN_WARNING
			       "%s: %s released port when preempted!\n",
			       port->name, oldcad->name);
			if (port->cad)
				goto blocked;
		}
	}

	/* Can't fail from now on, so mark ourselves as no longer waiting.
	 * Bit 0 of dev->waiting means "queued on the wait list" (set via
	 * test_and_set_bit below); bit 1 is set by parport_claim_or_block()
	 * to mean "may wait even without a wakeup callback". */
	if (dev->waiting & 1) {
		dev->waiting = 0;

		/* Take ourselves out of the wait list again.  */
		spin_lock_irq (&port->waitlist_lock);
		if (dev->waitprev)
			dev->waitprev->waitnext = dev->waitnext;
		else
			port->waithead = dev->waitnext;
		if (dev->waitnext)
			dev->waitnext->waitprev = dev->waitprev;
		else
			port->waittail = dev->waitprev;
		spin_unlock_irq (&port->waitlist_lock);
		dev->waitprev = dev->waitnext = NULL;
	}

	/* Now we do the change of devices */
	port->cad = dev;

#ifdef CONFIG_PARPORT_1284
	/* If it's a mux port, select it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = dev->port->muxport;
	}

	/* If it's a daisy chain device, select it. */
	if (dev->daisy >= 0) {
		/* This could be lazier. */
		if (!parport_daisy_select (port, dev->daisy,
					   IEEE1284_MODE_COMPAT))
			port->daisy = dev->daisy;
	}
#endif /* IEEE1284.3 support */

	/* Restore control registers */
	port->ops->restore_state(port, dev->state);
	write_unlock_irqrestore(&port->cad_lock, flags);
	dev->time = jiffies;
	return 0;

blocked:
	/* If this is the first time we tried to claim the port, register an
	   interest.  This is only allowed for devices sleeping in
	   parport_claim_or_block(), or those with a wakeup function.  */

	/* The cad_lock is still held for writing here */
	if (dev->waiting & 2 || dev->wakeup) {
		spin_lock (&port->waitlist_lock);
		if (test_and_set_bit(0, &dev->waiting) == 0) {
			/* First add ourselves to the end of the wait list. */
			dev->waitnext = NULL;
			dev->waitprev = port->waittail;
			if (port->waittail) {
				port->waittail->waitnext = dev;
				port->waittail = dev;
			} else
				port->waithead = port->waittail = dev;
		}
		spin_unlock (&port->waitlist_lock);
	}
	write_unlock_irqrestore (&port->cad_lock, flags);
	return -EAGAIN;
}
  760. /**
  761. * parport_claim_or_block - claim access to a parallel port device
  762. * @dev: pointer to structure representing a device on the port
  763. *
  764. * This behaves like parport_claim(), but will block if necessary
  765. * to wait for the port to be free. A return value of 1
  766. * indicates that it slept; 0 means that it succeeded without
  767. * needing to sleep. A negative error code indicates failure.
  768. **/
  769. int parport_claim_or_block(struct pardevice *dev)
  770. {
  771. int r;
  772. /* Signal to parport_claim() that we can wait even without a
  773. wakeup function. */
  774. dev->waiting = 2;
  775. /* Try to claim the port. If this fails, we need to sleep. */
  776. r = parport_claim(dev);
  777. if (r == -EAGAIN) {
  778. #ifdef PARPORT_DEBUG_SHARING
  779. printk(KERN_DEBUG "%s: parport_claim() returned -EAGAIN\n", dev->name);
  780. #endif
  781. /*
  782. * FIXME!!! Use the proper locking for dev->waiting,
  783. * and make this use the "wait_event_interruptible()"
  784. * interfaces. The cli/sti that used to be here
  785. * did nothing.
  786. *
  787. * See also parport_release()
  788. */
  789. /* If dev->waiting is clear now, an interrupt
  790. gave us the port and we would deadlock if we slept. */
  791. if (dev->waiting) {
  792. interruptible_sleep_on (&dev->wait_q);
  793. if (signal_pending (current)) {
  794. return -EINTR;
  795. }
  796. r = 1;
  797. } else {
  798. r = 0;
  799. #ifdef PARPORT_DEBUG_SHARING
  800. printk(KERN_DEBUG "%s: didn't sleep in parport_claim_or_block()\n",
  801. dev->name);
  802. #endif
  803. }
  804. #ifdef PARPORT_DEBUG_SHARING
  805. if (dev->port->physport->cad != dev)
  806. printk(KERN_DEBUG "%s: exiting parport_claim_or_block "
  807. "but %s owns port!\n", dev->name,
  808. dev->port->physport->cad ?
  809. dev->port->physport->cad->name:"nobody");
  810. #endif
  811. }
  812. dev->waiting = 0;
  813. return r;
  814. }
/**
 *	parport_release - give up access to a parallel port device
 *	@dev: pointer to structure representing parallel port device
 *
 *	This function cannot fail, but it should not be called without
 *	the port claimed.  Similarly, if the port is already claimed
 *	you should not try claiming it again.
 **/

void parport_release(struct pardevice *dev)
{
	/* All sharing state lives on the physical port, even for mux'd
	   logical ports. */
	struct parport *port = dev->port->physport;
	struct pardevice *pd;
	unsigned long flags;

	/* Make sure that dev is the current device; releasing a port we
	   do not own is a caller bug, so warn and bail out. */
	write_lock_irqsave(&port->cad_lock, flags);
	if (port->cad != dev) {
		write_unlock_irqrestore (&port->cad_lock, flags);
		printk(KERN_WARNING "%s: %s tried to release parport "
		       "when not owner\n", port->name, dev->name);
		return;
	}

#ifdef CONFIG_PARPORT_1284
	/* If this is on a mux port, deselect it. */
	if (dev->port->muxport >= 0) {
		/* FIXME */
		port->muxsel = -1;
	}

	/* If this is a daisy device, deselect it. */
	if (dev->daisy >= 0) {
		parport_daisy_deselect_all (port);
		port->daisy = -1;
	}
#endif

	/* Drop ownership under cad_lock so parport_claim() sees a
	   consistent "port free" state. */
	port->cad = NULL;
	write_unlock_irqrestore(&port->cad_lock, flags);

	/* Save control registers so they can be restored on re-claim. */
	port->ops->save_state(port, dev->state);

	/* If anybody is waiting, find out who's been there longest and
	   then wake them up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->waithead; pd; pd = pd->waitnext) {
		if (pd->waiting & 2) { /* sleeping in claim_or_block */
			/* Claim on the sleeper's behalf, then wake it. */
			parport_claim(pd);
			if (waitqueue_active(&pd->wait_q))
				wake_up_interruptible(&pd->wait_q);
			return;
		} else if (pd->wakeup) {
			/* The wakeup callback is expected to claim the
			   port itself; stop once somebody has. */
			pd->wakeup(pd->private);
			if (dev->port->cad) /* racy but no matter */
				return;
		} else {
			/* Should be unreachable: parport_claim() only
			   queues waiters that sleep or have a wakeup. */
			printk(KERN_ERR "%s: don't know how to wake %s\n", port->name, pd->name);
		}
	}

	/* Nobody was waiting, so walk the list to see if anyone is
	   interested in being woken up. (Note: no locking required) */
	/* !!! LOCKING IS NEEDED HERE */
	for (pd = port->devices; (port->cad == NULL) && pd; pd = pd->next) {
		if (pd->wakeup && pd != dev)
			pd->wakeup(pd->private);
	}
}
  877. irqreturn_t parport_irq_handler(int irq, void *dev_id)
  878. {
  879. struct parport *port = dev_id;
  880. parport_generic_irq(port);
  881. return IRQ_HANDLED;
  882. }
/* Exported symbols for modules. */

/* Port claiming / arbitration. */
EXPORT_SYMBOL(parport_claim);
EXPORT_SYMBOL(parport_claim_or_block);
EXPORT_SYMBOL(parport_release);

/* Port lifetime and lookup. */
EXPORT_SYMBOL(parport_register_port);
EXPORT_SYMBOL(parport_announce_port);
EXPORT_SYMBOL(parport_remove_port);

/* Driver registration. */
EXPORT_SYMBOL(parport_register_driver);
EXPORT_SYMBOL(parport_unregister_driver);

/* Device registration on a port. */
EXPORT_SYMBOL(parport_register_device);
EXPORT_SYMBOL(parport_unregister_device);

/* Reference counting and search helpers. */
EXPORT_SYMBOL(parport_get_port);
EXPORT_SYMBOL(parport_put_port);
EXPORT_SYMBOL(parport_find_number);
EXPORT_SYMBOL(parport_find_base);

/* Shared IRQ entry point for low-level drivers. */
EXPORT_SYMBOL(parport_irq_handler);

MODULE_LICENSE("GPL");