nosy.c

/*
 * nosy - Snoop mode driver for TI PCILynx 1394 controllers
 * Copyright (C) 2002-2007 Kristian Høgsberg
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "nosy.h"
#include "nosy-user.h"

#define TCODE_PHY_PACKET		0x10
#define PCI_DEVICE_ID_TI_PCILYNX	0x8000

static char driver_name[] = KBUILD_MODNAME;

/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
	__le32 next;
	__le32 async_error_next;
	u32 user_data;
	__le32 pcl_status;
	__le32 remaining_transfer_count;
	__le32 next_data_buffer;
	struct {
		__le32 control;
		__le32 pointer;
	} buffer[13];
};
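
/*
 * Size check for the layout comment above: six leading quadlets plus
 * thirteen control/pointer pairs is (6 + 13 * 2) * 4 = 128 bytes.
 */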

struct packet {
	unsigned int length;
	char data[0];
};

struct packet_buffer {
	char *data;
	size_t capacity;
	long total_packet_count, lost_packet_count;
	atomic_t size;
	struct packet *head, *tail;
	wait_queue_head_t wait;
};
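
/*
 * A packet_buffer is a circular byte buffer of struct packet records:
 * the interrupt handlers append at tail via packet_buffer_put(), read()
 * drains from head via packet_buffer_get(), and the atomic size field
 * is the only state the two sides update concurrently.
 */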

struct pcilynx {
	struct pci_dev *pci_device;
	__iomem char *registers;

	struct pcl *rcv_start_pcl, *rcv_pcl;
	__le32 *rcv_buffer;

	dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

	spinlock_t client_list_lock;
	struct list_head client_list;

	struct miscdevice misc;
	struct list_head link;
	struct kref kref;
};

static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
	kref_get(&lynx->kref);

	return lynx;
}

static void
lynx_release(struct kref *kref)
{
	kfree(container_of(kref, struct pcilynx, kref));
}

static inline void
lynx_put(struct pcilynx *lynx)
{
	kref_put(&lynx->kref, lynx_release);
}

struct client {
	struct pcilynx *lynx;
	u32 tcode_mask;
	struct packet_buffer buffer;
	struct list_head link;
};

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
	buffer->data = kmalloc(capacity, GFP_KERNEL);
	if (buffer->data == NULL)
		return -ENOMEM;
	buffer->head = (struct packet *) buffer->data;
	buffer->tail = (struct packet *) buffer->data;
	buffer->capacity = capacity;
	/* The enclosing client is kmalloc'ed, so zero both counters. */
	buffer->total_packet_count = 0;
	buffer->lost_packet_count = 0;
	atomic_set(&buffer->size, 0);
	init_waitqueue_head(&buffer->wait);

	return 0;
}

static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
	kfree(buffer->data);
}

static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
	struct packet_buffer *buffer = &client->buffer;
	size_t length;
	char *end;

	if (wait_event_interruptible(buffer->wait,
				     atomic_read(&buffer->size) > 0) ||
	    list_empty(&client->lynx->link))
		return -ERESTARTSYS;

	if (atomic_read(&buffer->size) == 0)
		return -ENODEV;

	/* FIXME: Check length <= user_length. */

	end = buffer->data + buffer->capacity;
	length = buffer->head->length;

	if (&buffer->head->data[length] < end) {
		if (copy_to_user(data, buffer->head->data, length))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->head->data[length];
	} else {
		size_t split = end - buffer->head->data;

		if (copy_to_user(data, buffer->head->data, split))
			return -EFAULT;
		if (copy_to_user(data + split, buffer->data, length - split))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->data[length - split];
	}

	/*
	 * Decrease buffer->size as the last thing, since this is what
	 * keeps the interrupt from overwriting the packet we are
	 * retrieving from the buffer.
	 */
	atomic_sub(sizeof(struct packet) + length, &buffer->size);

	return length;
}
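
/*
 * packet_buffer_put() runs from the interrupt handlers below with the
 * card's client_list_lock held.  If the packet header plus payload does
 * not fit into the free space left in the ring, the packet is dropped
 * and only counted in lost_packet_count.
 */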

static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
	char *end;

	buffer->total_packet_count++;

	if (buffer->capacity <
	    atomic_read(&buffer->size) + sizeof(struct packet) + length) {
		buffer->lost_packet_count++;
		return;
	}

	end = buffer->data + buffer->capacity;
	buffer->tail->length = length;

	if (&buffer->tail->data[length] < end) {
		memcpy(buffer->tail->data, data, length);
		buffer->tail = (struct packet *) &buffer->tail->data[length];
	} else {
		size_t split = end - buffer->tail->data;

		memcpy(buffer->tail->data, data, split);
		memcpy(buffer->data, data + split, length - split);
		buffer->tail = (struct packet *) &buffer->data[length - split];
	}

	/* Finally, adjust buffer size and wake up userspace reader. */
	atomic_add(sizeof(struct packet) + length, &buffer->size);
	wake_up_interruptible(&buffer->wait);
}

static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
	writel(data, lynx->registers + offset);
}

static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
	return readl(lynx->registers + offset);
}

static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
	reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}

/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
	int dmachan)
{
	reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
	reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
		  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}
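
/*
 * The per-channel DMA register banks are spaced 0x20 bytes apart, hence
 * the dmachan * 0x20 offset above.  This driver only ever runs its
 * receive program on channel 0.
 */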

static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
	if (addr > 15) {
		dev_err(&lynx->pci_device->dev,
			"PHY register address %d out of range\n", addr);
		return -1;
	}
	if (val > 0xff) {
		dev_err(&lynx->pci_device->dev,
			"PHY register value %d out of range\n", val);
		return -1;
	}
	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
		  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

	return 0;
}

static int
nosy_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct client *client;
	struct pcilynx *tmp, *lynx = NULL;

	mutex_lock(&card_mutex);
	list_for_each_entry(tmp, &card_list, link)
		if (tmp->misc.minor == minor) {
			lynx = lynx_get(tmp);
			break;
		}
	mutex_unlock(&card_mutex);
	if (lynx == NULL)
		return -ENODEV;

	client = kmalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		goto fail;

	client->tcode_mask = ~0;
	client->lynx = lynx;
	INIT_LIST_HEAD(&client->link);

	if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
		goto fail;

	file->private_data = client;

	return nonseekable_open(inode, file);
fail:
	kfree(client);
	lynx_put(lynx);

	return -ENOMEM;
}

static int
nosy_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct pcilynx *lynx = client->lynx;

	spin_lock_irq(&lynx->client_list_lock);
	list_del_init(&client->link);
	spin_unlock_irq(&lynx->client_list_lock);

	packet_buffer_destroy(&client->buffer);
	kfree(client);
	lynx_put(lynx);

	return 0;
}

static unsigned int
nosy_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	unsigned int ret = 0;

	poll_wait(file, &client->buffer.wait, pt);

	if (atomic_read(&client->buffer.size) > 0)
		ret = POLLIN | POLLRDNORM;

	if (list_empty(&client->lynx->link))
		ret |= POLLHUP;

	return ret;
}

static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return packet_buffer_get(client, buffer, count);
}

static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;
	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
	struct nosy_stats stats;

	switch (cmd) {
	case NOSY_IOC_GET_STATS:
		spin_lock_irq(client_list_lock);
		stats.total_packet_count = client->buffer.total_packet_count;
		stats.lost_packet_count  = client->buffer.lost_packet_count;
		spin_unlock_irq(client_list_lock);

		if (copy_to_user((void __user *) arg, &stats, sizeof stats))
			return -EFAULT;
		else
			return 0;

	case NOSY_IOC_START:
		spin_lock_irq(client_list_lock);
		list_add_tail(&client->link, &client->lynx->client_list);
		spin_unlock_irq(client_list_lock);

		return 0;

	case NOSY_IOC_STOP:
		spin_lock_irq(client_list_lock);
		list_del_init(&client->link);
		spin_unlock_irq(client_list_lock);

		return 0;

	case NOSY_IOC_FILTER:
		spin_lock_irq(client_list_lock);
		client->tcode_mask = arg;
		spin_unlock_irq(client_list_lock);

		return 0;

	default:
		return -EINVAL;
		/* Flush buffer, configure filter. */
	}
}

static const struct file_operations nosy_ops = {
	.owner =		THIS_MODULE,
	.read =			nosy_read,
	.unlocked_ioctl =	nosy_ioctl,
	.poll =			nosy_poll,
	.open =			nosy_open,
	.release =		nosy_release,
};
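
/*
 * For reference, a minimal userspace consumer of this interface could look
 * roughly like the sketch below (error handling omitted; "/dev/nosy" is an
 * assumption about how the "nosy" misc device gets named in /dev):
 *
 *	int fd = open("/dev/nosy", O_RDONLY);
 *	char buf[16 * 1024];	// large enough for any snooped packet
 *
 *	ioctl(fd, NOSY_IOC_FILTER, ~0);	// pass all tcodes
 *	ioctl(fd, NOSY_IOC_START);	// join the card's client list
 *	for (;;) {
 *		// each read() returns one packet, prefixed with a 32-bit
 *		// microsecond timestamp quadlet
 *		ssize_t len = read(fd, buf, sizeof buf);
 *	}
 *	ioctl(fd, NOSY_IOC_STOP);
 */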

#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */
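
/*
 * packet_irq_handler() runs when the receive DMA program halts after a
 * packet has landed in rcv_buffer.  The packet length comes from the PCL
 * status word, the tcode from the second quadlet of the packet, and the
 * first quadlet of rcv_buffer is overwritten with a microsecond timestamp
 * before length + 4 bytes are handed to every client whose tcode filter
 * matches.
 */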

static void
packet_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	u32 tcode_mask, tcode, timestamp;
	size_t length;
	struct timespec64 ts64;

	/* FIXME: Also report rcv_speed. */

	length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
	tcode  = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

	ktime_get_real_ts64(&ts64);
	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
	lynx->rcv_buffer[0] = (__force __le32)timestamp;

	if (length == PHY_PACKET_SIZE)
		tcode_mask = 1 << TCODE_PHY_PACKET;
	else
		tcode_mask = 1 << tcode;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		if (client->tcode_mask & tcode_mask)
			packet_buffer_put(&client->buffer,
					  lynx->rcv_buffer, length + 4);

	spin_unlock(&lynx->client_list_lock);
}

static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	struct timespec64 ts64;
	u32 timestamp;

	ktime_get_real_ts64(&ts64);
	timestamp = ts64.tv_nsec / NSEC_PER_USEC;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		packet_buffer_put(&client->buffer, &timestamp, 4);

	spin_unlock(&lynx->client_list_lock);
}

static irqreturn_t
irq_handler(int irq, void *device)
{
	struct pcilynx *lynx = device;
	u32 pci_int_status;

	pci_int_status = reg_read(lynx, PCI_INT_STATUS);

	if (pci_int_status == ~0)
		/* Card was ejected. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_INT_PEND) == 0)
		/* Not our interrupt, bail out quickly. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
		u32 link_int_status;

		link_int_status = reg_read(lynx, LINK_INT_STATUS);
		reg_write(lynx, LINK_INT_STATUS, link_int_status);

		if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
			bus_reset_irq_handler(lynx);
	}

	/* Clear the PCI_INT_STATUS register only after clearing the
	 * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
	 * be set again immediately. */

	reg_write(lynx, PCI_INT_STATUS, pci_int_status);

	if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
		packet_irq_handler(lynx);
		run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
	}

	return IRQ_HANDLED;
}

static void
remove_card(struct pci_dev *dev)
{
	struct pcilynx *lynx = pci_get_drvdata(dev);
	struct client *client;

	mutex_lock(&card_mutex);
	list_del_init(&lynx->link);
	misc_deregister(&lynx->misc);
	mutex_unlock(&card_mutex);

	reg_write(lynx, PCI_INT_ENABLE, 0);
	free_irq(lynx->pci_device->irq, lynx);

	spin_lock_irq(&lynx->client_list_lock);
	list_for_each_entry(client, &lynx->client_list, link)
		wake_up_interruptible(&client->buffer.wait);
	spin_unlock_irq(&lynx->client_list_lock);

	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
			    lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
			    lynx->rcv_pcl, lynx->rcv_pcl_bus);
	pci_free_consistent(lynx->pci_device, PAGE_SIZE,
			    lynx->rcv_buffer, lynx->rcv_buffer_bus);

	iounmap(lynx->registers);
	pci_disable_device(dev);
	lynx_put(lynx);
}

#define RCV_BUFFER_SIZE (16 * 1024)
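
/*
 * add_card() below points a single receive PCL at the 16 KB receive
 * buffer: descriptor 0 covers 2044 bytes starting 4 bytes in (the first
 * quadlet is reserved for the timestamp written by packet_irq_handler),
 * and descriptors 1-7 cover 2048 bytes each, for 2048 + 7 * 2048 = 16 KB
 * in total.
 */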

static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
	struct pcilynx *lynx;
	u32 p, end;
	int ret, i;

	if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
		dev_err(&dev->dev,
		    "DMA address limits not supported for PCILynx hardware\n");
		return -ENXIO;
	}
	if (pci_enable_device(dev)) {
		dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
		return -ENXIO;
	}
	pci_set_master(dev);

	lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
	if (lynx == NULL) {
		dev_err(&dev->dev, "Failed to allocate control structure\n");
		ret = -ENOMEM;
		goto fail_disable;
	}
	lynx->pci_device = dev;
	pci_set_drvdata(dev, lynx);

	spin_lock_init(&lynx->client_list_lock);
	INIT_LIST_HEAD(&lynx->client_list);
	kref_init(&lynx->kref);

	lynx->registers = ioremap_nocache(pci_resource_start(dev, 0),
					  PCILYNX_MAX_REGISTER);
	if (lynx->registers == NULL) {
		dev_err(&dev->dev, "Failed to map registers\n");
		ret = -ENOMEM;
		goto fail_deallocate_lynx;
	}

	lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
				sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
	lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
				sizeof(struct pcl), &lynx->rcv_pcl_bus);
	lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
				RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
	if (lynx->rcv_start_pcl == NULL ||
	    lynx->rcv_pcl == NULL ||
	    lynx->rcv_buffer == NULL) {
		dev_err(&dev->dev, "Failed to allocate receive buffer\n");
		ret = -ENOMEM;
		goto fail_deallocate_buffers;
	}
	lynx->rcv_start_pcl->next	= cpu_to_le32(lynx->rcv_pcl_bus);
	lynx->rcv_pcl->next		= cpu_to_le32(PCL_NEXT_INVALID);
	lynx->rcv_pcl->async_error_next	= cpu_to_le32(PCL_NEXT_INVALID);

	lynx->rcv_pcl->buffer[0].control =
			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
	lynx->rcv_pcl->buffer[0].pointer =
			cpu_to_le32(lynx->rcv_buffer_bus + 4);
	p = lynx->rcv_buffer_bus + 2048;
	end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
	for (i = 1; p < end; i++, p += 2048) {
		lynx->rcv_pcl->buffer[i].control =
			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
		lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
	}
	lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);

	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
	/* Fix buggy cards with autoboot pin not tied low: */
	reg_write(lynx, DMA0_CHAN_CTRL, 0);
	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);

#if 0
	/* now, looking for PHY register set */
	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
		lynx->phyic.reg_1394a = 1;
		PRINT(KERN_INFO, lynx->id,
		      "found 1394a conform PHY (using extended register set)");
		lynx->phyic.vendor = get_phy_vendorid(lynx);
		lynx->phyic.product = get_phy_productid(lynx);
	} else {
		lynx->phyic.reg_1394a = 0;
		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
	}
#endif

	/* Setup the general receive FIFO max size. */
	reg_write(lynx, FIFO_SIZES, 255);

	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

	reg_write(lynx, LINK_INT_ENABLE,
		  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
		  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
		  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
		  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
		  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);

	/* Disable the L flag in self ID packets. */
	set_phy_reg(lynx, 4, 0);

	/* Put this baby into snoop mode */
	reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);

	run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);

	if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
			driver_name, lynx)) {
		dev_err(&dev->dev,
			"Failed to allocate shared interrupt %d\n", dev->irq);
		ret = -EIO;
		goto fail_deallocate_buffers;
	}

	lynx->misc.parent = &dev->dev;
	lynx->misc.minor = MISC_DYNAMIC_MINOR;
	lynx->misc.name = "nosy";
	lynx->misc.fops = &nosy_ops;

	mutex_lock(&card_mutex);
	ret = misc_register(&lynx->misc);
	if (ret) {
		dev_err(&dev->dev, "Failed to register misc char device\n");
		mutex_unlock(&card_mutex);
		goto fail_free_irq;
	}
	list_add_tail(&lynx->link, &card_list);
	mutex_unlock(&card_mutex);

	dev_info(&dev->dev,
		 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);

	return 0;

fail_free_irq:
	reg_write(lynx, PCI_INT_ENABLE, 0);
	free_irq(lynx->pci_device->irq, lynx);

fail_deallocate_buffers:
	if (lynx->rcv_start_pcl)
		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
				lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
	if (lynx->rcv_pcl)
		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
				lynx->rcv_pcl, lynx->rcv_pcl_bus);
	if (lynx->rcv_buffer)
		pci_free_consistent(lynx->pci_device, PAGE_SIZE,
				lynx->rcv_buffer, lynx->rcv_buffer_bus);
	iounmap(lynx->registers);

fail_deallocate_lynx:
	kfree(lynx);

fail_disable:
	pci_disable_device(dev);

	return ret;
}

static struct pci_device_id pci_table[] = {
	{
		.vendor =    PCI_VENDOR_ID_TI,
		.device =    PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }	/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver lynx_pci_driver = {
	.name =		driver_name,
	.id_table =	pci_table,
	.probe =	add_card,
	.remove =	remove_card,
};

module_pci_driver(lynx_pci_driver);

MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");