netjet.c

/*
 * NETJet mISDN driver
 *
 * Author	Karsten Keil <keil@isdn4linux.de>
 *
 * Copyright 2009 by Karsten Keil <keil@isdn4linux.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
#include <linux/isdn/hdlc.h>

#define NETJET_REV	"2.0"

enum nj_types {
	NETJET_S_TJ300,
	NETJET_S_TJ320,
	ENTERNOW__TJ320,
};
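
/*
 * Book-keeping for one DMA ring: 'start' and 'size' describe the ring in
 * virtual memory (u32 words); the dma* fields hold the bus addresses
 * programmed into the controller (ring start, half-way interrupt point,
 * ring end) plus the last DMA pointer read back from it, and 'idx' is the
 * corresponding word index into the ring.
 */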
struct tiger_dma {
	size_t size;
	u32 *start;
	int idx;
	u32 dmastart;
	u32 dmairq;
	u32 dmaend;
	u32 dmacur;
};

struct tiger_hw;

struct tiger_ch {
	struct bchannel bch;
	struct tiger_hw *nj;
	int idx;		/* current TX fill position in the send ring */
	int free;		/* u32 slots currently free for TX in the send ring */
	int lastrx;		/* last RX ring index handled; a repeat signals an overrun */
	u16 rxstate;
	u16 txstate;
	struct isdnhdlc_vars hsend;	/* HDLC encoder state */
	struct isdnhdlc_vars hrecv;	/* HDLC decoder state */
	u8 *hsbuf;		/* HDLC encode scratch buffer */
	u8 *hrbuf;		/* HDLC decode scratch buffer */
};

#define TX_INIT		0x0001
#define TX_IDLE		0x0002
#define TX_RUN		0x0004
#define TX_UNDERRUN	0x0100
#define RX_OVERRUN	0x0100

#define LOG_SIZE	64

struct tiger_hw {
	struct list_head list;
	struct pci_dev *pdev;
	char name[MISDN_MAX_IDLEN];
	enum nj_types typ;
	int irq;
	u32 irqcnt;
	u32 base;
	size_t base_s;
	dma_addr_t dma;
	void *dma_p;
	spinlock_t lock;	/* lock HW */
	struct isac_hw isac;
	struct tiger_dma send;
	struct tiger_dma recv;
	struct tiger_ch bc[2];
	u8 ctrlreg;
	u8 dmactrl;
	u8 auxd;
	u8 last_is0;
	u8 irqmask0;
	char log[LOG_SIZE];
};
static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static u32 debug;
static int nj_cnt;

static void
_set_debug(struct tiger_hw *card)
{
	card->isac.dch.debug = debug;
	card->bc[0].bch.debug = debug;
	card->bc[1].bch.debug = debug;
}

static int
set_debug(const char *val, struct kernel_param *kp)
{
	int ret;
	struct tiger_hw *card;

	ret = param_set_uint(val, kp);
	if (!ret) {
		read_lock(&card_lock);
		list_for_each_entry(card, &Cards, list)
			_set_debug(card);
		read_unlock(&card_lock);
	}
	return ret;
}

MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");

static void
nj_disable_hwirq(struct tiger_hw *card)
{
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
}
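
/*
 * The ISAC is reached indirectly through the TigerJet AUX port: bits 0-1
 * of AUXDATA select the upper nibble of the ISAC register offset, while
 * the lower nibble is encoded into the I/O address (shifted left by 2).
 * The FIFO helpers always access offset 0 and use string I/O.
 */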
static u8
ReadISAC_nj(void *p, u8 offset)
{
	struct tiger_hw *card = p;
	u8 ret;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
	return ret;
}

static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}

static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
}

static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
}
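
/*
 * Both B-channels share one ring of 32 bit words: B1 occupies the low
 * byte of each word, B2 the next byte. fill_mem() patches only the byte
 * lane that belongs to the given channel and leaves the other untouched.
 */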
static void
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
{
	struct tiger_hw *card = bc->bch.hw;
	u32 mask = 0xff, val;

	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
		 bc->bch.nr, fill, cnt, idx, card->send.idx);
	if (bc->bch.nr & 2) {
		fill <<= 8;
		mask <<= 8;
	}
	mask ^= 0xffffffff;
	while (cnt--) {
		val = card->send.start[idx];
		val &= mask;
		val |= fill;
		card->send.start[idx++] = val;
		if (idx >= card->send.size)
			idx = 0;
	}
}

static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		 bc->bch.nr, bc->bch.state, protocol);
	switch (protocol) {
	case ISDN_P_NONE:
		if (bc->bch.state == ISDN_P_NONE)
			break;
		fill_mem(bc, 0, card->send.size, 0xff);
		bc->bch.state = protocol;
		/* only stop dma and interrupts if both channels NULL */
		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
		    (card->bc[1].bch.state == ISDN_P_NONE)) {
			card->dmactrl = 0;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0, card->base + NJ_IRQMASK0);
		}
		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->txstate = 0;
		bc->rxstate = 0;
		bc->lastrx = -1;
		break;
	case ISDN_P_B_RAW:
		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	case ISDN_P_B_HDLC:
		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size / 2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		isdnhdlc_rcv_init(&bc->hrecv, 0);
		isdnhdlc_out_init(&bc->hsend, 0);
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	default:
		pr_info("%s: %s protocol %x not handled\n", card->name,
			__func__, protocol);
		return -ENOPROTOOPT;
	}
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq %02x/%02x idx %d/%d\n",
		 card->name, __func__,
		 inb(card->base + NJ_DMACTRL),
		 inb(card->base + NJ_IRQMASK0),
		 inb(card->base + NJ_IRQSTAT0),
		 card->send.idx,
		 card->recv.idx);
	return 0;
}
static void
nj_reset(struct tiger_hw *card)
{
	outb(0xff, card->base + NJ_CTRL); /* Reset On */
	mdelay(1);

	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320) /* TJ320 */
		card->ctrlreg = 0x40;	/* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00;	/* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);
	mdelay(10);

	/* configure AUX pins (all output except ISAC IRQ pin) */
	card->auxd = 0;
	card->dmactrl = 0;
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
}
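
/*
 * One DMA area of NJ_DMA_SIZE bytes serves both directions: the first
 * half is the send (READ DMA) ring of NJ_DMA_TXSIZE u32 words, the second
 * half the receive (WRITE DMA) ring of NJ_DMA_RXSIZE words. The controller
 * interrupts at the middle and at the end of each ring, so software always
 * works on the half that is not currently being transferred.
 */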
static int
inittiger(struct tiger_hw *card)
{
	int i;

	card->dma_p = pci_alloc_consistent(card->pdev, NJ_DMA_SIZE,
					   &card->dma);
	if (!card->dma_p) {
		pr_info("%s: No DMA memory\n", card->name);
		return -ENOMEM;
	}
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
		return -ENOMEM;
	}
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
	}
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);

	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->send.dmastart, card->send.dmairq,
			  card->send.dmaend, card->send.start, card->send.size);

	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);

	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x virt %p"
			  " size %zu u32\n", card->name,
			  card->recv.dmastart, card->recv.dmairq,
			  card->recv.dmaend, card->recv.start, card->recv.size);

	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
	return 0;
}
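
/*
 * read_dma() copies this channel's byte lane out of the receive ring half
 * that just completed. Transparent data goes straight into the rx_skb;
 * HDLC data is collected in hrbuf and run through the software HDLC
 * decoder, which may deliver several frames per call.
 */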
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
	struct tiger_hw *card = bc->bch.hw;
	int i, stat;
	u32 val;
	u8 *p, *pn;

	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
			bc->bch.nr, idx);
	}
	bc->lastrx = idx;
	if (!bc->bch.rx_skb) {
		bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, GFP_ATOMIC);
		if (!bc->bch.rx_skb) {
			pr_info("%s: B%1d receive out of memory\n",
				card->name, bc->bch.nr);
			return;
		}
	}

	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		if ((bc->bch.rx_skb->len + cnt) > bc->bch.maxlen) {
			pr_debug("%s: B%1d overrun %d\n", card->name,
				 bc->bch.nr, bc->bch.rx_skb->len + cnt);
			skb_trim(bc->bch.rx_skb, 0);
			return;
		}
		p = skb_put(bc->bch.rx_skb, cnt);
	} else
		p = bc->hrbuf;

	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (bc->bch.nr & 2)
			val >>= 8;
		if (idx >= card->recv.size)
			idx = 0;
		p[i] = val & 0xff;
	}
	pn = bc->hrbuf;
next_frame:
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
				       bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
		else if (stat == -HDLC_CRC_ERROR)
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_FRAMING_ERROR)
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_LENGTH_ERROR)
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
	} else
		stat = cnt;

	if (stat > 0) {
		if (debug & DEBUG_HW_BFIFO) {
			snprintf(card->log, LOG_SIZE, "B%1d-recv %s %d ",
				 bc->bch.nr, card->name, stat);
			print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET,
					     p, stat);
		}
		recv_Bchannel(&bc->bch, 0);
	}
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		pn += i;
		cnt -= i;
		if (!bc->bch.rx_skb) {
			bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen,
						      GFP_ATOMIC);
			if (!bc->bch.rx_skb) {
				pr_info("%s: B%1d receive out of memory\n",
					card->name, bc->bch.nr);
				return;
			}
		}
		if (cnt > 0)
			goto next_frame;
	}
}
static void
recv_tiger(struct tiger_hw *card, u8 irq_stat)
{
	u32 idx;
	int cnt = card->recv.size / 2;

	/* Note receive is via the WRITE DMA channel */
	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);

	if (irq_stat & NJ_IRQM0_WR_END)
		idx = cnt - 1;
	else
		idx = card->recv.size - 1;

	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
		read_dma(&card->bc[0], idx, cnt);
	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
		read_dma(&card->bc[1], idx, cnt);
}
/* sync with current DMA address at start or after exception */
static void
resync(struct tiger_ch *bc, struct tiger_hw *card)
{
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (bc->free > card->send.size / 2)
		bc->free = card->send.size / 2;
	/* currently we simply sync to the next complete free area;
	 * this has the advantage that we always have maximum time to
	 * handle the TX irq
	 */
	if (card->send.idx < ((card->send.size / 2) - 1))
		bc->idx = (card->recv.size / 2) - 1;
	else
		bc->idx = card->recv.size - 1;
	bc->txstate = TX_RUN;
	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
}
static int bc_next_frame(struct tiger_ch *);

static void
fill_hdlc_flag(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8 *p;

	if (bc->free == 0)
		return;
	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->txstate,
		 bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	count = isdnhdlc_encode(&bc->hsend, NULL, 0, &i,
				bc->hsbuf, bc->free);
	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
		 bc->bch.nr, count);
	bc->free -= count;
	p = bc->hsbuf;
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
}
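
/*
 * fill_dma() moves data from the current tx_skb into this channel's byte
 * lane of the send ring. For HDLC channels the payload is first run
 * through the software HDLC encoder into hsbuf; transparent data is
 * copied as-is, limited by the free space in the ring.
 */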
static void
fill_dma(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8 *p;

	if (bc->free == 0)
		return;
	count = bc->bch.tx_skb->len - bc->bch.tx_idx;
	if (count <= 0)
		return;
	pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, count, bc->free, bc->bch.tx_idx,
		 bc->bch.tx_skb->len, bc->txstate, bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	p = bc->bch.tx_skb->data + bc->bch.tx_idx;
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
					bc->hsbuf, bc->free);
		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
			 bc->bch.nr, i, count);
		bc->bch.tx_idx += i;
		bc->free -= count;
		p = bc->hsbuf;
	} else {
		if (count > bc->free)
			count = bc->free;
		bc->bch.tx_idx += count;
		bc->free -= count;
	}
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
	if (bc->free)
		bc_next_frame(bc);
}

static int
bc_next_frame(struct tiger_ch *bc)
{
	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len)
		fill_dma(bc);
	else {
		if (bc->bch.tx_skb) {
			/* send confirm, on trans, free on hdlc. */
			if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
				confirm_Bsend(&bc->bch);
			dev_kfree_skb(bc->bch.tx_skb);
		}
		if (get_next_bframe(&bc->bch))
			fill_dma(bc);
		else
			return 0;
	}
	return 1;
}
static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
	int ret;

	bc->free += card->send.size / 2;
	if (bc->free >= card->send.size) {
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		}
		bc->free = card->send.size;
	}
	ret = bc_next_frame(bc);
	if (!ret) {
		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
			fill_hdlc_flag(bc);
			return;
		}
		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
			 bc->bch.nr, bc->free, bc->idx, card->send.idx);
		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
			fill_mem(bc, bc->idx, bc->free, 0xff);
			if (bc->free == card->send.size)
				bc->txstate |= TX_IDLE;
		}
	}
}

static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
	int i;

	/* Note send is via the READ DMA channel */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
		return;
	} else {
		card->last_is0 &= ~NJ_IRQM0_RD_MASK;
		card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);
	}
	for (i = 0; i < 2; i++) {
		if (test_bit(FLG_ACTIVE, &card->bc[i].bch.Flags))
			send_tiger_bc(card, &card->bc[i]);
	}
}
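
/*
 * The interrupt handler first lets the ISAC handle a possible D-channel
 * interrupt, then derives a synthetic DMA status byte from the current
 * DMA pointers (0x01/0x02 = first/second read area free, 0x04/0x08 =
 * first/second write area free). A change relative to last_is0 in the
 * read bits drives the send path, a change in the write bits the
 * receive path.
 */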
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;

	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		/* shared IRQ */
		spin_unlock(&card->lock);
		return IRQ_NONE;
	}
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	card->irqcnt++;
	if (!(s1val & NJ_ISACIRQ)) {
		val = ReadISAC_nj(card, ISAC_ISTA);
		if (val)
			mISDNisac_irq(&card->isac, val);
	}

	if (s0val)
		/* write to clear */
		outb(s0val, card->base | NJ_IRQSTAT0);
	else
		goto end;
	s1val = s0val;
	/* set bits in sval to indicate which page is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08;	/* the 2nd write area is free */
	else
		s0val = 0x04;	/* the 1st write area is free */

	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02;	/* the 2nd read area is free */
	else
		s0val |= 0x01;	/* the 1st read area is free */

	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		 s1val, s0val, card->last_is0,
		 card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		    (card->last_is0 & NJ_IRQM0_RD_MASK))
			/* got a read dma int */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		    (card->last_is0 & NJ_IRQM0_WR_MASK))
			/* got a write dma int */
			recv_tiger(card, s0val);
	}
end:
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}
static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	int ret = -EINVAL;
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	u32 id;
	u_long flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&card->lock, flags);
		ret = bchannel_senddata(bch, skb);
		if (ret > 0) { /* direct TX */
			id = hh->id; /* skb can be freed */
			fill_dma(bc);
			ret = 0;
			spin_unlock_irqrestore(&card->lock, flags);
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
		} else
			spin_unlock_irqrestore(&card->lock, flags);
		return ret;
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		else
			ret = 0;
		spin_unlock_irqrestore(&card->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
				    NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}
static int
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
{
	int ret = 0;
	struct tiger_hw *card = bc->bch.hw;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = 0;
		break;
	/* Nothing implemented yet */
	case MISDN_CTRL_FILL_EMPTY:
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	int ret = -EINVAL;
	u_long flags;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
			spin_lock_irqsave(&card->lock, flags);
			mISDN_freebchannel(bch);
			test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
			test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
			mode_tiger(bc, ISDN_P_NONE);
			spin_unlock_irqrestore(&card->lock, flags);
		}
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}

static int
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
{
	int ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_LOOP;
		break;
	case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
		if (cq->channel < 0 || cq->channel > 3) {
			ret = -EINVAL;
			break;
		}
		ret = card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
		break;
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
static int
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
{
	struct bchannel *bch;

	/* channel 0 would index bc[-1], so reject it as well */
	if (rq->adr.channel == 0 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	bch = &card->bc[rq->adr.channel - 1].bch;
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can be only opened once */
	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch;
	return 0;
}
/*
 * device control function
 */
static int
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct tiger_hw *card = dch->hw;
	struct channel_req *rq;
	int err = 0;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		if (rq->protocol == ISDN_P_TE_S0)
			err = card->isac.open(&card->isac, rq);
		else
			err = open_bchannel(card, rq);
		if (err)
			break;
		if (!try_module_get(THIS_MODULE))
			pr_info("%s: cannot get module\n", card->name);
		break;
	case CLOSE_CHANNEL:
		pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
			 __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(card, arg);
		break;
	default:
		pr_debug("%s: %s unknown command %x\n",
			 card->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}
static int
nj_init_card(struct tiger_hw *card)
{
	u_long flags;
	int ret;

	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);
	spin_unlock_irqrestore(&card->lock, flags);

	card->irq = card->pdev->irq;
	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
		pr_info("%s: couldn't get interrupt %d\n",
			card->name, card->irq);
		card->irq = -1;
		return -EIO;
	}

	spin_lock_irqsave(&card->lock, flags);
	nj_reset(card);
	ret = card->isac.init(&card->isac);
	if (ret)
		goto error;
	ret = inittiger(card);
	if (ret)
		goto error;
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
error:
	spin_unlock_irqrestore(&card->lock, flags);
	return ret;
}
static void
nj_release(struct tiger_hw *card)
{
	u_long flags;
	int i;

	if (card->base_s) {
		spin_lock_irqsave(&card->lock, flags);
		nj_disable_hwirq(card);
		mode_tiger(&card->bc[0], ISDN_P_NONE);
		mode_tiger(&card->bc[1], ISDN_P_NONE);
		card->isac.release(&card->isac);
		spin_unlock_irqrestore(&card->lock, flags);
		release_region(card->base, card->base_s);
		card->base_s = 0;
	}
	if (card->irq > 0)
		free_irq(card->irq, card);
	if (card->isac.dch.dev.dev.class)
		mISDN_unregister_device(&card->isac.dch.dev);

	for (i = 0; i < 2; i++) {
		mISDN_freebchannel(&card->bc[i].bch);
		kfree(card->bc[i].hsbuf);
		kfree(card->bc[i].hrbuf);
	}
	if (card->dma_p)
		pci_free_consistent(card->pdev, NJ_DMA_SIZE,
				    card->dma_p, card->dma);
	write_lock_irqsave(&card_lock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&card_lock, flags);
	pci_clear_master(card->pdev);
	pci_disable_device(card->pdev);
	pci_set_drvdata(card->pdev, NULL);
	kfree(card);
}
static int
nj_setup(struct tiger_hw *card)
{
	card->base = pci_resource_start(card->pdev, 0);
	card->base_s = pci_resource_len(card->pdev, 0);
	if (!request_region(card->base, card->base_s, card->name)) {
		pr_info("%s: NETjet config port %#x-%#x already in use\n",
			card->name, card->base,
			(u32)(card->base + card->base_s - 1));
		card->base_s = 0;
		return -EIO;
	}
	ASSIGN_FUNC(nj, ISAC, card->isac);
	return 0;
}
static int __devinit
setup_instance(struct tiger_hw *card)
{
	int i, err;
	u_long flags;

	snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
	write_lock_irqsave(&card_lock, flags);
	list_add_tail(&card->list, &Cards);
	write_unlock_irqrestore(&card_lock, flags);

	_set_debug(card);
	card->isac.name = card->name;
	spin_lock_init(&card->lock);
	card->isac.hwlock = &card->lock;
	mISDNisac_init(&card->isac, card);

	card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->isac.dch.dev.D.ctrl = nj_dctrl;
	for (i = 0; i < 2; i++) {
		card->bc[i].bch.nr = i + 1;
		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM);
		card->bc[i].bch.hw = card;
		card->bc[i].bch.ch.send = nj_l2l1B;
		card->bc[i].bch.ch.ctrl = nj_bctrl;
		card->bc[i].bch.ch.nr = i + 1;
		list_add(&card->bc[i].bch.ch.list,
			 &card->isac.dch.dev.bchannels);
		card->bc[i].bch.hw = card;
	}
	err = nj_setup(card);
	if (err)
		goto error;
	err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
				    card->name);
	if (err)
		goto error;
	err = nj_init_card(card);
	if (!err) {
		nj_cnt++;
		pr_notice("Netjet %d cards installed\n", nj_cnt);
		return 0;
	}
error:
	nj_release(card);
	return err;
}
static int __devinit
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	u32 cfg;
	struct tiger_hw *card;

	if (pdev->subsystem_vendor == 0x8086 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium X100P/X101P not handled\n");
		return -ENODEV;
	}

	if (pdev->subsystem_vendor == 0x55 &&
	    pdev->subsystem_device == 0x02) {
		pr_notice("Netjet: Enter!Now not handled yet\n");
		return -ENODEV;
	}

	if (pdev->subsystem_vendor == 0xb100 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium TDM400P not handled yet\n");
		return -ENODEV;
	}

	card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
	if (!card) {
		pr_info("No kmem for Netjet\n");
		return err;
	}

	card->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
	       pci_name(pdev));

	pci_set_master(pdev);

	/* the TJ300 and TJ320 must be detected since the IRQ handling is
	 * different; unfortunately the chips use the same device ID, but
	 * the TJ320 has bit 20 set in the PCI status/command config dword
	 */
	pci_read_config_dword(pdev, 0x04, &cfg);
	if (cfg & 0x00100000)
		card->typ = NETJET_S_TJ320;
	else
		card->typ = NETJET_S_TJ300;

	card->base = pci_resource_start(pdev, 0);
	card->irq = pdev->irq;
	pci_set_drvdata(pdev, card);
	err = setup_instance(card);
	if (err)
		pci_set_drvdata(pdev, NULL);
	return err;
}
static void __devexit nj_remove(struct pci_dev *pdev)
{
	struct tiger_hw *card = pci_get_drvdata(pdev);

	if (card)
		nj_release(card);
	else
		pr_info("%s drvdata already removed\n", __func__);
}

/* We cannot select cards with PCI_SUB... IDs, since there are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject
 * known other cards which do not work with this driver - see probe function */
static struct pci_device_id nj_pci_ids[] __devinitdata = {
	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ }
};
MODULE_DEVICE_TABLE(pci, nj_pci_ids);

static struct pci_driver nj_driver = {
	.name = "netjet",
	.probe = nj_probe,
	.remove = __devexit_p(nj_remove),
	.id_table = nj_pci_ids,
};

static int __init nj_init(void)
{
	int err;

	pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
	err = pci_register_driver(&nj_driver);
	return err;
}

static void __exit nj_cleanup(void)
{
	pci_unregister_driver(&nj_driver);
}

module_init(nj_init);
module_exit(nj_cleanup);