isdnl2.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840
  1. /* $Id: isdnl2.c,v 2.30.2.4 2004/02/11 13:21:34 keil Exp $
  2. *
  3. * Author Karsten Keil
  4. * based on the teles driver from Jan den Ouden
  5. * Copyright by Karsten Keil <keil@isdn4linux.de>
  6. *
  7. * This software may be used and distributed according to the terms
  8. * of the GNU General Public License, incorporated herein by reference.
  9. *
  10. * For changes and modifications please read
  11. * Documentation/isdn/HiSax.cert
  12. *
  13. * Thanks to Jan den Ouden
  14. * Fritz Elfert
  15. *
  16. */
  17. #include <linux/init.h>
  18. #include <linux/gfp.h>
  19. #include "hisax.h"
  20. #include "isdnl2.h"
/* CVS revision string, exported for driver version reporting */
const char *l2_revision = "$Revision: 2.30.2.4 $";

/* forward declaration: printf-style debug output for a layer-2 FSM instance */
static void l2m_debug(struct FsmInst *fi, char *fmt, ...);

/* the shared layer-2 state machine description (states/events/handlers) */
static struct Fsm l2fsm;
/*
 * Layer-2 protocol states.  The numbering presumably follows the
 * Q.921 state diagram (4 = TEI assigned, 5 = awaiting establishment,
 * 6 = awaiting release, 7 = multiple frame established, 8 = timer
 * recovery) -- confirm against ITU-T Q.921 before relying on it.
 * Keep strL2State[] below in sync with this enum.
 */
enum {
	ST_L2_1,
	ST_L2_2,
	ST_L2_3,
	ST_L2_4,
	ST_L2_5,
	ST_L2_6,
	ST_L2_7,
	ST_L2_8,
};

/* number of states, for the Fsm table size */
#define L2_STATE_COUNT (ST_L2_8 + 1)
/* human-readable state names for FSM debugging; indexed by the state enum */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
/*
 * Events fed into the layer-2 FSM: received frame types (UI..I),
 * requests from layer 3 (DL_*), management primitives (MDL_*),
 * layer-1 deactivation, timer expiries (T200/T203) and flow-control
 * notifications.  Keep strL2Event[] below in sync with this enum.
 */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNIT_DATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

/* number of events, for the Fsm table size */
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR + 1)
/* human-readable event names for FSM debugging; indexed by the event enum */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNIT_DATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
static int l2addrsize(struct Layer2 *l2);

/*
 * Mark the peer as busy (we received RNR).  If we still have I or UI
 * frames queued, additionally block layer-2 output via FLG_L2BLOCK.
 */
static void
set_peer_busy(struct Layer2 *l2) {
	test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
	if (!skb_queue_empty(&l2->i_queue) ||
	    !skb_queue_empty(&l2->ui_queue))
		test_and_set_bit(FLG_L2BLOCK, &l2->flag);
}
/*
 * Clear the peer-busy condition; the output block is only lifted if
 * the busy flag was actually set.
 */
static void
clear_peer_busy(struct Layer2 *l2) {
	if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
		test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
  109. static void
  110. InitWin(struct Layer2 *l2)
  111. {
  112. int i;
  113. for (i = 0; i < MAX_WINDOW; i++)
  114. l2->windowar[i] = NULL;
  115. }
  116. static int
  117. freewin1(struct Layer2 *l2)
  118. {
  119. int i, cnt = 0;
  120. for (i = 0; i < MAX_WINDOW; i++) {
  121. if (l2->windowar[i]) {
  122. cnt++;
  123. dev_kfree_skb(l2->windowar[i]);
  124. l2->windowar[i] = NULL;
  125. }
  126. }
  127. return cnt;
  128. }
/* Convenience wrapper: flush the transmit window of a protocol stack. */
static inline void
freewin(struct PStack *st)
{
	freewin1(&st->l2);
}
  134. static void
  135. ReleaseWin(struct Layer2 *l2)
  136. {
  137. int cnt;
  138. if ((cnt = freewin1(l2)))
  139. printk(KERN_WARNING "isdl2 freed %d skbuffs in release\n", cnt);
  140. }
  141. static inline unsigned int
  142. cansend(struct PStack *st)
  143. {
  144. unsigned int p1;
  145. if (test_bit(FLG_MOD128, &st->l2.flag))
  146. p1 = (st->l2.vs - st->l2.va) % 128;
  147. else
  148. p1 = (st->l2.vs - st->l2.va) % 8;
  149. return ((p1 < st->l2.window) && !test_bit(FLG_PEER_BUSY, &st->l2.flag));
  150. }
/* Clear all exception conditions: pending ack, REJ sent, own and peer busy. */
static inline void
clear_exception(struct Layer2 *l2)
{
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	test_and_clear_bit(FLG_REJEXC, &l2->flag);
	test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
	clear_peer_busy(l2);
}
/*
 * Total layer-2 header length: 2-byte control field for mod-128
 * numbered frames (UI frames always use 1), plus a 2-byte LAPD or
 * 1-byte LAPB address field.
 */
static inline int
l2headersize(struct Layer2 *l2, int ui)
{
	return (((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
		(test_bit(FLG_LAPD, &l2->flag) ? 2 : 1));
}
/*
 * Address field size: 2 bytes (SAPI/TEI) for LAPD, 1 byte for LAPB.
 * Note: declared static above, so this definition has internal linkage.
 */
inline int
l2addrsize(struct Layer2 *l2)
{
	return (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
}
/*
 * Write the layer-2 address field into header.  rsp selects the
 * command/response encoding.  Returns the number of bytes written
 * (2 for LAPD, 1 for LAPB).
 */
static int
sethdraddr(struct Layer2 *l2, u_char *header, int rsp)
{
	u_char *ptr = header;
	int crbit = rsp;

	if (test_bit(FLG_LAPD, &l2->flag)) {
		/* LAPD: SAPI with C/R bit, then TEI with EA bit set */
		*ptr++ = (l2->sap << 2) | (rsp ? 2 : 0);
		*ptr++ = (l2->tei << 1) | 1;
		return (2);
	} else {
		/* LAPB: C/R sense is inverted for the originating side */
		if (test_bit(FLG_ORIG, &l2->flag))
			crbit = !crbit;
		if (crbit)
			*ptr++ = 1;
		else
			*ptr++ = 3;
		return (1);
	}
}
/*
 * Hand a fully built supervisory/unnumbered frame down to layer 1.
 * For LAPB the B-channel transmit byte counter is updated first.
 */
static inline void
enqueue_super(struct PStack *st,
	      struct sk_buff *skb)
{
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l1.bcs->tx_cnt += skb->len;
	st->l2.l2l1(st, PH_DATA | REQUEST, skb);
}

/* UI frames are queued exactly like supervisory frames */
#define enqueue_ui(a, b) enqueue_super(a, b)
/* Is this control byte a UI frame?  (mask ignores the P/F bit) */
static inline int
IsUI(u_char *data)
{
	return ((data[0] & 0xef) == UI);
}
/* Is this control byte a UA frame?  (mask ignores the P/F bit) */
static inline int
IsUA(u_char *data)
{
	return ((data[0] & 0xef) == UA);
}
/* Is this control byte a DM frame?  (mask ignores the P/F bit) */
static inline int
IsDM(u_char *data)
{
	return ((data[0] & 0xef) == DM);
}
/* Is this control byte a DISC frame?  (mask ignores the P/F bit) */
static inline int
IsDISC(u_char *data)
{
	return ((data[0] & 0xef) == DISC);
}
/*
 * Is this a supervisory frame (RR/RNR/REJ)?  Low two bits must be 01
 * and the S-type bits must not be the reserved 11 pattern.
 */
static inline int
IsSFrame(u_char *data, struct PStack *st)
{
	register u_char d = *data;

	/* basic (mod-8) operation keeps N(R) in the high nibble */
	if (!test_bit(FLG_MOD128, &st->l2.flag))
		d &= 0xf;
	return (((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c));
}
/* Is this a SABME (mod-128) or SABM (mod-8) frame?  P/F bit masked out. */
static inline int
IsSABME(u_char *data, struct PStack *st)
{
	u_char d = data[0] & ~0x10;

	return (test_bit(FLG_MOD128, &st->l2.flag) ? d == SABME : d == SABM);
}
/* Is this a REJ supervisory frame?  (mod-8 keeps only the low nibble) */
static inline int
IsREJ(u_char *data, struct PStack *st)
{
	return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == REJ : (data[0] & 0xf) == REJ);
}
/* Is this control byte a FRMR frame?  (mask ignores the P/F bit) */
static inline int
IsFRMR(u_char *data)
{
	return ((data[0] & 0xef) == FRMR);
}
/* Is this an RNR supervisory frame?  (mod-8 keeps only the low nibble) */
static inline int
IsRNR(u_char *data, struct PStack *st)
{
	return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == RNR : (data[0] & 0xf) == RNR);
}
/*
 * Validate a received I frame.  Returns an MDL_ERROR code character:
 * 'L' wrong C/R direction, 'N' frame too short, 'O' payload exceeds
 * the negotiated maximum; 0 when the frame is acceptable.
 */
static int
iframe_error(struct PStack *st, struct sk_buff *skb)
{
	/* header size: address + control (2 bytes with mod-128) */
	int i = l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1);
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp)	/* I frames must be commands */
		return 'L';
	if (skb->len < i)
		return 'N';
	if ((skb->len - i) > st->l2.maxlen)
		return 'O';
	return 0;
}
/*
 * Validate a received supervisory frame: its length must be exactly
 * address + control field.  Returns 'N' on length error, else 0.
 */
static int
super_error(struct PStack *st, struct sk_buff *skb)
{
	if (skb->len != l2addrsize(&st->l2) +
	    (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1))
		return 'N';
	return 0;
}
/*
 * Validate a received unnumbered frame.  wantrsp says whether a
 * response (1) or command (0) is expected.  Returns 'L' on C/R
 * mismatch, 'N' on length error, 0 when acceptable.
 */
static int
unnum_error(struct PStack *st, struct sk_buff *skb, int wantrsp)
{
	int rsp = (*skb->data & 0x2) >> 1;

	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp != wantrsp)
		return 'L';
	if (skb->len != l2addrsize(&st->l2) + 1)
		return 'N';
	return 0;
}
/*
 * Validate a received UI frame: must be a command and must not
 * exceed the maximum payload.  Returns 'L'/'O' error codes or 0.
 */
static int
UI_error(struct PStack *st, struct sk_buff *skb)
{
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len > st->l2.maxlen + l2addrsize(&st->l2) + 1)
		return 'O';
	return 0;
}
/*
 * Validate a received FRMR frame and log its information field
 * (5 bytes with mod-128 sequencing, 3 bytes with mod-8).  FRMR must
 * be a response.  Returns 'L'/'N' error codes or 0.
 */
static int
FRMR_error(struct PStack *st, struct sk_buff *skb)
{
	int headers = l2addrsize(&st->l2) + 1;
	u_char *datap = skb->data + headers;
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (!rsp)
		return 'L';
	if (test_bit(FLG_MOD128, &st->l2.flag)) {
		if (skb->len < headers + 5)
			return 'N';
		else
			l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x %2x %2x",
				  datap[0], datap[1], datap[2],
				  datap[3], datap[4]);
	} else {
		if (skb->len < headers + 3)
			return 'N';
		else
			l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x",
				  datap[0], datap[1], datap[2]);
	}
	return 0;
}
/*
 * Is the received N(R) acknowledgement number within the legal range
 * V(A) <= N(R) <= V(S), computed in modulo arithmetic?
 */
static unsigned int
legalnr(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;

	if (test_bit(FLG_MOD128, &l2->flag))
		return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
	else
		return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
}
/*
 * Advance V(A) up to the acknowledged sequence number nr, freeing the
 * acknowledged frames from the transmit window.  The window lock is
 * dropped around lli_writewakeup() since that callback may sleep or
 * re-enter -- NOTE(review): presumably; confirm against lli code.
 */
static void
setva(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	int len;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	while (l2->va != nr) {
		(l2->va)++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		len = l2->windowar[l2->sow]->len;
		/* PACKET_NOACK frames do not generate a wakeup */
		if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type)
			len = -1;
		dev_kfree_skb(l2->windowar[l2->sow]);
		l2->windowar[l2->sow] = NULL;
		l2->sow = (l2->sow + 1) % l2->window;
		spin_unlock_irqrestore(&l2->lock, flags);
		if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >= 0))
			lli_writewakeup(st, len);
		spin_lock_irqsave(&l2->lock, flags);
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
  355. static void
  356. send_uframe(struct PStack *st, u_char cmd, u_char cr)
  357. {
  358. struct sk_buff *skb;
  359. u_char tmp[MAX_HEADER_LEN];
  360. int i;
  361. i = sethdraddr(&st->l2, tmp, cr);
  362. tmp[i++] = cmd;
  363. if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
  364. printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n");
  365. return;
  366. }
  367. memcpy(skb_put(skb, i), tmp, i);
  368. enqueue_super(st, skb);
  369. }
/* Extract the P/F bit (0x10) from the control byte after the address field. */
static inline u_char
get_PollFlag(struct PStack *st, struct sk_buff *skb)
{
	return (skb->data[l2addrsize(&(st->l2))] & 0x10);
}
/* As get_PollFlag(), but consumes (frees) the skb afterwards. */
static inline u_char
get_PollFlagFree(struct PStack *st, struct sk_buff *skb)
{
	u_char PF;

	PF = get_PollFlag(st, skb);
	dev_kfree_skb(skb);
	return (PF);
}
/* Start the T200 retransmission timer; i tags the call site for debugging. */
static inline void
start_t200(struct PStack *st, int i)
{
	FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}
/* Restart T200 (cancels any running instance); i tags the call site. */
static inline void
restart_t200(struct PStack *st, int i)
{
	FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}
/* Stop T200 if it is running; i tags the call site for debugging. */
static inline void
stop_t200(struct PStack *st, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &st->l2.flag))
		FsmDelTimer(&st->l2.t200, i);
}
  401. static inline void
  402. st5_dl_release_l2l3(struct PStack *st)
  403. {
  404. int pr;
  405. if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
  406. pr = DL_RELEASE | CONFIRM;
  407. else
  408. pr = DL_RELEASE | INDICATION;
  409. st->l2.l2l3(st, pr, NULL);
  410. }
/*
 * Signal DL_RELEASE | f to layer 3; for LAPB also deactivate the
 * physical layer first.
 */
static inline void
lapb_dl_release_l2l3(struct PStack *st, int f)
{
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
	st->l2.l2l3(st, DL_RELEASE | f, NULL);
}
/*
 * (Re-)establish the data link: send SABM/SABME with P bit set,
 * reset the retry counter, flush the transmit window, arm T200 and
 * enter state 5 (awaiting establishment).
 */
static void
establishlink(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;
	u_char cmd;

	clear_exception(&st->l2);
	st->l2.rc = 0;
	/* 0x10 sets the P bit on the SABM(E) command */
	cmd = (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM) | 0x10;
	send_uframe(st, cmd, CMD);
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 1);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
	freewin(st);
	FsmChangeState(fi, ST_L2_5);
}
  433. static void
  434. l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
  435. {
  436. struct sk_buff *skb = arg;
  437. struct PStack *st = fi->userdata;
  438. if (get_PollFlagFree(st, skb))
  439. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'C');
  440. else
  441. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'D');
  442. }
/*
 * Unexpected DM received: report MDL_ERROR 'B' (F=1) or 'E' (F=0);
 * in the F=0 case additionally re-establish the link.
 */
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;

	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
	else {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
}
/*
 * DM received in state 8 (timer recovery): report MDL_ERROR 'B' or
 * 'E' depending on the F bit, then -- unlike l2_mdl_error_dm() --
 * re-establish the link unconditionally.
 */
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;

	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
	else {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
	}
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
/* Simple transition to state 3. */
static void
l2_go_st3(struct FsmInst *fi, int event, void *arg)
{
	FsmChangeState(fi, ST_L2_3);
}
/* Enter state 3 and ask the TEI manager for a TEI assignment. */
static void
l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L2_3);
	st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
}
/*
 * UI data arrived before a TEI was assigned: queue it, move to
 * state 2 and request TEI assignment.
 */
static void
l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
	FsmChangeState(fi, ST_L2_2);
	st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
}
/* Queue UI data while TEI assignment is still in progress. */
static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
}
/*
 * Drain the UI queue: prepend address + UI control byte to each
 * queued skb and hand it to layer 1.
 */
static void
tx_ui(struct PStack *st)
{
	struct sk_buff *skb;
	u_char header[MAX_HEADER_LEN];
	int i;

	i = sethdraddr(&(st->l2), header, CMD);
	header[i++] = UI;
	while ((skb = skb_dequeue(&st->l2.ui_queue))) {
		memcpy(skb_push(skb, i), header, i);
		enqueue_ui(st, skb);
	}
}
/* Queue a UI frame request and transmit the whole UI queue. */
static void
l2_send_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
	tx_ui(st);
}
/* Received UI frame: strip the layer-2 header and deliver to layer 3. */
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2headersize(&st->l2, 1));
	st->l2.l2l3(st, DL_UNIT_DATA | INDICATION, skb);
/*	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *	in states 1-3 for broadcast
 */
}
/* Layer-3 requested establishment: start SABM(E) and remember L3 initiated. */
static void
l2_establish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	establishlink(fi);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
}
/*
 * Establishment requested while already establishing: discard queued
 * I frames, mark L3-initiated, cancel any pending release.
 */
static void
l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
}
/* Layer-3 requested re-establishment: flush I queue and restart SABM(E). */
static void
l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	establishlink(fi);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
}
/* Release requested while already released: confirm immediately. */
static void
l2_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
}
/* Release requested during establishment: mark it pending for later. */
static void
l2_pend_rel(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	test_and_set_bit(FLG_PEND_REL, &st->l2.flag);
}
/*
 * Initiate link disconnection: drop pending I frames and the window,
 * send DISC with P bit set, enter state 6 and arm T200.
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	FsmChangeState(fi, ST_L2_6);
	st->l2.rc = 0;
	send_uframe(st, DISC | 0x10, CMD);	/* 0x10 = P bit */
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 2);
}
/*
 * Peer sent SABM(E) while we are idle: answer UA (echoing the P/F
 * bit), reset all sequence state, enter state 7, start T203 and tell
 * layer 3 the link is established.
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
}
/* Answer an unnumbered command with UA, echoing its P/F bit. */
static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
}
/* Answer an unnumbered command with DM, echoing its P/F bit. */
static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, DM | get_PollFlagFree(st, skb), RSP);
}
/*
 * SABM(E) received while the link is already up: answer UA, report
 * MDL_ERROR 'F', reset sequence state and restart T203.  If frames
 * were outstanding (V(S) != V(A)) the I queue is purged and layer 3
 * gets a fresh DL_ESTABLISH indication; if we were in state 7/8 and
 * have data queued, trigger a pull from layer 1.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0, state;

	state = fi->state;
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'F');
	if (st->l2.vs != st->l2.va) {
		skb_queue_purge(&st->l2.i_queue);
		est = 1;
	}
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	stop_t200(st, 3);
	FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	if (est)
		st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
	if ((ST_L2_7 == state) || (ST_L2_8 == state))
		if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
/*
 * DISC received while the link is up: stop both timers, answer UA,
 * drop all queued/windowed frames and signal DL_RELEASE upward.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	FsmChangeState(fi, ST_L2_4);
	FsmDelTimer(&st->l2.t203, 3);
	stop_t200(st, 4);
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	lapb_dl_release_l2l3(st, INDICATION);
}
/*
 * UA received in state 5 (awaiting establishment).  A UA without the
 * F bit is a protocol error.  Otherwise reset sequence state, enter
 * state 7, start T203, and signal DL_ESTABLISH (CONFIRM if we
 * initiated, INDICATION if outstanding frames were discarded).
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int pr = -1;

	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	/* NOTE(review): no return after l2_disconnect() -- execution
	 * falls through into the establishment path; confirm this is
	 * intentional against the Q.921 SDL. */
	if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
		l2_disconnect(fi, event, arg);
	if (test_and_clear_bit(FLG_L3_INIT, &st->l2.flag)) {
		pr = DL_ESTABLISH | CONFIRM;
	} else if (st->l2.vs != st->l2.va) {
		skb_queue_purge(&st->l2.i_queue);
		pr = DL_ESTABLISH | INDICATION;
	}
	stop_t200(st, 5);
	st->l2.vr = 0;
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		st->l2.l2l3(st, pr, NULL);
	if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
/*
 * UA received in state 6 (awaiting release): without F bit it is a
 * protocol error; otherwise stop T200, confirm the release and enter
 * state 4.
 */
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(st, 6);
	lapb_dl_release_l2l3(st, CONFIRM);
	FsmChangeState(fi, ST_L2_4);
}
/*
 * DM without F bit received while the link is up: re-establish the
 * link (a DM with F=1 is ignored here).
 */
static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlagFree(st, skb)) {
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	}
}
/*
 * DM with F bit received in state 5: peer refuses establishment.
 * Stop T200, discard queued I frames (unless layer 3 still wants the
 * link), deactivate LAPB layer 1, report the release and go to
 * state 4.
 */
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(st, skb)) {
		stop_t200(st, 7);
		if (!test_bit(FLG_L3_INIT, &st->l2.flag))
			skb_queue_purge(&st->l2.i_queue);
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
		FsmChangeState(fi, ST_L2_4);
	}
}
/*
 * DM with F bit received in state 6: treat like UA -- the link is
 * down; stop T200, confirm the release, enter state 4.
 */
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(st, skb)) {
		stop_t200(st, 8);
		lapb_dl_release_l2l3(st, CONFIRM);
		FsmChangeState(fi, ST_L2_4);
	}
}
  726. static inline void
  727. enquiry_cr(struct PStack *st, u_char typ, u_char cr, u_char pf)
  728. {
  729. struct sk_buff *skb;
  730. struct Layer2 *l2;
  731. u_char tmp[MAX_HEADER_LEN];
  732. int i;
  733. l2 = &st->l2;
  734. i = sethdraddr(l2, tmp, cr);
  735. if (test_bit(FLG_MOD128, &l2->flag)) {
  736. tmp[i++] = typ;
  737. tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
  738. } else
  739. tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
  740. if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
  741. printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n");
  742. return;
  743. }
  744. memcpy(skb_put(skb, i), tmp, i);
  745. enqueue_super(st, skb);
  746. }
/*
 * Respond to a peer enquiry (P=1): send RNR if we are busy, else RR,
 * with F=1; this also satisfies any pending acknowledgement.
 */
static inline void
enquiry_response(struct PStack *st)
{
	if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
		enquiry_cr(st, RNR, RSP, 1);
	else
		enquiry_cr(st, RR, RSP, 1);
	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
}
/*
 * Send a status enquiry command (RR or RNR with P=1) to the peer and
 * start T200 awaiting the response.
 */
static inline void
transmit_enquiry(struct PStack *st)
{
	if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
		enquiry_cr(st, RNR, CMD, 1);
	else
		enquiry_cr(st, RR, CMD, 1);
	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	start_t200(st, 9);
}
/* Invalid N(R) received: report MDL_ERROR 'J' and re-establish the link. */
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;

	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
/*
 * Peer asked (via REJ) for retransmission from sequence number nr:
 * walk V(S) back down to nr, re-queueing each windowed skb at the
 * head of the I queue (newest first, so order is restored), then
 * trigger a pull from layer 1.  Window access is under l2->lock.
 */
static void
invoke_retransmission(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	u_int p1;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			if (test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			/* map the sequence offset to a window slot */
			p1 = (p1 + l2->sow) % l2->window;
			if (test_bit(FLG_LAPB, &l2->flag))
				st->l1.bcs->tx_cnt += l2->windowar[p1]->len + l2headersize(l2, 0);
			skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			l2->windowar[p1] = NULL;
		}
		spin_unlock_irqrestore(&l2->lock, flags);
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		return;
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
/*
 * Supervisory frame (RR/RNR/REJ) received in state 7.  Updates the
 * peer-busy condition, answers enquiries (P=1 commands), then acts
 * on the acknowledgement number: REJ triggers retransmission, a full
 * acknowledgement restarts T203, a partial one (or RNR) keeps T200
 * running.  An illegal N(R) forces link re-establishment.
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;

	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, st))
		typ = REJ;
	/* extract P/F bit and N(R) from the control field */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (PollFlag) {
		if (rsp)	/* a response with F=1 is unsolicited here */
			st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'A');
		else
			enquiry_response(st);
	}
	if (legalnr(st, nr)) {
		if (typ == REJ) {
			setva(st, nr);
			invoke_retransmission(st, nr);
			stop_t200(st, 10);
			if (FsmAddTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 6))
				l2m_debug(&st->l2.l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything acknowledged: idle-supervise via T203 */
			setva(st, nr);
			stop_t200(st, 11);
			FsmRestartTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			/* partial ack or peer busy: keep T200 running */
			setva(st, nr);
			if (typ != RR) FsmDelTimer(&st->l2.t203, 9);
			restart_t200(st, 12);
		}
		if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	} else
		nrerrorrecovery(fi);
}
  859. static void
  860. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  861. {
  862. struct PStack *st = fi->userdata;
  863. struct sk_buff *skb = arg;
  864. if (test_bit(FLG_LAPB, &st->l2.flag))
  865. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  866. if (!test_bit(FLG_L3_INIT, &st->l2.flag))
  867. skb_queue_tail(&st->l2.i_queue, skb);
  868. else
  869. dev_kfree_skb(skb);
  870. }
  871. static void
  872. l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
  873. {
  874. struct PStack *st = fi->userdata;
  875. struct sk_buff *skb = arg;
  876. if (test_bit(FLG_LAPB, &st->l2.flag))
  877. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  878. skb_queue_tail(&st->l2.i_queue, skb);
  879. st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
  880. }
  881. static void
  882. l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
  883. {
  884. struct PStack *st = fi->userdata;
  885. struct sk_buff *skb = arg;
  886. if (test_bit(FLG_LAPB, &st->l2.flag))
  887. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  888. skb_queue_tail(&st->l2.i_queue, skb);
  889. }
/*
 * Handle a received I-frame (ST_L2_7/ST_L2_8): deliver in-sequence
 * frames to layer 3, REJect out-of-sequence ones, and process the
 * piggy-backed N(R) acknowledgement.
 */
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	struct Layer2 *l2 = &(st->l2);
	int PollFlag, ns, i;
	unsigned int nr;

	i = l2addrsize(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		/* extended format: N(S) in 1st control octet, P and N(R) in 2nd */
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		/* basic format: N(S), P and N(R) packed into one octet */
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* own receiver busy: discard the frame, but answer a poll */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(st);
	} else if (l2->vr == ns) {
		/* in-sequence frame: advance V(R) modulo 8/128 */
		(l2->vr)++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->vr %= 128;
		else
			l2->vr %= 8;
		test_and_clear_bit(FLG_REJEXC, &l2->flag);
		if (PollFlag)
			enquiry_response(st);
		else
			/* ack later, possibly piggy-backed on an outgoing I-frame */
			test_and_set_bit(FLG_ACK_PEND, &l2->flag);
		/* strip the L2 header and hand the payload up to layer 3 */
		skb_pull(skb, l2headersize(l2, 0));
		st->l2.l2l3(st, DL_DATA | INDICATION, skb);
	} else {
		/* n(s)!=v(r) */
		dev_kfree_skb(skb);
		if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
			/* REJ already outstanding: only answer polls */
			if (PollFlag)
				enquiry_response(st);
		} else {
			enquiry_cr(st, REJ, RSP, PollFlag);
			test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
		}
	}
	if (legalnr(st, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &st->l2.flag) && (fi->state == ST_L2_7)) {
			if (nr == st->l2.vs) {
				/* all outstanding I-frames acknowledged */
				stop_t200(st, 13);
				FsmRestartTimer(&st->l2.t203, st->l2.T203,
						EV_L2_T203, NULL, 7);
			} else if (nr != st->l2.va)
				restart_t200(st, 14);
		}
		setva(st, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
		enquiry_cr(st, RR, RSP, 0);
}
  954. static void
  955. l2_got_tei(struct FsmInst *fi, int event, void *arg)
  956. {
  957. struct PStack *st = fi->userdata;
  958. st->l2.tei = (long) arg;
  959. if (fi->state == ST_L2_3) {
  960. establishlink(fi);
  961. test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
  962. } else
  963. FsmChangeState(fi, ST_L2_4);
  964. if (!skb_queue_empty(&st->l2.ui_queue))
  965. tx_ui(st);
  966. }
/*
 * T200 expiry while awaiting establishment (ST_L2_5): retransmit
 * SABM(E) up to N200 times, then give up and release.
 */
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: postpone, just re-arm T200 */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		/* retry limit reached: abandon establishment, MDL-ERROR G */
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		skb_queue_purge(&st->l2.i_queue);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'G');
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		/* retransmit SABME (mod 128) or SABM (mod 8), P bit set */
		send_uframe(st, (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM)
			    | 0x10, CMD);
	}
}
/*
 * T200 expiry while awaiting release (ST_L2_6): retransmit DISC up to
 * N200 times, then give up with MDL-ERROR H and confirm the release.
 */
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: postpone, just re-arm T200 */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'H');
		lapb_dl_release_l2l3(st, CONFIRM);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200,
			    NULL, 9);
		/* retransmit DISC with the P bit set */
		send_uframe(st, DISC | 0x10, CMD);
	}
}
  1008. static void
  1009. l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
  1010. {
  1011. struct PStack *st = fi->userdata;
  1012. if (test_bit(FLG_LAPD, &st->l2.flag) &&
  1013. test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
  1014. FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
  1015. return;
  1016. }
  1017. test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
  1018. st->l2.rc = 0;
  1019. FsmChangeState(fi, ST_L2_8);
  1020. transmit_enquiry(st);
  1021. st->l2.rc++;
  1022. }
/*
 * T200 expiry in timer recovery (ST_L2_8): repeat the enquiry up to
 * N200 times, then report MDL-ERROR I and re-establish the link.
 */
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: postpone, just re-arm T200 */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
	if (st->l2.rc == st->l2.N200) {
		/* retry limit exhausted: restart the whole link */
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	} else {
		transmit_enquiry(st);
		st->l2.rc++;
	}
}
  1042. static void
  1043. l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
  1044. {
  1045. struct PStack *st = fi->userdata;
  1046. if (test_bit(FLG_LAPD, &st->l2.flag) &&
  1047. test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
  1048. FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 9);
  1049. return;
  1050. }
  1051. FsmChangeState(fi, ST_L2_8);
  1052. transmit_enquiry(st);
  1053. st->l2.rc = 0;
  1054. }
/*
 * Pull the next I-frame off i_queue, park the original skb in the
 * retransmission window, prepend the I-frame header to a headroom-safe
 * copy and pass it to layer 1.  Triggered by EV_L2_ACK_PULL (ST_L2_7).
 */
static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb, *nskb;
	struct Layer2 *l2 = &st->l2;
	u_char header[MAX_HEADER_LEN];
	int i, hdr_space_needed;
	int unsigned p1;
	u_long flags;

	if (!cansend(st))
		return;
	skb = skb_dequeue(&l2->i_queue);
	if (!skb)
		return;
	/* ensure headroom for the L2 header; may return a copied skb */
	hdr_space_needed = l2headersize(l2, 0);
	nskb = skb_realloc_headroom(skb, hdr_space_needed);
	if (!nskb) {
		/* out of memory: put the frame back and retry later */
		skb_queue_head(&l2->i_queue, skb);
		return;
	}
	spin_lock_irqsave(&l2->lock, flags);
	/* window slot = (V(S) - V(A)) mod modulus, rotated by start-of-window */
	if (test_bit(FLG_MOD128, &l2->flag))
		p1 = (l2->vs - l2->va) % 128;
	else
		p1 = (l2->vs - l2->va) % 8;
	p1 = (p1 + l2->sow) % l2->window;
	if (l2->windowar[p1]) {
		/* should never happen: slot still holds an unacked frame */
		printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
		       p1);
		dev_kfree_skb(l2->windowar[p1]);
	}
	/* keep the unmodified skb for possible retransmission */
	l2->windowar[p1] = skb;
	i = sethdraddr(&st->l2, header, CMD);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		header[i++] = l2->vs << 1;
		header[i++] = l2->vr << 1;
		l2->vs = (l2->vs + 1) % 128;
	} else {
		header[i++] = (l2->vr << 5) | (l2->vs << 1);
		l2->vs = (l2->vs + 1) % 8;
	}
	spin_unlock_irqrestore(&l2->lock, flags);
	memcpy(skb_push(nskb, i), header, i);
	st->l2.l2l1(st, PH_PULL | INDICATION, nskb);
	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
		/* first outstanding I-frame: swap idle timer T203 for T200 */
		FsmDelTimer(&st->l2.t203, 13);
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
	}
	if (!skb_queue_empty(&l2->i_queue) && cansend(st))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
/*
 * Handle a supervisory frame in timer recovery (ST_L2_8).  A response
 * with F=1 ends timer recovery and triggers retransmission; anything
 * else only updates V(A) and peer-busy state.
 */
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;

	/* C/R bit in the address octet; meaning flips for the originator */
	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		/* extended format: P/F and N(R) in the 2nd control octet */
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (rsp && PollFlag) {
		/* final response to our enquiry: leave timer recovery */
		if (legalnr(st, nr)) {
			if (rnr) {
				/* peer busy: keep supervising with T200 */
				restart_t200(st, 15);
			} else {
				stop_t200(st, 16);
				FsmAddTimer(&l2->t203, l2->T203,
					    EV_L2_T203, NULL, 5);
				setva(st, nr);
			}
			invoke_retransmission(st, nr);
			FsmChangeState(fi, ST_L2_7);
			if (!skb_queue_empty(&l2->i_queue) && cansend(st))
				st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		if (!rsp && PollFlag)
			enquiry_response(st);	/* peer polled us: answer */
		if (legalnr(st, nr)) {
			setva(st, nr);
		} else
			nrerrorrecovery(fi);
	}
}
/*
 * Handle a received FRMR (frame reject).  If the rejected control
 * field was an I- or S-frame, or a UA while in ST_L2_7, the condition
 * is unrecoverable: report MDL-ERROR K and re-establish the link.
 */
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	/* skip address + FRMR control octet so data[0] holds the
	 * rejected frame's control field */
	skb_pull(skb, l2addrsize(&st->l2) + 1);
	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) ||	/* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
	dev_kfree_skb(skb);
}
/*
 * TEI removed in ST_L2_2/ST_L2_4: drop queued UI frames, invalidate
 * the TEI and fall back to the TEI-unassigned state.
 */
static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;
	FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in ST_L2_3 (establish pending): additionally tell
 * layer 3 the link is released before going back to ST_L2_1.
 */
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed while awaiting establishment (ST_L2_5): flush all
 * queues and the window, stop T200, notify layer 3 and reset to
 * ST_L2_1.
 */
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	st->l2.tei = -1;
	stop_t200(st, 17);
	st5_dl_release_l2l3(st);
	FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed while awaiting release (ST_L2_6): stop T200, confirm the
 * release to layer 3 and reset to ST_L2_1.
 */
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;
	stop_t200(st, 18);
	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
	FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in an established state (ST_L2_7/ST_L2_8): tear down all
 * queues, the window and both timers, indicate release to layer 3 and
 * reset to ST_L2_1.
 */
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	st->l2.tei = -1;
	stop_t200(st, 17);
	FsmDelTimer(&st->l2.t203, 19);
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_1);
}
/*
 * Layer 1 went down in ST_L2_1/ST_L2_4: drop queued frames and, if an
 * establish was pending, indicate the release to layer 3.
 */
static void
l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
		st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
}
/*
 * Layer 1 went down while awaiting establishment (ST_L2_5): flush
 * everything, stop T200, notify layer 3 and fall back to ST_L2_4.
 */
static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	stop_t200(st, 19);
	st5_dl_release_l2l3(st);
	FsmChangeState(fi, ST_L2_4);
}
/*
 * Layer 1 went down while awaiting release (ST_L2_6): stop T200,
 * confirm the release and fall back to ST_L2_4.
 */
static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.ui_queue);
	stop_t200(st, 20);
	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
	FsmChangeState(fi, ST_L2_4);
}
/*
 * Layer 1 went down in an established state (ST_L2_7/ST_L2_8): tear
 * down queues, window and both timers, indicate the release to layer 3
 * and fall back to ST_L2_4.
 */
static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	stop_t200(st, 19);
	FsmDelTimer(&st->l2.t203, 19);
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_4);
}
/*
 * Local receiver became busy: on the not-busy -> busy transition,
 * announce RNR to the peer and drop any pending RR acknowledgement.
 */
static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (!test_and_set_bit(FLG_OWN_BUSY, &st->l2.flag)) {
		enquiry_cr(st, RNR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	}
}
/*
 * Local receiver is ready again: announce RR and clear a pending ack.
 * NOTE(review): the guard looks inverted compared with l2_set_own_busy
 * -- RR is sent when FLG_OWN_BUSY was NOT previously set.  Behavior is
 * kept as-is here; verify against Q.921 before changing.
 */
static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (!test_and_clear_bit(FLG_OWN_BUSY, &st->l2.flag)) {
		enquiry_cr(st, RR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	}
}
/*
 * Report a received-frame error to management; arg carries the
 * single-character error code.
 */
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	st->ma.layer(st, MDL_ERROR | INDICATION, arg);
}
/*
 * Report a frame error and additionally re-establish the link
 * (used in the established states ST_L2_7/ST_L2_8).
 */
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	st->ma.layer(st, MDL_ERROR | INDICATION, arg);
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
/*
 * L2 FSM transition table: (state, event) -> handler.
 * Compiled into a jump table once at module init by FsmNew().
 */
static struct FsmNode L2FnList[] __initdata =
{
	/* establish / release requests from layer 3 */
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	/* acknowledged (I-frame) data requests */
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	/* unacknowledged (UI) data requests */
	{ST_L2_1, EV_L2_DL_UNIT_DATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNIT_DATA, l2_send_ui},
	/* TEI management */
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	/* received unnumbered frames */
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	/* received supervisory and I-frames */
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	/* timer events */
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	/* transmit-pull and flow control */
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	/* frame errors */
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	/* layer 1 deactivation */
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};
/*
 * Entry point for primitives coming up from layer 1.  PH_DATA frames
 * are classified (I / S / U) and fed into the L2 FSM; the skb is freed
 * here whenever no FSM handler consumed it or a frame check failed.
 */
static void
isdnl2_l1l2(struct PStack *st, int pr, void *arg)
{
	struct sk_buff *skb = arg;
	u_char *datap;
	int ret = 1, len;
	int c = 0;

	switch (pr) {
	case (PH_DATA | INDICATION):
		datap = skb->data;
		len = l2addrsize(&st->l2);
		if (skb->len > len)
			datap += len;	/* datap now points at the control field */
		else {
			/* too short to hold address + control: frame error 'N' */
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'N');
			dev_kfree_skb(skb);
			return;
		}
		/* classify by control field; each *_error() check returns a
		 * nonzero error code when the frame is malformed */
		if (!(*datap & 1)) {	/* I-Frame */
			if (!(c = iframe_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_I, skb);
		} else if (IsSFrame(datap, st)) {	/* S-Frame */
			if (!(c = super_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_SUPER, skb);
		} else if (IsUI(datap)) {
			if (!(c = UI_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_UI, skb);
		} else if (IsSABME(datap, st)) {
			if (!(c = unnum_error(st, skb, CMD)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_SABME, skb);
		} else if (IsUA(datap)) {
			if (!(c = unnum_error(st, skb, RSP)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_UA, skb);
		} else if (IsDISC(datap)) {
			if (!(c = unnum_error(st, skb, CMD)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_DISC, skb);
		} else if (IsDM(datap)) {
			if (!(c = unnum_error(st, skb, RSP)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_DM, skb);
		} else if (IsFRMR(datap)) {
			if (!(c = FRMR_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_FRMR, skb);
		} else {
			/* unrecognized frame type: frame error 'L' */
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'L');
			dev_kfree_skb(skb);
			ret = 0;
		}
		if (c) {
			/* malformed frame: free it and report the error code */
			dev_kfree_skb(skb);
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
			ret = 0;
		}
		if (ret)
			/* no FSM handler took the skb: free it here */
			dev_kfree_skb(skb);
		break;
	case (PH_PULL | CONFIRM):
		FsmEvent(&st->l2.l2m, EV_L2_ACK_PULL, arg);
		break;
	case (PH_PAUSE | INDICATION):
		test_and_set_bit(FLG_DCHAN_BUSY, &st->l2.flag);
		break;
	case (PH_PAUSE | CONFIRM):
		test_and_clear_bit(FLG_DCHAN_BUSY, &st->l2.flag);
		break;
	case (PH_ACTIVATE | CONFIRM):
	case (PH_ACTIVATE | INDICATION):
		test_and_set_bit(FLG_L1_ACTIV, &st->l2.flag);
		/* run an establish request that was waiting for L1 */
		if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
			FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
		break;
	case (PH_DEACTIVATE | INDICATION):
	case (PH_DEACTIVATE | CONFIRM):
		test_and_clear_bit(FLG_L1_ACTIV, &st->l2.flag);
		FsmEvent(&st->l2.l2m, EV_L1_DEACTIVATE, arg);
		break;
	default:
		l2m_debug(&st->l2.l2m, "l2 unknown pr %04x", pr);
		break;
	}
}
/*
 * Entry point for primitives coming down from layer 3: DL data,
 * establish/release requests and MDL (management) events.
 */
static void
isdnl2_l3l2(struct PStack *st, int pr, void *arg)
{
	switch (pr) {
	case (DL_DATA | REQUEST):
		/* free the skb ourselves if no FSM handler accepted it */
		if (FsmEvent(&st->l2.l2m, EV_L2_DL_DATA, arg)) {
			dev_kfree_skb((struct sk_buff *) arg);
		}
		break;
	case (DL_UNIT_DATA | REQUEST):
		if (FsmEvent(&st->l2.l2m, EV_L2_DL_UNIT_DATA, arg)) {
			dev_kfree_skb((struct sk_buff *) arg);
		}
		break;
	case (DL_ESTABLISH | REQUEST):
		if (test_bit(FLG_L1_ACTIV, &st->l2.flag)) {
			if (test_bit(FLG_LAPD, &st->l2.flag) ||
			    test_bit(FLG_ORIG, &st->l2.flag)) {
				FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
			}
		} else {
			/* L1 not up yet: remember the request, activate L1 */
			if (test_bit(FLG_LAPD, &st->l2.flag) ||
			    test_bit(FLG_ORIG, &st->l2.flag)) {
				test_and_set_bit(FLG_ESTAB_PEND, &st->l2.flag);
			}
			st->l2.l2l1(st, PH_ACTIVATE, NULL);
		}
		break;
	case (DL_RELEASE | REQUEST):
		if (test_bit(FLG_LAPB, &st->l2.flag)) {
			st->l2.l2l1(st, PH_DEACTIVATE, NULL);
		}
		FsmEvent(&st->l2.l2m, EV_L2_DL_RELEASE_REQ, arg);
		break;
	case (MDL_ASSIGN | REQUEST):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_ASSIGN, arg);
		break;
	case (MDL_REMOVE | REQUEST):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_REMOVE, arg);
		break;
	case (MDL_ERROR | RESPONSE):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_ERROR, arg);
		break;
	}
}
/*
 * Tear down the L2 part of a stack: delete both timers, drop all
 * queued frames and free the transmit window.
 */
void
releasestack_isdnl2(struct PStack *st)
{
	FsmDelTimer(&st->l2.t200, 21);
	FsmDelTimer(&st->l2.t203, 16);
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	ReleaseWin(&st->l2);
}
/*
 * FSM debug callback: forward a printf-style message to the HiSax
 * status log, tagged with this stack's debug id.
 */
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	va_list args;
	struct PStack *st = fi->userdata;

	va_start(args, fmt);
	VHiSax_putstatus(st->l1.hardware, st->l2.debug_id, fmt, args);
	va_end(args);
}
/*
 * Initialize the L2 part of a protocol stack: hook up the L1/L3
 * callbacks, init the queues, window and FSM instance, and create
 * the T200/T203 timers.
 */
void
setstack_isdnl2(struct PStack *st, char *debug_id)
{
	spin_lock_init(&st->l2.lock);
	st->l1.l1l2 = isdnl2_l1l2;
	st->l3.l3l2 = isdnl2_l3l2;
	skb_queue_head_init(&st->l2.i_queue);
	skb_queue_head_init(&st->l2.ui_queue);
	InitWin(&st->l2);
	st->l2.debug = 0;
	st->l2.l2m.fsm = &l2fsm;
	/* LAPB (B-channel) links need no TEI assignment: start in ST_L2_4 */
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l2.l2m.state = ST_L2_4;
	else
		st->l2.l2m.state = ST_L2_1;
	st->l2.l2m.debug = 0;
	st->l2.l2m.userdata = st;
	st->l2.l2m.userint = 0;
	st->l2.l2m.printdebug = l2m_debug;
	/* NOTE(review): strcpy assumes debug_id fits in l2.debug_id --
	 * callers pass short fixed strings; verify the buffer size */
	strcpy(st->l2.debug_id, debug_id);
	FsmInitTimer(&st->l2.l2m, &st->l2.t200);
	FsmInitTimer(&st->l2.l2m, &st->l2.t203);
}
  1554. static void
  1555. transl2_l3l2(struct PStack *st, int pr, void *arg)
  1556. {
  1557. switch (pr) {
  1558. case (DL_DATA | REQUEST):
  1559. case (DL_UNIT_DATA | REQUEST):
  1560. st->l2.l2l1(st, PH_DATA | REQUEST, arg);
  1561. break;
  1562. case (DL_ESTABLISH | REQUEST):
  1563. st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL);
  1564. break;
  1565. case (DL_RELEASE | REQUEST):
  1566. st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
  1567. break;
  1568. }
  1569. }
/*
 * Install the transparent layer 2: layer 3 talks straight to layer 1.
 */
void
setstack_transl2(struct PStack *st)
{
	st->l3.l3l2 = transl2_l3l2;
}
/*
 * Counterpart of setstack_transl2: the transparent L2 allocates no
 * resources, so there is intentionally nothing to release.
 */
void
releasestack_transl2(struct PStack *st)
{
}
/*
 * Module init: build the global L2 FSM jump table from L2FnList.
 * Returns the result of FsmNew().
 */
int __init
Isdnl2New(void)
{
	l2fsm.state_count = L2_STATE_COUNT;
	l2fsm.event_count = L2_EVENT_COUNT;
	l2fsm.strEvent = strL2Event;
	l2fsm.strState = strL2State;
	return FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
}
/*
 * Module exit: free the L2 FSM jump table allocated by Isdnl2New().
 */
void
Isdnl2Free(void)
{
	FsmFree(&l2fsm);
}