isdnl2.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844
  1. /* $Id: isdnl2.c,v 2.30.2.4 2004/02/11 13:21:34 keil Exp $
  2. *
  3. * Author Karsten Keil
  4. * based on the teles driver from Jan den Ouden
  5. * Copyright by Karsten Keil <keil@isdn4linux.de>
  6. *
  7. * This software may be used and distributed according to the terms
  8. * of the GNU General Public License, incorporated herein by reference.
  9. *
  10. * For changes and modifications please read
  11. * Documentation/isdn/HiSax.cert
  12. *
  13. * Thanks to Jan den Ouden
  14. * Fritz Elfert
  15. *
  16. */
  17. #include <linux/init.h>
  18. #include <linux/gfp.h>
  19. #include "hisax.h"
  20. #include "isdnl2.h"
/* Revision string exported for HiSax version reporting. */
const char *l2_revision = "$Revision: 2.30.2.4 $";

static void l2m_debug(struct FsmInst *fi, char *fmt, ...);

/* One shared FSM description (states/events/transitions) for all L2 instances. */
static struct Fsm l2fsm;
/*
 * Layer-2 states, following the ITU-T Q.921 state numbering
 * (ST_L2_1 = TEI unassigned ... ST_L2_7 = multiple-frame established,
 * ST_L2_8 = timer recovery).
 */
enum {
	ST_L2_1,
	ST_L2_2,
	ST_L2_3,
	ST_L2_4,
	ST_L2_5,
	ST_L2_6,
	ST_L2_7,
	ST_L2_8,
};

#define L2_STATE_COUNT (ST_L2_8+1)

/* Printable state names, indexed by the enum above (debug output). */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
/*
 * Layer-2 FSM events: received frame types (UI..I), requests from
 * layer 3 (DL_*), management primitives (MDL_*), layer-1 deactivation,
 * the T200/T203 timers and local receiver-busy changes.
 */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNIT_DATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)

/* Printable event names, indexed by the enum above (debug output). */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNIT_DATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
/* Forward declaration; defined below. */
static int l2addrsize(struct Layer2 *l2);
/*
 * Record that the peer signalled receiver-busy (RNR).  If frames are
 * still queued for transmission, also set FLG_L2BLOCK to hold back
 * layer-2 output until the busy condition clears.
 */
static void
set_peer_busy(struct Layer2 *l2) {
	test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
	if (!skb_queue_empty(&l2->i_queue) ||
	    !skb_queue_empty(&l2->ui_queue))
		test_and_set_bit(FLG_L2BLOCK, &l2->flag);
}
/*
 * Clear the peer receiver-busy condition; the output block is only
 * lifted if the busy flag was actually set.
 */
static void
clear_peer_busy(struct Layer2 *l2) {
	if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
		test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
  109. static void
  110. InitWin(struct Layer2 *l2)
  111. {
  112. int i;
  113. for (i = 0; i < MAX_WINDOW; i++)
  114. l2->windowar[i] = NULL;
  115. }
  116. static int
  117. freewin1(struct Layer2 *l2)
  118. {
  119. int i, cnt = 0;
  120. for (i = 0; i < MAX_WINDOW; i++) {
  121. if (l2->windowar[i]) {
  122. cnt++;
  123. dev_kfree_skb(l2->windowar[i]);
  124. l2->windowar[i] = NULL;
  125. }
  126. }
  127. return cnt;
  128. }
/* Convenience wrapper: flush the window of st's layer-2 instance. */
static inline void
freewin(struct PStack *st)
{
	freewin1(&st->l2);
}
  134. static void
  135. ReleaseWin(struct Layer2 *l2)
  136. {
  137. int cnt;
  138. if((cnt = freewin1(l2)))
  139. printk(KERN_WARNING "isdl2 freed %d skbuffs in release\n", cnt);
  140. }
  141. static inline unsigned int
  142. cansend(struct PStack *st)
  143. {
  144. unsigned int p1;
  145. if(test_bit(FLG_MOD128, &st->l2.flag))
  146. p1 = (st->l2.vs - st->l2.va) % 128;
  147. else
  148. p1 = (st->l2.vs - st->l2.va) % 8;
  149. return ((p1 < st->l2.window) && !test_bit(FLG_PEER_BUSY, &st->l2.flag));
  150. }
/*
 * Reset all exception conditions (pending acknowledge, REJ exception,
 * own and peer receiver-busy) on (re-)establishment of the link.
 */
static inline void
clear_exception(struct Layer2 *l2)
{
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	test_and_clear_bit(FLG_REJEXC, &l2->flag);
	test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
	clear_peer_busy(l2);
}
  159. static inline int
  160. l2headersize(struct Layer2 *l2, int ui)
  161. {
  162. return (((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
  163. (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1));
  164. }
/* Address field length: 2 bytes (SAPI/TEI) for LAPD, 1 byte for LAPB. */
inline int
l2addrsize(struct Layer2 *l2)
{
	return (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
}
/*
 * Write the L2 address field into header and return its length.
 * LAPD: first octet SAPI<<2 with the C/R bit, second octet TEI<<1
 * with the EA bit set (2 bytes).  LAPB: a single address octet,
 * 1 or 3, chosen by command/response direction; the sense of the
 * C/R bit is inverted on the originating side (1 byte).
 */
static int
sethdraddr(struct Layer2 *l2, u_char * header, int rsp)
{
	u_char *ptr = header;
	int crbit = rsp;

	if (test_bit(FLG_LAPD, &l2->flag)) {
		*ptr++ = (l2->sap << 2) | (rsp ? 2 : 0);
		*ptr++ = (l2->tei << 1) | 1;
		return (2);
	} else {
		if (test_bit(FLG_ORIG, &l2->flag))
			crbit = !crbit;
		if (crbit)
			*ptr++ = 1;
		else
			*ptr++ = 3;
		return (1);
	}
}
/*
 * Hand a supervisory/unnumbered frame straight down to layer 1.
 * For LAPB the transmitted byte count is accounted in the
 * B-channel statistics.
 */
static inline void
enqueue_super(struct PStack *st,
	      struct sk_buff *skb)
{
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l1.bcs->tx_cnt += skb->len;
	st->l2.l2l1(st, PH_DATA | REQUEST, skb);
}

/* UI frames take the same direct path down as supervisory frames. */
#define enqueue_ui(a, b) enqueue_super(a, b)
  198. static inline int
  199. IsUI(u_char * data)
  200. {
  201. return ((data[0] & 0xef) == UI);
  202. }
  203. static inline int
  204. IsUA(u_char * data)
  205. {
  206. return ((data[0] & 0xef) == UA);
  207. }
  208. static inline int
  209. IsDM(u_char * data)
  210. {
  211. return ((data[0] & 0xef) == DM);
  212. }
  213. static inline int
  214. IsDISC(u_char * data)
  215. {
  216. return ((data[0] & 0xef) == DISC);
  217. }
/*
 * Supervisory frame check: low two bits are 01 and the S-bits are not
 * both set (11 would be undefined).  For modulo-8 only the low nibble
 * carries the control information.
 */
static inline int
IsSFrame(u_char * data, struct PStack *st)
{
	register u_char d = *data;

	if (!test_bit(FLG_MOD128, &st->l2.flag))
		d &= 0xf;
	return(((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c));
}
  226. static inline int
  227. IsSABME(u_char * data, struct PStack *st)
  228. {
  229. u_char d = data[0] & ~0x10;
  230. return (test_bit(FLG_MOD128, &st->l2.flag) ? d == SABME : d == SABM);
  231. }
  232. static inline int
  233. IsREJ(u_char * data, struct PStack *st)
  234. {
  235. return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == REJ : (data[0] & 0xf) == REJ);
  236. }
  237. static inline int
  238. IsFRMR(u_char * data)
  239. {
  240. return ((data[0] & 0xef) == FRMR);
  241. }
  242. static inline int
  243. IsRNR(u_char * data, struct PStack *st)
  244. {
  245. return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == RNR : (data[0] & 0xf) == RNR);
  246. }
/*
 * Validate a received I-frame.  Returns 0 when OK, else a Q.921 error
 * code character: 'L' if it arrived as a response (I-frames must be
 * commands), 'N' if shorter than address + control field, 'O' if the
 * information field exceeds the negotiated maximum.
 */
static int
iframe_error(struct PStack *st, struct sk_buff *skb)
{
	/* header = address length + control field (2 bytes if modulo 128) */
	int i = l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1);
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len < i)
		return 'N';
	if ((skb->len - i) > st->l2.maxlen)
		return 'O';
	return 0;
}
/*
 * Validate a supervisory frame: the length must be exactly the
 * address field plus the control field.  Returns 0 or 'N'.
 */
static int
super_error(struct PStack *st, struct sk_buff *skb)
{
	if (skb->len != l2addrsize(&st->l2) +
	    (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1))
		return 'N';
	return 0;
}
/*
 * Validate an unnumbered frame.  Returns 0 when OK, 'L' if the C/R
 * direction does not match wantrsp, 'N' on wrong length (address
 * field plus one control octet).
 */
static int
unnum_error(struct PStack *st, struct sk_buff *skb, int wantrsp)
{
	int rsp = (*skb->data & 0x2) >> 1;

	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp != wantrsp)
		return 'L';
	if (skb->len != l2addrsize(&st->l2) + 1)
		return 'N';
	return 0;
}
/*
 * Validate a UI frame.  Returns 0 when OK, 'L' if received as a
 * response (UI must be a command), 'O' if the payload is oversized.
 */
static int
UI_error(struct PStack *st, struct sk_buff *skb)
{
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len > st->l2.maxlen + l2addrsize(&st->l2) + 1)
		return 'O';
	return 0;
}
/*
 * Validate a received FRMR (frame reject).  FRMR must be a response
 * ('L' otherwise) and carries 5 information octets in modulo-128
 * mode or 3 in modulo-8 mode ('N' if truncated).  The information
 * field is dumped via l2m_debug for diagnosis.
 */
static int
FRMR_error(struct PStack *st, struct sk_buff *skb)
{
	int headers = l2addrsize(&st->l2) + 1;
	u_char *datap = skb->data + headers;
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (!rsp)
		return 'L';
	if (test_bit(FLG_MOD128, &st->l2.flag)) {
		if (skb->len < headers + 5)
			return 'N';
		else
			l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x %2x %2x",
				  datap[0], datap[1], datap[2],
				  datap[3], datap[4]);
	} else {
		if (skb->len < headers + 3)
			return 'N';
		else
			l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x",
				  datap[0], datap[1], datap[2]);
	}
	return 0;
}
/*
 * Check that a received N(R) only acknowledges frames we actually
 * sent: (nr - V(A)) must not exceed (V(S) - V(A)), both taken modulo
 * the sequence space (128 or 8).
 */
static unsigned int
legalnr(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;

	if(test_bit(FLG_MOD128, &l2->flag))
		return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
	else
		return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
}
/*
 * Advance the acknowledge state variable V(A) up to nr, releasing
 * each newly acknowledged frame from the retransmission window.
 * The window lock is dropped around lli_writewakeup() because that
 * callback may re-enter other layers; PACKET_NOACK frames suppress
 * the wakeup (len forced to -1).
 */
static void
setva(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	int len;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	while (l2->va != nr) {
		(l2->va)++;
		if(test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		len = l2->windowar[l2->sow]->len;
		if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type)
			len = -1;
		dev_kfree_skb(l2->windowar[l2->sow]);
		l2->windowar[l2->sow] = NULL;
		l2->sow = (l2->sow + 1) % l2->window;
		spin_unlock_irqrestore(&l2->lock, flags);
		if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >=0))
			lli_writewakeup(st, len);
		spin_lock_irqsave(&l2->lock, flags);
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
  355. static void
  356. send_uframe(struct PStack *st, u_char cmd, u_char cr)
  357. {
  358. struct sk_buff *skb;
  359. u_char tmp[MAX_HEADER_LEN];
  360. int i;
  361. i = sethdraddr(&st->l2, tmp, cr);
  362. tmp[i++] = cmd;
  363. if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
  364. printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n");
  365. return;
  366. }
  367. memcpy(skb_put(skb, i), tmp, i);
  368. enqueue_super(st, skb);
  369. }
/* Extract the P/F bit (0x10) from the control octet after the address. */
static inline u_char
get_PollFlag(struct PStack * st, struct sk_buff * skb)
{
	return (skb->data[l2addrsize(&(st->l2))] & 0x10);
}
  375. static inline u_char
  376. get_PollFlagFree(struct PStack *st, struct sk_buff *skb)
  377. {
  378. u_char PF;
  379. PF = get_PollFlag(st, skb);
  380. dev_kfree_skb(skb);
  381. return (PF);
  382. }
/* Arm retransmission timer T200 and remember that it is running. */
static inline void
start_t200(struct PStack *st, int i)
{
	FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}
/* Re-arm T200 from scratch (restarts even if already running). */
static inline void
restart_t200(struct PStack *st, int i)
{
	FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}
/* Stop T200, but only if our running flag says it is armed. */
static inline void
stop_t200(struct PStack *st, int i)
{
	if(test_and_clear_bit(FLG_T200_RUN, &st->l2.flag))
		FsmDelTimer(&st->l2.t200, i);
}
  401. static inline void
  402. st5_dl_release_l2l3(struct PStack *st)
  403. {
  404. int pr;
  405. if(test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
  406. pr = DL_RELEASE | CONFIRM;
  407. else
  408. pr = DL_RELEASE | INDICATION;
  409. st->l2.l2l3(st, pr, NULL);
  410. }
/*
 * Signal DL_RELEASE (with the given CONFIRM/INDICATION flavour f) to
 * layer 3; for LAPB the physical layer is deactivated first.
 */
static inline void
lapb_dl_release_l2l3(struct PStack *st, int f)
{
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
	st->l2.l2l3(st, DL_RELEASE | f, NULL);
}
/*
 * Start link establishment: send SABME/SABM with the P bit (0x10)
 * set, reset the retry counter, stop T203, (re)start T200, flush the
 * retransmission window and enter the awaiting-establishment state
 * (ST_L2_5).
 */
static void
establishlink(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;
	u_char cmd;

	clear_exception(&st->l2);
	st->l2.rc = 0;
	cmd = (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM) | 0x10;
	send_uframe(st, cmd, CMD);
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 1);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
	freewin(st);
	FsmChangeState(fi, ST_L2_5);
}
  433. static void
  434. l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
  435. {
  436. struct sk_buff *skb = arg;
  437. struct PStack *st = fi->userdata;
  438. if (get_PollFlagFree(st, skb))
  439. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'C');
  440. else
  441. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'D');
  442. }
/*
 * Unsolicited DM received: with the F bit set report error 'B';
 * otherwise report 'E' and re-establish the link (without treating
 * it as a layer-3 initiated establish).
 */
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;

	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
	else {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
}
/*
 * DM received in timer-recovery state (ST_L2_8): report 'B' or 'E'
 * depending on the F bit, then always re-establish the link (unlike
 * l2_mdl_error_dm, which only re-establishes when F is clear).
 */
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;

	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
	else {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
	}
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
/* Simple transition to the TEI-assigned state (ST_L2_3). */
static void
l2_go_st3(struct FsmInst *fi, int event, void *arg)
{
	FsmChangeState(fi, ST_L2_3);
}
/* Enter ST_L2_3 and ask the TEI manager for a TEI assignment. */
static void
l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L2_3);
	st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
}
/*
 * UI data requested while no TEI is assigned: queue the frame,
 * move to ST_L2_2 (awaiting TEI) and request a TEI assignment.
 */
static void
l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
	FsmChangeState(fi, ST_L2_2);
	st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
}
/* Queue a UI frame for later transmission (TEI assignment pending). */
static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
}
/*
 * Flush the UI queue: prepend the address field and UI control octet
 * to each queued frame and send it down.
 */
static void
tx_ui(struct PStack *st)
{
	struct sk_buff *skb;
	u_char header[MAX_HEADER_LEN];
	int i;

	i = sethdraddr(&(st->l2), header, CMD);
	header[i++] = UI;
	while ((skb = skb_dequeue(&st->l2.ui_queue))) {
		memcpy(skb_push(skb, i), header, i);
		enqueue_ui(st, skb);
	}
}
/* UI data request with TEI available: queue and transmit immediately. */
static void
l2_send_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
	tx_ui(st);
}
/*
 * Deliver a received UI frame to layer 3 after stripping the L2
 * header (UI frames always have a 1-byte control field).
 */
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2headersize(&st->l2, 1));
	st->l2.l2l3(st, DL_UNIT_DATA | INDICATION, skb);
/*	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *		in states 1-3 for broadcast
 */
}
/*
 * Layer-3 establish request: start link setup and mark it as
 * layer-3 initiated (so the UA answer yields a CONFIRM).
 */
static void
l2_establish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	establishlink(fi);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
}
/*
 * Establish request while setup is already in progress: drop queued
 * I-frames, mark layer-3 initiation and cancel any pending release.
 */
static void
l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
}
/*
 * Layer-3 establish request on an already established link: discard
 * outstanding I-frames and re-run link setup as layer-3 initiated.
 */
static void
l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	establishlink(fi);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
}
/* Release request with nothing to tear down: confirm immediately. */
static void
l2_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
}
/* Release requested during setup: remember it for when setup completes. */
static void
l2_pend_rel(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	test_and_set_bit(FLG_PEND_REL, &st->l2.flag);
}
/*
 * Initiate link disconnection: drop all queued and windowed frames,
 * enter awaiting-release (ST_L2_6), send DISC with the P bit set and
 * switch supervision from T203 to T200.
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	FsmChangeState(fi, ST_L2_6);
	st->l2.rc = 0;
	send_uframe(st, DISC | 0x10, CMD);
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 2);
}
/*
 * SABM(E) received while idle: acknowledge with UA (echoing the P/F
 * bit), reset all state variables, enter multiple-frame established
 * (ST_L2_7), start T203 and tell layer 3 the link is up.
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
}
/* Answer a command with UA, echoing the received P/F bit. */
static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
}
/* Answer a command with DM, echoing the received P/F bit. */
static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, DM | get_PollFlagFree(st, skb), RSP);
}
/*
 * SABM(E) received while already established: answer UA, report
 * MDL_ERROR 'F', reset all state variables and restart T203.  If
 * unacknowledged I-frames existed, the queue is purged and layer 3
 * gets a fresh DL_ESTABLISH indication; in ST_L2_7/8 pending output
 * is kicked if the window allows.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0, state;

	state = fi->state;
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'F');
	if (st->l2.vs != st->l2.va) {
		skb_queue_purge(&st->l2.i_queue);
		est = 1;
	}
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	stop_t200(st, 3);
	FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	if (est)
		st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
	if ((ST_L2_7==state) || (ST_L2_8 == state))
		if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
/*
 * DISC received while established: drop to ST_L2_4, stop both timers,
 * acknowledge with UA, discard all pending data and report the
 * release to layer 3.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	FsmChangeState(fi, ST_L2_4);
	FsmDelTimer(&st->l2.t203, 3);
	stop_t200(st, 4);
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	lapb_dl_release_l2l3(st, INDICATION);
}
/*
 * UA received in awaiting-establishment (ST_L2_5).  A UA without the
 * F bit is a protocol error.  Otherwise: honour a pending release,
 * decide which primitive layer 3 gets (CONFIRM for a layer-3
 * initiated setup, INDICATION for a peer-forced re-establish with
 * data loss), reset the state variables, enter ST_L2_7, start T203
 * and kick pending output.
 *
 * NOTE(review): there is no return after l2_disconnect() on the
 * FLG_PEND_REL path, so the establish processing below still runs —
 * confirm this is the intended behaviour.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int pr=-1;

	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
		l2_disconnect(fi, event, arg);
	if (test_and_clear_bit(FLG_L3_INIT, &st->l2.flag)) {
		pr = DL_ESTABLISH | CONFIRM;
	} else if (st->l2.vs != st->l2.va) {
		skb_queue_purge(&st->l2.i_queue);
		pr = DL_ESTABLISH | INDICATION;
	}
	stop_t200(st, 5);
	st->l2.vr = 0;
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		st->l2.l2l3(st, pr, NULL);
	if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
/*
 * UA received in awaiting-release (ST_L2_6): without the F bit it is
 * a protocol error; otherwise stop T200, confirm the release to
 * layer 3 and drop to ST_L2_4.
 */
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(st, 6);
	lapb_dl_release_l2l3(st, CONFIRM);
	FsmChangeState(fi, ST_L2_4);
}
/*
 * DM without the F bit received during setup/release: restart link
 * establishment as layer-3 initiated.  A DM with F set is ignored
 * here (handled by the dm_release handlers).
 */
static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlagFree(st, skb)) {
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	}
}
/*
 * DM with F bit received in awaiting-establishment (ST_L2_5): the
 * peer refuses the link.  Stop T200, discard queued I-frames unless
 * layer 3 initiated the setup, deactivate LAPB layer 1, report the
 * release and fall back to ST_L2_4.
 */
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(st, skb)) {
		stop_t200(st, 7);
		if (!test_bit(FLG_L3_INIT, &st->l2.flag))
			skb_queue_purge(&st->l2.i_queue);
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
		FsmChangeState(fi, ST_L2_4);
	}
}
/*
 * DM with F bit received in awaiting-release (ST_L2_6): treat as a
 * completed release — stop T200, confirm to layer 3, enter ST_L2_4.
 */
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(st, skb)) {
		stop_t200(st, 8);
		lapb_dl_release_l2l3(st, CONFIRM);
		FsmChangeState(fi, ST_L2_4);
	}
}
  726. static inline void
  727. enquiry_cr(struct PStack *st, u_char typ, u_char cr, u_char pf)
  728. {
  729. struct sk_buff *skb;
  730. struct Layer2 *l2;
  731. u_char tmp[MAX_HEADER_LEN];
  732. int i;
  733. l2 = &st->l2;
  734. i = sethdraddr(l2, tmp, cr);
  735. if (test_bit(FLG_MOD128, &l2->flag)) {
  736. tmp[i++] = typ;
  737. tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
  738. } else
  739. tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
  740. if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
  741. printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n");
  742. return;
  743. }
  744. memcpy(skb_put(skb, i), tmp, i);
  745. enqueue_super(st, skb);
  746. }
/*
 * Answer a peer enquiry (P bit set): respond RNR if we are busy,
 * otherwise RR, always with the F bit; this also satisfies any
 * pending acknowledgement.
 */
static inline void
enquiry_response(struct PStack *st)
{
	if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
		enquiry_cr(st, RNR, RSP, 1);
	else
		enquiry_cr(st, RR, RSP, 1);
	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
}
/*
 * Poll the peer for its status: send RR or RNR as a command with the
 * P bit set and start T200 to supervise the answer.
 */
static inline void
transmit_enquiry(struct PStack *st)
{
	if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
		enquiry_cr(st, RNR, CMD, 1);
	else
		enquiry_cr(st, RR, CMD, 1);
	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	start_t200(st, 9);
}
/*
 * Invalid N(R) received: report MDL_ERROR 'J' and recover by
 * re-establishing the link (not layer-3 initiated).
 */
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;

	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
/*
 * Roll V(S) back to nr and move every frame in between from the
 * retransmission window back to the head of the i_queue (walking the
 * window backwards from the current V(S)), then kick layer 1 to
 * resend.  For LAPB the re-queued bytes are re-added to the
 * B-channel transmit statistics.
 */
static void
invoke_retransmission(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	u_int p1;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			if(test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			p1 = (p1 + l2->sow) % l2->window;
			if (test_bit(FLG_LAPB, &l2->flag))
				st->l1.bcs->tx_cnt += l2->windowar[p1]->len + l2headersize(l2, 0);
			skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			l2->windowar[p1] = NULL;
		}
		spin_unlock_irqrestore(&l2->lock, flags);
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		return;
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
/*
 * Supervisory frame (RR/RNR/REJ) received in ST_L2_7.  Updates the
 * peer-busy state, answers an enquiry (P bit on a command; a response
 * with P/F set is error 'A'), then — if N(R) is legal — acknowledges
 * frames and drives retransmission/timers:
 *   REJ: retransmit from N(R), swap T200 for T203;
 *   RR acking everything: stop T200, restart T203;
 *   partial ack or RNR: restart T200 (dropping T203 unless RR).
 * An illegal N(R) triggers nrerrorrecovery().
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;

	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, st))
		typ = REJ;
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (PollFlag) {
		if (rsp)
			st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'A');
		else
			enquiry_response(st);
	}
	if (legalnr(st, nr)) {
		if (typ == REJ) {
			setva(st, nr);
			invoke_retransmission(st, nr);
			stop_t200(st, 10);
			if (FsmAddTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 6))
				l2m_debug(&st->l2.l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			setva(st, nr);
			stop_t200(st, 11);
			FsmRestartTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			setva(st, nr);
			if(typ != RR) FsmDelTimer(&st->l2.t203, 9);
			restart_t200(st, 12);
		}
		if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	} else
		nrerrorrecovery(fi);
}
  859. static void
  860. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  861. {
  862. struct PStack *st = fi->userdata;
  863. struct sk_buff *skb = arg;
  864. if (test_bit(FLG_LAPB, &st->l2.flag))
  865. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  866. if (!test_bit(FLG_L3_INIT, &st->l2.flag))
  867. skb_queue_tail(&st->l2.i_queue, skb);
  868. else
  869. dev_kfree_skb(skb);
  870. }
  871. static void
  872. l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
  873. {
  874. struct PStack *st = fi->userdata;
  875. struct sk_buff *skb = arg;
  876. if (test_bit(FLG_LAPB, &st->l2.flag))
  877. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  878. skb_queue_tail(&st->l2.i_queue, skb);
  879. st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
  880. }
  881. static void
  882. l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
  883. {
  884. struct PStack *st = fi->userdata;
  885. struct sk_buff *skb = arg;
  886. if (test_bit(FLG_LAPB, &st->l2.flag))
  887. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  888. skb_queue_tail(&st->l2.i_queue, skb);
  889. }
/*
 * I-frame received (states 7 and 8).  In-sequence frames are delivered
 * to layer 3, out-of-sequence frames trigger a REJ, and the piggybacked
 * N(R) acknowledgement is processed in all cases.
 */
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	struct Layer2 *l2 = &(st->l2);
	int PollFlag, ns, i;
	unsigned int nr;

	i = l2addrsize(l2);
	/* extract P bit, N(S) and N(R); octet layout depends on the
	 * modulo (8 vs. 128) in effect */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* own receiver busy: discard, but still answer a poll */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(st);
	} else if (l2->vr == ns) {
		/* in sequence: advance V(R) and pass the payload up */
		(l2->vr)++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->vr %= 128;
		else
			l2->vr %= 8;
		test_and_clear_bit(FLG_REJEXC, &l2->flag);
		if (PollFlag)
			enquiry_response(st);
		else
			/* acknowledge later (piggyback or RR below) */
			test_and_set_bit(FLG_ACK_PEND, &l2->flag);
		skb_pull(skb, l2headersize(l2, 0));
		st->l2.l2l3(st, DL_DATA | INDICATION, skb);
	} else {
		/* n(s)!=v(r) */
		dev_kfree_skb(skb);
		if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
			/* REJ already outstanding: only answer polls */
			if (PollFlag)
				enquiry_response(st);
		} else {
			/* first sequence error: send a single REJ */
			enquiry_cr(st, REJ, RSP, PollFlag);
			test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
		}
	}
	/* process the acknowledgement carried in N(R) */
	if (legalnr(st, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &st->l2.flag) && (fi->state == ST_L2_7)) {
			if (nr == st->l2.vs) {
				/* all outstanding frames acknowledged */
				stop_t200(st, 13);
				FsmRestartTimer(&st->l2.t203, st->l2.T203,
						EV_L2_T203, NULL, 7);
			} else if (nr != st->l2.va)
				restart_t200(st, 14);
		}
		setva(st, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
		enquiry_cr(st, RR, RSP, 0);
}
  954. static void
  955. l2_got_tei(struct FsmInst *fi, int event, void *arg)
  956. {
  957. struct PStack *st = fi->userdata;
  958. st->l2.tei = (long) arg;
  959. if (fi->state == ST_L2_3) {
  960. establishlink(fi);
  961. test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
  962. } else
  963. FsmChangeState(fi, ST_L2_4);
  964. if (!skb_queue_empty(&st->l2.ui_queue))
  965. tx_ui(st);
  966. }
/*
 * T200 expiry while awaiting establishment (state 5): retransmit
 * SABM(E) with the P bit set, up to N200 times, then give up and
 * release (MDL error 'G').
 */
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: just re-arm the timer */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		/* retry count exhausted: abandon establishment */
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		skb_queue_purge(&st->l2.i_queue);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'G');
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		/* 0x10 sets the P bit on the retransmitted SABM(E) */
		send_uframe(st, (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM)
			    | 0x10, CMD);
	}
}
/*
 * T200 expiry while awaiting release (state 6): retransmit DISC with
 * the P bit set, up to N200 times, then give up (MDL error 'H') and
 * confirm the release.
 */
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: just re-arm the timer */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'H');
		lapb_dl_release_l2l3(st, CONFIRM);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200,
			    NULL, 9);
		/* 0x10 sets the P bit on the retransmitted DISC */
		send_uframe(st, DISC | 0x10, CMD);
	}
}
/*
 * T200 expiry in state 7: enter timer recovery (state 8) and poll the
 * peer with an enquiry.
 */
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: just re-arm the timer */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
	st->l2.rc = 0;
	FsmChangeState(fi, ST_L2_8);
	/* first enquiry of the recovery procedure counts as retry 1 */
	transmit_enquiry(st);
	st->l2.rc++;
}
/*
 * T200 expiry in state 8 (timer recovery): repeat the enquiry up to
 * N200 times; when exhausted, report MDL error 'I' and re-establish
 * the link.
 */
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: just re-arm the timer */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
	if (st->l2.rc == st->l2.N200) {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	} else {
		transmit_enquiry(st);
		st->l2.rc++;
	}
}
/*
 * T203 (idle supervision) expiry in state 7: the link has been quiet
 * too long — enter timer recovery and poll the peer.
 */
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: just re-arm the timer */
		FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 9);
		return;
	}
	FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(st);
	st->l2.rc = 0;
}
  1055. static void
  1056. l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
  1057. {
  1058. struct PStack *st = fi->userdata;
  1059. struct sk_buff *skb;
  1060. struct Layer2 *l2 = &st->l2;
  1061. u_char header[MAX_HEADER_LEN];
  1062. int i, hdr_space_needed;
  1063. int unsigned p1;
  1064. u_long flags;
  1065. if (!cansend(st))
  1066. return;
  1067. skb = skb_dequeue(&l2->i_queue);
  1068. if (!skb)
  1069. return;
  1070. hdr_space_needed = l2headersize(l2, 0);
  1071. if (hdr_space_needed > skb_headroom(skb)) {
  1072. struct sk_buff *orig_skb = skb;
  1073. skb = skb_realloc_headroom(skb, hdr_space_needed);
  1074. if (!skb) {
  1075. dev_kfree_skb(orig_skb);
  1076. return;
  1077. }
  1078. }
  1079. spin_lock_irqsave(&l2->lock, flags);
  1080. if(test_bit(FLG_MOD128, &l2->flag))
  1081. p1 = (l2->vs - l2->va) % 128;
  1082. else
  1083. p1 = (l2->vs - l2->va) % 8;
  1084. p1 = (p1 + l2->sow) % l2->window;
  1085. if (l2->windowar[p1]) {
  1086. printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
  1087. p1);
  1088. dev_kfree_skb(l2->windowar[p1]);
  1089. }
  1090. l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
  1091. i = sethdraddr(&st->l2, header, CMD);
  1092. if (test_bit(FLG_MOD128, &l2->flag)) {
  1093. header[i++] = l2->vs << 1;
  1094. header[i++] = l2->vr << 1;
  1095. l2->vs = (l2->vs + 1) % 128;
  1096. } else {
  1097. header[i++] = (l2->vr << 5) | (l2->vs << 1);
  1098. l2->vs = (l2->vs + 1) % 8;
  1099. }
  1100. spin_unlock_irqrestore(&l2->lock, flags);
  1101. memcpy(skb_push(skb, i), header, i);
  1102. st->l2.l2l1(st, PH_PULL | INDICATION, skb);
  1103. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  1104. if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
  1105. FsmDelTimer(&st->l2.t203, 13);
  1106. FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
  1107. }
  1108. if (!skb_queue_empty(&l2->i_queue) && cansend(st))
  1109. st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
  1110. }
/*
 * Supervisory frame received in state 8 (timer recovery).  A response
 * with F=1 and a valid N(R) resolves the recovery: retransmission is
 * invoked and the link returns to state 7.
 */
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;

	/* command/response bit; inverted on the originating side */
	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);

	/* extract F/P bit and N(R); layout depends on the modulo */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (rsp && PollFlag) {
		if (legalnr(st, nr)) {
			if (rnr) {
				/* peer still busy: keep T200 running */
				restart_t200(st, 15);
			} else {
				stop_t200(st, 16);
				FsmAddTimer(&l2->t203, l2->T203,
					    EV_L2_T203, NULL, 5);
				setva(st, nr);
			}
			invoke_retransmission(st, nr);
			FsmChangeState(fi, ST_L2_7);
			if (!skb_queue_empty(&l2->i_queue) && cansend(st))
				st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		/* commands with P=1 still get an enquiry response */
		if (!rsp && PollFlag)
			enquiry_response(st);
		if (legalnr(st, nr)) {
			setva(st, nr);
		} else
			nrerrorrecovery(fi);
	}
}
/*
 * FRMR (frame reject) received.  If the rejected frame was an I- or
 * S-frame, or an UA arrived while established, report MDL error 'K'
 * and re-establish the data link.
 */
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	/* skip address + FRMR control octet to reach the rejected
	 * frame's control field */
	skb_pull(skb, l2addrsize(&st->l2) + 1);
	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) ||	/* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
	dev_kfree_skb(skb);
}
  1175. static void
  1176. l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
  1177. {
  1178. struct PStack *st = fi->userdata;
  1179. skb_queue_purge(&st->l2.ui_queue);
  1180. st->l2.tei = -1;
  1181. FsmChangeState(fi, ST_L2_1);
  1182. }
  1183. static void
  1184. l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
  1185. {
  1186. struct PStack *st = fi->userdata;
  1187. skb_queue_purge(&st->l2.ui_queue);
  1188. st->l2.tei = -1;
  1189. st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
  1190. FsmChangeState(fi, ST_L2_1);
  1191. }
/*
 * TEI removed in state 5 (awaiting establishment): flush all queues
 * and the retransmission window, stop T200, release towards layer 3
 * and fall back to the TEI-unassigned state.
 */
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	st->l2.tei = -1;
	stop_t200(st, 17);
	st5_dl_release_l2l3(st);
	FsmChangeState(fi, ST_L2_1);
}
  1204. static void
  1205. l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
  1206. {
  1207. struct PStack *st = fi->userdata;
  1208. skb_queue_purge(&st->l2.ui_queue);
  1209. st->l2.tei = -1;
  1210. stop_t200(st, 18);
  1211. st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
  1212. FsmChangeState(fi, ST_L2_1);
  1213. }
/*
 * TEI removed in the established states (7/8): flush everything, stop
 * both timers, indicate release to layer 3 and fall back to the
 * TEI-unassigned state.
 */
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	st->l2.tei = -1;
	stop_t200(st, 17);
	FsmDelTimer(&st->l2.t203, 19);
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_1);
}
  1227. static void
  1228. l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
  1229. {
  1230. struct PStack *st = fi->userdata;
  1231. skb_queue_purge(&st->l2.i_queue);
  1232. skb_queue_purge(&st->l2.ui_queue);
  1233. if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
  1234. st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
  1235. }
/*
 * Persistent layer 1 deactivation in state 5: abort the establishment
 * attempt — flush queues/window, stop T200, release towards layer 3
 * and return to state 4.
 */
static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	stop_t200(st, 19);
	st5_dl_release_l2l3(st);
	FsmChangeState(fi, ST_L2_4);
}
  1247. static void
  1248. l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
  1249. {
  1250. struct PStack *st = fi->userdata;
  1251. skb_queue_purge(&st->l2.ui_queue);
  1252. stop_t200(st, 20);
  1253. st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
  1254. FsmChangeState(fi, ST_L2_4);
  1255. }
/*
 * Persistent layer 1 deactivation in the established states (7/8):
 * tear the link down — flush queues/window, stop both timers, indicate
 * release to layer 3 and return to state 4.
 */
static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	stop_t200(st, 19);
	FsmDelTimer(&st->l2.t203, 19);
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_4);
}
  1268. static void
  1269. l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
  1270. {
  1271. struct PStack *st = fi->userdata;
  1272. if(!test_and_set_bit(FLG_OWN_BUSY, &st->l2.flag)) {
  1273. enquiry_cr(st, RNR, RSP, 0);
  1274. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  1275. }
  1276. }
  1277. static void
  1278. l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
  1279. {
  1280. struct PStack *st = fi->userdata;
  1281. if(!test_and_clear_bit(FLG_OWN_BUSY, &st->l2.flag)) {
  1282. enquiry_cr(st, RR, RSP, 0);
  1283. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  1284. }
  1285. }
  1286. static void
  1287. l2_frame_error(struct FsmInst *fi, int event, void *arg)
  1288. {
  1289. struct PStack *st = fi->userdata;
  1290. st->ma.layer(st, MDL_ERROR | INDICATION, arg);
  1291. }
  1292. static void
  1293. l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
  1294. {
  1295. struct PStack *st = fi->userdata;
  1296. st->ma.layer(st, MDL_ERROR | INDICATION, arg);
  1297. establishlink(fi);
  1298. test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
  1299. }
/* Jump table of the layer 2 FSM: (state, event) -> handler. */
static struct FsmNode L2FnList[] __initdata =
{
	/* layer 3 establish/release requests */
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	/* layer 3 data requests (I and UI frames) */
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	{ST_L2_1, EV_L2_DL_UNIT_DATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNIT_DATA, l2_send_ui},
	/* management entity (TEI) events */
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	/* received unnumbered frames */
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	/* received supervisory and I frames */
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	/* timer events */
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	/* transmit pull and flow control */
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	/* frame errors and layer 1 deactivation */
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};
/*
 * Entry point for frames and notifications coming up from layer 1.
 * PH_DATA frames are classified (I/S/U type) and fed into the FSM;
 * the skb is freed here whenever it was malformed or the FSM did not
 * consume the event.
 */
static void
isdnl2_l1l2(struct PStack *st, int pr, void *arg)
{
	struct sk_buff *skb = arg;
	u_char *datap;
	int ret = 1, len;
	int c = 0;

	switch (pr) {
	case (PH_DATA | INDICATION):
		datap = skb->data;
		len = l2addrsize(&st->l2);
		if (skb->len > len)
			datap += len;
		else {
			/* too short to carry a control field */
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'N');
			dev_kfree_skb(skb);
			return;
		}
		/* classify by control field; the *_error() checkers return
		 * a non-zero error tag for malformed frames */
		if (!(*datap & 1)) {	/* I-Frame */
			if (!(c = iframe_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_I, skb);
		} else if (IsSFrame(datap, st)) {	/* S-Frame */
			if (!(c = super_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_SUPER, skb);
		} else if (IsUI(datap)) {
			if (!(c = UI_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_UI, skb);
		} else if (IsSABME(datap, st)) {
			if (!(c = unnum_error(st, skb, CMD)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_SABME, skb);
		} else if (IsUA(datap)) {
			if (!(c = unnum_error(st, skb, RSP)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_UA, skb);
		} else if (IsDISC(datap)) {
			if (!(c = unnum_error(st, skb, CMD)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_DISC, skb);
		} else if (IsDM(datap)) {
			if (!(c = unnum_error(st, skb, RSP)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_DM, skb);
		} else if (IsFRMR(datap)) {
			if (!(c = FRMR_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_FRMR, skb);
		} else {
			/* unrecognized frame type */
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'L');
			dev_kfree_skb(skb);
			ret = 0;
		}
		if (c) {
			/* malformed frame: drop it and report the tag */
			dev_kfree_skb(skb);
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
			ret = 0;
		}
		if (ret)
			/* event was not handled in the current state */
			dev_kfree_skb(skb);
		break;
	case (PH_PULL | CONFIRM):
		FsmEvent(&st->l2.l2m, EV_L2_ACK_PULL, arg);
		break;
	case (PH_PAUSE | INDICATION):
		/* D-channel flow control from layer 1 */
		test_and_set_bit(FLG_DCHAN_BUSY, &st->l2.flag);
		break;
	case (PH_PAUSE | CONFIRM):
		test_and_clear_bit(FLG_DCHAN_BUSY, &st->l2.flag);
		break;
	case (PH_ACTIVATE | CONFIRM):
	case (PH_ACTIVATE | INDICATION):
		test_and_set_bit(FLG_L1_ACTIV, &st->l2.flag);
		/* run a deferred establish request now that L1 is up */
		if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
			FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
		break;
	case (PH_DEACTIVATE | INDICATION):
	case (PH_DEACTIVATE | CONFIRM):
		test_and_clear_bit(FLG_L1_ACTIV, &st->l2.flag);
		FsmEvent(&st->l2.l2m, EV_L1_DEACTIVATE, arg);
		break;
	default:
		l2m_debug(&st->l2.l2m, "l2 unknown pr %04x", pr);
		break;
	}
}
/*
 * Entry point for primitives coming down from layer 3.  Data
 * primitives whose FSM event is not handled in the current state are
 * freed here; a DL_ESTABLISH request is deferred (FLG_ESTAB_PEND)
 * until layer 1 becomes active.
 */
static void
isdnl2_l3l2(struct PStack *st, int pr, void *arg)
{
	switch (pr) {
	case (DL_DATA | REQUEST):
		if (FsmEvent(&st->l2.l2m, EV_L2_DL_DATA, arg)) {
			dev_kfree_skb((struct sk_buff *) arg);
		}
		break;
	case (DL_UNIT_DATA | REQUEST):
		if (FsmEvent(&st->l2.l2m, EV_L2_DL_UNIT_DATA, arg)) {
			dev_kfree_skb((struct sk_buff *) arg);
		}
		break;
	case (DL_ESTABLISH | REQUEST):
		if (test_bit(FLG_L1_ACTIV, &st->l2.flag)) {
			/* only LAPD, or the originating LAPB side, may
			 * start establishment */
			if (test_bit(FLG_LAPD, &st->l2.flag) ||
			    test_bit(FLG_ORIG, &st->l2.flag)) {
				FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
			}
		} else {
			/* L1 not yet active: remember the request and
			 * activate first */
			if (test_bit(FLG_LAPD, &st->l2.flag) ||
			    test_bit(FLG_ORIG, &st->l2.flag)) {
				test_and_set_bit(FLG_ESTAB_PEND, &st->l2.flag);
			}
			st->l2.l2l1(st, PH_ACTIVATE, NULL);
		}
		break;
	case (DL_RELEASE | REQUEST):
		if (test_bit(FLG_LAPB, &st->l2.flag)) {
			/* LAPB: releasing also deactivates the channel */
			st->l2.l2l1(st, PH_DEACTIVATE, NULL);
		}
		FsmEvent(&st->l2.l2m, EV_L2_DL_RELEASE_REQ, arg);
		break;
	case (MDL_ASSIGN | REQUEST):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_ASSIGN, arg);
		break;
	case (MDL_REMOVE | REQUEST):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_REMOVE, arg);
		break;
	case (MDL_ERROR | RESPONSE):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_ERROR, arg);
		break;
	}
}
/*
 * Tear down the layer 2 part of a stack: stop both timers, drop all
 * queued frames and release the transmit window.
 */
void
releasestack_isdnl2(struct PStack *st)
{
	FsmDelTimer(&st->l2.t200, 21);
	FsmDelTimer(&st->l2.t203, 16);
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	ReleaseWin(&st->l2);
}
/*
 * FSM debug callback: route printf-style messages to the HiSax
 * status log, tagged with this link's debug id.
 */
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	va_list args;
	struct PStack *st = fi->userdata;

	va_start(args, fmt);
	VHiSax_putstatus(st->l1.hardware, st->l2.debug_id, fmt, args);
	va_end(args);
}
/*
 * Initialize the layer 2 part of a stack: hook up the l1/l3 callbacks,
 * set up queues, transmit window and FSM, and create the T200/T203
 * timers.  LAPB (B-channel) links start in state 4, others in state 1.
 */
void
setstack_isdnl2(struct PStack *st, char *debug_id)
{
	spin_lock_init(&st->l2.lock);
	st->l1.l1l2 = isdnl2_l1l2;
	st->l3.l3l2 = isdnl2_l3l2;
	skb_queue_head_init(&st->l2.i_queue);
	skb_queue_head_init(&st->l2.ui_queue);
	InitWin(&st->l2);
	st->l2.debug = 0;
	st->l2.l2m.fsm = &l2fsm;
	if (test_bit(FLG_LAPB, &st->l2.flag))
		/* LAPB needs no TEI assignment procedure */
		st->l2.l2m.state = ST_L2_4;
	else
		st->l2.l2m.state = ST_L2_1;
	st->l2.l2m.debug = 0;
	st->l2.l2m.userdata = st;
	st->l2.l2m.userint = 0;
	st->l2.l2m.printdebug = l2m_debug;
	strcpy(st->l2.debug_id, debug_id);
	FsmInitTimer(&st->l2.l2m, &st->l2.t200);
	FsmInitTimer(&st->l2.l2m, &st->l2.t203);
}
  1557. static void
  1558. transl2_l3l2(struct PStack *st, int pr, void *arg)
  1559. {
  1560. switch (pr) {
  1561. case (DL_DATA | REQUEST):
  1562. case (DL_UNIT_DATA | REQUEST):
  1563. st->l2.l2l1(st, PH_DATA | REQUEST, arg);
  1564. break;
  1565. case (DL_ESTABLISH | REQUEST):
  1566. st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL);
  1567. break;
  1568. case (DL_RELEASE | REQUEST):
  1569. st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
  1570. break;
  1571. }
  1572. }
/* Install the transparent layer 2 (no LAPD/LAPB procedures). */
void
setstack_transl2(struct PStack *st)
{
	st->l3.l3l2 = transl2_l3l2;
}
/* Transparent layer 2 holds no resources; nothing to release. */
void
releasestack_transl2(struct PStack *st)
{
}
/*
 * Module init: describe the layer 2 FSM (state/event counts and their
 * debug names) and build its dispatch table from L2FnList.
 */
int __init
Isdnl2New(void)
{
	l2fsm.state_count = L2_STATE_COUNT;
	l2fsm.event_count = L2_EVENT_COUNT;
	l2fsm.strEvent = strL2Event;
	l2fsm.strState = strL2State;
	return FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
}
/* Module exit: free the FSM dispatch table built by Isdnl2New(). */
void
Isdnl2Free(void)
{
	FsmFree(&l2fsm);
}