/* 11n_rxreorder.c */
/*
 * Marvell Wireless LAN device driver: 802.11n RX Re-ordering
 *
 * Copyright (C) 2011-2014, Marvell International Ltd.
 *
 * This software file (the "File") is distributed by Marvell International
 * Ltd. under the terms of the GNU General Public License Version 2, June 1991
 * (the "License"). You may use, redistribute and/or modify this File in
 * accordance with the terms and conditions of the License, a copy of which
 * is available by writing to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
 * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
 *
 * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
 * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
 * this warranty disclaimer.
 */
  19. #include "decl.h"
  20. #include "ioctl.h"
  21. #include "util.h"
  22. #include "fw.h"
  23. #include "main.h"
  24. #include "wmm.h"
  25. #include "11n.h"
  26. #include "11n_rxreorder.h"
  27. /* This function will dispatch amsdu packet and forward it to kernel/upper
  28. * layer.
  29. */
  30. static int mwifiex_11n_dispatch_amsdu_pkt(struct mwifiex_private *priv,
  31. struct sk_buff *skb)
  32. {
  33. struct rxpd *local_rx_pd = (struct rxpd *)(skb->data);
  34. int ret;
  35. if (le16_to_cpu(local_rx_pd->rx_pkt_type) == PKT_TYPE_AMSDU) {
  36. struct sk_buff_head list;
  37. struct sk_buff *rx_skb;
  38. __skb_queue_head_init(&list);
  39. skb_pull(skb, le16_to_cpu(local_rx_pd->rx_pkt_offset));
  40. skb_trim(skb, le16_to_cpu(local_rx_pd->rx_pkt_length));
  41. ieee80211_amsdu_to_8023s(skb, &list, priv->curr_addr,
  42. priv->wdev.iftype, 0, NULL, NULL);
  43. while (!skb_queue_empty(&list)) {
  44. struct rx_packet_hdr *rx_hdr;
  45. rx_skb = __skb_dequeue(&list);
  46. rx_hdr = (struct rx_packet_hdr *)rx_skb->data;
  47. if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
  48. ntohs(rx_hdr->eth803_hdr.h_proto) == ETH_P_TDLS) {
  49. mwifiex_process_tdls_action_frame(priv,
  50. (u8 *)rx_hdr,
  51. skb->len);
  52. }
  53. if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
  54. ret = mwifiex_uap_recv_packet(priv, rx_skb);
  55. else
  56. ret = mwifiex_recv_packet(priv, rx_skb);
  57. if (ret == -1)
  58. mwifiex_dbg(priv->adapter, ERROR,
  59. "Rx of A-MSDU failed");
  60. }
  61. return 0;
  62. }
  63. return -1;
  64. }
  65. /* This function will process the rx packet and forward it to kernel/upper
  66. * layer.
  67. */
  68. static int mwifiex_11n_dispatch_pkt(struct mwifiex_private *priv, void *payload)
  69. {
  70. int ret;
  71. if (!payload) {
  72. mwifiex_dbg(priv->adapter, INFO, "info: fw drop data\n");
  73. return 0;
  74. }
  75. ret = mwifiex_11n_dispatch_amsdu_pkt(priv, payload);
  76. if (!ret)
  77. return 0;
  78. if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP)
  79. return mwifiex_handle_uap_rx_forward(priv, payload);
  80. return mwifiex_process_rx_packet(priv, payload);
  81. }
  82. /*
  83. * This function dispatches all packets in the Rx reorder table until the
  84. * start window.
  85. *
  86. * There could be holes in the buffer, which are skipped by the function.
  87. * Since the buffer is linear, the function uses rotation to simulate
  88. * circular buffer.
  89. */
  90. static void
  91. mwifiex_11n_dispatch_pkt_until_start_win(struct mwifiex_private *priv,
  92. struct mwifiex_rx_reorder_tbl *tbl,
  93. int start_win)
  94. {
  95. int pkt_to_send, i;
  96. void *rx_tmp_ptr;
  97. unsigned long flags;
  98. pkt_to_send = (start_win > tbl->start_win) ?
  99. min((start_win - tbl->start_win), tbl->win_size) :
  100. tbl->win_size;
  101. for (i = 0; i < pkt_to_send; ++i) {
  102. spin_lock_irqsave(&priv->rx_pkt_lock, flags);
  103. rx_tmp_ptr = NULL;
  104. if (tbl->rx_reorder_ptr[i]) {
  105. rx_tmp_ptr = tbl->rx_reorder_ptr[i];
  106. tbl->rx_reorder_ptr[i] = NULL;
  107. }
  108. spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
  109. if (rx_tmp_ptr)
  110. mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
  111. }
  112. spin_lock_irqsave(&priv->rx_pkt_lock, flags);
  113. /*
  114. * We don't have a circular buffer, hence use rotation to simulate
  115. * circular buffer
  116. */
  117. for (i = 0; i < tbl->win_size - pkt_to_send; ++i) {
  118. tbl->rx_reorder_ptr[i] = tbl->rx_reorder_ptr[pkt_to_send + i];
  119. tbl->rx_reorder_ptr[pkt_to_send + i] = NULL;
  120. }
  121. tbl->start_win = start_win;
  122. spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
  123. }
  124. /*
  125. * This function dispatches all packets in the Rx reorder table until
  126. * a hole is found.
  127. *
  128. * The start window is adjusted automatically when a hole is located.
  129. * Since the buffer is linear, the function uses rotation to simulate
  130. * circular buffer.
  131. */
  132. static void
  133. mwifiex_11n_scan_and_dispatch(struct mwifiex_private *priv,
  134. struct mwifiex_rx_reorder_tbl *tbl)
  135. {
  136. int i, j, xchg;
  137. void *rx_tmp_ptr;
  138. unsigned long flags;
  139. for (i = 0; i < tbl->win_size; ++i) {
  140. spin_lock_irqsave(&priv->rx_pkt_lock, flags);
  141. if (!tbl->rx_reorder_ptr[i]) {
  142. spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
  143. break;
  144. }
  145. rx_tmp_ptr = tbl->rx_reorder_ptr[i];
  146. tbl->rx_reorder_ptr[i] = NULL;
  147. spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
  148. mwifiex_11n_dispatch_pkt(priv, rx_tmp_ptr);
  149. }
  150. spin_lock_irqsave(&priv->rx_pkt_lock, flags);
  151. /*
  152. * We don't have a circular buffer, hence use rotation to simulate
  153. * circular buffer
  154. */
  155. if (i > 0) {
  156. xchg = tbl->win_size - i;
  157. for (j = 0; j < xchg; ++j) {
  158. tbl->rx_reorder_ptr[j] = tbl->rx_reorder_ptr[i + j];
  159. tbl->rx_reorder_ptr[i + j] = NULL;
  160. }
  161. }
  162. tbl->start_win = (tbl->start_win + i) & (MAX_TID_VALUE - 1);
  163. spin_unlock_irqrestore(&priv->rx_pkt_lock, flags);
  164. }
  165. /*
  166. * This function deletes the Rx reorder table and frees the memory.
  167. *
  168. * The function stops the associated timer and dispatches all the
  169. * pending packets in the Rx reorder table before deletion.
  170. */
  171. static void
  172. mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
  173. struct mwifiex_rx_reorder_tbl *tbl)
  174. {
  175. unsigned long flags;
  176. int start_win;
  177. if (!tbl)
  178. return;
  179. spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
  180. priv->adapter->rx_locked = true;
  181. if (priv->adapter->rx_processing) {
  182. spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
  183. flush_workqueue(priv->adapter->rx_workqueue);
  184. } else {
  185. spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
  186. }
  187. start_win = (tbl->start_win + tbl->win_size) & (MAX_TID_VALUE - 1);
  188. mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
  189. del_timer_sync(&tbl->timer_context.timer);
  190. tbl->timer_context.timer_is_set = false;
  191. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
  192. list_del(&tbl->list);
  193. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
  194. kfree(tbl->rx_reorder_ptr);
  195. kfree(tbl);
  196. spin_lock_irqsave(&priv->adapter->rx_proc_lock, flags);
  197. priv->adapter->rx_locked = false;
  198. spin_unlock_irqrestore(&priv->adapter->rx_proc_lock, flags);
  199. }
  200. /*
  201. * This function returns the pointer to an entry in Rx reordering
  202. * table which matches the given TA/TID pair.
  203. */
  204. struct mwifiex_rx_reorder_tbl *
  205. mwifiex_11n_get_rx_reorder_tbl(struct mwifiex_private *priv, int tid, u8 *ta)
  206. {
  207. struct mwifiex_rx_reorder_tbl *tbl;
  208. unsigned long flags;
  209. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
  210. list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list) {
  211. if (!memcmp(tbl->ta, ta, ETH_ALEN) && tbl->tid == tid) {
  212. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
  213. flags);
  214. return tbl;
  215. }
  216. }
  217. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
  218. return NULL;
  219. }
  220. /* This function retrieves the pointer to an entry in Rx reordering
  221. * table which matches the given TA and deletes it.
  222. */
  223. void mwifiex_11n_del_rx_reorder_tbl_by_ta(struct mwifiex_private *priv, u8 *ta)
  224. {
  225. struct mwifiex_rx_reorder_tbl *tbl, *tmp;
  226. unsigned long flags;
  227. if (!ta)
  228. return;
  229. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
  230. list_for_each_entry_safe(tbl, tmp, &priv->rx_reorder_tbl_ptr, list) {
  231. if (!memcmp(tbl->ta, ta, ETH_ALEN)) {
  232. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
  233. flags);
  234. mwifiex_del_rx_reorder_entry(priv, tbl);
  235. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
  236. }
  237. }
  238. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
  239. return;
  240. }
  241. /*
  242. * This function finds the last sequence number used in the packets
  243. * buffered in Rx reordering table.
  244. */
  245. static int
  246. mwifiex_11n_find_last_seq_num(struct reorder_tmr_cnxt *ctx)
  247. {
  248. struct mwifiex_rx_reorder_tbl *rx_reorder_tbl_ptr = ctx->ptr;
  249. struct mwifiex_private *priv = ctx->priv;
  250. unsigned long flags;
  251. int i;
  252. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
  253. for (i = rx_reorder_tbl_ptr->win_size - 1; i >= 0; --i) {
  254. if (rx_reorder_tbl_ptr->rx_reorder_ptr[i]) {
  255. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
  256. flags);
  257. return i;
  258. }
  259. }
  260. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
  261. return -1;
  262. }
  263. /*
  264. * This function flushes all the packets in Rx reordering table.
  265. *
  266. * The function checks if any packets are currently buffered in the
  267. * table or not. In case there are packets available, it dispatches
  268. * them and then dumps the Rx reordering table.
  269. */
  270. static void
  271. mwifiex_flush_data(unsigned long context)
  272. {
  273. struct reorder_tmr_cnxt *ctx =
  274. (struct reorder_tmr_cnxt *) context;
  275. int start_win, seq_num;
  276. ctx->timer_is_set = false;
  277. seq_num = mwifiex_11n_find_last_seq_num(ctx);
  278. if (seq_num < 0)
  279. return;
  280. mwifiex_dbg(ctx->priv->adapter, INFO, "info: flush data %d\n", seq_num);
  281. start_win = (ctx->ptr->start_win + seq_num + 1) & (MAX_TID_VALUE - 1);
  282. mwifiex_11n_dispatch_pkt_until_start_win(ctx->priv, ctx->ptr,
  283. start_win);
  284. }
  285. /*
  286. * This function creates an entry in Rx reordering table for the
  287. * given TA/TID.
  288. *
  289. * The function also initializes the entry with sequence number, window
  290. * size as well as initializes the timer.
  291. *
  292. * If the received TA/TID pair is already present, all the packets are
  293. * dispatched and the window size is moved until the SSN.
  294. */
  295. static void
  296. mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
  297. int tid, int win_size, int seq_num)
  298. {
  299. int i;
  300. struct mwifiex_rx_reorder_tbl *tbl, *new_node;
  301. u16 last_seq = 0;
  302. unsigned long flags;
  303. struct mwifiex_sta_node *node;
  304. /*
  305. * If we get a TID, ta pair which is already present dispatch all the
  306. * the packets and move the window size until the ssn
  307. */
  308. tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
  309. if (tbl) {
  310. mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, seq_num);
  311. return;
  312. }
  313. /* if !tbl then create one */
  314. new_node = kzalloc(sizeof(struct mwifiex_rx_reorder_tbl), GFP_KERNEL);
  315. if (!new_node)
  316. return;
  317. INIT_LIST_HEAD(&new_node->list);
  318. new_node->tid = tid;
  319. memcpy(new_node->ta, ta, ETH_ALEN);
  320. new_node->start_win = seq_num;
  321. new_node->init_win = seq_num;
  322. new_node->flags = 0;
  323. spin_lock_irqsave(&priv->sta_list_spinlock, flags);
  324. if (mwifiex_queuing_ra_based(priv)) {
  325. if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP) {
  326. node = mwifiex_get_sta_entry(priv, ta);
  327. if (node)
  328. last_seq = node->rx_seq[tid];
  329. }
  330. } else {
  331. node = mwifiex_get_sta_entry(priv, ta);
  332. if (node)
  333. last_seq = node->rx_seq[tid];
  334. else
  335. last_seq = priv->rx_seq[tid];
  336. }
  337. spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
  338. mwifiex_dbg(priv->adapter, INFO,
  339. "info: last_seq=%d start_win=%d\n",
  340. last_seq, new_node->start_win);
  341. if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
  342. last_seq >= new_node->start_win) {
  343. new_node->start_win = last_seq + 1;
  344. new_node->flags |= RXREOR_INIT_WINDOW_SHIFT;
  345. }
  346. new_node->win_size = win_size;
  347. new_node->rx_reorder_ptr = kzalloc(sizeof(void *) * win_size,
  348. GFP_KERNEL);
  349. if (!new_node->rx_reorder_ptr) {
  350. kfree((u8 *) new_node);
  351. mwifiex_dbg(priv->adapter, ERROR,
  352. "%s: failed to alloc reorder_ptr\n", __func__);
  353. return;
  354. }
  355. new_node->timer_context.ptr = new_node;
  356. new_node->timer_context.priv = priv;
  357. new_node->timer_context.timer_is_set = false;
  358. setup_timer(&new_node->timer_context.timer, mwifiex_flush_data,
  359. (unsigned long)&new_node->timer_context);
  360. for (i = 0; i < win_size; ++i)
  361. new_node->rx_reorder_ptr[i] = NULL;
  362. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
  363. list_add_tail(&new_node->list, &priv->rx_reorder_tbl_ptr);
  364. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
  365. }
  366. static void
  367. mwifiex_11n_rxreorder_timer_restart(struct mwifiex_rx_reorder_tbl *tbl)
  368. {
  369. u32 min_flush_time;
  370. if (tbl->win_size >= MWIFIEX_BA_WIN_SIZE_32)
  371. min_flush_time = MIN_FLUSH_TIMER_15_MS;
  372. else
  373. min_flush_time = MIN_FLUSH_TIMER_MS;
  374. mod_timer(&tbl->timer_context.timer,
  375. jiffies + msecs_to_jiffies(min_flush_time * tbl->win_size));
  376. tbl->timer_context.timer_is_set = true;
  377. }
  378. /*
  379. * This function prepares command for adding a BA request.
  380. *
  381. * Preparation includes -
  382. * - Setting command ID and proper size
  383. * - Setting add BA request buffer
  384. * - Ensuring correct endian-ness
  385. */
  386. int mwifiex_cmd_11n_addba_req(struct host_cmd_ds_command *cmd, void *data_buf)
  387. {
  388. struct host_cmd_ds_11n_addba_req *add_ba_req = &cmd->params.add_ba_req;
  389. cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_REQ);
  390. cmd->size = cpu_to_le16(sizeof(*add_ba_req) + S_DS_GEN);
  391. memcpy(add_ba_req, data_buf, sizeof(*add_ba_req));
  392. return 0;
  393. }
  394. /*
  395. * This function prepares command for adding a BA response.
  396. *
  397. * Preparation includes -
  398. * - Setting command ID and proper size
  399. * - Setting add BA response buffer
  400. * - Ensuring correct endian-ness
  401. */
  402. int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
  403. struct host_cmd_ds_command *cmd,
  404. struct host_cmd_ds_11n_addba_req
  405. *cmd_addba_req)
  406. {
  407. struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
  408. struct mwifiex_sta_node *sta_ptr;
  409. u32 rx_win_size = priv->add_ba_param.rx_win_size;
  410. u8 tid;
  411. int win_size;
  412. unsigned long flags;
  413. uint16_t block_ack_param_set;
  414. if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
  415. ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
  416. priv->adapter->is_hw_11ac_capable &&
  417. memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
  418. spin_lock_irqsave(&priv->sta_list_spinlock, flags);
  419. sta_ptr = mwifiex_get_sta_entry(priv,
  420. cmd_addba_req->peer_mac_addr);
  421. if (!sta_ptr) {
  422. spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
  423. mwifiex_dbg(priv->adapter, ERROR,
  424. "BA setup with unknown TDLS peer %pM!\n",
  425. cmd_addba_req->peer_mac_addr);
  426. return -1;
  427. }
  428. if (sta_ptr->is_11ac_enabled)
  429. rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
  430. spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
  431. }
  432. cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
  433. cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
  434. memcpy(add_ba_rsp->peer_mac_addr, cmd_addba_req->peer_mac_addr,
  435. ETH_ALEN);
  436. add_ba_rsp->dialog_token = cmd_addba_req->dialog_token;
  437. add_ba_rsp->block_ack_tmo = cmd_addba_req->block_ack_tmo;
  438. add_ba_rsp->ssn = cmd_addba_req->ssn;
  439. block_ack_param_set = le16_to_cpu(cmd_addba_req->block_ack_param_set);
  440. tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
  441. >> BLOCKACKPARAM_TID_POS;
  442. add_ba_rsp->status_code = cpu_to_le16(ADDBA_RSP_STATUS_ACCEPT);
  443. block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
  444. /* If we don't support AMSDU inside AMPDU, reset the bit */
  445. if (!priv->add_ba_param.rx_amsdu ||
  446. (priv->aggr_prio_tbl[tid].amsdu == BA_STREAM_NOT_ALLOWED))
  447. block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
  448. block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
  449. add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
  450. win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
  451. & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
  452. >> BLOCKACKPARAM_WINSIZE_POS;
  453. cmd_addba_req->block_ack_param_set = cpu_to_le16(block_ack_param_set);
  454. mwifiex_11n_create_rx_reorder_tbl(priv, cmd_addba_req->peer_mac_addr,
  455. tid, win_size,
  456. le16_to_cpu(cmd_addba_req->ssn));
  457. return 0;
  458. }
  459. /*
  460. * This function prepares command for deleting a BA request.
  461. *
  462. * Preparation includes -
  463. * - Setting command ID and proper size
  464. * - Setting del BA request buffer
  465. * - Ensuring correct endian-ness
  466. */
  467. int mwifiex_cmd_11n_delba(struct host_cmd_ds_command *cmd, void *data_buf)
  468. {
  469. struct host_cmd_ds_11n_delba *del_ba = &cmd->params.del_ba;
  470. cmd->command = cpu_to_le16(HostCmd_CMD_11N_DELBA);
  471. cmd->size = cpu_to_le16(sizeof(*del_ba) + S_DS_GEN);
  472. memcpy(del_ba, data_buf, sizeof(*del_ba));
  473. return 0;
  474. }
  475. /*
  476. * This function identifies if Rx reordering is needed for a received packet.
  477. *
  478. * In case reordering is required, the function will do the reordering
  479. * before sending it to kernel.
  480. *
  481. * The Rx reorder table is checked first with the received TID/TA pair. If
  482. * not found, the received packet is dispatched immediately. But if found,
  483. * the packet is reordered and all the packets in the updated Rx reordering
  484. * table is dispatched until a hole is found.
  485. *
  486. * For sequence number less than the starting window, the packet is dropped.
  487. */
  488. int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
  489. u16 seq_num, u16 tid,
  490. u8 *ta, u8 pkt_type, void *payload)
  491. {
  492. struct mwifiex_rx_reorder_tbl *tbl;
  493. int prev_start_win, start_win, end_win, win_size;
  494. u16 pkt_index;
  495. bool init_window_shift = false;
  496. int ret = 0;
  497. tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
  498. if (!tbl) {
  499. if (pkt_type != PKT_TYPE_BAR)
  500. mwifiex_11n_dispatch_pkt(priv, payload);
  501. return ret;
  502. }
  503. if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
  504. mwifiex_11n_dispatch_pkt(priv, payload);
  505. return ret;
  506. }
  507. start_win = tbl->start_win;
  508. prev_start_win = start_win;
  509. win_size = tbl->win_size;
  510. end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
  511. if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
  512. init_window_shift = true;
  513. tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
  514. }
  515. if (tbl->flags & RXREOR_FORCE_NO_DROP) {
  516. mwifiex_dbg(priv->adapter, INFO,
  517. "RXREOR_FORCE_NO_DROP when HS is activated\n");
  518. tbl->flags &= ~RXREOR_FORCE_NO_DROP;
  519. } else if (init_window_shift && seq_num < start_win &&
  520. seq_num >= tbl->init_win) {
  521. mwifiex_dbg(priv->adapter, INFO,
  522. "Sender TID sequence number reset %d->%d for SSN %d\n",
  523. start_win, seq_num, tbl->init_win);
  524. tbl->start_win = start_win = seq_num;
  525. end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
  526. } else {
  527. /*
  528. * If seq_num is less then starting win then ignore and drop
  529. * the packet
  530. */
  531. if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
  532. if (seq_num >= ((start_win + TWOPOW11) &
  533. (MAX_TID_VALUE - 1)) &&
  534. seq_num < start_win) {
  535. ret = -1;
  536. goto done;
  537. }
  538. } else if ((seq_num < start_win) ||
  539. (seq_num >= (start_win + TWOPOW11))) {
  540. ret = -1;
  541. goto done;
  542. }
  543. }
  544. /*
  545. * If this packet is a BAR we adjust seq_num as
  546. * WinStart = seq_num
  547. */
  548. if (pkt_type == PKT_TYPE_BAR)
  549. seq_num = ((seq_num + win_size) - 1) & (MAX_TID_VALUE - 1);
  550. if (((end_win < start_win) &&
  551. (seq_num < start_win) && (seq_num > end_win)) ||
  552. ((end_win > start_win) && ((seq_num > end_win) ||
  553. (seq_num < start_win)))) {
  554. end_win = seq_num;
  555. if (((end_win - win_size) + 1) >= 0)
  556. start_win = (end_win - win_size) + 1;
  557. else
  558. start_win = (MAX_TID_VALUE - (win_size - end_win)) + 1;
  559. mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
  560. }
  561. if (pkt_type != PKT_TYPE_BAR) {
  562. if (seq_num >= start_win)
  563. pkt_index = seq_num - start_win;
  564. else
  565. pkt_index = (seq_num+MAX_TID_VALUE) - start_win;
  566. if (tbl->rx_reorder_ptr[pkt_index]) {
  567. ret = -1;
  568. goto done;
  569. }
  570. tbl->rx_reorder_ptr[pkt_index] = payload;
  571. }
  572. /*
  573. * Dispatch all packets sequentially from start_win until a
  574. * hole is found and adjust the start_win appropriately
  575. */
  576. mwifiex_11n_scan_and_dispatch(priv, tbl);
  577. done:
  578. if (!tbl->timer_context.timer_is_set ||
  579. prev_start_win != tbl->start_win)
  580. mwifiex_11n_rxreorder_timer_restart(tbl);
  581. return ret;
  582. }
  583. /*
  584. * This function deletes an entry for a given TID/TA pair.
  585. *
  586. * The TID/TA are taken from del BA event body.
  587. */
  588. void
  589. mwifiex_del_ba_tbl(struct mwifiex_private *priv, int tid, u8 *peer_mac,
  590. u8 type, int initiator)
  591. {
  592. struct mwifiex_rx_reorder_tbl *tbl;
  593. struct mwifiex_tx_ba_stream_tbl *ptx_tbl;
  594. struct mwifiex_ra_list_tbl *ra_list;
  595. u8 cleanup_rx_reorder_tbl;
  596. unsigned long flags;
  597. int tid_down;
  598. if (type == TYPE_DELBA_RECEIVE)
  599. cleanup_rx_reorder_tbl = (initiator) ? true : false;
  600. else
  601. cleanup_rx_reorder_tbl = (initiator) ? false : true;
  602. mwifiex_dbg(priv->adapter, EVENT, "event: DELBA: %pM tid=%d initiator=%d\n",
  603. peer_mac, tid, initiator);
  604. if (cleanup_rx_reorder_tbl) {
  605. tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
  606. peer_mac);
  607. if (!tbl) {
  608. mwifiex_dbg(priv->adapter, EVENT,
  609. "event: TID, TA not found in table\n");
  610. return;
  611. }
  612. mwifiex_del_rx_reorder_entry(priv, tbl);
  613. } else {
  614. ptx_tbl = mwifiex_get_ba_tbl(priv, tid, peer_mac);
  615. if (!ptx_tbl) {
  616. mwifiex_dbg(priv->adapter, EVENT,
  617. "event: TID, RA not found in table\n");
  618. return;
  619. }
  620. tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
  621. ra_list = mwifiex_wmm_get_ralist_node(priv, tid_down, peer_mac);
  622. if (ra_list) {
  623. ra_list->amsdu_in_ampdu = false;
  624. ra_list->ba_status = BA_SETUP_NONE;
  625. }
  626. spin_lock_irqsave(&priv->tx_ba_stream_tbl_lock, flags);
  627. mwifiex_11n_delete_tx_ba_stream_tbl_entry(priv, ptx_tbl);
  628. spin_unlock_irqrestore(&priv->tx_ba_stream_tbl_lock, flags);
  629. }
  630. }
  631. /*
  632. * This function handles the command response of an add BA response.
  633. *
  634. * Handling includes changing the header fields into CPU format and
  635. * creating the stream, provided the add BA is accepted.
  636. */
  637. int mwifiex_ret_11n_addba_resp(struct mwifiex_private *priv,
  638. struct host_cmd_ds_command *resp)
  639. {
  640. struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &resp->params.add_ba_rsp;
  641. int tid, win_size;
  642. struct mwifiex_rx_reorder_tbl *tbl;
  643. uint16_t block_ack_param_set;
  644. block_ack_param_set = le16_to_cpu(add_ba_rsp->block_ack_param_set);
  645. tid = (block_ack_param_set & IEEE80211_ADDBA_PARAM_TID_MASK)
  646. >> BLOCKACKPARAM_TID_POS;
  647. /*
  648. * Check if we had rejected the ADDBA, if yes then do not create
  649. * the stream
  650. */
  651. if (le16_to_cpu(add_ba_rsp->status_code) != BA_RESULT_SUCCESS) {
  652. mwifiex_dbg(priv->adapter, ERROR, "ADDBA RSP: failed %pM tid=%d)\n",
  653. add_ba_rsp->peer_mac_addr, tid);
  654. tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
  655. add_ba_rsp->peer_mac_addr);
  656. if (tbl)
  657. mwifiex_del_rx_reorder_entry(priv, tbl);
  658. return 0;
  659. }
  660. win_size = (block_ack_param_set & IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
  661. >> BLOCKACKPARAM_WINSIZE_POS;
  662. tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid,
  663. add_ba_rsp->peer_mac_addr);
  664. if (tbl) {
  665. if ((block_ack_param_set & BLOCKACKPARAM_AMSDU_SUPP_MASK) &&
  666. priv->add_ba_param.rx_amsdu &&
  667. (priv->aggr_prio_tbl[tid].amsdu != BA_STREAM_NOT_ALLOWED))
  668. tbl->amsdu = true;
  669. else
  670. tbl->amsdu = false;
  671. }
  672. mwifiex_dbg(priv->adapter, CMD,
  673. "cmd: ADDBA RSP: %pM tid=%d ssn=%d win_size=%d\n",
  674. add_ba_rsp->peer_mac_addr, tid, add_ba_rsp->ssn, win_size);
  675. return 0;
  676. }
  677. /*
  678. * This function handles BA stream timeout event by preparing and sending
  679. * a command to the firmware.
  680. */
  681. void mwifiex_11n_ba_stream_timeout(struct mwifiex_private *priv,
  682. struct host_cmd_ds_11n_batimeout *event)
  683. {
  684. struct host_cmd_ds_11n_delba delba;
  685. memset(&delba, 0, sizeof(struct host_cmd_ds_11n_delba));
  686. memcpy(delba.peer_mac_addr, event->peer_mac_addr, ETH_ALEN);
  687. delba.del_ba_param_set |=
  688. cpu_to_le16((u16) event->tid << DELBA_TID_POS);
  689. delba.del_ba_param_set |= cpu_to_le16(
  690. (u16) event->origninator << DELBA_INITIATOR_POS);
  691. delba.reason_code = cpu_to_le16(WLAN_REASON_QSTA_TIMEOUT);
  692. mwifiex_send_cmd(priv, HostCmd_CMD_11N_DELBA, 0, 0, &delba, false);
  693. }
  694. /*
  695. * This function cleans up the Rx reorder table by deleting all the entries
  696. * and re-initializing.
  697. */
  698. void mwifiex_11n_cleanup_reorder_tbl(struct mwifiex_private *priv)
  699. {
  700. struct mwifiex_rx_reorder_tbl *del_tbl_ptr, *tmp_node;
  701. unsigned long flags;
  702. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
  703. list_for_each_entry_safe(del_tbl_ptr, tmp_node,
  704. &priv->rx_reorder_tbl_ptr, list) {
  705. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
  706. mwifiex_del_rx_reorder_entry(priv, del_tbl_ptr);
  707. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
  708. }
  709. INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
  710. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
  711. mwifiex_reset_11n_rx_seq_num(priv);
  712. }
  713. /*
  714. * This function updates all rx_reorder_tbl's flags.
  715. */
  716. void mwifiex_update_rxreor_flags(struct mwifiex_adapter *adapter, u8 flags)
  717. {
  718. struct mwifiex_private *priv;
  719. struct mwifiex_rx_reorder_tbl *tbl;
  720. unsigned long lock_flags;
  721. int i;
  722. for (i = 0; i < adapter->priv_num; i++) {
  723. priv = adapter->priv[i];
  724. if (!priv)
  725. continue;
  726. spin_lock_irqsave(&priv->rx_reorder_tbl_lock, lock_flags);
  727. if (list_empty(&priv->rx_reorder_tbl_ptr)) {
  728. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock,
  729. lock_flags);
  730. continue;
  731. }
  732. list_for_each_entry(tbl, &priv->rx_reorder_tbl_ptr, list)
  733. tbl->flags = flags;
  734. spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, lock_flags);
  735. }
  736. return;
  737. }
  738. /* This function update all the rx_win_size based on coex flag
  739. */
  740. static void mwifiex_update_ampdu_rxwinsize(struct mwifiex_adapter *adapter,
  741. bool coex_flag)
  742. {
  743. u8 i;
  744. u32 rx_win_size;
  745. struct mwifiex_private *priv;
  746. dev_dbg(adapter->dev, "Update rxwinsize %d\n", coex_flag);
  747. for (i = 0; i < adapter->priv_num; i++) {
  748. if (!adapter->priv[i])
  749. continue;
  750. priv = adapter->priv[i];
  751. rx_win_size = priv->add_ba_param.rx_win_size;
  752. if (coex_flag) {
  753. if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
  754. priv->add_ba_param.rx_win_size =
  755. MWIFIEX_STA_COEX_AMPDU_DEF_RXWINSIZE;
  756. if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P)
  757. priv->add_ba_param.rx_win_size =
  758. MWIFIEX_STA_COEX_AMPDU_DEF_RXWINSIZE;
  759. if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP)
  760. priv->add_ba_param.rx_win_size =
  761. MWIFIEX_UAP_COEX_AMPDU_DEF_RXWINSIZE;
  762. } else {
  763. if (priv->bss_type == MWIFIEX_BSS_TYPE_STA)
  764. priv->add_ba_param.rx_win_size =
  765. MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
  766. if (priv->bss_type == MWIFIEX_BSS_TYPE_P2P)
  767. priv->add_ba_param.rx_win_size =
  768. MWIFIEX_STA_AMPDU_DEF_RXWINSIZE;
  769. if (priv->bss_type == MWIFIEX_BSS_TYPE_UAP)
  770. priv->add_ba_param.rx_win_size =
  771. MWIFIEX_UAP_AMPDU_DEF_RXWINSIZE;
  772. }
  773. if (adapter->coex_win_size && adapter->coex_rx_win_size)
  774. priv->add_ba_param.rx_win_size =
  775. adapter->coex_rx_win_size;
  776. if (rx_win_size != priv->add_ba_param.rx_win_size) {
  777. if (!priv->media_connected)
  778. continue;
  779. for (i = 0; i < MAX_NUM_TID; i++)
  780. mwifiex_11n_delba(priv, i);
  781. }
  782. }
  783. }
/* This function checks the coex state for RX BA
 */
  786. void mwifiex_coex_ampdu_rxwinsize(struct mwifiex_adapter *adapter)
  787. {
  788. u8 i;
  789. struct mwifiex_private *priv;
  790. u8 count = 0;
  791. for (i = 0; i < adapter->priv_num; i++) {
  792. if (adapter->priv[i]) {
  793. priv = adapter->priv[i];
  794. if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) {
  795. if (priv->media_connected)
  796. count++;
  797. }
  798. if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
  799. if (priv->bss_started)
  800. count++;
  801. }
  802. }
  803. if (count >= MWIFIEX_BSS_COEX_COUNT)
  804. break;
  805. }
  806. if (count >= MWIFIEX_BSS_COEX_COUNT)
  807. mwifiex_update_ampdu_rxwinsize(adapter, true);
  808. else
  809. mwifiex_update_ampdu_rxwinsize(adapter, false);
  810. }
  811. /* This function handles rxba_sync event
  812. */
  813. void mwifiex_11n_rxba_sync_event(struct mwifiex_private *priv,
  814. u8 *event_buf, u16 len)
  815. {
  816. struct mwifiex_ie_types_rxba_sync *tlv_rxba = (void *)event_buf;
  817. u16 tlv_type, tlv_len;
  818. struct mwifiex_rx_reorder_tbl *rx_reor_tbl_ptr;
  819. u8 i, j;
  820. u16 seq_num, tlv_seq_num, tlv_bitmap_len;
  821. int tlv_buf_left = len;
  822. int ret;
  823. u8 *tmp;
  824. mwifiex_dbg_dump(priv->adapter, EVT_D, "RXBA_SYNC event:",
  825. event_buf, len);
  826. while (tlv_buf_left >= sizeof(*tlv_rxba)) {
  827. tlv_type = le16_to_cpu(tlv_rxba->header.type);
  828. tlv_len = le16_to_cpu(tlv_rxba->header.len);
  829. if (tlv_type != TLV_TYPE_RXBA_SYNC) {
  830. mwifiex_dbg(priv->adapter, ERROR,
  831. "Wrong TLV id=0x%x\n", tlv_type);
  832. return;
  833. }
  834. tlv_seq_num = le16_to_cpu(tlv_rxba->seq_num);
  835. tlv_bitmap_len = le16_to_cpu(tlv_rxba->bitmap_len);
  836. mwifiex_dbg(priv->adapter, INFO,
  837. "%pM tid=%d seq_num=%d bitmap_len=%d\n",
  838. tlv_rxba->mac, tlv_rxba->tid, tlv_seq_num,
  839. tlv_bitmap_len);
  840. rx_reor_tbl_ptr =
  841. mwifiex_11n_get_rx_reorder_tbl(priv, tlv_rxba->tid,
  842. tlv_rxba->mac);
  843. if (!rx_reor_tbl_ptr) {
  844. mwifiex_dbg(priv->adapter, ERROR,
  845. "Can not find rx_reorder_tbl!");
  846. return;
  847. }
  848. for (i = 0; i < tlv_bitmap_len; i++) {
  849. for (j = 0 ; j < 8; j++) {
  850. if (tlv_rxba->bitmap[i] & (1 << j)) {
  851. seq_num = (MAX_TID_VALUE - 1) &
  852. (tlv_seq_num + i * 8 + j);
  853. mwifiex_dbg(priv->adapter, ERROR,
  854. "drop packet,seq=%d\n",
  855. seq_num);
  856. ret = mwifiex_11n_rx_reorder_pkt
  857. (priv, seq_num, tlv_rxba->tid,
  858. tlv_rxba->mac, 0, NULL);
  859. if (ret)
  860. mwifiex_dbg(priv->adapter,
  861. ERROR,
  862. "Fail to drop packet");
  863. }
  864. }
  865. }
  866. tlv_buf_left -= (sizeof(*tlv_rxba) + tlv_len);
  867. tmp = (u8 *)tlv_rxba + tlv_len + sizeof(*tlv_rxba);
  868. tlv_rxba = (struct mwifiex_ie_types_rxba_sync *)tmp;
  869. }
  870. }