// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright(c) 2019-2020  Realtek Corporation
 */

#include <linux/devcoredump.h>

#include "cam.h"
#include "chan.h"
#include "debug.h"
#include "fw.h"
#include "mac.h"
#include "ps.h"
#include "reg.h"
#include "ser.h"
#include "util.h"

#define SER_RECFG_TIMEOUT 1000
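
/* SER (system error recovery) reacts to three classes of errors reported
 * through rtw89_ser_notify():
 *   - L0: CMAC errors, presumably recovered by firmware on its own; the
 *     driver is only notified (SER_EV_L0_RESET) and takes no action here.
 *   - L1: DMAC errors, recovered through a driver/firmware handshake that
 *     resets the TRX paths (the M0..M5 event sequence below).
 *   - L2: unrecoverable errors; the driver dumps firmware state, tears
 *     down MAC bindings and asks mac80211 to restart the hardware.
 */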

enum ser_evt {
	SER_EV_NONE,
	SER_EV_STATE_IN,
	SER_EV_STATE_OUT,
	SER_EV_L1_RESET_PREPARE, /* pre-M0 */
	SER_EV_L1_RESET, /* M1 */
	SER_EV_DO_RECOVERY, /* M3 */
	SER_EV_MAC_RESET_DONE, /* M5 */
	SER_EV_L2_RESET,
	SER_EV_L2_RECFG_DONE,
	SER_EV_L2_RECFG_TIMEOUT,
	SER_EV_M1_TIMEOUT,
	SER_EV_M3_TIMEOUT,
	SER_EV_FW_M5_TIMEOUT,
	SER_EV_L0_RESET,
	SER_EV_MAXX
};

enum ser_state {
	SER_IDLE_ST,
	SER_L1_RESET_PRE_ST,
	SER_RESET_TRX_ST,
	SER_DO_HCI_ST,
	SER_L2_RESET_ST,
	SER_ST_MAX_ST
};

struct ser_msg {
	struct list_head list;
	u8 event;
};

struct state_ent {
	u8 state;
	char *name;
	void (*st_func)(struct rtw89_ser *ser, u8 event);
};

struct event_ent {
	u8 event;
	char *name;
};

static char *ser_ev_name(struct rtw89_ser *ser, u8 event)
{
	if (event < SER_EV_MAXX)
		return ser->ev_tbl[event].name;

	return "err_ev_name";
}

static char *ser_st_name(struct rtw89_ser *ser)
{
	if (ser->state < SER_ST_MAX_ST)
		return ser->st_tbl[ser->state].name;

	return "err_st_name";
}

#define RTW89_DEF_SER_CD_TYPE(_name, _type, _size) \
struct ser_cd_ ## _name { \
	u32 type; \
	u32 type_size; \
	u64 padding; \
	u8 data[_size]; \
} __packed; \
static void ser_cd_ ## _name ## _init(struct ser_cd_ ## _name *p) \
{ \
	p->type = _type; \
	p->type_size = sizeof(p->data); \
	p->padding = 0x0123456789abcdef; \
}

enum rtw89_ser_cd_type {
	RTW89_SER_CD_FW_RSVD_PLE = 0,
	RTW89_SER_CD_FW_BACKTRACE = 1,
};

RTW89_DEF_SER_CD_TYPE(fw_rsvd_ple,
		      RTW89_SER_CD_FW_RSVD_PLE,
		      RTW89_FW_RSVD_PLE_SIZE);

RTW89_DEF_SER_CD_TYPE(fw_backtrace,
		      RTW89_SER_CD_FW_BACKTRACE,
		      RTW89_FW_BACKTRACE_MAX_SIZE);

struct rtw89_ser_cd_buffer {
	struct ser_cd_fw_rsvd_ple fwple;
	struct ser_cd_fw_backtrace fwbt;
} __packed;
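
/* The blob handed to devcoredump is simply both sections back to back.
 * Each section starts with a type tag, the size of its payload and a
 * fixed 64-bit pattern, presumably so a userspace parser can identify
 * and walk the sections without out-of-band metadata.
 */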

static struct rtw89_ser_cd_buffer *rtw89_ser_cd_prep(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser_cd_buffer *buf;

	buf = vzalloc(sizeof(*buf));
	if (!buf)
		return NULL;

	ser_cd_fw_rsvd_ple_init(&buf->fwple);
	ser_cd_fw_backtrace_init(&buf->fwbt);

	return buf;
}

static void rtw89_ser_cd_send(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf)
{
	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER sends core dump\n");

	/* After calling dev_coredump, buf's lifetime is supposed to be
	 * handled by the device coredump framework. Note that a new dump
	 * will be discarded if a previous one hasn't been released by the
	 * framework yet.
	 */
	dev_coredumpv(rtwdev->dev, buf, sizeof(*buf), GFP_KERNEL);
}

static void rtw89_ser_cd_free(struct rtw89_dev *rtwdev,
			      struct rtw89_ser_cd_buffer *buf, bool free_self)
{
	if (!free_self)
		return;

	rtw89_debug(rtwdev, RTW89_DBG_SER, "SER frees core dump by self\n");

	/* If a problem occurs while filling the core dump data, we don't
	 * hand it to the device coredump framework; instead, we free buf
	 * ourselves.
	 */
	vfree(buf);
}

static void ser_state_run(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s receive %s\n",
		    ser_st_name(ser), ser_ev_name(ser, evt));

	mutex_lock(&rtwdev->mutex);
	rtw89_leave_lps(rtwdev);
	mutex_unlock(&rtwdev->mutex);

	ser->st_tbl[ser->state].st_func(ser, evt);
}

static void ser_state_goto(struct rtw89_ser *ser, u8 new_state)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (ser->state == new_state || new_state >= SER_ST_MAX_ST)
		return;
	ser_state_run(ser, SER_EV_STATE_OUT);
	rtw89_debug(rtwdev, RTW89_DBG_SER, "ser: %s goto -> %s\n",
		    ser_st_name(ser), ser->st_tbl[new_state].name);
	ser->state = new_state;
	ser_state_run(ser, SER_EV_STATE_IN);
}
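
/* Every transition delivers SER_EV_STATE_OUT to the state being left and
 * SER_EV_STATE_IN to the state being entered, so each handler can pair
 * its setup (arming alarms, stopping queues) with the matching teardown.
 */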

static struct ser_msg *__rtw89_ser_dequeue_msg(struct rtw89_ser *ser)
{
	struct ser_msg *msg;

	spin_lock_irq(&ser->msg_q_lock);
	msg = list_first_entry_or_null(&ser->msg_q, struct ser_msg, list);
	if (msg)
		list_del(&msg->list);
	spin_unlock_irq(&ser->msg_q_lock);

	return msg;
}

static void rtw89_ser_hdl_work(struct work_struct *work)
{
	struct ser_msg *msg;
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_hdl_work);

	while ((msg = __rtw89_ser_dequeue_msg(ser))) {
		ser_state_run(ser, msg->event);
		kfree(msg);
	}
}

static int ser_send_msg(struct rtw89_ser *ser, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct ser_msg *msg = NULL;

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return -EIO;

	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
	if (!msg)
		return -ENOMEM;

	msg->event = event;

	spin_lock_irq(&ser->msg_q_lock);
	list_add(&msg->list, &ser->msg_q);
	spin_unlock_irq(&ser->msg_q_lock);

	ieee80211_queue_work(rtwdev->hw, &ser->ser_hdl_work);
	return 0;
}
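
/* ser_send_msg() appears to be callable from atomic context (error
 * notifications arrive via rtw89_ser_notify()), hence the GFP_ATOMIC
 * allocation and the irq-safe queue lock; the state machine itself
 * always runs from the ser_hdl_work worker.
 */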

static void rtw89_ser_alarm_work(struct work_struct *work)
{
	struct rtw89_ser *ser = container_of(work, struct rtw89_ser,
					     ser_alarm_work.work);

	ser_send_msg(ser, ser->alarm_event);
	ser->alarm_event = SER_EV_NONE;
}

static void ser_set_alarm(struct rtw89_ser *ser, u32 ms, u8 event)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (test_bit(RTW89_SER_DRV_STOP_RUN, ser->flags))
		return;

	ser->alarm_event = event;
	ieee80211_queue_delayed_work(rtwdev->hw, &ser->ser_alarm_work,
				     msecs_to_jiffies(ms));
}

static void ser_del_alarm(struct rtw89_ser *ser)
{
	cancel_delayed_work(&ser->ser_alarm_work);
	ser->alarm_event = SER_EV_NONE;
}

/* driver function */
static void drv_stop_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	ieee80211_stop_queues(rtwdev->hw);
	set_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_stop_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	clear_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	set_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void drv_trx_reset(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_hci_reset(rtwdev);
}

static void drv_resume_tx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_TX, ser->flags))
		return;

	ieee80211_wake_queues(rtwdev->hw);
	clear_bit(RTW89_SER_DRV_STOP_TX, ser->flags);
}

static void drv_resume_rx(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	if (!test_bit(RTW89_SER_DRV_STOP_RX, ser->flags))
		return;

	set_bit(RTW89_FLAG_RUNNING, rtwdev->flags);
	clear_bit(RTW89_SER_DRV_STOP_RX, ser->flags);
}

static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	rtw89_core_release_bit_map(rtwdev->hw_port, rtwvif->port);
	rtwvif->net_type = RTW89_NET_TYPE_NO_LINK;
	rtwvif->trigger = false;
	rtwvif->tdls_peer = 0;
}

static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta)
{
	struct rtw89_vif *rtwvif = (struct rtw89_vif *)data;
	struct rtw89_dev *rtwdev = rtwvif->rtwdev;
	struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv;

	if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls)
		rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam);
	if (sta->tdls)
		rtw89_cam_deinit_bssid_cam(rtwdev, &rtwsta->bssid_cam);

	INIT_LIST_HEAD(&rtwsta->ba_cam_list);
}

static void ser_deinit_cam(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif)
{
	ieee80211_iterate_stations_atomic(rtwdev->hw,
					  ser_sta_deinit_cam_iter,
					  rtwvif);

	rtw89_cam_deinit(rtwdev, rtwvif);

	bitmap_zero(rtwdev->cam_info.ba_cam_map, RTW89_MAX_BA_CAM_NUM);
}

static void ser_reset_mac_binding(struct rtw89_dev *rtwdev)
{
	struct rtw89_vif *rtwvif;

	rtw89_cam_reset_keys(rtwdev);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_deinit_cam(rtwdev, rtwvif);

	rtw89_core_release_all_bits_map(rtwdev->mac_id_map, RTW89_MAX_MAC_ID_NUM);
	rtw89_for_each_rtwvif(rtwdev, rtwvif)
		ser_reset_vif(rtwdev, rtwvif);

	rtwdev->total_sta_assoc = 0;
}

/* hal function */
static int hal_enable_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!test_bit(RTW89_SER_HAL_STOP_DMA, ser->flags))
		return 0;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_2);
	if (!ret)
		clear_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static int hal_stop_dma(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	int ret;

	if (!rtwdev->hci.ops->mac_lv1_rcvy)
		return -EIO;

	ret = rtwdev->hci.ops->mac_lv1_rcvy(rtwdev, RTW89_LV1_RCVY_STEP_1);
	if (!ret)
		set_bit(RTW89_SER_HAL_STOP_DMA, ser->flags);

	return ret;
}

static void hal_send_post_m0_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RESET_START_DMAC);
}

static void hal_send_m2_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_DISABLE_EN);
}

static void hal_send_m4_event(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	rtw89_mac_set_err_status(rtwdev, MAC_AX_ERR_L1_RCVY_EN);
}
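
/* L1 recovery is a ping-pong with the firmware: the driver acknowledges
 * each firmware notification (M1/M3/M5, delivered through
 * rtw89_ser_notify()) by writing the next command (post-M0/M2/M4) to the
 * error-status register. Each wait is guarded by an alarm so that a
 * missing firmware response escalates the recovery to an L2 reset.
 */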

/* state handler */
static void ser_idle_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		rtw89_hci_recovery_complete(rtwdev);
		clear_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		clear_bit(RTW89_FLAG_CRASH_SIMULATING, rtwdev->flags);
		break;
	case SER_EV_L1_RESET_PREPARE:
		ser_state_goto(ser, SER_L1_RESET_PRE_ST);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_L2_RESET:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		set_bit(RTW89_FLAG_SER_HANDLING, rtwdev->flags);
		rtw89_hci_recovery_start(rtwdev);
		break;
	default:
		break;
	}
}

static void ser_l1_reset_pre_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		ser->prehandle_l1 = true;
		hal_send_post_m0_event(ser);
		ser_set_alarm(ser, 1000, SER_EV_M1_TIMEOUT);
		break;
	case SER_EV_L1_RESET:
		ser_state_goto(ser, SER_RESET_TRX_ST);
		break;
	case SER_EV_M1_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}

static void ser_reset_trx_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		cancel_delayed_work_sync(&rtwdev->track_work);
		drv_stop_tx(ser);

		if (hal_stop_dma(ser)) {
			ser_state_goto(ser, SER_L2_RESET_ST);
			break;
		}

		drv_stop_rx(ser);
		drv_trx_reset(ser);

		/* wait m3 */
		hal_send_m2_event(ser);

		/* arm an alarm in case the FW M3 response never arrives */
		ser_set_alarm(ser, 1000, SER_EV_M3_TIMEOUT);
		break;
	case SER_EV_DO_RECOVERY:
		ser_state_goto(ser, SER_DO_HCI_ST);
		break;
	case SER_EV_M3_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		hal_enable_dma(ser);
		drv_resume_rx(ser);
		drv_resume_tx(ser);
		ieee80211_queue_delayed_work(rtwdev->hw, &rtwdev->track_work,
					     RTW89_TRACK_WORK_PERIOD);
		break;
	default:
		break;
	}
}

static void ser_do_hci_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	switch (evt) {
	case SER_EV_STATE_IN:
		/* wait m5 */
		hal_send_m4_event(ser);

		/* arm an alarm in case the FW M5 response never arrives */
		ser_set_alarm(ser, 1000, SER_EV_FW_M5_TIMEOUT);
		break;
	case SER_EV_FW_M5_TIMEOUT:
		ser_state_goto(ser, SER_L2_RESET_ST);
		break;
	case SER_EV_MAC_RESET_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}

static void ser_mac_mem_dump(struct rtw89_dev *rtwdev, u8 *buf,
			     u8 sel, u32 start_addr, u32 len)
{
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 *ptr = (u32 *)buf;
	u32 base_addr, start_page, residue;
	u32 cnt = 0;
	u32 i;

	start_page = start_addr / MAC_MEM_DUMP_PAGE_SIZE;
	residue = start_addr % MAC_MEM_DUMP_PAGE_SIZE;
	base_addr = mac->mem_base_addrs[sel];
	base_addr += start_page * MAC_MEM_DUMP_PAGE_SIZE;

	while (cnt < len) {
		rtw89_write32(rtwdev, filter_model_addr, base_addr);

		for (i = indir_access_addr + residue;
		     i < indir_access_addr + MAC_MEM_DUMP_PAGE_SIZE;
		     i += 4, ptr++) {
			*ptr = rtw89_read32(rtwdev, i);
			cnt += 4;
			if (cnt >= len)
				break;
		}

		residue = 0;
		base_addr += MAC_MEM_DUMP_PAGE_SIZE;
	}
}
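
/* MAC memory is not directly addressable over the bus: the driver writes
 * a page base address to filter_model_addr and then reads that page word
 * by word through the indirect access window at indir_access_addr.
 * residue offsets the first iteration when start_addr is not page
 * aligned; subsequent pages are read from the start of the window.
 */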

static void rtw89_ser_fw_rsvd_ple_dump(struct rtw89_dev *rtwdev, u8 *buf)
{
	u32 start_addr = rtwdev->chip->rsvd_ple_ofst;

	rtw89_debug(rtwdev, RTW89_DBG_SER,
		    "dump mem for fw rsvd payload engine (start addr: 0x%x)\n",
		    start_addr);
	ser_mac_mem_dump(rtwdev, buf, RTW89_MAC_MEM_SHARED_BUF, start_addr,
			 RTW89_FW_RSVD_PLE_SIZE);
}

struct __fw_backtrace_entry {
	u32 wcpu_addr;
	u32 size;
	u32 key;
} __packed;

struct __fw_backtrace_info {
	u32 ra;
	u32 sp;
} __packed;

static_assert(RTW89_FW_BACKTRACE_INFO_SIZE ==
	      sizeof(struct __fw_backtrace_info));

static int rtw89_ser_fw_backtrace_dump(struct rtw89_dev *rtwdev, u8 *buf,
				       const struct __fw_backtrace_entry *ent)
{
	struct __fw_backtrace_info *ptr = (struct __fw_backtrace_info *)buf;
	const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def;
	u32 filter_model_addr = mac->filter_model_addr;
	u32 indir_access_addr = mac->indir_access_addr;
	u32 fwbt_addr = ent->wcpu_addr & RTW89_WCPU_BASE_MASK;
	u32 fwbt_size = ent->size;
	u32 fwbt_key = ent->key;
	u32 i;

	if (fwbt_addr == 0) {
		rtw89_warn(rtwdev, "FW backtrace invalid address: 0x%x\n",
			   fwbt_addr);
		return -EINVAL;
	}

	if (fwbt_key != RTW89_FW_BACKTRACE_KEY) {
		rtw89_warn(rtwdev, "FW backtrace invalid key: 0x%x\n",
			   fwbt_key);
		return -EINVAL;
	}

	if (fwbt_size == 0 || !RTW89_VALID_FW_BACKTRACE_SIZE(fwbt_size) ||
	    fwbt_size > RTW89_FW_BACKTRACE_MAX_SIZE) {
		rtw89_warn(rtwdev, "FW backtrace invalid size: 0x%x\n",
			   fwbt_size);
		return -EINVAL;
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace start\n");
	rtw89_write32(rtwdev, filter_model_addr, fwbt_addr);

	for (i = indir_access_addr;
	     i < indir_access_addr + fwbt_size;
	     i += RTW89_FW_BACKTRACE_INFO_SIZE, ptr++) {
		*ptr = (struct __fw_backtrace_info){
			.ra = rtw89_read32(rtwdev, i),
			.sp = rtw89_read32(rtwdev, i + 4),
		};
		rtw89_debug(rtwdev, RTW89_DBG_SER,
			    "next sp: 0x%x, next ra: 0x%x\n",
			    ptr->sp, ptr->ra);
	}

	rtw89_debug(rtwdev, RTW89_DBG_SER, "dump fw backtrace end\n");
	return 0;
}

static void ser_l2_reset_st_pre_hdl(struct rtw89_ser *ser)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);
	struct rtw89_ser_cd_buffer *buf;
	struct __fw_backtrace_entry fwbt_ent;
	int ret = 0;

	buf = rtw89_ser_cd_prep(rtwdev);
	if (!buf) {
		ret = -ENOMEM;
		goto bottom;
	}

	rtw89_ser_fw_rsvd_ple_dump(rtwdev, buf->fwple.data);

	fwbt_ent = *(struct __fw_backtrace_entry *)buf->fwple.data;
	ret = rtw89_ser_fw_backtrace_dump(rtwdev, buf->fwbt.data, &fwbt_ent);
	if (ret)
		goto bottom;

	rtw89_ser_cd_send(rtwdev, buf);

bottom:
	rtw89_ser_cd_free(rtwdev, buf, !!ret);

	ser_reset_mac_binding(rtwdev);
	rtw89_core_stop(rtwdev);
	rtw89_entity_init(rtwdev);
	rtw89_fw_release_general_pkt_list(rtwdev, false);
	INIT_LIST_HEAD(&rtwdev->rtwvifs_list);
}
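
/* The firmware places a struct __fw_backtrace_entry descriptor at the
 * very start of the reserved PLE area; ser_l2_reset_st_pre_hdl() above
 * reinterprets the freshly dumped bytes as that descriptor to locate and
 * validate the backtrace before dumping it.
 */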

static void ser_l2_reset_st_hdl(struct rtw89_ser *ser, u8 evt)
{
	struct rtw89_dev *rtwdev = container_of(ser, struct rtw89_dev, ser);

	switch (evt) {
	case SER_EV_STATE_IN:
		mutex_lock(&rtwdev->mutex);
		ser_l2_reset_st_pre_hdl(ser);
		mutex_unlock(&rtwdev->mutex);

		ieee80211_restart_hw(rtwdev->hw);
		ser_set_alarm(ser, SER_RECFG_TIMEOUT, SER_EV_L2_RECFG_TIMEOUT);
		break;
	case SER_EV_L2_RECFG_TIMEOUT:
		rtw89_info(rtwdev, "Err: ser L2 re-config timeout\n");
		fallthrough;
	case SER_EV_L2_RECFG_DONE:
		ser_state_goto(ser, SER_IDLE_ST);
		break;
	case SER_EV_STATE_OUT:
		ser_del_alarm(ser);
		break;
	default:
		break;
	}
}

static const struct event_ent ser_ev_tbl[] = {
	{SER_EV_NONE, "SER_EV_NONE"},
	{SER_EV_STATE_IN, "SER_EV_STATE_IN"},
	{SER_EV_STATE_OUT, "SER_EV_STATE_OUT"},
	{SER_EV_L1_RESET_PREPARE, "SER_EV_L1_RESET_PREPARE pre-m0"},
	{SER_EV_L1_RESET, "SER_EV_L1_RESET m1"},
	{SER_EV_DO_RECOVERY, "SER_EV_DO_RECOVERY m3"},
	{SER_EV_MAC_RESET_DONE, "SER_EV_MAC_RESET_DONE m5"},
	{SER_EV_L2_RESET, "SER_EV_L2_RESET"},
	{SER_EV_L2_RECFG_DONE, "SER_EV_L2_RECFG_DONE"},
	{SER_EV_L2_RECFG_TIMEOUT, "SER_EV_L2_RECFG_TIMEOUT"},
	{SER_EV_M1_TIMEOUT, "SER_EV_M1_TIMEOUT"},
	{SER_EV_M3_TIMEOUT, "SER_EV_M3_TIMEOUT"},
	{SER_EV_FW_M5_TIMEOUT, "SER_EV_FW_M5_TIMEOUT"},
	{SER_EV_L0_RESET, "SER_EV_L0_RESET"},
	{SER_EV_MAXX, "SER_EV_MAX"}
};

static const struct state_ent ser_st_tbl[] = {
	{SER_IDLE_ST, "SER_IDLE_ST", ser_idle_st_hdl},
	{SER_L1_RESET_PRE_ST, "SER_L1_RESET_PRE_ST", ser_l1_reset_pre_st_hdl},
	{SER_RESET_TRX_ST, "SER_RESET_TRX_ST", ser_reset_trx_st_hdl},
	{SER_DO_HCI_ST, "SER_DO_HCI_ST", ser_do_hci_st_hdl},
	{SER_L2_RESET_ST, "SER_L2_RESET_ST", ser_l2_reset_st_hdl}
};

int rtw89_ser_init(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = &rtwdev->ser;

	memset(ser, 0, sizeof(*ser));
	INIT_LIST_HEAD(&ser->msg_q);
	ser->state = SER_IDLE_ST;
	ser->st_tbl = ser_st_tbl;
	ser->ev_tbl = ser_ev_tbl;

	bitmap_zero(ser->flags, RTW89_NUM_OF_SER_FLAGS);
	spin_lock_init(&ser->msg_q_lock);
	INIT_WORK(&ser->ser_hdl_work, rtw89_ser_hdl_work);
	INIT_DELAYED_WORK(&ser->ser_alarm_work, rtw89_ser_alarm_work);
	return 0;
}

int rtw89_ser_deinit(struct rtw89_dev *rtwdev)
{
	struct rtw89_ser *ser = (struct rtw89_ser *)&rtwdev->ser;

	set_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	cancel_delayed_work_sync(&ser->ser_alarm_work);
	cancel_work_sync(&ser->ser_hdl_work);
	clear_bit(RTW89_SER_DRV_STOP_RUN, ser->flags);
	return 0;
}
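
/* RTW89_SER_DRV_STOP_RUN makes ser_send_msg() and ser_set_alarm() bail
 * out while the works are being cancelled, so nothing can re-queue work
 * behind the cancel_*_sync() calls during teardown.
 */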

void rtw89_ser_recfg_done(struct rtw89_dev *rtwdev)
{
	ser_send_msg(&rtwdev->ser, SER_EV_L2_RECFG_DONE);
}

int rtw89_ser_notify(struct rtw89_dev *rtwdev, u32 err)
{
	u8 event = SER_EV_NONE;

	rtw89_info(rtwdev, "SER catches error: 0x%x\n", err);

	switch (err) {
	case MAC_AX_ERR_L1_PREERR_DMAC: /* pre-M0 */
		event = SER_EV_L1_RESET_PREPARE;
		break;
	case MAC_AX_ERR_L1_ERR_DMAC:
	case MAC_AX_ERR_L0_PROMOTE_TO_L1:
		event = SER_EV_L1_RESET; /* M1 */
		break;
	case MAC_AX_ERR_L1_RESET_DISABLE_DMAC_DONE:
		event = SER_EV_DO_RECOVERY; /* M3 */
		break;
	case MAC_AX_ERR_L1_RESET_RECOVERY_DONE:
		event = SER_EV_MAC_RESET_DONE; /* M5 */
		break;
	case MAC_AX_ERR_L0_ERR_CMAC0:
	case MAC_AX_ERR_L0_ERR_CMAC1:
	case MAC_AX_ERR_L0_RESET_DONE:
		event = SER_EV_L0_RESET;
		break;
	default:
		if (err == MAC_AX_ERR_L1_PROMOTE_TO_L2 ||
		    (err >= MAC_AX_ERR_L2_ERR_AH_DMA &&
		     err <= MAC_AX_GET_ERR_MAX))
			event = SER_EV_L2_RESET;
		break;
	}

	if (event == SER_EV_NONE) {
		rtw89_warn(rtwdev, "SER cannot recognize error: 0x%x\n", err);
		return -EINVAL;
	}

	ser_send_msg(&rtwdev->ser, event);
	return 0;
}
EXPORT_SYMBOL(rtw89_ser_notify);