/* hci_h5.c */
/*
 *
 *  Bluetooth HCI Three-wire UART driver
 *
 *  Copyright (C) 2012  Intel Corporation
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */
  23. #include <linux/kernel.h>
  24. #include <linux/errno.h>
  25. #include <linux/skbuff.h>
  26. #include <net/bluetooth/bluetooth.h>
  27. #include <net/bluetooth/hci_core.h>
  28. #include "hci_uart.h"
/* Three-wire packet types carried in the header type field */
#define HCI_3WIRE_ACK_PKT	0
#define HCI_3WIRE_LINK_PKT	15

/* Sliding window size */
#define H5_TX_WIN_MAX		4

/* Retransmit unacked reliable packets after this long */
#define H5_ACK_TIMEOUT		msecs_to_jiffies(250)
/* Re-send sync/config link messages at this interval until active */
#define H5_SYNC_TIMEOUT		msecs_to_jiffies(100)

/*
 * Maximum Three-wire packet:
 * 4 byte header + max value for 12-bit length + 2 bytes for CRC
 */
#define H5_MAX_LEN		(4 + 0xfff + 2)

/* Convenience macros for reading Three-wire header values */
#define H5_HDR_SEQ(hdr)		((hdr)[0] & 0x07)
#define H5_HDR_ACK(hdr)		(((hdr)[0] >> 3) & 0x07)
#define H5_HDR_CRC(hdr)		(((hdr)[0] >> 6) & 0x01)
#define H5_HDR_RELIABLE(hdr)	(((hdr)[0] >> 7) & 0x01)
#define H5_HDR_PKT_TYPE(hdr)	((hdr)[1] & 0x0f)
/* 12-bit payload length: high nibble of byte 1 plus all of byte 2 */
#define H5_HDR_LEN(hdr)		((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4))

/* SLIP framing: frame delimiter plus two-byte escape sequences */
#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd
/* H5 state flags */
enum {
	H5_RX_ESC,	/* SLIP escape mode */
	H5_TX_ACK_REQ,	/* Pending ack to send */
};

/* Per-device state for one three-wire UART link */
struct h5 {
	struct sk_buff_head	unack;		/* Unack'ed packets queue */
	struct sk_buff_head	rel;		/* Reliable packets queue */
	struct sk_buff_head	unrel;		/* Unreliable packets queue */

	unsigned long		flags;		/* H5_RX_ESC / H5_TX_ACK_REQ bits */

	struct sk_buff		*rx_skb;	/* Receive buffer */
	size_t			rx_pending;	/* Expecting more bytes */
	u8			rx_ack;		/* Last ack number received */

	/* Current per-byte RX state machine handler */
	int			(*rx_func)(struct hci_uart *hu, u8 c);

	struct timer_list	timer;		/* Retransmission timer */

	u8			tx_seq;		/* Next seq number to send */
	u8			tx_ack;		/* Next ack number to send */
	u8			tx_win;		/* Sliding window size */

	/* Link-establishment state (sync -> config -> active) */
	enum {
		H5_UNINITIALIZED,
		H5_INITIALIZED,
		H5_ACTIVE,
	} state;

	/* Low-power (sleep mode) state of the link */
	enum {
		H5_AWAKE,
		H5_SLEEPING,
		H5_WAKING_UP,
	} sleep;
};

static void h5_reset_rx(struct h5 *h5);
  81. static void h5_link_control(struct hci_uart *hu, const void *data, size_t len)
  82. {
  83. struct h5 *h5 = hu->priv;
  84. struct sk_buff *nskb;
  85. nskb = alloc_skb(3, GFP_ATOMIC);
  86. if (!nskb)
  87. return;
  88. hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT;
  89. memcpy(skb_put(nskb, len), data, len);
  90. skb_queue_tail(&h5->unrel, nskb);
  91. }
  92. static u8 h5_cfg_field(struct h5 *h5)
  93. {
  94. /* Sliding window size (first 3 bits) */
  95. return h5->tx_win & 0x07;
  96. }
/* Timer callback: before the link is active it re-sends the pending
 * sync/config handshake message; afterwards it handles ack timeouts by
 * re-queueing every unacked reliable packet for retransmission.
 * 'arg' is the hci_uart pointer stored in timer.data by h5_open().
 */
static void h5_timed_event(unsigned long arg)
{
	const unsigned char sync_req[] = { 0x01, 0x7e };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	struct hci_uart *hu = (struct hci_uart *)arg;
	struct h5 *h5 = hu->priv;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("%s", hu->hdev->name);

	if (h5->state == H5_UNINITIALIZED)
		h5_link_control(hu, sync_req, sizeof(sync_req));

	if (h5->state == H5_INITIALIZED) {
		conf_req[2] = h5_cfg_field(h5);
		h5_link_control(hu, conf_req, sizeof(conf_req));
	}

	/* Still handshaking: re-arm with the (shorter) sync interval */
	if (h5->state != H5_ACTIVE) {
		mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
		goto wakeup;
	}

	/* No ack while waking up: mark the peer as asleep and let the
	 * TX path issue a fresh wakeup request.
	 */
	if (h5->sleep != H5_AWAKE) {
		h5->sleep = H5_SLEEPING;
		goto wakeup;
	}

	BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen);

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Move unacked packets back to the front of the reliable queue,
	 * newest first, rewinding tx_seq (mod 8) for each one.
	 */
	while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) {
		h5->tx_seq = (h5->tx_seq - 1) & 0x07;
		skb_queue_head(&h5->rel, skb);
	}

	spin_unlock_irqrestore(&h5->unack.lock, flags);

wakeup:
	hci_uart_tx_wakeup(hu);
}
  130. static void h5_peer_reset(struct hci_uart *hu)
  131. {
  132. struct h5 *h5 = hu->priv;
  133. BT_ERR("Peer device has reset");
  134. h5->state = H5_UNINITIALIZED;
  135. del_timer(&h5->timer);
  136. skb_queue_purge(&h5->rel);
  137. skb_queue_purge(&h5->unrel);
  138. skb_queue_purge(&h5->unack);
  139. h5->tx_seq = 0;
  140. h5->tx_ack = 0;
  141. /* Send reset request to upper stack */
  142. hci_reset_dev(hu->hdev);
  143. }
  144. static int h5_open(struct hci_uart *hu)
  145. {
  146. struct h5 *h5;
  147. const unsigned char sync[] = { 0x01, 0x7e };
  148. BT_DBG("hu %p", hu);
  149. h5 = kzalloc(sizeof(*h5), GFP_KERNEL);
  150. if (!h5)
  151. return -ENOMEM;
  152. hu->priv = h5;
  153. skb_queue_head_init(&h5->unack);
  154. skb_queue_head_init(&h5->rel);
  155. skb_queue_head_init(&h5->unrel);
  156. h5_reset_rx(h5);
  157. init_timer(&h5->timer);
  158. h5->timer.function = h5_timed_event;
  159. h5->timer.data = (unsigned long)hu;
  160. h5->tx_win = H5_TX_WIN_MAX;
  161. set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags);
  162. /* Send initial sync request */
  163. h5_link_control(hu, sync, sizeof(sync));
  164. mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT);
  165. return 0;
  166. }
  167. static int h5_close(struct hci_uart *hu)
  168. {
  169. struct h5 *h5 = hu->priv;
  170. del_timer_sync(&h5->timer);
  171. skb_queue_purge(&h5->unack);
  172. skb_queue_purge(&h5->rel);
  173. skb_queue_purge(&h5->unrel);
  174. kfree(h5);
  175. return 0;
  176. }
/* Drop from the unack queue every packet covered by the peer's latest
 * ack number (h5->rx_ack).  Called after each completed RX frame.
 */
static void h5_pkt_cull(struct h5 *h5)
{
	struct sk_buff *skb, *tmp;
	unsigned long flags;
	int i, to_remove;
	u8 seq;

	spin_lock_irqsave(&h5->unack.lock, flags);

	to_remove = skb_queue_len(&h5->unack);
	if (to_remove == 0)
		goto unlock;

	/* Walk sequence numbers backwards from the next one we would
	 * send until we hit rx_ack; each step is one packet that is
	 * still unacked and must be kept, so to_remove ends up as the
	 * count of acked packets at the head of the queue.
	 */
	seq = h5->tx_seq;

	while (to_remove > 0) {
		if (h5->rx_ack == seq)
			break;

		to_remove--;
		seq = (seq - 1) & 0x07;
	}

	/* rx_ack never reached: the ack doesn't match anything queued */
	if (seq != h5->rx_ack)
		BT_ERR("Controller acked invalid packet");

	i = 0;
	skb_queue_walk_safe(&h5->unack, skb, tmp) {
		if (i++ >= to_remove)
			break;

		__skb_unlink(skb, &h5->unack);
		kfree_skb(skb);
	}

	/* Nothing left awaiting an ack: stop the retransmit timer */
	if (skb_queue_empty(&h5->unack))
		del_timer(&h5->timer);

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);
}
/* Handle a received HCI_3WIRE_LINK_PKT: the first two payload bytes
 * identify a sync/config handshake message or a sleep-mode request,
 * and most of them get an immediate response queued.
 */
static void h5_handle_internal_rx(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char sync_req[] = { 0x01, 0x7e };
	const unsigned char sync_rsp[] = { 0x02, 0x7d };
	unsigned char conf_req[3] = { 0x03, 0xfc };
	const unsigned char conf_rsp[] = { 0x04, 0x7b };
	const unsigned char wakeup_req[] = { 0x05, 0xfa };
	const unsigned char woken_req[] = { 0x06, 0xf9 };
	const unsigned char sleep_req[] = { 0x07, 0x78 };
	const unsigned char *hdr = h5->rx_skb->data;
	const unsigned char *data = &h5->rx_skb->data[4];

	BT_DBG("%s", hu->hdev->name);

	if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT)
		return;

	/* All recognized link messages are at least two bytes */
	if (H5_HDR_LEN(hdr) < 2)
		return;

	conf_req[2] = h5_cfg_field(h5);

	if (memcmp(data, sync_req, 2) == 0) {
		/* A sync request while active means the peer rebooted */
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5_link_control(hu, sync_rsp, 2);
	} else if (memcmp(data, sync_rsp, 2) == 0) {
		if (h5->state == H5_ACTIVE)
			h5_peer_reset(hu);
		h5->state = H5_INITIALIZED;
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_req, 2) == 0) {
		h5_link_control(hu, conf_rsp, 2);
		h5_link_control(hu, conf_req, 3);
	} else if (memcmp(data, conf_rsp, 2) == 0) {
		/* Optional third byte carries the agreed window size */
		if (H5_HDR_LEN(hdr) > 2)
			h5->tx_win = (data[2] & 0x07);
		BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win);
		h5->state = H5_ACTIVE;
		hci_uart_init_ready(hu);
		return;
	} else if (memcmp(data, sleep_req, 2) == 0) {
		BT_DBG("Peer went to sleep");
		h5->sleep = H5_SLEEPING;
		return;
	} else if (memcmp(data, woken_req, 2) == 0) {
		BT_DBG("Peer woke up");
		h5->sleep = H5_AWAKE;
	} else if (memcmp(data, wakeup_req, 2) == 0) {
		BT_DBG("Peer requested wakeup");
		h5_link_control(hu, woken_req, 2);
		h5->sleep = H5_AWAKE;
	} else {
		BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]);
		return;
	}

	/* A response was queued above: kick the TX path */
	hci_uart_tx_wakeup(hu);
}
/* A full frame (header + payload [+ CRC]) is in rx_skb: update ack
 * bookkeeping, cull newly acked TX packets, then either pass an HCI
 * data frame up the stack or handle it as an internal link packet.
 */
static void h5_complete_rx_pkt(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	if (H5_HDR_RELIABLE(hdr)) {
		/* Reliable frame consumed: advance our ack number and
		 * make sure an ack gets sent to the peer.
		 */
		h5->tx_ack = (h5->tx_ack + 1) % 8;
		set_bit(H5_TX_ACK_REQ, &h5->flags);
		hci_uart_tx_wakeup(hu);
	}

	h5->rx_ack = H5_HDR_ACK(hdr);

	h5_pkt_cull(h5);

	switch (H5_HDR_PKT_TYPE(hdr)) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr);

		/* Remove Three-wire header */
		skb_pull(h5->rx_skb, 4);

		/* Ownership of rx_skb passes to the HCI core */
		hci_recv_frame(hu->hdev, h5->rx_skb);
		h5->rx_skb = NULL;

		break;

	default:
		h5_handle_internal_rx(hu);
		break;
	}

	h5_reset_rx(h5);
}
/* RX state: both CRC bytes have been collected (rx_pending reached
 * zero).  The CRC value itself is not verified here; the frame is
 * completed as-is.  Does not consume the passed byte (returns 0).
 */
static int h5_rx_crc(struct hci_uart *hu, unsigned char c)
{
	h5_complete_rx_pkt(hu);

	return 0;
}
  294. static int h5_rx_payload(struct hci_uart *hu, unsigned char c)
  295. {
  296. struct h5 *h5 = hu->priv;
  297. const unsigned char *hdr = h5->rx_skb->data;
  298. if (H5_HDR_CRC(hdr)) {
  299. h5->rx_func = h5_rx_crc;
  300. h5->rx_pending = 2;
  301. } else {
  302. h5_complete_rx_pkt(hu);
  303. }
  304. return 0;
  305. }
/* RX state: all four header bytes are in rx_skb.  Validate the header
 * and set up payload reception.  Does not consume the passed byte
 * (returns 0); invalid headers reset the RX state machine.
 */
static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *hdr = h5->rx_skb->data;

	BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	/* The four header bytes must sum to 0xff (see h5_prepare_pkt) */
	if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
		BT_ERR("Invalid header checksum");
		h5_reset_rx(h5);
		return 0;
	}

	/* Reliable frames must carry exactly the next expected seq */
	if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
		BT_ERR("Out-of-order packet arrived (%u != %u)",
		       H5_HDR_SEQ(hdr), h5->tx_ack);
		h5_reset_rx(h5);
		return 0;
	}

	/* Only link-control traffic is allowed before the link is up */
	if (h5->state != H5_ACTIVE &&
	    H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
		BT_ERR("Non-link packet received in non-active state");
		h5_reset_rx(h5);
		return 0;
	}

	h5->rx_func = h5_rx_payload;
	h5->rx_pending = H5_HDR_LEN(hdr);

	return 0;
}
  335. static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
  336. {
  337. struct h5 *h5 = hu->priv;
  338. if (c == SLIP_DELIMITER)
  339. return 1;
  340. h5->rx_func = h5_rx_3wire_hdr;
  341. h5->rx_pending = 4;
  342. h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
  343. if (!h5->rx_skb) {
  344. BT_ERR("Can't allocate mem for new packet");
  345. h5_reset_rx(h5);
  346. return -ENOMEM;
  347. }
  348. h5->rx_skb->dev = (void *)hu->hdev;
  349. return 0;
  350. }
/* RX state: hunting for the SLIP delimiter that opens a frame.
 * Always consumes the byte (returns 1); switches to h5_rx_pkt_start
 * once a delimiter is seen.
 */
static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c)
{
	struct h5 *h5 = hu->priv;

	if (c == SLIP_DELIMITER)
		h5->rx_func = h5_rx_pkt_start;

	return 1;
}
  358. static void h5_unslip_one_byte(struct h5 *h5, unsigned char c)
  359. {
  360. const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC;
  361. const u8 *byte = &c;
  362. if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) {
  363. set_bit(H5_RX_ESC, &h5->flags);
  364. return;
  365. }
  366. if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) {
  367. switch (c) {
  368. case SLIP_ESC_DELIM:
  369. byte = &delim;
  370. break;
  371. case SLIP_ESC_ESC:
  372. byte = &esc;
  373. break;
  374. default:
  375. BT_ERR("Invalid esc byte 0x%02hhx", c);
  376. h5_reset_rx(h5);
  377. return;
  378. }
  379. }
  380. memcpy(skb_put(h5->rx_skb, 1), byte, 1);
  381. h5->rx_pending--;
  382. BT_DBG("unsliped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending);
  383. }
  384. static void h5_reset_rx(struct h5 *h5)
  385. {
  386. if (h5->rx_skb) {
  387. kfree_skb(h5->rx_skb);
  388. h5->rx_skb = NULL;
  389. }
  390. h5->rx_func = h5_rx_delimiter;
  391. h5->rx_pending = 0;
  392. clear_bit(H5_RX_ESC, &h5->flags);
  393. }
/* Feed 'count' bytes of raw UART data into the RX state machine.
 * While rx_pending > 0 the bytes belong to the current frame and are
 * SLIP-decoded straight into rx_skb; otherwise the current rx_func
 * decides how many bytes to consume.  Returns 0 or a negative errno
 * propagated from rx_func.
 */
static int h5_recv(struct hci_uart *hu, const void *data, int count)
{
	struct h5 *h5 = hu->priv;
	const unsigned char *ptr = data;

	BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending,
	       count);

	while (count > 0) {
		int processed;

		if (h5->rx_pending > 0) {
			/* A delimiter inside a frame means the frame was
			 * truncated; resync without consuming the byte so
			 * it can open the next frame.
			 */
			if (*ptr == SLIP_DELIMITER) {
				BT_ERR("Too short H5 packet");
				h5_reset_rx(h5);
				continue;
			}

			h5_unslip_one_byte(h5, *ptr);

			ptr++; count--;
			continue;
		}

		processed = h5->rx_func(hu, *ptr);
		if (processed < 0)
			return processed;

		ptr += processed;
		count -= processed;
	}

	return 0;
}
  420. static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
  421. {
  422. struct h5 *h5 = hu->priv;
  423. if (skb->len > 0xfff) {
  424. BT_ERR("Packet too long (%u bytes)", skb->len);
  425. kfree_skb(skb);
  426. return 0;
  427. }
  428. if (h5->state != H5_ACTIVE) {
  429. BT_ERR("Ignoring HCI data in non-active state");
  430. kfree_skb(skb);
  431. return 0;
  432. }
  433. switch (hci_skb_pkt_type(skb)) {
  434. case HCI_ACLDATA_PKT:
  435. case HCI_COMMAND_PKT:
  436. skb_queue_tail(&h5->rel, skb);
  437. break;
  438. case HCI_SCODATA_PKT:
  439. skb_queue_tail(&h5->unrel, skb);
  440. break;
  441. default:
  442. BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
  443. kfree_skb(skb);
  444. break;
  445. }
  446. return 0;
  447. }
/* Append the single SLIP frame delimiter (0xc0) to skb */
static void h5_slip_delim(struct sk_buff *skb)
{
	const char delim = SLIP_DELIMITER;

	memcpy(skb_put(skb, 1), &delim, 1);
}
  453. static void h5_slip_one_byte(struct sk_buff *skb, u8 c)
  454. {
  455. const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM };
  456. const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC };
  457. switch (c) {
  458. case SLIP_DELIMITER:
  459. memcpy(skb_put(skb, 2), &esc_delim, 2);
  460. break;
  461. case SLIP_ESC:
  462. memcpy(skb_put(skb, 2), &esc_esc, 2);
  463. break;
  464. default:
  465. memcpy(skb_put(skb, 1), &c, 1);
  466. }
  467. }
  468. static bool valid_packet_type(u8 type)
  469. {
  470. switch (type) {
  471. case HCI_ACLDATA_PKT:
  472. case HCI_COMMAND_PKT:
  473. case HCI_SCODATA_PKT:
  474. case HCI_3WIRE_LINK_PKT:
  475. case HCI_3WIRE_ACK_PKT:
  476. return true;
  477. default:
  478. return false;
  479. }
  480. }
/* Build a fully SLIP-framed TX skb for 'len' bytes of 'data'.
 * Consumes the pending-ack flag and, for reliable packet types, a
 * sequence number.  Returns NULL on invalid type or allocation
 * failure (in which case no state was consumed beyond the type check).
 */
static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
				      const u8 *data, size_t len)
{
	struct h5 *h5 = hu->priv;
	struct sk_buff *nskb;
	u8 hdr[4];
	int i;

	if (!valid_packet_type(pkt_type)) {
		BT_ERR("Unknown packet type %u", pkt_type);
		return NULL;
	}

	/*
	 * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2
	 * (because bytes 0xc0 and 0xdb are escaped, worst case is when
	 * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0
	 * delimiters at start and end).
	 */
	nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC);
	if (!nskb)
		return NULL;

	hci_skb_pkt_type(nskb) = pkt_type;

	h5_slip_delim(nskb);

	/* Every outgoing frame carries our current ack number, which
	 * also satisfies any pending standalone-ack request.
	 */
	hdr[0] = h5->tx_ack << 3;
	clear_bit(H5_TX_ACK_REQ, &h5->flags);

	/* Reliable packet? */
	if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) {
		hdr[0] |= 1 << 7;
		hdr[0] |= h5->tx_seq;
		h5->tx_seq = (h5->tx_seq + 1) % 8;
	}

	hdr[1] = pkt_type | ((len & 0x0f) << 4);
	hdr[2] = len >> 4;
	/* Checksum byte chosen so all four header bytes sum to 0xff
	 * (verified on the receive side in h5_rx_3wire_hdr).
	 */
	hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff);

	BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u",
	       hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr),
	       H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr),
	       H5_HDR_LEN(hdr));

	for (i = 0; i < 4; i++)
		h5_slip_one_byte(nskb, hdr[i]);

	for (i = 0; i < len; i++)
		h5_slip_one_byte(nskb, data[i]);

	h5_slip_delim(nskb);

	return nskb;
}
/* Pick the next frame to transmit.  Priority order: wakeup handling
 * while the peer sleeps, then unreliable packets, then reliable
 * packets (sliding window permitting), then a standalone ack if one
 * is owed.  Returns a ready-to-send skb or NULL.
 */
static struct sk_buff *h5_dequeue(struct hci_uart *hu)
{
	struct h5 *h5 = hu->priv;
	unsigned long flags;
	struct sk_buff *skb, *nskb;

	if (h5->sleep != H5_AWAKE) {
		const unsigned char wakeup_req[] = { 0x05, 0xfa };

		/* A wakeup request is already in flight */
		if (h5->sleep == H5_WAKING_UP)
			return NULL;

		h5->sleep = H5_WAKING_UP;

		BT_DBG("Sending wakeup request");

		/* Retry quickly (10ms) until the peer reports woken */
		mod_timer(&h5->timer, jiffies + HZ / 100);
		return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2);
	}

	skb = skb_dequeue(&h5->unrel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			kfree_skb(skb);
			return nskb;
		}

		/* Framing alloc failed: put it back and retry later */
		skb_queue_head(&h5->unrel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

	spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);

	/* Sliding window full: wait for acks before sending more */
	if (h5->unack.qlen >= h5->tx_win)
		goto unlock;

	skb = skb_dequeue(&h5->rel);
	if (skb) {
		nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb),
				      skb->data, skb->len);
		if (nskb) {
			/* Keep the original queued until it is acked */
			__skb_queue_tail(&h5->unack, skb);
			mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT);
			spin_unlock_irqrestore(&h5->unack.lock, flags);
			return nskb;
		}

		skb_queue_head(&h5->rel, skb);
		BT_ERR("Could not dequeue pkt because alloc_skb failed");
	}

unlock:
	spin_unlock_irqrestore(&h5->unack.lock, flags);

	/* Nothing else to send but an ack is owed: send a pure ack */
	if (test_bit(H5_TX_ACK_REQ, &h5->flags))
		return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0);

	return NULL;
}
/* Nothing to flush beyond the queues handled by enqueue/dequeue */
static int h5_flush(struct hci_uart *hu)
{
	BT_DBG("hu %p", hu);

	return 0;
}
/* Three-wire UART protocol operations registered with the hci_uart core */
static const struct hci_uart_proto h5p = {
	.id = HCI_UART_3WIRE,
	.name = "Three-wire (H5)",
	.open = h5_open,
	.close = h5_close,
	.recv = h5_recv,
	.enqueue = h5_enqueue,
	.dequeue = h5_dequeue,
	.flush = h5_flush,
};
/* Register the H5 protocol with the hci_uart line discipline */
int __init h5_init(void)
{
	return hci_uart_register_proto(&h5p);
}
/* Unregister the H5 protocol on module unload */
int __exit h5_deinit(void)
{
	return hci_uart_unregister_proto(&h5p);
}