hci_smd.c

/*
 * HCI_SMD (HCI Shared Memory Driver) is Qualcomm's Shared memory driver
 * for the BT HCI protocol.
 *
 * Copyright (c) 2000-2001, 2011-2012 The Linux Foundation. All rights reserved.
 * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com>
 * Copyright (C) 2004-2006 Marcel Holtmann <marcel@holtmann.org>
 *
 * This file is based on drivers/bluetooth/hci_vhci.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/semaphore.h>
#include <linux/string.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/interrupt.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci.h>
#include <mach/msm_smd.h>

#define EVENT_CHANNEL		"APPS_RIVA_BT_CMD"
#define DATA_CHANNEL		"APPS_RIVA_BT_ACL"
/* Release the Rx wakelock after 500 ms rather than immediately, because
 * higher layers do not always take wakelocks when they should.
 * This is derived from the implementation for the UART transport.
 */
#define RX_Q_MONITOR		(500)	/* 500 milliseconds */
#define HCI_REGISTER_SET	0

/* SSR state machine to take care of back-to-back SSR requests,
 * to handle incoming BT on/off and airplane-mode toggling, and
 * to ignore spurious SMD open notifications while an SSR is in progress.
 */
#define STATE_SSR_ON			0x1
#define STATE_SSR_START			0x02
#define STATE_SSR_CHANNEL_OPEN_PENDING	0x04
#define STATE_SSR_PENDING_INIT		0x08
#define STATE_SSR_COMPLETE		0x00
#define STATE_SSR_OFF			STATE_SSR_COMPLETE

static int ssr_state = STATE_SSR_OFF;
static int hcismd_set;
static DEFINE_SEMAPHORE(hci_smd_enable);
static int restart_in_progress;

static int hcismd_set_enable(const char *val, struct kernel_param *kp);
module_param_call(hcismd_set, hcismd_set_enable, NULL, &hcismd_set, 0644);

static void hci_dev_smd_open(struct work_struct *worker);
static void hci_dev_restart(struct work_struct *worker);
struct hci_smd_data {
	struct hci_dev *hdev;
	unsigned long flags;

	struct smd_channel *event_channel;
	struct smd_channel *data_channel;
	struct wake_lock wake_lock_tx;
	struct wake_lock wake_lock_rx;
	struct timer_list rx_q_timer;
	struct tasklet_struct rx_task;
};
static struct hci_smd_data hs;
/* Helper for the Rx queue monitor timer: check whether hdev's Rx queue is empty */
static int is_rx_q_empty(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff_head *list_ = &hdev->rx_q;
	struct sk_buff *list = ((struct sk_buff *) list_)->next;

	BT_DBG("%s Rx timer triggered", hdev->name);

	if (list == (struct sk_buff *) list_) {
		BT_DBG("%s RX queue empty", hdev->name);
		return 1;
	} else {
		BT_DBG("%s RX queue not empty", hdev->name);
		return 0;
	}
}
static void release_lock(void)
{
	struct hci_smd_data *hsmd = &hs;

	BT_DBG("Releasing Rx Lock");
	if (is_rx_q_empty((unsigned long)hsmd->hdev) &&
			wake_lock_active(&hs.wake_lock_rx))
		wake_unlock(&hs.wake_lock_rx);
}
/* Rx timer callback function */
static void schedule_timer(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct hci_smd_data *hsmd = &hs;

	BT_DBG("%s Schedule Rx timer", hdev->name);

	if (is_rx_q_empty(arg) && wake_lock_active(&hs.wake_lock_rx)) {
		BT_DBG("%s RX queue empty", hdev->name);
		/*
		 * Since the queue is empty, it is safe
		 * to release the Rx wake lock.
		 */
		wake_unlock(&hs.wake_lock_rx);
	} else {
		BT_DBG("%s RX queue not empty", hdev->name);
		/*
		 * Restart the timer to monitor whether the Rx queue is
		 * empty for releasing the Rx wake lock.
		 */
		mod_timer(&hsmd->rx_q_timer,
				jiffies + msecs_to_jiffies(RX_Q_MONITOR));
	}
}
static int hci_smd_open(struct hci_dev *hdev)
{
	set_bit(HCI_RUNNING, &hdev->flags);
	return 0;
}

static int hci_smd_close(struct hci_dev *hdev)
{
	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
		return 0;
	else
		return -EPERM;
}
static void hci_smd_destruct(struct hci_dev *hdev)
{
	if (NULL != hdev->driver_data)
		kfree(hdev->driver_data);
}
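
/*
 * Read one ACL data frame from the SMD data channel, tag it as
 * HCI_ACLDATA_PKT and hand it to the HCI core, then arm the Rx queue
 * monitor timer so the Rx wake lock can be released once the queue drains.
 */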
static void hci_smd_recv_data(void)
{
	int len = 0;
	int rc = 0;
	struct sk_buff *skb = NULL;
	struct hci_smd_data *hsmd = &hs;

	wake_lock(&hs.wake_lock_rx);

	len = smd_read_avail(hsmd->data_channel);
	if (len > HCI_MAX_FRAME_SIZE) {
		BT_ERR("Frame larger than the allowed size, flushing frame");
		smd_read(hsmd->data_channel, NULL, len);
		goto out_data;
	}

	if (len <= 0)
		goto out_data;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Error in allocating socket buffer");
		smd_read(hsmd->data_channel, NULL, len);
		goto out_data;
	}

	rc = smd_read(hsmd->data_channel, skb_put(skb, len), len);
	if (rc < len) {
		BT_ERR("Error in reading from the channel");
		goto out_data;
	}

	skb->dev = (void *)hsmd->hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	skb_orphan(skb);

	rc = hci_recv_frame(skb);
	if (rc < 0) {
		BT_ERR("Error in passing the packet to HCI Layer");
		/*
		 * The skb is freed inside hci_recv_frame; set it to NULL
		 * here to avoid freeing it again below.
		 */
		skb = NULL;
		goto out_data;
	}

	/*
	 * Start the timer to monitor whether the Rx queue is
	 * empty for releasing the Rx wake lock.
	 */
	BT_DBG("Rx Timer is starting");
	mod_timer(&hsmd->rx_q_timer,
			jiffies + msecs_to_jiffies(RX_Q_MONITOR));

out_data:
	release_lock();
	if (rc)
		kfree_skb(skb);
}
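
/*
 * Drain the SMD event channel: read each available HCI event, tag it as
 * HCI_EVENT_PKT and pass it to the HCI core, re-arming the Rx queue
 * monitor timer for every frame delivered.
 */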
static void hci_smd_recv_event(void)
{
	int len = 0;
	int rc = 0;
	struct sk_buff *skb = NULL;
	struct hci_smd_data *hsmd = &hs;

	wake_lock(&hs.wake_lock_rx);

	len = smd_read_avail(hsmd->event_channel);
	if (len > HCI_MAX_FRAME_SIZE) {
		BT_ERR("Frame larger than the allowed size, flushing frame");
		rc = smd_read(hsmd->event_channel, NULL, len);
		goto out_event;
	}

	while (len > 0) {
		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb) {
			BT_ERR("Error in allocating socket buffer");
			smd_read(hsmd->event_channel, NULL, len);
			goto out_event;
		}

		rc = smd_read(hsmd->event_channel, skb_put(skb, len), len);
		if (rc < len) {
			BT_ERR("Error in reading from the event channel");
			goto out_event;
		}

		skb->dev = (void *)hsmd->hdev;
		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

		skb_orphan(skb);

		rc = hci_recv_frame(skb);
		if (rc < 0) {
			BT_ERR("Error in passing the packet to HCI Layer");
			/*
			 * The skb is freed inside hci_recv_frame; set it to
			 * NULL here to avoid freeing it again below.
			 */
			skb = NULL;
			goto out_event;
		}

		len = smd_read_avail(hsmd->event_channel);
		/*
		 * Start the timer to monitor whether the Rx queue is
		 * empty for releasing the Rx wake lock.
		 */
		BT_DBG("Rx Timer is starting");
		mod_timer(&hsmd->rx_q_timer,
				jiffies + msecs_to_jiffies(RX_Q_MONITOR));
	}
out_event:
	release_lock();
	if (rc)
		kfree_skb(skb);
}
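
/*
 * hdev->send callback: write HCI commands to the command/event channel and
 * ACL/SCO data to the data channel, holding the Tx wake lock for the
 * duration of the write. The skb is always consumed.
 */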
static int hci_smd_send_frame(struct sk_buff *skb)
{
	int len;
	int avail;
	int ret = 0;

	wake_lock(&hs.wake_lock_tx);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		avail = smd_write_avail(hs.event_channel);
		if (!avail) {
			BT_ERR("No space available for smd frame");
			ret = -ENOSPC;
		}
		len = smd_write(hs.event_channel, skb->data, skb->len);
		if (len < skb->len) {
			BT_ERR("Failed to write Command %d", len);
			ret = -ENODEV;
		}
		break;
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		avail = smd_write_avail(hs.data_channel);
		if (!avail) {
			BT_ERR("No space available for smd frame");
			ret = -ENOSPC;
		}
		len = smd_write(hs.data_channel, skb->data, skb->len);
		if (len < skb->len) {
			BT_ERR("Failed to write Data %d", len);
			ret = -ENODEV;
		}
		break;
	default:
		BT_ERR("Unknown packet type");
		ret = -ENODEV;
		break;
	}

	kfree_skb(skb);
	wake_unlock(&hs.wake_lock_tx);
	return ret;
}
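
/* Rx tasklet handler: drain both SMD channels while either has data pending */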
static void hci_smd_rx(unsigned long arg)
{
	struct hci_smd_data *hsmd = &hs;

	while ((smd_read_avail(hsmd->event_channel) > 0) ||
			(smd_read_avail(hsmd->data_channel) > 0)) {
		hci_smd_recv_event();
		hci_smd_recv_data();
	}
}
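
/*
 * SMD notification callback for the command/event channel: schedule the Rx
 * tasklet on incoming data, register the HCI device when the channel opens,
 * and kick off the restart worker (SSR handling) when it closes.
 */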
static void hci_smd_notify_event(void *data, unsigned int event)
{
	struct hci_dev *hdev = hs.hdev;
	struct hci_smd_data *hsmd = &hs;
	struct work_struct *reset_worker;
	struct work_struct *open_worker;
	int len = 0;

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return;
	}

	switch (event) {
	case SMD_EVENT_DATA:
		len = smd_read_avail(hsmd->event_channel);
		if (len > 0)
			tasklet_hi_schedule(&hs.rx_task);
		else if (len < 0)
			BT_ERR("Failed to read event from smd %d", len);
		break;
	case SMD_EVENT_OPEN:
		BT_INFO("opening HCI-SMD channel :%s", EVENT_CHANNEL);
		BT_DBG("SSR state is : %x", ssr_state);
		if ((ssr_state == STATE_SSR_OFF) ||
				(ssr_state == STATE_SSR_CHANNEL_OPEN_PENDING)) {
			hci_smd_open(hdev);
			open_worker = kzalloc(sizeof(*open_worker), GFP_ATOMIC);
			if (!open_worker) {
				BT_ERR("Out of memory");
				break;
			}
			if (ssr_state == STATE_SSR_CHANNEL_OPEN_PENDING) {
				ssr_state = STATE_SSR_PENDING_INIT;
				BT_INFO("SSR state is : %x", ssr_state);
			}
			INIT_WORK(open_worker, hci_dev_smd_open);
			schedule_work(open_worker);
		}
		break;
	case SMD_EVENT_CLOSE:
		BT_INFO("Closing HCI-SMD channel :%s", EVENT_CHANNEL);
		BT_DBG("SSR state is : %x", ssr_state);
		if ((ssr_state == STATE_SSR_OFF) ||
				(ssr_state == STATE_SSR_PENDING_INIT)) {
			hci_smd_close(hdev);
			reset_worker = kzalloc(sizeof(*reset_worker),
					GFP_ATOMIC);
			if (!reset_worker) {
				BT_ERR("Out of memory");
				break;
			}
			ssr_state = STATE_SSR_ON;
			BT_INFO("SSR state is : %x", ssr_state);
			INIT_WORK(reset_worker, hci_dev_restart);
			schedule_work(reset_worker);
		} else if (ssr_state & STATE_SSR_ON) {
			BT_ERR("SSR state is : %x", ssr_state);
		}
		break;
	default:
		break;
	}
}
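
/* SMD notification callback for the ACL data channel */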
static void hci_smd_notify_data(void *data, unsigned int event)
{
	struct hci_dev *hdev = hs.hdev;
	struct hci_smd_data *hsmd = &hs;
	int len = 0;

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return;
	}

	switch (event) {
	case SMD_EVENT_DATA:
		len = smd_read_avail(hsmd->data_channel);
		if (len > 0)
			tasklet_hi_schedule(&hs.rx_task);
		else if (len < 0)
			BT_ERR("Failed to read data from smd %d", len);
		break;
	case SMD_EVENT_OPEN:
		BT_INFO("opening HCI-SMD channel :%s", DATA_CHANNEL);
		hci_smd_open(hdev);
		break;
	case SMD_EVENT_CLOSE:
		BT_INFO("Closing HCI-SMD channel :%s", DATA_CHANNEL);
		hci_smd_close(hdev);
		break;
	default:
		break;
	}
}
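
/*
 * Register the allocated hdev with the Bluetooth core. Guarded by the
 * HCI_REGISTER_SET flag and the SSR state so the device is registered only
 * once and only when no restart is in an unexpected state.
 */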
static int hci_smd_hci_register_dev(struct hci_smd_data *hsmd)
{
	struct hci_dev *hdev;

	if (hsmd->hdev)
		hdev = hsmd->hdev;
	else {
		BT_ERR("hdev is NULL");
		return 0;
	}
	/* Allow the incoming SSR even if the previous one is still at
	 * PENDING INIT state, since cleanup needs to be started again
	 * from the beginning, ignoring or bypassing the previous one.
	 */
	if ((ssr_state == STATE_SSR_OFF) ||
			(ssr_state == STATE_SSR_PENDING_INIT)) {
		if (test_and_set_bit(HCI_REGISTER_SET, &hsmd->flags)) {
			BT_ERR("HCI device registered already");
			return 0;
		} else
			BT_INFO("HCI device registration is starting");
		if (hci_register_dev(hdev) < 0) {
			BT_ERR("Can't register HCI device");
			hci_free_dev(hdev);
			hsmd->hdev = NULL;
			clear_bit(HCI_REGISTER_SET, &hsmd->flags);
			return -ENODEV;
		}
		if (ssr_state == STATE_SSR_PENDING_INIT) {
			ssr_state = STATE_SSR_COMPLETE;
			BT_INFO("SSR state is : %x", ssr_state);
		}
	} else if (ssr_state)
		BT_ERR("Registration called in invalid context");
	return 0;
}
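
/*
 * Allocate the HCI device, wire up its callbacks, set up the Rx tasklet and
 * Rx queue monitor timer, then open both SMD channels on the WCNSS edge.
 * Read interrupts are disabled; Rx is driven from the SMD notifications.
 */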
static int hci_smd_register_smd(struct hci_smd_data *hsmd)
{
	struct hci_dev *hdev;
	int rc;

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Can't allocate HCI device");
		return -ENOMEM;
	}

	hsmd->hdev = hdev;
	hdev->bus = HCI_SMD;
	hdev->driver_data = NULL;
	hdev->open = hci_smd_open;
	hdev->close = hci_smd_close;
	hdev->send = hci_smd_send_frame;
	hdev->destruct = hci_smd_destruct;
	hdev->owner = THIS_MODULE;

	tasklet_init(&hsmd->rx_task,
			hci_smd_rx, (unsigned long) hsmd);
	/*
	 * Setup the timer to monitor whether the Rx queue is empty,
	 * to control the wake lock release
	 */
	setup_timer(&hsmd->rx_q_timer, schedule_timer,
			(unsigned long) hsmd->hdev);
	if (ssr_state == STATE_SSR_START) {
		ssr_state = STATE_SSR_CHANNEL_OPEN_PENDING;
		BT_INFO("SSR state is : %x", ssr_state);
	}

	/* Open the SMD Channel and device and register the callback function */
	rc = smd_named_open_on_edge(EVENT_CHANNEL, SMD_APPS_WCNSS,
			&hsmd->event_channel, hdev, hci_smd_notify_event);
	if (rc < 0) {
		BT_ERR("Cannot open the command channel");
		hci_free_dev(hdev);
		hsmd->hdev = NULL;
		return -ENODEV;
	}

	rc = smd_named_open_on_edge(DATA_CHANNEL, SMD_APPS_WCNSS,
			&hsmd->data_channel, hdev, hci_smd_notify_data);
	if (rc < 0) {
		BT_ERR("Failed to open the Data channel");
		hci_free_dev(hdev);
		hsmd->hdev = NULL;
		return -ENODEV;
	}

	/* Disable the read interrupts on the channel */
	smd_disable_read_intr(hsmd->event_channel);
	smd_disable_read_intr(hsmd->data_channel);
	return 0;
}
static void hci_smd_deregister_dev(struct hci_smd_data *hsmd)
{
	tasklet_kill(&hs.rx_task);
	if (ssr_state)
		BT_DBG("SSR state is : %x", ssr_state);

	/* Even if the hci_smd driver is not registered with the HCI core,
	 * the opened SMD channels still need to be closed as part of cleanup.
	 */
	if (!test_and_clear_bit(HCI_REGISTER_SET, &hsmd->flags)) {
		BT_ERR("HCI device un-registered already");
	} else {
		BT_INFO("HCI device un-registration going on");
		if (hsmd->hdev) {
			if (hci_unregister_dev(hsmd->hdev) < 0)
				BT_ERR("Can't unregister HCI device %s",
						hsmd->hdev->name);
			hci_free_dev(hsmd->hdev);
			hsmd->hdev = NULL;
		}
	}
	smd_close(hs.event_channel);
	smd_close(hs.data_channel);
	if (wake_lock_active(&hs.wake_lock_rx))
		wake_unlock(&hs.wake_lock_rx);
	if (wake_lock_active(&hs.wake_lock_tx))
		wake_unlock(&hs.wake_lock_tx);

	/* Destroy the timer used to monitor the Rx queue for emptiness */
	if (hs.rx_q_timer.function) {
		del_timer_sync(&hs.rx_q_timer);
		hs.rx_q_timer.function = NULL;
		hs.rx_q_timer.data = 0;
	}
}
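
/*
 * Worker scheduled from SMD_EVENT_CLOSE: tear down the current transport and
 * re-open the SMD channels as part of subsystem restart (SSR), serialized by
 * the hci_smd_enable semaphore.
 */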
static void hci_dev_restart(struct work_struct *worker)
{
	down(&hci_smd_enable);
	restart_in_progress = 1;
	BT_DBG("SSR state is : %x", ssr_state);

	if (ssr_state == STATE_SSR_ON) {
		ssr_state = STATE_SSR_START;
		BT_INFO("SSR state is : %x", ssr_state);
	} else {
		BT_ERR("restart triggered in wrong context");
		up(&hci_smd_enable);
		kfree(worker);
		return;
	}
	hci_smd_deregister_dev(&hs);
	hci_smd_register_smd(&hs);
	up(&hci_smd_enable);
	kfree(worker);
}
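
/*
 * Worker scheduled from SMD_EVENT_OPEN: after a restart, give WCNSS time to
 * initialize, then register the HCI device with the Bluetooth core.
 */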
static void hci_dev_smd_open(struct work_struct *worker)
{
	down(&hci_smd_enable);
	if (ssr_state)
		BT_DBG("SSR state is : %x", ssr_state);

	if ((ssr_state != STATE_SSR_OFF) &&
			(ssr_state != STATE_SSR_PENDING_INIT)) {
		up(&hci_smd_enable);
		kfree(worker);
		return;
	}

	if (restart_in_progress == 1) {
		/* Allow wcnss to initialize */
		restart_in_progress = 0;
		msleep(10000);
	}
	hci_smd_hci_register_dev(&hs);
	up(&hci_smd_enable);
	kfree(worker);
}
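
/*
 * Handler for the hcismd_set module parameter: writing 1 registers the SMD
 * transport, writing 0 deregisters it. Requests are ignored while an SSR is
 * in progress.
 */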
static int hcismd_set_enable(const char *val, struct kernel_param *kp)
{
	int ret = 0;

	pr_err("hcismd_set_enable %d", hcismd_set);

	down(&hci_smd_enable);

	ret = param_set_int(val, kp);

	if (ret)
		goto done;

	/* Ignore all incoming register/de-register requests while an
	 * SSR is in progress.
	 */
	switch (hcismd_set) {
	case 1:
		if ((hs.hdev == NULL) && (ssr_state == STATE_SSR_OFF))
			hci_smd_register_smd(&hs);
		else if (ssr_state)
			BT_ERR("SSR is in progress,state is : %x", ssr_state);
		break;
	case 0:
		if (ssr_state == STATE_SSR_OFF)
			hci_smd_deregister_dev(&hs);
		else if (ssr_state)
			BT_ERR("SSR is in progress,state is : %x", ssr_state);
		break;
	default:
		ret = -EFAULT;
	}

done:
	up(&hci_smd_enable);
	return ret;
}
static int __init hci_smd_init(void)
{
	wake_lock_init(&hs.wake_lock_rx, WAKE_LOCK_SUSPEND,
			"msm_smd_Rx");
	wake_lock_init(&hs.wake_lock_tx, WAKE_LOCK_SUSPEND,
			"msm_smd_Tx");
	restart_in_progress = 0;
	ssr_state = STATE_SSR_OFF;
	hs.hdev = NULL;
	return 0;
}
module_init(hci_smd_init);

static void __exit hci_smd_exit(void)
{
	wake_lock_destroy(&hs.wake_lock_rx);
	wake_lock_destroy(&hs.wake_lock_tx);
}
module_exit(hci_smd_exit);

MODULE_AUTHOR("Ankur Nandwani <ankurn@codeaurora.org>");
MODULE_DESCRIPTION("Bluetooth SMD driver");
MODULE_LICENSE("GPL v2");