/* amp.c - Bluetooth AMP manager (A2MP protocol) */
  1. /*
  2. Copyright (c) 2010-2012 The Linux Foundation. All rights reserved.
  3. This program is free software; you can redistribute it and/or modify
  4. it under the terms of the GNU General Public License version 2 and
  5. only version 2 as published by the Free Software Foundation.
  6. This program is distributed in the hope that it will be useful,
  7. but WITHOUT ANY WARRANTY; without even the implied warranty of
  8. MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  9. GNU General Public License for more details.
  10. */
  11. #include <linux/interrupt.h>
  12. #include <linux/module.h>
  13. #include <linux/types.h>
  14. #include <linux/errno.h>
  15. #include <linux/kernel.h>
  16. #include <linux/skbuff.h>
  17. #include <linux/list.h>
  18. #include <linux/workqueue.h>
  19. #include <linux/timer.h>
  20. #include <linux/crypto.h>
  21. #include <linux/scatterlist.h>
  22. #include <linux/err.h>
  23. #include <crypto/hash.h>
  24. #include <net/bluetooth/bluetooth.h>
  25. #include <net/bluetooth/hci_core.h>
  26. #include <net/bluetooth/l2cap.h>
  27. #include <net/bluetooth/amp.h>
  28. static struct workqueue_struct *amp_workqueue;
  29. LIST_HEAD(amp_mgr_list);
  30. DEFINE_RWLOCK(amp_mgr_list_lock);
  31. static int send_a2mp(struct socket *sock, u8 *data, int len);
  32. static void ctx_timeout(unsigned long data);
  33. static void launch_ctx(struct amp_mgr *mgr);
  34. static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data);
  35. static int kill_ctx(struct amp_ctx *ctx);
  36. static int cancel_ctx(struct amp_ctx *ctx);
  37. static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst);
/* Tear down an AMP manager: unlink it from the global manager list,
 * kill every context it still owns, then free its controller record
 * and the manager itself.
 */
static void remove_amp_mgr(struct amp_mgr *mgr)
{
	BT_DBG("mgr %p", mgr);
	write_lock(&amp_mgr_list_lock);
	list_del(&mgr->list);
	write_unlock(&amp_mgr_list_lock);
	read_lock(&mgr->ctx_list_lock);
	while (!list_empty(&mgr->ctx_list)) {
		struct amp_ctx *ctx;
		/* kill_ctx() needs to take the ctx list lock itself, so
		 * drop the read lock around the call and restart from the
		 * list head each iteration.
		 */
		ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
		read_unlock(&mgr->ctx_list_lock);
		BT_DBG("kill ctx %p", ctx);
		kill_ctx(ctx);
		read_lock(&mgr->ctx_list_lock);
	}
	read_unlock(&mgr->ctx_list_lock);
	kfree(mgr->ctrls);
	kfree(mgr);
}
  57. static struct amp_mgr *get_amp_mgr_sk(struct sock *sk)
  58. {
  59. struct amp_mgr *mgr;
  60. struct amp_mgr *found = NULL;
  61. read_lock(&amp_mgr_list_lock);
  62. list_for_each_entry(mgr, &amp_mgr_list, list) {
  63. if ((mgr->a2mp_sock) && (mgr->a2mp_sock->sk == sk)) {
  64. found = mgr;
  65. break;
  66. }
  67. }
  68. read_unlock(&amp_mgr_list_lock);
  69. return found;
  70. }
/* Return the manager bound to the L2CAP connection of @hcon, creating
 * and registering one (with a fresh A2MP fixed-channel socket) if none
 * exists yet.  Returns NULL on allocation or socket-open failure.
 */
static struct amp_mgr *get_create_amp_mgr(struct hci_conn *hcon,
		struct sk_buff *skb)
{
	struct amp_mgr *mgr;
	write_lock(&amp_mgr_list_lock);
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		if (mgr->l2cap_conn == hcon->l2cap_data) {
			BT_DBG("found %p", mgr);
			write_unlock(&amp_mgr_list_lock);
			goto gc_finished;
		}
	}
	write_unlock(&amp_mgr_list_lock);
	/* NOTE(review): the lock is dropped across the allocation below,
	 * so two concurrent callers could each miss the other's manager
	 * and both insert one - confirm callers serialize this path.
	 */
	mgr = kzalloc(sizeof(*mgr), GFP_ATOMIC);
	if (!mgr)
		return NULL;
	mgr->l2cap_conn = hcon->l2cap_data;
	mgr->next_ident = 1;
	INIT_LIST_HEAD(&mgr->ctx_list);
	rwlock_init(&mgr->ctx_list_lock);
	/* stash the skb that triggered creation, if any (callers here
	 * pass NULL)
	 */
	mgr->skb = skb;
	BT_DBG("hcon %p mgr %p", hcon, mgr);
	mgr->a2mp_sock = open_fixed_channel(&hcon->hdev->bdaddr, &hcon->dst);
	if (!mgr->a2mp_sock) {
		kfree(mgr);
		return NULL;
	}
	write_lock(&amp_mgr_list_lock);
	list_add(&(mgr->list), &amp_mgr_list);
	write_unlock(&amp_mgr_list_lock);
gc_finished:
	return mgr;
}
  104. static struct amp_ctrl *get_ctrl(struct amp_mgr *mgr, u8 remote_id)
  105. {
  106. if ((mgr->ctrls) && (mgr->ctrls->id == remote_id))
  107. return mgr->ctrls;
  108. else
  109. return NULL;
  110. }
  111. static struct amp_ctrl *get_create_ctrl(struct amp_mgr *mgr, u8 id)
  112. {
  113. struct amp_ctrl *ctrl;
  114. BT_DBG("mgr %p, id %d", mgr, id);
  115. if ((mgr->ctrls) && (mgr->ctrls->id == id))
  116. ctrl = mgr->ctrls;
  117. else {
  118. kfree(mgr->ctrls);
  119. ctrl = kzalloc(sizeof(struct amp_ctrl), GFP_ATOMIC);
  120. if (ctrl) {
  121. ctrl->mgr = mgr;
  122. ctrl->id = id;
  123. }
  124. mgr->ctrls = ctrl;
  125. }
  126. return ctrl;
  127. }
  128. static struct amp_ctx *create_ctx(u8 type, u8 state)
  129. {
  130. struct amp_ctx *ctx = NULL;
  131. ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
  132. if (ctx) {
  133. ctx->type = type;
  134. ctx->state = state;
  135. init_timer(&(ctx->timer));
  136. ctx->timer.function = ctx_timeout;
  137. ctx->timer.data = (unsigned long) ctx;
  138. }
  139. BT_DBG("ctx %p, type %d", ctx, type);
  140. return ctx;
  141. }
  142. static inline void start_ctx(struct amp_mgr *mgr, struct amp_ctx *ctx)
  143. {
  144. BT_DBG("ctx %p", ctx);
  145. write_lock(&mgr->ctx_list_lock);
  146. list_add(&ctx->list, &mgr->ctx_list);
  147. write_unlock(&mgr->ctx_list_lock);
  148. ctx->mgr = mgr;
  149. execute_ctx(ctx, AMP_INIT, 0);
  150. }
/* Unlink @ctx from its manager, stop its timer, start any context that
 * was deferred behind it, and free it.
 */
static void destroy_ctx(struct amp_ctx *ctx)
{
	struct amp_mgr *mgr = ctx->mgr;
	BT_DBG("ctx %p deferred %p", ctx, ctx->deferred);
	del_timer(&ctx->timer);
	write_lock(&mgr->ctx_list_lock);
	list_del(&ctx->list);
	write_unlock(&mgr->ctx_list_lock);
	/* a context that was waiting on this one may now run */
	if (ctx->deferred)
		execute_ctx(ctx->deferred, AMP_INIT, 0);
	kfree(ctx);
}
  163. static struct amp_ctx *get_ctx_mgr(struct amp_mgr *mgr, u8 type)
  164. {
  165. struct amp_ctx *fnd = NULL;
  166. struct amp_ctx *ctx;
  167. read_lock(&mgr->ctx_list_lock);
  168. list_for_each_entry(ctx, &mgr->ctx_list, list) {
  169. if (ctx->type == type) {
  170. fnd = ctx;
  171. break;
  172. }
  173. }
  174. read_unlock(&mgr->ctx_list_lock);
  175. return fnd;
  176. }
  177. static struct amp_ctx *get_ctx_type(struct amp_ctx *cur, u8 type)
  178. {
  179. struct amp_mgr *mgr = cur->mgr;
  180. struct amp_ctx *fnd = NULL;
  181. struct amp_ctx *ctx;
  182. read_lock(&mgr->ctx_list_lock);
  183. list_for_each_entry(ctx, &mgr->ctx_list, list) {
  184. if ((ctx->type == type) && (ctx != cur)) {
  185. fnd = ctx;
  186. break;
  187. }
  188. }
  189. read_unlock(&mgr->ctx_list_lock);
  190. return fnd;
  191. }
  192. static struct amp_ctx *get_ctx_a2mp(struct amp_mgr *mgr, u8 ident)
  193. {
  194. struct amp_ctx *fnd = NULL;
  195. struct amp_ctx *ctx;
  196. read_lock(&mgr->ctx_list_lock);
  197. list_for_each_entry(ctx, &mgr->ctx_list, list) {
  198. if ((ctx->evt_type & AMP_A2MP_RSP) &&
  199. (ctx->rsp_ident == ident)) {
  200. fnd = ctx;
  201. break;
  202. }
  203. }
  204. read_unlock(&mgr->ctx_list_lock);
  205. return fnd;
  206. }
/* Find the context (searching every manager) that is waiting for an
 * HCI event of @evt_type matching @evt_value on device @hdev, or NULL.
 */
static struct amp_ctx *get_ctx_hdev(struct hci_dev *hdev, u8 evt_type,
		u16 evt_value)
{
	struct amp_mgr *mgr;
	struct amp_ctx *fnd = NULL;
	read_lock(&amp_mgr_list_lock);
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		struct amp_ctx *ctx;
		read_lock(&mgr->ctx_list_lock);
		list_for_each_entry(ctx, &mgr->ctx_list, list) {
			struct hci_dev *ctx_hdev;
			/* resolve the context's controller id so it can be
			 * compared with the device that raised the event
			 */
			ctx_hdev = hci_dev_get(ctx->id);
			if ((ctx_hdev == hdev) && (ctx->evt_type & evt_type)) {
				switch (evt_type) {
				case AMP_HCI_CMD_STATUS:
				case AMP_HCI_CMD_CMPLT:
					/* match on the pending HCI opcode */
					if (ctx->opcode == evt_value)
						fnd = ctx;
					break;
				case AMP_HCI_EVENT:
					/* match on the expected event code */
					if (ctx->evt_code == (u8) evt_value)
						fnd = ctx;
					break;
				}
			}
			if (ctx_hdev)
				hci_dev_put(ctx_hdev);
			if (fnd)
				break;
		}
		read_unlock(&mgr->ctx_list_lock);
	}
	read_unlock(&amp_mgr_list_lock);
	return fnd;
}
  242. static inline u8 next_ident(struct amp_mgr *mgr)
  243. {
  244. if (++mgr->next_ident == 0)
  245. mgr->next_ident = 1;
  246. return mgr->next_ident;
  247. }
  248. static inline void send_a2mp_cmd2(struct amp_mgr *mgr, u8 ident, u8 code,
  249. u16 len, void *data, u16 len2, void *data2)
  250. {
  251. struct a2mp_cmd_hdr *hdr;
  252. int plen;
  253. u8 *p, *cmd;
  254. BT_DBG("ident %d code 0x%02x", ident, code);
  255. if (!mgr->a2mp_sock)
  256. return;
  257. plen = sizeof(*hdr) + len + len2;
  258. cmd = kzalloc(plen, GFP_ATOMIC);
  259. if (!cmd)
  260. return;
  261. hdr = (struct a2mp_cmd_hdr *) cmd;
  262. hdr->code = code;
  263. hdr->ident = ident;
  264. hdr->len = cpu_to_le16(len+len2);
  265. p = cmd + sizeof(*hdr);
  266. memcpy(p, data, len);
  267. p += len;
  268. memcpy(p, data2, len2);
  269. send_a2mp(mgr->a2mp_sock, cmd, plen);
  270. kfree(cmd);
  271. }
/* Send a single-payload A2MP command (no trailing second buffer). */
static inline void send_a2mp_cmd(struct amp_mgr *mgr, u8 ident,
		u8 code, u16 len, void *data)
{
	send_a2mp_cmd2(mgr, ident, code, len, data, 0, NULL);
}
/* Handle an inbound A2MP Command Reject: kill the context whose
 * outstanding request (matched by ident) was rejected, and consume the
 * reject payload from @skb.
 */
static inline int command_rej(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_cmd_rej *rej;
	struct amp_ctx *ctx;
	BT_DBG("ident %d code %d", hdr->ident, hdr->code);
	rej = (struct a2mp_cmd_rej *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*rej))
		return -EINVAL;
	BT_DBG("reason %d", le16_to_cpu(rej->reason));
	ctx = get_ctx_a2mp(mgr, hdr->ident);
	if (ctx)
		kill_ctx(ctx);
	skb_pull(skb, sizeof(*rej));
	return 0;
}
  293. static int send_a2mp_cl(struct amp_mgr *mgr, u8 ident, u8 code, u16 len,
  294. void *msg)
  295. {
  296. struct a2mp_cl clist[16];
  297. struct a2mp_cl *cl;
  298. struct hci_dev *hdev;
  299. int num_ctrls = 1, id;
  300. cl = clist;
  301. cl->id = 0;
  302. cl->type = 0;
  303. cl->status = 1;
  304. for (id = 0; id < 16; ++id) {
  305. hdev = hci_dev_get(id);
  306. if (hdev) {
  307. if ((hdev->amp_type != HCI_BREDR) &&
  308. test_bit(HCI_UP, &hdev->flags)) {
  309. (cl + num_ctrls)->id = hdev->id;
  310. (cl + num_ctrls)->type = hdev->amp_type;
  311. (cl + num_ctrls)->status = hdev->amp_status;
  312. ++num_ctrls;
  313. }
  314. hci_dev_put(hdev);
  315. }
  316. }
  317. send_a2mp_cmd2(mgr, ident, code, len, msg,
  318. num_ctrls*sizeof(*cl), clist);
  319. return 0;
  320. }
/* Broadcast an A2MP Change Notify to every manager whose peer has
 * completed discovery.
 * NOTE(review): amp_mgr_list is walked here without amp_mgr_list_lock;
 * taking the rwlock would put the (potentially sleeping) socket send
 * path under a spinlock, so this appears to rely on the caller's
 * context to keep the list stable - confirm.
 */
static void send_a2mp_change_notify(void)
{
	struct amp_mgr *mgr;
	list_for_each_entry(mgr, &amp_mgr_list, list) {
		if (mgr->discovered)
			send_a2mp_cl(mgr, next_ident(mgr),
					A2MP_CHANGE_NOTIFY, 0, NULL);
	}
}
/* Handle an A2MP Discover Request: skip the variable run of extended
 * feature words, mark the peer as having discovered us, and reply with
 * our MTU and controller list.
 */
static inline int discover_req(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_discover_req *req;
	u16 *efm;
	struct a2mp_discover_rsp rsp;
	req = (struct a2mp_discover_req *) skb_pull(skb, sizeof(*hdr));
	if (skb->len < sizeof(*req))
		return -EINVAL;
	efm = (u16 *) skb_pull(skb, sizeof(*req));
	BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu),
		le16_to_cpu(req->ext_feat));
	/* bit 15 set means another extended-feature word follows */
	while (le16_to_cpu(req->ext_feat) & 0x8000) {
		if (skb->len < sizeof(*efm))
			return -EINVAL;
		req->ext_feat = *efm;
		BT_DBG("efm 0x%4.4x", le16_to_cpu(req->ext_feat));
		efm = (u16 *) skb_pull(skb, sizeof(*efm));
	}
	rsp.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
	rsp.ext_feat = 0;
	mgr->discovered = 1;
	return send_a2mp_cl(mgr, hdr->ident, A2MP_DISCOVER_RSP,
			sizeof(rsp), &rsp);
}
/* Handle an inbound A2MP Change Notify: refresh our cached controller
 * record for each entry in the peer's controller list, then ack.
 */
static inline int change_notify(struct amp_mgr *mgr, struct sk_buff *skb)
{
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	struct a2mp_cl *cl;
	cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*hdr));
	while (skb->len >= sizeof(*cl)) {
		struct amp_ctrl *ctrl;
		/* id 0 is the peer's BR/EDR controller - not cached */
		if (cl->id != 0) {
			ctrl = get_create_ctrl(mgr, cl->id);
			if (ctrl != NULL) {
				ctrl->type = cl->type;
				ctrl->status = cl->status;
			}
		}
		cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
	}
	/* TODO find controllers in manager that were not on received */
	/* controller list and destroy them */
	send_a2mp_cmd(mgr, hdr->ident, A2MP_CHANGE_RSP, 0, NULL);
	return 0;
}
  376. static inline int getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb)
  377. {
  378. struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
  379. u8 *data;
  380. int id;
  381. struct hci_dev *hdev;
  382. struct a2mp_getinfo_rsp rsp;
  383. data = (u8 *) skb_pull(skb, sizeof(*hdr));
  384. if (le16_to_cpu(hdr->len) < sizeof(*data))
  385. return -EINVAL;
  386. if (skb->len < sizeof(*data))
  387. return -EINVAL;
  388. id = *data;
  389. skb_pull(skb, sizeof(*data));
  390. rsp.id = id;
  391. rsp.status = 1;
  392. BT_DBG("id %d", id);
  393. hdev = hci_dev_get(id);
  394. if (hdev && hdev->amp_type != HCI_BREDR) {
  395. rsp.status = 0;
  396. rsp.total_bw = cpu_to_le32(hdev->amp_total_bw);
  397. rsp.max_bw = cpu_to_le32(hdev->amp_max_bw);
  398. rsp.min_latency = cpu_to_le32(hdev->amp_min_latency);
  399. rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap);
  400. rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size);
  401. }
  402. send_a2mp_cmd(mgr, hdr->ident, A2MP_GETINFO_RSP, sizeof(rsp), &rsp);
  403. if (hdev)
  404. hci_dev_put(hdev);
  405. return 0;
  406. }
  407. static void create_physical(struct l2cap_conn *conn, struct sock *sk)
  408. {
  409. struct amp_mgr *mgr;
  410. struct amp_ctx *ctx = NULL;
  411. BT_DBG("conn %p", conn);
  412. mgr = get_create_amp_mgr(conn->hcon, NULL);
  413. if (!mgr)
  414. goto cp_finished;
  415. BT_DBG("mgr %p", mgr);
  416. ctx = create_ctx(AMP_CREATEPHYSLINK, AMP_CPL_INIT);
  417. if (!ctx)
  418. goto cp_finished;
  419. ctx->sk = sk;
  420. sock_hold(sk);
  421. start_ctx(mgr, ctx);
  422. return;
  423. cp_finished:
  424. l2cap_amp_physical_complete(-ENOMEM, 0, 0, sk);
  425. }
  426. static void accept_physical(struct l2cap_conn *lcon, u8 id, struct sock *sk)
  427. {
  428. struct amp_mgr *mgr;
  429. struct hci_dev *hdev;
  430. struct hci_conn *conn;
  431. struct amp_ctx *aplctx = NULL;
  432. u8 remote_id = 0;
  433. int result = -EINVAL;
  434. BT_DBG("lcon %p", lcon);
  435. hdev = hci_dev_get(id);
  436. if (!hdev)
  437. goto ap_finished;
  438. BT_DBG("hdev %p", hdev);
  439. mgr = get_create_amp_mgr(lcon->hcon, NULL);
  440. if (!mgr)
  441. goto ap_finished;
  442. BT_DBG("mgr %p", mgr);
  443. conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
  444. &mgr->l2cap_conn->hcon->dst);
  445. if (conn) {
  446. BT_DBG("conn %p", hdev);
  447. result = 0;
  448. remote_id = conn->dst_id;
  449. goto ap_finished;
  450. }
  451. aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
  452. if (!aplctx)
  453. goto ap_finished;
  454. aplctx->sk = sk;
  455. sock_hold(sk);
  456. return;
  457. ap_finished:
  458. if (hdev)
  459. hci_dev_put(hdev);
  460. l2cap_amp_physical_complete(result, id, remote_id, sk);
  461. }
  462. static int getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb)
  463. {
  464. struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
  465. struct amp_ctx *ctx;
  466. struct a2mp_getampassoc_req *req;
  467. if (hdr->len < sizeof(*req))
  468. return -EINVAL;
  469. req = (struct a2mp_getampassoc_req *) skb_pull(skb, sizeof(*hdr));
  470. skb_pull(skb, sizeof(*req));
  471. ctx = create_ctx(AMP_GETAMPASSOC, AMP_GAA_INIT);
  472. if (!ctx)
  473. return -ENOMEM;
  474. ctx->id = req->id;
  475. ctx->d.gaa.req_ident = hdr->ident;
  476. ctx->hdev = hci_dev_get(ctx->id);
  477. if (ctx->hdev)
  478. ctx->d.gaa.assoc = kmalloc(ctx->hdev->amp_assoc_size,
  479. GFP_ATOMIC);
  480. start_ctx(mgr, ctx);
  481. return 0;
  482. }
  483. static u8 getampassoc_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
  484. {
  485. struct sk_buff *skb = (struct sk_buff *) data;
  486. struct hci_cp_read_local_amp_assoc cp;
  487. struct hci_rp_read_local_amp_assoc *rp;
  488. struct a2mp_getampassoc_rsp rsp;
  489. u16 rem_len;
  490. u16 frag_len;
  491. rsp.status = 1;
  492. if ((evt_type == AMP_KILLED) || (!ctx->hdev) || (!ctx->d.gaa.assoc))
  493. goto gaa_finished;
  494. switch (ctx->state) {
  495. case AMP_GAA_INIT:
  496. ctx->state = AMP_GAA_RLAA_COMPLETE;
  497. ctx->evt_type = AMP_HCI_CMD_CMPLT;
  498. ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
  499. ctx->d.gaa.len_so_far = 0;
  500. cp.phy_handle = 0;
  501. cp.len_so_far = 0;
  502. cp.max_len = ctx->hdev->amp_assoc_size;
  503. hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
  504. break;
  505. case AMP_GAA_RLAA_COMPLETE:
  506. if (skb->len < 4)
  507. goto gaa_finished;
  508. rp = (struct hci_rp_read_local_amp_assoc *) skb->data;
  509. if (rp->status)
  510. goto gaa_finished;
  511. rem_len = le16_to_cpu(rp->rem_len);
  512. skb_pull(skb, 4);
  513. frag_len = skb->len;
  514. if (ctx->d.gaa.len_so_far + rem_len <=
  515. ctx->hdev->amp_assoc_size) {
  516. struct hci_cp_read_local_amp_assoc cp;
  517. u8 *assoc = ctx->d.gaa.assoc + ctx->d.gaa.len_so_far;
  518. memcpy(assoc, rp->frag, frag_len);
  519. ctx->d.gaa.len_so_far += rem_len;
  520. rem_len -= frag_len;
  521. if (rem_len == 0) {
  522. rsp.status = 0;
  523. goto gaa_finished;
  524. }
  525. /* more assoc data to read */
  526. cp.phy_handle = 0;
  527. cp.len_so_far = ctx->d.gaa.len_so_far;
  528. cp.max_len = ctx->hdev->amp_assoc_size;
  529. hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
  530. }
  531. break;
  532. default:
  533. goto gaa_finished;
  534. break;
  535. }
  536. return 0;
  537. gaa_finished:
  538. rsp.id = ctx->id;
  539. send_a2mp_cmd2(ctx->mgr, ctx->d.gaa.req_ident, A2MP_GETAMPASSOC_RSP,
  540. sizeof(rsp), &rsp,
  541. ctx->d.gaa.len_so_far, ctx->d.gaa.assoc);
  542. kfree(ctx->d.gaa.assoc);
  543. if (ctx->hdev)
  544. hci_dev_put(ctx->hdev);
  545. return 1;
  546. }
/* Completion cookie shared between hmac_sha256() and its async hash
 * callback: the callback records the final status in @err and signals
 * @completion.
 */
struct hmac_sha256_result {
	struct completion completion;
	int err;
};
/* Async hash callback: record the final status and wake the waiter.
 * -EINPROGRESS notifications are intermediate and ignored.
 */
static void hmac_sha256_final(struct crypto_async_request *req, int err)
{
	struct hmac_sha256_result *r = req->data;
	if (err == -EINPROGRESS)
		return;
	r->err = err;
	complete(&r->completion);
}
  559. int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize,
  560. u8 *output, u8 outlen)
  561. {
  562. int ret = 0;
  563. struct crypto_ahash *tfm;
  564. struct scatterlist sg;
  565. struct ahash_request *req;
  566. struct hmac_sha256_result tresult;
  567. void *hash_buff = NULL;
  568. unsigned char hash_result[64];
  569. int i;
  570. memset(output, 0, outlen);
  571. init_completion(&tresult.completion);
  572. tfm = crypto_alloc_ahash("hmac(sha256)", CRYPTO_ALG_TYPE_AHASH,
  573. CRYPTO_ALG_TYPE_AHASH_MASK);
  574. if (IS_ERR(tfm)) {
  575. BT_DBG("crypto_alloc_ahash failed");
  576. ret = PTR_ERR(tfm);
  577. goto err_tfm;
  578. }
  579. req = ahash_request_alloc(tfm, GFP_KERNEL);
  580. if (!req) {
  581. BT_DBG("failed to allocate request for hmac(sha256)");
  582. ret = -ENOMEM;
  583. goto err_req;
  584. }
  585. ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
  586. hmac_sha256_final, &tresult);
  587. hash_buff = kzalloc(psize, GFP_KERNEL);
  588. if (!hash_buff) {
  589. BT_DBG("failed to kzalloc hash_buff");
  590. ret = -ENOMEM;
  591. goto err_hash_buf;
  592. }
  593. memset(hash_result, 0, 64);
  594. memcpy(hash_buff, plaintext, psize);
  595. sg_init_one(&sg, hash_buff, psize);
  596. if (ksize) {
  597. crypto_ahash_clear_flags(tfm, ~0);
  598. ret = crypto_ahash_setkey(tfm, key, ksize);
  599. if (ret) {
  600. BT_DBG("crypto_ahash_setkey failed");
  601. goto err_setkey;
  602. }
  603. }
  604. ahash_request_set_crypt(req, &sg, hash_result, psize);
  605. ret = crypto_ahash_digest(req);
  606. BT_DBG("ret 0x%x", ret);
  607. switch (ret) {
  608. case 0:
  609. for (i = 0; i < outlen; i++)
  610. output[i] = hash_result[i];
  611. break;
  612. case -EINPROGRESS:
  613. case -EBUSY:
  614. ret = wait_for_completion_interruptible(&tresult.completion);
  615. if (!ret && !tresult.err) {
  616. INIT_COMPLETION(tresult.completion);
  617. break;
  618. } else {
  619. BT_DBG("wait_for_completion_interruptible failed");
  620. if (!ret)
  621. ret = tresult.err;
  622. goto out;
  623. }
  624. default:
  625. goto out;
  626. }
  627. out:
  628. err_setkey:
  629. kfree(hash_buff);
  630. err_hash_buf:
  631. ahash_request_free(req);
  632. err_req:
  633. crypto_free_ahash(tfm);
  634. err_tfm:
  635. return ret;
  636. }
  637. static void show_key(u8 *k)
  638. {
  639. int i = 0;
  640. for (i = 0; i < 32; i += 8)
  641. BT_DBG(" %02x %02x %02x %02x %02x %02x %02x %02x",
  642. *(k+i+0), *(k+i+1), *(k+i+2), *(k+i+3),
  643. *(k+i+4), *(k+i+5), *(k+i+6), *(k+i+7));
  644. }
/* Derive the 32-byte AMP link key for @conn from its BR/EDR link key.
 * Key type 3 yields the "gamp" key directly; other accepted types are
 * further specialized with "802b".  On success fills @data (32 bytes),
 * @len and @type and returns 0; returns a negative errno otherwise.
 */
static int physlink_security(struct hci_conn *conn, u8 *data, u8 *len, u8 *type)
{
	u8 bt2_key[32];
	u8 gamp_key[32];
	u8 b802_key[32];
	int result;
	if (!hci_conn_check_link_mode(conn))
		return -EACCES;
	BT_DBG("key_type %d", conn->key_type);
	/* only key types >= 3 may seed an AMP key */
	if (conn->key_type < 3)
		return -EACCES;
	*type = conn->key_type;
	*len = 32;
	/* double the 16-byte BR/EDR link key to form the 32-byte seed */
	memcpy(&bt2_key[0], conn->link_key, 16);
	memcpy(&bt2_key[16], conn->link_key, 16);
	result = hmac_sha256(bt2_key, 32, "gamp", 4, gamp_key, 32);
	if (result)
		goto ps_finished;
	if (conn->key_type == 3) {
		/* dedicated AMP link key - use the gamp key as-is */
		BT_DBG("gamp_key");
		show_key(gamp_key);
		memcpy(data, gamp_key, 32);
		goto ps_finished;
	}
	result = hmac_sha256(gamp_key, 32, "802b", 4, b802_key, 32);
	if (result)
		goto ps_finished;
	BT_DBG("802b_key");
	show_key(b802_key);
	memcpy(data, b802_key, 32);
ps_finished:
	return result;
}
  678. static u8 amp_next_handle;
  679. static inline u8 physlink_handle(struct hci_dev *hdev)
  680. {
  681. /* TODO amp_next_handle should be part of hci_dev */
  682. if (amp_next_handle == 0)
  683. amp_next_handle = 1;
  684. return amp_next_handle++;
  685. }
  686. /* Start an Accept Physical Link sequence */
  687. static int createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
  688. {
  689. struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
  690. struct amp_ctx *ctx = NULL;
  691. struct a2mp_createphyslink_req *req;
  692. if (hdr->len < sizeof(*req))
  693. return -EINVAL;
  694. req = (struct a2mp_createphyslink_req *) skb_pull(skb, sizeof(*hdr));
  695. skb_pull(skb, sizeof(*req));
  696. BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id);
  697. /* initialize the context */
  698. ctx = create_ctx(AMP_ACCEPTPHYSLINK, AMP_APL_INIT);
  699. if (!ctx)
  700. return -ENOMEM;
  701. ctx->d.apl.req_ident = hdr->ident;
  702. ctx->d.apl.remote_id = req->local_id;
  703. ctx->id = req->remote_id;
  704. /* add the supplied remote assoc to the context */
  705. ctx->d.apl.remote_assoc = kmalloc(skb->len, GFP_ATOMIC);
  706. if (ctx->d.apl.remote_assoc)
  707. memcpy(ctx->d.apl.remote_assoc, skb->data, skb->len);
  708. ctx->d.apl.len_so_far = 0;
  709. ctx->d.apl.rem_len = skb->len;
  710. skb_pull(skb, skb->len);
  711. ctx->hdev = hci_dev_get(ctx->id);
  712. start_ctx(mgr, ctx);
  713. return 0;
  714. }
  715. static u8 acceptphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
  716. {
  717. struct sk_buff *skb = data;
  718. struct hci_cp_accept_phys_link acp;
  719. struct hci_cp_write_remote_amp_assoc wcp;
  720. struct hci_rp_write_remote_amp_assoc *wrp;
  721. struct hci_ev_cmd_status *cs = data;
  722. struct hci_ev_phys_link_complete *ev;
  723. struct a2mp_createphyslink_rsp rsp;
  724. struct amp_ctx *cplctx;
  725. struct amp_ctx *aplctx;
  726. u16 frag_len;
  727. struct hci_conn *conn;
  728. int result;
  729. BT_DBG("state %d", ctx->state);
  730. result = -EINVAL;
  731. rsp.status = 1; /* Invalid Controller ID */
  732. if (!ctx->hdev || !test_bit(HCI_UP, &ctx->hdev->flags))
  733. goto apl_finished;
  734. if (evt_type == AMP_KILLED) {
  735. result = -EAGAIN;
  736. rsp.status = 4; /* Disconnect request received */
  737. goto apl_finished;
  738. }
  739. if (!ctx->d.apl.remote_assoc) {
  740. result = -ENOMEM;
  741. rsp.status = 2; /* Unable to Start */
  742. goto apl_finished;
  743. }
  744. switch (ctx->state) {
  745. case AMP_APL_INIT:
  746. BT_DBG("local_id %d, remote_id %d",
  747. ctx->id, ctx->d.apl.remote_id);
  748. conn = hci_conn_hash_lookup_id(ctx->hdev,
  749. &ctx->mgr->l2cap_conn->hcon->dst,
  750. ctx->d.apl.remote_id);
  751. if (conn) {
  752. result = -EEXIST;
  753. rsp.status = 5; /* Already Exists */
  754. goto apl_finished;
  755. }
  756. aplctx = get_ctx_type(ctx, AMP_ACCEPTPHYSLINK);
  757. if ((aplctx) &&
  758. (aplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
  759. BT_DBG("deferred to %p", aplctx);
  760. aplctx->deferred = ctx;
  761. break;
  762. }
  763. cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
  764. if ((cplctx) &&
  765. (cplctx->d.cpl.remote_id == ctx->d.apl.remote_id)) {
  766. struct hci_conn *bcon = ctx->mgr->l2cap_conn->hcon;
  767. BT_DBG("local %s remote %s",
  768. batostr(&bcon->hdev->bdaddr),
  769. batostr(&bcon->dst));
  770. if ((cplctx->state < AMP_CPL_PL_COMPLETE) ||
  771. (bacmp(&bcon->hdev->bdaddr, &bcon->dst) < 0)) {
  772. BT_DBG("COLLISION LOSER");
  773. cplctx->deferred = ctx;
  774. cancel_ctx(cplctx);
  775. break;
  776. } else {
  777. BT_DBG("COLLISION WINNER");
  778. result = -EISCONN;
  779. rsp.status = 3; /* Collision */
  780. goto apl_finished;
  781. }
  782. }
  783. result = physlink_security(ctx->mgr->l2cap_conn->hcon, acp.data,
  784. &acp.key_len, &acp.type);
  785. if (result) {
  786. BT_DBG("SECURITY");
  787. rsp.status = 6; /* Security Violation */
  788. goto apl_finished;
  789. }
  790. ctx->d.apl.phy_handle = physlink_handle(ctx->hdev);
  791. ctx->state = AMP_APL_APL_STATUS;
  792. ctx->evt_type = AMP_HCI_CMD_STATUS;
  793. ctx->opcode = HCI_OP_ACCEPT_PHYS_LINK;
  794. acp.phy_handle = ctx->d.apl.phy_handle;
  795. hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(acp), &acp);
  796. break;
  797. case AMP_APL_APL_STATUS:
  798. if (cs->status != 0)
  799. goto apl_finished;
  800. /* PAL will accept link, send a2mp response */
  801. rsp.local_id = ctx->id;
  802. rsp.remote_id = ctx->d.apl.remote_id;
  803. rsp.status = 0;
  804. send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
  805. A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
  806. /* send the first assoc fragment */
  807. wcp.phy_handle = ctx->d.apl.phy_handle;
  808. wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
  809. wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
  810. frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
  811. memcpy(wcp.frag, ctx->d.apl.remote_assoc, frag_len);
  812. ctx->state = AMP_APL_WRA_COMPLETE;
  813. ctx->evt_type = AMP_HCI_CMD_CMPLT;
  814. ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
  815. hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
  816. break;
  817. case AMP_APL_WRA_COMPLETE:
  818. /* received write remote amp assoc command complete event */
  819. wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
  820. if (wrp->status != 0)
  821. goto apl_finished;
  822. if (wrp->phy_handle != ctx->d.apl.phy_handle)
  823. goto apl_finished;
  824. /* update progress */
  825. frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
  826. ctx->d.apl.len_so_far += frag_len;
  827. ctx->d.apl.rem_len -= frag_len;
  828. if (ctx->d.apl.rem_len > 0) {
  829. u8 *assoc;
  830. /* another assoc fragment to send */
  831. wcp.phy_handle = ctx->d.apl.phy_handle;
  832. wcp.len_so_far = cpu_to_le16(ctx->d.apl.len_so_far);
  833. wcp.rem_len = cpu_to_le16(ctx->d.apl.rem_len);
  834. frag_len = min_t(u16, 248, ctx->d.apl.rem_len);
  835. assoc = ctx->d.apl.remote_assoc + ctx->d.apl.len_so_far;
  836. memcpy(wcp.frag, assoc, frag_len);
  837. hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
  838. break;
  839. }
  840. /* wait for physical link complete event */
  841. ctx->state = AMP_APL_PL_COMPLETE;
  842. ctx->evt_type = AMP_HCI_EVENT;
  843. ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
  844. break;
  845. case AMP_APL_PL_COMPLETE:
  846. /* physical link complete event received */
  847. if (skb->len < sizeof(*ev))
  848. goto apl_finished;
  849. ev = (struct hci_ev_phys_link_complete *) skb->data;
  850. if (ev->phy_handle != ctx->d.apl.phy_handle)
  851. break;
  852. if (ev->status != 0)
  853. goto apl_finished;
  854. conn = hci_conn_hash_lookup_handle(ctx->hdev, ev->phy_handle);
  855. if (!conn)
  856. goto apl_finished;
  857. result = 0;
  858. BT_DBG("PL_COMPLETE phy_handle %x", ev->phy_handle);
  859. conn->dst_id = ctx->d.apl.remote_id;
  860. bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
  861. goto apl_finished;
  862. break;
  863. default:
  864. goto apl_finished;
  865. break;
  866. }
  867. return 0;
  868. apl_finished:
  869. if (ctx->sk)
  870. l2cap_amp_physical_complete(result, ctx->id,
  871. ctx->d.apl.remote_id, ctx->sk);
  872. if ((result) && (ctx->state < AMP_APL_PL_COMPLETE)) {
  873. rsp.local_id = ctx->id;
  874. rsp.remote_id = ctx->d.apl.remote_id;
  875. send_a2mp_cmd(ctx->mgr, ctx->d.apl.req_ident,
  876. A2MP_CREATEPHYSLINK_RSP, sizeof(rsp), &rsp);
  877. }
  878. kfree(ctx->d.apl.remote_assoc);
  879. if (ctx->sk)
  880. sock_put(ctx->sk);
  881. if (ctx->hdev)
  882. hci_dev_put(ctx->hdev);
  883. return 1;
  884. }
  885. static void cancel_cpl_ctx(struct amp_ctx *ctx, u8 reason)
  886. {
  887. struct hci_cp_disconn_phys_link dcp;
  888. ctx->state = AMP_CPL_PL_CANCEL;
  889. ctx->evt_type = AMP_HCI_EVENT;
  890. ctx->evt_code = HCI_EV_DISCONN_PHYS_LINK_COMPLETE;
  891. dcp.phy_handle = ctx->d.cpl.phy_handle;
  892. dcp.reason = reason;
  893. hci_send_cmd(ctx->hdev, HCI_OP_DISCONN_PHYS_LINK, sizeof(dcp), &dcp);
  894. }
  895. static u8 createphyslink_handler(struct amp_ctx *ctx, u8 evt_type, void *data)
  896. {
  897. struct amp_ctrl *ctrl;
  898. struct sk_buff *skb = data;
  899. struct a2mp_cmd_hdr *hdr;
  900. struct hci_ev_cmd_status *cs = data;
  901. struct amp_ctx *cplctx;
  902. struct a2mp_discover_req dreq;
  903. struct a2mp_discover_rsp *drsp;
  904. u16 *efm;
  905. struct a2mp_getinfo_req greq;
  906. struct a2mp_getinfo_rsp *grsp;
  907. struct a2mp_cl *cl;
  908. struct a2mp_getampassoc_req areq;
  909. struct a2mp_getampassoc_rsp *arsp;
  910. struct hci_cp_create_phys_link cp;
  911. struct hci_cp_write_remote_amp_assoc wcp;
  912. struct hci_rp_write_remote_amp_assoc *wrp;
  913. struct hci_ev_channel_selected *cev;
  914. struct hci_cp_read_local_amp_assoc rcp;
  915. struct hci_rp_read_local_amp_assoc *rrp;
  916. struct a2mp_createphyslink_req creq;
  917. struct a2mp_createphyslink_rsp *crsp;
  918. struct hci_ev_phys_link_complete *pev;
  919. struct hci_ev_disconn_phys_link_complete *dev;
  920. u8 *assoc, *rassoc, *lassoc;
  921. u16 frag_len;
  922. u16 rem_len;
  923. int result = -EAGAIN;
  924. struct hci_conn *conn;
  925. BT_DBG("state %d", ctx->state);
  926. if (evt_type == AMP_KILLED)
  927. goto cpl_finished;
  928. if (evt_type == AMP_CANCEL) {
  929. if ((ctx->state < AMP_CPL_CPL_STATUS) ||
  930. ((ctx->state == AMP_CPL_PL_COMPLETE) &&
  931. !(ctx->evt_type & AMP_HCI_EVENT)))
  932. goto cpl_finished;
  933. cancel_cpl_ctx(ctx, 0x16);
  934. return 0;
  935. }
  936. switch (ctx->state) {
  937. case AMP_CPL_INIT:
  938. cplctx = get_ctx_type(ctx, AMP_CREATEPHYSLINK);
  939. if (cplctx) {
  940. BT_DBG("deferred to %p", cplctx);
  941. cplctx->deferred = ctx;
  942. break;
  943. }
  944. ctx->state = AMP_CPL_DISC_RSP;
  945. ctx->evt_type = AMP_A2MP_RSP;
  946. ctx->rsp_ident = next_ident(ctx->mgr);
  947. dreq.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU);
  948. dreq.ext_feat = 0;
  949. send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_DISCOVER_REQ,
  950. sizeof(dreq), &dreq);
  951. break;
  952. case AMP_CPL_DISC_RSP:
  953. drsp = (struct a2mp_discover_rsp *) skb_pull(skb, sizeof(*hdr));
  954. if (skb->len < (sizeof(*drsp))) {
  955. result = -EINVAL;
  956. goto cpl_finished;
  957. }
  958. efm = (u16 *) skb_pull(skb, sizeof(*drsp));
  959. BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(drsp->mtu),
  960. le16_to_cpu(drsp->ext_feat));
  961. while (le16_to_cpu(drsp->ext_feat) & 0x8000) {
  962. if (skb->len < sizeof(*efm)) {
  963. result = -EINVAL;
  964. goto cpl_finished;
  965. }
  966. drsp->ext_feat = *efm;
  967. BT_DBG("efm 0x%4.4x", le16_to_cpu(drsp->ext_feat));
  968. efm = (u16 *) skb_pull(skb, sizeof(*efm));
  969. }
  970. cl = (struct a2mp_cl *) efm;
  971. /* find the first remote and local controller with the
  972. * same type
  973. */
  974. greq.id = 0;
  975. result = -ENODEV;
  976. while (skb->len >= sizeof(*cl)) {
  977. if ((cl->id != 0) && (greq.id == 0)) {
  978. struct hci_dev *hdev;
  979. hdev = hci_dev_get_type(cl->type);
  980. if (hdev) {
  981. struct hci_conn *conn;
  982. ctx->hdev = hdev;
  983. ctx->id = hdev->id;
  984. ctx->d.cpl.remote_id = cl->id;
  985. conn = hci_conn_hash_lookup_ba(hdev,
  986. ACL_LINK,
  987. &ctx->mgr->l2cap_conn->hcon->dst);
  988. if (conn) {
  989. BT_DBG("PL_COMPLETE exists %x",
  990. (int) conn->handle);
  991. result = 0;
  992. }
  993. ctrl = get_create_ctrl(ctx->mgr,
  994. cl->id);
  995. if (ctrl) {
  996. ctrl->type = cl->type;
  997. ctrl->status = cl->status;
  998. }
  999. greq.id = cl->id;
  1000. }
  1001. }
  1002. cl = (struct a2mp_cl *) skb_pull(skb, sizeof(*cl));
  1003. }
  1004. if ((!greq.id) || (!result))
  1005. goto cpl_finished;
  1006. ctx->state = AMP_CPL_GETINFO_RSP;
  1007. ctx->evt_type = AMP_A2MP_RSP;
  1008. ctx->rsp_ident = next_ident(ctx->mgr);
  1009. send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETINFO_REQ,
  1010. sizeof(greq), &greq);
  1011. break;
  1012. case AMP_CPL_GETINFO_RSP:
  1013. if (skb->len < sizeof(*grsp))
  1014. goto cpl_finished;
  1015. grsp = (struct a2mp_getinfo_rsp *) skb_pull(skb, sizeof(*hdr));
  1016. skb_pull(skb, sizeof(*grsp));
  1017. if (grsp->status)
  1018. goto cpl_finished;
  1019. if (grsp->id != ctx->d.cpl.remote_id)
  1020. goto cpl_finished;
  1021. ctrl = get_ctrl(ctx->mgr, grsp->id);
  1022. if (!ctrl)
  1023. goto cpl_finished;
  1024. ctrl->status = grsp->status;
  1025. ctrl->total_bw = le32_to_cpu(grsp->total_bw);
  1026. ctrl->max_bw = le32_to_cpu(grsp->max_bw);
  1027. ctrl->min_latency = le32_to_cpu(grsp->min_latency);
  1028. ctrl->pal_cap = le16_to_cpu(grsp->pal_cap);
  1029. ctrl->max_assoc_size = le16_to_cpu(grsp->assoc_size);
  1030. ctx->d.cpl.max_len = ctrl->max_assoc_size;
  1031. /* setup up GAA request */
  1032. areq.id = ctx->d.cpl.remote_id;
  1033. /* advance context state */
  1034. ctx->state = AMP_CPL_GAA_RSP;
  1035. ctx->evt_type = AMP_A2MP_RSP;
  1036. ctx->rsp_ident = next_ident(ctx->mgr);
  1037. send_a2mp_cmd(ctx->mgr, ctx->rsp_ident, A2MP_GETAMPASSOC_REQ,
  1038. sizeof(areq), &areq);
  1039. break;
  1040. case AMP_CPL_GAA_RSP:
  1041. if (skb->len < sizeof(*arsp))
  1042. goto cpl_finished;
  1043. hdr = (void *) skb->data;
  1044. arsp = (void *) skb_pull(skb, sizeof(*hdr));
  1045. if (arsp->status != 0)
  1046. goto cpl_finished;
  1047. /* store away remote assoc */
  1048. assoc = (u8 *) skb_pull(skb, sizeof(*arsp));
  1049. ctx->d.cpl.len_so_far = 0;
  1050. ctx->d.cpl.rem_len = hdr->len - sizeof(*arsp);
  1051. skb_pull(skb, ctx->d.cpl.rem_len);
  1052. rassoc = kmalloc(ctx->d.cpl.rem_len, GFP_ATOMIC);
  1053. if (!rassoc)
  1054. goto cpl_finished;
  1055. memcpy(rassoc, assoc, ctx->d.cpl.rem_len);
  1056. ctx->d.cpl.remote_assoc = rassoc;
  1057. /* set up CPL command */
  1058. ctx->d.cpl.phy_handle = physlink_handle(ctx->hdev);
  1059. cp.phy_handle = ctx->d.cpl.phy_handle;
  1060. if (physlink_security(ctx->mgr->l2cap_conn->hcon, cp.data,
  1061. &cp.key_len, &cp.type)) {
  1062. result = -EPERM;
  1063. goto cpl_finished;
  1064. }
  1065. /* advance context state */
  1066. ctx->state = AMP_CPL_CPL_STATUS;
  1067. ctx->evt_type = AMP_HCI_CMD_STATUS;
  1068. ctx->opcode = HCI_OP_CREATE_PHYS_LINK;
  1069. hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(cp), &cp);
  1070. break;
  1071. case AMP_CPL_CPL_STATUS:
  1072. /* received create physical link command status */
  1073. if (cs->status != 0)
  1074. goto cpl_finished;
  1075. /* send the first assoc fragment */
  1076. wcp.phy_handle = ctx->d.cpl.phy_handle;
  1077. wcp.len_so_far = ctx->d.cpl.len_so_far;
  1078. wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
  1079. frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
  1080. memcpy(wcp.frag, ctx->d.cpl.remote_assoc, frag_len);
  1081. ctx->state = AMP_CPL_WRA_COMPLETE;
  1082. ctx->evt_type = AMP_HCI_CMD_CMPLT;
  1083. ctx->opcode = HCI_OP_WRITE_REMOTE_AMP_ASSOC;
  1084. hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
  1085. break;
  1086. case AMP_CPL_WRA_COMPLETE:
  1087. /* received write remote amp assoc command complete event */
  1088. if (skb->len < sizeof(*wrp))
  1089. goto cpl_finished;
  1090. wrp = (struct hci_rp_write_remote_amp_assoc *) skb->data;
  1091. if (wrp->status != 0)
  1092. goto cpl_finished;
  1093. if (wrp->phy_handle != ctx->d.cpl.phy_handle)
  1094. goto cpl_finished;
  1095. /* update progress */
  1096. frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
  1097. ctx->d.cpl.len_so_far += frag_len;
  1098. ctx->d.cpl.rem_len -= frag_len;
  1099. if (ctx->d.cpl.rem_len > 0) {
  1100. /* another assoc fragment to send */
  1101. wcp.phy_handle = ctx->d.cpl.phy_handle;
  1102. wcp.len_so_far = cpu_to_le16(ctx->d.cpl.len_so_far);
  1103. wcp.rem_len = cpu_to_le16(ctx->d.cpl.rem_len);
  1104. frag_len = min_t(u16, 248, ctx->d.cpl.rem_len);
  1105. memcpy(wcp.frag,
  1106. ctx->d.cpl.remote_assoc + ctx->d.cpl.len_so_far,
  1107. frag_len);
  1108. hci_send_cmd(ctx->hdev, ctx->opcode, 5+frag_len, &wcp);
  1109. break;
  1110. }
  1111. /* now wait for channel selected event */
  1112. ctx->state = AMP_CPL_CHANNEL_SELECT;
  1113. ctx->evt_type = AMP_HCI_EVENT;
  1114. ctx->evt_code = HCI_EV_CHANNEL_SELECTED;
  1115. break;
  1116. case AMP_CPL_CHANNEL_SELECT:
  1117. /* received channel selection event */
  1118. if (skb->len < sizeof(*cev))
  1119. goto cpl_finished;
  1120. cev = (void *) skb->data;
  1121. /* TODO - PK This check is valid but Libra PAL returns 0 for handle during
  1122. Create Physical Link collision scenario
  1123. if (cev->phy_handle != ctx->d.cpl.phy_handle)
  1124. goto cpl_finished;
  1125. */
  1126. /* request the first local assoc fragment */
  1127. rcp.phy_handle = ctx->d.cpl.phy_handle;
  1128. rcp.len_so_far = 0;
  1129. rcp.max_len = ctx->d.cpl.max_len;
  1130. lassoc = kmalloc(ctx->d.cpl.max_len, GFP_ATOMIC);
  1131. if (!lassoc)
  1132. goto cpl_finished;
  1133. ctx->d.cpl.local_assoc = lassoc;
  1134. ctx->d.cpl.len_so_far = 0;
  1135. ctx->state = AMP_CPL_RLA_COMPLETE;
  1136. ctx->evt_type = AMP_HCI_CMD_CMPLT;
  1137. ctx->opcode = HCI_OP_READ_LOCAL_AMP_ASSOC;
  1138. hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
  1139. break;
  1140. case AMP_CPL_RLA_COMPLETE:
  1141. /* received read local amp assoc command complete event */
  1142. if (skb->len < 4)
  1143. goto cpl_finished;
  1144. rrp = (struct hci_rp_read_local_amp_assoc *) skb->data;
  1145. if (rrp->status)
  1146. goto cpl_finished;
  1147. if (rrp->phy_handle != ctx->d.cpl.phy_handle)
  1148. goto cpl_finished;
  1149. rem_len = le16_to_cpu(rrp->rem_len);
  1150. skb_pull(skb, 4);
  1151. frag_len = skb->len;
  1152. if (ctx->d.cpl.len_so_far + rem_len > ctx->d.cpl.max_len)
  1153. goto cpl_finished;
  1154. /* save this fragment in context */
  1155. lassoc = ctx->d.cpl.local_assoc + ctx->d.cpl.len_so_far;
  1156. memcpy(lassoc, rrp->frag, frag_len);
  1157. ctx->d.cpl.len_so_far += frag_len;
  1158. rem_len -= frag_len;
  1159. if (rem_len > 0) {
  1160. /* request another local assoc fragment */
  1161. rcp.phy_handle = ctx->d.cpl.phy_handle;
  1162. rcp.len_so_far = ctx->d.cpl.len_so_far;
  1163. rcp.max_len = ctx->d.cpl.max_len;
  1164. hci_send_cmd(ctx->hdev, ctx->opcode, sizeof(rcp), &rcp);
  1165. } else {
  1166. creq.local_id = ctx->id;
  1167. creq.remote_id = ctx->d.cpl.remote_id;
  1168. /* wait for A2MP rsp AND phys link complete event */
  1169. ctx->state = AMP_CPL_PL_COMPLETE;
  1170. ctx->evt_type = AMP_A2MP_RSP | AMP_HCI_EVENT;
  1171. ctx->rsp_ident = next_ident(ctx->mgr);
  1172. ctx->evt_code = HCI_EV_PHYS_LINK_COMPLETE;
  1173. send_a2mp_cmd2(ctx->mgr, ctx->rsp_ident,
  1174. A2MP_CREATEPHYSLINK_REQ, sizeof(creq), &creq,
  1175. ctx->d.cpl.len_so_far, ctx->d.cpl.local_assoc);
  1176. }
  1177. break;
  1178. case AMP_CPL_PL_COMPLETE:
  1179. if (evt_type == AMP_A2MP_RSP) {
  1180. /* create physical link response received */
  1181. ctx->evt_type &= ~AMP_A2MP_RSP;
  1182. if (skb->len < sizeof(*crsp))
  1183. goto cpl_finished;
  1184. crsp = (void *) skb_pull(skb, sizeof(*hdr));
  1185. if ((crsp->local_id != ctx->d.cpl.remote_id) ||
  1186. (crsp->remote_id != ctx->id) ||
  1187. (crsp->status != 0)) {
  1188. cancel_cpl_ctx(ctx, 0x13);
  1189. break;
  1190. }
  1191. /* notify Qualcomm PAL */
  1192. if (ctx->hdev->manufacturer == 0x001d)
  1193. hci_send_cmd(ctx->hdev,
  1194. hci_opcode_pack(0x3f, 0x00), 0, NULL);
  1195. }
  1196. if (evt_type == AMP_HCI_EVENT) {
  1197. ctx->evt_type &= ~AMP_HCI_EVENT;
  1198. /* physical link complete event received */
  1199. if (skb->len < sizeof(*pev))
  1200. goto cpl_finished;
  1201. pev = (void *) skb->data;
  1202. if (pev->phy_handle != ctx->d.cpl.phy_handle)
  1203. break;
  1204. if (pev->status != 0)
  1205. goto cpl_finished;
  1206. }
  1207. if (ctx->evt_type)
  1208. break;
  1209. conn = hci_conn_hash_lookup_handle(ctx->hdev,
  1210. ctx->d.cpl.phy_handle);
  1211. if (!conn)
  1212. goto cpl_finished;
  1213. result = 0;
  1214. BT_DBG("PL_COMPLETE phy_handle %x", ctx->d.cpl.phy_handle);
  1215. bacpy(&conn->dst, &ctx->mgr->l2cap_conn->hcon->dst);
  1216. conn->dst_id = ctx->d.cpl.remote_id;
  1217. conn->out = 1;
  1218. goto cpl_finished;
  1219. break;
  1220. case AMP_CPL_PL_CANCEL:
  1221. dev = (void *) skb->data;
  1222. BT_DBG("PL_COMPLETE cancelled %x", dev->phy_handle);
  1223. result = -EISCONN;
  1224. goto cpl_finished;
  1225. break;
  1226. default:
  1227. goto cpl_finished;
  1228. break;
  1229. }
  1230. return 0;
  1231. cpl_finished:
  1232. l2cap_amp_physical_complete(result, ctx->id, ctx->d.cpl.remote_id,
  1233. ctx->sk);
  1234. if (ctx->sk)
  1235. sock_put(ctx->sk);
  1236. if (ctx->hdev)
  1237. hci_dev_put(ctx->hdev);
  1238. kfree(ctx->d.cpl.remote_assoc);
  1239. kfree(ctx->d.cpl.local_assoc);
  1240. return 1;
  1241. }
  1242. static int disconnphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb)
  1243. {
  1244. struct a2mp_cmd_hdr *hdr = (void *) skb->data;
  1245. struct a2mp_disconnphyslink_req *req;
  1246. struct a2mp_disconnphyslink_rsp rsp;
  1247. struct hci_dev *hdev;
  1248. struct hci_conn *conn;
  1249. struct amp_ctx *aplctx;
  1250. BT_DBG("mgr %p skb %p", mgr, skb);
  1251. if (hdr->len < sizeof(*req))
  1252. return -EINVAL;
  1253. req = (void *) skb_pull(skb, sizeof(*hdr));
  1254. skb_pull(skb, sizeof(*req));
  1255. rsp.local_id = req->remote_id;
  1256. rsp.remote_id = req->local_id;
  1257. rsp.status = 0;
  1258. BT_DBG("local_id %d remote_id %d",
  1259. (int) rsp.local_id, (int) rsp.remote_id);
  1260. hdev = hci_dev_get(rsp.local_id);
  1261. if (!hdev) {
  1262. rsp.status = 1; /* Invalid Controller ID */
  1263. goto dpl_finished;
  1264. }
  1265. BT_DBG("hdev %p", hdev);
  1266. conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
  1267. &mgr->l2cap_conn->hcon->dst);
  1268. if (!conn) {
  1269. aplctx = get_ctx_mgr(mgr, AMP_ACCEPTPHYSLINK);
  1270. if (aplctx) {
  1271. kill_ctx(aplctx);
  1272. rsp.status = 0;
  1273. goto dpl_finished;
  1274. }
  1275. rsp.status = 2; /* No Physical Link exists */
  1276. goto dpl_finished;
  1277. }
  1278. BT_DBG("conn %p", conn);
  1279. hci_disconnect(conn, 0x13);
  1280. dpl_finished:
  1281. send_a2mp_cmd(mgr, hdr->ident,
  1282. A2MP_DISCONNPHYSLINK_RSP, sizeof(rsp), &rsp);
  1283. if (hdev)
  1284. hci_dev_put(hdev);
  1285. return 0;
  1286. }
  1287. static int execute_ctx(struct amp_ctx *ctx, u8 evt_type, void *data)
  1288. {
  1289. struct amp_mgr *mgr = ctx->mgr;
  1290. u8 finished = 0;
  1291. if (!mgr->connected)
  1292. return 0;
  1293. switch (ctx->type) {
  1294. case AMP_GETAMPASSOC:
  1295. finished = getampassoc_handler(ctx, evt_type, data);
  1296. break;
  1297. case AMP_CREATEPHYSLINK:
  1298. finished = createphyslink_handler(ctx, evt_type, data);
  1299. break;
  1300. case AMP_ACCEPTPHYSLINK:
  1301. finished = acceptphyslink_handler(ctx, evt_type, data);
  1302. break;
  1303. }
  1304. if (!finished)
  1305. mod_timer(&(ctx->timer), jiffies +
  1306. msecs_to_jiffies(A2MP_RSP_TIMEOUT));
  1307. else
  1308. destroy_ctx(ctx);
  1309. return finished;
  1310. }
  1311. static int cancel_ctx(struct amp_ctx *ctx)
  1312. {
  1313. return execute_ctx(ctx, AMP_CANCEL, 0);
  1314. }
  1315. static int kill_ctx(struct amp_ctx *ctx)
  1316. {
  1317. return execute_ctx(ctx, AMP_KILLED, 0);
  1318. }
  1319. static void ctx_timeout_worker(struct work_struct *w)
  1320. {
  1321. struct amp_work_ctx_timeout *work = (struct amp_work_ctx_timeout *) w;
  1322. struct amp_ctx *ctx = work->ctx;
  1323. kill_ctx(ctx);
  1324. kfree(work);
  1325. }
  1326. static void ctx_timeout(unsigned long data)
  1327. {
  1328. struct amp_ctx *ctx = (struct amp_ctx *) data;
  1329. struct amp_work_ctx_timeout *work;
  1330. BT_DBG("ctx %p", ctx);
  1331. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1332. if (work) {
  1333. INIT_WORK((struct work_struct *) work, ctx_timeout_worker);
  1334. work->ctx = ctx;
  1335. if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
  1336. kfree(work);
  1337. }
  1338. }
  1339. static void launch_ctx(struct amp_mgr *mgr)
  1340. {
  1341. struct amp_ctx *ctx = NULL;
  1342. BT_DBG("mgr %p", mgr);
  1343. read_lock(&mgr->ctx_list_lock);
  1344. if (!list_empty(&mgr->ctx_list))
  1345. ctx = list_first_entry(&mgr->ctx_list, struct amp_ctx, list);
  1346. read_unlock(&mgr->ctx_list_lock);
  1347. BT_DBG("ctx %p", ctx);
  1348. if (ctx)
  1349. execute_ctx(ctx, AMP_INIT, NULL);
  1350. }
  1351. static inline int a2mp_rsp(struct amp_mgr *mgr, struct sk_buff *skb)
  1352. {
  1353. struct amp_ctx *ctx;
  1354. struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
  1355. u16 hdr_len = le16_to_cpu(hdr->len);
  1356. /* find context waiting for A2MP rsp with this rsp's identifier */
  1357. BT_DBG("ident %d code %d", hdr->ident, hdr->code);
  1358. ctx = get_ctx_a2mp(mgr, hdr->ident);
  1359. if (ctx) {
  1360. execute_ctx(ctx, AMP_A2MP_RSP, skb);
  1361. } else {
  1362. BT_DBG("context not found");
  1363. skb_pull(skb, sizeof(*hdr));
  1364. if (hdr_len > skb->len)
  1365. hdr_len = skb->len;
  1366. skb_pull(skb, hdr_len);
  1367. }
  1368. return 0;
  1369. }
  1370. /* L2CAP-A2MP interface */
/* Parse and dispatch a stream of A2MP commands from one received skb.
 * Several commands may be packed into the buffer; each dispatched handler
 * is expected to skb_pull() its own command so the loop makes progress.
 * On a malformed command (length overrun or zero ident) parsing stops and
 * a COMMAND_REJ is returned to the peer.
 */
static void a2mp_receive(struct sock *sk, struct sk_buff *skb)
{
	/* captured before any skb_pull(); used only for hdr->ident in the
	 * reject path below.  NOTE(review): after pulls this points at the
	 * first header's original location inside the skb data buffer —
	 * assumed still valid for the lifetime of this call; confirm.
	 */
	struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
	int len;
	int err = 0;
	struct amp_mgr *mgr;

	mgr = get_amp_mgr_sk(sk);
	if (!mgr)
		goto a2mp_finished;

	len = skb->len;
	while (len >= sizeof(*hdr)) {
		/* shadows the outer hdr: the next unparsed command */
		struct a2mp_cmd_hdr *hdr = (struct a2mp_cmd_hdr *) skb->data;
		u16 clen = le16_to_cpu(hdr->len);

		BT_DBG("code 0x%02x id %d len %d", hdr->code, hdr->ident, clen);
		if (clen > len || !hdr->ident) {
			err = -EINVAL;
			break;
		}
		switch (hdr->code) {
		case A2MP_COMMAND_REJ:
			command_rej(mgr, skb);
			break;
		case A2MP_DISCOVER_REQ:
			err = discover_req(mgr, skb);
			break;
		case A2MP_CHANGE_NOTIFY:
			err = change_notify(mgr, skb);
			break;
		case A2MP_GETINFO_REQ:
			err = getinfo_req(mgr, skb);
			break;
		case A2MP_GETAMPASSOC_REQ:
			err = getampassoc_req(mgr, skb);
			break;
		case A2MP_CREATEPHYSLINK_REQ:
			err = createphyslink_req(mgr, skb);
			break;
		case A2MP_DISCONNPHYSLINK_REQ:
			err = disconnphyslink_req(mgr, skb);
			break;
		case A2MP_CHANGE_RSP:
		case A2MP_DISCOVER_RSP:
		case A2MP_GETINFO_RSP:
		case A2MP_GETAMPASSOC_RSP:
		case A2MP_CREATEPHYSLINK_RSP:
		case A2MP_DISCONNPHYSLINK_RSP:
			err = a2mp_rsp(mgr, skb);
			break;
		default:
			BT_ERR("Unknown A2MP signaling command 0x%2.2x",
				hdr->code);
			skb_pull(skb, sizeof(*hdr));
			err = -EINVAL;
			break;
		}
		/* handlers consume their bytes; refresh remaining length.
		 * NOTE(review): a handler that consumes nothing would loop
		 * forever here — relies on every case pulling the skb.
		 */
		len = skb->len;
	}
a2mp_finished:
	if (err && mgr) {
		struct a2mp_cmd_rej rej;
		rej.reason = cpu_to_le16(0);
		send_a2mp_cmd(mgr, hdr->ident, A2MP_COMMAND_REJ,
				sizeof(rej), &rej);
	}
}
  1436. /* L2CAP-A2MP interface */
  1437. static int send_a2mp(struct socket *sock, u8 *data, int len)
  1438. {
  1439. struct kvec iv = { data, len };
  1440. struct msghdr msg;
  1441. memset(&msg, 0, sizeof(msg));
  1442. return kernel_sendmsg(sock, &msg, &iv, 1, len);
  1443. }
  1444. static void data_ready_worker(struct work_struct *w)
  1445. {
  1446. struct amp_work_data_ready *work = (struct amp_work_data_ready *) w;
  1447. struct sock *sk = work->sk;
  1448. struct sk_buff *skb;
  1449. /* skb_dequeue() is thread-safe */
  1450. while ((skb = skb_dequeue(&sk->sk_receive_queue))) {
  1451. a2mp_receive(sk, skb);
  1452. kfree_skb(skb);
  1453. }
  1454. sock_put(work->sk);
  1455. kfree(work);
  1456. }
  1457. static void data_ready(struct sock *sk, int bytes)
  1458. {
  1459. struct amp_work_data_ready *work;
  1460. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1461. if (work) {
  1462. INIT_WORK((struct work_struct *) work, data_ready_worker);
  1463. sock_hold(sk);
  1464. work->sk = sk;
  1465. work->bytes = bytes;
  1466. if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
  1467. kfree(work);
  1468. sock_put(sk);
  1469. }
  1470. }
  1471. }
  1472. static void state_change_worker(struct work_struct *w)
  1473. {
  1474. struct amp_work_state_change *work = (struct amp_work_state_change *) w;
  1475. struct amp_mgr *mgr;
  1476. switch (work->sk->sk_state) {
  1477. case BT_CONNECTED:
  1478. /* socket is up */
  1479. BT_DBG("CONNECTED");
  1480. mgr = get_amp_mgr_sk(work->sk);
  1481. if (mgr) {
  1482. mgr->connected = 1;
  1483. if (mgr->skb) {
  1484. l2cap_recv_deferred_frame(work->sk, mgr->skb);
  1485. mgr->skb = NULL;
  1486. }
  1487. launch_ctx(mgr);
  1488. }
  1489. break;
  1490. case BT_CLOSED:
  1491. /* connection is gone */
  1492. BT_DBG("CLOSED");
  1493. mgr = get_amp_mgr_sk(work->sk);
  1494. if (mgr) {
  1495. if (!sock_flag(work->sk, SOCK_DEAD))
  1496. sock_release(mgr->a2mp_sock);
  1497. mgr->a2mp_sock = NULL;
  1498. remove_amp_mgr(mgr);
  1499. }
  1500. break;
  1501. default:
  1502. /* something else happened */
  1503. break;
  1504. }
  1505. sock_put(work->sk);
  1506. kfree(work);
  1507. }
  1508. static void state_change(struct sock *sk)
  1509. {
  1510. struct amp_work_state_change *work;
  1511. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1512. if (work) {
  1513. INIT_WORK((struct work_struct *) work, state_change_worker);
  1514. sock_hold(sk);
  1515. work->sk = sk;
  1516. if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
  1517. kfree(work);
  1518. sock_put(sk);
  1519. }
  1520. }
  1521. }
  1522. static struct socket *open_fixed_channel(bdaddr_t *src, bdaddr_t *dst)
  1523. {
  1524. int err;
  1525. struct socket *sock;
  1526. struct sockaddr_l2 addr;
  1527. struct sock *sk;
  1528. struct l2cap_options opts = {L2CAP_A2MP_DEFAULT_MTU,
  1529. L2CAP_A2MP_DEFAULT_MTU, L2CAP_DEFAULT_FLUSH_TO,
  1530. L2CAP_MODE_ERTM, 1, 0xFF, 1};
  1531. err = sock_create_kern(PF_BLUETOOTH, SOCK_SEQPACKET,
  1532. BTPROTO_L2CAP, &sock);
  1533. if (err) {
  1534. BT_ERR("sock_create_kern failed %d", err);
  1535. return NULL;
  1536. }
  1537. sk = sock->sk;
  1538. sk->sk_data_ready = data_ready;
  1539. sk->sk_state_change = state_change;
  1540. memset(&addr, 0, sizeof(addr));
  1541. bacpy(&addr.l2_bdaddr, src);
  1542. addr.l2_family = AF_BLUETOOTH;
  1543. addr.l2_cid = L2CAP_CID_A2MP;
  1544. err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr));
  1545. if (err) {
  1546. BT_ERR("kernel_bind failed %d", err);
  1547. sock_release(sock);
  1548. return NULL;
  1549. }
  1550. l2cap_fixed_channel_config(sk, &opts);
  1551. memset(&addr, 0, sizeof(addr));
  1552. bacpy(&addr.l2_bdaddr, dst);
  1553. addr.l2_family = AF_BLUETOOTH;
  1554. addr.l2_cid = L2CAP_CID_A2MP;
  1555. err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr),
  1556. O_NONBLOCK);
  1557. if ((err == 0) || (err == -EINPROGRESS))
  1558. return sock;
  1559. else {
  1560. BT_ERR("kernel_connect failed %d", err);
  1561. sock_release(sock);
  1562. return NULL;
  1563. }
  1564. }
  1565. static void conn_ind_worker(struct work_struct *w)
  1566. {
  1567. struct amp_work_conn_ind *work = (struct amp_work_conn_ind *) w;
  1568. struct hci_conn *hcon = work->hcon;
  1569. struct sk_buff *skb = work->skb;
  1570. struct amp_mgr *mgr;
  1571. mgr = get_create_amp_mgr(hcon, skb);
  1572. BT_DBG("mgr %p", mgr);
  1573. hci_conn_put(hcon);
  1574. kfree(work);
  1575. }
  1576. static void create_physical_worker(struct work_struct *w)
  1577. {
  1578. struct amp_work_create_physical *work =
  1579. (struct amp_work_create_physical *) w;
  1580. create_physical(work->conn, work->sk);
  1581. sock_put(work->sk);
  1582. kfree(work);
  1583. }
  1584. static void accept_physical_worker(struct work_struct *w)
  1585. {
  1586. struct amp_work_accept_physical *work =
  1587. (struct amp_work_accept_physical *) w;
  1588. accept_physical(work->conn, work->id, work->sk);
  1589. sock_put(work->sk);
  1590. kfree(work);
  1591. }
  1592. /* L2CAP Fixed Channel interface */
  1593. void amp_conn_ind(struct hci_conn *hcon, struct sk_buff *skb)
  1594. {
  1595. struct amp_work_conn_ind *work;
  1596. BT_DBG("hcon %p, skb %p", hcon, skb);
  1597. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1598. if (work) {
  1599. INIT_WORK((struct work_struct *) work, conn_ind_worker);
  1600. hci_conn_hold(hcon);
  1601. work->hcon = hcon;
  1602. work->skb = skb;
  1603. if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
  1604. hci_conn_put(hcon);
  1605. kfree(work);
  1606. }
  1607. }
  1608. }
  1609. /* L2CAP Physical Link interface */
  1610. void amp_create_physical(struct l2cap_conn *conn, struct sock *sk)
  1611. {
  1612. struct amp_work_create_physical *work;
  1613. BT_DBG("conn %p", conn);
  1614. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1615. if (work) {
  1616. INIT_WORK((struct work_struct *) work, create_physical_worker);
  1617. work->conn = conn;
  1618. work->sk = sk;
  1619. sock_hold(sk);
  1620. if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
  1621. sock_put(sk);
  1622. kfree(work);
  1623. }
  1624. }
  1625. }
  1626. void amp_accept_physical(struct l2cap_conn *conn, u8 id, struct sock *sk)
  1627. {
  1628. struct amp_work_accept_physical *work;
  1629. BT_DBG("conn %p", conn);
  1630. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1631. if (work) {
  1632. INIT_WORK((struct work_struct *) work, accept_physical_worker);
  1633. work->conn = conn;
  1634. work->sk = sk;
  1635. work->id = id;
  1636. sock_hold(sk);
  1637. if (!queue_work(amp_workqueue, (struct work_struct *) work)) {
  1638. sock_put(sk);
  1639. kfree(work);
  1640. }
  1641. }
  1642. }
  1643. /* HCI interface */
  1644. static void amp_cmd_cmplt_worker(struct work_struct *w)
  1645. {
  1646. struct amp_work_cmd_cmplt *work = (struct amp_work_cmd_cmplt *) w;
  1647. struct hci_dev *hdev = work->hdev;
  1648. u16 opcode = work->opcode;
  1649. struct sk_buff *skb = work->skb;
  1650. struct amp_ctx *ctx;
  1651. ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_CMPLT, opcode);
  1652. if (ctx)
  1653. execute_ctx(ctx, AMP_HCI_CMD_CMPLT, skb);
  1654. kfree_skb(skb);
  1655. kfree(w);
  1656. }
  1657. static void amp_cmd_cmplt_evt(struct hci_dev *hdev, u16 opcode,
  1658. struct sk_buff *skb)
  1659. {
  1660. struct amp_work_cmd_cmplt *work;
  1661. struct sk_buff *skbc;
  1662. BT_DBG("hdev %p opcode 0x%x skb %p len %d",
  1663. hdev, opcode, skb, skb->len);
  1664. skbc = skb_clone(skb, GFP_ATOMIC);
  1665. if (!skbc)
  1666. return;
  1667. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1668. if (work) {
  1669. INIT_WORK((struct work_struct *) work, amp_cmd_cmplt_worker);
  1670. work->hdev = hdev;
  1671. work->opcode = opcode;
  1672. work->skb = skbc;
  1673. if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
  1674. kfree(work);
  1675. }
  1676. }
  1677. static void amp_cmd_status_worker(struct work_struct *w)
  1678. {
  1679. struct amp_work_cmd_status *work = (struct amp_work_cmd_status *) w;
  1680. struct hci_dev *hdev = work->hdev;
  1681. u16 opcode = work->opcode;
  1682. u8 status = work->status;
  1683. struct amp_ctx *ctx;
  1684. ctx = get_ctx_hdev(hdev, AMP_HCI_CMD_STATUS, opcode);
  1685. if (ctx)
  1686. execute_ctx(ctx, AMP_HCI_CMD_STATUS, &status);
  1687. kfree(w);
  1688. }
  1689. static void amp_cmd_status_evt(struct hci_dev *hdev, u16 opcode, u8 status)
  1690. {
  1691. struct amp_work_cmd_status *work;
  1692. BT_DBG("hdev %p opcode 0x%x status %d", hdev, opcode, status);
  1693. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1694. if (work) {
  1695. INIT_WORK((struct work_struct *) work, amp_cmd_status_worker);
  1696. work->hdev = hdev;
  1697. work->opcode = opcode;
  1698. work->status = status;
  1699. if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
  1700. kfree(work);
  1701. }
  1702. }
  1703. static void amp_event_worker(struct work_struct *w)
  1704. {
  1705. struct amp_work_event *work = (struct amp_work_event *) w;
  1706. struct hci_dev *hdev = work->hdev;
  1707. u8 event = work->event;
  1708. struct sk_buff *skb = work->skb;
  1709. struct amp_ctx *ctx;
  1710. if (event == HCI_EV_AMP_STATUS_CHANGE) {
  1711. struct hci_ev_amp_status_change *ev;
  1712. if (skb->len < sizeof(*ev))
  1713. goto amp_event_finished;
  1714. ev = (void *) skb->data;
  1715. if (ev->status != 0)
  1716. goto amp_event_finished;
  1717. if (ev->amp_status == hdev->amp_status)
  1718. goto amp_event_finished;
  1719. hdev->amp_status = ev->amp_status;
  1720. send_a2mp_change_notify();
  1721. goto amp_event_finished;
  1722. }
  1723. ctx = get_ctx_hdev(hdev, AMP_HCI_EVENT, (u16) event);
  1724. if (ctx)
  1725. execute_ctx(ctx, AMP_HCI_EVENT, skb);
  1726. amp_event_finished:
  1727. kfree_skb(skb);
  1728. kfree(w);
  1729. }
  1730. static void amp_evt(struct hci_dev *hdev, u8 event, struct sk_buff *skb)
  1731. {
  1732. struct amp_work_event *work;
  1733. struct sk_buff *skbc;
  1734. BT_DBG("hdev %p event 0x%x skb %p", hdev, event, skb);
  1735. skbc = skb_clone(skb, GFP_ATOMIC);
  1736. if (!skbc)
  1737. return;
  1738. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1739. if (work) {
  1740. INIT_WORK((struct work_struct *) work, amp_event_worker);
  1741. work->hdev = hdev;
  1742. work->event = event;
  1743. work->skb = skbc;
  1744. if (queue_work(amp_workqueue, (struct work_struct *) work) == 0)
  1745. kfree(work);
  1746. }
  1747. }
  1748. static void amp_dev_event_worker(struct work_struct *w)
  1749. {
  1750. send_a2mp_change_notify();
  1751. kfree(w);
  1752. }
  1753. static int amp_dev_event(struct notifier_block *this, unsigned long event,
  1754. void *ptr)
  1755. {
  1756. struct hci_dev *hdev = (struct hci_dev *) ptr;
  1757. struct amp_work_event *work;
  1758. if (hdev->amp_type == HCI_BREDR)
  1759. return NOTIFY_DONE;
  1760. switch (event) {
  1761. case HCI_DEV_UNREG:
  1762. case HCI_DEV_REG:
  1763. case HCI_DEV_UP:
  1764. case HCI_DEV_DOWN:
  1765. BT_DBG("hdev %p event %ld", hdev, event);
  1766. work = kmalloc(sizeof(*work), GFP_ATOMIC);
  1767. if (work) {
  1768. INIT_WORK((struct work_struct *) work,
  1769. amp_dev_event_worker);
  1770. if (queue_work(amp_workqueue,
  1771. (struct work_struct *) work) == 0)
  1772. kfree(work);
  1773. }
  1774. }
  1775. return NOTIFY_DONE;
  1776. }
  1777. /* L2CAP module init continued */
/* Notifier hook so AMP controller add/remove/up/down triggers
 * amp_dev_event() above.
 */
static struct notifier_block amp_notifier = {
	.notifier_call = amp_dev_event
};
/* HCI AMP manager callbacks: route command complete/status and AMP
 * events from the HCI core into this module's deferred handlers.
 */
static struct amp_mgr_cb hci_amp = {
	.amp_cmd_complete_event = amp_cmd_cmplt_evt,
	.amp_cmd_status_event = amp_cmd_status_evt,
	.amp_event = amp_evt
};
  1786. int amp_init(void)
  1787. {
  1788. hci_register_amp(&hci_amp);
  1789. hci_register_notifier(&amp_notifier);
  1790. amp_next_handle = 1;
  1791. amp_workqueue = create_singlethread_workqueue("a2mp");
  1792. if (!amp_workqueue)
  1793. return -EPERM;
  1794. return 0;
  1795. }
/* Tear down the AMP layer.  Order matters: detach from HCI and stop
 * notifier delivery first so no new work items can be queued, then
 * drain any pending items before destroying the workqueue.
 */
void amp_exit(void)
{
	hci_unregister_amp(&hci_amp);
	hci_unregister_notifier(&amp_notifier);
	flush_workqueue(amp_workqueue);
	destroy_workqueue(amp_workqueue);
}