trusty-ipc.c

/*
 * Copyright (C) 2015 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/aio.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/idr.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/sched/signal.h>
#include <linux/compat.h>
#include <linux/uio.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/trusty/smcall.h>
#include <linux/trusty/trusty.h>
#include <linux/trusty/trusty_ipc.h>

#define MAX_DEVICES 4

#define REPLY_TIMEOUT 5000
#define TXBUF_TIMEOUT 15000

#define MAX_SRV_NAME_LEN 256
#define MAX_DEV_NAME_LEN 32

#define DEFAULT_MSG_BUF_SIZE PAGE_SIZE
#define DEFAULT_MSG_BUF_ALIGN PAGE_SIZE

#define TIPC_CTRL_ADDR 53
#define TIPC_ANY_ADDR 0xFFFFFFFF

#define TIPC_MIN_LOCAL_ADDR 1024

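/*
 * ioctl interface: TIPC_IOC_CONNECT takes a pointer to a NUL-terminated
 * service (port) name and connects the descriptor's channel to that
 * service.
 */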
#define TIPC_IOC_MAGIC 'r'
#define TIPC_IOC_CONNECT _IOW(TIPC_IOC_MAGIC, 0x80, char *)
#if defined(CONFIG_COMPAT)
#define TIPC_IOC_CONNECT_COMPAT _IOW(TIPC_IOC_MAGIC, 0x80, \
				     compat_uptr_t)
#endif

struct tipc_virtio_dev;

struct tipc_dev_config {
	u32 msg_buf_max_size;
	u32 msg_buf_alignment;
	char dev_name[MAX_DEV_NAME_LEN];
} __packed;

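/*
 * Header prepended to every message exchanged with the secure side:
 * 32-bit source and destination endpoint addresses, a reserved word,
 * the payload length in bytes, flags, and the payload itself.
 */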
struct tipc_msg_hdr {
	u32 src;
	u32 dst;
	u32 reserved;
	u16 len;
	u16 flags;
	u8 data[0];
} __packed;

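/*
 * Control messages travel over the dedicated TIPC_CTRL_ADDR endpoint:
 * the remote side announces going online/offline, answers connection
 * requests with a connection response, and may request disconnection
 * of a given local address.
 */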
enum tipc_ctrl_msg_types {
	TIPC_CTRL_MSGTYPE_GO_ONLINE = 1,
	TIPC_CTRL_MSGTYPE_GO_OFFLINE,
	TIPC_CTRL_MSGTYPE_CONN_REQ,
	TIPC_CTRL_MSGTYPE_CONN_RSP,
	TIPC_CTRL_MSGTYPE_DISC_REQ,
};

struct tipc_ctrl_msg {
	u32 type;
	u32 body_len;
	u8 body[0];
} __packed;

struct tipc_conn_req_body {
	char name[MAX_SRV_NAME_LEN];
} __packed;

struct tipc_conn_rsp_body {
	u32 target;
	u32 status;
	u32 remote;
	u32 max_msg_size;
	u32 max_msg_cnt;
} __packed;

struct tipc_disc_req_body {
	u32 target;
} __packed;

struct tipc_cdev_node {
	struct cdev cdev;
	struct device *dev;
	unsigned int minor;
};

enum tipc_device_state {
	VDS_OFFLINE = 0,
	VDS_ONLINE,
	VDS_DEAD,
};

struct tipc_virtio_dev {
	struct kref refcount;
	struct mutex lock; /* protects access to this device */
	struct virtio_device *vdev;
	struct virtqueue *rxvq;
	struct virtqueue *txvq;
	uint msg_buf_cnt;
	uint msg_buf_max_cnt;
	size_t msg_buf_max_sz;
	uint free_msg_buf_cnt;
	struct list_head free_buf_list;
	wait_queue_head_t sendq;
	struct idr addr_idr;
	enum tipc_device_state state;
	struct tipc_cdev_node cdev_node;
	char cdev_name[MAX_DEV_NAME_LEN];
};

enum tipc_chan_state {
	TIPC_DISCONNECTED = 0,
	TIPC_CONNECTING,
	TIPC_CONNECTED,
	TIPC_STALE,
};

struct tipc_chan {
	struct mutex lock; /* protects channel state */
	struct kref refcount;
	enum tipc_chan_state state;
	struct tipc_virtio_dev *vds;
	const struct tipc_chan_ops *ops;
	void *ops_arg;
	u32 remote;
	u32 local;
	u32 max_msg_size;
	u32 max_msg_cnt;
	char srv_name[MAX_SRV_NAME_LEN];
};

static struct class *tipc_class;
static unsigned int tipc_major;

struct virtio_device *default_vdev;

static DEFINE_IDR(tipc_devices);
static DEFINE_MUTEX(tipc_devices_lock);

static int _match_any(int id, void *p, void *data)
{
	return id;
}

static int _match_data(int id, void *p, void *data)
{
	return (p == data);
}

static void *_alloc_shareable_mem(size_t sz, phys_addr_t *ppa, gfp_t gfp)
{
	return alloc_pages_exact(sz, gfp);
}

static void _free_shareable_mem(size_t sz, void *va, phys_addr_t pa)
{
	free_pages_exact(va, sz);
}

static struct tipc_msg_buf *_alloc_msg_buf(size_t sz)
{
	struct tipc_msg_buf *mb;

	/* allocate tracking structure */
	mb = kzalloc(sizeof(struct tipc_msg_buf), GFP_KERNEL);
	if (!mb)
		return NULL;

	/* allocate buffer that can be shared with secure world */
	mb->buf_va = _alloc_shareable_mem(sz, &mb->buf_pa, GFP_KERNEL);
	if (!mb->buf_va)
		goto err_alloc;

	mb->buf_sz = sz;

	return mb;

err_alloc:
	kfree(mb);
	return NULL;
}

static void _free_msg_buf(struct tipc_msg_buf *mb)
{
	_free_shareable_mem(mb->buf_sz, mb->buf_va, mb->buf_pa);
	kfree(mb);
}

static void _free_msg_buf_list(struct list_head *list)
{
	struct tipc_msg_buf *mb = NULL;

	mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
	while (mb) {
		list_del(&mb->node);
		_free_msg_buf(mb);
		mb = list_first_entry_or_null(list, struct tipc_msg_buf, node);
	}
}

static inline void mb_reset(struct tipc_msg_buf *mb)
{
	mb->wpos = 0;
	mb->rpos = 0;
}

static void _free_vds(struct kref *kref)
{
	struct tipc_virtio_dev *vds =
		container_of(kref, struct tipc_virtio_dev, refcount);
	kfree(vds);
}

static void _free_chan(struct kref *kref)
{
	struct tipc_chan *ch = container_of(kref, struct tipc_chan, refcount);

	if (ch->ops && ch->ops->handle_release)
		ch->ops->handle_release(ch->ops_arg);

	kref_put(&ch->vds->refcount, _free_vds);
	kfree(ch);
}

static struct tipc_msg_buf *vds_alloc_msg_buf(struct tipc_virtio_dev *vds)
{
	return _alloc_msg_buf(vds->msg_buf_max_sz);
}

static void vds_free_msg_buf(struct tipc_virtio_dev *vds,
			     struct tipc_msg_buf *mb)
{
	_free_msg_buf(mb);
}

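/*
 * Returns the tx buffer to the device free list.  The return value is
 * true when the list was previously empty, i.e. when the caller should
 * wake up senders waiting for a free tx buffer.
 */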
static bool _put_txbuf_locked(struct tipc_virtio_dev *vds,
			      struct tipc_msg_buf *mb)
{
	list_add_tail(&mb->node, &vds->free_buf_list);
	return vds->free_msg_buf_cnt++ == 0;
}

static struct tipc_msg_buf *_get_txbuf_locked(struct tipc_virtio_dev *vds)
{
	struct tipc_msg_buf *mb;

	if (vds->state != VDS_ONLINE)
		return ERR_PTR(-ENODEV);

	if (vds->free_msg_buf_cnt) {
		/* take it out of free list */
		mb = list_first_entry(&vds->free_buf_list,
				      struct tipc_msg_buf, node);
		list_del(&mb->node);
		vds->free_msg_buf_cnt--;
	} else {
		if (vds->msg_buf_cnt >= vds->msg_buf_max_cnt)
			return ERR_PTR(-EAGAIN);

		/* try to allocate it */
		mb = _alloc_msg_buf(vds->msg_buf_max_sz);
		if (!mb)
			return ERR_PTR(-ENOMEM);

		vds->msg_buf_cnt++;
	}

	return mb;
}

static struct tipc_msg_buf *_vds_get_txbuf(struct tipc_virtio_dev *vds)
{
	struct tipc_msg_buf *mb;

	mutex_lock(&vds->lock);
	mb = _get_txbuf_locked(vds);
	mutex_unlock(&vds->lock);

	return mb;
}

static void vds_put_txbuf(struct tipc_virtio_dev *vds, struct tipc_msg_buf *mb)
{
	mutex_lock(&vds->lock);
	_put_txbuf_locked(vds, mb);
	wake_up_interruptible(&vds->sendq);
	mutex_unlock(&vds->lock);
}

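/*
 * Gets a tx buffer, waiting up to @timeout milliseconds for one to be
 * released when the device is at its buffer limit.  On success the
 * buffer is reset and space for the message header is reserved, so
 * callers only append payload data.  Returns an ERR_PTR on failure
 * (-EAGAIN when no buffer is available and @timeout is zero,
 * -ETIMEDOUT, -ERESTARTSYS, -ENODEV or -ENOMEM otherwise).
 */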
static struct tipc_msg_buf *vds_get_txbuf(struct tipc_virtio_dev *vds,
					  long timeout)
{
	struct tipc_msg_buf *mb;

	mb = _vds_get_txbuf(vds);

	if ((PTR_ERR(mb) == -EAGAIN) && timeout) {
		DEFINE_WAIT_FUNC(wait, woken_wake_function);

		timeout = msecs_to_jiffies(timeout);
		add_wait_queue(&vds->sendq, &wait);
		for (;;) {
			timeout = wait_woken(&wait, TASK_INTERRUPTIBLE,
					     timeout);
			if (!timeout) {
				mb = ERR_PTR(-ETIMEDOUT);
				break;
			}

			if (signal_pending(current)) {
				mb = ERR_PTR(-ERESTARTSYS);
				break;
			}

			mb = _vds_get_txbuf(vds);
			if (PTR_ERR(mb) != -EAGAIN)
				break;
		}
		remove_wait_queue(&vds->sendq, &wait);
	}

	if (IS_ERR(mb))
		return mb;

	BUG_ON(!mb);

	/* reset and reserve space for message header */
	mb_reset(mb);
	mb_put_data(mb, sizeof(struct tipc_msg_hdr));

	return mb;
}

static int vds_queue_txbuf(struct tipc_virtio_dev *vds,
			   struct tipc_msg_buf *mb)
{
	int err;
	struct scatterlist sg;
	bool need_notify = false;

	mutex_lock(&vds->lock);
	if (vds->state == VDS_ONLINE) {
		sg_init_one(&sg, mb->buf_va, mb->wpos);
		err = virtqueue_add_outbuf(vds->txvq, &sg, 1, mb, GFP_KERNEL);
		need_notify = virtqueue_kick_prepare(vds->txvq);
	} else {
		err = -ENODEV;
	}
	mutex_unlock(&vds->lock);

	if (need_notify)
		virtqueue_notify(vds->txvq);

	return err;
}

static int vds_add_channel(struct tipc_virtio_dev *vds,
			   struct tipc_chan *chan)
{
	int ret;

	mutex_lock(&vds->lock);
	if (vds->state == VDS_ONLINE) {
		ret = idr_alloc(&vds->addr_idr, chan,
				TIPC_MIN_LOCAL_ADDR, TIPC_ANY_ADDR - 1,
				GFP_KERNEL);
		if (ret > 0) {
			chan->local = ret;
			kref_get(&chan->refcount);
			ret = 0;
		}
	} else {
		ret = -EINVAL;
	}
	mutex_unlock(&vds->lock);

	return ret;
}

static void vds_del_channel(struct tipc_virtio_dev *vds,
			    struct tipc_chan *chan)
{
	mutex_lock(&vds->lock);
	if (chan->local) {
		idr_remove(&vds->addr_idr, chan->local);
		chan->local = 0;
		chan->remote = 0;
		kref_put(&chan->refcount, _free_chan);
	}
	mutex_unlock(&vds->lock);
}

static struct tipc_chan *vds_lookup_channel(struct tipc_virtio_dev *vds,
					    u32 addr)
{
	int id;
	struct tipc_chan *chan = NULL;

	mutex_lock(&vds->lock);
	if (addr == TIPC_ANY_ADDR) {
		id = idr_for_each(&vds->addr_idr, _match_any, NULL);
		if (id > 0)
			chan = idr_find(&vds->addr_idr, id);
	} else {
		chan = idr_find(&vds->addr_idr, addr);
	}
	if (chan)
		kref_get(&chan->refcount);
	mutex_unlock(&vds->lock);

	return chan;
}

static struct tipc_chan *vds_create_channel(struct tipc_virtio_dev *vds,
					    const struct tipc_chan_ops *ops,
					    void *ops_arg)
{
	int ret;
	struct tipc_chan *chan = NULL;

	if (!vds)
		return ERR_PTR(-ENOENT);

	if (!ops)
		return ERR_PTR(-EINVAL);

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	kref_get(&vds->refcount);
	chan->vds = vds;
	chan->ops = ops;
	chan->ops_arg = ops_arg;
	mutex_init(&chan->lock);
	kref_init(&chan->refcount);
	chan->state = TIPC_DISCONNECTED;

	ret = vds_add_channel(vds, chan);
	if (ret) {
		kfree(chan);
		kref_put(&vds->refcount, _free_vds);
		return ERR_PTR(ret);
	}

	return chan;
}

static void fill_msg_hdr(struct tipc_msg_buf *mb, u32 src, u32 dst)
{
	struct tipc_msg_hdr *hdr = mb_get_data(mb, sizeof(*hdr));

	hdr->src = src;
	hdr->dst = dst;
	hdr->len = mb_avail_data(mb);
	hdr->flags = 0;
	hdr->reserved = 0;
}

/*****************************************************************************/

struct tipc_chan *tipc_create_channel(struct device *dev,
				      const struct tipc_chan_ops *ops,
				      void *ops_arg)
{
	struct virtio_device *vd;
	struct tipc_chan *chan;
	struct tipc_virtio_dev *vds;

	mutex_lock(&tipc_devices_lock);
	if (dev) {
		vd = container_of(dev, struct virtio_device, dev);
	} else {
		vd = default_vdev;
		if (!vd) {
			mutex_unlock(&tipc_devices_lock);
			return ERR_PTR(-ENOENT);
		}
	}
	vds = vd->priv;
	kref_get(&vds->refcount);
	mutex_unlock(&tipc_devices_lock);

	chan = vds_create_channel(vds, ops, ops_arg);
	kref_put(&vds->refcount, _free_vds);
	return chan;
}
EXPORT_SYMBOL(tipc_create_channel);

struct tipc_msg_buf *tipc_chan_get_rxbuf(struct tipc_chan *chan)
{
	return vds_alloc_msg_buf(chan->vds);
}
EXPORT_SYMBOL(tipc_chan_get_rxbuf);

void tipc_chan_put_rxbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
{
	vds_free_msg_buf(chan->vds, mb);
}
EXPORT_SYMBOL(tipc_chan_put_rxbuf);

struct tipc_msg_buf *tipc_chan_get_txbuf_timeout(struct tipc_chan *chan,
						 long timeout)
{
	return vds_get_txbuf(chan->vds, timeout);
}
EXPORT_SYMBOL(tipc_chan_get_txbuf_timeout);

void tipc_chan_put_txbuf(struct tipc_chan *chan, struct tipc_msg_buf *mb)
{
	vds_put_txbuf(chan->vds, mb);
}
EXPORT_SYMBOL(tipc_chan_put_txbuf);

int tipc_chan_queue_msg(struct tipc_chan *chan, struct tipc_msg_buf *mb)
{
	int err;

	mutex_lock(&chan->lock);
	switch (chan->state) {
	case TIPC_CONNECTED:
		fill_msg_hdr(mb, chan->local, chan->remote);
		err = vds_queue_txbuf(chan->vds, mb);
		if (err) {
			/* this should never happen */
			pr_err("%s: failed to queue tx buffer (%d)\n",
			       __func__, err);
		}
		break;
	case TIPC_DISCONNECTED:
	case TIPC_CONNECTING:
		err = -ENOTCONN;
		break;
	case TIPC_STALE:
		err = -ESHUTDOWN;
		break;
	default:
		err = -EBADFD;
		pr_err("%s: unexpected channel state %d\n",
		       __func__, chan->state);
	}
	mutex_unlock(&chan->lock);
	return err;
}
EXPORT_SYMBOL(tipc_chan_queue_msg);

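/*
 * Initiates a connection to the named service: a CONN_REQ control
 * message carrying the service name is sent to TIPC_CTRL_ADDR and the
 * channel moves to TIPC_CONNECTING.  The connection completes
 * asynchronously when the matching CONN_RSP control message arrives.
 */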
int tipc_chan_connect(struct tipc_chan *chan, const char *name)
{
	int err;
	struct tipc_ctrl_msg *msg;
	struct tipc_conn_req_body *body;
	struct tipc_msg_buf *txbuf;

	txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
	if (IS_ERR(txbuf))
		return PTR_ERR(txbuf);

	/* reserve space for connection request control message */
	msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
	body = (struct tipc_conn_req_body *)msg->body;

	/* fill message */
	msg->type = TIPC_CTRL_MSGTYPE_CONN_REQ;
	msg->body_len = sizeof(*body);

	strncpy(body->name, name, sizeof(body->name));
	body->name[sizeof(body->name)-1] = '\0';

	mutex_lock(&chan->lock);
	switch (chan->state) {
	case TIPC_DISCONNECTED:
		/* save service name we are connecting to */
		strcpy(chan->srv_name, body->name);

		fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
		err = vds_queue_txbuf(chan->vds, txbuf);
		if (err) {
			/* this should never happen */
			pr_err("%s: failed to queue tx buffer (%d)\n",
			       __func__, err);
		} else {
			chan->state = TIPC_CONNECTING;
			txbuf = NULL; /* prevents discarding buffer */
		}
		break;
	case TIPC_CONNECTED:
	case TIPC_CONNECTING:
		/* check if we are trying to connect to the same service */
		if (strcmp(chan->srv_name, body->name) == 0)
			err = 0;
		else
			if (chan->state == TIPC_CONNECTING)
				err = -EALREADY; /* in progress */
			else
				err = -EISCONN; /* already connected */
		break;
	case TIPC_STALE:
		err = -ESHUTDOWN;
		break;
	default:
		err = -EBADFD;
		pr_err("%s: unexpected channel state %d\n",
		       __func__, chan->state);
		break;
	}
	mutex_unlock(&chan->lock);

	if (txbuf)
		tipc_chan_put_txbuf(chan, txbuf); /* discard it */

	return err;
}
EXPORT_SYMBOL(tipc_chan_connect);

int tipc_chan_shutdown(struct tipc_chan *chan)
{
	int err;
	struct tipc_ctrl_msg *msg;
	struct tipc_disc_req_body *body;
	struct tipc_msg_buf *txbuf = NULL;

	/* get tx buffer */
	txbuf = vds_get_txbuf(chan->vds, TXBUF_TIMEOUT);
	if (IS_ERR(txbuf))
		return PTR_ERR(txbuf);

	mutex_lock(&chan->lock);
	if (chan->state == TIPC_CONNECTED || chan->state == TIPC_CONNECTING) {
		/* reserve space for disconnect request control message */
		msg = mb_put_data(txbuf, sizeof(*msg) + sizeof(*body));
		body = (struct tipc_disc_req_body *)msg->body;

		msg->type = TIPC_CTRL_MSGTYPE_DISC_REQ;
		msg->body_len = sizeof(*body);
		body->target = chan->remote;

		fill_msg_hdr(txbuf, chan->local, TIPC_CTRL_ADDR);
		err = vds_queue_txbuf(chan->vds, txbuf);
		if (err) {
			/* this should never happen */
			pr_err("%s: failed to queue tx buffer (%d)\n",
			       __func__, err);
		}
	} else {
		err = -ENOTCONN;
	}
	chan->state = TIPC_STALE;
	mutex_unlock(&chan->lock);

	if (err) {
		/* release buffer */
		tipc_chan_put_txbuf(chan, txbuf);
	}

	return err;
}
EXPORT_SYMBOL(tipc_chan_shutdown);

void tipc_chan_destroy(struct tipc_chan *chan)
{
	vds_del_channel(chan->vds, chan);
	kref_put(&chan->refcount, _free_chan);
}
EXPORT_SYMBOL(tipc_chan_destroy);

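/*
 * Waits interruptibly, up to @timeout milliseconds, for the connection
 * reply: the completion is posted by the channel event callbacks once
 * the channel becomes connected, disconnected or stale.
 */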
static int dn_wait_for_reply(struct tipc_dn_chan *dn, int timeout)
{
	int ret;

	ret = wait_for_completion_interruptible_timeout(&dn->reply_comp,
					msecs_to_jiffies(timeout));
	if (ret < 0)
		return ret;

	mutex_lock(&dn->lock);
	if (!ret) {
		/* no reply from remote */
		dn->state = TIPC_STALE;
		ret = -ETIMEDOUT;
	} else {
		/* got reply */
		if (dn->state == TIPC_CONNECTED) {
			ret = 0;
		} else if (dn->state == TIPC_DISCONNECTED) {
			if (!list_empty(&dn->rx_msg_queue))
				ret = 0;
			else
				ret = -ENOTCONN;
		} else {
			ret = -EIO;
		}
	}
	mutex_unlock(&dn->lock);

	return ret;
}

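/*
 * Channel rx callback for device-node users.  It must hand a buffer
 * back to the caller so it can be re-posted to the rx virtqueue: when
 * connected, the incoming buffer is queued for readers and a freshly
 * allocated one is returned; otherwise (or if allocation fails) the
 * incoming buffer itself is returned, which drops the message.
 */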
struct tipc_msg_buf *dn_handle_msg(void *data, struct tipc_msg_buf *rxbuf)
{
	struct tipc_dn_chan *dn = data;
	struct tipc_msg_buf *newbuf = rxbuf;

	mutex_lock(&dn->lock);
	if (dn->state == TIPC_CONNECTED) {
		/* get new buffer */
		newbuf = tipc_chan_get_rxbuf(dn->chan);
		if (newbuf) {
			/* queue an old buffer and return a new one */
			list_add_tail(&rxbuf->node, &dn->rx_msg_queue);
			wake_up_interruptible(&dn->readq);
		} else {
			/*
			 * return an old buffer effectively discarding
			 * incoming message
			 */
			pr_err("%s: discard incoming message\n", __func__);
			newbuf = rxbuf;
		}
	}
	mutex_unlock(&dn->lock);

	return newbuf;
}

static void dn_connected(struct tipc_dn_chan *dn)
{
	mutex_lock(&dn->lock);
	dn->state = TIPC_CONNECTED;

	/* complete all pending */
	complete(&dn->reply_comp);

	mutex_unlock(&dn->lock);
}

static void dn_disconnected(struct tipc_dn_chan *dn)
{
	mutex_lock(&dn->lock);
	dn->state = TIPC_DISCONNECTED;

	/* complete all pending */
	complete(&dn->reply_comp);

	/* wakeup all readers */
	wake_up_interruptible_all(&dn->readq);

	mutex_unlock(&dn->lock);
}

static void dn_shutdown(struct tipc_dn_chan *dn)
{
	mutex_lock(&dn->lock);

	/* set state to STALE */
	dn->state = TIPC_STALE;

	/* complete all pending */
	complete(&dn->reply_comp);

	/* wakeup all readers */
	wake_up_interruptible_all(&dn->readq);

	mutex_unlock(&dn->lock);
}

static void dn_handle_event(void *data, int event)
{
	struct tipc_dn_chan *dn = data;

	switch (event) {
	case TIPC_CHANNEL_SHUTDOWN:
		dn_shutdown(dn);
		break;

	case TIPC_CHANNEL_DISCONNECTED:
		dn_disconnected(dn);
		break;

	case TIPC_CHANNEL_CONNECTED:
		dn_connected(dn);
		break;

	default:
		pr_err("%s: unhandled event %d\n", __func__, event);
		break;
	}
}

static void dn_handle_release(void *data)
{
	kfree(data);
}

static struct tipc_chan_ops _dn_ops = {
	.handle_msg = dn_handle_msg,
	.handle_event = dn_handle_event,
	.handle_release = dn_handle_release,
};

#define cdev_to_cdn(c) container_of((c), struct tipc_cdev_node, cdev)
#define cdn_to_vds(cdn) container_of((cdn), struct tipc_virtio_dev, cdev_node)

static struct tipc_virtio_dev *_dn_lookup_vds(struct tipc_cdev_node *cdn)
{
	int ret;
	struct tipc_virtio_dev *vds = NULL;

	mutex_lock(&tipc_devices_lock);
	ret = idr_for_each(&tipc_devices, _match_data, cdn);
	if (ret) {
		vds = cdn_to_vds(cdn);
		kref_get(&vds->refcount);
	}
	mutex_unlock(&tipc_devices_lock);
	return vds;
}

static int tipc_open(struct inode *inode, struct file *filp)
{
	int ret;
	struct tipc_virtio_dev *vds;
	struct tipc_dn_chan *dn;
	struct tipc_cdev_node *cdn = cdev_to_cdn(inode->i_cdev);

	vds = _dn_lookup_vds(cdn);
	if (!vds) {
		ret = -ENOENT;
		goto err_vds_lookup;
	}

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn) {
		ret = -ENOMEM;
		goto err_alloc_chan;
	}

	mutex_init(&dn->lock);
	init_waitqueue_head(&dn->readq);
	init_completion(&dn->reply_comp);
	INIT_LIST_HEAD(&dn->rx_msg_queue);

	dn->state = TIPC_DISCONNECTED;

	dn->chan = vds_create_channel(vds, &_dn_ops, dn);
	if (IS_ERR(dn->chan)) {
		ret = PTR_ERR(dn->chan);
		goto err_create_chan;
	}

	filp->private_data = dn;
	kref_put(&vds->refcount, _free_vds);
	return 0;

err_create_chan:
	kfree(dn);
err_alloc_chan:
	kref_put(&vds->refcount, _free_vds);
err_vds_lookup:
	return ret;
}

static int dn_connect_ioctl(struct tipc_dn_chan *dn, char __user *usr_name)
{
	int err;
	char name[MAX_SRV_NAME_LEN];

	/* copy in service name from user space */
	err = strncpy_from_user(name, usr_name, sizeof(name));
	if (err < 0) {
		pr_err("%s: strncpy_from_user (%p) failed (%d)\n",
		       __func__, usr_name, err);
		return err;
	}
	name[sizeof(name)-1] = '\0';

	/* send connect request */
	err = tipc_chan_connect(dn->chan, name);
	if (err)
		return err;

	/* and wait for reply */
	return dn_wait_for_reply(dn, REPLY_TIMEOUT);
}

static long tipc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct tipc_dn_chan *dn = filp->private_data;

	if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC)
		return -EINVAL;

	switch (cmd) {
	case TIPC_IOC_CONNECT:
		ret = dn_connect_ioctl(dn, (char __user *)arg);
		if (ret) {
			pr_err("%s: TIPC_IOC_CONNECT error (%d)!\n",
			       __func__, ret);
			trusty_fast_call32(
				dn->chan->vds->vdev->dev.parent->parent,
				MT_SMC_FC_THREADS, 0, 0, 0);
			trusty_std_call32(
				dn->chan->vds->vdev->dev.parent->parent,
				SMC_SC_NOP, 0, 0, 0);
		}
		break;
	default:
		pr_warn("%s: Unhandled ioctl cmd: 0x%x\n",
			__func__, cmd);
		ret = -EINVAL;
	}
	return ret;
}

#if defined(CONFIG_COMPAT)
static long tipc_compat_ioctl(struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	int ret;
	struct tipc_dn_chan *dn = filp->private_data;
	void __user *user_req = compat_ptr(arg);

	if (_IOC_TYPE(cmd) != TIPC_IOC_MAGIC)
		return -EINVAL;

	switch (cmd) {
	case TIPC_IOC_CONNECT_COMPAT:
		ret = dn_connect_ioctl(dn, user_req);
		break;
	default:
		pr_warn("%s: Unhandled ioctl cmd: 0x%x\n",
			__func__, cmd);
		ret = -EINVAL;
	}
	return ret;
}
#endif

static inline bool _got_rx(struct tipc_dn_chan *dn)
{
	if (dn->state != TIPC_CONNECTED)
		return true;

	if (!list_empty(&dn->rx_msg_queue))
		return true;

	return false;
}

static ssize_t tipc_read_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;
	size_t len;
	struct tipc_msg_buf *mb;
	struct file *filp = iocb->ki_filp;
	struct tipc_dn_chan *dn = filp->private_data;

	mutex_lock(&dn->lock);

	while (list_empty(&dn->rx_msg_queue)) {
		if (dn->state != TIPC_CONNECTED) {
			if (dn->state == TIPC_CONNECTING)
				ret = -ENOTCONN;
			else if (dn->state == TIPC_DISCONNECTED)
				ret = -ENOTCONN;
			else if (dn->state == TIPC_STALE)
				ret = -ESHUTDOWN;
			else
				ret = -EBADFD;
			goto out;
		}

		mutex_unlock(&dn->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(dn->readq, _got_rx(dn)))
			return -ERESTARTSYS;

		mutex_lock(&dn->lock);
	}

	mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node);

	len = mb_avail_data(mb);
	if (len > iov_iter_count(iter)) {
		ret = -EMSGSIZE;
		goto out;
	}

	if (copy_to_iter(mb_get_data(mb, len), len, iter) != len) {
		ret = -EFAULT;
		goto out;
	}

	ret = len;
	list_del(&mb->node);
	tipc_chan_put_rxbuf(dn->chan, mb);

out:
	mutex_unlock(&dn->lock);
	return ret;
}

static ssize_t tipc_write_iter(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t ret;
	size_t len;
	long timeout = TXBUF_TIMEOUT;
	struct tipc_msg_buf *txbuf = NULL;
	struct file *filp = iocb->ki_filp;
	struct tipc_dn_chan *dn = filp->private_data;

	if (filp->f_flags & O_NONBLOCK)
		timeout = 0;

	txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
	if (IS_ERR(txbuf))
		return PTR_ERR(txbuf);

	/* message length */
	len = iov_iter_count(iter);

	/* check available space */
	if (len > mb_avail_space(txbuf)) {
		ret = -EMSGSIZE;
		goto err_out;
	}

	/* copy in message data */
	if (copy_from_iter(mb_put_data(txbuf, len), len, iter) != len) {
		ret = -EFAULT;
		goto err_out;
	}

	/* queue message */
	ret = tipc_chan_queue_msg(dn->chan, txbuf);
	if (ret)
		goto err_out;

	return len;

err_out:
	tipc_chan_put_txbuf(dn->chan, txbuf);
	return ret;
}

static unsigned int tipc_poll(struct file *filp, poll_table *wait)
{
	unsigned int mask = 0;
	struct tipc_dn_chan *dn = filp->private_data;

	mutex_lock(&dn->lock);

	poll_wait(filp, &dn->readq, wait);

	/* Writes always succeed for now */
	mask |= POLLOUT | POLLWRNORM;

	if (!list_empty(&dn->rx_msg_queue))
		mask |= POLLIN | POLLRDNORM;

	if (dn->state != TIPC_CONNECTED)
		mask |= POLLERR;

	mutex_unlock(&dn->lock);
	return mask;
}

static int tipc_release(struct inode *inode, struct file *filp)
{
	struct tipc_dn_chan *dn = filp->private_data;

	dn_shutdown(dn);

	/* free all pending buffers */
	_free_msg_buf_list(&dn->rx_msg_queue);

	/* shutdown channel */
	tipc_chan_shutdown(dn->chan);

	/* and destroy it */
	tipc_chan_destroy(dn->chan);

	return 0;
}

static const struct file_operations tipc_fops = {
	.open = tipc_open,
	.release = tipc_release,
	.unlocked_ioctl = tipc_ioctl,
#if defined(CONFIG_COMPAT)
	.compat_ioctl = tipc_compat_ioctl,
#endif
	.read_iter = tipc_read_iter,
	.write_iter = tipc_write_iter,
	.poll = tipc_poll,
	.owner = THIS_MODULE,
};

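/*
 * Userspace usage sketch (illustrative only; the node name depends on
 * the dev_name reported in the virtio config, and the port name below
 * is hypothetical):
 *
 *	int fd = open("/dev/trusty-ipc-dev0", O_RDWR);
 *	ioctl(fd, TIPC_IOC_CONNECT, "com.example.trusty.service");
 *	write(fd, req, req_len);	// one message per write()
 *	read(fd, rsp, sizeof(rsp));	// one message per read()
 *	close(fd);
 */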
/*****************************************************************************/

#ifdef CONFIG_MTK_ENABLE_GENIEZONE
static struct tipc_virtio_dev *_get_vds(struct tipc_cdev_node *cdn)
{
	if (!cdn) {
		struct tipc_virtio_dev *vds = NULL;

		mutex_lock(&tipc_devices_lock);
		if (default_vdev) {
			vds = default_vdev->priv;
			kref_get(&vds->refcount);
		}
		mutex_unlock(&tipc_devices_lock);
		return vds;
	}

	return _dn_lookup_vds(cdn);
}

static int tipc_open_channel(struct tipc_cdev_node *cdn,
			     struct tipc_dn_chan **o_dn)
{
	int ret;
	struct tipc_virtio_dev *vds;
	struct tipc_dn_chan *dn;

	vds = _get_vds(cdn);
	if (!vds) {
		ret = -ENOENT;
		goto err_vds_lookup;
	}

	dn = kzalloc(sizeof(*dn), GFP_KERNEL);
	if (!dn) {
		ret = -ENOMEM;
		goto err_alloc_chan;
	}

	mutex_init(&dn->lock);
	init_waitqueue_head(&dn->readq);
	init_completion(&dn->reply_comp);
	INIT_LIST_HEAD(&dn->rx_msg_queue);

	dn->state = TIPC_DISCONNECTED;

	dn->chan = vds_create_channel(vds, &_dn_ops, dn);
	if (IS_ERR(dn->chan)) {
		ret = PTR_ERR(dn->chan);
		goto err_create_chan;
	}

	kref_put(&vds->refcount, _free_vds);
	*o_dn = dn;
	return 0;

err_create_chan:
	kfree(dn);
err_alloc_chan:
	kref_put(&vds->refcount, _free_vds);
err_vds_lookup:
	return ret;
}

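/*
 * In-kernel client API: opens a channel on the default virtio device,
 * connects it to @port and waits for the connection reply.  The handle
 * is then used with tipc_k_read()/tipc_k_write() and released with
 * tipc_k_disconnect().
 */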
int tipc_k_connect(struct tipc_k_handle *h, const char *port)
{
	int err;
	struct tipc_dn_chan *dn = NULL;

	err = tipc_open_channel(NULL, &dn);
	if (err)
		return err;

	h->dn = dn;

	/* send connect request */
	err = tipc_chan_connect(dn->chan, port);
	if (err)
		return err;

	/* and wait for reply */
	return dn_wait_for_reply(dn, REPLY_TIMEOUT);
}
EXPORT_SYMBOL(tipc_k_connect);

int tipc_k_disconnect(struct tipc_k_handle *h)
{
	struct tipc_dn_chan *dn = h->dn;

	dn_shutdown(dn);

	/* free all pending buffers */
	_free_msg_buf_list(&dn->rx_msg_queue);

	/* shutdown channel */
	tipc_chan_shutdown(dn->chan);

	/* and destroy it */
	tipc_chan_destroy(dn->chan);

	/* dn is freed in dn_handle_release() when the channel is released */
#if 0
	kfree(dn);
#endif
	return 0;
}
EXPORT_SYMBOL(tipc_k_disconnect);

ssize_t tipc_k_read(struct tipc_k_handle *h, void *buf, size_t buf_len,
		    unsigned int flags)
{
	ssize_t ret;
	size_t data_len;
	struct tipc_msg_buf *mb;
	struct tipc_dn_chan *dn = (struct tipc_dn_chan *)h->dn;

	mutex_lock(&dn->lock);

	while (list_empty(&dn->rx_msg_queue)) {
		if (dn->state != TIPC_CONNECTED) {
			if (dn->state == TIPC_CONNECTING)
				ret = -ENOTCONN;
			else if (dn->state == TIPC_DISCONNECTED)
				ret = -ENOTCONN;
			else if (dn->state == TIPC_STALE)
				ret = -ESHUTDOWN;
			else
				ret = -EBADFD;
			goto out;
		}

		mutex_unlock(&dn->lock);

		if (flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(dn->readq, _got_rx(dn)))
			return -ERESTARTSYS;

		mutex_lock(&dn->lock);
	}

	mb = list_first_entry(&dn->rx_msg_queue, struct tipc_msg_buf, node);

	data_len = mb_avail_data(mb);
	if (data_len > buf_len) {
		ret = -EMSGSIZE;
		goto out;
	}

	memcpy(buf, mb_get_data(mb, data_len), data_len);

	ret = data_len;
	list_del(&mb->node);
	tipc_chan_put_rxbuf(dn->chan, mb);

out:
	mutex_unlock(&dn->lock);
	return ret;
}
EXPORT_SYMBOL(tipc_k_read);

ssize_t tipc_k_write(struct tipc_k_handle *h, void *buf, size_t len,
		     unsigned int flags)
{
	ssize_t ret;
	long timeout = TXBUF_TIMEOUT;
	struct tipc_msg_buf *txbuf = NULL;
	struct tipc_dn_chan *dn = (struct tipc_dn_chan *)h->dn;

	if (flags & O_NONBLOCK)
		timeout = 0;

	txbuf = tipc_chan_get_txbuf_timeout(dn->chan, timeout);
	if (IS_ERR(txbuf))
		return PTR_ERR(txbuf);

	/* check available space */
	if (len > mb_avail_space(txbuf)) {
		ret = -EMSGSIZE;
		goto err_out;
	}

	/* copy in message data */
	memcpy(mb_put_data(txbuf, len), buf, len);

	/* queue message */
	ret = tipc_chan_queue_msg(dn->chan, txbuf);
	if (ret)
		goto err_out;

	return len;

err_out:
	tipc_chan_put_txbuf(dn->chan, txbuf);
	return ret;
}
EXPORT_SYMBOL(tipc_k_write);
#endif /* end of CONFIG_MTK_ENABLE_GENIEZONE */

/*****************************************************************************/

static void chan_trigger_event(struct tipc_chan *chan, int event)
{
	if (!event)
		return;

	chan->ops->handle_event(chan->ops_arg, event);
}

static void _cleanup_vq(struct virtqueue *vq)
{
	struct tipc_msg_buf *mb;

	while ((mb = virtqueue_detach_unused_buf(vq)) != NULL)
		_free_msg_buf(mb);
}

static int _create_cdev_node(struct device *parent,
			     struct tipc_cdev_node *cdn,
			     const char *name)
{
	int ret;
	dev_t devt;

	if (!name) {
		dev_dbg(parent, "%s: cdev name has to be provided\n",
			__func__);
		return -EINVAL;
	}

	/* allocate minor */
	ret = idr_alloc(&tipc_devices, cdn, 0, MAX_DEVICES-1, GFP_KERNEL);
	if (ret < 0) {
		dev_dbg(parent, "%s: failed (%d) to get id\n",
			__func__, ret);
		return ret;
	}

	cdn->minor = ret;
	cdev_init(&cdn->cdev, &tipc_fops);
	cdn->cdev.owner = THIS_MODULE;

	/* Add character device */
	devt = MKDEV(tipc_major, cdn->minor);
	ret = cdev_add(&cdn->cdev, devt, 1);
	if (ret) {
		dev_dbg(parent, "%s: cdev_add failed (%d)\n",
			__func__, ret);
		goto err_add_cdev;
	}

	/* Create a device node */
	cdn->dev = device_create(tipc_class, parent,
				 devt, NULL, "trusty-ipc-%s", name);
	if (IS_ERR(cdn->dev)) {
		ret = PTR_ERR(cdn->dev);
		dev_dbg(parent, "%s: device_create failed: %d\n",
			__func__, ret);
		goto err_device_create;
	}

	return 0;

err_device_create:
	cdn->dev = NULL;
	cdev_del(&cdn->cdev);
err_add_cdev:
	idr_remove(&tipc_devices, cdn->minor);
	return ret;
}

static void create_cdev_node(struct tipc_virtio_dev *vds,
			     struct tipc_cdev_node *cdn)
{
	int err;

	mutex_lock(&tipc_devices_lock);

	if (!default_vdev) {
		kref_get(&vds->refcount);
		default_vdev = vds->vdev;
	}

	if (vds->cdev_name[0] && !cdn->dev) {
		kref_get(&vds->refcount);
		err = _create_cdev_node(&vds->vdev->dev, cdn, vds->cdev_name);
		if (err) {
			dev_err(&vds->vdev->dev,
				"failed (%d) to create cdev node\n", err);
			kref_put(&vds->refcount, _free_vds);
		}
	}

	mutex_unlock(&tipc_devices_lock);
}

static void destroy_cdev_node(struct tipc_virtio_dev *vds,
			      struct tipc_cdev_node *cdn)
{
	mutex_lock(&tipc_devices_lock);

	if (cdn->dev) {
		device_destroy(tipc_class, MKDEV(tipc_major, cdn->minor));
		cdev_del(&cdn->cdev);
		idr_remove(&tipc_devices, cdn->minor);
		cdn->dev = NULL;
		kref_put(&vds->refcount, _free_vds);
	}

	if (default_vdev == vds->vdev) {
		default_vdev = NULL;
		kref_put(&vds->refcount, _free_vds);
	}

	mutex_unlock(&tipc_devices_lock);
}

static void _go_online(struct tipc_virtio_dev *vds)
{
	mutex_lock(&vds->lock);
	if (vds->state == VDS_OFFLINE)
		vds->state = VDS_ONLINE;
	mutex_unlock(&vds->lock);

	create_cdev_node(vds, &vds->cdev_node);

	dev_info(&vds->vdev->dev, "is online\n");
}

static void _go_offline(struct tipc_virtio_dev *vds)
{
	struct tipc_chan *chan;

	/* change state to OFFLINE */
	mutex_lock(&vds->lock);
	if (vds->state != VDS_ONLINE) {
		mutex_unlock(&vds->lock);
		return;
	}
	vds->state = VDS_OFFLINE;
	mutex_unlock(&vds->lock);

	/* wakeup all waiters */
	wake_up_interruptible_all(&vds->sendq);

	/* shutdown all channels */
	while ((chan = vds_lookup_channel(vds, TIPC_ANY_ADDR))) {
		mutex_lock(&chan->lock);
		chan->state = TIPC_STALE;
		chan->remote = 0;
		chan_trigger_event(chan, TIPC_CHANNEL_SHUTDOWN);
		mutex_unlock(&chan->lock);
		kref_put(&chan->refcount, _free_chan);
	}

	/* shutdown device node */
	destroy_cdev_node(vds, &vds->cdev_node);

	dev_info(&vds->vdev->dev, "is offline\n");
}

static void _handle_conn_rsp(struct tipc_virtio_dev *vds,
			     struct tipc_conn_rsp_body *rsp, size_t len)
{
	struct tipc_chan *chan;

	if (sizeof(*rsp) != len) {
		dev_err(&vds->vdev->dev, "%s: Invalid response length %zd\n",
			__func__, len);
		return;
	}

	dev_dbg(&vds->vdev->dev,
		"%s: connection response: for addr 0x%x: "
		"status %d remote addr 0x%x\n",
		__func__, rsp->target, rsp->status, rsp->remote);

	/* Lookup channel */
	chan = vds_lookup_channel(vds, rsp->target);
	if (chan) {
		mutex_lock(&chan->lock);
		if (chan->state == TIPC_CONNECTING) {
			if (!rsp->status) {
				chan->state = TIPC_CONNECTED;
				chan->remote = rsp->remote;
				chan->max_msg_cnt = rsp->max_msg_cnt;
				chan->max_msg_size = rsp->max_msg_size;
				chan_trigger_event(chan,
						   TIPC_CHANNEL_CONNECTED);
			} else {
				chan->state = TIPC_DISCONNECTED;
				chan->remote = 0;
				chan_trigger_event(chan,
						   TIPC_CHANNEL_DISCONNECTED);
			}
		}
		mutex_unlock(&chan->lock);
		kref_put(&chan->refcount, _free_chan);
	}
}

static void _handle_disc_req(struct tipc_virtio_dev *vds,
			     struct tipc_disc_req_body *req, size_t len)
{
	struct tipc_chan *chan;

	if (sizeof(*req) != len) {
		dev_err(&vds->vdev->dev, "%s: Invalid request length %zd\n",
			__func__, len);
		return;
	}

	dev_dbg(&vds->vdev->dev, "%s: disconnect request: for addr 0x%x\n",
		__func__, req->target);

	chan = vds_lookup_channel(vds, req->target);
	if (chan) {
		mutex_lock(&chan->lock);
		if (chan->state == TIPC_CONNECTED ||
		    chan->state == TIPC_CONNECTING) {
			chan->state = TIPC_DISCONNECTED;
			chan->remote = 0;
			chan_trigger_event(chan, TIPC_CHANNEL_DISCONNECTED);
		}
		mutex_unlock(&chan->lock);
		kref_put(&chan->refcount, _free_chan);
	}
}

static void _handle_ctrl_msg(struct tipc_virtio_dev *vds,
			     void *data, int len, u32 src)
{
	struct tipc_ctrl_msg *msg = data;

	if ((len < sizeof(*msg)) || (sizeof(*msg) + msg->body_len != len)) {
		dev_err(&vds->vdev->dev,
			"%s: Invalid message length ( %d vs. %d)\n",
			__func__, (int)(sizeof(*msg) + msg->body_len), len);
		return;
	}

	dev_dbg(&vds->vdev->dev,
		"%s: Incoming ctrl message: src 0x%x type %d len %d\n",
		__func__, src, msg->type, msg->body_len);

	switch (msg->type) {
	case TIPC_CTRL_MSGTYPE_GO_ONLINE:
		_go_online(vds);
		break;

	case TIPC_CTRL_MSGTYPE_GO_OFFLINE:
		_go_offline(vds);
		break;

	case TIPC_CTRL_MSGTYPE_CONN_RSP:
		_handle_conn_rsp(vds, (struct tipc_conn_rsp_body *)msg->body,
				 msg->body_len);
		break;

	case TIPC_CTRL_MSGTYPE_DISC_REQ:
		_handle_disc_req(vds, (struct tipc_disc_req_body *)msg->body,
				 msg->body_len);
		break;

	default:
		dev_warn(&vds->vdev->dev,
			 "%s: Unexpected message type: %d\n",
			 __func__, msg->type);
	}
}

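/*
 * Validates an incoming rx buffer, dispatches it either to the control
 * endpoint handler or to the owning channel's handle_msg callback, and
 * re-posts a buffer to the rx virtqueue so the ring stays full.
 */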
static int _handle_rxbuf(struct tipc_virtio_dev *vds,
			 struct tipc_msg_buf *rxbuf, size_t rxlen)
{
	int err;
	struct scatterlist sg;
	struct tipc_msg_hdr *msg;
	struct device *dev = &vds->vdev->dev;

	/* message sanity check */
	if (rxlen > rxbuf->buf_sz) {
		dev_warn(dev, "inbound msg is too big: %zd\n", rxlen);
		goto drop_it;
	}

	if (rxlen < sizeof(*msg)) {
		dev_warn(dev, "inbound msg is too short: %zd\n", rxlen);
		goto drop_it;
	}

	/* reset buffer and put data */
	mb_reset(rxbuf);
	mb_put_data(rxbuf, rxlen);

	/* get message header */
	msg = mb_get_data(rxbuf, sizeof(*msg));
	if (mb_avail_data(rxbuf) != msg->len) {
		dev_warn(dev, "inbound msg length mismatch: (%d vs. %d)\n",
			 (uint) mb_avail_data(rxbuf), (uint)msg->len);
		goto drop_it;
	}

	dev_dbg(dev, "From: %d, To: %d, Len: %d, Flags: 0x%x, Reserved: %d\n",
		msg->src, msg->dst, msg->len, msg->flags, msg->reserved);

	/* message directed to control endpoint is a special case */
	if (msg->dst == TIPC_CTRL_ADDR) {
		_handle_ctrl_msg(vds, msg->data, msg->len, msg->src);
	} else {
		struct tipc_chan *chan = NULL;

		/* Lookup channel */
		chan = vds_lookup_channel(vds, msg->dst);
		if (chan) {
			/* handle it */
			rxbuf = chan->ops->handle_msg(chan->ops_arg, rxbuf);
			BUG_ON(!rxbuf);
			kref_put(&chan->refcount, _free_chan);
		}
	}

drop_it:
	/* add the buffer back to the virtqueue */
	sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz);
	err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
		return err;
	}

	return 0;
}

static void _rxvq_cb(struct virtqueue *rxvq)
{
	unsigned int len;
	struct tipc_msg_buf *mb;
	unsigned int msg_cnt = 0;
	struct tipc_virtio_dev *vds = rxvq->vdev->priv;

	while ((mb = virtqueue_get_buf(rxvq, &len)) != NULL) {
		if (_handle_rxbuf(vds, mb, len))
			break;
		msg_cnt++;
	}

	/* tell the other side that we added rx buffers */
	if (msg_cnt)
		virtqueue_kick(rxvq);
}

static void _txvq_cb(struct virtqueue *txvq)
{
	unsigned int len;
	struct tipc_msg_buf *mb;
	bool need_wakeup = false;
	struct tipc_virtio_dev *vds = txvq->vdev->priv;

	dev_dbg(&txvq->vdev->dev, "%s\n", __func__);

	/* detach all buffers */
	mutex_lock(&vds->lock);
	while ((mb = virtqueue_get_buf(txvq, &len)) != NULL)
		need_wakeup |= _put_txbuf_locked(vds, mb);
	mutex_unlock(&vds->lock);

	if (need_wakeup) {
		/* wake up potential senders waiting for a tx buffer */
		wake_up_interruptible_all(&vds->sendq);
	}
}

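/*
 * Virtio probe: allocates the per-device state, reads the (optional)
 * device configuration, sets up the rx/tx virtqueues and pre-fills the
 * rx ring with message buffers.  The device stays offline until the
 * secure side sends a GO_ONLINE control message.
 */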
static int tipc_virtio_probe(struct virtio_device *vdev)
{
	int err, i;
	struct tipc_virtio_dev *vds;
	struct tipc_dev_config config;
	struct virtqueue *vqs[2];
	vq_callback_t *vq_cbs[] = {_rxvq_cb, _txvq_cb};
	const char *vq_names[] = { "rx", "tx" };

	dev_dbg(&vdev->dev, "%s:\n", __func__);

	vds = kzalloc(sizeof(*vds), GFP_KERNEL);
	if (!vds)
		return -ENOMEM;

	vds->vdev = vdev;

	mutex_init(&vds->lock);
	kref_init(&vds->refcount);
	init_waitqueue_head(&vds->sendq);
	INIT_LIST_HEAD(&vds->free_buf_list);
	idr_init(&vds->addr_idr);

	/* set default max message size and alignment */
	memset(&config, 0, sizeof(config));
	config.msg_buf_max_size = DEFAULT_MSG_BUF_SIZE;
	config.msg_buf_alignment = DEFAULT_MSG_BUF_ALIGN;

	/* get configuration if present */
	vdev->config->get(vdev, 0, &config, sizeof(config));

	/* copy dev name */
	strncpy(vds->cdev_name, config.dev_name, sizeof(vds->cdev_name));
	vds->cdev_name[sizeof(vds->cdev_name)-1] = '\0';

	/* find virtqueues (rx and tx, in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, vq_names, NULL, NULL);
	if (err)
		goto err_find_vqs;

	vds->rxvq = vqs[0];
	vds->txvq = vqs[1];

	/* save max buffer size and count */
	vds->msg_buf_max_sz = config.msg_buf_max_size;
	vds->msg_buf_max_cnt = virtqueue_get_vring_size(vds->txvq);

	/* set up the receive buffers */
	for (i = 0; i < virtqueue_get_vring_size(vds->rxvq); i++) {
		struct scatterlist sg;
		struct tipc_msg_buf *rxbuf;

		rxbuf = _alloc_msg_buf(vds->msg_buf_max_sz);
		if (!rxbuf) {
			dev_err(&vdev->dev, "failed to allocate rx buffer\n");
			err = -ENOMEM;
			goto err_free_rx_buffers;
		}

		sg_init_one(&sg, rxbuf->buf_va, rxbuf->buf_sz);
		err = virtqueue_add_inbuf(vds->rxvq, &sg, 1, rxbuf, GFP_KERNEL);
		WARN_ON(err); /* sanity check; this can't really happen */
	}

	vdev->priv = vds;
	vds->state = VDS_OFFLINE;

	dev_dbg(&vdev->dev, "%s: done\n", __func__);
	return 0;

err_free_rx_buffers:
	_cleanup_vq(vds->rxvq);
err_find_vqs:
	kref_put(&vds->refcount, _free_vds);
	return err;
}

static void tipc_virtio_remove(struct virtio_device *vdev)
{
	struct tipc_virtio_dev *vds = vdev->priv;

	_go_offline(vds);

	mutex_lock(&vds->lock);
	vds->state = VDS_DEAD;
	vds->vdev = NULL;
	mutex_unlock(&vds->lock);

	vdev->config->reset(vdev);

	idr_destroy(&vds->addr_idr);

	_cleanup_vq(vds->rxvq);
	_cleanup_vq(vds->txvq);
	_free_msg_buf_list(&vds->free_buf_list);

	vdev->config->del_vqs(vds->vdev);

	kref_put(&vds->refcount, _free_vds);
}

static struct virtio_device_id tipc_virtio_id_table[] = {
	{ VIRTIO_ID_TRUSTY_IPC, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	0,
};

static struct virtio_driver virtio_tipc_driver = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = tipc_virtio_id_table,
	.probe = tipc_virtio_probe,
	.remove = tipc_virtio_remove,
};

static int __init tipc_init(void)
{
	int ret;
	dev_t dev;

	ret = alloc_chrdev_region(&dev, 0, MAX_DEVICES, KBUILD_MODNAME);
	if (ret) {
		pr_err("%s: alloc_chrdev_region failed: %d\n", __func__, ret);
		return ret;
	}

	tipc_major = MAJOR(dev);

	tipc_class = class_create(THIS_MODULE, KBUILD_MODNAME);
	if (IS_ERR(tipc_class)) {
		ret = PTR_ERR(tipc_class);
		pr_err("%s: class_create failed: %d\n", __func__, ret);
		goto err_class_create;
	}

	ret = register_virtio_driver(&virtio_tipc_driver);
	if (ret) {
		pr_err("failed to register virtio driver: %d\n", ret);
		goto err_register_virtio_drv;
	}

	return 0;

err_register_virtio_drv:
	class_destroy(tipc_class);

err_class_create:
	unregister_chrdev_region(dev, MAX_DEVICES);
	return ret;
}

static void __exit tipc_exit(void)
{
	unregister_virtio_driver(&virtio_tipc_driver);
	class_destroy(tipc_class);
	unregister_chrdev_region(MKDEV(tipc_major, 0), MAX_DEVICES);
}

/* We need to init this early */
subsys_initcall(tipc_init);
module_exit(tipc_exit);

MODULE_DEVICE_TABLE(tipc, tipc_virtio_id_table);
MODULE_DESCRIPTION("Trusty IPC driver");
MODULE_LICENSE("GPL v2");