virtio_rpmsg_bus.c
/*
 * Virtio-based remote processor messaging bus
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 * Copyright (C) 2011 Google, Inc.
 *
 * Ohad Ben-Cohen <ohad@wizery.com>
 * Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/rpmsg.h>
#include <linux/mutex.h>
/**
 * struct virtproc_info - virtual remote processor state
 * @vdev:	the virtio device
 * @rvq:	rx virtqueue
 * @svq:	tx virtqueue
 * @rbufs:	kernel address of rx buffers
 * @sbufs:	kernel address of tx buffers
 * @last_sbuf:	index of last tx buffer used
 * @bufs_dma:	dma base addr of the buffers
 * @tx_lock:	protects svq, sbufs and sleepers, to allow concurrent senders.
 *		sending a message might require waking up a dozing remote
 *		processor, which involves sleeping, hence the mutex.
 * @endpoints:	idr of local endpoints, allows fast retrieval
 * @endpoints_lock: lock of the endpoints set
 * @sendq:	wait queue of sending contexts waiting for a tx buffer
 * @sleepers:	number of senders that are waiting for a tx buffer
 * @ns_ept:	the bus's name service endpoint
 *
 * This structure stores the rpmsg state of a given virtio remote processor
 * device (there might be several virtio proc devices for each physical
 * remote processor).
 */
struct virtproc_info {
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq;
	void *rbufs, *sbufs;
	int last_sbuf;
	dma_addr_t bufs_dma;
	struct mutex tx_lock;
	struct idr endpoints;
	struct mutex endpoints_lock;
	wait_queue_head_t sendq;
	atomic_t sleepers;
	struct rpmsg_endpoint *ns_ept;
};
/**
 * struct rpmsg_channel_info - internal channel info representation
 * @name: name of service
 * @src: local address
 * @dst: destination address
 */
struct rpmsg_channel_info {
	char name[RPMSG_NAME_SIZE];
	u32 src;
	u32 dst;
};

#define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev)
#define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv)

/*
 * We're allocating 512 buffers of 512 bytes for communications, and then
 * using the first 256 buffers for RX, and the last 256 buffers for TX.
 *
 * Each buffer will have 16 bytes for the msg header and 496 bytes for
 * the payload.
 *
 * This will require a total space of 256KB for the buffers.
 *
 * We might also want to add support for user-provided buffers in time.
 * This will allow bigger buffer size flexibility, and can also be used
 * to achieve zero-copy messaging.
 *
 * Note that these numbers are purely a decision of this driver - we
 * can change this without changing anything in the firmware of the remote
 * processor.
 */
#define RPMSG_NUM_BUFS		(512)
#define RPMSG_BUF_SIZE		(512)
#define RPMSG_TOTAL_BUF_SPACE	(RPMSG_NUM_BUFS * RPMSG_BUF_SIZE)
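
/*
 * For reference, the arithmetic behind the numbers above works out as
 * follows (a sketch, assuming the struct rpmsg_hdr layout declared in
 * include/linux/rpmsg.h: three u32 fields, two u16 fields and a flexible
 * data[] array, i.e. a 16-byte header):
 *
 *	RPMSG_TOTAL_BUF_SPACE = 512 * 512 bytes         = 256 KB
 *	rx area (rbufs)       = first 256 buffers       = 128 KB
 *	tx area (sbufs)       = last  256 buffers       = 128 KB
 *	max payload per msg   = 512 - sizeof(struct rpmsg_hdr) = 496 bytes
 */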
/*
 * Local addresses are dynamically allocated on-demand.
 * We do not dynamically assign addresses from the low 1024 range,
 * in order to reserve that address range for predefined services.
 */
#define RPMSG_RESERVED_ADDRESSES	(1024)

/* Address 53 is reserved for advertising remote services */
#define RPMSG_NS_ADDR			(53)
/* sysfs show configuration fields */
#define rpmsg_show_attr(field, path, format_string)			\
static ssize_t								\
field##_show(struct device *dev,					\
			struct device_attribute *attr, char *buf)	\
{									\
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);		\
									\
	return sprintf(buf, format_string, rpdev->path);		\
}

/* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */
rpmsg_show_attr(name, id.name, "%s\n");
rpmsg_show_attr(src, src, "0x%x\n");
rpmsg_show_attr(dst, dst, "0x%x\n");
rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n");
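
/*
 * As an illustration (not part of the original file), the second
 * invocation above expands to roughly the following show routine for
 * the "src" sysfs attribute:
 *
 *	static ssize_t src_show(struct device *dev,
 *				struct device_attribute *attr, char *buf)
 *	{
 *		struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
 *
 *		return sprintf(buf, "0x%x\n", rpdev->src);
 *	}
 */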
/*
 * Unique (and free running) index for rpmsg devices.
 *
 * Yeah, we're not recycling those numbers (yet?). will be easy
 * to change if/when we want to.
 */
static unsigned int rpmsg_dev_index;

static ssize_t modalias_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name);
}

static struct device_attribute rpmsg_dev_attrs[] = {
	__ATTR_RO(name),
	__ATTR_RO(modalias),
	__ATTR_RO(dst),
	__ATTR_RO(src),
	__ATTR_RO(announce),
	__ATTR_NULL
};
/* rpmsg devices and drivers are matched using the service name */
static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev,
				 const struct rpmsg_device_id *id)
{
	return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0;
}

/* match rpmsg channel and rpmsg driver */
static int rpmsg_dev_match(struct device *dev, struct device_driver *drv)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv);
	const struct rpmsg_device_id *ids = rpdrv->id_table;
	unsigned int i;

	for (i = 0; ids[i].name[0]; i++)
		if (rpmsg_id_match(rpdev, &ids[i]))
			return 1;

	return 0;
}

static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT,
			      rpdev->id.name);
}
/**
 * __ept_release() - deallocate an rpmsg endpoint
 * @kref: the ept's reference count
 *
 * This function deallocates an ept, and is invoked when its @kref refcount
 * drops to zero.
 *
 * Never invoke this function directly!
 */
static void __ept_release(struct kref *kref)
{
	struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint,
						  refcount);
	/*
	 * At this point no one holds a reference to ept anymore,
	 * so we can directly free it
	 */
	kfree(ept);
}

/* for more info, see below documentation of rpmsg_create_ept() */
static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp,
		struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb,
		void *priv, u32 addr)
{
	int err, tmpaddr, request;
	struct rpmsg_endpoint *ept;
	struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev;

	if (!idr_pre_get(&vrp->endpoints, GFP_KERNEL))
		return NULL;

	ept = kzalloc(sizeof(*ept), GFP_KERNEL);
	if (!ept) {
		dev_err(dev, "failed to kzalloc a new ept\n");
		return NULL;
	}

	kref_init(&ept->refcount);
	mutex_init(&ept->cb_lock);

	ept->rpdev = rpdev;
	ept->cb = cb;
	ept->priv = priv;

	/* do we need to allocate a local address ? */
	request = addr == RPMSG_ADDR_ANY ? RPMSG_RESERVED_ADDRESSES : addr;

	mutex_lock(&vrp->endpoints_lock);

	/* bind the endpoint to an rpmsg address (and allocate one if needed) */
	err = idr_get_new_above(&vrp->endpoints, ept, request, &tmpaddr);
	if (err) {
		dev_err(dev, "idr_get_new_above failed: %d\n", err);
		goto free_ept;
	}

	/* make sure the user's address request is fulfilled, if relevant */
	if (addr != RPMSG_ADDR_ANY && tmpaddr != addr) {
		dev_err(dev, "address 0x%x already in use\n", addr);
		goto rem_idr;
	}

	ept->addr = tmpaddr;

	mutex_unlock(&vrp->endpoints_lock);

	return ept;

rem_idr:
	idr_remove(&vrp->endpoints, request);
free_ept:
	mutex_unlock(&vrp->endpoints_lock);
	kref_put(&ept->refcount, __ept_release);
	return NULL;
}
/**
 * rpmsg_create_ept() - create a new rpmsg_endpoint
 * @rpdev: rpmsg channel device
 * @cb: rx callback handler
 * @priv: private data for the driver's use
 * @addr: local rpmsg address to bind with @cb
 *
 * Every rpmsg address in the system is bound to an rx callback (so when
 * inbound messages arrive, they are dispatched by the rpmsg bus using the
 * appropriate callback handler) by means of an rpmsg_endpoint struct.
 *
 * This function allows drivers to create such an endpoint, and by that,
 * bind a callback, and possibly some private data too, to an rpmsg address
 * (either one that is known in advance, or one that will be dynamically
 * assigned for them).
 *
 * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint
 * is already created for them when they are probed by the rpmsg bus
 * (using the rx callback provided when they registered to the rpmsg bus).
 *
 * So things should just work for simple drivers: they already have an
 * endpoint, their rx callback is bound to their rpmsg address, and when
 * relevant inbound messages arrive (i.e. messages whose dst address
 * equals the src address of their rpmsg channel), the driver's handler
 * is invoked to process them.
 *
 * That said, more complicated drivers might need to allocate
 * additional rpmsg addresses, and bind them to different rx callbacks.
 * To accomplish that, those drivers need to call this function.
 *
 * Drivers should provide their @rpdev channel (so the new endpoint would belong
 * to the same remote processor their channel belongs to), an rx callback
 * function, an optional private data (which is provided back when the
 * rx callback is invoked), and an address they want to bind with the
 * callback. If @addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will
 * dynamically assign them an available rpmsg address (drivers should have
 * a very good reason why not to always use RPMSG_ADDR_ANY here).
 *
 * Returns a pointer to the endpoint on success, or NULL on error.
 */
struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev,
				rpmsg_rx_cb_t cb, void *priv, u32 addr)
{
	return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr);
}
EXPORT_SYMBOL(rpmsg_create_ept);
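
/*
 * Illustrative only (not part of the original file): a minimal sketch of
 * how a driver that needs a second rx address might use this API from its
 * probe() routine. The callback and variable names here are hypothetical;
 * the callback signature mirrors how this bus invokes ept->cb() below.
 *
 *	static void my_aux_cb(struct rpmsg_channel *rpdev, void *data, int len,
 *			      void *priv, u32 src)
 *	{
 *		dev_info(&rpdev->dev, "got %d bytes from 0x%x\n", len, src);
 *	}
 *
 *	// inside the driver's probe():
 *	struct rpmsg_endpoint *aux_ept;
 *
 *	aux_ept = rpmsg_create_ept(rpdev, my_aux_cb, NULL, RPMSG_ADDR_ANY);
 *	if (!aux_ept)
 *		return -ENOMEM;
 *
 *	// ... and later, in remove(): rpmsg_destroy_ept(aux_ept);
 */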
/**
 * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @vrp: virtproc which owns this ept
 * @ept: endpoint to destroy
 *
 * An internal function which destroys an ept without assuming it is
 * bound to an rpmsg channel. This is needed for handling the internal
 * name service endpoint, which isn't bound to an rpmsg channel.
 * See also __rpmsg_create_ept().
 */
static void
__rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept)
{
	/* make sure new inbound messages can't find this ept anymore */
	mutex_lock(&vrp->endpoints_lock);
	idr_remove(&vrp->endpoints, ept->addr);
	mutex_unlock(&vrp->endpoints_lock);

	/* make sure in-flight inbound messages won't invoke cb anymore */
	mutex_lock(&ept->cb_lock);
	ept->cb = NULL;
	mutex_unlock(&ept->cb_lock);

	kref_put(&ept->refcount, __ept_release);
}

/**
 * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint
 * @ept: endpoint to destroy
 *
 * Should be used by drivers to destroy an rpmsg endpoint previously
 * created with rpmsg_create_ept().
 */
void rpmsg_destroy_ept(struct rpmsg_endpoint *ept)
{
	__rpmsg_destroy_ept(ept->rpdev->vrp, ept);
}
EXPORT_SYMBOL(rpmsg_destroy_ept);
/*
 * when an rpmsg driver is probed with a channel, we seamlessly create
 * an endpoint for it, binding its rx callback to a unique local rpmsg
 * address.
 *
 * if we need to, we also announce this channel to the remote
 * processor (needed in case the driver is exposing an rpmsg service).
 */
static int rpmsg_dev_probe(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
	struct virtproc_info *vrp = rpdev->vrp;
	struct rpmsg_endpoint *ept;
	int err;

	ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src);
	if (!ept) {
		dev_err(dev, "failed to create endpoint\n");
		err = -ENOMEM;
		goto out;
	}

	rpdev->ept = ept;
	rpdev->src = ept->addr;

	err = rpdrv->probe(rpdev);
	if (err) {
		dev_err(dev, "%s: failed: %d\n", __func__, err);
		rpmsg_destroy_ept(ept);
		goto out;
	}

	/* need to tell remote processor's name service about this channel ? */
	if (rpdev->announce &&
			virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
		struct rpmsg_ns_msg nsm;

		strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
		nsm.addr = rpdev->src;
		nsm.flags = RPMSG_NS_CREATE;

		err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
		if (err)
			dev_err(dev, "failed to announce service %d\n", err);
	}

out:
	return err;
}
static int rpmsg_dev_remove(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);
	struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver);
	struct virtproc_info *vrp = rpdev->vrp;
	int err = 0;

	/* tell remote processor's name service we're removing this channel */
	if (rpdev->announce &&
			virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) {
		struct rpmsg_ns_msg nsm;

		strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE);
		nsm.addr = rpdev->src;
		nsm.flags = RPMSG_NS_DESTROY;

		err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR);
		if (err)
			dev_err(dev, "failed to announce service %d\n", err);
	}

	rpdrv->remove(rpdev);

	rpmsg_destroy_ept(rpdev->ept);

	return err;
}

static struct bus_type rpmsg_bus = {
	.name		= "rpmsg",
	.match		= rpmsg_dev_match,
	.dev_attrs	= rpmsg_dev_attrs,
	.uevent		= rpmsg_uevent,
	.probe		= rpmsg_dev_probe,
	.remove		= rpmsg_dev_remove,
};
/**
 * register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus
 * @rpdrv: pointer to a struct rpmsg_driver
 *
 * Returns 0 on success, and an appropriate error value on failure.
 */
int register_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
	rpdrv->drv.bus = &rpmsg_bus;
	return driver_register(&rpdrv->drv);
}
EXPORT_SYMBOL(register_rpmsg_driver);

/**
 * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus
 * @rpdrv: pointer to a struct rpmsg_driver
 */
void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv)
{
	driver_unregister(&rpdrv->drv);
}
EXPORT_SYMBOL(unregister_rpmsg_driver);
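
/*
 * Illustrative only (not part of the original file): a minimal sketch of a
 * client driver that binds to a service name over this bus. All names here
 * are hypothetical; the structure simply mirrors what rpmsg_dev_match(),
 * rpmsg_dev_probe() and rpmsg_dev_remove() above expect from a driver.
 *
 *	static void my_rpmsg_cb(struct rpmsg_channel *rpdev, void *data,
 *				int len, void *priv, u32 src)
 *	{
 *		dev_info(&rpdev->dev, "rx %d bytes from 0x%x\n", len, src);
 *	}
 *
 *	static int my_rpmsg_probe(struct rpmsg_channel *rpdev)
 *	{
 *		// say hello to the remote service we were matched with
 *		return rpmsg_send(rpdev, "hello", 6);
 *	}
 *
 *	static void my_rpmsg_remove(struct rpmsg_channel *rpdev)
 *	{
 *	}
 *
 *	static struct rpmsg_device_id my_rpmsg_id_table[] = {
 *		{ .name = "my-rpmsg-service" },
 *		{ },
 *	};
 *
 *	static struct rpmsg_driver my_rpmsg_driver = {
 *		.drv.name	= KBUILD_MODNAME,
 *		.drv.owner	= THIS_MODULE,
 *		.id_table	= my_rpmsg_id_table,
 *		.probe		= my_rpmsg_probe,
 *		.callback	= my_rpmsg_cb,
 *		.remove		= my_rpmsg_remove,
 *	};
 *
 *	// module_init/module_exit would then call
 *	// register_rpmsg_driver(&my_rpmsg_driver) and
 *	// unregister_rpmsg_driver(&my_rpmsg_driver).
 */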
static void rpmsg_release_device(struct device *dev)
{
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	kfree(rpdev);
}

/*
 * match an rpmsg channel with a channel info struct.
 * this is used to make sure we're not creating rpmsg devices for channels
 * that already exist.
 */
static int rpmsg_channel_match(struct device *dev, void *data)
{
	struct rpmsg_channel_info *chinfo = data;
	struct rpmsg_channel *rpdev = to_rpmsg_channel(dev);

	if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src)
		return 0;

	if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst)
		return 0;

	if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE))
		return 0;

	/* found a match ! */
	return 1;
}
/*
 * create an rpmsg channel using its name and address info.
 * this function will be used to create both static and dynamic
 * channels.
 */
static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp,
				struct rpmsg_channel_info *chinfo)
{
	struct rpmsg_channel *rpdev;
	struct device *tmp, *dev = &vrp->vdev->dev;
	int ret;

	/* make sure a similar channel doesn't already exist */
	tmp = device_find_child(dev, chinfo, rpmsg_channel_match);
	if (tmp) {
		/* decrement the matched device's refcount back */
		put_device(tmp);
		dev_err(dev, "channel %s:%x:%x already exists\n",
				chinfo->name, chinfo->src, chinfo->dst);
		return NULL;
	}

	rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL);
	if (!rpdev) {
		pr_err("kzalloc failed\n");
		return NULL;
	}

	rpdev->vrp = vrp;
	rpdev->src = chinfo->src;
	rpdev->dst = chinfo->dst;

	/*
	 * rpmsg server channels have a predefined local address (for now),
	 * and their existence needs to be announced remotely
	 */
	rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? true : false;

	strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE);

	/* very simple device indexing plumbing which is enough for now */
	dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++);

	rpdev->dev.parent = &vrp->vdev->dev;
	rpdev->dev.bus = &rpmsg_bus;
	rpdev->dev.release = rpmsg_release_device;

	ret = device_register(&rpdev->dev);
	if (ret) {
		dev_err(dev, "device_register failed: %d\n", ret);
		put_device(&rpdev->dev);
		return NULL;
	}

	return rpdev;
}
/*
 * find an existing channel using its name + address properties,
 * and destroy it
 */
static int rpmsg_destroy_channel(struct virtproc_info *vrp,
				 struct rpmsg_channel_info *chinfo)
{
	struct virtio_device *vdev = vrp->vdev;
	struct device *dev;

	dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match);
	if (!dev)
		return -EINVAL;

	device_unregister(dev);

	put_device(dev);

	return 0;
}

/* super simple buffer "allocator" that is just enough for now */
static void *get_a_tx_buf(struct virtproc_info *vrp)
{
	unsigned int len;
	void *ret;

	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/*
	 * either pick the next unused tx buffer
	 * (half of our buffers are used for sending messages)
	 */
	if (vrp->last_sbuf < RPMSG_NUM_BUFS / 2)
		ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++;
	/* or recycle a used one */
	else
		ret = virtqueue_get_buf(vrp->svq, &len);

	mutex_unlock(&vrp->tx_lock);

	return ret;
}
/**
 * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called before a sender is blocked, waiting for
 * a tx buffer to become available.
 *
 * If we already have blocking senders, this function merely increases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if this is the first sender to block, we also enable
 * virtio's tx callbacks, so we'd be immediately notified when a tx
 * buffer is consumed (we rely on virtio's tx callback in order
 * to wake up sleeping senders as soon as a tx buffer is used by the
 * remote processor).
 */
static void rpmsg_upref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the first sleeping context waiting for tx buffers ? */
	if (atomic_inc_return(&vrp->sleepers) == 1)
		/* enable "tx-complete" interrupts before dozing off */
		virtqueue_enable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}

/**
 * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed
 * @vrp: virtual remote processor state
 *
 * This function is called after a sender, that waited for a tx buffer
 * to become available, is unblocked.
 *
 * If we still have blocking senders, this function merely decreases
 * the "sleepers" reference count, and exits.
 *
 * Otherwise, if there are no more blocking senders, we also disable
 * virtio's tx callbacks, to avoid the overhead incurred with handling
 * those (now redundant) interrupts.
 */
static void rpmsg_downref_sleepers(struct virtproc_info *vrp)
{
	/* support multiple concurrent senders */
	mutex_lock(&vrp->tx_lock);

	/* are we the last sleeping context waiting for tx buffers ? */
	if (atomic_dec_and_test(&vrp->sleepers))
		/* disable "tx-complete" interrupts */
		virtqueue_disable_cb(vrp->svq);

	mutex_unlock(&vrp->tx_lock);
}
/**
 * rpmsg_send_offchannel_raw() - send a message across to the remote processor
 * @rpdev: the rpmsg channel
 * @src: source address
 * @dst: destination address
 * @data: payload of message
 * @len: length of payload
 * @wait: indicates whether caller should block in case no TX buffers available
 *
 * This function is the base implementation for all of the rpmsg sending API.
 *
 * It will send @data of length @len to @dst, and say it's from @src. The
 * message will be sent to the remote processor which the @rpdev channel
 * belongs to.
 *
 * The message is sent using one of the TX buffers that are available for
 * communication with this remote processor.
 *
 * If @wait is true, the caller will be blocked until either a TX buffer is
 * available, or 15 seconds elapses (we don't want callers to
 * sleep indefinitely due to misbehaving remote processors), and in that
 * case -ERESTARTSYS is returned. The number '15' itself was picked
 * arbitrarily; there's little point in asking drivers to provide a timeout
 * value themselves.
 *
 * Otherwise, if @wait is false, and there are no TX buffers available,
 * the function will immediately fail, and -ENOMEM will be returned.
 *
 * Normally drivers shouldn't use this function directly; instead, drivers
 * should use the appropriate rpmsg_{try}send{to, _offchannel} API
 * (see include/linux/rpmsg.h).
 *
 * Returns 0 on success and an appropriate error value on failure.
 */
int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst,
					void *data, int len, bool wait)
{
	struct virtproc_info *vrp = rpdev->vrp;
	struct device *dev = &rpdev->dev;
	struct scatterlist sg;
	struct rpmsg_hdr *msg;
	int err;

	/* bcasting isn't allowed */
	if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) {
		dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst);
		return -EINVAL;
	}

	/*
	 * We currently use fixed-sized buffers, and therefore the payload
	 * length is limited.
	 *
	 * One of the possible improvements here is either to support
	 * user-provided buffers (and then we can also support zero-copy
	 * messaging), or to improve the buffer allocator, to support
	 * variable-length buffer sizes.
	 */
	if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) {
		dev_err(dev, "message is too big (%d)\n", len);
		return -EMSGSIZE;
	}

	/* grab a buffer */
	msg = get_a_tx_buf(vrp);
	if (!msg && !wait)
		return -ENOMEM;

	/* no free buffer ? wait for one (but bail after 15 seconds) */
	while (!msg) {
		/* enable "tx-complete" interrupts, if not already enabled */
		rpmsg_upref_sleepers(vrp);

		/*
		 * sleep until a free buffer is available or 15 secs elapse.
		 * the timeout period is not configurable because there's
		 * little point in asking drivers to specify that.
		 * if later this happens to be required, it'd be easy to add.
		 */
		err = wait_event_interruptible_timeout(vrp->sendq,
					(msg = get_a_tx_buf(vrp)),
					msecs_to_jiffies(15000));

		/* disable "tx-complete" interrupts if we're the last sleeper */
		rpmsg_downref_sleepers(vrp);

		/* timeout ? */
		if (!err) {
			dev_err(dev, "timeout waiting for a tx buffer\n");
			return -ERESTARTSYS;
		}
	}

	msg->len = len;
	msg->flags = 0;
	msg->src = src;
	msg->dst = dst;
	msg->reserved = 0;
	memcpy(msg->data, data, len);

	dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->reserved);
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);

	sg_init_one(&sg, msg, sizeof(*msg) + len);

	mutex_lock(&vrp->tx_lock);

	/* add message to the remote processor's virtqueue */
	err = virtqueue_add_buf(vrp->svq, &sg, 1, 0, msg, GFP_KERNEL);
	if (err < 0) {
		/*
		 * need to reclaim the buffer here, otherwise it's lost
		 * (memory won't leak, but rpmsg won't use it again for TX).
		 * this will wait for a buffer management overhaul.
		 */
		dev_err(dev, "virtqueue_add_buf failed: %d\n", err);
		goto out;
	}

	/* tell the remote processor it has a pending message to read */
	virtqueue_kick(vrp->svq);

	err = 0;
out:
	mutex_unlock(&vrp->tx_lock);
	return err;
}
EXPORT_SYMBOL(rpmsg_send_offchannel_raw);
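
/*
 * For orientation (not part of the original file): the public send helpers
 * in include/linux/rpmsg.h are thin inline wrappers around the raw call
 * above, roughly along these lines (a sketch; see the header for the
 * authoritative definitions):
 *
 *	rpmsg_send(rpdev, data, len)
 *		-> rpmsg_send_offchannel_raw(rpdev, rpdev->src, rpdev->dst,
 *					     data, len, true);
 *	rpmsg_sendto(rpdev, data, len, dst)
 *		-> rpmsg_send_offchannel_raw(rpdev, rpdev->src, dst,
 *					     data, len, true);
 *	rpmsg_trysend(rpdev, data, len)
 *		-> same as rpmsg_send(), but with wait == false
 */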
/* called when an rx buffer is used, and it's time to digest a message */
static void rpmsg_recv_done(struct virtqueue *rvq)
{
	struct rpmsg_hdr *msg;
	unsigned int len;
	struct rpmsg_endpoint *ept;
	struct scatterlist sg;
	struct virtproc_info *vrp = rvq->vdev->priv;
	struct device *dev = &rvq->vdev->dev;
	int err;

	msg = virtqueue_get_buf(rvq, &len);
	if (!msg) {
		dev_err(dev, "uhm, incoming signal, but no used buffer ?\n");
		return;
	}

	dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n",
					msg->src, msg->dst, msg->len,
					msg->flags, msg->reserved);
	print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1,
					msg, sizeof(*msg) + msg->len, true);

	/*
	 * We currently use fixed-sized buffers, so trivially sanitize
	 * the reported payload length.
	 */
	if (len > RPMSG_BUF_SIZE ||
		msg->len > (len - sizeof(struct rpmsg_hdr))) {
		dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len);
		return;
	}

	/* use the dst addr to fetch the callback of the appropriate user */
	mutex_lock(&vrp->endpoints_lock);
	ept = idr_find(&vrp->endpoints, msg->dst);

	/* let's make sure no one deallocates ept while we use it */
	if (ept)
		kref_get(&ept->refcount);
	mutex_unlock(&vrp->endpoints_lock);

	if (ept) {
		/* make sure ept->cb doesn't go away while we use it */
		mutex_lock(&ept->cb_lock);

		if (ept->cb)
			ept->cb(ept->rpdev, msg->data, msg->len, ept->priv,
				msg->src);

		mutex_unlock(&ept->cb_lock);

		/* farewell, ept, we don't need you anymore */
		kref_put(&ept->refcount, __ept_release);
	} else
		dev_warn(dev, "msg received with no recipient\n");

	/* publish the real size of the buffer */
	sg_init_one(&sg, msg, RPMSG_BUF_SIZE);

	/* add the buffer back to the remote processor's virtqueue */
	err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, msg, GFP_KERNEL);
	if (err < 0) {
		dev_err(dev, "failed to add a virtqueue buffer: %d\n", err);
		return;
	}

	/* tell the remote processor we added another available rx buffer */
	virtqueue_kick(vrp->rvq);
}
/*
 * This is invoked whenever the remote processor completed processing
 * a TX msg we just sent, and the buffer is put back to the used ring.
 *
 * Normally, though, we suppress this "tx complete" interrupt in order to
 * avoid the incurred overhead.
 */
static void rpmsg_xmit_done(struct virtqueue *svq)
{
	struct virtproc_info *vrp = svq->vdev->priv;

	dev_dbg(&svq->vdev->dev, "%s\n", __func__);

	/* wake up potential senders that are waiting for a tx buffer */
	wake_up_interruptible(&vrp->sendq);
}
/* invoked when a name service announcement arrives */
static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len,
						void *priv, u32 src)
{
	struct rpmsg_ns_msg *msg = data;
	struct rpmsg_channel *newch;
	struct rpmsg_channel_info chinfo;
	struct virtproc_info *vrp = priv;
	struct device *dev = &vrp->vdev->dev;
	int ret;

	print_hex_dump(KERN_DEBUG, "NS announcement: ",
			DUMP_PREFIX_NONE, 16, 1,
			data, len, true);

	if (len != sizeof(*msg)) {
		dev_err(dev, "malformed ns msg (%d)\n", len);
		return;
	}

	/*
	 * the name service ept does _not_ belong to a real rpmsg channel,
	 * and is handled by the rpmsg bus itself.
	 * for sanity reasons, make sure a valid rpdev has _not_ sneaked
	 * in somehow.
	 */
	if (rpdev) {
		dev_err(dev, "anomaly: ns ept has an rpdev handle\n");
		return;
	}

	/* don't trust the remote processor for null terminating the name */
	msg->name[RPMSG_NAME_SIZE - 1] = '\0';

	dev_info(dev, "%sing channel %s addr 0x%x\n",
			msg->flags & RPMSG_NS_DESTROY ? "destroy" : "creat",
			msg->name, msg->addr);

	strncpy(chinfo.name, msg->name, sizeof(chinfo.name));
	chinfo.src = RPMSG_ADDR_ANY;
	chinfo.dst = msg->addr;

	if (msg->flags & RPMSG_NS_DESTROY) {
		ret = rpmsg_destroy_channel(vrp, &chinfo);
		if (ret)
			dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret);
	} else {
		newch = rpmsg_create_channel(vrp, &chinfo);
		if (!newch)
			dev_err(dev, "rpmsg_create_channel failed\n");
	}
}
static int rpmsg_probe(struct virtio_device *vdev)
{
	vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done };
	const char *names[] = { "input", "output" };
	struct virtqueue *vqs[2];
	struct virtproc_info *vrp;
	void *bufs_va;
	int err = 0, i;

	vrp = kzalloc(sizeof(*vrp), GFP_KERNEL);
	if (!vrp)
		return -ENOMEM;

	vrp->vdev = vdev;

	idr_init(&vrp->endpoints);
	mutex_init(&vrp->endpoints_lock);
	mutex_init(&vrp->tx_lock);
	init_waitqueue_head(&vrp->sendq);

	/* We expect two virtqueues, rx and tx (and in this order) */
	err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names);
	if (err)
		goto free_vrp;

	vrp->rvq = vqs[0];
	vrp->svq = vqs[1];

	/* allocate coherent memory for the buffers */
	bufs_va = dma_alloc_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE,
				&vrp->bufs_dma, GFP_KERNEL);
	if (!bufs_va)
		goto vqs_del;

	dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va,
					(unsigned long long)vrp->bufs_dma);

	/* half of the buffers is dedicated for RX */
	vrp->rbufs = bufs_va;

	/* and half is dedicated for TX */
	vrp->sbufs = bufs_va + RPMSG_TOTAL_BUF_SPACE / 2;

	/* set up the receive buffers */
	for (i = 0; i < RPMSG_NUM_BUFS / 2; i++) {
		struct scatterlist sg;
		void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE;

		sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE);

		err = virtqueue_add_buf(vrp->rvq, &sg, 0, 1, cpu_addr,
								GFP_KERNEL);
		WARN_ON(err < 0); /* sanity check; this can't really happen */
	}

	/* suppress "tx-complete" interrupts */
	virtqueue_disable_cb(vrp->svq);

	vdev->priv = vrp;

	/* if supported by the remote processor, enable the name service */
	if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) {
		/* a dedicated endpoint handles the name service msgs */
		vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb,
						vrp, RPMSG_NS_ADDR);
		if (!vrp->ns_ept) {
			dev_err(&vdev->dev, "failed to create the ns ept\n");
			err = -ENOMEM;
			goto free_coherent;
		}
	}

	/* tell the remote processor it can start sending messages */
	virtqueue_kick(vrp->rvq);

	dev_info(&vdev->dev, "rpmsg host is online\n");

	return 0;

free_coherent:
	dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE, bufs_va,
					vrp->bufs_dma);
vqs_del:
	vdev->config->del_vqs(vrp->vdev);
free_vrp:
	kfree(vrp);
	return err;
}
static int rpmsg_remove_device(struct device *dev, void *data)
{
	device_unregister(dev);

	return 0;
}

static void __devexit rpmsg_remove(struct virtio_device *vdev)
{
	struct virtproc_info *vrp = vdev->priv;
	int ret;

	vdev->config->reset(vdev);

	ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device);
	if (ret)
		dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret);

	if (vrp->ns_ept)
		__rpmsg_destroy_ept(vrp, vrp->ns_ept);

	idr_remove_all(&vrp->endpoints);
	idr_destroy(&vrp->endpoints);

	vdev->config->del_vqs(vrp->vdev);

	dma_free_coherent(vdev->dev.parent, RPMSG_TOTAL_BUF_SPACE,
					vrp->rbufs, vrp->bufs_dma);

	kfree(vrp);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_RPMSG_F_NS,
};

static struct virtio_driver virtio_ipc_driver = {
	.feature_table	= features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name	= KBUILD_MODNAME,
	.driver.owner	= THIS_MODULE,
	.id_table	= id_table,
	.probe		= rpmsg_probe,
	.remove		= __devexit_p(rpmsg_remove),
};

static int __init rpmsg_init(void)
{
	int ret;

	ret = bus_register(&rpmsg_bus);
	if (ret) {
		pr_err("failed to register rpmsg bus: %d\n", ret);
		return ret;
	}

	ret = register_virtio_driver(&virtio_ipc_driver);
	if (ret) {
		pr_err("failed to register virtio driver: %d\n", ret);
		bus_unregister(&rpmsg_bus);
	}

	return ret;
}
subsys_initcall(rpmsg_init);

static void __exit rpmsg_fini(void)
{
	unregister_virtio_driver(&virtio_ipc_driver);
	bus_unregister(&rpmsg_bus);
}
module_exit(rpmsg_fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio-based remote processor messaging bus");
MODULE_LICENSE("GPL v2");