operation.c

/*
 * Greybus operations
 *
 * Copyright 2014-2015 Google Inc.
 * Copyright 2014-2015 Linaro Ltd.
 *
 * Released under the GPLv2 only.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "greybus.h"
#include "greybus_trace.h"

static struct kmem_cache *gb_operation_cache;
static struct kmem_cache *gb_message_cache;

/* Workqueue to handle Greybus operation completions. */
static struct workqueue_struct *gb_operation_completion_wq;

/* Wait queue for synchronous cancellations. */
static DECLARE_WAIT_QUEUE_HEAD(gb_operation_cancellation_queue);

/*
 * Protects updates to operation->errno.
 */
static DEFINE_SPINLOCK(gb_operations_lock);

static int gb_operation_response_send(struct gb_operation *operation,
					int errno);

/*
 * Increment operation active count and add to connection list unless the
 * connection is going away.
 *
 * Caller holds operation reference.
 */
static int gb_operation_get_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	switch (connection->state) {
	case GB_CONNECTION_STATE_ENABLED:
		break;
	case GB_CONNECTION_STATE_ENABLED_TX:
		if (gb_operation_is_incoming(operation))
			goto err_unlock;
		break;
	case GB_CONNECTION_STATE_DISCONNECTING:
		if (!gb_operation_is_core(operation))
			goto err_unlock;
		break;
	default:
		goto err_unlock;
	}

	if (operation->active++ == 0)
		list_add_tail(&operation->links, &connection->operations);

	trace_gb_operation_get_active(operation);

	spin_unlock_irqrestore(&connection->lock, flags);

	return 0;

err_unlock:
	spin_unlock_irqrestore(&connection->lock, flags);

	return -ENOTCONN;
}

/* Caller holds operation reference. */
static void gb_operation_put_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;

	spin_lock_irqsave(&connection->lock, flags);

	trace_gb_operation_put_active(operation);

	if (--operation->active == 0) {
		list_del(&operation->links);
		if (atomic_read(&operation->waiters))
			wake_up(&gb_operation_cancellation_queue);
	}

	spin_unlock_irqrestore(&connection->lock, flags);
}

static bool gb_operation_is_active(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&connection->lock, flags);
	ret = operation->active;
	spin_unlock_irqrestore(&connection->lock, flags);

	return ret;
}

/*
 * Set an operation's result.
 *
 * Initially an outgoing operation's errno value is -EBADR.
 * If no error occurs before sending the request message the only
 * valid value operation->errno can be set to is -EINPROGRESS,
 * indicating the request has been (or rather is about to be) sent.
 * At that point nobody should be looking at the result until the
 * response arrives.
 *
 * The first time the result gets set after the request has been
 * sent, that result "sticks." That is, if two concurrent threads
 * race to set the result, the first one wins. The return value
 * tells the caller whether its result was recorded; if not the
 * caller has nothing more to do.
 *
 * The result value -EILSEQ is reserved to signal an implementation
 * error; if it's ever observed, the code performing the request has
 * done something fundamentally wrong. It is an error to try to set
 * the result to -EBADR, and attempts to do so result in a warning,
 * and -EILSEQ is used instead. Similarly, the only valid result
 * value to set for an operation in initial state is -EINPROGRESS.
 * Attempts to do otherwise will also record a (successful) -EILSEQ
 * operation result.
 */
static bool gb_operation_result_set(struct gb_operation *operation, int result)
{
	unsigned long flags;
	int prev;

	if (result == -EINPROGRESS) {
		/*
		 * -EINPROGRESS is used to indicate the request is
		 * in flight. It should be the first result value
		 * set after the initial -EBADR. Issue a warning
		 * and record an implementation error if it's
		 * set at any other time.
		 */
		spin_lock_irqsave(&gb_operations_lock, flags);
		prev = operation->errno;
		if (prev == -EBADR)
			operation->errno = result;
		else
			operation->errno = -EILSEQ;
		spin_unlock_irqrestore(&gb_operations_lock, flags);
		WARN_ON(prev != -EBADR);

		return true;
	}

	/*
	 * The first result value set after a request has been sent
	 * will be the final result of the operation. Subsequent
	 * attempts to set the result are ignored.
	 *
	 * Note that -EBADR is a reserved "initial state" result
	 * value. Attempts to set this value result in a warning,
	 * and the result code is set to -EILSEQ instead.
	 */
	if (WARN_ON(result == -EBADR))
		result = -EILSEQ;	/* Nobody should be setting -EBADR */

	spin_lock_irqsave(&gb_operations_lock, flags);
	prev = operation->errno;
	if (prev == -EINPROGRESS)
		operation->errno = result;	/* First and final result */
	spin_unlock_irqrestore(&gb_operations_lock, flags);

	return prev == -EINPROGRESS;
}

int gb_operation_result(struct gb_operation *operation)
{
	int result = operation->errno;

	WARN_ON(result == -EBADR);
	WARN_ON(result == -EINPROGRESS);

	return result;
}
EXPORT_SYMBOL_GPL(gb_operation_result);

/*
 * Looks up an outgoing operation on a connection and returns a refcounted
 * pointer if found, or NULL otherwise.
 */
static struct gb_operation *
gb_operation_find_outgoing(struct gb_connection *connection, u16 operation_id)
{
	struct gb_operation *operation;
	unsigned long flags;
	bool found = false;

	spin_lock_irqsave(&connection->lock, flags);
	list_for_each_entry(operation, &connection->operations, links)
		if (operation->id == operation_id &&
				!gb_operation_is_incoming(operation)) {
			gb_operation_get(operation);
			found = true;
			break;
		}
	spin_unlock_irqrestore(&connection->lock, flags);

	return found ? operation : NULL;
}

static int gb_message_send(struct gb_message *message, gfp_t gfp)
{
	struct gb_connection *connection = message->operation->connection;

	trace_gb_message_send(message);
	return connection->hd->driver->message_send(connection->hd,
					connection->hd_cport_id,
					message,
					gfp);
}

/*
 * Cancel a message we have passed to the host device layer to be sent.
 */
static void gb_message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;

	hd->driver->message_cancel(message);
}

static void gb_operation_request_handle(struct gb_operation *operation)
{
	struct gb_connection *connection = operation->connection;
	int status;
	int ret;

	if (connection->handler) {
		status = connection->handler(operation);
	} else {
		dev_err(&connection->hd->dev,
			"%s: unexpected incoming request of type 0x%02x\n",
			connection->name, operation->type);

		status = -EPROTONOSUPPORT;
	}

	ret = gb_operation_response_send(operation, status);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: failed to send response %d for type 0x%02x: %d\n",
			connection->name, status, operation->type, ret);
		return;
	}
}

/*
 * Process operation work.
 *
 * For incoming requests, call the protocol request handler. The operation
 * result should be -EINPROGRESS at this point.
 *
 * For outgoing requests, the operation result value should have
 * been set before queueing this. The operation callback function
 * allows the original requester to know the request has completed
 * and its result is available.
 */
static void gb_operation_work(struct work_struct *work)
{
	struct gb_operation *operation;

	operation = container_of(work, struct gb_operation, work);

	if (gb_operation_is_incoming(operation))
		gb_operation_request_handle(operation);
	else
		operation->callback(operation);

	gb_operation_put_active(operation);
	gb_operation_put(operation);
}

static void gb_operation_message_init(struct gb_host_device *hd,
				struct gb_message *message, u16 operation_id,
				size_t payload_size, u8 type)
{
	struct gb_operation_msg_hdr *header;

	header = message->buffer;

	message->header = header;
	message->payload = payload_size ? header + 1 : NULL;
	message->payload_size = payload_size;

	/*
	 * The type supplied for incoming message buffers will be
	 * GB_REQUEST_TYPE_INVALID. Such buffers will be overwritten by
	 * arriving data so there's no need to initialize the message header.
	 */
	if (type != GB_REQUEST_TYPE_INVALID) {
		u16 message_size = (u16)(sizeof(*header) + payload_size);

		/*
		 * For a request, the operation id gets filled in
		 * when the message is sent. For a response, it
		 * will be copied from the request by the caller.
		 *
		 * The result field in a request message must be
		 * zero. It will be set just prior to sending for
		 * a response.
		 */
		header->size = cpu_to_le16(message_size);
		header->operation_id = 0;
		header->type = type;
		header->result = 0;
	}
}

/*
 * Allocate a message to be used for an operation request or response.
 * Both types of message contain a common header. The request message
 * for an outgoing operation is outbound, as is the response message
 * for an incoming operation. The message header for an outbound
 * message is partially initialized here.
 *
 * The headers for inbound messages don't need to be initialized;
 * they'll be filled in by arriving data.
 *
 * Our message buffers have the following layout:
 *	message header  \_ these combined are
 *	message payload /  the message size
 */
static struct gb_message *
gb_operation_message_alloc(struct gb_host_device *hd, u8 type,
				size_t payload_size, gfp_t gfp_flags)
{
	struct gb_message *message;
	struct gb_operation_msg_hdr *header;
	size_t message_size = payload_size + sizeof(*header);

	if (message_size > hd->buffer_size_max) {
		dev_warn(&hd->dev, "requested message size too big (%zu > %zu)\n",
				message_size, hd->buffer_size_max);
		return NULL;
	}

	/* Allocate the message structure and buffer. */
	message = kmem_cache_zalloc(gb_message_cache, gfp_flags);
	if (!message)
		return NULL;

	message->buffer = kzalloc(message_size, gfp_flags);
	if (!message->buffer)
		goto err_free_message;

	/* Initialize the message. Operation id is filled in later. */
	gb_operation_message_init(hd, message, 0, payload_size, type);

	return message;

err_free_message:
	kmem_cache_free(gb_message_cache, message);

	return NULL;
}

static void gb_operation_message_free(struct gb_message *message)
{
	kfree(message->buffer);
	kmem_cache_free(gb_message_cache, message);
}

/*
 * Map an enum gb_operation_status value (which is represented in a
 * message as a single byte) to an appropriate Linux negative errno.
 */
static int gb_operation_status_map(u8 status)
{
	switch (status) {
	case GB_OP_SUCCESS:
		return 0;
	case GB_OP_INTERRUPTED:
		return -EINTR;
	case GB_OP_TIMEOUT:
		return -ETIMEDOUT;
	case GB_OP_NO_MEMORY:
		return -ENOMEM;
	case GB_OP_PROTOCOL_BAD:
		return -EPROTONOSUPPORT;
	case GB_OP_OVERFLOW:
		return -EMSGSIZE;
	case GB_OP_INVALID:
		return -EINVAL;
	case GB_OP_RETRY:
		return -EAGAIN;
	case GB_OP_NONEXISTENT:
		return -ENODEV;
	case GB_OP_MALFUNCTION:
		return -EILSEQ;
	case GB_OP_UNKNOWN_ERROR:
	default:
		return -EIO;
	}
}

/*
 * Map a Linux errno value (from operation->errno) into the value
 * that should represent it in a response message status sent
 * over the wire. Returns an enum gb_operation_status value (which
 * is represented in a message as a single byte).
 */
static u8 gb_operation_errno_map(int errno)
{
	switch (errno) {
	case 0:
		return GB_OP_SUCCESS;
	case -EINTR:
		return GB_OP_INTERRUPTED;
	case -ETIMEDOUT:
		return GB_OP_TIMEOUT;
	case -ENOMEM:
		return GB_OP_NO_MEMORY;
	case -EPROTONOSUPPORT:
		return GB_OP_PROTOCOL_BAD;
	case -EMSGSIZE:
		return GB_OP_OVERFLOW;	/* Could be underflow too */
	case -EINVAL:
		return GB_OP_INVALID;
	case -EAGAIN:
		return GB_OP_RETRY;
	case -EILSEQ:
		return GB_OP_MALFUNCTION;
	case -ENODEV:
		return GB_OP_NONEXISTENT;
	case -EIO:
	default:
		return GB_OP_UNKNOWN_ERROR;
	}
}

bool gb_operation_response_alloc(struct gb_operation *operation,
					size_t response_size, gfp_t gfp)
{
	struct gb_host_device *hd = operation->connection->hd;
	struct gb_operation_msg_hdr *request_header;
	struct gb_message *response;
	u8 type;

	type = operation->type | GB_MESSAGE_TYPE_RESPONSE;
	response = gb_operation_message_alloc(hd, type, response_size, gfp);
	if (!response)
		return false;
	response->operation = operation;

	/*
	 * Size and type get initialized when the message is
	 * allocated. The errno will be set before sending. All
	 * that's left is the operation id, which we copy from the
	 * request message header (as-is, in little-endian order).
	 */
	request_header = operation->request->header;
	response->header->operation_id = request_header->operation_id;
	operation->response = response;

	return true;
}
EXPORT_SYMBOL_GPL(gb_operation_response_alloc);
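
/*
 * Illustrative sketch (not part of the original driver): a minimal incoming
 * request handler of the kind a protocol driver might register as
 * connection->handler. It allocates a one-byte response with
 * gb_operation_response_alloc() and echoes back the first request byte.
 * The handler name and payload layout are assumptions for illustration; the
 * returned status is handed to gb_operation_response_send() by
 * gb_operation_request_handle().
 */
static int __maybe_unused
gb_operation_example_request_handler(struct gb_operation *operation)
{
	u8 *request_payload = operation->request->payload;
	u8 *response_payload;

	if (operation->request->payload_size < 1)
		return -EINVAL;

	/* Response payload size is chosen by the handler, not the core. */
	if (!gb_operation_response_alloc(operation, 1, GFP_KERNEL))
		return -ENOMEM;

	response_payload = operation->response->payload;
	response_payload[0] = request_payload[0];

	return 0;
}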

/*
 * Create a Greybus operation to be sent over the given connection.
 * The request buffer will be big enough for a payload of the given
 * size.
 *
 * For outgoing requests, the request message's header will be
 * initialized with the type of the request and the message size.
 * Outgoing operations must also specify the response buffer size,
 * which must be sufficient to hold all expected response data. The
 * response message header will eventually be overwritten, so there's
 * no need to initialize it here.
 *
 * Request messages for incoming operations can arrive in interrupt
 * context, so they must be allocated with GFP_ATOMIC. In this case
 * the request buffer will be immediately overwritten, so there is
 * no need to initialize the message header. Responsibility for
 * allocating a response buffer lies with the incoming request
 * handler for a protocol. So we don't allocate that here.
 *
 * Returns a pointer to the new operation or a null pointer if an
 * error occurs.
 */
static struct gb_operation *
gb_operation_create_common(struct gb_connection *connection, u8 type,
				size_t request_size, size_t response_size,
				unsigned long op_flags, gfp_t gfp_flags)
{
	struct gb_host_device *hd = connection->hd;
	struct gb_operation *operation;

	operation = kmem_cache_zalloc(gb_operation_cache, gfp_flags);
	if (!operation)
		return NULL;
	operation->connection = connection;

	operation->request = gb_operation_message_alloc(hd, type, request_size,
							gfp_flags);
	if (!operation->request)
		goto err_cache;
	operation->request->operation = operation;

	/* Allocate the response buffer for outgoing operations */
	if (!(op_flags & GB_OPERATION_FLAG_INCOMING)) {
		if (!gb_operation_response_alloc(operation, response_size,
						 gfp_flags)) {
			goto err_request;
		}
	}

	operation->flags = op_flags;
	operation->type = type;
	operation->errno = -EBADR;  /* Initial value--means "never set" */

	INIT_WORK(&operation->work, gb_operation_work);
	init_completion(&operation->completion);
	kref_init(&operation->kref);
	atomic_set(&operation->waiters, 0);

	return operation;

err_request:
	gb_operation_message_free(operation->request);
err_cache:
	kmem_cache_free(gb_operation_cache, operation);

	return NULL;
}

/*
 * Create a new operation associated with the given connection. The
 * request and response sizes provided are the number of bytes
 * required to hold the request/response payload only. Both of
 * these are allowed to be 0. Note that 0x00 is reserved as an
 * invalid operation type for all protocols, and this is enforced
 * here.
 */
struct gb_operation *
gb_operation_create_flags(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	struct gb_operation *operation;

	if (WARN_ON_ONCE(type == GB_REQUEST_TYPE_INVALID))
		return NULL;
	if (WARN_ON_ONCE(type & GB_MESSAGE_TYPE_RESPONSE))
		type &= ~GB_MESSAGE_TYPE_RESPONSE;

	if (WARN_ON_ONCE(flags & ~GB_OPERATION_FLAG_USER_MASK))
		flags &= GB_OPERATION_FLAG_USER_MASK;

	operation = gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
	if (operation)
		trace_gb_operation_create(operation);

	return operation;
}
EXPORT_SYMBOL_GPL(gb_operation_create_flags);

struct gb_operation *
gb_operation_create_core(struct gb_connection *connection,
				u8 type, size_t request_size,
				size_t response_size, unsigned long flags,
				gfp_t gfp)
{
	struct gb_operation *operation;

	flags |= GB_OPERATION_FLAG_CORE;

	operation = gb_operation_create_common(connection, type,
						request_size, response_size,
						flags, gfp);
	if (operation)
		trace_gb_operation_create_core(operation);

	return operation;
}
/* Do not export this function. */

size_t gb_operation_get_payload_size_max(struct gb_connection *connection)
{
	struct gb_host_device *hd = connection->hd;

	return hd->buffer_size_max - sizeof(struct gb_operation_msg_hdr);
}
EXPORT_SYMBOL_GPL(gb_operation_get_payload_size_max);

static struct gb_operation *
gb_operation_create_incoming(struct gb_connection *connection, u16 id,
				u8 type, void *data, size_t size)
{
	struct gb_operation *operation;
	size_t request_size;
	unsigned long flags = GB_OPERATION_FLAG_INCOMING;

	/* Caller has made sure we at least have a message header. */
	request_size = size - sizeof(struct gb_operation_msg_hdr);

	if (!id)
		flags |= GB_OPERATION_FLAG_UNIDIRECTIONAL;

	operation = gb_operation_create_common(connection, type,
						request_size,
						GB_REQUEST_TYPE_INVALID,
						flags, GFP_ATOMIC);
	if (!operation)
		return NULL;

	operation->id = id;
	memcpy(operation->request->header, data, size);
	trace_gb_operation_create_incoming(operation);

	return operation;
}

/*
 * Get an additional reference on an operation.
 */
void gb_operation_get(struct gb_operation *operation)
{
	kref_get(&operation->kref);
}
EXPORT_SYMBOL_GPL(gb_operation_get);

/*
 * Destroy a previously created operation.
 */
static void _gb_operation_destroy(struct kref *kref)
{
	struct gb_operation *operation;

	operation = container_of(kref, struct gb_operation, kref);

	trace_gb_operation_destroy(operation);

	if (operation->response)
		gb_operation_message_free(operation->response);
	gb_operation_message_free(operation->request);

	kmem_cache_free(gb_operation_cache, operation);
}

/*
 * Drop a reference on an operation, and destroy it when the last
 * one is gone.
 */
void gb_operation_put(struct gb_operation *operation)
{
	if (WARN_ON(!operation))
		return;

	kref_put(&operation->kref, _gb_operation_destroy);
}
EXPORT_SYMBOL_GPL(gb_operation_put);

/* Tell the requester we're done */
static void gb_operation_sync_callback(struct gb_operation *operation)
{
	complete(&operation->completion);
}

/**
 * gb_operation_request_send() - send an operation request message
 * @operation: the operation to initiate
 * @callback: the operation completion callback
 * @gfp: the memory flags to use for any allocations
 *
 * The caller has filled in any payload so the request message is ready to go.
 * The callback function supplied will be called when the response message has
 * arrived, a unidirectional request has been sent, or the operation is
 * cancelled, indicating that the operation is complete. The callback function
 * can fetch the result of the operation using gb_operation_result() if
 * desired.
 *
 * Return: 0 if the request was successfully queued in the host-driver queues,
 * or a negative errno.
 */
int gb_operation_request_send(struct gb_operation *operation,
				gb_operation_callback callback,
				gfp_t gfp)
{
	struct gb_connection *connection = operation->connection;
	struct gb_operation_msg_hdr *header;
	unsigned int cycle;
	int ret;

	if (gb_connection_is_offloaded(connection))
		return -EBUSY;

	if (!callback)
		return -EINVAL;

	/*
	 * Record the callback function, which is executed in
	 * non-atomic (workqueue) context when the final result
	 * of an operation has been set.
	 */
	operation->callback = callback;

	/*
	 * Assign the operation's id, and store it in the request header.
	 * Zero is a reserved operation id for unidirectional operations.
	 */
	if (gb_operation_is_unidirectional(operation)) {
		operation->id = 0;
	} else {
		cycle = (unsigned int)atomic_inc_return(&connection->op_cycle);
		operation->id = (u16)(cycle % U16_MAX + 1);
	}

	header = operation->request->header;
	header->operation_id = cpu_to_le16(operation->id);

	gb_operation_result_set(operation, -EINPROGRESS);

	/*
	 * Get an extra reference on the operation. It'll be dropped when the
	 * operation completes.
	 */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	ret = gb_message_send(operation->request, gfp);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_request_send);
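
/*
 * Illustrative sketch (not part of the original driver): asynchronous use of
 * gb_operation_request_send(). The caller creates an operation, sends it with
 * a completion callback, and the callback later fetches the result with
 * gb_operation_result() and drops the caller's reference. The operation type
 * (0x42) and the payload sizes are assumptions for illustration.
 */
static void __maybe_unused
gb_operation_example_callback(struct gb_operation *operation)
{
	int ret = gb_operation_result(operation);

	if (ret)
		dev_err(&operation->connection->hd->dev,
			"example operation failed: %d\n", ret);

	/* Drop the reference taken when the operation was created. */
	gb_operation_put(operation);
}

static int __maybe_unused
gb_operation_example_send_async(struct gb_connection *connection)
{
	struct gb_operation *operation;
	int ret;

	operation = gb_operation_create_flags(connection, 0x42, 4, 4, 0,
						GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	/* Fill in the 4-byte request payload before sending. */
	memset(operation->request->payload, 0, 4);

	ret = gb_operation_request_send(operation,
					gb_operation_example_callback,
					GFP_KERNEL);
	if (ret)
		gb_operation_put(operation);

	return ret;
}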

/*
 * Send a synchronous operation. This function is expected to
 * block, returning only when the response has arrived, or when an
 * error is detected. The return value is the result of the
 * operation.
 */
int gb_operation_request_send_sync_timeout(struct gb_operation *operation,
						unsigned int timeout)
{
	int ret;
	unsigned long timeout_jiffies;

	ret = gb_operation_request_send(operation, gb_operation_sync_callback,
					GFP_KERNEL);
	if (ret)
		return ret;

	if (timeout)
		timeout_jiffies = msecs_to_jiffies(timeout);
	else
		timeout_jiffies = MAX_SCHEDULE_TIMEOUT;

	ret = wait_for_completion_interruptible_timeout(&operation->completion,
							timeout_jiffies);
	if (ret < 0) {
		/* Cancel the operation if interrupted */
		gb_operation_cancel(operation, -ECANCELED);
	} else if (ret == 0) {
		/* Cancel the operation if op timed out */
		gb_operation_cancel(operation, -ETIMEDOUT);
	}

	return gb_operation_result(operation);
}
EXPORT_SYMBOL_GPL(gb_operation_request_send_sync_timeout);

/*
 * Send a response for an incoming operation request. A non-zero
 * errno indicates a failed operation.
 *
 * If there is any response payload, the incoming request handler is
 * responsible for allocating the response message. Otherwise it
 * can simply supply the result errno; this function will
 * allocate the response message if necessary.
 */
static int gb_operation_response_send(struct gb_operation *operation,
					int errno)
{
	struct gb_connection *connection = operation->connection;
	int ret;

	if (!operation->response &&
			!gb_operation_is_unidirectional(operation)) {
		if (!gb_operation_response_alloc(operation, 0, GFP_KERNEL))
			return -ENOMEM;
	}

	/* Record the result */
	if (!gb_operation_result_set(operation, errno)) {
		dev_err(&connection->hd->dev, "request result already set\n");
		return -EIO;	/* Shouldn't happen */
	}

	/* Sender of request does not care about response. */
	if (gb_operation_is_unidirectional(operation))
		return 0;

	/* Reference will be dropped when message has been sent. */
	gb_operation_get(operation);
	ret = gb_operation_get_active(operation);
	if (ret)
		goto err_put;

	/* Fill in the response header and send it */
	operation->response->header->result = gb_operation_errno_map(errno);

	ret = gb_message_send(operation->response, GFP_KERNEL);
	if (ret)
		goto err_put_active;

	return 0;

err_put_active:
	gb_operation_put_active(operation);
err_put:
	gb_operation_put(operation);

	return ret;
}

/*
 * This function is called when a message send request has completed.
 */
void greybus_message_sent(struct gb_host_device *hd,
				struct gb_message *message, int status)
{
	struct gb_operation *operation = message->operation;
	struct gb_connection *connection = operation->connection;

	/*
	 * If the message was a response, we just need to drop our
	 * reference to the operation. If an error occurred, report
	 * it.
	 *
	 * For requests, if there's no error and the operation is not
	 * unidirectional, there's nothing more to do until the response
	 * arrives. If an error occurred attempting to send it, or if the
	 * operation is unidirectional, record the result of the operation and
	 * schedule its completion.
	 */
	if (message == operation->response) {
		if (status) {
			dev_err(&connection->hd->dev,
				"%s: error sending response 0x%02x: %d\n",
				connection->name, operation->type, status);
		}

		gb_operation_put_active(operation);
		gb_operation_put(operation);
	} else if (status || gb_operation_is_unidirectional(operation)) {
		if (gb_operation_result_set(operation, status)) {
			queue_work(gb_operation_completion_wq,
					&operation->work);
		}
	}
}
EXPORT_SYMBOL_GPL(greybus_message_sent);

/*
 * We've received data on a connection, and it doesn't look like a
 * response, so we assume it's a request.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the request buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_request(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	u16 operation_id;
	u8 type;
	int ret;

	operation_id = le16_to_cpu(header->operation_id);
	type = header->type;

	operation = gb_operation_create_incoming(connection, operation_id,
						type, data, size);
	if (!operation) {
		dev_err(&connection->hd->dev,
			"%s: can't create incoming operation\n",
			connection->name);
		return;
	}

	ret = gb_operation_get_active(operation);
	if (ret) {
		gb_operation_put(operation);
		return;
	}
	trace_gb_message_recv_request(operation->request);

	/*
	 * The initial reference to the operation will be dropped when the
	 * request handler returns.
	 */
	if (gb_operation_result_set(operation, -EINPROGRESS))
		queue_work(connection->wq, &operation->work);
}

/*
 * We've received data that appears to be an operation response
 * message. Look up the operation, and record that we've received
 * its response.
 *
 * This is called in interrupt context, so just copy the incoming
 * data into the response buffer and handle the rest via workqueue.
 */
static void gb_connection_recv_response(struct gb_connection *connection,
				const struct gb_operation_msg_hdr *header,
				void *data, size_t size)
{
	struct gb_operation *operation;
	struct gb_message *message;
	size_t message_size;
	u16 operation_id;
	int errno;

	operation_id = le16_to_cpu(header->operation_id);

	if (!operation_id) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: invalid response id 0 received\n",
				connection->name);
		return;
	}

	operation = gb_operation_find_outgoing(connection, operation_id);
	if (!operation) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: unexpected response id 0x%04x received\n",
				connection->name, operation_id);
		return;
	}

	errno = gb_operation_status_map(header->result);
	message = operation->response;
	message_size = sizeof(*header) + message->payload_size;
	if (!errno && size > message_size) {
		dev_err_ratelimited(&connection->hd->dev,
				"%s: malformed response 0x%02x received (%zu > %zu)\n",
				connection->name, header->type,
				size, message_size);
		errno = -EMSGSIZE;
	} else if (!errno && size < message_size) {
		if (gb_operation_short_response_allowed(operation)) {
			message->payload_size = size - sizeof(*header);
		} else {
			dev_err_ratelimited(&connection->hd->dev,
					"%s: short response 0x%02x received (%zu < %zu)\n",
					connection->name, header->type,
					size, message_size);
			errno = -EMSGSIZE;
		}
	}

	/* We must ignore the payload if a bad status is returned */
	if (errno)
		size = sizeof(*header);

	/* The rest will be handled in work queue context */
	if (gb_operation_result_set(operation, errno)) {
		memcpy(message->buffer, data, size);

		trace_gb_message_recv_response(message);

		queue_work(gb_operation_completion_wq, &operation->work);
	}

	gb_operation_put(operation);
}

/*
 * Handle data arriving on a connection. As soon as we return the
 * supplied data buffer will be reused (so unless we do something
 * with it, it's effectively dropped).
 */
void gb_connection_recv(struct gb_connection *connection,
				void *data, size_t size)
{
	struct gb_operation_msg_hdr header;
	struct device *dev = &connection->hd->dev;
	size_t msg_size;

	if (connection->state == GB_CONNECTION_STATE_DISABLED ||
			gb_connection_is_offloaded(connection)) {
		dev_warn_ratelimited(dev, "%s: dropping %zu received bytes\n",
				connection->name, size);
		return;
	}

	if (size < sizeof(header)) {
		dev_err_ratelimited(dev, "%s: short message received\n",
				connection->name);
		return;
	}

	/* Use memcpy as data may be unaligned */
	memcpy(&header, data, sizeof(header));
	msg_size = le16_to_cpu(header.size);
	if (size < msg_size) {
		dev_err_ratelimited(dev,
				"%s: incomplete message 0x%04x of type 0x%02x received (%zu < %zu)\n",
				connection->name,
				le16_to_cpu(header.operation_id),
				header.type, size, msg_size);
		return;		/* XXX Should still complete operation */
	}

	if (header.type & GB_MESSAGE_TYPE_RESPONSE) {
		gb_connection_recv_response(connection, &header, data,
						msg_size);
	} else {
		gb_connection_recv_request(connection, &header, data,
						msg_size);
	}
}

/*
 * Cancel an outgoing operation synchronously, and record the given error to
 * indicate why.
 */
void gb_operation_cancel(struct gb_operation *operation, int errno)
{
	if (WARN_ON(gb_operation_is_incoming(operation)))
		return;

	if (gb_operation_result_set(operation, errno)) {
		gb_message_cancel(operation->request);
		queue_work(gb_operation_completion_wq, &operation->work);
	}
	trace_gb_message_cancel_outgoing(operation->request);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}
EXPORT_SYMBOL_GPL(gb_operation_cancel);

/*
 * Cancel an incoming operation synchronously. Called during connection tear
 * down.
 */
void gb_operation_cancel_incoming(struct gb_operation *operation, int errno)
{
	if (WARN_ON(!gb_operation_is_incoming(operation)))
		return;

	if (!gb_operation_is_unidirectional(operation)) {
		/*
		 * Make sure the request handler has submitted the response
		 * before cancelling it.
		 */
		flush_work(&operation->work);
		if (!gb_operation_result_set(operation, errno))
			gb_message_cancel(operation->response);
	}
	trace_gb_message_cancel_incoming(operation->response);

	atomic_inc(&operation->waiters);
	wait_event(gb_operation_cancellation_queue,
			!gb_operation_is_active(operation));
	atomic_dec(&operation->waiters);
}

/**
 * gb_operation_sync_timeout() - implement a "simple" synchronous operation
 * @connection: the Greybus connection to send this to
 * @type: the type of operation to send
 * @request: pointer to a memory buffer to copy the request from
 * @request_size: size of @request
 * @response: pointer to a memory buffer to copy the response to
 * @response_size: the size of @response.
 * @timeout: operation timeout in milliseconds
 *
 * This function implements a simple synchronous Greybus operation. It sends
 * the provided operation request and waits (sleeps) until the corresponding
 * operation response message has been successfully received, or an error
 * occurs. @request and @response are buffers to hold the request and response
 * data respectively, and if they are not NULL, their size must be specified in
 * @request_size and @response_size.
 *
 * If a response payload is to come back, and @response is not NULL,
 * @response_size number of bytes will be copied into @response if the
 * operation is successful.
 *
 * If there is an error, the response buffer is left alone.
 */
int gb_operation_sync_timeout(struct gb_connection *connection, int type,
				void *request, int request_size,
				void *response, int response_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if ((response_size && !response) ||
	    (request_size && !request))
		return -EINVAL;

	operation = gb_operation_create(connection, type,
					request_size, response_size,
					GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: synchronous operation id 0x%04x of type 0x%02x failed: %d\n",
			connection->name, operation->id, type, ret);
	} else {
		if (response_size) {
			memcpy(response, operation->response->payload,
			       response_size);
		}
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_sync_timeout);
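
/*
 * Illustrative sketch (not part of the original driver): a synchronous call
 * through gb_operation_sync_timeout(). The operation type (0x42), the
 * request/response buffer contents and the 1000 ms timeout are assumptions
 * for illustration; on success up to sizeof(response) bytes of response
 * payload are copied into the local buffer.
 */
static int __maybe_unused
gb_operation_example_sync(struct gb_connection *connection)
{
	u8 request[2] = { 0x01, 0x02 };
	u8 response[8];

	/* Sleeps until the response arrives, an error occurs, or 1000 ms pass. */
	return gb_operation_sync_timeout(connection, 0x42,
					 request, sizeof(request),
					 response, sizeof(response),
					 1000);
}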

/**
 * gb_operation_unidirectional_timeout() - initiate a unidirectional operation
 * @connection: connection to use
 * @type: type of operation to send
 * @request: memory buffer to copy the request from
 * @request_size: size of @request
 * @timeout: send timeout in milliseconds
 *
 * Initiate a unidirectional operation by sending a request message and
 * waiting for it to be acknowledged as sent by the host device.
 *
 * Note that successful send of a unidirectional operation does not imply that
 * the request has actually reached the remote end of the connection.
 */
int gb_operation_unidirectional_timeout(struct gb_connection *connection,
				int type, void *request, int request_size,
				unsigned int timeout)
{
	struct gb_operation *operation;
	int ret;

	if (request_size && !request)
		return -EINVAL;

	operation = gb_operation_create_flags(connection, type,
						request_size, 0,
						GB_OPERATION_FLAG_UNIDIRECTIONAL,
						GFP_KERNEL);
	if (!operation)
		return -ENOMEM;

	if (request_size)
		memcpy(operation->request->payload, request, request_size);

	ret = gb_operation_request_send_sync_timeout(operation, timeout);
	if (ret) {
		dev_err(&connection->hd->dev,
			"%s: unidirectional operation of type 0x%02x failed: %d\n",
			connection->name, type, ret);
	}

	gb_operation_put(operation);

	return ret;
}
EXPORT_SYMBOL_GPL(gb_operation_unidirectional_timeout);
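
/*
 * Illustrative sketch (not part of the original driver): a unidirectional
 * send through gb_operation_unidirectional_timeout(). Only the handoff to
 * the host device is awaited; no response message ever comes back. The
 * operation type (0x42), the payload and the timeout are assumptions for
 * illustration.
 */
static int __maybe_unused
gb_operation_example_unidirectional(struct gb_connection *connection)
{
	u8 request[1] = { 0xff };

	return gb_operation_unidirectional_timeout(connection, 0x42,
						   request, sizeof(request),
						   1000);
}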

int __init gb_operation_init(void)
{
	gb_message_cache = kmem_cache_create("gb_message_cache",
				sizeof(struct gb_message), 0, 0, NULL);
	if (!gb_message_cache)
		return -ENOMEM;

	gb_operation_cache = kmem_cache_create("gb_operation_cache",
				sizeof(struct gb_operation), 0, 0, NULL);
	if (!gb_operation_cache)
		goto err_destroy_message_cache;

	gb_operation_completion_wq = alloc_workqueue("greybus_completion",
				0, 0);
	if (!gb_operation_completion_wq)
		goto err_destroy_operation_cache;

	return 0;

err_destroy_operation_cache:
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
err_destroy_message_cache:
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;

	return -ENOMEM;
}

void gb_operation_exit(void)
{
	destroy_workqueue(gb_operation_completion_wq);
	gb_operation_completion_wq = NULL;
	kmem_cache_destroy(gb_operation_cache);
	gb_operation_cache = NULL;
	kmem_cache_destroy(gb_message_cache);
	gb_message_cache = NULL;
}