/*
 * ISHTP client logic
 *
 * Copyright (c) 2003-2016, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include "hbm.h"
#include "client.h"

/**
 * ishtp_read_list_flush() - Flush read queue
 * @cl: ishtp client instance
 *
 * Used to remove all entries from read queue for a client
 */
static void ishtp_read_list_flush(struct ishtp_cl *cl)
{
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *next;
	unsigned long flags;

	spin_lock_irqsave(&cl->dev->read_list_spinlock, flags);
	list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)
		if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
		}
	spin_unlock_irqrestore(&cl->dev->read_list_spinlock, flags);
}

/**
 * ishtp_cl_flush_queues() - Flush all queues for a client
 * @cl: ishtp client instance
 *
 * Used to remove all queues for a client. This is called when a client device
 * needs reset due to error, S3 resume or during module removal
 *
 * Return: 0 on success else -EINVAL if device is NULL
 */
int ishtp_cl_flush_queues(struct ishtp_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	ishtp_read_list_flush(cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_flush_queues);

/**
 * ishtp_cl_init() - Initialize all fields of a client device
 * @cl: ishtp client instance
 * @dev: ishtp device
 *
 * Initializes the client device fields: init spinlocks, init queues etc.
 * This function is called during new client creation
 */
static void ishtp_cl_init(struct ishtp_cl *cl, struct ishtp_device *dev)
{
	memset(cl, 0, sizeof(struct ishtp_cl));
	init_waitqueue_head(&cl->wait_ctrl_res);
	spin_lock_init(&cl->free_list_spinlock);
	spin_lock_init(&cl->in_process_spinlock);
	spin_lock_init(&cl->tx_list_spinlock);
	spin_lock_init(&cl->tx_free_list_spinlock);
	spin_lock_init(&cl->fc_spinlock);
	INIT_LIST_HEAD(&cl->link);
	cl->dev = dev;

	INIT_LIST_HEAD(&cl->free_rb_list.list);
	INIT_LIST_HEAD(&cl->tx_list.list);
	INIT_LIST_HEAD(&cl->tx_free_list.list);
	INIT_LIST_HEAD(&cl->in_process_list.list);

	cl->rx_ring_size = CL_DEF_RX_RING_SIZE;
	cl->tx_ring_size = CL_DEF_TX_RING_SIZE;

	/* dma */
	cl->last_tx_path = CL_TX_PATH_IPC;
	cl->last_dma_acked = 1;
	cl->last_dma_addr = NULL;
	cl->last_ipc_acked = 1;
}

/**
 * ishtp_cl_allocate() - allocates client structure and sets it up.
 * @dev: ishtp device
 *
 * Allocate memory for a new client device and call to initialize each field.
 *
 * Return: The allocated client instance or NULL on failure
 */
struct ishtp_cl *ishtp_cl_allocate(struct ishtp_device *dev)
{
	struct ishtp_cl *cl;

	cl = kmalloc(sizeof(struct ishtp_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	ishtp_cl_init(cl, dev);
	return cl;
}
EXPORT_SYMBOL(ishtp_cl_allocate);

/**
 * ishtp_cl_free() - Frees a client device
 * @cl: client device instance
 *
 * Frees a client device
 */
void ishtp_cl_free(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	unsigned long flags;

	if (!cl)
		return;

	dev = cl->dev;
	if (!dev)
		return;

	spin_lock_irqsave(&dev->cl_list_lock, flags);
	ishtp_cl_free_rx_ring(cl);
	ishtp_cl_free_tx_ring(cl);
	kfree(cl);
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_free);

/**
 * ishtp_cl_link() - Reserve a host id and link the client instance
 * @cl: client device instance
 * @id: host client id to use. It can be ISHTP_HOST_CLIENT_ID_ANY if any
 *	id from the available pool can be used
 *
 * This allocates a single bit in the hostmap. This function will make sure
 * that not too many client sessions are opened at the same time. Once
 * allocated, the client device instance is added to the ishtp device in the
 * current client list.
 *
 * Return: 0 or error code on failure
 */
int ishtp_cl_link(struct ishtp_cl *cl, int id)
{
	struct ishtp_device *dev;
	unsigned long flags, flags_cl;
	int ret = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count >= ISHTP_MAX_OPEN_HANDLE_COUNT) {
		ret = -EMFILE;
		goto unlock_dev;
	}

	/* If Id is not assigned get one */
	if (id == ISHTP_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
			ISHTP_CLIENTS_MAX);

	if (id >= ISHTP_CLIENTS_MAX) {
		spin_unlock_irqrestore(&dev->device_lock, flags);
		dev_err(&cl->device->dev, "id exceeded %d", ISHTP_CLIENTS_MAX);
		return -ENOENT;
	}

	dev->open_handle_count++;
	cl->host_client_id = id;
	spin_lock_irqsave(&dev->cl_list_lock, flags_cl);
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		ret = -ENODEV;
		goto unlock_cl;
	}
	list_add_tail(&cl->link, &dev->cl_list);
	set_bit(id, dev->host_clients_map);
	cl->state = ISHTP_CL_INITIALIZING;

unlock_cl:
	spin_unlock_irqrestore(&dev->cl_list_lock, flags_cl);
unlock_dev:
	spin_unlock_irqrestore(&dev->device_lock, flags);
	return ret;
}
EXPORT_SYMBOL(ishtp_cl_link);

/**
 * ishtp_cl_unlink() - remove fw_cl from the client device list
 * @cl: client device instance
 *
 * Remove a previously linked client from the ishtp device's client list
 */
void ishtp_cl_unlink(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	/* don't shout on error exit path */
	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	spin_lock_irqsave(&dev->device_lock, flags);
	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	spin_unlock_irqrestore(&dev->device_lock, flags);

	/*
	 * This checks that 'cl' is actually linked into device's structure,
	 * before attempting 'list_del'
	 */
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link)
		if (cl->host_client_id == pos->host_client_id) {
			list_del_init(&pos->link);
			break;
		}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);
}
EXPORT_SYMBOL(ishtp_cl_unlink);

/**
 * ishtp_cl_disconnect() - Send disconnect request to firmware
 * @cl: client device instance
 *
 * Send a disconnect request for a client to firmware.
 *
 * Return: 0 if successful disconnect response from the firmware or error
 * code on failure
 */
int ishtp_cl_disconnect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() state %d\n", __func__, cl->state);

	if (cl->state != ISHTP_CL_DISCONNECTING) {
		dev->print_log(dev, "%s() Disconnect in progress\n", __func__);
		return 0;
	}

	if (ishtp_hbm_cl_disconnect_req(dev, cl)) {
		dev->print_log(dev, "%s() Failed to disconnect\n", __func__);
		dev_err(&cl->device->dev, "failed to disconnect.\n");
		return -ENODEV;
	}

	err = wait_event_interruptible_timeout(cl->wait_ctrl_res,
			(dev->dev_state != ISHTP_DEV_ENABLED ||
			cl->state == ISHTP_CL_DISCONNECTED),
			ishtp_secs_to_jiffies(ISHTP_CL_CONNECT_TIMEOUT));

	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -ENODEV;
	}

	if (cl->state == ISHTP_CL_DISCONNECTED) {
		dev->print_log(dev, "%s() successful\n", __func__);
		return 0;
	}

	return -ENODEV;
}
EXPORT_SYMBOL(ishtp_cl_disconnect);

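/*
 * Illustrative teardown sketch (not part of this translation unit): a hedged
 * example of how a caller might tear a client down, modeled on existing
 * ISHTP client drivers. The function name is hypothetical; only the
 * ishtp_cl_* calls are from this file.
 */
#if 0
static void example_client_teardown(struct ishtp_cl *cl)
{
	/* Mark intent first: ishtp_cl_disconnect() bails out otherwise */
	cl->state = ISHTP_CL_DISCONNECTING;
	ishtp_cl_disconnect(cl);
	ishtp_cl_unlink(cl);
	ishtp_cl_flush_queues(cl);
	ishtp_cl_free(cl);	/* frees RX/TX rings and the client itself */
}
#endif
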
/**
 * ishtp_cl_is_other_connecting() - Check if another client is connecting
 * @cl: client device instance
 *
 * Checks if another client with the same fw client id is connecting
 *
 * Return: true if another such client is connecting, else false
 */
static bool ishtp_cl_is_other_connecting(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl *pos;
	unsigned long flags;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;
	spin_lock_irqsave(&dev->cl_list_lock, flags);
	list_for_each_entry(pos, &dev->cl_list, link) {
		if ((pos->state == ISHTP_CL_CONNECTING) && (pos != cl) &&
				cl->fw_client_id == pos->fw_client_id) {
			spin_unlock_irqrestore(&dev->cl_list_lock, flags);
			return true;
		}
	}
	spin_unlock_irqrestore(&dev->cl_list_lock, flags);

	return false;
}

/**
 * ishtp_cl_connect() - Send connect request to firmware
 * @cl: client device instance
 *
 * Send a connect request for a client to firmware. If successful it will
 * also allocate the RX and TX ring buffers
 *
 * Return: 0 if successful connect response from the firmware and able
 * to bind and allocate ring buffers or error code on failure
 */
int ishtp_cl_connect(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev->print_log(dev, "%s() current_state = %d\n", __func__, cl->state);

	if (ishtp_cl_is_other_connecting(cl)) {
		dev->print_log(dev, "%s() Busy\n", __func__);
		return -EBUSY;
	}

	if (ishtp_hbm_cl_connect_req(dev, cl)) {
		dev->print_log(dev, "%s() HBM connect req fail\n", __func__);
		return -ENODEV;
	}

	rets = wait_event_interruptible_timeout(cl->wait_ctrl_res,
				(dev->dev_state == ISHTP_DEV_ENABLED &&
				(cl->state == ISHTP_CL_CONNECTED ||
				 cl->state == ISHTP_CL_DISCONNECTED)),
				ishtp_secs_to_jiffies(
					ISHTP_CL_CONNECT_TIMEOUT));
	/*
	 * If FW reset arrived, this will happen. Don't check cl->,
	 * as 'cl' may be freed already
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		dev->print_log(dev, "%s() dev_state != ISHTP_DEV_ENABLED\n",
			       __func__);
		return -EFAULT;
	}

	if (cl->state != ISHTP_CL_CONNECTED) {
		dev->print_log(dev, "%s() state != ISHTP_CL_CONNECTED\n",
			       __func__);
		return -EFAULT;
	}

	rets = cl->status;
	if (rets) {
		dev->print_log(dev, "%s() Invalid status\n", __func__);
		return rets;
	}

	rets = ishtp_cl_device_bind(cl);
	if (rets) {
		dev->print_log(dev, "%s() Bind error\n", __func__);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_rx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc RX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_disconnect(cl);
		return rets;
	}

	rets = ishtp_cl_alloc_tx_ring(cl);
	if (rets) {
		dev->print_log(dev, "%s() Alloc TX ring failed\n", __func__);
		/* if failed allocation, disconnect */
		ishtp_cl_free_rx_ring(cl);
		ishtp_cl_disconnect(cl);
		return rets;
	}

	/* Upon successful connection and allocation, emit flow-control */
	rets = ishtp_cl_read_start(cl);

	dev->print_log(dev, "%s() successful\n", __func__);

	return rets;
}
EXPORT_SYMBOL(ishtp_cl_connect);

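/*
 * Illustrative bring-up sketch (not part of this translation unit): a hedged
 * example of the allocate -> link -> connect sequence, modeled on existing
 * ISHTP client drivers. The function name and the fw_client_idx parameter
 * are hypothetical, and the fw_clients[].client_id field is assumed from the
 * ISHTP headers; only the ishtp_cl_* calls are from this file.
 */
#if 0
static struct ishtp_cl *example_client_bringup(struct ishtp_device *dev,
					       int fw_client_idx)
{
	struct ishtp_cl *cl;

	cl = ishtp_cl_allocate(dev);
	if (!cl)
		return NULL;

	if (ishtp_cl_link(cl, ISHTP_HOST_CLIENT_ID_ANY))
		goto out_free;

	/* Bind to the fw client and mark the connect attempt */
	cl->fw_client_id = dev->fw_clients[fw_client_idx].client_id;
	cl->state = ISHTP_CL_CONNECTING;

	/* On success this also binds and allocates the RX/TX rings */
	if (ishtp_cl_connect(cl))
		goto out_unlink;

	return cl;

out_unlink:
	ishtp_cl_unlink(cl);
out_free:
	ishtp_cl_free(cl);
	return NULL;
}
#endif
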
/**
 * ishtp_cl_read_start() - Prepare to read client message
 * @cl: client device instance
 *
 * Get a free buffer from the pool of free read buffers, queue it on the
 * device read list to receive contents, and send a flow control request to
 * firmware so it may send the next message.
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_read_start(struct ishtp_cl *cl)
{
	struct ishtp_device *dev;
	struct ishtp_cl_rb *rb;
	int rets;
	int i;
	unsigned long flags;
	unsigned long dev_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return -ENODEV;

	i = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (i < 0) {
		dev_err(&cl->device->dev, "no such fw client %d\n",
			cl->fw_client_id);
		return -ENODEV;
	}

	/* The current rb is the head of the free rb list */
	spin_lock_irqsave(&cl->free_list_spinlock, flags);
	if (list_empty(&cl->free_rb_list.list)) {
		dev_warn(&cl->device->dev,
			 "[ishtp-ish] Rx buffers pool is empty\n");
		rets = -ENOMEM;
		rb = NULL;
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
		goto out;
	}
	rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);
	list_del_init(&rb->list);
	spin_unlock_irqrestore(&cl->free_list_spinlock, flags);

	rb->cl = cl;
	rb->buf_idx = 0;

	INIT_LIST_HEAD(&rb->list);
	rets = 0;

	/*
	 * This must be BEFORE sending flow control -
	 * response in ISR may come too fast...
	 */
	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_add_tail(&rb->list, &dev->read_list.list);
	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	if (ishtp_hbm_cl_flow_control_req(dev, cl)) {
		rets = -ENODEV;
		goto out;
	}
out:
	/* if ishtp_hbm_cl_flow_control_req failed, return rb to free list */
	if (rets && rb) {
		spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
		list_del(&rb->list);
		spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);

		spin_lock_irqsave(&cl->free_list_spinlock, flags);
		list_add_tail(&rb->list, &cl->free_rb_list.list);
		spin_unlock_irqrestore(&cl->free_list_spinlock, flags);
	}
	return rets;
}

/**
 * ishtp_cl_send() - Send a message to firmware
 * @cl: client device instance
 * @buf: message buffer
 * @length: length of message
 *
 * If the client is in the correct state to send a message, this function
 * gets a buffer from the tx ring, copies the message data into it and queues
 * the message for sending via ishtp_cl_send_msg()
 *
 * Return: 0 if successful or error code on failure
 */
int ishtp_cl_send(struct ishtp_cl *cl, uint8_t *buf, size_t length)
{
	struct ishtp_device *dev;
	int id;
	struct ishtp_cl_tx_ring *cl_msg;
	int have_msg_to_send = 0;
	unsigned long tx_flags, tx_free_flags;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != ISHTP_CL_CONNECTED) {
		++cl->err_send_msg;
		return -EPIPE;
	}

	if (dev->dev_state != ISHTP_DEV_ENABLED) {
		++cl->err_send_msg;
		return -ENODEV;
	}

	/* Check if we have fw client device */
	id = ishtp_fw_cl_by_id(dev, cl->fw_client_id);
	if (id < 0) {
		++cl->err_send_msg;
		return -ENOENT;
	}

	if (length > dev->fw_clients[id].props.max_msg_length) {
		++cl->err_send_msg;
		return -EMSGSIZE;
	}

	/* No free bufs */
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	if (list_empty(&cl->tx_free_list.list)) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
			tx_free_flags);
		++cl->err_send_msg;
		return -ENOMEM;
	}

	cl_msg = list_first_entry(&cl->tx_free_list.list,
		struct ishtp_cl_tx_ring, list);
	if (!cl_msg->send_buf.data) {
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
			tx_free_flags);
		/* Should not happen, as free list is pre-allocated */
		return -EIO;
	}
	/*
	 * This is safe, as 'length' is already checked for not exceeding
	 * max ISHTP message size per client
	 */
	list_del_init(&cl_msg->list);
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	memcpy(cl_msg->send_buf.data, buf, length);
	cl_msg->send_buf.size = length;
	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	have_msg_to_send = !list_empty(&cl->tx_list.list);
	list_add_tail(&cl_msg->list, &cl->tx_list.list);
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	if (!have_msg_to_send && cl->ishtp_flow_ctrl_creds > 0)
		ishtp_cl_send_msg(dev, cl);

	return 0;
}
EXPORT_SYMBOL(ishtp_cl_send);

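/*
 * Illustrative usage sketch (not part of this translation unit): sending a
 * request from process context. The payload bytes are a hypothetical
 * placeholder; the point is that ishtp_cl_send() copies the caller's buffer
 * into a TX ring buffer, so the buffer may live on the stack and be reused
 * as soon as the call returns.
 */
#if 0
static int example_send_request(struct ishtp_cl *cl)
{
	uint8_t req[2] = { 0x01, 0x00 };	/* hypothetical payload */
	int rv;

	rv = ishtp_cl_send(cl, req, sizeof(req));
	if (rv)		/* -EPIPE, -ENODEV, -ENOENT, -EMSGSIZE or -ENOMEM */
		return rv;

	/* Transmission proceeds asynchronously, paced by FC credits */
	return 0;
}
#endif
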
/**
 * ishtp_cl_read_complete() - read complete
 * @rb: Pointer to client request block
 *
 * If the message is completely received, call ishtp_cl_bus_rx_event()
 * to process the message
 */
static void ishtp_cl_read_complete(struct ishtp_cl_rb *rb)
{
	unsigned long flags;
	int schedule_work_flag = 0;
	struct ishtp_cl *cl = rb->cl;

	spin_lock_irqsave(&cl->in_process_spinlock, flags);
	/*
	 * if in-process list is empty, then need to schedule
	 * the processing thread
	 */
	schedule_work_flag = list_empty(&cl->in_process_list.list);
	list_add_tail(&rb->list, &cl->in_process_list.list);
	spin_unlock_irqrestore(&cl->in_process_spinlock, flags);

	if (schedule_work_flag)
		ishtp_cl_bus_rx_event(cl->device);
}

/**
 * ipc_tx_callback() - IPC tx callback function
 * @prm: Pointer to client device instance
 *
 * Send a message over IPC, either for the first time or from the completion
 * callback of the previous fragment
 */
static void ipc_tx_callback(void *prm)
{
	struct ishtp_cl *cl = prm;
	struct ishtp_cl_tx_ring *cl_msg;
	size_t rem;
	struct ishtp_device *dev = (cl ? cl->dev : NULL);
	struct ishtp_msg_hdr ishtp_hdr;
	unsigned long tx_flags, tx_free_flags;
	unsigned char *pmsg;

	if (!dev)
		return;

	/*
	 * Other conditions if some critical error has
	 * occurred before this callback is called
	 */
	if (dev->dev_state != ISHTP_DEV_ENABLED)
		return;

	if (cl->state != ISHTP_CL_CONNECTED)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (cl->ishtp_flow_ctrl_creds != 1 && !cl->sending) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	if (!cl->sending) {
		--cl->ishtp_flow_ctrl_creds;
		cl->last_ipc_acked = 0;
		cl->last_tx_path = CL_TX_PATH_IPC;
		cl->sending = 1;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);
	rem = cl_msg->send_buf.size - cl->tx_offs;

	ishtp_hdr.host_addr = cl->host_client_id;
	ishtp_hdr.fw_addr = cl->fw_client_id;
	ishtp_hdr.reserved = 0;
	pmsg = cl_msg->send_buf.data + cl->tx_offs;

	if (rem <= dev->mtu) {
		ishtp_hdr.length = rem;
		ishtp_hdr.msg_complete = 1;
		cl->sending = 0;
		list_del_init(&cl_msg->list);	/* Must be before write */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		/* Submit to IPC queue with no callback */
		ishtp_write_message(dev, &ishtp_hdr, pmsg);
		spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
		list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
		spin_unlock_irqrestore(&cl->tx_free_list_spinlock,
			tx_free_flags);
	} else {
		/* Send IPC fragment */
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		cl->tx_offs += dev->mtu;
		ishtp_hdr.length = dev->mtu;
		ishtp_hdr.msg_complete = 0;
		ishtp_send_msg(dev, &ishtp_hdr, pmsg, ipc_tx_callback, cl);
	}
}

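/*
 * Worked example (assumed numbers, for illustration only): with a
 * hypothetical dev->mtu of 128 bytes and a 300-byte message queued on
 * tx_list, ipc_tx_callback() runs three times:
 *
 *   pass 1: rem = 300 - 0   = 300 > mtu -> send 128, msg_complete = 0
 *   pass 2: rem = 300 - 128 = 172 > mtu -> send 128, msg_complete = 0
 *   pass 3: rem = 300 - 256 = 44 <= mtu -> send 44,  msg_complete = 1,
 *           buffer returned to tx_free_list
 *
 * Passes 2 and 3 arrive via the completion callback that ishtp_send_msg()
 * registers. Only one FC credit is consumed for the whole message, because
 * cl->sending stays set across fragments.
 */
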
/**
 * ishtp_cl_send_msg_ipc() - Send message using IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send a message over IPC, not using DMA
 */
static void ishtp_cl_send_msg_ipc(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	/* If last DMA message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_DMA && cl->last_dma_acked == 0)
		return;

	cl->tx_offs = 0;
	ipc_tx_callback(cl);
	++cl->send_msg_cnt_ipc;
}

/**
 * ishtp_cl_send_msg_dma() - Send message using DMA
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send a message using DMA
 */
static void ishtp_cl_send_msg_dma(struct ishtp_device *dev,
				  struct ishtp_cl *cl)
{
	struct ishtp_msg_hdr hdr;
	struct dma_xfer_hbm dma_xfer;
	unsigned char *msg_addr;
	int off;
	struct ishtp_cl_tx_ring *cl_msg;
	unsigned long tx_flags, tx_free_flags;

	/* If last IPC message wasn't acked yet, leave this one in Tx queue */
	if (cl->last_tx_path == CL_TX_PATH_IPC && cl->last_ipc_acked == 0)
		return;

	spin_lock_irqsave(&cl->tx_list_spinlock, tx_flags);
	if (list_empty(&cl->tx_list.list)) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		return;
	}

	cl_msg = list_entry(cl->tx_list.list.next, struct ishtp_cl_tx_ring,
			    list);

	msg_addr = ishtp_cl_get_dma_send_buf(dev, cl_msg->send_buf.size);
	if (!msg_addr) {
		spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);
		if (dev->transfer_path == CL_TX_PATH_DEFAULT)
			ishtp_cl_send_msg_ipc(dev, cl);
		return;
	}

	list_del_init(&cl_msg->list);	/* Must be before write */
	spin_unlock_irqrestore(&cl->tx_list_spinlock, tx_flags);

	--cl->ishtp_flow_ctrl_creds;
	cl->last_dma_acked = 0;
	cl->last_dma_addr = msg_addr;
	cl->last_tx_path = CL_TX_PATH_DMA;

	/* write msg to dma buf */
	memcpy(msg_addr, cl_msg->send_buf.data, cl_msg->send_buf.size);

	/* send dma_xfer hbm msg */
	off = msg_addr - (unsigned char *)dev->ishtp_host_dma_tx_buf;
	ishtp_hbm_hdr(&hdr, sizeof(struct dma_xfer_hbm));
	dma_xfer.hbm = DMA_XFER;
	dma_xfer.fw_client_id = cl->fw_client_id;
	dma_xfer.host_client_id = cl->host_client_id;
	dma_xfer.reserved = 0;
	dma_xfer.msg_addr = dev->ishtp_host_dma_tx_buf_phys + off;
	dma_xfer.msg_length = cl_msg->send_buf.size;
	dma_xfer.reserved2 = 0;
	ishtp_write_message(dev, &hdr, (unsigned char *)&dma_xfer);
	spin_lock_irqsave(&cl->tx_free_list_spinlock, tx_free_flags);
	list_add_tail(&cl_msg->list, &cl->tx_free_list.list);
	spin_unlock_irqrestore(&cl->tx_free_list_spinlock, tx_free_flags);
	++cl->send_msg_cnt_dma;
}

/**
 * ishtp_cl_send_msg() - Send message using DMA or IPC
 * @dev: ISHTP device instance
 * @cl: Pointer to client device instance
 *
 * Send a message using DMA or IPC based on transfer_path
 */
void ishtp_cl_send_msg(struct ishtp_device *dev, struct ishtp_cl *cl)
{
	if (dev->transfer_path == CL_TX_PATH_DMA)
		ishtp_cl_send_msg_dma(dev, cl);
	else
		ishtp_cl_send_msg_ipc(dev, cl);
}

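/*
 * Note on path arbitration (summarizing the checks above, not new behavior):
 * the two senders serialize against each other through last_tx_path,
 * last_dma_acked and last_ipc_acked. A DMA send is deferred while the last
 * IPC message is unacked, and vice versa, so a message simply stays on
 * tx_list until the other path drains. When no DMA buffer is available and
 * transfer_path is CL_TX_PATH_DEFAULT, the DMA sender falls back to IPC
 * rather than stalling the queue.
 */
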
/**
 * recv_ishtp_cl_msg() - Receive client message
 * @dev: ISHTP device instance
 * @ishtp_hdr: Pointer to message header
 *
 * Receive and dispatch ISHTP client messages. This function executes in ISR
 * context
 */
void recv_ishtp_cl_msg(struct ishtp_device *dev,
		       struct ishtp_msg_hdr *ishtp_hdr)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long dev_flags;
	unsigned long flags;
	int rb_count;

	if (ishtp_hdr->reserved) {
		dev_err(dev->devc, "corrupted message header.\n");
		goto eoi;
	}

	if (ishtp_hdr->length > IPC_PAYLOAD_SIZE) {
		dev_err(dev->devc,
			"ISHTP message length in hdr exceeds IPC MTU\n");
		goto eoi;
	}

	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	rb_count = -1;
	list_for_each_entry(rb, &dev->read_list.list, list) {
		++rb_count;
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == ishtp_hdr->host_addr &&
				cl->fw_client_id == ishtp_hdr->fw_addr) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/* If no Rx buffer is allocated, disband the rb */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock,
				dev_flags);
			dev_err(&cl->device->dev,
				"Rx buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message buffer is overflown (exceeds max. client
		 * msg size), drop the message and return the buffer to the
		 * free pool. Do we need to disconnect such a client? (We
		 * don't send back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < ishtp_hdr->length + rb->buf_idx) {
			spin_unlock_irqrestore(&dev->read_list_spinlock,
				dev_flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, ishtp_hdr->length,
				rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data + rb->buf_idx;
		dev->ops->ishtp_read(dev, buffer, ishtp_hdr->length);

		rb->buf_idx += ishtp_hdr->length;
		if (ishtp_hdr->msg_complete) {
			/* Last fragment in message - it's complete */
			cl->status = 0;
			list_del(&rb->list);
			complete_rb = rb;

			--cl->out_flow_ctrl_creds;
			/*
			 * the whole msg arrived, send a new FC, and add a new
			 * rb buffer for the next coming msg
			 */
			spin_lock_irqsave(&cl->free_list_spinlock, flags);

			if (!list_empty(&cl->free_rb_list.list)) {
				new_rb = list_entry(cl->free_rb_list.list.next,
					struct ishtp_cl_rb, list);
				list_del_init(&new_rb->list);
				spin_unlock_irqrestore(&cl->free_list_spinlock,
					flags);
				new_rb->cl = cl;
				new_rb->buf_idx = 0;
				INIT_LIST_HEAD(&new_rb->list);
				list_add_tail(&new_rb->list,
					&dev->read_list.list);

				ishtp_hbm_cl_flow_control_req(dev, cl);
			} else {
				spin_unlock_irqrestore(&cl->free_list_spinlock,
					flags);
			}
		}
		/* One more fragment in message (even if this was last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		uint8_t rd_msg_buf[ISHTP_RD_MSG_BUF_SIZE];

		dev_err(dev->devc, "Dropped Rx msg - no request\n");
		dev->ops->ishtp_read(dev, rd_msg_buf, ishtp_hdr->length);
		goto eoi;
	}

	if (complete_rb) {
		getnstimeofday(&cl->ts_rx);
		++cl->recv_msg_cnt_ipc;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}

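/*
 * Worked reassembly example (assumed numbers, for illustration only): if
 * firmware splits a 300-byte message into IPC fragments of 128, 128 and 44
 * bytes, the same rb accumulates them: buf_idx grows 0 -> 128 -> 256 -> 300.
 * Only the fragment carrying msg_complete = 1 detaches the rb, posts a
 * replacement rb plus a new FC credit, and hands the data to
 * ishtp_cl_read_complete().
 */
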
/**
 * recv_ishtp_cl_msg_dma() - Receive client message
 * @dev: ISHTP device instance
 * @msg: message pointer
 * @hbm: hbm buffer
 *
 * Receive and dispatch ISHTP client messages using DMA. This function
 * executes in ISR context
 */
void recv_ishtp_cl_msg_dma(struct ishtp_device *dev, void *msg,
			   struct dma_xfer_hbm *hbm)
{
	struct ishtp_cl *cl;
	struct ishtp_cl_rb *rb;
	struct ishtp_cl_rb *new_rb;
	unsigned char *buffer = NULL;
	struct ishtp_cl_rb *complete_rb = NULL;
	unsigned long dev_flags;
	unsigned long flags;

	spin_lock_irqsave(&dev->read_list_spinlock, dev_flags);
	list_for_each_entry(rb, &dev->read_list.list, list) {
		cl = rb->cl;
		if (!cl || !(cl->host_client_id == hbm->host_client_id &&
				cl->fw_client_id == hbm->fw_client_id) ||
				!(cl->state == ISHTP_CL_CONNECTED))
			continue;

		/*
		 * If no Rx buffer is allocated, disband the rb
		 */
		if (rb->buffer.size == 0 || rb->buffer.data == NULL) {
			spin_unlock_irqrestore(&dev->read_list_spinlock,
				dev_flags);
			dev_err(&cl->device->dev,
				"response buffer is not allocated.\n");
			list_del(&rb->list);
			ishtp_io_rb_free(rb);
			cl->status = -ENOMEM;
			goto eoi;
		}

		/*
		 * If the message buffer is overflown (exceeds max. client
		 * msg size), drop the message and return the buffer to the
		 * free pool. Do we need to disconnect such a client? (We
		 * don't send back FC, so communication will be stuck anyway)
		 */
		if (rb->buffer.size < hbm->msg_length) {
			spin_unlock_irqrestore(&dev->read_list_spinlock,
				dev_flags);
			dev_err(&cl->device->dev,
				"message overflow. size %d len %d idx %ld\n",
				rb->buffer.size, hbm->msg_length, rb->buf_idx);
			list_del(&rb->list);
			ishtp_cl_io_rb_recycle(rb);
			cl->status = -EIO;
			goto eoi;
		}

		buffer = rb->buffer.data;
		memcpy(buffer, msg, hbm->msg_length);
		rb->buf_idx = hbm->msg_length;

		/* Last fragment in message - it's complete */
		cl->status = 0;
		list_del(&rb->list);
		complete_rb = rb;

		--cl->out_flow_ctrl_creds;
		/*
		 * the whole msg arrived, send a new FC, and add a new
		 * rb buffer for the next coming msg
		 */
		spin_lock_irqsave(&cl->free_list_spinlock, flags);

		if (!list_empty(&cl->free_rb_list.list)) {
			new_rb = list_entry(cl->free_rb_list.list.next,
				struct ishtp_cl_rb, list);
			list_del_init(&new_rb->list);
			spin_unlock_irqrestore(&cl->free_list_spinlock,
				flags);
			new_rb->cl = cl;
			new_rb->buf_idx = 0;
			INIT_LIST_HEAD(&new_rb->list);
			list_add_tail(&new_rb->list,
				&dev->read_list.list);

			ishtp_hbm_cl_flow_control_req(dev, cl);
		} else {
			spin_unlock_irqrestore(&cl->free_list_spinlock,
				flags);
		}

		/* One more fragment in message (this is always last) */
		++cl->recv_msg_num_frags;

		/*
		 * We can safely break here (and in BH too),
		 * a single input message can go only to a single request!
		 */
		break;
	}

	spin_unlock_irqrestore(&dev->read_list_spinlock, dev_flags);
	/* If it's nobody's message, just read and discard it */
	if (!buffer) {
		dev_err(dev->devc, "Dropped Rx (DMA) msg - no request\n");
		goto eoi;
	}

	if (complete_rb) {
		getnstimeofday(&cl->ts_rx);
		++cl->recv_msg_cnt_dma;
		ishtp_cl_read_complete(complete_rb);
	}
eoi:
	return;
}