  1. /*
  2. * Greybus "AP" USB driver for "ES2" controller chips
  3. *
  4. * Copyright 2014-2015 Google Inc.
  5. * Copyright 2014-2015 Linaro Ltd.
  6. *
  7. * Released under the GPLv2 only.
  8. */
  9. #include <linux/kthread.h>
  10. #include <linux/sizes.h>
  11. #include <linux/usb.h>
  12. #include <linux/kfifo.h>
  13. #include <linux/debugfs.h>
  14. #include <linux/list.h>
  15. #include <asm/unaligned.h>
  16. #include "arpc.h"
  17. #include "greybus.h"
  18. #include "greybus_trace.h"
  19. #include "connection.h"
  20. /* Default timeout for USB vendor requests. */
  21. #define ES2_USB_CTRL_TIMEOUT 500
  22. /* Default timeout for ARPC CPort requests */
  23. #define ES2_ARPC_CPORT_TIMEOUT 500
  24. /* Fixed CPort numbers */
  25. #define ES2_CPORT_CDSI0 16
  26. #define ES2_CPORT_CDSI1 17
  27. /* Memory sizes for the buffers sent to/from the ES2 controller */
  28. #define ES2_GBUF_MSG_SIZE_MAX 2048
  29. /* Memory sizes for the ARPC buffers */
  30. #define ARPC_OUT_SIZE_MAX U16_MAX
  31. #define ARPC_IN_SIZE_MAX 128
  32. static const struct usb_device_id id_table[] = {
  33. { USB_DEVICE(0x18d1, 0x1eaf) },
  34. { },
  35. };
  36. MODULE_DEVICE_TABLE(usb, id_table);
  37. #define APB1_LOG_SIZE SZ_16K
  38. /*
  39. * Number of CPort IN urbs in flight at any point in time.
  40. * Adjust if we are having stalls in the USB buffer due to not enough urbs in
  41. * flight.
  42. */
  43. #define NUM_CPORT_IN_URB 4
  44. /* Number of CPort OUT urbs in flight at any point in time.
  45. * Adjust if we get messages saying we are out of urbs in the system log.
  46. */
  47. #define NUM_CPORT_OUT_URB 8
  48. /*
  49. * Number of ARPC in urbs in flight at any point in time.
  50. */
  51. #define NUM_ARPC_IN_URB 2
/*
 * struct es2_cport_in - state for the CPort bulk-in data path
 * @endpoint: bulk in endpoint for CPort data
 * @urb: array of urbs for the CPort in messages
 * @buffer: array of buffers for the @cport_in_urb urbs
 */
struct es2_cport_in {
	__u8 endpoint;
	struct urb *urb[NUM_CPORT_IN_URB];
	u8 *buffer[NUM_CPORT_IN_URB];
};
/**
 * es2_ap_dev - ES2 USB Bridge to AP structure
 * @usb_dev: pointer to the USB device we are.
 * @usb_intf: pointer to the USB interface we are bound to.
 * @hd: pointer to our gb_host_device structure
 * @cport_in: endpoint, urbs and buffer for cport in messages
 * @cport_out_endpoint: endpoint for cport out messages
 * @cport_out_urb: array of urbs for the CPort out messages
 * @cport_out_urb_busy: array of flags to see if the @cport_out_urb is busy or
 *			not.
 * @cport_out_urb_cancelled: array of flags indicating whether the
 *			corresponding @cport_out_urb is being cancelled
 * @cport_out_urb_lock: locks the @cport_out_urb_busy "list"
 * @cdsi1_in_use: true when the fixed CDSI1 CPort has been handed out to an
 *			offloaded connection (see es2_cport_allocate())
 *
 * @apb_log_task: task pointer for logging thread
 * @apb_log_dentry: file system entry for the log file interface
 * @apb_log_enable_dentry: file system entry for enabling logging
 * @apb_log_fifo: kernel FIFO to carry logged data
 * @arpc_urb: array of urbs for the ARPC in messages
 * @arpc_buffer: array of buffers for the @arpc_urb urbs
 * @arpc_endpoint_in: bulk in endpoint for APBridgeA RPC
 * @arpc_id_cycle: gives an unique id to ARPC
 * @arpc_lock: locks ARPC list
 * @arpcs: list of in progress ARPCs
 */
struct es2_ap_dev {
	struct usb_device *usb_dev;
	struct usb_interface *usb_intf;
	struct gb_host_device *hd;

	struct es2_cport_in cport_in;
	__u8 cport_out_endpoint;
	struct urb *cport_out_urb[NUM_CPORT_OUT_URB];
	bool cport_out_urb_busy[NUM_CPORT_OUT_URB];
	bool cport_out_urb_cancelled[NUM_CPORT_OUT_URB];
	spinlock_t cport_out_urb_lock;

	bool cdsi1_in_use;

	struct task_struct *apb_log_task;
	struct dentry *apb_log_dentry;
	struct dentry *apb_log_enable_dentry;
	DECLARE_KFIFO(apb_log_fifo, char, APB1_LOG_SIZE);

	__u8 arpc_endpoint_in;
	struct urb *arpc_urb[NUM_ARPC_IN_URB];
	u8 *arpc_buffer[NUM_ARPC_IN_URB];

	int arpc_id_cycle;
	spinlock_t arpc_lock;
	struct list_head arpcs;
};
/**
 * timesync_enable_request - Enable timesync in an APBridge
 * @count: number of TimeSync Pulses to expect
 * @frame_time: the initial FrameTime at the first TimeSync Pulse
 * @strobe_delay: the expected delay in microseconds between each TimeSync Pulse
 * @refclk: The AP mandated reference clock to run FrameTime at
 *
 * NOTE(review): timesync_enable() below uses the generated
 * gb_control_timesync_enable_request instead of this local definition;
 * this struct appears unused in this file - confirm before removing.
 */
struct timesync_enable_request {
	__u8 count;
	__le64 frame_time;
	__le32 strobe_delay;
	__le32 refclk;
} __packed;
/**
 * timesync_authoritative_request - Transmit authoritative FrameTime to APBridge
 * @frame_time: An array of authoritative FrameTimes provided by the SVC
 *		and relayed to the APBridge by the AP
 *
 * Wire format: little-endian, packed (sent as a USB vendor request).
 */
struct timesync_authoritative_request {
	__le64 frame_time[GB_TIMESYNC_MAX_STROBES];
} __packed;
/*
 * struct arpc - an in-flight APBridgeA RPC transaction
 * @list: entry in the es2_ap_dev.arpcs list (protected by arpc_lock)
 * @req: the request message being sent
 * @resp: the response message, presumably filled in by the ARPC in-urb
 *	  handler elsewhere in this file - confirm against arpc_sync()
 * @response_received: completed when a response for this ARPC arrives
 * @active: true while the request may still be matched with a response
 */
struct arpc {
	struct list_head list;
	struct arpc_request_message *req;
	struct arpc_response_message *resp;
	struct completion response_received;
	bool active;
};
/* Return the driver-private area of @hd, which holds our es2_ap_dev. */
static inline struct es2_ap_dev *hd_to_es2(struct gb_host_device *hd)
{
	return (struct es2_ap_dev *)&hd->hd_priv;
}
  141. static void cport_out_callback(struct urb *urb);
  142. static void usb_log_enable(struct es2_ap_dev *es2);
  143. static void usb_log_disable(struct es2_ap_dev *es2);
  144. static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
  145. size_t size, int *result, unsigned int timeout);
  146. static int output_sync(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
  147. {
  148. struct usb_device *udev = es2->usb_dev;
  149. u8 *data;
  150. int retval;
  151. data = kmalloc(size, GFP_KERNEL);
  152. if (!data)
  153. return -ENOMEM;
  154. memcpy(data, req, size);
  155. retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
  156. cmd,
  157. USB_DIR_OUT | USB_TYPE_VENDOR |
  158. USB_RECIP_INTERFACE,
  159. 0, 0, data, size, ES2_USB_CTRL_TIMEOUT);
  160. if (retval < 0)
  161. dev_err(&udev->dev, "%s: return error %d\n", __func__, retval);
  162. else
  163. retval = 0;
  164. kfree(data);
  165. return retval;
  166. }
/*
 * Completion handler for output_async(): the setup packet and payload
 * share one allocation (urb->context), freed here along with the urb.
 */
static void ap_urb_complete(struct urb *urb)
{
	struct usb_ctrlrequest *dr = urb->context;

	kfree(dr);
	usb_free_urb(urb);
}
/*
 * Queue a vendor control request without waiting for completion.
 *
 * Safe in atomic context (GFP_ATOMIC allocations).  The setup packet and
 * the payload are carved out of a single allocation whose ownership
 * passes to ap_urb_complete() on successful submission.
 *
 * Returns 0 if the urb was submitted, or a negative errno.
 */
static int output_async(struct es2_ap_dev *es2, void *req, u16 size, u8 cmd)
{
	struct usb_device *udev = es2->usb_dev;
	struct urb *urb;
	struct usb_ctrlrequest *dr;
	u8 *buf;
	int retval;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	/* One allocation: setup packet immediately followed by payload. */
	dr = kmalloc(sizeof(*dr) + size, GFP_ATOMIC);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	buf = (u8 *)dr + sizeof(*dr);
	memcpy(buf, req, size);

	dr->bRequest = cmd;
	dr->bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE;
	dr->wValue = 0;
	dr->wIndex = 0;
	dr->wLength = cpu_to_le16(size);

	usb_fill_control_urb(urb, udev, usb_sndctrlpipe(udev, 0),
			     (unsigned char *)dr, buf, size,
			     ap_urb_complete, dr);
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval) {
		/* Submission failed: we still own the urb and buffer. */
		usb_free_urb(urb);
		kfree(dr);
	}
	return retval;
}
  205. static int output(struct gb_host_device *hd, void *req, u16 size, u8 cmd,
  206. bool async)
  207. {
  208. struct es2_ap_dev *es2 = hd_to_es2(hd);
  209. if (async)
  210. return output_async(es2, req, size, cmd);
  211. return output_sync(es2, req, size, cmd);
  212. }
  213. static int es2_cport_in_enable(struct es2_ap_dev *es2,
  214. struct es2_cport_in *cport_in)
  215. {
  216. struct urb *urb;
  217. int ret;
  218. int i;
  219. for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
  220. urb = cport_in->urb[i];
  221. ret = usb_submit_urb(urb, GFP_KERNEL);
  222. if (ret) {
  223. dev_err(&es2->usb_dev->dev,
  224. "failed to submit in-urb: %d\n", ret);
  225. goto err_kill_urbs;
  226. }
  227. }
  228. return 0;
  229. err_kill_urbs:
  230. for (--i; i >= 0; --i) {
  231. urb = cport_in->urb[i];
  232. usb_kill_urb(urb);
  233. }
  234. return ret;
  235. }
  236. static void es2_cport_in_disable(struct es2_ap_dev *es2,
  237. struct es2_cport_in *cport_in)
  238. {
  239. struct urb *urb;
  240. int i;
  241. for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
  242. urb = cport_in->urb[i];
  243. usb_kill_urb(urb);
  244. }
  245. }
  246. static int es2_arpc_in_enable(struct es2_ap_dev *es2)
  247. {
  248. struct urb *urb;
  249. int ret;
  250. int i;
  251. for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
  252. urb = es2->arpc_urb[i];
  253. ret = usb_submit_urb(urb, GFP_KERNEL);
  254. if (ret) {
  255. dev_err(&es2->usb_dev->dev,
  256. "failed to submit arpc in-urb: %d\n", ret);
  257. goto err_kill_urbs;
  258. }
  259. }
  260. return 0;
  261. err_kill_urbs:
  262. for (--i; i >= 0; --i) {
  263. urb = es2->arpc_urb[i];
  264. usb_kill_urb(urb);
  265. }
  266. return ret;
  267. }
  268. static void es2_arpc_in_disable(struct es2_ap_dev *es2)
  269. {
  270. struct urb *urb;
  271. int i;
  272. for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
  273. urb = es2->arpc_urb[i];
  274. usb_kill_urb(urb);
  275. }
  276. }
  277. static struct urb *next_free_urb(struct es2_ap_dev *es2, gfp_t gfp_mask)
  278. {
  279. struct urb *urb = NULL;
  280. unsigned long flags;
  281. int i;
  282. spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
  283. /* Look in our pool of allocated urbs first, as that's the "fastest" */
  284. for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
  285. if (es2->cport_out_urb_busy[i] == false &&
  286. es2->cport_out_urb_cancelled[i] == false) {
  287. es2->cport_out_urb_busy[i] = true;
  288. urb = es2->cport_out_urb[i];
  289. break;
  290. }
  291. }
  292. spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
  293. if (urb)
  294. return urb;
  295. /*
  296. * Crap, pool is empty, complain to the syslog and go allocate one
  297. * dynamically as we have to succeed.
  298. */
  299. dev_dbg(&es2->usb_dev->dev,
  300. "No free CPort OUT urbs, having to dynamically allocate one!\n");
  301. return usb_alloc_urb(0, gfp_mask);
  302. }
  303. static void free_urb(struct es2_ap_dev *es2, struct urb *urb)
  304. {
  305. unsigned long flags;
  306. int i;
  307. /*
  308. * See if this was an urb in our pool, if so mark it "free", otherwise
  309. * we need to free it ourselves.
  310. */
  311. spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
  312. for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
  313. if (urb == es2->cport_out_urb[i]) {
  314. es2->cport_out_urb_busy[i] = false;
  315. urb = NULL;
  316. break;
  317. }
  318. }
  319. spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);
  320. /* If urb is not NULL, then we need to free this urb */
  321. usb_free_urb(urb);
  322. }
/*
 * We (ab)use the operation-message header pad bytes to transfer the
 * cport id in order to minimise overhead.
 */
static void
gb_message_cport_pack(struct gb_operation_msg_hdr *header, u16 cport_id)
{
	/*
	 * NOTE(review): only pad[0] is written although cport_id is u16,
	 * so ids wider than one pad element would be truncated - presumably
	 * valid ids fit; confirm against cport_id_valid()/num_cports.
	 */
	header->pad[0] = cport_id;
}
/* Clear the pad bytes used for the CPort id */
static void gb_message_cport_clear(struct gb_operation_msg_hdr *header)
{
	header->pad[0] = 0;
}
/* Extract the CPort id packed into the header, and clear it */
static u16 gb_message_cport_unpack(struct gb_operation_msg_hdr *header)
{
	u16 cport_id = header->pad[0];

	/* Restore the header to its on-the-wire neutral state. */
	gb_message_cport_clear(header);

	return cport_id;
}
/*
 * Returns zero if the message was successfully queued, or a negative errno
 * otherwise.
 */
static int message_send(struct gb_host_device *hd, u16 cport_id,
			struct gb_message *message, gfp_t gfp_mask)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	size_t buffer_size;
	int retval;
	struct urb *urb;
	unsigned long flags;

	/*
	 * The data actually transferred will include an indication
	 * of where the data should be sent.  Do one last check of
	 * the target CPort id before filling it in.
	 */
	if (!cport_id_valid(hd, cport_id)) {
		dev_err(&udev->dev, "invalid cport %u\n", cport_id);
		return -EINVAL;
	}

	/* Find a free urb */
	urb = next_free_urb(es2, gfp_mask);
	if (!urb)
		return -ENOMEM;

	/*
	 * Publish the urb under the lock so message_cancel() can find
	 * and kill it while the transfer is in flight.
	 */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = urb;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/* Pack the cport id into the message header */
	gb_message_cport_pack(message->header, cport_id);

	buffer_size = sizeof(*message->header) + message->payload_size;

	usb_fill_bulk_urb(urb, udev,
			  usb_sndbulkpipe(udev,
					  es2->cport_out_endpoint),
			  message->buffer, buffer_size,
			  cport_out_callback, message);
	/* Make sure a max-packet-sized final chunk still ends the transfer. */
	urb->transfer_flags |= URB_ZERO_PACKET;

	trace_gb_message_submit(message);

	retval = usb_submit_urb(urb, gfp_mask);
	if (retval) {
		dev_err(&udev->dev, "failed to submit out-urb: %d\n", retval);

		/* Unpublish the urb before releasing it. */
		spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
		message->hcpriv = NULL;
		spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

		free_urb(es2, urb);
		gb_message_cport_clear(message->header);

		return retval;
	}

	return 0;
}
/*
 * Can not be called in atomic context.
 */
static void message_cancel(struct gb_message *message)
{
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct urb *urb;
	int i;

	might_sleep();

	spin_lock_irq(&es2->cport_out_urb_lock);
	urb = message->hcpriv;

	/* Prevent dynamically allocated urb from being deallocated. */
	usb_get_urb(urb);

	/* Prevent pre-allocated urb from being reused. */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		if (urb == es2->cport_out_urb[i]) {
			es2->cport_out_urb_cancelled[i] = true;
			break;
		}
	}
	spin_unlock_irq(&es2->cport_out_urb_lock);

	/* May sleep until the completion handler has finished. */
	usb_kill_urb(urb);

	/* Pool urb (loop broke early): make it available again. */
	if (i < NUM_CPORT_OUT_URB) {
		spin_lock_irq(&es2->cport_out_urb_lock);
		es2->cport_out_urb_cancelled[i] = false;
		spin_unlock_irq(&es2->cport_out_urb_lock);
	}

	/* Drop the reference taken above (actually frees a dynamic urb). */
	usb_free_urb(urb);
}
/*
 * Reserve a CPort id for a new connection.
 *
 * @cport_id: the requested id, or a negative value for "any available".
 *
 * The fixed CDSI cports are never handed out through the ida; an
 * offloaded CDSI1 connection instead claims the dedicated
 * ES2_CPORT_CDSI1 id, guarded by @cdsi1_in_use.
 *
 * Returns the allocated cport id or a negative errno.
 */
static int es2_cport_allocate(struct gb_host_device *hd, int cport_id,
			      unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct ida *id_map = &hd->cport_id_map;
	int ida_start, ida_end;

	switch (cport_id) {
	case ES2_CPORT_CDSI0:
	case ES2_CPORT_CDSI1:
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EBUSY;
	}

	if (flags & GB_CONNECTION_FLAG_OFFLOADED &&
	    flags & GB_CONNECTION_FLAG_CDSI1) {
		if (es2->cdsi1_in_use) {
			dev_err(&hd->dev, "CDSI1 already in use\n");
			return -EBUSY;
		}

		es2->cdsi1_in_use = true;

		return ES2_CPORT_CDSI1;
	}

	if (cport_id < 0) {
		ida_start = 0;
		ida_end = hd->num_cports;
	} else if (cport_id < hd->num_cports) {
		ida_start = cport_id;
		ida_end = cport_id + 1;
	} else {
		dev_err(&hd->dev, "cport %d not available\n", cport_id);
		return -EINVAL;
	}

	return ida_simple_get(id_map, ida_start, ida_end, GFP_KERNEL);
}
  458. static void es2_cport_release(struct gb_host_device *hd, u16 cport_id)
  459. {
  460. struct es2_ap_dev *es2 = hd_to_es2(hd);
  461. switch (cport_id) {
  462. case ES2_CPORT_CDSI1:
  463. es2->cdsi1_in_use = false;
  464. return;
  465. }
  466. ida_simple_remove(&hd->cport_id_map, cport_id);
  467. }
/*
 * Push the connection flags for @cport_id down to the bridge.
 *
 * The request is heap-allocated because USB transfer buffers must be
 * DMA-able.  A short control transfer is treated as -EIO.
 *
 * Returns 0 on success or a negative errno.
 */
static int cport_enable(struct gb_host_device *hd, u16 cport_id,
			unsigned long flags)
{
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_apb_request_cport_flags *req;
	u32 connection_flags;
	int ret;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* Translate the generic connection flags to the APB ones. */
	connection_flags = 0;
	if (flags & GB_CONNECTION_FLAG_CONTROL)
		connection_flags |= GB_APB_CPORT_FLAG_CONTROL;
	if (flags & GB_CONNECTION_FLAG_HIGH_PRIO)
		connection_flags |= GB_APB_CPORT_FLAG_HIGH_PRIO;

	req->flags = cpu_to_le32(connection_flags);

	dev_dbg(&hd->dev, "%s - cport = %u, flags = %02x\n", __func__,
		cport_id, connection_flags);

	ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
			      GB_APB_REQUEST_CPORT_FLAGS,
			      USB_DIR_OUT | USB_TYPE_VENDOR |
			      USB_RECIP_INTERFACE, cport_id, 0,
			      req, sizeof(*req), ES2_USB_CTRL_TIMEOUT);
	if (ret != sizeof(*req)) {
		dev_err(&udev->dev, "failed to set cport flags for port %d\n",
			cport_id);
		/* Short (non-negative) transfers are also failures. */
		if (ret >= 0)
			ret = -EIO;

		goto out;
	}

	ret = 0;
out:
	kfree(req);

	return ret;
}
  504. static int es2_cport_connected(struct gb_host_device *hd, u16 cport_id)
  505. {
  506. struct es2_ap_dev *es2 = hd_to_es2(hd);
  507. struct device *dev = &es2->usb_dev->dev;
  508. struct arpc_cport_connected_req req;
  509. int ret;
  510. req.cport_id = cpu_to_le16(cport_id);
  511. ret = arpc_sync(es2, ARPC_TYPE_CPORT_CONNECTED, &req, sizeof(req),
  512. NULL, ES2_ARPC_CPORT_TIMEOUT);
  513. if (ret) {
  514. dev_err(dev, "failed to set connected state for cport %u: %d\n",
  515. cport_id, ret);
  516. return ret;
  517. }
  518. return 0;
  519. }
  520. static int es2_cport_flush(struct gb_host_device *hd, u16 cport_id)
  521. {
  522. struct es2_ap_dev *es2 = hd_to_es2(hd);
  523. struct device *dev = &es2->usb_dev->dev;
  524. struct arpc_cport_flush_req req;
  525. int ret;
  526. req.cport_id = cpu_to_le16(cport_id);
  527. ret = arpc_sync(es2, ARPC_TYPE_CPORT_FLUSH, &req, sizeof(req),
  528. NULL, ES2_ARPC_CPORT_TIMEOUT);
  529. if (ret) {
  530. dev_err(dev, "failed to flush cport %u: %d\n", cport_id, ret);
  531. return ret;
  532. }
  533. return 0;
  534. }
  535. static int es2_cport_shutdown(struct gb_host_device *hd, u16 cport_id,
  536. u8 phase, unsigned int timeout)
  537. {
  538. struct es2_ap_dev *es2 = hd_to_es2(hd);
  539. struct device *dev = &es2->usb_dev->dev;
  540. struct arpc_cport_shutdown_req req;
  541. int result;
  542. int ret;
  543. if (timeout > U16_MAX)
  544. return -EINVAL;
  545. req.cport_id = cpu_to_le16(cport_id);
  546. req.timeout = cpu_to_le16(timeout);
  547. req.phase = phase;
  548. ret = arpc_sync(es2, ARPC_TYPE_CPORT_SHUTDOWN, &req, sizeof(req),
  549. &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
  550. if (ret) {
  551. dev_err(dev, "failed to send shutdown over cport %u: %d (%d)\n",
  552. cport_id, ret, result);
  553. return ret;
  554. }
  555. return 0;
  556. }
  557. static int es2_cport_quiesce(struct gb_host_device *hd, u16 cport_id,
  558. size_t peer_space, unsigned int timeout)
  559. {
  560. struct es2_ap_dev *es2 = hd_to_es2(hd);
  561. struct device *dev = &es2->usb_dev->dev;
  562. struct arpc_cport_quiesce_req req;
  563. int result;
  564. int ret;
  565. if (peer_space > U16_MAX)
  566. return -EINVAL;
  567. if (timeout > U16_MAX)
  568. return -EINVAL;
  569. req.cport_id = cpu_to_le16(cport_id);
  570. req.peer_space = cpu_to_le16(peer_space);
  571. req.timeout = cpu_to_le16(timeout);
  572. ret = arpc_sync(es2, ARPC_TYPE_CPORT_QUIESCE, &req, sizeof(req),
  573. &result, ES2_ARPC_CPORT_TIMEOUT + timeout);
  574. if (ret) {
  575. dev_err(dev, "failed to quiesce cport %u: %d (%d)\n",
  576. cport_id, ret, result);
  577. return ret;
  578. }
  579. return 0;
  580. }
  581. static int es2_cport_clear(struct gb_host_device *hd, u16 cport_id)
  582. {
  583. struct es2_ap_dev *es2 = hd_to_es2(hd);
  584. struct device *dev = &es2->usb_dev->dev;
  585. struct arpc_cport_clear_req req;
  586. int ret;
  587. req.cport_id = cpu_to_le16(cport_id);
  588. ret = arpc_sync(es2, ARPC_TYPE_CPORT_CLEAR, &req, sizeof(req),
  589. NULL, ES2_ARPC_CPORT_TIMEOUT);
  590. if (ret) {
  591. dev_err(dev, "failed to clear cport %u: %d\n", cport_id, ret);
  592. return ret;
  593. }
  594. return 0;
  595. }
/* Enable bridge-side latency tagging of messages on @cport_id. */
static int latency_tag_enable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_EN,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable latency tag for cport %d\n",
			cport_id);
	return retval;
}
/* Disable bridge-side latency tagging of messages on @cport_id. */
static int latency_tag_disable(struct gb_host_device *hd, u16 cport_id)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_LATENCY_TAG_DIS,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, cport_id, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);

	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable latency tag for cport %d\n",
			cport_id);
	return retval;
}
/*
 * Arm the bridge for an upcoming TimeSync pulse train.
 *
 * The request is heap-allocated because USB transfer buffers must be
 * DMA-able.  Returns 0 on success or a negative errno.
 */
static int timesync_enable(struct gb_host_device *hd, u8 count,
			   u64 frame_time, u32 strobe_delay, u32 refclk)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct gb_control_timesync_enable_request *request;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	request->count = count;
	request->frame_time = cpu_to_le64(frame_time);
	request->strobe_delay = cpu_to_le32(strobe_delay);
	request->refclk = cpu_to_le32(refclk);
	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_TIMESYNC_ENABLE,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, request,
				 sizeof(*request), ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot enable timesync %d\n", retval);

	kfree(request);
	return retval;
}
/* Tell the bridge to stop TimeSync operation. */
static int timesync_disable(struct gb_host_device *hd)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_TIMESYNC_DISABLE,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, NULL,
				 0, ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot disable timesync %d\n", retval);

	return retval;
}
/*
 * Relay the SVC's authoritative FrameTimes to the bridge.
 *
 * @frame_time: array of GB_TIMESYNC_MAX_STROBES values, converted to
 * little-endian for the wire.  Heap-allocated for DMA-ability.
 */
static int timesync_authoritative(struct gb_host_device *hd, u64 *frame_time)
{
	int retval, i;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	struct timesync_authoritative_request *request;

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	for (i = 0; i < GB_TIMESYNC_MAX_STROBES; i++)
		request->frame_time[i] = cpu_to_le64(frame_time[i]);

	retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
				 GB_APB_REQUEST_TIMESYNC_AUTHORITATIVE,
				 USB_DIR_OUT | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, request,
				 sizeof(*request), ES2_USB_CTRL_TIMEOUT);
	if (retval < 0)
		dev_err(&udev->dev, "Cannot timesync authoritative out %d\n", retval);

	kfree(request);
	return retval;
}
/*
 * Read the FrameTime of the last TimeSync event from the bridge.
 *
 * The 8-byte response goes through a heap bounce buffer (DMA-able);
 * a short transfer is converted to -EIO.  On success *frame_time is
 * set and 0 is returned.
 */
static int timesync_get_last_event(struct gb_host_device *hd, u64 *frame_time)
{
	int retval;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	struct usb_device *udev = es2->usb_dev;
	__le64 *response_frame_time;

	response_frame_time = kzalloc(sizeof(*response_frame_time), GFP_KERNEL);
	if (!response_frame_time)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_TIMESYNC_GET_LAST_EVENT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, response_frame_time,
				 sizeof(*response_frame_time),
				 ES2_USB_CTRL_TIMEOUT);

	if (retval != sizeof(*response_frame_time)) {
		dev_err(&udev->dev, "Cannot get last TimeSync event: %d\n",
			retval);

		/* Short (non-negative) transfers are also failures. */
		if (retval >= 0)
			retval = -EIO;

		goto out;
	}
	*frame_time = le64_to_cpu(*response_frame_time);
	retval = 0;
out:
	kfree(response_frame_time);
	return retval;
}
/* Host-device operations the Greybus core uses to drive the ES2 bridge. */
static struct gb_hd_driver es2_driver = {
	.hd_priv_size			= sizeof(struct es2_ap_dev),
	.message_send			= message_send,
	.message_cancel			= message_cancel,
	.cport_allocate			= es2_cport_allocate,
	.cport_release			= es2_cport_release,
	.cport_enable			= cport_enable,
	.cport_connected		= es2_cport_connected,
	.cport_flush			= es2_cport_flush,
	.cport_shutdown			= es2_cport_shutdown,
	.cport_quiesce			= es2_cport_quiesce,
	.cport_clear			= es2_cport_clear,
	.latency_tag_enable		= latency_tag_enable,
	.latency_tag_disable		= latency_tag_disable,
	.output				= output,
	.timesync_enable		= timesync_enable,
	.timesync_disable		= timesync_disable,
	.timesync_authoritative		= timesync_authoritative,
	.timesync_get_last_event	= timesync_get_last_event,
};
/* Common function to report consistent warnings based on URB status */
static int check_urb_status(struct urb *urb)
{
	struct device *dev = &urb->dev->dev;
	int status = urb->status;

	switch (status) {
	case 0:
		return 0;

	case -EOVERFLOW:
		dev_err(dev, "%s: overflow actual length is %d\n",
			__func__, urb->actual_length);
		/* fall through - overflow is reported, then handled below */
	case -ECONNRESET:
	case -ENOENT:
	case -ESHUTDOWN:
	case -EILSEQ:
	case -EPROTO:
		/* device is gone, stop sending */
		return status;
	}
	dev_err(dev, "%s: unknown status %d\n", __func__, status);

	return -EAGAIN;
}
/*
 * Tear down an es2 device: stop logging, kill and free all out/in URBs
 * and their buffers, return the reserved CDSI cports, then drop the host
 * device and USB device references.
 */
static void es2_destroy(struct es2_ap_dev *es2)
{
	struct usb_device *udev;
	struct urb *urb;
	int i;

	debugfs_remove(es2->apb_log_enable_dentry);
	usb_log_disable(es2);

	/* Tear down everything! */
	for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
		urb = es2->cport_out_urb[i];
		usb_kill_urb(urb);
		usb_free_urb(urb);
		es2->cport_out_urb[i] = NULL;
		es2->cport_out_urb_busy[i] = false; /* just to be anal */
	}

	for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
		usb_free_urb(es2->arpc_urb[i]);
		kfree(es2->arpc_buffer[i]);
		es2->arpc_buffer[i] = NULL;
	}

	for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
		usb_free_urb(es2->cport_in.urb[i]);
		kfree(es2->cport_in.buffer[i]);
		es2->cport_in.buffer[i] = NULL;
	}

	/* release reserved CDSI0 and CDSI1 cports */
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI1);
	gb_hd_cport_release_reserved(es2->hd, ES2_CPORT_CDSI0);

	/*
	 * Save udev first: es2 lives in the hd's private data (see
	 * hd_priv_size in es2_driver), so gb_hd_put() may free it.
	 */
	udev = es2->usb_dev;
	gb_hd_put(es2->hd);
	usb_put_dev(udev);
}
/*
 * Completion handler for CPort bulk-in URBs: validate the transfer,
 * hand any complete message to greybus core, then resubmit the URB.
 */
static void cport_in_callback(struct urb *urb)
{
	struct gb_host_device *hd = urb->context;
	struct device *dev = &urb->dev->dev;
	struct gb_operation_msg_hdr *header;
	int status = check_urb_status(urb);
	int retval;
	u16 cport_id;

	if (status) {
		/* transient error: drop the data but resubmit the urb */
		if ((status == -EAGAIN) || (status == -EPROTO))
			goto exit;

		/* The urb is being unlinked */
		if (status == -ENOENT || status == -ESHUTDOWN)
			return;

		/* fatal: do not resubmit */
		dev_err(dev, "urb cport in error %d (dropped)\n", status);
		return;
	}

	if (urb->actual_length < sizeof(*header)) {
		dev_err(dev, "short message received\n");
		goto exit;
	}

	/* Extract the CPort id, which is packed in the message header */
	header = urb->transfer_buffer;
	cport_id = gb_message_cport_unpack(header);

	if (cport_id_valid(hd, cport_id)) {
		greybus_data_rcvd(hd, cport_id, urb->transfer_buffer,
				  urb->actual_length);
	} else {
		dev_err(dev, "invalid cport id %u received\n", cport_id);
	}
exit:
	/* put our urb back in the request pool */
	retval = usb_submit_urb(urb, GFP_ATOMIC);
	if (retval)
		dev_err(dev, "failed to resubmit in-urb: %d\n", retval);
}
/*
 * Completion handler for CPort bulk-out URBs: report the send result to
 * greybus core and return the URB to the out-URB pool.
 */
static void cport_out_callback(struct urb *urb)
{
	struct gb_message *message = urb->context;
	struct gb_host_device *hd = message->operation->connection->hd;
	struct es2_ap_dev *es2 = hd_to_es2(hd);
	int status = check_urb_status(urb);
	unsigned long flags;

	gb_message_cport_clear(message->header);

	/* detach the message from the urb under the out-urb lock */
	spin_lock_irqsave(&es2->cport_out_urb_lock, flags);
	message->hcpriv = NULL;
	spin_unlock_irqrestore(&es2->cport_out_urb_lock, flags);

	/*
	 * Tell the submitter that the message send (attempt) is
	 * complete, and report the status.
	 */
	greybus_message_sent(hd, message, status);

	free_urb(es2, urb);
}
  841. static struct arpc *arpc_alloc(void *payload, u16 size, u8 type)
  842. {
  843. struct arpc *rpc;
  844. if (size + sizeof(*rpc->req) > ARPC_OUT_SIZE_MAX)
  845. return NULL;
  846. rpc = kzalloc(sizeof(*rpc), GFP_KERNEL);
  847. if (!rpc)
  848. return NULL;
  849. INIT_LIST_HEAD(&rpc->list);
  850. rpc->req = kzalloc(sizeof(*rpc->req) + size, GFP_KERNEL);
  851. if (!rpc->req)
  852. goto err_free_rpc;
  853. rpc->resp = kzalloc(sizeof(*rpc->resp), GFP_KERNEL);
  854. if (!rpc->resp)
  855. goto err_free_req;
  856. rpc->req->type = type;
  857. rpc->req->size = cpu_to_le16(sizeof(rpc->req) + size);
  858. memcpy(rpc->req->data, payload, size);
  859. init_completion(&rpc->response_received);
  860. return rpc;
  861. err_free_req:
  862. kfree(rpc->req);
  863. err_free_rpc:
  864. kfree(rpc);
  865. return NULL;
  866. }
  867. static void arpc_free(struct arpc *rpc)
  868. {
  869. kfree(rpc->req);
  870. kfree(rpc->resp);
  871. kfree(rpc);
  872. }
  873. static struct arpc *arpc_find(struct es2_ap_dev *es2, __le16 id)
  874. {
  875. struct arpc *rpc;
  876. list_for_each_entry(rpc, &es2->arpcs, list) {
  877. if (rpc->req->id == id)
  878. return rpc;
  879. }
  880. return NULL;
  881. }
  882. static void arpc_add(struct es2_ap_dev *es2, struct arpc *rpc)
  883. {
  884. rpc->active = true;
  885. rpc->req->id = cpu_to_le16(es2->arpc_id_cycle++);
  886. list_add_tail(&rpc->list, &es2->arpcs);
  887. }
  888. static void arpc_del(struct es2_ap_dev *es2, struct arpc *rpc)
  889. {
  890. if (rpc->active) {
  891. rpc->active = false;
  892. list_del(&rpc->list);
  893. }
  894. }
  895. static int arpc_send(struct es2_ap_dev *es2, struct arpc *rpc, int timeout)
  896. {
  897. struct usb_device *udev = es2->usb_dev;
  898. int retval;
  899. retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
  900. GB_APB_REQUEST_ARPC_RUN,
  901. USB_DIR_OUT | USB_TYPE_VENDOR |
  902. USB_RECIP_INTERFACE,
  903. 0, 0,
  904. rpc->req, le16_to_cpu(rpc->req->size),
  905. ES2_USB_CTRL_TIMEOUT);
  906. if (retval != le16_to_cpu(rpc->req->size)) {
  907. dev_err(&udev->dev,
  908. "failed to send ARPC request %d: %d\n",
  909. rpc->req->type, retval);
  910. if (retval > 0)
  911. retval = -EIO;
  912. return retval;
  913. }
  914. return 0;
  915. }
/*
 * Send an ARPC request and wait up to @timeout ms for its response.
 *
 * Returns 0 on success.  If the remote reports a failure, returns
 * -EREMOTEIO and stores the remote result code in *@result (when
 * @result is non-NULL).  Other negative errnos indicate allocation,
 * transport, interruption or timeout failures.
 */
static int arpc_sync(struct es2_ap_dev *es2, u8 type, void *payload,
		     size_t size, int *result, unsigned int timeout)
{
	struct arpc *rpc;
	unsigned long flags;
	int retval;

	if (result)
		*result = 0;

	rpc = arpc_alloc(payload, size, type);
	if (!rpc)
		return -ENOMEM;

	/* queue under the lock so arpc_in_callback() can find it by id */
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_add(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);

	retval = arpc_send(es2, rpc, timeout);
	if (retval)
		goto out_arpc_del;

	retval = wait_for_completion_interruptible_timeout(
						&rpc->response_received,
						msecs_to_jiffies(timeout));
	if (retval <= 0) {
		/* 0 means timed out; negative means interrupted */
		if (!retval)
			retval = -ETIMEDOUT;
		goto out_arpc_del;
	}

	if (rpc->resp->result) {
		retval = -EREMOTEIO;
		if (result)
			*result = rpc->resp->result;
	} else {
		retval = 0;
	}

out_arpc_del:
	/* remove under the lock; harmless if the callback already did */
	spin_lock_irqsave(&es2->arpc_lock, flags);
	arpc_del(es2, rpc);
	spin_unlock_irqrestore(&es2->arpc_lock, flags);
	arpc_free(rpc);

	if (retval < 0 && retval != -EREMOTEIO) {
		dev_err(&es2->usb_dev->dev,
			"failed to execute ARPC: %d\n", retval);
	}

	return retval;
}
  959. static void arpc_in_callback(struct urb *urb)
  960. {
  961. struct es2_ap_dev *es2 = urb->context;
  962. struct device *dev = &urb->dev->dev;
  963. int status = check_urb_status(urb);
  964. struct arpc *rpc;
  965. struct arpc_response_message *resp;
  966. unsigned long flags;
  967. int retval;
  968. if (status) {
  969. if ((status == -EAGAIN) || (status == -EPROTO))
  970. goto exit;
  971. /* The urb is being unlinked */
  972. if (status == -ENOENT || status == -ESHUTDOWN)
  973. return;
  974. dev_err(dev, "arpc in-urb error %d (dropped)\n", status);
  975. return;
  976. }
  977. if (urb->actual_length < sizeof(*resp)) {
  978. dev_err(dev, "short aprc response received\n");
  979. goto exit;
  980. }
  981. resp = urb->transfer_buffer;
  982. spin_lock_irqsave(&es2->arpc_lock, flags);
  983. rpc = arpc_find(es2, resp->id);
  984. if (!rpc) {
  985. dev_err(dev, "invalid arpc response id received: %u\n",
  986. le16_to_cpu(resp->id));
  987. spin_unlock_irqrestore(&es2->arpc_lock, flags);
  988. goto exit;
  989. }
  990. arpc_del(es2, rpc);
  991. memcpy(rpc->resp, resp, sizeof(*resp));
  992. complete(&rpc->response_received);
  993. spin_unlock_irqrestore(&es2->arpc_lock, flags);
  994. exit:
  995. /* put our urb back in the request pool */
  996. retval = usb_submit_urb(urb, GFP_ATOMIC);
  997. if (retval)
  998. dev_err(dev, "failed to resubmit arpc in-urb: %d\n", retval);
  999. }
  1000. #define APB1_LOG_MSG_SIZE 64
  1001. static void apb_log_get(struct es2_ap_dev *es2, char *buf)
  1002. {
  1003. int retval;
  1004. do {
  1005. retval = usb_control_msg(es2->usb_dev,
  1006. usb_rcvctrlpipe(es2->usb_dev, 0),
  1007. GB_APB_REQUEST_LOG,
  1008. USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
  1009. 0x00, 0x00,
  1010. buf,
  1011. APB1_LOG_MSG_SIZE,
  1012. ES2_USB_CTRL_TIMEOUT);
  1013. if (retval > 0)
  1014. kfifo_in(&es2->apb_log_fifo, buf, retval);
  1015. } while (retval > 0);
  1016. }
  1017. static int apb_log_poll(void *data)
  1018. {
  1019. struct es2_ap_dev *es2 = data;
  1020. char *buf;
  1021. buf = kmalloc(APB1_LOG_MSG_SIZE, GFP_KERNEL);
  1022. if (!buf)
  1023. return -ENOMEM;
  1024. while (!kthread_should_stop()) {
  1025. msleep(1000);
  1026. apb_log_get(es2, buf);
  1027. }
  1028. kfree(buf);
  1029. return 0;
  1030. }
  1031. static ssize_t apb_log_read(struct file *f, char __user *buf,
  1032. size_t count, loff_t *ppos)
  1033. {
  1034. struct es2_ap_dev *es2 = f->f_inode->i_private;
  1035. ssize_t ret;
  1036. size_t copied;
  1037. char *tmp_buf;
  1038. if (count > APB1_LOG_SIZE)
  1039. count = APB1_LOG_SIZE;
  1040. tmp_buf = kmalloc(count, GFP_KERNEL);
  1041. if (!tmp_buf)
  1042. return -ENOMEM;
  1043. copied = kfifo_out(&es2->apb_log_fifo, tmp_buf, count);
  1044. ret = simple_read_from_buffer(buf, count, ppos, tmp_buf, copied);
  1045. kfree(tmp_buf);
  1046. return ret;
  1047. }
/* debugfs "apb_log" file: read-only access to the buffered bridge log. */
static const struct file_operations apb_log_fops = {
	.read = apb_log_read,
};
/*
 * Start the kthread that polls APB1 for log data and create the
 * "apb_log" debugfs file.  No-op if the poller is already running;
 * if kthread_run() fails, the debugfs file is not created.
 */
static void usb_log_enable(struct es2_ap_dev *es2)
{
	if (!IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	/* get log from APB1 */
	es2->apb_log_task = kthread_run(apb_log_poll, es2, "apb_log");
	if (IS_ERR(es2->apb_log_task))
		return;
	/* XXX We will need to rename this per APB */
	es2->apb_log_dentry = debugfs_create_file("apb_log", S_IRUGO,
						  gb_debugfs_get(), es2,
						  &apb_log_fops);
}
/*
 * Remove the "apb_log" debugfs file and stop the log poller kthread.
 * No-op if the poller is not running.
 */
static void usb_log_disable(struct es2_ap_dev *es2)
{
	if (IS_ERR_OR_NULL(es2->apb_log_task))
		return;

	debugfs_remove(es2->apb_log_dentry);
	es2->apb_log_dentry = NULL;

	kthread_stop(es2->apb_log_task);
	es2->apb_log_task = NULL;
}
  1073. static ssize_t apb_log_enable_read(struct file *f, char __user *buf,
  1074. size_t count, loff_t *ppos)
  1075. {
  1076. struct es2_ap_dev *es2 = f->f_inode->i_private;
  1077. int enable = !IS_ERR_OR_NULL(es2->apb_log_task);
  1078. char tmp_buf[3];
  1079. sprintf(tmp_buf, "%d\n", enable);
  1080. return simple_read_from_buffer(buf, count, ppos, tmp_buf, 3);
  1081. }
  1082. static ssize_t apb_log_enable_write(struct file *f, const char __user *buf,
  1083. size_t count, loff_t *ppos)
  1084. {
  1085. int enable;
  1086. ssize_t retval;
  1087. struct es2_ap_dev *es2 = f->f_inode->i_private;
  1088. retval = kstrtoint_from_user(buf, count, 10, &enable);
  1089. if (retval)
  1090. return retval;
  1091. if (enable)
  1092. usb_log_enable(es2);
  1093. else
  1094. usb_log_disable(es2);
  1095. return count;
  1096. }
/* debugfs "apb_log_enable" file: toggles the APB log poller on/off. */
static const struct file_operations apb_log_enable_fops = {
	.read = apb_log_enable_read,
	.write = apb_log_enable_write,
};
/*
 * Query how many CPorts the bridge supports via a vendor control request.
 *
 * Returns the count on success (capped at U8_MAX, since a CPort ID must
 * fit in one byte of the message header) or a negative errno.
 */
static int apb_get_cport_count(struct usb_device *udev)
{
	int retval;
	__le16 *cport_count;

	/* heap buffer: USB transfer buffers must not live on the stack */
	cport_count = kzalloc(sizeof(*cport_count), GFP_KERNEL);
	if (!cport_count)
		return -ENOMEM;

	retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
				 GB_APB_REQUEST_CPORT_COUNT,
				 USB_DIR_IN | USB_TYPE_VENDOR |
				 USB_RECIP_INTERFACE, 0, 0, cport_count,
				 sizeof(*cport_count), ES2_USB_CTRL_TIMEOUT);
	if (retval != sizeof(*cport_count)) {
		dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
			retval);

		/* treat a short transfer as an I/O error */
		if (retval >= 0)
			retval = -EIO;

		goto out;
	}

	retval = le16_to_cpu(*cport_count);

	/* We need to fit a CPort ID in one byte of a message header */
	if (retval > U8_MAX) {
		retval = U8_MAX;
		dev_warn(&udev->dev, "Limiting number of CPorts to U8_MAX\n");
	}

out:
	kfree(cport_count);
	return retval;
}
  1130. /*
  1131. * The ES2 USB Bridge device has 15 endpoints
  1132. * 1 Control - usual USB stuff + AP -> APBridgeA messages
  1133. * 7 Bulk IN - CPort data in
  1134. * 7 Bulk OUT - CPort data out
  1135. */
  1136. static int ap_probe(struct usb_interface *interface,
  1137. const struct usb_device_id *id)
  1138. {
  1139. struct es2_ap_dev *es2;
  1140. struct gb_host_device *hd;
  1141. struct usb_device *udev;
  1142. struct usb_host_interface *iface_desc;
  1143. struct usb_endpoint_descriptor *endpoint;
  1144. __u8 ep_addr;
  1145. int retval;
  1146. int i;
  1147. int num_cports;
  1148. bool bulk_out_found = false;
  1149. bool bulk_in_found = false;
  1150. bool arpc_in_found = false;
  1151. udev = usb_get_dev(interface_to_usbdev(interface));
  1152. num_cports = apb_get_cport_count(udev);
  1153. if (num_cports < 0) {
  1154. usb_put_dev(udev);
  1155. dev_err(&udev->dev, "Cannot retrieve CPort count: %d\n",
  1156. num_cports);
  1157. return num_cports;
  1158. }
  1159. hd = gb_hd_create(&es2_driver, &udev->dev, ES2_GBUF_MSG_SIZE_MAX,
  1160. num_cports);
  1161. if (IS_ERR(hd)) {
  1162. usb_put_dev(udev);
  1163. return PTR_ERR(hd);
  1164. }
  1165. es2 = hd_to_es2(hd);
  1166. es2->hd = hd;
  1167. es2->usb_intf = interface;
  1168. es2->usb_dev = udev;
  1169. spin_lock_init(&es2->cport_out_urb_lock);
  1170. INIT_KFIFO(es2->apb_log_fifo);
  1171. usb_set_intfdata(interface, es2);
  1172. /*
  1173. * Reserve the CDSI0 and CDSI1 CPorts so they won't be allocated
  1174. * dynamically.
  1175. */
  1176. retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI0);
  1177. if (retval)
  1178. goto error;
  1179. retval = gb_hd_cport_reserve(hd, ES2_CPORT_CDSI1);
  1180. if (retval)
  1181. goto error;
  1182. /* find all bulk endpoints */
  1183. iface_desc = interface->cur_altsetting;
  1184. for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
  1185. endpoint = &iface_desc->endpoint[i].desc;
  1186. ep_addr = endpoint->bEndpointAddress;
  1187. if (usb_endpoint_is_bulk_in(endpoint)) {
  1188. if (!bulk_in_found) {
  1189. es2->cport_in.endpoint = ep_addr;
  1190. bulk_in_found = true;
  1191. } else if (!arpc_in_found) {
  1192. es2->arpc_endpoint_in = ep_addr;
  1193. arpc_in_found = true;
  1194. } else {
  1195. dev_warn(&udev->dev,
  1196. "Unused bulk IN endpoint found: 0x%02x\n",
  1197. ep_addr);
  1198. }
  1199. continue;
  1200. }
  1201. if (usb_endpoint_is_bulk_out(endpoint)) {
  1202. if (!bulk_out_found) {
  1203. es2->cport_out_endpoint = ep_addr;
  1204. bulk_out_found = true;
  1205. } else {
  1206. dev_warn(&udev->dev,
  1207. "Unused bulk OUT endpoint found: 0x%02x\n",
  1208. ep_addr);
  1209. }
  1210. continue;
  1211. }
  1212. dev_warn(&udev->dev,
  1213. "Unknown endpoint type found, address 0x%02x\n",
  1214. ep_addr);
  1215. }
  1216. if (!bulk_in_found || !arpc_in_found || !bulk_out_found) {
  1217. dev_err(&udev->dev, "Not enough endpoints found in device, aborting!\n");
  1218. retval = -ENODEV;
  1219. goto error;
  1220. }
  1221. /* Allocate buffers for our cport in messages */
  1222. for (i = 0; i < NUM_CPORT_IN_URB; ++i) {
  1223. struct urb *urb;
  1224. u8 *buffer;
  1225. urb = usb_alloc_urb(0, GFP_KERNEL);
  1226. if (!urb) {
  1227. retval = -ENOMEM;
  1228. goto error;
  1229. }
  1230. es2->cport_in.urb[i] = urb;
  1231. buffer = kmalloc(ES2_GBUF_MSG_SIZE_MAX, GFP_KERNEL);
  1232. if (!buffer) {
  1233. retval = -ENOMEM;
  1234. goto error;
  1235. }
  1236. usb_fill_bulk_urb(urb, udev,
  1237. usb_rcvbulkpipe(udev, es2->cport_in.endpoint),
  1238. buffer, ES2_GBUF_MSG_SIZE_MAX,
  1239. cport_in_callback, hd);
  1240. es2->cport_in.buffer[i] = buffer;
  1241. }
  1242. /* Allocate buffers for ARPC in messages */
  1243. for (i = 0; i < NUM_ARPC_IN_URB; ++i) {
  1244. struct urb *urb;
  1245. u8 *buffer;
  1246. urb = usb_alloc_urb(0, GFP_KERNEL);
  1247. if (!urb) {
  1248. retval = -ENOMEM;
  1249. goto error;
  1250. }
  1251. es2->arpc_urb[i] = urb;
  1252. buffer = kmalloc(ARPC_IN_SIZE_MAX, GFP_KERNEL);
  1253. if (!buffer) {
  1254. retval = -ENOMEM;
  1255. goto error;
  1256. }
  1257. usb_fill_bulk_urb(urb, udev,
  1258. usb_rcvbulkpipe(udev,
  1259. es2->arpc_endpoint_in),
  1260. buffer, ARPC_IN_SIZE_MAX,
  1261. arpc_in_callback, es2);
  1262. es2->arpc_buffer[i] = buffer;
  1263. }
  1264. /* Allocate urbs for our CPort OUT messages */
  1265. for (i = 0; i < NUM_CPORT_OUT_URB; ++i) {
  1266. struct urb *urb;
  1267. urb = usb_alloc_urb(0, GFP_KERNEL);
  1268. if (!urb) {
  1269. retval = -ENOMEM;
  1270. goto error;
  1271. }
  1272. es2->cport_out_urb[i] = urb;
  1273. es2->cport_out_urb_busy[i] = false; /* just to be anal */
  1274. }
  1275. /* XXX We will need to rename this per APB */
  1276. es2->apb_log_enable_dentry = debugfs_create_file("apb_log_enable",
  1277. (S_IWUSR | S_IRUGO),
  1278. gb_debugfs_get(), es2,
  1279. &apb_log_enable_fops);
  1280. INIT_LIST_HEAD(&es2->arpcs);
  1281. spin_lock_init(&es2->arpc_lock);
  1282. retval = es2_arpc_in_enable(es2);
  1283. if (retval)
  1284. goto error;
  1285. retval = gb_hd_add(hd);
  1286. if (retval)
  1287. goto err_disable_arpc_in;
  1288. retval = es2_cport_in_enable(es2, &es2->cport_in);
  1289. if (retval)
  1290. goto err_hd_del;
  1291. return 0;
  1292. err_hd_del:
  1293. gb_hd_del(hd);
  1294. err_disable_arpc_in:
  1295. es2_arpc_in_disable(es2);
  1296. error:
  1297. es2_destroy(es2);
  1298. return retval;
  1299. }
/*
 * USB disconnect: deregister the host device first, then stop CPort and
 * ARPC inbound traffic, and finally free everything via es2_destroy().
 */
static void ap_disconnect(struct usb_interface *interface)
{
	struct es2_ap_dev *es2 = usb_get_intfdata(interface);

	gb_hd_del(es2->hd);

	es2_cport_in_disable(es2, &es2->cport_in);
	es2_arpc_in_disable(es2);

	es2_destroy(es2);
}
/*
 * USB driver for the ES2 APBridge.  NOTE(review): soft_unbind = 1 —
 * presumably so the device is not reset before ap_disconnect() can talk
 * to it during teardown; confirm against USB core documentation.
 */
static struct usb_driver es2_ap_driver = {
	.name = "es2_ap_driver",
	.probe = ap_probe,
	.disconnect = ap_disconnect,
	.id_table = id_table,
	.soft_unbind = 1,
};
/* Standard module registration boilerplate for the USB driver above. */
module_usb_driver(es2_ap_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Greg Kroah-Hartman <gregkh@linuxfoundation.org>");