/*
 * Thunderbolt Cactus Ridge driver - control channel and configuration commands
 *
 * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com>
 */

#include <linux/crc32.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>

#include "ctl.h"

struct ctl_pkg {
	struct tb_ctl *ctl;
	void *buffer;
	struct ring_frame frame;
};

#define TB_CTL_RX_PKG_COUNT 10

/**
 * struct tb_ctl - Thunderbolt control channel
 */
struct tb_ctl {
	struct tb_nhi *nhi;
	struct tb_ring *tx;
	struct tb_ring *rx;

	struct dma_pool *frame_pool;
	struct ctl_pkg *rx_packets[TB_CTL_RX_PKG_COUNT];
	DECLARE_KFIFO(response_fifo, struct ctl_pkg*, 16);
	struct completion response_ready;

	hotplug_cb callback;
	void *callback_data;
};

#define tb_ctl_WARN(ctl, format, arg...) \
	dev_WARN(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_err(ctl, format, arg...) \
	dev_err(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_warn(ctl, format, arg...) \
	dev_warn(&(ctl)->nhi->pdev->dev, format, ## arg)

#define tb_ctl_info(ctl, format, arg...) \
	dev_info(&(ctl)->nhi->pdev->dev, format, ## arg)

/* configuration packet definitions */

enum tb_cfg_pkg_type {
	TB_CFG_PKG_READ = 1,
	TB_CFG_PKG_WRITE = 2,
	TB_CFG_PKG_ERROR = 3,
	TB_CFG_PKG_NOTIFY_ACK = 4,
	TB_CFG_PKG_EVENT = 5,
	TB_CFG_PKG_XDOMAIN_REQ = 6,
	TB_CFG_PKG_XDOMAIN_RESP = 7,
	TB_CFG_PKG_OVERRIDE = 8,
	TB_CFG_PKG_RESET = 9,
	TB_CFG_PKG_PREPARE_TO_SLEEP = 0xd,
};

/* common header */
struct tb_cfg_header {
	u32 route_hi:22;
	u32 unknown:10; /* highest order bit is set on replies */
	u32 route_lo;
} __packed;

/* additional header for read/write packets */
struct tb_cfg_address {
	u32 offset:13; /* in dwords */
	u32 length:6; /* in dwords */
	u32 port:6;
	enum tb_cfg_space space:2;
	u32 seq:2; /* sequence number */
	u32 zero:3;
} __packed;
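
/*
 * Example (illustrative only; assumes TB_CFG_PORT from the tb_cfg_space
 * enum): a request for two dwords starting at dword offset 1 of a port
 * config space would carry
 *
 *	struct tb_cfg_address addr = {
 *		.offset = 1,
 *		.length = 2,
 *		.port = port,
 *		.space = TB_CFG_PORT,
 *	};
 *
 * Since offset and length count dwords, this addresses bytes 4..11.
 */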

/* TB_CFG_PKG_READ, response for TB_CFG_PKG_WRITE */
struct cfg_read_pkg {
	struct tb_cfg_header header;
	struct tb_cfg_address addr;
} __packed;

/* TB_CFG_PKG_WRITE, response for TB_CFG_PKG_READ */
struct cfg_write_pkg {
	struct tb_cfg_header header;
	struct tb_cfg_address addr;
	u32 data[64]; /* maximum size, tb_cfg_address.length has 6 bits */
} __packed;

/* TB_CFG_PKG_ERROR */
struct cfg_error_pkg {
	struct tb_cfg_header header;
	enum tb_cfg_error error:4;
	u32 zero1:4;
	u32 port:6;
	u32 zero2:2; /* Both should be zero, still they are different fields. */
	u32 zero3:16;
} __packed;

/* TB_CFG_PKG_EVENT */
struct cfg_event_pkg {
	struct tb_cfg_header header;
	u32 port:6;
	u32 zero:25;
	bool unplug:1;
} __packed;

/* TB_CFG_PKG_RESET */
struct cfg_reset_pkg {
	struct tb_cfg_header header;
} __packed;

/* TB_CFG_PKG_PREPARE_TO_SLEEP */
struct cfg_pts_pkg {
	struct tb_cfg_header header;
	u32 data;
} __packed;

/* utility functions */

static u64 get_route(struct tb_cfg_header header)
{
	return (u64) header.route_hi << 32 | header.route_lo;
}

static struct tb_cfg_header make_header(u64 route)
{
	struct tb_cfg_header header = {
		.route_hi = route >> 32,
		.route_lo = route,
	};
	/* check for overflow, route_hi is not 32 bits! */
	WARN_ON(get_route(header) != route);
	return header;
}
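
/*
 * Example (illustrative): for route 0x3000000102, make_header() yields
 * route_hi = 0x30 and route_lo = 0x00000102, and get_route() reassembles
 * the original value. route_hi holds only 22 bits, so any route at or
 * above 1ULL << 54 would be truncated - hence the WARN_ON above.
 */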

static int check_header(struct ctl_pkg *pkg, u32 len, enum tb_cfg_pkg_type type,
			u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;

	/* check frame, TODO: frame flags */
	if (WARN(len != pkg->frame.size,
			"wrong framesize (expected %#x, got %#x)\n",
			len, pkg->frame.size))
		return -EIO;
	if (WARN(type != pkg->frame.eof, "wrong eof (expected %#x, got %#x)\n",
			type, pkg->frame.eof))
		return -EIO;
	if (WARN(pkg->frame.sof, "wrong sof (expected 0x0, got %#x)\n",
			pkg->frame.sof))
		return -EIO;

	/* check header */
	if (WARN(header->unknown != 1 << 9,
			"header->unknown is %#x\n", header->unknown))
		return -EIO;
	if (WARN(route != get_route(*header),
			"wrong route (expected %llx, got %llx)\n",
			route, get_route(*header)))
		return -EIO;
	return 0;
}

static int check_config_address(struct tb_cfg_address addr,
				enum tb_cfg_space space, u32 offset,
				u32 length)
{
	if (WARN(addr.zero, "addr.zero is %#x\n", addr.zero))
		return -EIO;
	if (WARN(space != addr.space, "wrong space (expected %x, got %x)\n",
			space, addr.space))
		return -EIO;
	if (WARN(offset != addr.offset, "wrong offset (expected %x, got %x)\n",
			offset, addr.offset))
		return -EIO;
	if (WARN(length != addr.length, "wrong length (expected %x, got %x)\n",
			length, addr.length))
		return -EIO;
	if (WARN(addr.seq, "addr.seq is %#x\n", addr.seq))
		return -EIO;
	/*
	 * We cannot check addr->port as it is set to the upstream port of the
	 * sender.
	 */
	return 0;
}

static struct tb_cfg_result decode_error(struct ctl_pkg *response)
{
	struct cfg_error_pkg *pkg = response->buffer;
	struct tb_cfg_result res = { 0 };
	res.response_route = get_route(pkg->header);
	res.response_port = 0;
	res.err = check_header(response, sizeof(*pkg), TB_CFG_PKG_ERROR,
			       get_route(pkg->header));
	if (res.err)
		return res;

	WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1);
	WARN(pkg->zero2, "pkg->zero2 is %#x\n", pkg->zero2);
	WARN(pkg->zero3, "pkg->zero3 is %#x\n", pkg->zero3);
	res.err = 1;
	res.tb_error = pkg->error;
	res.response_port = pkg->port;
	return res;
}

static struct tb_cfg_result parse_header(struct ctl_pkg *pkg, u32 len,
					 enum tb_cfg_pkg_type type, u64 route)
{
	struct tb_cfg_header *header = pkg->buffer;
	struct tb_cfg_result res = { 0 };

	if (pkg->frame.eof == TB_CFG_PKG_ERROR)
		return decode_error(pkg);

	res.response_port = 0; /* will be updated later for cfg_read/write */
	res.response_route = get_route(*header);
	res.err = check_header(pkg, len, type, route);
	return res;
}

static void tb_cfg_print_error(struct tb_ctl *ctl,
			       const struct tb_cfg_result *res)
{
	WARN_ON(res->err != 1);
	switch (res->tb_error) {
	case TB_CFG_ERROR_PORT_NOT_CONNECTED:
		/* Port is not connected. This can happen during surprise
		 * removal. Do not warn. */
		return;
	case TB_CFG_ERROR_INVALID_CONFIG_SPACE:
		/*
		 * Invalid cfg_space/offset/length combination in
		 * cfg_read/cfg_write.
		 */
		tb_ctl_WARN(ctl,
			"CFG_ERROR(%llx:%x): Invalid config space or offset\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_NO_SUCH_PORT:
		/*
		 * - The route contains a non-existent port.
		 * - The route contains a non-PHY port (e.g. PCIe).
		 * - The port in cfg_read/cfg_write does not exist.
		 */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Invalid port\n",
			res->response_route, res->response_port);
		return;
	case TB_CFG_ERROR_LOOP:
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n",
			res->response_route, res->response_port);
		return;
	default:
		/* 5,6,7,9 and 11 are also valid error codes */
		tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n",
			res->response_route, res->response_port);
		return;
	}
}

static void cpu_to_be32_array(__be32 *dst, u32 *src, size_t len)
{
	int i;
	for (i = 0; i < len; i++)
		dst[i] = cpu_to_be32(src[i]);
}

static void be32_to_cpu_array(u32 *dst, __be32 *src, size_t len)
{
	int i;
	for (i = 0; i < len; i++)
		dst[i] = be32_to_cpu(src[i]);
}

static __be32 tb_crc(void *data, size_t len)
{
	return cpu_to_be32(~__crc32c_le(~0, data, len));
}
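
/*
 * A minimal sketch of how the checksum lines up on both ends (buf and
 * payload_len are made-up names): the transmitter appends
 * tb_crc(buf, payload_len) after the payload, and the receiver recomputes
 * it over the same bytes, so a frame verifies as
 *
 *	*(__be32 *)(buf + payload_len) == tb_crc(buf, payload_len)
 *
 * This is CRC32C (Castagnoli) with the usual initial and final bit
 * inversion, stored big-endian like the payload.
 */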

static void tb_ctl_pkg_free(struct ctl_pkg *pkg)
{
	if (pkg) {
		dma_pool_free(pkg->ctl->frame_pool,
			      pkg->buffer, pkg->frame.buffer_phy);
		kfree(pkg);
	}
}

static struct ctl_pkg *tb_ctl_pkg_alloc(struct tb_ctl *ctl)
{
	struct ctl_pkg *pkg = kzalloc(sizeof(*pkg), GFP_KERNEL);
	if (!pkg)
		return NULL;
	pkg->ctl = ctl;
	pkg->buffer = dma_pool_alloc(ctl->frame_pool, GFP_KERNEL,
				     &pkg->frame.buffer_phy);
	if (!pkg->buffer) {
		kfree(pkg);
		return NULL;
	}
	return pkg;
}

/* RX/TX handling */

static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);
	tb_ctl_pkg_free(pkg);
}

/**
 * tb_ctl_tx() - transmit a packet on the control channel
 *
 * len must be a multiple of four.
 *
 * Return: Returns 0 on success or an error code on failure.
 */
static int tb_ctl_tx(struct tb_ctl *ctl, void *data, size_t len,
		     enum tb_cfg_pkg_type type)
{
	int res;
	struct ctl_pkg *pkg;
	if (len % 4 != 0) { /* required for le->be conversion */
		tb_ctl_WARN(ctl, "TX: invalid size: %zu\n", len);
		return -EINVAL;
	}
	if (len > TB_FRAME_SIZE - 4) { /* checksum is 4 bytes */
		tb_ctl_WARN(ctl, "TX: packet too large: %zu/%d\n",
			    len, TB_FRAME_SIZE - 4);
		return -EINVAL;
	}
	pkg = tb_ctl_pkg_alloc(ctl);
	if (!pkg)
		return -ENOMEM;
	pkg->frame.callback = tb_ctl_tx_callback;
	pkg->frame.size = len + 4;
	pkg->frame.sof = type;
	pkg->frame.eof = type;
	cpu_to_be32_array(pkg->buffer, data, len / 4);
	*(__be32 *) (pkg->buffer + len) = tb_crc(pkg->buffer, len);

	res = ring_tx(ctl->tx, &pkg->frame);
	if (res) /* ring is stopped */
		tb_ctl_pkg_free(pkg);
	return res;
}
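
/*
 * Resulting wire format (a sketch inferred from tb_ctl_tx() above):
 *
 *	dword 0 .. len/4 - 1	payload, each dword converted to big endian
 *	dword len/4		CRC32C over the converted payload
 *
 * so frame.size is always len + 4, and sof/eof both carry the packet type.
 */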

/**
 * tb_ctl_handle_plug_event() - acknowledge a plug event, invoke ctl->callback
 */
static void tb_ctl_handle_plug_event(struct tb_ctl *ctl,
				     struct ctl_pkg *response)
{
	struct cfg_event_pkg *pkg = response->buffer;
	u64 route = get_route(pkg->header);

	if (check_header(response, sizeof(*pkg), TB_CFG_PKG_EVENT, route)) {
		tb_ctl_warn(ctl, "malformed TB_CFG_PKG_EVENT\n");
		return;
	}

	if (tb_cfg_error(ctl, route, pkg->port, TB_CFG_ERROR_ACK_PLUG_EVENT))
		tb_ctl_warn(ctl, "could not ack plug event on %llx:%x\n",
			    route, pkg->port);
	WARN(pkg->zero, "pkg->zero is %#x\n", pkg->zero);
	ctl->callback(ctl->callback_data, route, pkg->port, pkg->unplug);
}

static void tb_ctl_rx_submit(struct ctl_pkg *pkg)
{
	/*
	 * We ignore failures during stop. All rx packets are referenced
	 * from ctl->rx_packets, so we do not lose them.
	 */
	ring_rx(pkg->ctl->rx, &pkg->frame);
}

static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame,
			       bool canceled)
{
	struct ctl_pkg *pkg = container_of(frame, typeof(*pkg), frame);

	if (canceled)
		return; /*
			 * ring is stopped, packet is referenced from
			 * ctl->rx_packets.
			 */

	if (frame->size < 4 || frame->size % 4 != 0) {
		tb_ctl_err(pkg->ctl, "RX: invalid size %#x, dropping packet\n",
			   frame->size);
		goto rx;
	}

	frame->size -= 4; /* remove checksum */
	if (*(__be32 *) (pkg->buffer + frame->size)
			!= tb_crc(pkg->buffer, frame->size)) {
		tb_ctl_err(pkg->ctl,
			   "RX: checksum mismatch, dropping packet\n");
		goto rx;
	}
	be32_to_cpu_array(pkg->buffer, pkg->buffer, frame->size / 4);

	if (frame->eof == TB_CFG_PKG_EVENT) {
		tb_ctl_handle_plug_event(pkg->ctl, pkg);
		goto rx;
	}
	if (!kfifo_put(&pkg->ctl->response_fifo, pkg)) {
		tb_ctl_err(pkg->ctl, "RX: fifo is full\n");
		goto rx;
	}
	complete(&pkg->ctl->response_ready);
	return;
rx:
	tb_ctl_rx_submit(pkg);
}

/**
 * tb_ctl_rx() - receive a packet from the control channel
 */
static struct tb_cfg_result tb_ctl_rx(struct tb_ctl *ctl, void *buffer,
				      size_t length, int timeout_msec,
				      u64 route, enum tb_cfg_pkg_type type)
{
	struct tb_cfg_result res;
	struct ctl_pkg *pkg;

	if (!wait_for_completion_timeout(&ctl->response_ready,
					 msecs_to_jiffies(timeout_msec))) {
		tb_ctl_WARN(ctl, "RX: timeout\n");
		return (struct tb_cfg_result) { .err = -ETIMEDOUT };
	}
	if (!kfifo_get(&ctl->response_fifo, &pkg)) {
		tb_ctl_WARN(ctl, "empty kfifo\n");
		return (struct tb_cfg_result) { .err = -EIO };
	}

	res = parse_header(pkg, length, type, route);
	if (!res.err)
		memcpy(buffer, pkg->buffer, length);
	tb_ctl_rx_submit(pkg);
	return res;
}

/* public interface, alloc/start/stop/free */

/**
 * tb_ctl_alloc() - allocate a control channel
 *
 * cb will be invoked once for every hot plug event.
 *
 * Return: Returns a pointer on success or NULL on failure.
 */
struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, hotplug_cb cb, void *cb_data)
{
	int i;
	struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl)
		return NULL;
	ctl->nhi = nhi;
	ctl->callback = cb;
	ctl->callback_data = cb_data;

	init_completion(&ctl->response_ready);
	INIT_KFIFO(ctl->response_fifo);
	ctl->frame_pool = dma_pool_create("thunderbolt_ctl", &nhi->pdev->dev,
					  TB_FRAME_SIZE, 4, 0);
	if (!ctl->frame_pool)
		goto err;

	ctl->tx = ring_alloc_tx(nhi, 0, 10);
	if (!ctl->tx)
		goto err;

	ctl->rx = ring_alloc_rx(nhi, 0, 10);
	if (!ctl->rx)
		goto err;

	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++) {
		ctl->rx_packets[i] = tb_ctl_pkg_alloc(ctl);
		if (!ctl->rx_packets[i])
			goto err;
		ctl->rx_packets[i]->frame.callback = tb_ctl_rx_callback;
	}

	tb_ctl_info(ctl, "control channel created\n");
	return ctl;
err:
	tb_ctl_free(ctl);
	return NULL;
}

/**
 * tb_ctl_free() - free a control channel
 *
 * Must be called after tb_ctl_stop.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_free(struct tb_ctl *ctl)
{
	int i;
	if (ctl->rx)
		ring_free(ctl->rx);
	if (ctl->tx)
		ring_free(ctl->tx);

	/* free RX packets */
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_pkg_free(ctl->rx_packets[i]);

	if (ctl->frame_pool)
		dma_pool_destroy(ctl->frame_pool);
	kfree(ctl);
}

/**
 * tb_ctl_start() - start/resume the control channel
 */
void tb_ctl_start(struct tb_ctl *ctl)
{
	int i;
	tb_ctl_info(ctl, "control channel starting...\n");
	ring_start(ctl->tx); /* is used to ack hotplug packets, start first */
	ring_start(ctl->rx);
	for (i = 0; i < TB_CTL_RX_PKG_COUNT; i++)
		tb_ctl_rx_submit(ctl->rx_packets[i]);
}

/**
 * tb_ctl_stop() - pause the control channel
 *
 * All invocations of ctl->callback will have finished after this method
 * returns.
 *
 * Must NOT be called from ctl->callback.
 */
void tb_ctl_stop(struct tb_ctl *ctl)
{
	ring_stop(ctl->rx);
	ring_stop(ctl->tx);

	if (!kfifo_is_empty(&ctl->response_fifo))
		tb_ctl_WARN(ctl, "dangling response in response_fifo\n");
	kfifo_reset(&ctl->response_fifo);
	tb_ctl_info(ctl, "control channel stopped\n");
}
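
/*
 * Typical lifecycle from the NHI driver's point of view (a sketch with
 * made-up variable names; error handling omitted):
 *
 *	struct tb_ctl *ctl = tb_ctl_alloc(nhi, my_hotplug_cb, my_data);
 *	tb_ctl_start(ctl);
 *	...issue tb_cfg_read()/tb_cfg_write() requests...
 *	tb_ctl_stop(ctl);
 *	tb_ctl_free(ctl);
 */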

/* public interface, commands */

/**
 * tb_cfg_error() - send error packet
 *
 * Return: Returns 0 on success or an error code on failure.
 */
int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port,
		 enum tb_cfg_error error)
{
	struct cfg_error_pkg pkg = {
		.header = make_header(route),
		.port = port,
		.error = error,
	};
	tb_ctl_info(ctl, "sending error packet on %llx:%x\n", route, port);
	return tb_ctl_tx(ctl, &pkg, sizeof(pkg), TB_CFG_PKG_ERROR);
}

/**
 * tb_cfg_reset() - send a reset packet and wait for a response
 *
 * If the switch at route is incorrectly configured then we will not receive a
 * reply (even though the switch will reset). The caller should check for
 * -ETIMEDOUT and attempt to reconfigure the switch.
 */
struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route,
				  int timeout_msec)
{
	int err;
	struct cfg_reset_pkg request = { .header = make_header(route) };
	struct tb_cfg_header reply;

	err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_RESET);
	if (err)
		return (struct tb_cfg_result) { .err = err };

	return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
			 TB_CFG_PKG_RESET);
}

/**
 * tb_cfg_read_raw() - read from config space into buffer
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_read_pkg request = {
		.header = make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_write_pkg reply;

	res.err = tb_ctl_tx(ctl, &request, sizeof(request), TB_CFG_PKG_READ);
	if (res.err)
		return res;

	res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route,
			TB_CFG_PKG_READ);
	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	if (!res.err)
		memcpy(buffer, &reply.data, 4 * length);
	return res;
}

/**
 * tb_cfg_write_raw() - write from buffer into config space
 *
 * Offset and length are in dwords.
 */
struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, void *buffer,
		u64 route, u32 port, enum tb_cfg_space space,
		u32 offset, u32 length, int timeout_msec)
{
	struct tb_cfg_result res = { 0 };
	struct cfg_write_pkg request = {
		.header = make_header(route),
		.addr = {
			.port = port,
			.space = space,
			.offset = offset,
			.length = length,
		},
	};
	struct cfg_read_pkg reply;

	memcpy(&request.data, buffer, length * 4);

	res.err = tb_ctl_tx(ctl, &request, 12 + 4 * length, TB_CFG_PKG_WRITE);
	if (res.err)
		return res;

	res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route,
			TB_CFG_PKG_WRITE);
	if (res.err)
		return res;

	res.response_port = reply.addr.port;
	res.err = check_config_address(reply.addr, space, offset, length);
	return res;
}

int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1) {
		tb_cfg_print_error(ctl, &res);
		return -EIO;
	}
	WARN(res.err, "tb_cfg_read: %d\n", res.err);
	return res.err;
}

int tb_cfg_write(struct tb_ctl *ctl, void *buffer, u64 route, u32 port,
		 enum tb_cfg_space space, u32 offset, u32 length)
{
	struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port,
			space, offset, length, TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1) {
		tb_cfg_print_error(ctl, &res);
		return -EIO;
	}
	WARN(res.err, "tb_cfg_write: %d\n", res.err);
	return res.err;
}
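
/*
 * Example usage (a sketch, assuming a valid route; error handling
 * trimmed): read the first dword of a switch config space and write it
 * back unchanged.
 *
 *	u32 val;
 *	if (!tb_cfg_read(ctl, &val, route, 0, TB_CFG_SWITCH, 0, 1))
 *		tb_cfg_write(ctl, &val, route, 0, TB_CFG_SWITCH, 0, 1);
 */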

/**
 * tb_cfg_get_upstream_port() - get upstream port number of switch at route
 *
 * Reads the first dword from the switch's TB_CFG_SWITCH config area and
 * returns the port number from which the reply originated.
 *
 * Return: Returns the upstream port number on success or an error code on
 * failure.
 */
int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route)
{
	u32 dummy;
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0,
						   TB_CFG_SWITCH, 0, 1,
						   TB_CFG_DEFAULT_TIMEOUT);
	if (res.err == 1)
		return -EIO;
	if (res.err)
		return res.err;
	return res.response_port;
}