cvmx-cmd-queue.h
  1. /***********************license start***************
  2. * Author: Cavium Networks
  3. *
  4. * Contact: support@caviumnetworks.com
  5. * This file is part of the OCTEON SDK
  6. *
  7. * Copyright (c) 2003-2008 Cavium Networks
  8. *
  9. * This file is free software; you can redistribute it and/or modify
  10. * it under the terms of the GNU General Public License, Version 2, as
  11. * published by the Free Software Foundation.
  12. *
  13. * This file is distributed in the hope that it will be useful, but
  14. * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
  15. * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
  16. * NONINFRINGEMENT. See the GNU General Public License for more
  17. * details.
  18. *
  19. * You should have received a copy of the GNU General Public License
  20. * along with this file; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  22. * or visit http://www.gnu.org/licenses/.
  23. *
  24. * This file may also be available under a different license from Cavium.
  25. * Contact Cavium Networks for more information
  26. ***********************license end**************************************/
  27. /*
  28. *
  29. * Support functions for managing command queues used for
  30. * various hardware blocks.
  31. *
  32. * The common command queue infrastructure abstracts out the
  33. * software necessary for adding to Octeon's chained queue
  34. * structures. These structures are used for commands to the
  35. * PKO, ZIP, DFA, RAID, and DMA engine blocks. Although each
  36. * hardware unit takes commands and CSRs of different types,
  37. * they all use basic linked command buffers to store the
  38. * pending request. In general, users of the CVMX API don't
  39. * call cvmx-cmd-queue functions directly. Instead the hardware
  40. * unit specific wrapper should be used. The wrappers perform
  41. * unit specific validation and CSR writes to submit the
  42. * commands.
  43. *
  44. * Even though most software will never directly interact with
  45. * cvmx-cmd-queue, knowledge of its internal working can help
  46. * in diagnosing performance problems and help with debugging.
  47. *
  48. * Command queue pointers are stored in a global named block
  49. * called "cvmx_cmd_queues". Except for the PKO queues, each
  50. * hardware queue is stored in its own cache line to reduce SMP
  51. * contention on spin locks. The PKO queues are stored such that
  52. * every 16th queue is next to each other in memory. This scheme
  53. * allows for queues being in separate cache lines when there
54. * is a low number of queues per port. With 16 queues per port,
  55. * the first queue for each port is in the same cache area. The
  56. * second queues for each port are in another area, etc. This
  57. * allows software to implement very efficient lockless PKO with
  58. * 16 queues per port using a minimum of cache lines per core.
  59. * All queues for a given core will be isolated in the same
  60. * cache area.
  61. *
  62. * In addition to the memory pointer layout, cvmx-cmd-queue
  63. * provides an optimized fair ll/sc locking mechanism for the
  64. * queues. The lock uses a "ticket / now serving" model to
  65. * maintain fair order on contended locks. In addition, it uses
  66. * predicted locking time to limit cache contention. When a core
67. * knows it must wait in line for a lock, it spins on the
  68. * internal cycle counter to completely eliminate any causes of
  69. * bus traffic.
  70. *
  71. */
  72. #ifndef __CVMX_CMD_QUEUE_H__
  73. #define __CVMX_CMD_QUEUE_H__
  74. #include <linux/prefetch.h>
  75. #include "cvmx-fpa.h"
/**
 * By default we disable the max depth support. Most programs
 * don't use it and it slows down the command queue processing
 * significantly. Define it to 1 before including this header to
 * enable the per-queue max_depth check in the write functions.
 */
#ifndef CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH
#define CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH 0
#endif
/**
 * Enumeration representing all hardware blocks that use command
 * queues. Each hardware block has up to 65536 sub identifiers for
 * multiple command queues. Not all chips support all hardware
 * units.
 */
typedef enum {
	CVMX_CMD_QUEUE_PKO_BASE = 0x00000,

/* Build the queue id for a specific PKO queue (low 16 bits select it) */
#define CVMX_CMD_QUEUE_PKO(queue) \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))

	CVMX_CMD_QUEUE_ZIP = 0x10000,
	CVMX_CMD_QUEUE_DFA = 0x20000,
	CVMX_CMD_QUEUE_RAID = 0x30000,
	CVMX_CMD_QUEUE_DMA_BASE = 0x40000,

/* Build the queue id for a specific DMA engine queue */
#define CVMX_CMD_QUEUE_DMA(queue) \
	((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))

	/* One past the last valid unit; used to size the state arrays */
	CVMX_CMD_QUEUE_END = 0x50000,
} cvmx_cmd_queue_id_t;
/**
 * Command write operations can fail if the command queue needs
 * a new buffer and the associated FPA pool is empty. It can also
 * fail if the number of queued command words reaches the maximum
 * set at initialization.
 */
typedef enum {
	CVMX_CMD_QUEUE_SUCCESS = 0,
	/* FPA pool was empty when a new command buffer was needed */
	CVMX_CMD_QUEUE_NO_MEMORY = -1,
	/* Queue length exceeded the max_depth set at initialization */
	CVMX_CMD_QUEUE_FULL = -2,
	CVMX_CMD_QUEUE_INVALID_PARAM = -3,
	/* Queue was already initialized with different parameters */
	CVMX_CMD_QUEUE_ALREADY_SETUP = -4,
} cvmx_cmd_queue_result_t;
/*
 * Per-queue state. Packed into a single cache-line-sized record;
 * the bitfield widths are load-bearing (base_ptr_div128 must hold a
 * physical address shifted right by 7), so do not rearrange them.
 */
typedef struct {
	/* You have the lock when this equals your ticket */
	uint8_t now_serving;
	uint64_t unused1:24;
	/* Maximum outstanding command words; 0 disables the depth check */
	uint32_t max_depth;
	/* FPA pool command buffers come from */
	uint64_t fpa_pool:3;
	/* Physical address of top of command buffer, shifted right 7 */
	uint64_t base_ptr_div128:29;
	uint64_t unused2:6;
	/* FPA buffer size in 64bit words minus 1; the last word of each
	   buffer holds the link to the next chained buffer */
	uint64_t pool_size_m1:13;
	/* Number of command words already used in the current buffer */
	uint64_t index:13;
} __cvmx_cmd_queue_state_t;
/**
 * This structure contains the global state of all command queues.
 * It is stored in a bootmem named block and shared by all
 * applications running on Octeon. Tickets are stored in a different
 * cache line than the queue information to reduce the contention on
 * the ll/sc used to get a ticket. If this is not the case, the update
 * of queue state causes the ll/sc to fail quite often.
 */
typedef struct {
	/* Ticket counters: 256 slots per hardware unit (see
	   __cvmx_cmd_queue_get_index() for the slot layout) */
	uint64_t ticket[(CVMX_CMD_QUEUE_END >> 16) * 256];
	/* Per-queue state, indexed the same way as ticket[] */
	__cvmx_cmd_queue_state_t state[(CVMX_CMD_QUEUE_END >> 16) * 256];
} __cvmx_cmd_queue_all_state_t;
/**
 * Initialize a command queue for use. The initial FPA buffer is
 * allocated and the hardware unit is configured to point to the
 * new command queue.
 *
 * @queue_id: Hardware command queue to initialize.
 * @max_depth: Maximum outstanding commands that can be queued.
 * @fpa_pool: FPA pool the command queues should come from.
 * @pool_size: Size of each buffer in the FPA pool (bytes)
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_initialize(cvmx_cmd_queue_id_t queue_id,
						  int max_depth, int fpa_pool,
						  int pool_size);

/**
 * Shutdown a queue and free its command buffers to the FPA. The
 * hardware connected to the queue must be stopped before this
 * function is called.
 *
 * @queue_id: Queue to shutdown
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
cvmx_cmd_queue_result_t cvmx_cmd_queue_shutdown(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the number of command words pending in the queue. This
 * function may be relatively slow for some hardware units.
 *
 * @queue_id: Hardware command queue to query
 *
 * Returns Number of outstanding commands
 */
int cvmx_cmd_queue_length(cvmx_cmd_queue_id_t queue_id);

/**
 * Return the command buffer to be written to. The purpose of this
 * function is to allow CVMX routine access to the low level buffer
 * for initial hardware setup. User applications should not call this
 * function directly.
 *
 * @queue_id: Command queue to query
 *
 * Returns Command buffer or NULL on failure
 */
void *cvmx_cmd_queue_buffer(cvmx_cmd_queue_id_t queue_id);
  188. /**
  189. * Get the index into the state arrays for the supplied queue id.
  190. *
  191. * @queue_id: Queue ID to get an index for
  192. *
  193. * Returns Index into the state arrays
  194. */
  195. static inline int __cvmx_cmd_queue_get_index(cvmx_cmd_queue_id_t queue_id)
  196. {
  197. /*
  198. * Warning: This code currently only works with devices that
  199. * have 256 queues or less. Devices with more than 16 queues
  200. * are laid out in memory to allow cores quick access to
  201. * every 16th queue. This reduces cache thrashing when you are
  202. * running 16 queues per port to support lockless operation.
  203. */
  204. int unit = queue_id >> 16;
  205. int q = (queue_id >> 4) & 0xf;
  206. int core = queue_id & 0xf;
  207. return unit * 256 + core * 16 + q;
  208. }
/**
 * Lock the supplied queue so nobody else is updating it at the same
 * time as us.
 *
 * Implements a fair "ticket / now serving" lock in MIPS assembly:
 * the caller atomically takes a ticket with ll/sc, then spins until
 * now_serving reaches that ticket. A waiter that sees N tickets
 * ahead of it delays for roughly (N-1)*32 loop iterations before
 * re-checking, which keeps the cache line quiet while queued.
 *
 * @queue_id: Queue ID to lock
 * @qptr: Pointer to the queue's global state
 */
static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
					 __cvmx_cmd_queue_state_t *qptr)
{
	extern __cvmx_cmd_queue_all_state_t
	    *__cvmx_cmd_queue_state_ptr;
	int tmp;
	int my_ticket;
	/* Warm the queue state cache line while we wait for a ticket */
	prefetch(qptr);
	asm volatile (
	    ".set push\n"
	    ".set noreorder\n"
	    "1:\n"
	    /* Atomic add one to ticket_ptr */
	    "ll %[my_ticket], %[ticket_ptr]\n"
	    /* and store the original value */
	    "li %[ticket], 1\n"
	    /* in my_ticket (baddu: unsigned byte add) */
	    "baddu %[ticket], %[my_ticket]\n"
	    "sc %[ticket], %[ticket_ptr]\n"
	    /* sc wrote 0 on contention: retry the ll/sc sequence */
	    "beqz %[ticket], 1b\n"
	    " nop\n"
	    /* Load the current now_serving ticket */
	    "lbu %[ticket], %[now_serving]\n"
	    "2:\n"
	    /* Jump out if now_serving == my_ticket */
	    "beq %[ticket], %[my_ticket], 4f\n"
	    /* Find out how many tickets are in front of me */
	    " subu %[ticket], %[my_ticket], %[ticket]\n"
	    /* Use tickets in front of me minus one to delay */
	    "subu %[ticket], 1\n"
	    /* Delay will be ((tickets in front)-1)*32 loops */
	    "cins %[ticket], %[ticket], 5, 7\n"
	    "3:\n"
	    /* Loop here until our ticket might be up */
	    "bnez %[ticket], 3b\n"
	    " subu %[ticket], 1\n"
	    /* Jump back up to check out ticket again */
	    "b 2b\n"
	    /* Load the current now_serving ticket (branch delay slot) */
	    " lbu %[ticket], %[now_serving]\n"
	    "4:\n"
	    ".set pop\n" :
	    [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
	    [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
	    [my_ticket] "=r"(my_ticket)
	);
}
/**
 * Unlock the queue, flushing all writes.
 *
 * @qptr: Queue to unlock
 */
static inline void __cvmx_cmd_queue_unlock(__cvmx_cmd_queue_state_t *qptr)
{
	/* Hand the lock to the next ticket holder */
	qptr->now_serving++;
	/* Memory barrier so the queue updates are visible before the
	   release is (CVMX_SYNCWS is defined elsewhere in the SDK;
	   presumably the Octeon write-sync instruction) */
	CVMX_SYNCWS;
}
  273. /**
  274. * Get the queue state structure for the given queue id
  275. *
  276. * @queue_id: Queue id to get
  277. *
  278. * Returns Queue structure or NULL on failure
  279. */
  280. static inline __cvmx_cmd_queue_state_t
  281. *__cvmx_cmd_queue_get_state(cvmx_cmd_queue_id_t queue_id)
  282. {
  283. extern __cvmx_cmd_queue_all_state_t
  284. *__cvmx_cmd_queue_state_ptr;
  285. return &__cvmx_cmd_queue_state_ptr->
  286. state[__cvmx_cmd_queue_get_index(queue_id)];
  287. }
/**
 * Write an arbitrary number of command words to a command queue.
 * This is a generic function; the fixed number of command word
 * functions yield higher performance.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *      Use internal locking to ensure exclusive access for queue
 *      updates. If you don't use this locking you must ensure
 *      exclusivity some other way. Locking is strongly recommended.
 * @cmd_count: Number of command words to write
 * @cmds: Array of commands to write
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write(cvmx_cmd_queue_id_t
							   queue_id,
							   int use_locking,
							   int cmd_count,
							   uint64_t *cmds)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);

	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it. max_depth == 0 disables the check.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}

	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + cmd_count < qptr->pool_size_m1)) {
		/* Recover the buffer's virtual address from the shifted
		   physical address stored in the queue state */
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += cmd_count;
		while (cmd_count--)
			*ptr++ = *cmds++;
	} else {
		uint64_t *ptr;
		int count;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available.
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer.
		 */
		count = qptr->pool_size_m1 - qptr->index;
		ptr += qptr->index;
		cmd_count -= count;
		/* Fill the remainder of the current buffer ... */
		while (count--)
			*ptr++ = *cmds++;
		/* ... and chain its last word to the new buffer */
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer. (*ptr still holds the physical
		 * address we just stored, hence the >> 7 readback.)
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = cmd_count;
		ptr = new_buffer;
		while (cmd_count--)
			*ptr++ = *cmds++;
	}

	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
/**
 * Simple function to write two command words to a command
 * queue.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *      Use internal locking to ensure exclusive access for queue
 *      updates. If you don't use this locking you must ensure
 *      exclusivity some other way. Locking is strongly recommended.
 * @cmd1: Command
 * @cmd2: Command
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write2(cvmx_cmd_queue_id_t
							    queue_id,
							    int use_locking,
							    uint64_t cmd1,
							    uint64_t cmd2)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);

	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it. max_depth == 0 disables the check.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}

	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + 2 < qptr->pool_size_m1)) {
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += 2;
		ptr[0] = cmd1;
		ptr[1] = cmd2;
	} else {
		uint64_t *ptr;
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer.
		 */
		int count = qptr->pool_size_m1 - qptr->index;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available.
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		/* Reserve the last word for the next-buffer link */
		count--;
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		*ptr++ = cmd1;
		/* cmd2 fits in the old buffer only if a word remains */
		if (likely(count))
			*ptr++ = cmd2;
		/* Chain the old buffer's last word to the new buffer */
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer.
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = 0;
		/* If cmd2 didn't fit above, it becomes the first word
		   of the new buffer */
		if (unlikely(count == 0)) {
			qptr->index = 1;
			new_buffer[0] = cmd2;
		}
	}

	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
/**
 * Simple function to write three command words to a command
 * queue.
 *
 * @queue_id: Hardware command queue to write to
 * @use_locking:
 *      Use internal locking to ensure exclusive access for queue
 *      updates. If you don't use this locking you must ensure
 *      exclusivity some other way. Locking is strongly recommended.
 * @cmd1: Command
 * @cmd2: Command
 * @cmd3: Command
 *
 * Returns CVMX_CMD_QUEUE_SUCCESS or a failure code
 */
static inline cvmx_cmd_queue_result_t cvmx_cmd_queue_write3(cvmx_cmd_queue_id_t
							    queue_id,
							    int use_locking,
							    uint64_t cmd1,
							    uint64_t cmd2,
							    uint64_t cmd3)
{
	__cvmx_cmd_queue_state_t *qptr = __cvmx_cmd_queue_get_state(queue_id);

	/* Make sure nobody else is updating the same queue */
	if (likely(use_locking))
		__cvmx_cmd_queue_lock(queue_id, qptr);

	/*
	 * If a max queue length was specified then make sure we don't
	 * exceed it. If any part of the command would be below the
	 * limit we allow it. max_depth == 0 disables the check.
	 */
	if (CVMX_CMD_QUEUE_ENABLE_MAX_DEPTH && unlikely(qptr->max_depth)) {
		if (unlikely
		    (cvmx_cmd_queue_length(queue_id) > (int)qptr->max_depth)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_FULL;
		}
	}

	/*
	 * Normally there is plenty of room in the current buffer for
	 * the command.
	 */
	if (likely(qptr->index + 3 < qptr->pool_size_m1)) {
		uint64_t *ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		qptr->index += 3;
		ptr[0] = cmd1;
		ptr[1] = cmd2;
		ptr[2] = cmd3;
	} else {
		uint64_t *ptr;
		/*
		 * Figure out how many command words will fit in this
		 * buffer. One location will be needed for the next
		 * buffer pointer
		 */
		int count = qptr->pool_size_m1 - qptr->index;
		/*
		 * We need a new command buffer. Fail if there isn't
		 * one available
		 */
		uint64_t *new_buffer =
		    (uint64_t *) cvmx_fpa_alloc(qptr->fpa_pool);
		if (unlikely(new_buffer == NULL)) {
			if (likely(use_locking))
				__cvmx_cmd_queue_unlock(qptr);
			return CVMX_CMD_QUEUE_NO_MEMORY;
		}
		/* Reserve the last word for the next-buffer link */
		count--;
		ptr =
		    (uint64_t *) cvmx_phys_to_ptr((uint64_t) qptr->
						  base_ptr_div128 << 7);
		ptr += qptr->index;
		*ptr++ = cmd1;
		/* Write whichever of cmd2/cmd3 still fit here */
		if (count) {
			*ptr++ = cmd2;
			if (count > 1)
				*ptr++ = cmd3;
		}
		/* Chain the old buffer's last word to the new buffer */
		*ptr = cvmx_ptr_to_phys(new_buffer);
		/*
		 * The current buffer is full and has a link to the
		 * next buffer. Time to write the rest of the commands
		 * into the new buffer.
		 */
		qptr->base_ptr_div128 = *ptr >> 7;
		qptr->index = 0;
		ptr = new_buffer;
		/* Spill the words that didn't fit into the new buffer */
		if (count == 0) {
			*ptr++ = cmd2;
			qptr->index++;
		}
		if (count < 2) {
			*ptr++ = cmd3;
			qptr->index++;
		}
	}

	/* All updates are complete. Release the lock and return */
	if (likely(use_locking))
		__cvmx_cmd_queue_unlock(qptr);
	return CVMX_CMD_QUEUE_SUCCESS;
}
  581. #endif /* __CVMX_CMD_QUEUE_H__ */