/* Copyright 2008 - 2016 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *	 notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *	 notice, this list of conditions and the following disclaimer in the
 *	 documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *	 names of its contributors may be used to endorse or promote products
 *	 derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bman_priv.h"

#define IRQNAME		"BMan portal %d"
#define MAX_IRQNAME	16	/* big enough for "BMan portal %d" */

/* Portal register assists */

/* Cache-inhibited register offsets */
#define BM_REG_RCR_PI_CINH	0x0000
#define BM_REG_RCR_CI_CINH	0x0004
#define BM_REG_RCR_ITR		0x0008
#define BM_REG_CFG		0x0100
#define BM_REG_SCN(n)		(0x0200 + ((n) << 2))
#define BM_REG_ISR		0x0e00
#define BM_REG_IER		0x0e04
#define BM_REG_ISDR		0x0e08
#define BM_REG_IIR		0x0e0c

/* Cache-enabled register offsets */
#define BM_CL_CR		0x0000
#define BM_CL_RR0		0x0100
#define BM_CL_RR1		0x0140
#define BM_CL_RCR		0x1000
#define BM_CL_RCR_PI_CENA	0x3000
#define BM_CL_RCR_CI_CENA	0x3100

/*
 * Portal modes.
 *   Enum types;
 *     pmode == production mode
 *     cmode == consumption mode,
 *   Enum values use 3 letter codes. First letter matches the portal mode,
 *   remaining two letters indicate;
 *     ci == cache-inhibited portal register
 *     ce == cache-enabled portal register
 *     vb == in-band valid-bit (cache-enabled)
 */
enum bm_rcr_pmode {		/* matches BCSP_CFG::RPM */
	bm_rcr_pci = 0,		/* PI index, cache-inhibited */
	bm_rcr_pce = 1,		/* PI index, cache-enabled */
	bm_rcr_pvb = 2		/* valid-bit */
};
enum bm_rcr_cmode {		/* s/w-only */
	bm_rcr_cci,		/* CI index, cache-inhibited */
	bm_rcr_cce		/* CI index, cache-enabled */
};
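
/*
 * This driver only ever initialises portals as (bm_rcr_pvb, bm_rcr_cce) -
 * see bman_create_portal() below - so production uses the in-band valid-bit
 * and consumption tracks CI via the cache-enabled register.
 */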

/* --- Portal structures --- */
#define BM_RCR_SIZE	8

/* Release Command */
struct bm_rcr_entry {
	union {
		struct {
			u8 _ncw_verb; /* writes to this are non-coherent */
			u8 bpid; /* used with BM_RCR_VERB_CMD_BPID_SINGLE */
			u8 __reserved1[62];
		};
		struct bm_buffer bufs[8];
	};
};
#define BM_RCR_VERB_VBIT		0x80
#define BM_RCR_VERB_CMD_MASK		0x70	/* one of two values; */
#define BM_RCR_VERB_CMD_BPID_SINGLE	0x20
#define BM_RCR_VERB_CMD_BPID_MULTI	0x30
#define BM_RCR_VERB_BUFCOUNT_MASK	0x0f	/* values 1..8 */

struct bm_rcr {
	struct bm_rcr_entry *ring, *cursor;
	u8 ci, available, ithresh, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	u32 busy;
	enum bm_rcr_pmode pmode;
	enum bm_rcr_cmode cmode;
#endif
};

/* MC (Management Command) command */
struct bm_mc_command {
	u8 _ncw_verb; /* writes to this are non-coherent */
	u8 bpid; /* used by acquire command */
	u8 __reserved[62];
};
#define BM_MCC_VERB_VBIT		0x80
#define BM_MCC_VERB_CMD_MASK		0x70	/* where the verb contains; */
#define BM_MCC_VERB_CMD_ACQUIRE		0x10
#define BM_MCC_VERB_CMD_QUERY		0x40
#define BM_MCC_VERB_ACQUIRE_BUFCOUNT	0x0f	/* values 1..8 go here */

/* MC result, Acquire and Query Response */
union bm_mc_result {
	struct {
		u8 verb;
		u8 bpid;
		u8 __reserved[62];
	};
	struct bm_buffer bufs[8];
};
#define BM_MCR_VERB_VBIT		0x80
#define BM_MCR_VERB_CMD_MASK		BM_MCC_VERB_CMD_MASK
#define BM_MCR_VERB_CMD_ACQUIRE		BM_MCC_VERB_CMD_ACQUIRE
#define BM_MCR_VERB_CMD_QUERY		BM_MCC_VERB_CMD_QUERY
#define BM_MCR_VERB_CMD_ERR_INVALID	0x60
#define BM_MCR_VERB_CMD_ERR_ECC		0x70
#define BM_MCR_VERB_ACQUIRE_BUFCOUNT	BM_MCC_VERB_ACQUIRE_BUFCOUNT /* 0..8 */
#define BM_MCR_TIMEOUT			10000 /* us */

struct bm_mc {
	struct bm_mc_command *cr;
	union bm_mc_result *rr;
	u8 rridx, vbit;
#ifdef CONFIG_FSL_DPAA_CHECKING
	enum {
		/* Can only be _mc_start()ed */
		mc_idle,
		/* Can only be _mc_commit()ed or _mc_abort()ed */
		mc_user,
		/* Can only be _mc_retry()ed */
		mc_hw
	} state;
#endif
};

struct bm_addr {
	void __iomem *ce;	/* cache-enabled */
	void __iomem *ci;	/* cache-inhibited */
};

struct bm_portal {
	struct bm_addr addr;
	struct bm_rcr rcr;
	struct bm_mc mc;
} ____cacheline_aligned;

/* Cache-inhibited register access. */
static inline u32 bm_in(struct bm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ci + offset);
}

static inline void bm_out(struct bm_portal *p, u32 offset, u32 val)
{
	__raw_writel(val, p->addr.ci + offset);
}

/* Cache Enabled Portal Access */
static inline void bm_cl_invalidate(struct bm_portal *p, u32 offset)
{
	dpaa_invalidate(p->addr.ce + offset);
}

static inline void bm_cl_touch_ro(struct bm_portal *p, u32 offset)
{
	dpaa_touch_ro(p->addr.ce + offset);
}

static inline u32 bm_ce_in(struct bm_portal *p, u32 offset)
{
	return __raw_readl(p->addr.ce + offset);
}

struct bman_portal {
	struct bm_portal p;
	/* interrupt sources processed by portal_isr(), configurable */
	unsigned long irq_sources;
	/* probing time config params for cpu-affine portals */
	const struct bm_portal_config *config;
	char irqname[MAX_IRQNAME];
};

static cpumask_t affine_mask;
static DEFINE_SPINLOCK(affine_mask_lock);
static DEFINE_PER_CPU(struct bman_portal, bman_affine_portal);

static inline struct bman_portal *get_affine_portal(void)
{
	return &get_cpu_var(bman_affine_portal);
}

static inline void put_affine_portal(void)
{
	put_cpu_var(bman_affine_portal);
}

/*
 * This object type refers to a pool, it isn't *the* pool. There may be
 * more than one such object per BMan buffer pool, eg. if different users of
 * the pool are operating via different portals.
 */
struct bman_pool {
	/* index of the buffer pool to encapsulate (0-63) */
	u32 bpid;
	/* Used for hash-table admin when using depletion notifications. */
	struct bman_portal *portal;
	struct bman_pool *next;
};

static u32 poll_portal_slow(struct bman_portal *p, u32 is);
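
/*
 * Portal interrupt handler: read the status bits for the sources we service,
 * hand them to the slow-path poller, then write-to-clear the handled bits in
 * ISR.
 */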
static irqreturn_t portal_isr(int irq, void *ptr)
{
	struct bman_portal *p = ptr;
	struct bm_portal *portal = &p->p;
	u32 clear = p->irq_sources;
	u32 is = bm_in(portal, BM_REG_ISR) & p->irq_sources;

	if (unlikely(!is))
		return IRQ_NONE;

	clear |= poll_portal_slow(p, is);
	bm_out(portal, BM_REG_ISR, clear);
	return IRQ_HANDLED;
}

/* --- RCR API --- */

#define RCR_SHIFT	ilog2(sizeof(struct bm_rcr_entry))
#define RCR_CARRY	(uintptr_t)(BM_RCR_SIZE << RCR_SHIFT)

/* Bit-wise logic to wrap a ring pointer by clearing the "carry bit" */
static struct bm_rcr_entry *rcr_carryclear(struct bm_rcr_entry *p)
{
	uintptr_t addr = (uintptr_t)p;

	addr &= ~RCR_CARRY;

	return (struct bm_rcr_entry *)addr;
}
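
/*
 * Worked example: sizeof(struct bm_rcr_entry) is 64 bytes, so RCR_SHIFT is 6
 * and RCR_CARRY is 8 << 6 = 0x200. The 512-byte ring is size-aligned, so
 * stepping the cursor past the last entry only sets the 0x200 "carry" bit;
 * masking it off wraps the pointer back to the ring base.
 */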

#ifdef CONFIG_FSL_DPAA_CHECKING
/* Bit-wise logic to convert a ring pointer to a ring index */
static int rcr_ptr2idx(struct bm_rcr_entry *e)
{
	return ((uintptr_t)e >> RCR_SHIFT) & (BM_RCR_SIZE - 1);
}
#endif

/* Increment the 'cursor' ring pointer, taking 'vbit' into account */
static inline void rcr_inc(struct bm_rcr *rcr)
{
	/* increment to the next RCR pointer and handle overflow and 'vbit' */
	struct bm_rcr_entry *partial = rcr->cursor + 1;

	rcr->cursor = rcr_carryclear(partial);
	if (partial != rcr->cursor)
		rcr->vbit ^= BM_RCR_VERB_VBIT;
}
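
/*
 * The valid-bit flips on every wrap: each committed entry's verb is stamped
 * with the current vbit, letting hardware distinguish newly produced entries
 * from stale ones left over from the previous pass around the ring.
 */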

static int bm_rcr_get_avail(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return rcr->available;
}

static int bm_rcr_get_fill(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	return BM_RCR_SIZE - 1 - rcr->available;
}
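
/*
 * Capacity is BM_RCR_SIZE - 1, not BM_RCR_SIZE: one slot always stays empty
 * so that a completely full ring and an empty one remain distinguishable by
 * their producer/consumer indices alone.
 */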

static void bm_rcr_set_ithresh(struct bm_portal *portal, u8 ithresh)
{
	struct bm_rcr *rcr = &portal->rcr;

	rcr->ithresh = ithresh;
	bm_out(portal, BM_REG_RCR_ITR, ithresh);
}

static void bm_rcr_cce_prefetch(struct bm_portal *portal)
{
	__maybe_unused struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	bm_cl_touch_ro(portal, BM_CL_RCR_CI_CENA);
}

static u8 bm_rcr_cce_update(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;
	u8 diff, old_ci = rcr->ci;

	DPAA_ASSERT(rcr->cmode == bm_rcr_cce);
	rcr->ci = bm_ce_in(portal, BM_CL_RCR_CI_CENA) & (BM_RCR_SIZE - 1);
	bm_cl_invalidate(portal, BM_CL_RCR_CI_CENA);
	diff = dpaa_cyc_diff(BM_RCR_SIZE, old_ci, rcr->ci);
	rcr->available += diff;
	return diff;
}

static inline struct bm_rcr_entry *bm_rcr_start(struct bm_portal *portal)
{
	struct bm_rcr *rcr = &portal->rcr;

	DPAA_ASSERT(!rcr->busy);
	if (!rcr->available)
		return NULL;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 1;
#endif
	dpaa_zero(rcr->cursor);
	return rcr->cursor;
}
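
/*
 * Commit ordering matters: the entry body is written first, dma_wmb() orders
 * those stores, and only then is the verb (carrying the valid-bit) written
 * and the cacheline flushed, so hardware never sees a half-built entry.
 */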
static inline void bm_rcr_pvb_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_rcr *rcr = &portal->rcr;
	struct bm_rcr_entry *rcursor;

	DPAA_ASSERT(rcr->busy);
	DPAA_ASSERT(rcr->pmode == bm_rcr_pvb);
	DPAA_ASSERT(rcr->available >= 1);

	dma_wmb();
	rcursor = rcr->cursor;
	rcursor->_ncw_verb = myverb | rcr->vbit;
	dpaa_flush(rcursor);
	rcr_inc(rcr);
	rcr->available--;
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
#endif
}

static int bm_rcr_init(struct bm_portal *portal, enum bm_rcr_pmode pmode,
		       enum bm_rcr_cmode cmode)
{
	struct bm_rcr *rcr = &portal->rcr;
	u32 cfg;
	u8 pi;

	rcr->ring = portal->addr.ce + BM_CL_RCR;
	rcr->ci = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	pi = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	rcr->cursor = rcr->ring + pi;
	rcr->vbit = (bm_in(portal, BM_REG_RCR_PI_CINH) & BM_RCR_SIZE) ?
		    BM_RCR_VERB_VBIT : 0;
	rcr->available = BM_RCR_SIZE - 1
			 - dpaa_cyc_diff(BM_RCR_SIZE, rcr->ci, pi);
	rcr->ithresh = bm_in(portal, BM_REG_RCR_ITR);
#ifdef CONFIG_FSL_DPAA_CHECKING
	rcr->busy = 0;
	rcr->pmode = pmode;
	rcr->cmode = cmode;
#endif
	cfg = (bm_in(portal, BM_REG_CFG) & 0xffffffe0)
	      | (pmode & 0x3); /* BCSP_CFG::RPM */
	bm_out(portal, BM_REG_CFG, cfg);
	return 0;
}

static void bm_rcr_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_rcr *rcr = &portal->rcr;
	int i;

	DPAA_ASSERT(!rcr->busy);

	i = bm_in(portal, BM_REG_RCR_PI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr_ptr2idx(rcr->cursor))
		pr_crit("losing uncommitted RCR entries\n");

	i = bm_in(portal, BM_REG_RCR_CI_CINH) & (BM_RCR_SIZE - 1);
	if (i != rcr->ci)
		pr_crit("missing existing RCR completions\n");

	if (rcr->ci != rcr_ptr2idx(rcr->cursor))
		pr_crit("RCR destroyed unquiesced\n");
#endif
}

/* --- Management command API --- */
static int bm_mc_init(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	mc->cr = portal->addr.ce + BM_CL_CR;
	mc->rr = portal->addr.ce + BM_CL_RR0;
	mc->rridx = (__raw_readb(&mc->cr->_ncw_verb) & BM_MCC_VERB_VBIT) ?
		    0 : 1;
	mc->vbit = mc->rridx ? BM_MCC_VERB_VBIT : 0;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return 0;
}
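
/*
 * The two response registers (BM_CL_RR0/BM_CL_RR1, 0x40 apart) are used
 * alternately: rridx selects where the next result will land, and both it and
 * the command valid-bit flip on each completed command, which is why
 * bm_mc_init() derives their initial values from the verb left in CR.
 */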

static void bm_mc_finish(struct bm_portal *portal)
{
#ifdef CONFIG_FSL_DPAA_CHECKING
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
	if (mc->state != mc_idle)
		pr_crit("Losing incomplete MC command\n");
#endif
}

static inline struct bm_mc_command *bm_mc_start(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;

	DPAA_ASSERT(mc->state == mc_idle);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_user;
#endif
	dpaa_zero(mc->cr);
	return mc->cr;
}

static inline void bm_mc_commit(struct bm_portal *portal, u8 myverb)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_user);
	dma_wmb();
	mc->cr->_ncw_verb = myverb | mc->vbit;
	dpaa_flush(mc->cr);
	dpaa_invalidate_touch_ro(rr);
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_hw;
#endif
}

static inline union bm_mc_result *bm_mc_result(struct bm_portal *portal)
{
	struct bm_mc *mc = &portal->mc;
	union bm_mc_result *rr = mc->rr + mc->rridx;

	DPAA_ASSERT(mc->state == mc_hw);
	/*
	 * The inactive response register's verb byte always returns zero until
	 * its command is submitted and completed. This includes the valid-bit,
	 * in case you were wondering...
	 */
	if (!__raw_readb(&rr->verb)) {
		dpaa_invalidate_touch_ro(rr);
		return NULL;
	}
	mc->rridx ^= 1;
	mc->vbit ^= BM_MCC_VERB_VBIT;
#ifdef CONFIG_FSL_DPAA_CHECKING
	mc->state = mc_idle;
#endif
	return rr;
}
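
/*
 * Poll bm_mc_result() for up to BM_MCR_TIMEOUT microseconds, one udelay(1)
 * per attempt. Returns the remaining budget: non-zero on success, zero if
 * the command never completed.
 */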
static inline int bm_mc_result_timeout(struct bm_portal *portal,
				       union bm_mc_result **mcr)
{
	int timeout = BM_MCR_TIMEOUT;

	do {
		*mcr = bm_mc_result(portal);
		if (*mcr)
			break;
		udelay(1);
	} while (--timeout);

	return timeout;
}

/* Disable all BSCN interrupts for the portal */
static void bm_isr_bscn_disable(struct bm_portal *portal)
{
	bm_out(portal, BM_REG_SCN(0), 0);
	bm_out(portal, BM_REG_SCN(1), 0);
}

static int bman_create_portal(struct bman_portal *portal,
			      const struct bm_portal_config *c)
{
	struct bm_portal *p;
	int ret;

	p = &portal->p;
	/*
	 * prep the low-level portal struct with the mapped addresses from the
	 * config, everything that follows depends on it and "config" is more
	 * for (de)reference...
	 */
	p->addr.ce = c->addr_virt[DPAA_PORTAL_CE];
	p->addr.ci = c->addr_virt[DPAA_PORTAL_CI];
	if (bm_rcr_init(p, bm_rcr_pvb, bm_rcr_cce)) {
		dev_err(c->dev, "RCR initialisation failed\n");
		goto fail_rcr;
	}
	if (bm_mc_init(p)) {
		dev_err(c->dev, "MC initialisation failed\n");
		goto fail_mc;
	}
	/*
	 * Default to all BPIDs disabled, we enable as required at
	 * run-time.
	 */
	bm_isr_bscn_disable(p);

	/* Write-to-clear any stale interrupt status bits */
	bm_out(p, BM_REG_ISDR, 0xffffffff);
	portal->irq_sources = 0;
	bm_out(p, BM_REG_IER, 0);
	bm_out(p, BM_REG_ISR, 0xffffffff);
	snprintf(portal->irqname, MAX_IRQNAME, IRQNAME, c->cpu);
	if (request_irq(c->irq, portal_isr, 0, portal->irqname, portal)) {
		dev_err(c->dev, "request_irq() failed\n");
		goto fail_irq;
	}
	if (c->cpu != -1 && irq_can_set_affinity(c->irq) &&
	    irq_set_affinity(c->irq, cpumask_of(c->cpu))) {
		dev_err(c->dev, "irq_set_affinity() failed\n");
		goto fail_affinity;
	}

	/* Need RCR to be empty before continuing */
	ret = bm_rcr_get_fill(p);
	if (ret) {
		dev_err(c->dev, "RCR unclean\n");
		goto fail_rcr_empty;
	}
	/* Success */
	portal->config = c;

	bm_out(p, BM_REG_ISDR, 0);
	bm_out(p, BM_REG_IIR, 0);

	return 0;

fail_rcr_empty:
fail_affinity:
	free_irq(c->irq, portal);
fail_irq:
	bm_mc_finish(p);
fail_mc:
	bm_rcr_finish(p);
fail_rcr:
	return -EIO;
}

struct bman_portal *bman_create_affine_portal(const struct bm_portal_config *c)
{
	struct bman_portal *portal;
	int err;

	portal = &per_cpu(bman_affine_portal, c->cpu);
	err = bman_create_portal(portal, c);
	if (err)
		return NULL;

	spin_lock(&affine_mask_lock);
	cpumask_set_cpu(c->cpu, &affine_mask);
	spin_unlock(&affine_mask_lock);

	return portal;
}

static u32 poll_portal_slow(struct bman_portal *p, u32 is)
{
	u32 ret = is;

	if (is & BM_PIRQ_RCRI) {
		bm_rcr_cce_update(&p->p);
		bm_rcr_set_ithresh(&p->p, 0);
		bm_out(&p->p, BM_REG_ISR, BM_PIRQ_RCRI);
		is &= ~BM_PIRQ_RCRI;
	}

	/* There should be no status register bits left undefined */
	DPAA_ASSERT(!is);
	return ret;
}

int bman_p_irqsource_add(struct bman_portal *p, u32 bits)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	set_bits(bits & BM_PIRQ_VISIBLE, &p->irq_sources);
	bm_out(&p->p, BM_REG_IER, p->irq_sources);
	local_irq_restore(irqflags);
	return 0;
}
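
/*
 * Drain a pool before its BPID is released: acquire one buffer per management
 * command until the returned bufcount is zero, which is how an empty pool
 * reports itself.
 */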
static int bm_shutdown_pool(u32 bpid)
{
	struct bm_mc_command *bm_cmd;
	union bm_mc_result *bm_res;

	while (1) {
		struct bman_portal *p = get_affine_portal();
		/* Acquire buffers until empty */
		bm_cmd = bm_mc_start(&p->p);
		bm_cmd->bpid = bpid;
		bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE | 1);
		if (!bm_mc_result_timeout(&p->p, &bm_res)) {
			put_affine_portal();
			pr_crit("BMan Acquire Command timed out\n");
			return -ETIMEDOUT;
		}
		if (!(bm_res->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT)) {
			put_affine_portal();
			/* Pool is empty */
			return 0;
		}
		put_affine_portal();
	}

	return 0;
}

struct gen_pool *bm_bpalloc;
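
/*
 * BPIDs are handed out by a genalloc pool. gen_pool_alloc() returns 0 on
 * failure, so valid IDs are stored biased by DPAA_GENALLOC_OFF; the offset is
 * masked back off on allocation and re-applied on free.
 */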
static int bm_alloc_bpid_range(u32 *result, u32 count)
{
	unsigned long addr;

	addr = gen_pool_alloc(bm_bpalloc, count);
	if (!addr)
		return -ENOMEM;

	*result = addr & ~DPAA_GENALLOC_OFF;
	return 0;
}

static int bm_release_bpid(u32 bpid)
{
	int ret;

	ret = bm_shutdown_pool(bpid);
	if (ret) {
		pr_debug("BPID %d leaked\n", bpid);
		return ret;
	}

	gen_pool_free(bm_bpalloc, bpid | DPAA_GENALLOC_OFF, 1);
	return 0;
}

struct bman_pool *bman_new_pool(void)
{
	struct bman_pool *pool = NULL;
	u32 bpid;

	if (bm_alloc_bpid_range(&bpid, 1))
		return NULL;

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		goto err;

	pool->bpid = bpid;

	return pool;
err:
	bm_release_bpid(bpid);
	kfree(pool);
	return NULL;
}
EXPORT_SYMBOL(bman_new_pool);

void bman_free_pool(struct bman_pool *pool)
{
	bm_release_bpid(pool->bpid);
	kfree(pool);
}
EXPORT_SYMBOL(bman_free_pool);

int bman_get_bpid(const struct bman_pool *pool)
{
	return pool->bpid;
}
EXPORT_SYMBOL(bman_get_bpid);
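
/*
 * Refresh our view of the RCR consumer index: while a slot is still available
 * we merely prefetch the cache-enabled CI line for a later update; once the
 * ring looks full we must read CI now to reclaim consumed slots.
 */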
static void update_rcr_ci(struct bman_portal *p, int avail)
{
	if (avail)
		bm_rcr_cce_prefetch(&p->p);
	else
		bm_rcr_cce_update(&p->p);
}

int bman_release(struct bman_pool *pool, const struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p;
	struct bm_rcr_entry *r;
	unsigned long irqflags;
	int avail, timeout = 1000; /* 1ms */
	int i = num - 1;

	DPAA_ASSERT(num > 0 && num <= 8);

	do {
		p = get_affine_portal();
		local_irq_save(irqflags);
		avail = bm_rcr_get_avail(&p->p);
		if (avail < 2)
			update_rcr_ci(p, avail);
		r = bm_rcr_start(&p->p);
		local_irq_restore(irqflags);
		put_affine_portal();
		if (likely(r))
			break;

		udelay(1);
	} while (--timeout);

	if (unlikely(!timeout))
		return -ETIMEDOUT;

	p = get_affine_portal();
	local_irq_save(irqflags);
	/*
	 * we can copy all but the first entry, as copying the first one
	 * could trigger badness with the valid-bit
	 */
	bm_buffer_set64(r->bufs, bm_buffer_get64(bufs));
	bm_buffer_set_bpid(r->bufs, pool->bpid);
	if (i)
		memcpy(&r->bufs[1], &bufs[1], i * sizeof(bufs[0]));

	bm_rcr_pvb_commit(&p->p, BM_RCR_VERB_CMD_BPID_SINGLE |
			  (num & BM_RCR_VERB_BUFCOUNT_MASK));

	local_irq_restore(irqflags);
	put_affine_portal();
	return 0;
}
EXPORT_SYMBOL(bman_release);

int bman_acquire(struct bman_pool *pool, struct bm_buffer *bufs, u8 num)
{
	struct bman_portal *p = get_affine_portal();
	struct bm_mc_command *mcc;
	union bm_mc_result *mcr;
	int ret;

	DPAA_ASSERT(num > 0 && num <= 8);

	mcc = bm_mc_start(&p->p);
	mcc->bpid = pool->bpid;
	bm_mc_commit(&p->p, BM_MCC_VERB_CMD_ACQUIRE |
		     (num & BM_MCC_VERB_ACQUIRE_BUFCOUNT));
	if (!bm_mc_result_timeout(&p->p, &mcr)) {
		put_affine_portal();
		pr_crit("BMan Acquire Timeout\n");
		return -ETIMEDOUT;
	}
	ret = mcr->verb & BM_MCR_VERB_ACQUIRE_BUFCOUNT;
	if (bufs)
		memcpy(&bufs[0], &mcr->bufs[0], num * sizeof(bufs[0]));

	put_affine_portal();
	if (ret != num)
		ret = -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(bman_acquire);
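
/*
 * Usage sketch (illustrative, not part of this file): a typical caller seeds
 * a pool with a buffer and later pulls it back. Here dma_addr stands in for a
 * caller-provided bus address, and bm_buffer_set64() comes from the BMan
 * headers.
 *
 *	struct bman_pool *pool = bman_new_pool();
 *	struct bm_buffer buf;
 *
 *	if (pool) {
 *		bm_buffer_set64(&buf, dma_addr);
 *		if (!bman_release(pool, &buf, 1) &&
 *		    bman_acquire(pool, &buf, 1) == 1)
 *			pr_info("got our buffer back\n");
 *		bman_free_pool(pool);
 *	}
 */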

const struct bm_portal_config *
bman_get_bm_portal_config(const struct bman_portal *portal)
{
	return portal->config;
}