sched.c

/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/netdevice.h>

#include "cxgb4.h"
#include "sched.h"
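
/* Manage hardware TX scheduling classes for each port and the binding
 * of TX queues to those classes.
 */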

/* Spinlock must be held by caller */
static int t4_sched_class_fw_cmd(struct port_info *pi,
                                 struct ch_sched_params *p,
                                 enum sched_fw_ops op)
{
        struct adapter *adap = pi->adapter;
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e;
        int err = 0;

        e = &s->tab[p->u.params.class];
        switch (op) {
        case SCHED_FW_OP_ADD:
                err = t4_sched_params(adap, p->type,
                                      p->u.params.level, p->u.params.mode,
                                      p->u.params.rateunit,
                                      p->u.params.ratemode,
                                      p->u.params.channel, e->idx,
                                      p->u.params.minrate, p->u.params.maxrate,
                                      p->u.params.weight, p->u.params.pktsize);
                break;
        default:
                err = -ENOTSUPP;
                break;
        }

        return err;
}

/* Spinlock must be held by caller */
static int t4_sched_bind_unbind_op(struct port_info *pi, void *arg,
                                   enum sched_bind_type type, bool bind)
{
        struct adapter *adap = pi->adapter;
        u32 fw_mnem, fw_class, fw_param;
        unsigned int pf = adap->pf;
        unsigned int vf = 0;
        int err = 0;

        switch (type) {
        case SCHED_QUEUE: {
                struct sched_queue_entry *qe;

                qe = (struct sched_queue_entry *)arg;

                /* Create a template for the FW_PARAMS_CMD mnemonic and
                 * value (TX Scheduling Class in this case).
                 */
                fw_mnem = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
                           FW_PARAMS_PARAM_X_V(
                                   FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH));
                fw_class = bind ? qe->param.class : FW_SCHED_CLS_NONE;
                fw_param = (fw_mnem | FW_PARAMS_PARAM_YZ_V(qe->cntxt_id));

                pf = adap->pf;
                vf = 0;
                break;
        }
        default:
                err = -ENOTSUPP;
                goto out;
        }

        err = t4_set_params(adap, adap->mbox, pf, vf, 1, &fw_param, &fw_class);

out:
        return err;
}
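
/* Find the scheduling class, if any, to which the queue with context
 * id @qid is currently bound. On a match, the entry's position within
 * the class's queue list is returned through @index.
 */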
static struct sched_class *t4_sched_queue_lookup(struct port_info *pi,
                                                 const unsigned int qid,
                                                 int *index)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e, *end;
        struct sched_class *found = NULL;
        int i;

        /* Look for a class with matching bound queue parameters */
        end = &s->tab[s->sched_size];
        for (e = &s->tab[0]; e != end; ++e) {
                struct sched_queue_entry *qe;

                i = 0;
                if (e->state == SCHED_STATE_UNUSED)
                        continue;

                list_for_each_entry(qe, &e->queue_list, list) {
                        if (qe->cntxt_id == qid) {
                                found = e;
                                if (index)
                                        *index = i;
                                break;
                        }
                        i++;
                }

                if (found)
                        break;
        }

        return found;
}
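
/* Unbind a queue from the scheduling class it is currently bound to,
 * if any: send the unbind command to firmware, drop the queue entry
 * from the class's list, and mark the class unused once its refcount
 * reaches zero.
 */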
static int t4_sched_queue_unbind(struct port_info *pi, struct ch_sched_queue *p)
{
        struct adapter *adap = pi->adapter;
        struct sched_class *e;
        struct sched_queue_entry *qe = NULL;
        struct sge_eth_txq *txq;
        unsigned int qid;
        int index = -1;
        int err = 0;

        if (p->queue < 0 || p->queue >= pi->nqsets)
                return -ERANGE;

        txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
        qid = txq->q.cntxt_id;

        /* Find the existing class that the queue is bound to */
        e = t4_sched_queue_lookup(pi, qid, &index);
        if (e && index >= 0) {
                int i = 0;

                spin_lock(&e->lock);
                list_for_each_entry(qe, &e->queue_list, list) {
                        if (i == index)
                                break;
                        i++;
                }
                err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE,
                                              false);
                if (err) {
                        spin_unlock(&e->lock);
                        goto out;
                }

                list_del(&qe->list);
                t4_free_mem(qe);
                if (atomic_dec_and_test(&e->refcnt)) {
                        e->state = SCHED_STATE_UNUSED;
                        memset(&e->info, 0, sizeof(e->info));
                }
                spin_unlock(&e->lock);
        }

out:
        return err;
}
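
/* Bind a queue to the class requested in @p, unbinding it first from
 * any class it is already bound to.
 */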
static int t4_sched_queue_bind(struct port_info *pi, struct ch_sched_queue *p)
{
        struct adapter *adap = pi->adapter;
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e;
        struct sched_queue_entry *qe = NULL;
        struct sge_eth_txq *txq;
        unsigned int qid;
        int err = 0;

        if (p->queue < 0 || p->queue >= pi->nqsets)
                return -ERANGE;

        qe = t4_alloc_mem(sizeof(struct sched_queue_entry));
        if (!qe)
                return -ENOMEM;

        txq = &adap->sge.ethtxq[pi->first_qset + p->queue];
        qid = txq->q.cntxt_id;

        /* Unbind queue from any existing class */
        err = t4_sched_queue_unbind(pi, p);
        if (err) {
                t4_free_mem(qe);
                goto out;
        }

        /* Bind queue to specified class */
        memset(qe, 0, sizeof(*qe));
        qe->cntxt_id = qid;
        memcpy(&qe->param, p, sizeof(qe->param));

        e = &s->tab[qe->param.class];
        spin_lock(&e->lock);
        err = t4_sched_bind_unbind_op(pi, (void *)qe, SCHED_QUEUE, true);
        if (err) {
                t4_free_mem(qe);
                spin_unlock(&e->lock);
                goto out;
        }

        list_add_tail(&qe->list, &e->queue_list);
        atomic_inc(&e->refcnt);
        spin_unlock(&e->lock);

out:
        return err;
}
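
/* Remove all bindings of the given type from scheduling class @e.
 * For queues, each entry on the class's queue list is unbound in turn.
 */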
static void t4_sched_class_unbind_all(struct port_info *pi,
                                      struct sched_class *e,
                                      enum sched_bind_type type)
{
        if (!e)
                return;

        switch (type) {
        case SCHED_QUEUE: {
                struct sched_queue_entry *qe, *tmp;

                /* Use the _safe iterator: unbinding deletes and frees
                 * the current entry, so its next pointer must be saved
                 * before the body runs.
                 */
                list_for_each_entry_safe(qe, tmp, &e->queue_list, list)
                        t4_sched_queue_unbind(pi, &qe->param);
                break;
        }
        default:
                break;
        }
}
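
/* Dispatch a bind/unbind request to the handler for the entity type */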
static int t4_sched_class_bind_unbind_op(struct port_info *pi, void *arg,
                                         enum sched_bind_type type, bool bind)
{
        int err = 0;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                if (bind)
                        err = t4_sched_queue_bind(pi, qe);
                else
                        err = t4_sched_queue_unbind(pi, qe);
                break;
        }
        default:
                err = -ENOTSUPP;
                break;
        }

        return err;
}

/**
 * cxgb4_sched_class_bind - Bind an entity to a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Binds an entity (queue) to a scheduling class. If the entity
 * is bound to another class, it will be unbound from the other class
 * and bound to the class specified in @arg.
 */
int cxgb4_sched_class_bind(struct net_device *dev, void *arg,
                           enum sched_bind_type type)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct sched_table *s;
        int err = 0;
        u8 class_id;

        if (!can_sched(dev))
                return -ENOTSUPP;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                class_id = qe->class;
                break;
        }
        default:
                return -ENOTSUPP;
        }

        if (!valid_class_id(dev, class_id))
                return -EINVAL;

        if (class_id == SCHED_CLS_NONE)
                return -ENOTSUPP;

        s = pi->sched_tbl;
        write_lock(&s->rw_lock);
        err = t4_sched_class_bind_unbind_op(pi, arg, type, true);
        write_unlock(&s->rw_lock);

        return err;
}

/**
 * cxgb4_sched_class_unbind - Unbind an entity from a scheduling class
 * @dev: net_device pointer
 * @arg: Entity opaque data
 * @type: Entity type (Queue)
 *
 * Unbinds an entity (queue) from a scheduling class.
 */
int cxgb4_sched_class_unbind(struct net_device *dev, void *arg,
                             enum sched_bind_type type)
{
        struct port_info *pi = netdev2pinfo(dev);
        struct sched_table *s;
        int err = 0;
        u8 class_id;

        if (!can_sched(dev))
                return -ENOTSUPP;

        if (!arg)
                return -EINVAL;

        switch (type) {
        case SCHED_QUEUE: {
                struct ch_sched_queue *qe = (struct ch_sched_queue *)arg;

                class_id = qe->class;
                break;
        }
        default:
                return -ENOTSUPP;
        }

        if (!valid_class_id(dev, class_id))
                return -EINVAL;

        s = pi->sched_tbl;
        write_lock(&s->rw_lock);
        err = t4_sched_class_bind_unbind_op(pi, arg, type, false);
        write_unlock(&s->rw_lock);

        return err;
}

/* If @p is NULL, fetch any available unused class */
static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
                                                 const struct ch_sched_params *p)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e, *end;
        struct sched_class *found = NULL;

        if (!p) {
                /* Get any available unused class */
                end = &s->tab[s->sched_size];
                for (e = &s->tab[0]; e != end; ++e) {
                        if (e->state == SCHED_STATE_UNUSED) {
                                found = e;
                                break;
                        }
                }
        } else {
                /* Look for a class with matching scheduling parameters */
                struct ch_sched_params info;
                struct ch_sched_params tp;

                memset(&info, 0, sizeof(info));
                memset(&tp, 0, sizeof(tp));

                memcpy(&tp, p, sizeof(tp));
                /* Don't try to match class parameter */
                tp.u.params.class = SCHED_CLS_NONE;

                end = &s->tab[s->sched_size];
                for (e = &s->tab[0]; e != end; ++e) {
                        if (e->state == SCHED_STATE_UNUSED)
                                continue;

                        memset(&info, 0, sizeof(info));
                        memcpy(&info, &e->info, sizeof(info));
                        /* Don't try to match class parameter */
                        info.u.params.class = SCHED_CLS_NONE;

                        if ((info.type == tp.type) &&
                            (!memcmp(&info.u.params, &tp.u.params,
                                     sizeof(info.u.params)))) {
                                found = e;
                                break;
                        }
                }
        }

        return found;
}
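
/* Return an existing class with parameters matching @p if one exists;
 * otherwise claim an unused class, program it into the hardware via
 * SCHED_FW_OP_ADD, and mark it active. Returns NULL if no class is
 * available or the firmware command fails.
 */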
static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
                                                struct ch_sched_params *p)
{
        struct sched_table *s = pi->sched_tbl;
        struct sched_class *e;
        u8 class_id;
        int err;

        if (!p)
                return NULL;

        class_id = p->u.params.class;

        /* Only accept search for existing class with matching params
         * or allocation of new class with specified params
         */
        if (class_id != SCHED_CLS_NONE)
                return NULL;

        write_lock(&s->rw_lock);
        /* See if there's an existing class with same
         * requested sched params
         */
        e = t4_sched_class_lookup(pi, p);
        if (!e) {
                struct ch_sched_params np;

                /* Fetch any available unused class */
                e = t4_sched_class_lookup(pi, NULL);
                if (!e)
                        goto out;

                memset(&np, 0, sizeof(np));
                memcpy(&np, p, sizeof(np));
                np.u.params.class = e->idx;

                spin_lock(&e->lock);
                /* New class */
                err = t4_sched_class_fw_cmd(pi, &np, SCHED_FW_OP_ADD);
                if (err) {
                        spin_unlock(&e->lock);
                        e = NULL;
                        goto out;
                }
                memcpy(&e->info, &np, sizeof(e->info));
                atomic_set(&e->refcnt, 0);
                e->state = SCHED_STATE_ACTIVE;
                spin_unlock(&e->lock);
        }

out:
        write_unlock(&s->rw_lock);
        return e;
}

/**
 * cxgb4_sched_class_alloc - allocate a scheduling class
 * @dev: net_device pointer
 * @p: new scheduling class to create. @p->u.params.class must be
 *     SCHED_CLS_NONE so that the driver picks an unused class.
 *
 * Returns a pointer to the scheduling class created. If a scheduling
 * class with parameters matching @p already exists, the matching class
 * is returned instead.
 */
struct sched_class *cxgb4_sched_class_alloc(struct net_device *dev,
                                            struct ch_sched_params *p)
{
        struct port_info *pi = netdev2pinfo(dev);
        u8 class_id;

        if (!can_sched(dev))
                return NULL;

        /* @p is dereferenced below, so a NULL @p must be rejected here */
        if (!p)
                return NULL;

        class_id = p->u.params.class;
        if (!valid_class_id(dev, class_id))
                return NULL;

        return t4_sched_class_alloc(pi, p);
}
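
/* Usage sketch (illustrative only, not from the driver): a hypothetical
 * caller could create a class and attach a TX queue to it as below. The
 * remaining u.params fields (level, mode, rateunit, ratemode, channel,
 * minrate, maxrate, weight, pktsize) would be filled in as needed.
 *
 *      struct ch_sched_params p = { .type = SCHED_CLASS_TYPE_PACKET };
 *      struct ch_sched_queue qe = { 0 };
 *      struct sched_class *e;
 *
 *      p.u.params.class = SCHED_CLS_NONE;
 *      e = cxgb4_sched_class_alloc(dev, &p);
 *      if (e) {
 *              qe.queue = 0;
 *              qe.class = e->idx;
 *              cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
 *      }
 */

/* Release a class by unbinding every queue still attached to it */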
static void t4_sched_class_free(struct port_info *pi, struct sched_class *e)
{
        t4_sched_class_unbind_all(pi, e, SCHED_QUEUE);
}
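
/* Allocate the per-port scheduler table with @sched_size classes and
 * initialize every class as unused.
 */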
struct sched_table *t4_init_sched(unsigned int sched_size)
{
        struct sched_table *s;
        unsigned int i;

        s = t4_alloc_mem(sizeof(*s) + sched_size * sizeof(struct sched_class));
        if (!s)
                return NULL;

        s->sched_size = sched_size;
        rwlock_init(&s->rw_lock);

        for (i = 0; i < s->sched_size; i++) {
                memset(&s->tab[i], 0, sizeof(struct sched_class));
                s->tab[i].idx = i;
                s->tab[i].state = SCHED_STATE_UNUSED;
                INIT_LIST_HEAD(&s->tab[i].queue_list);
                spin_lock_init(&s->tab[i].lock);
                atomic_set(&s->tab[i].refcnt, 0);
        }

        return s;
}
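
/* Tear down scheduling state on every port: free all active classes
 * and release each port's scheduler table.
 */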
void t4_cleanup_sched(struct adapter *adap)
{
        struct sched_table *s;
        unsigned int j, i;

        /* Use separate counters: the inner loop over classes must not
         * clobber the port iterator.
         */
        for_each_port(adap, j) {
                struct port_info *pi = netdev2pinfo(adap->port[j]);

                s = pi->sched_tbl;
                for (i = 0; i < s->sched_size; i++) {
                        struct sched_class *e;

                        write_lock(&s->rw_lock);
                        e = &s->tab[i];
                        if (e->state == SCHED_STATE_ACTIVE)
                                t4_sched_class_free(pi, e);
                        write_unlock(&s->rw_lock);
                }
                t4_free_mem(s);
        }
}