/*
 * net/sched/sch_qfq.c         Quick Fair Queueing Plus Scheduler.
 *
 * Copyright (c) 2009 Fabio Checconi, Luigi Rizzo, and Paolo Valente.
 * Copyright (c) 2012 Paolo Valente.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/pkt_sched.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>


/*  Quick Fair Queueing Plus
    ========================

    Sources:

    [1] Paolo Valente,
    "Reducing the Execution Time of Fair-Queueing Schedulers."
    http://algo.ing.unimo.it/people/paolo/agg-sched/agg-sched.pdf

    Sources for QFQ:

    [2] Fabio Checconi, Luigi Rizzo, and Paolo Valente: "QFQ: Efficient
    Packet Scheduling with Tight Bandwidth Distribution Guarantees."

    See also:
    http://retis.sssup.it/~fabio/linux/qfq/
 */
/*

  QFQ+ divides classes into aggregates of at most MAX_AGG_CLASSES
  classes. Each aggregate is timestamped with a virtual start time S
  and a virtual finish time F, and scheduled according to its
  timestamps. S and F are computed as a function of a system virtual
  time function V. The classes within each aggregate are instead
  scheduled with DRR.

  To speed up operations, QFQ+ also divides aggregates into a limited
  number of groups. Which group a class belongs to depends on the
  ratio between the maximum packet length for the class and the weight
  of the class. Groups have their own S and F. In the end, QFQ+
  schedules groups, then aggregates within groups, then classes within
  aggregates. See [1] and [2] for a full description.

  Virtual time computations.

  S, F and V are all computed in fixed point arithmetic with
  FRAC_BITS decimal bits.

  QFQ_MAX_INDEX is the maximum index allowed for a group. We need
	one bit per index.
  QFQ_MAX_WSHIFT is the maximum power of two supported as a weight.

  The layout of the bits is as below:

                   [ MTU_SHIFT ][      FRAC_BITS    ]
                   [ MAX_INDEX    ][ MIN_SLOT_SHIFT ]
				 ^.__grp->index = 0
				 *.__grp->slot_shift

  where MIN_SLOT_SHIFT is derived by difference from the others.

  The max group index corresponds to Lmax/w_min, where
  Lmax=1<<MTU_SHIFT, w_min = 1 .
  From this, and knowing how many groups (MAX_INDEX) we want,
  we can derive the shift corresponding to each group.

  Because we often need to compute
	F = S + len/w_i  and  V = V + len/wsum
  instead of storing w_i we store the value
	inv_w = (1<<FRAC_BITS)/w_i ,
  so we can compute F = S + len * inv_w, and advance V with the
  scaled inverse of the weight sum (iwsum) in the same way.
  We use W_TOT in the formulas so we can easily move between
  static and adaptive weight sum.

  The per-scheduler-instance data contain all the data structures
  for the scheduler: bitmaps and bucket lists.

 */
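
/* Worked example of the fixed-point arithmetic above (illustrative
 * numbers, not taken from [1] or [2]): with FRAC_BITS = 30, a class of
 * weight w_i = 2 stores inv_w = ONE_FP/2 = 2^29. Serving a len = 1024
 * byte packet then advances the finish time by
 *	len * inv_w = 1024 * 2^29 = 2^39,
 * which is exactly len/w_i = 512 scaled by 2^30.
 */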
/*
 * Maximum number of consecutive slots occupied by backlogged classes
 * inside a group.
 */
#define QFQ_MAX_SLOTS	32

/*
 * Shifts used for aggregate<->group mapping. We allow class weights that are
 * in the range [1, 2^MAX_WSHIFT], and we try to map each aggregate i to the
 * group with the smallest index that can support the L_i / r_i configured
 * for the classes in the aggregate.
 *
 * grp->index is the index of the group; and grp->slot_shift
 * is the shift for the corresponding (scaled) sigma_i.
 */
#define QFQ_MAX_INDEX		24
#define QFQ_MAX_WSHIFT		10

#define	QFQ_MAX_WEIGHT		(1<<QFQ_MAX_WSHIFT) /* see qfq_slot_insert */
#define QFQ_MAX_WSUM		(64*QFQ_MAX_WEIGHT)

#define FRAC_BITS		30	/* fixed point arithmetic */
#define ONE_FP			(1UL << FRAC_BITS)

#define QFQ_MTU_SHIFT		16	/* to support TSO/GSO */
#define QFQ_MIN_LMAX		512	/* see qfq_slot_insert */

#define QFQ_MAX_AGG_CLASSES	8 /* max num classes per aggregate allowed */
/*
 * Possible group states. These values are used as indexes for the bitmaps
 * array of struct qfq_sched.
 */
enum qfq_state { ER, IR, EB, IB, QFQ_MAX_STATE };
struct qfq_group;

struct qfq_aggregate;

struct qfq_class {
	struct Qdisc_class_common common;

	unsigned int filter_cnt;

	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct net_rate_estimator __rcu *rate_est;
	struct Qdisc *qdisc;
	struct list_head alist;		/* Link for active-classes list. */
	struct qfq_aggregate *agg;	/* Parent aggregate. */
	int deficit;			/* DRR deficit counter. */
};

struct qfq_aggregate {
	struct hlist_node next;	/* Link for the slot list. */
	u64 S, F;		/* flow timestamps (exact) */

	/* group we belong to. In principle we would need the index,
	 * which is log_2(lmax/weight), but we never reference it
	 * directly, only the group.
	 */
	struct qfq_group *grp;

	/* these are copied from the flowset. */
	u32	class_weight; /* Weight of each class in this aggregate. */
	/* Max pkt size for the classes in this aggregate, DRR quantum. */
	int	lmax;

	u32	inv_w;	    /* ONE_FP/(sum of weights of classes in aggr.). */
	u32	budgetmax;  /* Max budget for this aggregate. */
	u32	initial_budget, budget;     /* Initial and current budget. */

	int		  num_classes;	/* Number of classes in this aggr. */
	struct list_head  active;	/* DRR queue of active classes. */

	struct hlist_node nonfull_next;	/* See nonfull_aggs in qfq_sched. */
};

struct qfq_group {
	u64 S, F;			/* group timestamps (approx). */
	unsigned int slot_shift;	/* Slot shift. */
	unsigned int index;		/* Group index. */
	unsigned int front;		/* Index of the front slot. */
	unsigned long full_slots;	/* non-empty slots */

	/* Array of RR lists of active aggregates. */
	struct hlist_head slots[QFQ_MAX_SLOTS];
};

struct qfq_sched {
	struct tcf_proto __rcu *filter_list;
	struct tcf_block	*block;
	struct Qdisc_class_hash clhash;

	u64			oldV, V;	/* Precise virtual times. */
	struct qfq_aggregate	*in_serv_agg;	/* Aggregate being served. */
	u32			wsum;		/* weight sum */
	u32			iwsum;		/* inverse weight sum */

	unsigned long bitmaps[QFQ_MAX_STATE];	    /* Group bitmaps. */
	struct qfq_group groups[QFQ_MAX_INDEX + 1]; /* The groups. */
	u32 min_slot_shift;	/* Index of the group-0 bit in the bitmaps. */

	u32 max_agg_classes;		/* Max number of classes per aggr. */
	struct hlist_head nonfull_aggs; /* Aggs with room for more classes. */
};
/*
 * Possible reasons why the timestamps of an aggregate are updated
 * enqueue: the aggregate switches from idle to active and must be scheduled
 *	    for service
 * requeue: the aggregate finishes its budget, so it stops being served and
 *	    must be rescheduled for service
 */
enum update_reason {enqueue, requeue};
static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct qfq_class, common);
}

static void qfq_purge_queue(struct qfq_class *cl)
{
	unsigned int len = cl->qdisc->q.qlen;
	unsigned int backlog = cl->qdisc->qstats.backlog;

	qdisc_reset(cl->qdisc);
	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
}

static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
};

/*
 * Calculate a flow index, given its weight and maximum packet length.
 * index = log_2(maxlen/weight) but we need to apply the scaling.
 * This is used only once at flow creation.
 */
static int qfq_calc_index(u32 inv_w, unsigned int maxlen, u32 min_slot_shift)
{
	u64 slot_size = (u64)maxlen * inv_w;
	unsigned long size_map;
	int index = 0;

	size_map = slot_size >> min_slot_shift;
	if (!size_map)
		goto out;

	index = __fls(size_map) + 1;	/* basically a log_2 */
	index -= !(slot_size - (1ULL << (index + min_slot_shift - 1)));

	if (index < 0)
		index = 0;
out:
	pr_debug("qfq calc_index: W = %lu, L = %u, I = %d\n",
		 (unsigned long) ONE_FP/inv_w, maxlen, index);

	return index;
}
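
/* Worked example for qfq_calc_index() (illustrative; assumes
 * min_slot_shift == 25, which is what qfq_init_qdisc() computes when
 * max_agg_classes == 8): a one-class aggregate with weight 1 and
 * maxlen = QFQ_MIN_LMAX = 512 has inv_w = 2^30, so
 *	slot_size = 512 * 2^30 = 2^39,
 *	size_map  = 2^39 >> 25  = 2^14,
 * giving index = __fls(2^14) + 1 = 15, then decremented to 14 because
 * slot_size is an exact power of two. The aggregate goes to group 14.
 */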
static void qfq_deactivate_agg(struct qfq_sched *, struct qfq_aggregate *);
static void qfq_activate_agg(struct qfq_sched *, struct qfq_aggregate *,
			     enum update_reason);

static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			 u32 lmax, u32 weight)
{
	INIT_LIST_HEAD(&agg->active);
	hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	agg->lmax = lmax;
	agg->class_weight = weight;
}

static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q,
					  u32 lmax, u32 weight)
{
	struct qfq_aggregate *agg;

	hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next)
		if (agg->lmax == lmax && agg->class_weight == weight)
			return agg;

	return NULL;
}


/* Update aggregate as a function of the new number of classes. */
static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			   int new_num_classes)
{
	u32 new_agg_weight;

	if (new_num_classes == q->max_agg_classes)
		hlist_del_init(&agg->nonfull_next);

	if (agg->num_classes > new_num_classes &&
	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */
		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs);

	/* The next assignment may let
	 * agg->initial_budget > agg->budgetmax
	 * hold, we will take it into account in charge_actual_service().
	 */
	agg->budgetmax = new_num_classes * agg->lmax;
	new_agg_weight = agg->class_weight * new_num_classes;
	agg->inv_w = ONE_FP/new_agg_weight;

	if (agg->grp == NULL) {
		int i = qfq_calc_index(agg->inv_w, agg->budgetmax,
				       q->min_slot_shift);
		agg->grp = &q->groups[i];
	}

	q->wsum +=
		(int) agg->class_weight * (new_num_classes - agg->num_classes);
	q->iwsum = ONE_FP / q->wsum;

	agg->num_classes = new_num_classes;
}

/* Add class to aggregate. */
static void qfq_add_to_agg(struct qfq_sched *q,
			   struct qfq_aggregate *agg,
			   struct qfq_class *cl)
{
	cl->agg = agg;

	qfq_update_agg(q, agg, agg->num_classes+1);
	if (cl->qdisc->q.qlen > 0) { /* adding an active class */
		list_add_tail(&cl->alist, &agg->active);
		if (list_first_entry(&agg->active, struct qfq_class, alist) ==
		    cl && q->in_serv_agg != agg) /* agg was inactive */
			qfq_activate_agg(q, agg, enqueue); /* schedule agg */
	}
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *);

static void qfq_destroy_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	hlist_del_init(&agg->nonfull_next);
	q->wsum -= agg->class_weight;
	if (q->wsum != 0)
		q->iwsum = ONE_FP / q->wsum;

	if (q->in_serv_agg == agg)
		q->in_serv_agg = qfq_choose_next_agg(q);
	kfree(agg);
}

/* Deschedule class from within its parent aggregate. */
static void qfq_deactivate_class(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	list_del(&cl->alist); /* remove from RR queue of the aggregate */
	if (list_empty(&agg->active)) /* agg is now inactive */
		qfq_deactivate_agg(q, agg);
}

/* Remove class from its parent aggregate. */
static void qfq_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	struct qfq_aggregate *agg = cl->agg;

	cl->agg = NULL;
	if (agg->num_classes == 1) { /* agg being emptied, destroy it */
		qfq_destroy_agg(q, agg);
		return;
	}
	qfq_update_agg(q, agg, agg->num_classes-1);
}

/* Deschedule class and remove it from its parent aggregate. */
static void qfq_deact_rm_from_agg(struct qfq_sched *q, struct qfq_class *cl)
{
	if (cl->qdisc->q.qlen > 0) /* class is active */
		qfq_deactivate_class(q, cl);

	qfq_rm_from_agg(q, cl);
}

/* Move class to a new aggregate, matching the new class weight and/or lmax */
static int qfq_change_agg(struct Qdisc *sch, struct qfq_class *cl, u32 weight,
			   u32 lmax)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *new_agg = qfq_find_agg(q, lmax, weight);

	if (new_agg == NULL) { /* create new aggregate */
		new_agg = kzalloc(sizeof(*new_agg), GFP_ATOMIC);
		if (new_agg == NULL)
			return -ENOBUFS;
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	qfq_deact_rm_from_agg(q, cl);
	qfq_add_to_agg(q, new_agg, cl);

	return 0;
}
static int qfq_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			    struct nlattr **tca, unsigned long *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)*arg;
	bool existing = false;
	struct nlattr *tb[TCA_QFQ_MAX + 1];
	struct qfq_aggregate *new_agg = NULL;
	u32 weight, lmax, inv_w;
	int err;
	int delta_w;

	if (tca[TCA_OPTIONS] == NULL) {
		pr_notice("qfq: no options\n");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_QFQ_MAX, tca[TCA_OPTIONS], qfq_policy,
			       NULL);
	if (err < 0)
		return err;

	if (tb[TCA_QFQ_WEIGHT]) {
		weight = nla_get_u32(tb[TCA_QFQ_WEIGHT]);
		if (!weight || weight > (1UL << QFQ_MAX_WSHIFT)) {
			pr_notice("qfq: invalid weight %u\n", weight);
			return -EINVAL;
		}
	} else
		weight = 1;

	if (tb[TCA_QFQ_LMAX]) {
		lmax = nla_get_u32(tb[TCA_QFQ_LMAX]);
		if (lmax < QFQ_MIN_LMAX || lmax > (1UL << QFQ_MTU_SHIFT)) {
			pr_notice("qfq: invalid max length %u\n", lmax);
			return -EINVAL;
		}
	} else
		lmax = psched_mtu(qdisc_dev(sch));

	inv_w = ONE_FP / weight;
	weight = ONE_FP / inv_w;

	if (cl != NULL &&
	    lmax == cl->agg->lmax &&
	    weight == cl->agg->class_weight)
		return 0; /* nothing to change */

	delta_w = weight - (cl ? cl->agg->class_weight : 0);

	if (q->wsum + delta_w > QFQ_MAX_WSUM) {
		pr_notice("qfq: total weight out of range (%d + %u)\n",
			  delta_w, q->wsum);
		return -EINVAL;
	}

	if (cl != NULL) { /* modify existing class */
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		existing = true;
		goto set_change_agg;
	}

	/* create and init new class */
	cl = kzalloc(sizeof(struct qfq_class), GFP_KERNEL);
	if (cl == NULL)
		return -ENOBUFS;

	cl->common.classid = classid;
	cl->deficit = lmax;

	cl->qdisc = qdisc_create_dflt(sch->dev_queue,
				      &pfifo_qdisc_ops, classid);
	if (cl->qdisc == NULL)
		cl->qdisc = &noop_qdisc;

	if (tca[TCA_RATE]) {
		err = gen_new_estimator(&cl->bstats, NULL,
					&cl->rate_est,
					NULL,
					qdisc_root_sleeping_running(sch),
					tca[TCA_RATE]);
		if (err)
			goto destroy_class;
	}

	if (cl->qdisc != &noop_qdisc)
		qdisc_hash_add(cl->qdisc, true);

set_change_agg:
	sch_tree_lock(sch);
	new_agg = qfq_find_agg(q, lmax, weight);
	if (new_agg == NULL) { /* create new aggregate */
		sch_tree_unlock(sch);
		new_agg = kzalloc(sizeof(*new_agg), GFP_KERNEL);
		if (new_agg == NULL) {
			err = -ENOBUFS;
			gen_kill_estimator(&cl->rate_est);
			goto destroy_class;
		}
		sch_tree_lock(sch);
		qfq_init_agg(q, new_agg, lmax, weight);
	}
	if (existing)
		qfq_deact_rm_from_agg(q, cl);
	else
		qdisc_class_hash_insert(&q->clhash, &cl->common);
	qfq_add_to_agg(q, new_agg, cl);
	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

destroy_class:
	qdisc_destroy(cl->qdisc);
	kfree(cl);
	return err;
}

static void qfq_destroy_class(struct Qdisc *sch, struct qfq_class *cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	qfq_rm_from_agg(q, cl);
	gen_kill_estimator(&cl->rate_est);
	qdisc_destroy(cl->qdisc);
	kfree(cl);
}

static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (cl->filter_cnt > 0)
		return -EBUSY;

	sch_tree_lock(sch);

	qfq_purge_queue(cl);
	qdisc_class_hash_remove(&q->clhash, &cl->common);

	sch_tree_unlock(sch);

	qfq_destroy_class(sch, cl);
	return 0;
}

static unsigned long qfq_search_class(struct Qdisc *sch, u32 classid)
{
	return (unsigned long)qfq_find_class(sch, classid);
}

static struct tcf_block *qfq_tcf_block(struct Qdisc *sch, unsigned long cl)
{
	struct qfq_sched *q = qdisc_priv(sch);

	if (cl)
		return NULL;

	return q->block;
}

static unsigned long qfq_bind_tcf(struct Qdisc *sch, unsigned long parent,
				  u32 classid)
{
	struct qfq_class *cl = qfq_find_class(sch, classid);

	if (cl != NULL)
		cl->filter_cnt++;

	return (unsigned long)cl;
}

static void qfq_unbind_tcf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	cl->filter_cnt--;
}

static int qfq_graft_class(struct Qdisc *sch, unsigned long arg,
			   struct Qdisc *new, struct Qdisc **old)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue,
					&pfifo_qdisc_ops, cl->common.classid);
		if (new == NULL)
			new = &noop_qdisc;
	}

	*old = qdisc_replace(sch, new, &cl->qdisc);
	return 0;
}

static struct Qdisc *qfq_class_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_class *cl = (struct qfq_class *)arg;

	return cl->qdisc;
}

static int qfq_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct nlattr *nest;

	tcm->tcm_parent	= TC_H_ROOT;
	tcm->tcm_handle	= cl->common.classid;
	tcm->tcm_info	= cl->qdisc->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_u32(skb, TCA_QFQ_WEIGHT, cl->agg->class_weight) ||
	    nla_put_u32(skb, TCA_QFQ_LMAX, cl->agg->lmax))
		goto nla_put_failure;
	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -EMSGSIZE;
}

static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
				struct gnet_dump *d)
{
	struct qfq_class *cl = (struct qfq_class *)arg;
	struct tc_qfq_stats xstats;

	memset(&xstats, 0, sizeof(xstats));

	xstats.weight = cl->agg->class_weight;
	xstats.lmax = cl->agg->lmax;

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL,
				  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void qfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
static struct qfq_class *qfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority ^ sch->handle) == 0) {
		pr_debug("qfq_classify: found %d\n", skb->priority);
		cl = qfq_find_class(sch, skb->priority);
		if (cl != NULL)
			return cl;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	fl = rcu_dereference_bh(q->filter_list);
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (struct qfq_class *)res.class;
		if (cl == NULL)
			cl = qfq_find_class(sch, res.classid);
		return cl;
	}

	return NULL;
}
/* Generic comparison function, handling wraparound. */
static inline int qfq_gt(u64 a, u64 b)
{
	return (s64)(a - b) > 0;
}
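
/* Example (illustrative): timestamps compare correctly even across a
 * 64-bit wraparound. qfq_gt(2, ULLONG_MAX) is true, because
 * (s64)(2 - ULLONG_MAX) == 3 > 0, i.e. 2 is "after" ULLONG_MAX;
 * qfq_gt(a, a) is always false.
 */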
/* Round a precise timestamp to its slotted value. */
static inline u64 qfq_round_down(u64 ts, unsigned int shift)
{
	return ts & ~((1ULL << shift) - 1);
}

/* return the pointer to the group with lowest index in the bitmap */
static inline struct qfq_group *qfq_ffs(struct qfq_sched *q,
					unsigned long bitmap)
{
	int index = __ffs(bitmap);
	return &q->groups[index];
}

/* Calculate a mask to mimic what would be ffs_from(). */
static inline unsigned long mask_from(unsigned long bitmap, int from)
{
	return bitmap & ~((1UL << from) - 1);
}
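
/* Example (illustrative): mask_from(0x16, 2) == 0x14, i.e. bits below
 * position 2 are cleared, so a following __ffs() only sees groups with
 * index >= 2.
 */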
/*
 * The state computation relies on ER=0, IR=1, EB=2, IB=3
 * First compute eligibility comparing grp->S, q->V,
 * then check if someone is blocking us and possibly add EB
 */
static int qfq_calc_state(struct qfq_sched *q, const struct qfq_group *grp)
{
	/* if S > V we are not eligible */
	unsigned int state = qfq_gt(grp->S, q->V);
	unsigned long mask = mask_from(q->bitmaps[ER], grp->index);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (qfq_gt(grp->F, next->F))
			state |= EB;
	}

	return state;
}
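
/* The four resulting states (recap of the encoding above):
 *	eligible  && !blocked -> 0 | 0 == ER
 *	!eligible && !blocked -> 1 | 0 == IR
 *	eligible  &&  blocked -> 0 | 2 == EB
 *	!eligible &&  blocked -> 1 | 2 == IB
 * where "blocked" means an ER group with index >= grp->index has a
 * smaller (earlier) finish time F.
 */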
/*
 * In principle
 *	q->bitmaps[dst] |= q->bitmaps[src] & mask;
 *	q->bitmaps[src] &= ~mask;
 * but we should make sure that src != dst
 */
static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask,
				   int src, int dst)
{
	q->bitmaps[dst] |= q->bitmaps[src] & mask;
	q->bitmaps[src] &= ~mask;
}

static void qfq_unblock_groups(struct qfq_sched *q, int index, u64 old_F)
{
	unsigned long mask = mask_from(q->bitmaps[ER], index + 1);
	struct qfq_group *next;

	if (mask) {
		next = qfq_ffs(q, mask);
		if (!qfq_gt(next->F, old_F))
			return;
	}

	mask = (1UL << index) - 1;
	qfq_move_groups(q, mask, EB, ER);
	qfq_move_groups(q, mask, IB, IR);
}

/*
 * perhaps
 *
	old_V ^= q->V;
	old_V >>= q->min_slot_shift;
	if (old_V) {
		...
	}
 *
 */
static void qfq_make_eligible(struct qfq_sched *q)
{
	unsigned long vslot = q->V >> q->min_slot_shift;
	unsigned long old_vslot = q->oldV >> q->min_slot_shift;

	if (vslot != old_vslot) {
		unsigned long mask;
		int last_flip_pos = fls(vslot ^ old_vslot);

		if (last_flip_pos > 31) /* higher than the number of groups */
			mask = ~0UL; /* make all groups eligible */
		else
			mask = (1UL << last_flip_pos) - 1;

		qfq_move_groups(q, mask, IR, ER);
		qfq_move_groups(q, mask, IB, EB);
	}
}
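
/* Example (illustrative): if V crosses a slot boundary so that
 * old_vslot == 0b0111 and vslot == 0b1000, then
 * vslot ^ old_vslot == 0b1111 and fls() == 4, hence mask == 0xf:
 * groups 0-3 become eligible and move from IR/IB to ER/EB.
 */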
/*
 * The index of the slot in which the input aggregate agg is to be
 * inserted must not be higher than QFQ_MAX_SLOTS-2. There is a '-2'
 * and not a '-1' because the start time of the group may be moved
 * backward by one slot after the aggregate has been inserted, and
 * this would cause non-empty slots to be right-shifted by one
 * position.
 *
 * QFQ+ fully satisfies this bound to the slot index if the parameters
 * of the classes are not changed dynamically, and if QFQ+ never
 * happens to postpone the service of agg unjustly, i.e., it never
 * happens that the aggregate becomes backlogged and eligible, or just
 * eligible, while an aggregate with a higher approximated finish time
 * is being served. In particular, in this case QFQ+ guarantees that
 * the timestamps of agg are low enough that the slot index is never
 * higher than 2. Unfortunately, QFQ+ cannot provide the same
 * guarantee if it happens to unjustly postpone the service of agg, or
 * if the parameters of some class are changed.
 *
 * As for the first event, i.e., an out-of-order service, the
 * upper bound to the slot index guaranteed by QFQ+ grows to
 * 2 +
 * QFQ_MAX_AGG_CLASSES * ((1<<QFQ_MTU_SHIFT)/QFQ_MIN_LMAX) *
 * (current_max_weight/current_wsum) <= 2 + 8 * 128 * 1.
 *
 * The following function deals with this problem by backward-shifting
 * the timestamps of agg, if needed, so as to guarantee that the slot
 * index is never higher than QFQ_MAX_SLOTS-2. This backward-shift may
 * cause the service of other aggregates to be postponed, yet the
 * worst-case guarantees of these aggregates are not violated. In
 * fact, in case of no out-of-order service, the timestamps of agg
 * would have been even lower than they are after the backward shift,
 * because QFQ+ would have guaranteed a maximum value equal to 2 for
 * the slot index, and 2 < QFQ_MAX_SLOTS-2. Hence the aggregates whose
 * service is postponed because of the backward-shift would have
 * however waited for the service of agg before being served.
 *
 * The other event that may cause the slot index to be higher than 2
 * for agg is a recent change of the parameters of some class. If the
 * weight of a class is increased or the lmax (max_pkt_size) of the
 * class is decreased, then a new aggregate with smaller slot size
 * than the original parent aggregate of the class may happen to be
 * activated. The activation of this aggregate should be properly
 * delayed to when the service of the class has finished in the ideal
 * system tracked by QFQ+. If the activation of the aggregate is not
 * delayed to this reference time instant, then this aggregate may be
 * unjustly served before other aggregates waiting for service. This
 * may cause the above bound to the slot index to be violated for some
 * of these unlucky aggregates.
 *
 * Instead of delaying the activation of the new aggregate, which is
 * quite complex, the above-discussed capping of the slot index is
 * used to handle also the consequences of a change of the parameters
 * of a class.
 */
static void qfq_slot_insert(struct qfq_group *grp, struct qfq_aggregate *agg,
			    u64 roundedS)
{
	u64 slot = (roundedS - grp->S) >> grp->slot_shift;
	unsigned int i; /* slot index in the bucket list */

	if (unlikely(slot > QFQ_MAX_SLOTS - 2)) {
		u64 deltaS = roundedS - grp->S -
			     ((u64)(QFQ_MAX_SLOTS - 2)<<grp->slot_shift);
		agg->S -= deltaS;
		agg->F -= deltaS;
		slot = QFQ_MAX_SLOTS - 2;
	}

	i = (grp->front + slot) % QFQ_MAX_SLOTS;

	hlist_add_head(&agg->next, &grp->slots[i]);
	__set_bit(slot, &grp->full_slots);
}
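
/* Worked example of the capping above (illustrative): with
 * slot_shift == 25 and roundedS - grp->S == 40 << 25, the raw slot
 * index would be 40 > QFQ_MAX_SLOTS - 2 == 30. Then
 * deltaS == (40 - 30) << 25, agg->S and agg->F are both shifted back
 * by 10 slots, and the aggregate is inserted in slot 30.
 */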
/* Maybe introduce hlist_first_entry?? */
static struct qfq_aggregate *qfq_slot_head(struct qfq_group *grp)
{
	return hlist_entry(grp->slots[grp->front].first,
			   struct qfq_aggregate, next);
}

/*
 * remove the entry from the slot
 */
static void qfq_front_slot_remove(struct qfq_group *grp)
{
	struct qfq_aggregate *agg = qfq_slot_head(grp);

	BUG_ON(!agg);
	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[grp->front]))
		__clear_bit(0, &grp->full_slots);
}

/*
 * Returns the first aggregate in the first non-empty bucket of the
 * group. As a side effect, adjusts the bucket list so the first
 * non-empty bucket is at position 0 in full_slots.
 */
static struct qfq_aggregate *qfq_slot_scan(struct qfq_group *grp)
{
	unsigned int i;

	pr_debug("qfq slot_scan: grp %u full %#lx\n",
		 grp->index, grp->full_slots);

	if (grp->full_slots == 0)
		return NULL;

	i = __ffs(grp->full_slots);  /* zero based */
	if (i > 0) {
		grp->front = (grp->front + i) % QFQ_MAX_SLOTS;
		grp->full_slots >>= i;
	}

	return qfq_slot_head(grp);
}

/*
 * adjust the bucket list. When the start time of a group decreases,
 * we move the index down (modulo QFQ_MAX_SLOTS) so we don't need to
 * move the objects. The mask of occupied slots must be shifted
 * because we use ffs() to find the first non-empty slot.
 * This covers decreases in the group's start time, but what about
 * increases of the start time ?
 * Here too we should make sure that i is less than 32
 */
static void qfq_slot_rotate(struct qfq_group *grp, u64 roundedS)
{
	unsigned int i = (grp->S - roundedS) >> grp->slot_shift;

	grp->full_slots <<= i;
	grp->front = (grp->front - i) % QFQ_MAX_SLOTS;
}
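
/* Example (illustrative): if the group's start time moves back by
 * i == 3 slots, full_slots 0x5 (buckets 0 and 2 occupied) becomes
 * 0x28 (buckets 3 and 5), and front moves back by 3 mod QFQ_MAX_SLOTS,
 * so no aggregate has to be relinked.
 */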
static void qfq_update_eligible(struct qfq_sched *q)
{
	struct qfq_group *grp;
	unsigned long ineligible;

	ineligible = q->bitmaps[IR] | q->bitmaps[IB];
	if (ineligible) {
		if (!q->bitmaps[ER]) {
			grp = qfq_ffs(q, ineligible);
			if (qfq_gt(grp->S, q->V))
				q->V = grp->S;
		}
		qfq_make_eligible(q);
	}
}

/* Dequeue head packet of the head class in the DRR queue of the aggregate. */
static void agg_dequeue(struct qfq_aggregate *agg,
			struct qfq_class *cl, unsigned int len)
{
	qdisc_dequeue_peeked(cl->qdisc);

	cl->deficit -= (int) len;

	if (cl->qdisc->q.qlen == 0) /* no more packets, remove from list */
		list_del(&cl->alist);
	else if (cl->deficit < qdisc_pkt_len(cl->qdisc->ops->peek(cl->qdisc))) {
		cl->deficit += agg->lmax;
		list_move_tail(&cl->alist, &agg->active);
	}
}
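
/* DRR example (illustrative): a class with deficit 1500 that sends a
 * 1000 byte packet is left with deficit 500; if its next packet is
 * 600 bytes, 500 < 600, so the class gets agg->lmax added to its
 * deficit and moves to the tail of the active list, letting the other
 * classes of the aggregate transmit first.
 */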
static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,
					   struct qfq_class **cl,
					   unsigned int *len)
{
	struct sk_buff *skb;

	*cl = list_first_entry(&agg->active, struct qfq_class, alist);
	skb = (*cl)->qdisc->ops->peek((*cl)->qdisc);
	if (skb == NULL)
		WARN_ONCE(1, "qfq_dequeue: non-workconserving leaf\n");
	else
		*len = qdisc_pkt_len(skb);

	return skb;
}

/* Update F according to the actual service received by the aggregate. */
static inline void charge_actual_service(struct qfq_aggregate *agg)
{
	/* Compute the service received by the aggregate, taking into
	 * account that, after decreasing the number of classes in
	 * agg, it may happen that
	 * agg->initial_budget - agg->budget > agg->budgetmax
	 */
	u32 service_received = min(agg->budgetmax,
				   agg->initial_budget - agg->budget);

	agg->F = agg->S + (u64)service_received * agg->inv_w;
}
/* Assign a reasonable start time for a new aggregate in group i.
 * Admissible values for \hat(F) are multiples of \sigma_i
 * no greater than V+\sigma_i . Larger values mean that
 * we had a wraparound so we consider the timestamp to be stale.
 *
 * If F is not stale and F >= V then we set S = F.
 * Otherwise we should assign S = V, but this may violate
 * the ordering in EB (see [2]). So, if we have groups in ER,
 * set S to the F_j of the first group j which would be blocking us.
 * We are guaranteed not to move S backward because
 * otherwise our group i would still be blocked.
 */
static void qfq_update_start(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	unsigned long mask;
	u64 limit, roundedF;
	int slot_shift = agg->grp->slot_shift;

	roundedF = qfq_round_down(agg->F, slot_shift);
	limit = qfq_round_down(q->V, slot_shift) + (1ULL << slot_shift);

	if (!qfq_gt(agg->F, q->V) || qfq_gt(roundedF, limit)) {
		/* timestamp was stale */
		mask = mask_from(q->bitmaps[ER], agg->grp->index);
		if (mask) {
			struct qfq_group *next = qfq_ffs(q, mask);
			if (qfq_gt(roundedF, next->F)) {
				if (qfq_gt(limit, next->F))
					agg->S = next->F;
				else /* preserve timestamp correctness */
					agg->S = limit;
				return;
			}
		}
		agg->S = q->V;
	} else  /* timestamp is not stale */
		agg->S = agg->F;
}

/* Update the timestamps of agg before scheduling/rescheduling it for
 * service. In particular, assign to agg->F its maximum possible
 * value, i.e., the virtual finish time with which the aggregate
 * should be labeled if it used all its budget once in service.
 */
static inline void
qfq_update_agg_ts(struct qfq_sched *q,
		    struct qfq_aggregate *agg, enum update_reason reason)
{
	if (reason != requeue)
		qfq_update_start(q, agg);
	else /* just charge agg for the service received */
		agg->S = agg->F;

	agg->F = agg->S + (u64)agg->budgetmax * agg->inv_w;
}
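
/* Example (illustrative): a two-class aggregate with class_weight 1
 * (aggregate weight 2, inv_w == ONE_FP/2) and lmax == 1514 has
 * budgetmax == 2 * 1514, so each (re)scheduling sets
 *	F - S = 3028 * 2^29 = 1514 * 2^30,
 * i.e. budgetmax/w == 1514 in the fixed-point scale of V.
 */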
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg);

static struct sk_buff *qfq_dequeue(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_aggregate *in_serv_agg = q->in_serv_agg;
	struct qfq_class *cl;
	struct sk_buff *skb = NULL;
	/* next-packet len, 0 means no more active classes in in-service agg */
	unsigned int len = 0;

	if (in_serv_agg == NULL)
		return NULL;

	if (!list_empty(&in_serv_agg->active))
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);

	/*
	 * If there are no active classes in the in-service aggregate,
	 * or if the aggregate has not enough budget to serve its next
	 * class, then choose the next aggregate to serve.
	 */
	if (len == 0 || in_serv_agg->budget < len) {
		charge_actual_service(in_serv_agg);

		/* recharge the budget of the aggregate */
		in_serv_agg->initial_budget = in_serv_agg->budget =
			in_serv_agg->budgetmax;

		if (!list_empty(&in_serv_agg->active)) {
			/*
			 * Still active: reschedule for
			 * service. Possible optimization: if no other
			 * aggregate is active, then there is no point
			 * in rescheduling this aggregate, and we can
			 * just keep it as the in-service one. This
			 * should be however a corner case, and to
			 * handle it, we would need to maintain an
			 * extra num_active_aggs field.
			 */
			qfq_update_agg_ts(q, in_serv_agg, requeue);
			qfq_schedule_agg(q, in_serv_agg);
		} else if (sch->q.qlen == 0) { /* no aggregate to serve */
			q->in_serv_agg = NULL;
			return NULL;
		}

		/*
		 * If we get here, there are other aggregates queued:
		 * choose the new aggregate to serve.
		 */
		in_serv_agg = q->in_serv_agg = qfq_choose_next_agg(q);
		skb = qfq_peek_skb(in_serv_agg, &cl, &len);
	}
	if (!skb)
		return NULL;

	qdisc_qstats_backlog_dec(sch, skb);
	sch->q.qlen--;
	qdisc_bstats_update(sch, skb);

	agg_dequeue(in_serv_agg, cl, len);
	/* If lmax is lowered, through qfq_change_class, for a class
	 * owning pending packets with larger size than the new value
	 * of lmax, then the following condition may hold.
	 */
	if (unlikely(in_serv_agg->budget < len))
		in_serv_agg->budget = 0;
	else
		in_serv_agg->budget -= len;

	q->V += (u64)len * q->iwsum;
	pr_debug("qfq dequeue: len %u F %lld now %lld\n",
		 len, (unsigned long long) in_serv_agg->F,
		 (unsigned long long) q->V);

	return skb;
}

static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
{
	struct qfq_group *grp;
	struct qfq_aggregate *agg, *new_front_agg;
	u64 old_F;

	qfq_update_eligible(q);
	q->oldV = q->V;

	if (!q->bitmaps[ER])
		return NULL;

	grp = qfq_ffs(q, q->bitmaps[ER]);
	old_F = grp->F;

	agg = qfq_slot_head(grp);

	/* agg starts to be served, remove it from schedule */
	qfq_front_slot_remove(grp);

	new_front_agg = qfq_slot_scan(grp);

	if (new_front_agg == NULL) /* group is now inactive, remove from ER */
		__clear_bit(grp->index, &q->bitmaps[ER]);
	else {
		u64 roundedS = qfq_round_down(new_front_agg->S,
					      grp->slot_shift);
		unsigned int s;

		if (grp->S == roundedS)
			return agg;
		grp->S = roundedS;
		grp->F = roundedS + (2ULL << grp->slot_shift);
		__clear_bit(grp->index, &q->bitmaps[ER]);
		s = qfq_calc_state(q, grp);
		__set_bit(grp->index, &q->bitmaps[s]);
	}

	qfq_unblock_groups(q, grp->index, old_F);

	return agg;
}
static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct qfq_aggregate *agg;
	int err = 0;

	cl = qfq_classify(skb, sch, &err);
	if (cl == NULL) {
		if (err & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return err;
	}
	pr_debug("qfq_enqueue: cl = %x\n", cl->common.classid);

	if (unlikely(cl->agg->lmax < qdisc_pkt_len(skb))) {
		pr_debug("qfq: increasing maxpkt from %u to %u for class %u\n",
			 cl->agg->lmax, qdisc_pkt_len(skb), cl->common.classid);
		err = qfq_change_agg(sch, cl, cl->agg->class_weight,
				     qdisc_pkt_len(skb));
		if (err) {
			cl->qstats.drops++;
			return qdisc_drop(skb, sch, to_free);
		}
	}

	err = qdisc_enqueue(skb, cl->qdisc, to_free);
	if (unlikely(err != NET_XMIT_SUCCESS)) {
		pr_debug("qfq_enqueue: enqueue failed %d\n", err);
		if (net_xmit_drop_count(err)) {
			cl->qstats.drops++;
			qdisc_qstats_drop(sch);
		}
		return err;
	}

	bstats_update(&cl->bstats, skb);
	qdisc_qstats_backlog_inc(sch, skb);
	++sch->q.qlen;

	agg = cl->agg;
	/* if the queue was not empty, then done here */
	if (cl->qdisc->q.qlen != 1) {
		if (unlikely(skb == cl->qdisc->ops->peek(cl->qdisc)) &&
		    list_first_entry(&agg->active, struct qfq_class, alist)
		    == cl && cl->deficit < qdisc_pkt_len(skb))
			list_move_tail(&cl->alist, &agg->active);

		return err;
	}

	/* schedule class for service within the aggregate */
	cl->deficit = agg->lmax;
	list_add_tail(&cl->alist, &agg->active);

	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl ||
	    q->in_serv_agg == agg)
		return err; /* non-empty or in service, nothing else to do */

	qfq_activate_agg(q, agg, enqueue);

	return err;
}
/*
 * Schedule aggregate according to its timestamps.
 */
static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	u64 roundedS;
	int s;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);

	/*
	 * Insert agg in the correct bucket.
	 * If agg->S >= grp->S we don't need to adjust the
	 * bucket list and simply go to the insertion phase.
	 * Otherwise grp->S is decreasing, we must make room
	 * in the bucket list, and also recompute the group state.
	 * Finally, if there were no flows in this group and nobody
	 * was in ER make sure to adjust V.
	 */
	if (grp->full_slots) {
		if (!qfq_gt(grp->S, agg->S))
			goto skip_update;

		/* create a slot for this agg->S */
		qfq_slot_rotate(grp, roundedS);
		/* group was surely ineligible, remove */
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[IB]);
	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) &&
		   q->in_serv_agg == NULL)
		q->V = roundedS;

	grp->S = roundedS;
	grp->F = roundedS + (2ULL << grp->slot_shift);
	s = qfq_calc_state(q, grp);
	__set_bit(grp->index, &q->bitmaps[s]);

	pr_debug("qfq enqueue: new state %d %#lx S %lld F %lld V %lld\n",
		 s, q->bitmaps[s],
		 (unsigned long long) agg->S,
		 (unsigned long long) agg->F,
		 (unsigned long long) q->V);

skip_update:
	qfq_slot_insert(grp, agg, roundedS);
}

/* Update agg ts and schedule agg for service */
static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,
			     enum update_reason reason)
{
	agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */

	qfq_update_agg_ts(q, agg, reason);
	if (q->in_serv_agg == NULL) { /* no aggr. in service or scheduled */
		q->in_serv_agg = agg; /* start serving this aggregate */
		 /* update V: to be in service, agg must be eligible */
		q->oldV = q->V = agg->S;
	} else if (agg != q->in_serv_agg)
		qfq_schedule_agg(q, agg);
}

static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp,
			    struct qfq_aggregate *agg)
{
	unsigned int i, offset;
	u64 roundedS;

	roundedS = qfq_round_down(agg->S, grp->slot_shift);
	offset = (roundedS - grp->S) >> grp->slot_shift;

	i = (grp->front + offset) % QFQ_MAX_SLOTS;

	hlist_del(&agg->next);
	if (hlist_empty(&grp->slots[i]))
		__clear_bit(offset, &grp->full_slots);
}

/*
 * Called to forcibly deschedule an aggregate. If the aggregate is
 * not in the front bucket, or if the latter has other aggregates in
 * the front bucket, we can simply remove the aggregate with no other
 * side effects.
 * Otherwise we must propagate the event up.
 */
static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)
{
	struct qfq_group *grp = agg->grp;
	unsigned long mask;
	u64 roundedS;
	int s;

	if (agg == q->in_serv_agg) {
		charge_actual_service(agg);
		q->in_serv_agg = qfq_choose_next_agg(q);
		return;
	}

	agg->F = agg->S;
	qfq_slot_remove(q, grp, agg);

	if (!grp->full_slots) {
		__clear_bit(grp->index, &q->bitmaps[IR]);
		__clear_bit(grp->index, &q->bitmaps[EB]);
		__clear_bit(grp->index, &q->bitmaps[IB]);

		if (test_bit(grp->index, &q->bitmaps[ER]) &&
		    !(q->bitmaps[ER] & ~((1UL << grp->index) - 1))) {
			mask = q->bitmaps[ER] & ((1UL << grp->index) - 1);
			if (mask)
				mask = ~((1UL << __fls(mask)) - 1);
			else
				mask = ~0UL;
			qfq_move_groups(q, mask, EB, ER);
			qfq_move_groups(q, mask, IB, IR);
		}
		__clear_bit(grp->index, &q->bitmaps[ER]);
	} else if (hlist_empty(&grp->slots[grp->front])) {
		agg = qfq_slot_scan(grp);
		roundedS = qfq_round_down(agg->S, grp->slot_shift);
		if (grp->S != roundedS) {
			__clear_bit(grp->index, &q->bitmaps[ER]);
			__clear_bit(grp->index, &q->bitmaps[IR]);
			__clear_bit(grp->index, &q->bitmaps[EB]);
			__clear_bit(grp->index, &q->bitmaps[IB]);
			grp->S = roundedS;
			grp->F = roundedS + (2ULL << grp->slot_shift);
			s = qfq_calc_state(q, grp);
			__set_bit(grp->index, &q->bitmaps[s]);
		}
	}
}

static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl = (struct qfq_class *)arg;

	qfq_deactivate_class(q, cl);
}

static int qfq_init_qdisc(struct Qdisc *sch, struct nlattr *opt)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_group *grp;
	int i, j, err;
	u32 max_cl_shift, maxbudg_shift, max_classes;

	err = tcf_block_get(&q->block, &q->filter_list);
	if (err)
		return err;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;

	if (qdisc_dev(sch)->tx_queue_len + 1 > QFQ_MAX_AGG_CLASSES)
		max_classes = QFQ_MAX_AGG_CLASSES;
	else
		max_classes = qdisc_dev(sch)->tx_queue_len + 1;
	/* max_cl_shift = floor(log_2(max_classes)) */
	max_cl_shift = __fls(max_classes);
	q->max_agg_classes = 1<<max_cl_shift;

	/* maxbudg_shift = log2(max_len * max_classes_per_agg) */
	maxbudg_shift = QFQ_MTU_SHIFT + max_cl_shift;
	q->min_slot_shift = FRAC_BITS + maxbudg_shift - QFQ_MAX_INDEX;

	for (i = 0; i <= QFQ_MAX_INDEX; i++) {
		grp = &q->groups[i];
		grp->index = i;
		grp->slot_shift = q->min_slot_shift + i;
		for (j = 0; j < QFQ_MAX_SLOTS; j++)
			INIT_HLIST_HEAD(&grp->slots[j]);
	}

	INIT_HLIST_HEAD(&q->nonfull_aggs);

	return 0;
}

static void qfq_reset_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->qdisc->q.qlen > 0)
				qfq_deactivate_class(q, cl);

			qdisc_reset(cl->qdisc);
		}
	}
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
}

static void qfq_destroy_qdisc(struct Qdisc *sch)
{
	struct qfq_sched *q = qdisc_priv(sch);
	struct qfq_class *cl;
	struct hlist_node *next;
	unsigned int i;

	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode) {
			qfq_destroy_class(sch, cl);
		}
	}
	qdisc_class_hash_destroy(&q->clhash);
}

static const struct Qdisc_class_ops qfq_class_ops = {
	.change		= qfq_change_class,
	.delete		= qfq_delete_class,
	.find		= qfq_search_class,
	.tcf_block	= qfq_tcf_block,
	.bind_tcf	= qfq_bind_tcf,
	.unbind_tcf	= qfq_unbind_tcf,
	.graft		= qfq_graft_class,
	.leaf		= qfq_class_leaf,
	.qlen_notify	= qfq_qlen_notify,
	.dump		= qfq_dump_class,
	.dump_stats	= qfq_dump_class_stats,
	.walk		= qfq_walk,
};

static struct Qdisc_ops qfq_qdisc_ops __read_mostly = {
	.cl_ops		= &qfq_class_ops,
	.id		= "qfq",
	.priv_size	= sizeof(struct qfq_sched),
	.enqueue	= qfq_enqueue,
	.dequeue	= qfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.init		= qfq_init_qdisc,
	.reset		= qfq_reset_qdisc,
	.destroy	= qfq_destroy_qdisc,
	.owner		= THIS_MODULE,
};

static int __init qfq_init(void)
{
	return register_qdisc(&qfq_qdisc_ops);
}

static void __exit qfq_exit(void)
{
	unregister_qdisc(&qfq_qdisc_ops);
}

module_init(qfq_init);
module_exit(qfq_exit);
MODULE_LICENSE("GPL");