/*
 * BFQ: Hierarchical B-WF2Q+ scheduler.
 *
 * Based on ideas and code from CFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *                    Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it>
 */
#ifdef CONFIG_CGROUP_BFQIO
#define for_each_entity(entity) \
        for (; entity != NULL; entity = entity->parent)

#define for_each_entity_safe(entity, parent) \
        for (; entity && ({ parent = entity->parent; 1; }); entity = parent)

static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
                                                 int extract,
                                                 struct bfq_data *bfqd);

static inline void bfq_update_budget(struct bfq_entity *next_in_service)
{
        struct bfq_entity *bfqg_entity;
        struct bfq_group *bfqg;
        struct bfq_sched_data *group_sd;

        BUG_ON(next_in_service == NULL);

        group_sd = next_in_service->sched_data;

        bfqg = container_of(group_sd, struct bfq_group, sched_data);
        /*
         * bfq_group's my_entity field is not NULL only if the group
         * is not the root group. We must not touch the root entity
         * as it must never become an in-service entity.
         */
        bfqg_entity = bfqg->my_entity;
        if (bfqg_entity != NULL)
                bfqg_entity->budget = next_in_service->budget;
}

static int bfq_update_next_in_service(struct bfq_sched_data *sd)
{
        struct bfq_entity *next_in_service;

        if (sd->in_service_entity != NULL)
                /* will update/requeue at the end of service */
                return 0;

        /*
         * NOTE: this can be improved in many ways, such as returning
         * 1 (and thus propagating upwards the update) only when the
         * budget changes, or caching the bfqq that will be scheduled
         * next from this subtree. By now we worry more about
         * correctness than about performance...
         */
        next_in_service = bfq_lookup_next_entity(sd, 0, NULL);
        sd->next_in_service = next_in_service;

        if (next_in_service != NULL)
                bfq_update_budget(next_in_service);

        return 1;
}

static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
                                             struct bfq_entity *entity)
{
        BUG_ON(sd->next_in_service != entity);
}
#else
#define for_each_entity(entity) \
        for (; entity != NULL; entity = NULL)

#define for_each_entity_safe(entity, parent) \
        for (parent = NULL; entity != NULL; entity = parent)

static inline int bfq_update_next_in_service(struct bfq_sched_data *sd)
{
        return 0;
}

static inline void bfq_check_next_in_service(struct bfq_sched_data *sd,
                                             struct bfq_entity *entity)
{
}

static inline void bfq_update_budget(struct bfq_entity *next_in_service)
{
}
#endif
/*
 * Shift for timestamp calculations. This actually limits the maximum
 * service allowed in one timestamp delta (small shift values increase it),
 * the maximum total weight that can be used for the queues in the system
 * (big shift values increase it), and the period of virtual time
 * wraparounds.
 */
#define WFQ_SERVICE_SHIFT       22

/**
 * bfq_gt - compare two timestamps.
 * @a: first ts.
 * @b: second ts.
 *
 * Return @a > @b, dealing with wrapping correctly.
 */
static inline int bfq_gt(u64 a, u64 b)
{
        return (s64)(a - b) > 0;
}
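/*
 * For example, with a == 5 and b == ULLONG_MAX - 2, the unsigned
 * difference a - b wraps around to 8, so (s64)(a - b) > 0 and bfq_gt()
 * correctly reports that @a comes after @b even though it is
 * numerically smaller: timestamps are compared modulo 2^64.
 */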
static inline struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = NULL;

        BUG_ON(entity == NULL);

        if (entity->my_sched_data == NULL)
                bfqq = container_of(entity, struct bfq_queue, entity);

        return bfqq;
}
/**
 * bfq_delta - map service into the virtual time domain.
 * @service: amount of service.
 * @weight: scale factor (weight of an entity or weight sum).
 */
static inline u64 bfq_delta(unsigned long service,
                            unsigned long weight)
{
        u64 d = (u64)service << WFQ_SERVICE_SHIFT;

        do_div(d, weight);
        return d;
}
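/*
 * For instance, bfq_delta(1000, 10) = (1000 << 22) / 10 = 419430400:
 * the same amount of service produces a virtual-time delta inversely
 * proportional to the weight, which is how weights translate into
 * bandwidth shares.
 */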
/**
 * bfq_calc_finish - assign the finish time to an entity.
 * @entity: the entity to act upon.
 * @service: the service to be charged to the entity.
 */
static inline void bfq_calc_finish(struct bfq_entity *entity,
                                   unsigned long service)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        BUG_ON(entity->weight == 0);

        entity->finish = entity->start +
                bfq_delta(service, entity->weight);

        if (bfqq != NULL) {
                bfq_log_bfqq(bfqq->bfqd, bfqq,
                        "calc_finish: serv %lu, w %d",
                        service, entity->weight);
                bfq_log_bfqq(bfqq->bfqd, bfqq,
                        "calc_finish: start %llu, finish %llu, delta %llu",
                        entity->start, entity->finish,
                        bfq_delta(service, entity->weight));
        }
}
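/*
 * E.g., charging 1024 sectors to an entity of weight 1 pushes its
 * finish time 1024 << 22 virtual-time units past its start time; an
 * entity of weight 2 charged the same service gets half that delta,
 * and thus becomes schedulable again sooner.
 */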
/**
 * bfq_entity_of - get an entity from a node.
 * @node: the node field of the entity.
 *
 * Convert a node pointer to the relative entity. This is used only
 * to simplify the logic of some functions and not as the generic
 * conversion mechanism because, e.g., in the tree walking functions,
 * the check for a %NULL value would be redundant.
 */
static inline struct bfq_entity *bfq_entity_of(struct rb_node *node)
{
        struct bfq_entity *entity = NULL;

        if (node != NULL)
                entity = rb_entry(node, struct bfq_entity, rb_node);

        return entity;
}
/**
 * bfq_extract - remove an entity from a tree.
 * @root: the tree root.
 * @entity: the entity to remove.
 */
static inline void bfq_extract(struct rb_root *root,
                               struct bfq_entity *entity)
{
        BUG_ON(entity->tree != root);

        entity->tree = NULL;
        rb_erase(&entity->rb_node, root);
}
/**
 * bfq_idle_extract - extract an entity from the idle tree.
 * @st: the service tree of the owning @entity.
 * @entity: the entity being removed.
 */
static void bfq_idle_extract(struct bfq_service_tree *st,
                             struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
        struct rb_node *next;

        BUG_ON(entity->tree != &st->idle);

        if (entity == st->first_idle) {
                next = rb_next(&entity->rb_node);
                st->first_idle = bfq_entity_of(next);
        }

        if (entity == st->last_idle) {
                next = rb_prev(&entity->rb_node);
                st->last_idle = bfq_entity_of(next);
        }

        bfq_extract(&st->idle, entity);

        if (bfqq != NULL)
                list_del(&bfqq->bfqq_list);
}
/**
 * bfq_insert - generic tree insertion.
 * @root: tree root.
 * @entity: entity to insert.
 *
 * This is used for the idle and the active tree, since they are both
 * ordered by finish time.
 */
static void bfq_insert(struct rb_root *root, struct bfq_entity *entity)
{
        struct bfq_entity *entry;
        struct rb_node **node = &root->rb_node;
        struct rb_node *parent = NULL;

        BUG_ON(entity->tree != NULL);

        while (*node != NULL) {
                parent = *node;
                entry = rb_entry(parent, struct bfq_entity, rb_node);

                if (bfq_gt(entry->finish, entity->finish))
                        node = &parent->rb_left;
                else
                        node = &parent->rb_right;
        }

        rb_link_node(&entity->rb_node, parent, node);
        rb_insert_color(&entity->rb_node, root);

        entity->tree = root;
}
/**
 * bfq_update_min - update the min_start field of an entity.
 * @entity: the entity to update.
 * @node: one of its children.
 *
 * This function is called when @entity may store an invalid value for
 * min_start due to updates to the active tree. The function assumes
 * that the subtree rooted at @node (which may be its left or its right
 * child) has a valid min_start value.
 */
static inline void bfq_update_min(struct bfq_entity *entity,
                                  struct rb_node *node)
{
        struct bfq_entity *child;

        if (node != NULL) {
                child = rb_entry(node, struct bfq_entity, rb_node);
                if (bfq_gt(entity->min_start, child->min_start))
                        entity->min_start = child->min_start;
        }
}
/**
 * bfq_update_active_node - recalculate min_start.
 * @node: the node to update.
 *
 * @node may have changed position, or one of its children may have moved;
 * this function updates its min_start value. The left and right subtrees
 * are assumed to hold a correct min_start value.
 */
static inline void bfq_update_active_node(struct rb_node *node)
{
        struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node);

        entity->min_start = entity->start;
        bfq_update_min(entity, node->rb_right);
        bfq_update_min(entity, node->rb_left);
}
/**
 * bfq_update_active_tree - update min_start for the whole active tree.
 * @node: the starting node.
 *
 * @node must be the deepest modified node after an update. This function
 * updates its min_start using the values held by its children, assuming
 * that they did not change, and then updates all the nodes that may have
 * changed in the path to the root. The only nodes that may have changed
 * are the ones in the path or their siblings.
 */
static void bfq_update_active_tree(struct rb_node *node)
{
        struct rb_node *parent;

up:
        bfq_update_active_node(node);

        parent = rb_parent(node);
        if (parent == NULL)
                return;

        if (node == parent->rb_left && parent->rb_right != NULL)
                bfq_update_active_node(parent->rb_right);
        else if (parent->rb_left != NULL)
                bfq_update_active_node(parent->rb_left);

        node = parent;
        goto up;
}
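/*
 * The sibling updates above are needed because rb-tree rebalancing can
 * rotate a node into or out of the modified path: e.g., if an insertion
 * triggers a rotation, the new sibling of a node on the path may have
 * acquired a different subtree, so recomputing min_start only along the
 * direct path to the root would leave that sibling's value stale.
 */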
static void bfq_weights_tree_add(struct bfq_data *bfqd,
                                 struct bfq_entity *entity,
                                 struct rb_root *root);

static void bfq_weights_tree_remove(struct bfq_data *bfqd,
                                    struct bfq_entity *entity,
                                    struct rb_root *root);
/**
 * bfq_active_insert - insert an entity in the active tree of its
 *                     group/device.
 * @st: the service tree of the entity.
 * @entity: the entity being inserted.
 *
 * The active tree is ordered by finish time, but an extra key is kept
 * per each node, containing the minimum value for the start times of
 * its children (and the node itself), so it's possible to search for
 * the eligible node with the lowest finish time in logarithmic time.
 */
static void bfq_active_insert(struct bfq_service_tree *st,
                              struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
        struct rb_node *node = &entity->rb_node;
#ifdef CONFIG_CGROUP_BFQIO
        struct bfq_sched_data *sd = NULL;
        struct bfq_group *bfqg = NULL;
        struct bfq_data *bfqd = NULL;
#endif

        bfq_insert(&st->active, entity);

        if (node->rb_left != NULL)
                node = node->rb_left;
        else if (node->rb_right != NULL)
                node = node->rb_right;

        bfq_update_active_tree(node);

#ifdef CONFIG_CGROUP_BFQIO
        sd = entity->sched_data;
        bfqg = container_of(sd, struct bfq_group, sched_data);
        BUG_ON(!bfqg);
        bfqd = (struct bfq_data *)bfqg->bfqd;
#endif
        if (bfqq != NULL)
                list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list);
#ifdef CONFIG_CGROUP_BFQIO
        else { /* bfq_group */
                BUG_ON(!bfqd);
                bfq_weights_tree_add(bfqd, entity, &bfqd->group_weights_tree);
        }
        if (bfqg != bfqd->root_group) {
                BUG_ON(!bfqg);
                BUG_ON(!bfqd);
                bfqg->active_entities++;
                if (bfqg->active_entities == 2)
                        bfqd->active_numerous_groups++;
        }
#endif
}
/**
 * bfq_ioprio_to_weight - calc a weight from an ioprio.
 * @ioprio: the ioprio value to convert.
 */
static inline unsigned short bfq_ioprio_to_weight(int ioprio)
{
        BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR);
        return IOPRIO_BE_NR - ioprio;
}

/**
 * bfq_weight_to_ioprio - calc an ioprio from a weight.
 * @weight: the weight value to convert.
 *
 * To preserve as much as possible the old only-ioprio user interface,
 * 0 is used as an escape ioprio value for weights (numerically) equal to
 * or larger than IOPRIO_BE_NR.
 */
static inline unsigned short bfq_weight_to_ioprio(int weight)
{
        BUG_ON(weight < BFQ_MIN_WEIGHT || weight > BFQ_MAX_WEIGHT);
        return IOPRIO_BE_NR - weight < 0 ? 0 : IOPRIO_BE_NR - weight;
}
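/*
 * With IOPRIO_BE_NR == 8 (the stock kernel value), ioprio 0, the
 * highest best-effort priority, maps to weight 8, and ioprio 7 maps to
 * weight 1. Going back, any weight larger than 8 collapses to the
 * escape value 0, so the mapping round-trips only for weights in
 * [1, 8].
 */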
static inline void bfq_get_entity(struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);

        if (bfqq != NULL) {
                atomic_inc(&bfqq->ref);
                bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d",
                             bfqq, atomic_read(&bfqq->ref));
        }
}
/**
 * bfq_find_deepest - find the deepest node that an extraction can modify.
 * @node: the node being removed.
 *
 * Do the first step of an extraction in an rb tree, looking for the
 * node that will replace @node, and returning the deepest node that
 * the following modifications to the tree can touch. If @node is the
 * last node in the tree return %NULL.
 */
static struct rb_node *bfq_find_deepest(struct rb_node *node)
{
        struct rb_node *deepest;

        if (node->rb_right == NULL && node->rb_left == NULL)
                deepest = rb_parent(node);
        else if (node->rb_right == NULL)
                deepest = node->rb_left;
        else if (node->rb_left == NULL)
                deepest = node->rb_right;
        else {
                deepest = rb_next(node);
                if (deepest->rb_right != NULL)
                        deepest = deepest->rb_right;
                else if (rb_parent(deepest) != node)
                        deepest = rb_parent(deepest);
        }

        return deepest;
}
/**
 * bfq_active_extract - remove an entity from the active tree.
 * @st: the service_tree containing the tree.
 * @entity: the entity being removed.
 */
static void bfq_active_extract(struct bfq_service_tree *st,
                               struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
        struct rb_node *node;
#ifdef CONFIG_CGROUP_BFQIO
        struct bfq_sched_data *sd = NULL;
        struct bfq_group *bfqg = NULL;
        struct bfq_data *bfqd = NULL;
#endif

        node = bfq_find_deepest(&entity->rb_node);
        bfq_extract(&st->active, entity);

        if (node != NULL)
                bfq_update_active_tree(node);

#ifdef CONFIG_CGROUP_BFQIO
        sd = entity->sched_data;
        bfqg = container_of(sd, struct bfq_group, sched_data);
        BUG_ON(!bfqg);
        bfqd = (struct bfq_data *)bfqg->bfqd;
#endif
        if (bfqq != NULL)
                list_del(&bfqq->bfqq_list);
#ifdef CONFIG_CGROUP_BFQIO
        else { /* bfq_group */
                BUG_ON(!bfqd);
                bfq_weights_tree_remove(bfqd, entity,
                                        &bfqd->group_weights_tree);
        }
        if (bfqg != bfqd->root_group) {
                BUG_ON(!bfqg);
                BUG_ON(!bfqd);
                BUG_ON(!bfqg->active_entities);
                bfqg->active_entities--;
                if (bfqg->active_entities == 1) {
                        BUG_ON(!bfqd->active_numerous_groups);
                        bfqd->active_numerous_groups--;
                }
        }
#endif
}
/**
 * bfq_idle_insert - insert an entity into the idle tree.
 * @st: the service tree containing the tree.
 * @entity: the entity to insert.
 */
static void bfq_idle_insert(struct bfq_service_tree *st,
                            struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
        struct bfq_entity *first_idle = st->first_idle;
        struct bfq_entity *last_idle = st->last_idle;

        if (first_idle == NULL || bfq_gt(first_idle->finish, entity->finish))
                st->first_idle = entity;
        if (last_idle == NULL || bfq_gt(entity->finish, last_idle->finish))
                st->last_idle = entity;

        bfq_insert(&st->idle, entity);

        if (bfqq != NULL)
                list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list);
}
/**
 * bfq_forget_entity - remove an entity from the wfq trees.
 * @st: the service tree.
 * @entity: the entity being removed.
 *
 * Update the device status and forget everything about @entity, putting
 * the device reference to it, if it is a queue. Entities belonging to
 * groups are not refcounted.
 */
static void bfq_forget_entity(struct bfq_service_tree *st,
                              struct bfq_entity *entity)
{
        struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
        struct bfq_sched_data *sd;

        BUG_ON(!entity->on_st);

        entity->on_st = 0;
        st->wsum -= entity->weight;
        if (bfqq != NULL) {
                sd = entity->sched_data;
                bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d",
                             bfqq, atomic_read(&bfqq->ref));
                bfq_put_queue(bfqq);
        }
}
/**
 * bfq_put_idle_entity - release the idle tree ref of an entity.
 * @st: service tree for the entity.
 * @entity: the entity being released.
 */
static void bfq_put_idle_entity(struct bfq_service_tree *st,
                                struct bfq_entity *entity)
{
        bfq_idle_extract(st, entity);
        bfq_forget_entity(st, entity);
}
/**
 * bfq_forget_idle - update the idle tree if necessary.
 * @st: the service tree to act upon.
 *
 * To preserve the global O(log N) complexity we only remove one entry here;
 * as the idle tree will not grow indefinitely this can be done safely.
 */
static void bfq_forget_idle(struct bfq_service_tree *st)
{
        struct bfq_entity *first_idle = st->first_idle;
        struct bfq_entity *last_idle = st->last_idle;

        if (RB_EMPTY_ROOT(&st->active) && last_idle != NULL &&
            !bfq_gt(last_idle->finish, st->vtime)) {
                /*
                 * Forget the whole idle tree, increasing the vtime past
                 * the last finish time of idle entities.
                 */
                st->vtime = last_idle->finish;
        }

        if (first_idle != NULL && !bfq_gt(first_idle->finish, st->vtime))
                bfq_put_idle_entity(st, first_idle);
}
static struct bfq_service_tree *
__bfq_entity_update_weight_prio(struct bfq_service_tree *old_st,
                                struct bfq_entity *entity)
{
        struct bfq_service_tree *new_st = old_st;

        if (entity->ioprio_changed) {
                struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
                unsigned short prev_weight, new_weight;
                struct bfq_data *bfqd = NULL;
                struct rb_root *root;
#ifdef CONFIG_CGROUP_BFQIO
                struct bfq_sched_data *sd;
                struct bfq_group *bfqg;
#endif

                if (bfqq != NULL)
                        bfqd = bfqq->bfqd;
#ifdef CONFIG_CGROUP_BFQIO
                else {
                        sd = entity->my_sched_data;
                        bfqg = container_of(sd, struct bfq_group, sched_data);
                        BUG_ON(!bfqg);
                        bfqd = (struct bfq_data *)bfqg->bfqd;
                        BUG_ON(!bfqd);
                }
#endif

                BUG_ON(old_st->wsum < entity->weight);
                old_st->wsum -= entity->weight;

                if (entity->new_weight != entity->orig_weight) {
                        if (entity->new_weight < BFQ_MIN_WEIGHT ||
                            entity->new_weight > BFQ_MAX_WEIGHT) {
                                printk(KERN_CRIT "update_weight_prio: "
                                                 "new_weight %d\n",
                                        entity->new_weight);
                                BUG();
                        }
                        entity->orig_weight = entity->new_weight;
                        entity->ioprio =
                                bfq_weight_to_ioprio(entity->orig_weight);
                }

                entity->ioprio_class = entity->new_ioprio_class;
                entity->ioprio_changed = 0;

                /*
                 * NOTE: here we may be changing the weight too early,
                 * this will cause unfairness. The correct approach
                 * would have required additional complexity to defer
                 * weight changes to the proper time instants (i.e.,
                 * when entity->finish <= old_st->vtime).
                 */
                new_st = bfq_entity_service_tree(entity);

                prev_weight = entity->weight;
                new_weight = entity->orig_weight *
                             (bfqq != NULL ? bfqq->wr_coeff : 1);
                /*
                 * If the weight of the entity changes, remove the entity
                 * from its old weight counter (if there is a counter
                 * associated with the entity), and add it to the counter
                 * associated with its new weight.
                 */
                if (prev_weight != new_weight) {
                        root = bfqq ? &bfqd->queue_weights_tree :
                                      &bfqd->group_weights_tree;
                        bfq_weights_tree_remove(bfqd, entity, root);
                }
                entity->weight = new_weight;
                /*
                 * Add the entity to its weights tree only if it is
                 * not associated with a weight-raised queue.
                 */
                if (prev_weight != new_weight &&
                    (bfqq ? bfqq->wr_coeff == 1 : 1))
                        /* If we get here, root has been initialized. */
                        bfq_weights_tree_add(bfqd, entity, root);

                new_st->wsum += entity->weight;

                if (new_st != old_st)
                        entity->start = new_st->vtime;
        }

        return new_st;
}
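/*
 * The effective weight computed above is orig_weight * wr_coeff: e.g.,
 * a queue with orig_weight 4 that is being weight-raised with, say,
 * wr_coeff == 20 (an illustrative value) is scheduled as if its weight
 * were 80, and it is deliberately kept out of queue_weights_tree for
 * as long as the raising lasts.
 */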
/**
 * bfq_bfqq_served - update the scheduler status after selection for
 *                   service.
 * @bfqq: the queue being served.
 * @served: the amount of service charged, in sectors.
 *
 * NOTE: this can be optimized, as the timestamps of upper level entities
 * are synchronized every time a new bfqq is selected for service. By now,
 * we keep it to better check consistency.
 */
static void bfq_bfqq_served(struct bfq_queue *bfqq, unsigned long served)
{
        struct bfq_entity *entity = &bfqq->entity;
        struct bfq_service_tree *st;

        for_each_entity(entity) {
                st = bfq_entity_service_tree(entity);

                entity->service += served;
                BUG_ON(entity->service > entity->budget);
                BUG_ON(st->wsum == 0);

                st->vtime += bfq_delta(served, st->wsum);
                bfq_forget_idle(st);
        }
        bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %lu secs", served);
}
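/*
 * Note that each tree's vtime advances by served / wsum, scaled by
 * 2^WFQ_SERVICE_SHIFT: serving 2048 sectors while the tree's weight
 * sum is 10 advances that vtime by (2048 << 22) / 10, so the virtual
 * clock runs slower the more total weight is active on the tree.
 */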
/**
 * bfq_bfqq_charge_full_budget - set the service to the entity budget.
 * @bfqq: the queue that needs a service update.
 *
 * When it's not possible to be fair in the service domain, because
 * a queue is not consuming its budget fast enough (the meaning of
 * fast depends on the timeout parameter), we charge it a full
 * budget. In this way we should obtain a sort of time-domain
 * fairness among all the seeky/slow queues.
 */
static inline void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq)
{
        struct bfq_entity *entity = &bfqq->entity;

        bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget");

        bfq_bfqq_served(bfqq, entity->budget - entity->service);
}
/**
 * __bfq_activate_entity - activate an entity.
 * @entity: the entity being activated.
 *
 * Called whenever an entity is activated, i.e., it is not active and one
 * of its children receives a new request, or has to be reactivated due to
 * budget exhaustion. It uses the current budget of the entity (and the
 * service received if @entity is active) to calculate its timestamps.
 */
static void __bfq_activate_entity(struct bfq_entity *entity)
{
        struct bfq_sched_data *sd = entity->sched_data;
        struct bfq_service_tree *st = bfq_entity_service_tree(entity);

        if (entity == sd->in_service_entity) {
                BUG_ON(entity->tree != NULL);
                /*
                 * If we are requeueing the current entity we have
                 * to take care of not charging to it service it has
                 * not received.
                 */
                bfq_calc_finish(entity, entity->service);
                entity->start = entity->finish;
                sd->in_service_entity = NULL;
        } else if (entity->tree == &st->active) {
                /*
                 * Requeueing an entity due to a change of some
                 * next_in_service entity below it. We reuse the
                 * old start time.
                 */
                bfq_active_extract(st, entity);
        } else if (entity->tree == &st->idle) {
                /*
                 * Must be on the idle tree, bfq_idle_extract() will
                 * check for that.
                 */
                bfq_idle_extract(st, entity);
                entity->start = bfq_gt(st->vtime, entity->finish) ?
                                       st->vtime : entity->finish;
        } else {
                /*
                 * The finish time of the entity may be invalid, and
                 * it is in the past for sure, otherwise the queue
                 * would have been on the idle tree.
                 */
                entity->start = st->vtime;
                st->wsum += entity->weight;
                bfq_get_entity(entity);

                BUG_ON(entity->on_st);
                entity->on_st = 1;
        }

        st = __bfq_entity_update_weight_prio(st, entity);
        bfq_calc_finish(entity, entity->budget);
        bfq_active_insert(st, entity);
}
/**
 * bfq_activate_entity - activate an entity and its ancestors if necessary.
 * @entity: the entity to activate.
 *
 * Activate @entity and all the entities on the path from it to the root.
 */
static void bfq_activate_entity(struct bfq_entity *entity)
{
        struct bfq_sched_data *sd;

        for_each_entity(entity) {
                __bfq_activate_entity(entity);

                sd = entity->sched_data;
                if (!bfq_update_next_in_service(sd))
                        /*
                         * No need to propagate the activation to the
                         * upper entities, as they will be updated when
                         * the in-service entity is rescheduled.
                         */
                        break;
        }
}
/**
 * __bfq_deactivate_entity - deactivate an entity from its service tree.
 * @entity: the entity to deactivate.
 * @requeue: if false, the entity will not be put into the idle tree.
 *
 * Deactivate an entity, independently from its previous state. If the
 * entity was not on a service tree just return, otherwise if it is on
 * any scheduler tree, extract it from that tree, and, if the caller
 * specified @requeue and the entity's finish time lies in the future,
 * put it on the idle tree.
 *
 * Return %1 if the caller should update the entity hierarchy, i.e.,
 * if the entity was in service or if it was the next_in_service for
 * its sched_data; return %0 otherwise.
 */
static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
{
        struct bfq_sched_data *sd = entity->sched_data;
        struct bfq_service_tree *st = bfq_entity_service_tree(entity);
        int was_in_service = entity == sd->in_service_entity;
        int ret = 0;

        if (!entity->on_st)
                return 0;

        BUG_ON(was_in_service && entity->tree != NULL);

        if (was_in_service) {
                bfq_calc_finish(entity, entity->service);
                sd->in_service_entity = NULL;
        } else if (entity->tree == &st->active)
                bfq_active_extract(st, entity);
        else if (entity->tree == &st->idle)
                bfq_idle_extract(st, entity);
        else if (entity->tree != NULL)
                BUG();

        if (was_in_service || sd->next_in_service == entity)
                ret = bfq_update_next_in_service(sd);

        if (!requeue || !bfq_gt(entity->finish, st->vtime))
                bfq_forget_entity(st, entity);
        else
                bfq_idle_insert(st, entity);

        BUG_ON(sd->in_service_entity == entity);
        BUG_ON(sd->next_in_service == entity);

        return ret;
}
/**
 * bfq_deactivate_entity - deactivate an entity.
 * @entity: the entity to deactivate.
 * @requeue: true if the entity can be put on the idle tree.
 */
static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
{
        struct bfq_sched_data *sd;
        struct bfq_entity *parent;

        for_each_entity_safe(entity, parent) {
                sd = entity->sched_data;

                if (!__bfq_deactivate_entity(entity, requeue))
                        /*
                         * The parent entity is still backlogged, and
                         * we don't need to update it as it is still
                         * in service.
                         */
                        break;

                if (sd->next_in_service != NULL)
                        /*
                         * The parent entity is still backlogged and
                         * the budgets on the path towards the root
                         * need to be updated.
                         */
                        goto update;

                /*
                 * If we get here, the parent is no longer backlogged and
                 * we want to propagate the dequeue upwards.
                 */
                requeue = 1;
        }

        return;

update:
        entity = parent;
        for_each_entity(entity) {
                __bfq_activate_entity(entity);

                sd = entity->sched_data;
                if (!bfq_update_next_in_service(sd))
                        break;
        }
}
/**
 * bfq_update_vtime - update vtime if necessary.
 * @st: the service tree to act upon.
 *
 * If necessary update the service tree vtime to have at least one
 * eligible entity, skipping to its start time. Assumes that the
 * active tree of the device is not empty.
 *
 * NOTE: this hierarchical implementation updates vtimes quite often,
 * we may end up with reactivated processes getting timestamps after a
 * vtime skip done because we needed a ->first_active entity on some
 * intermediate node.
 */
static void bfq_update_vtime(struct bfq_service_tree *st)
{
        struct bfq_entity *entry;
        struct rb_node *node = st->active.rb_node;

        entry = rb_entry(node, struct bfq_entity, rb_node);
        if (bfq_gt(entry->min_start, st->vtime)) {
                st->vtime = entry->min_start;
                bfq_forget_idle(st);
        }
}
/**
 * bfq_first_active_entity - find the eligible entity with
 *                           the smallest finish time
 * @st: the service tree to select from.
 *
 * This function searches the first schedulable entity, starting from the
 * root of the tree and going on the left every time on this side there is
 * a subtree with at least one eligible (start <= vtime) entity. The path on
 * the right is followed only if a) the left subtree contains no eligible
 * entities and b) no eligible entity has been found yet.
 */
static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st)
{
        struct bfq_entity *entry, *first = NULL;
        struct rb_node *node = st->active.rb_node;

        while (node != NULL) {
                entry = rb_entry(node, struct bfq_entity, rb_node);
left:
                if (!bfq_gt(entry->start, st->vtime))
                        first = entry;

                BUG_ON(bfq_gt(entry->min_start, st->vtime));

                if (node->rb_left != NULL) {
                        entry = rb_entry(node->rb_left,
                                         struct bfq_entity, rb_node);
                        if (!bfq_gt(entry->min_start, st->vtime)) {
                                node = node->rb_left;
                                goto left;
                        }
                }
                if (first != NULL)
                        break;
                node = node->rb_right;
        }

        BUG_ON(first == NULL && !RB_EMPTY_ROOT(&st->active));
        return first;
}
/**
 * __bfq_lookup_next_entity - return the first eligible entity in @st.
 * @st: the service tree.
 *
 * Update the virtual time in @st and return the first eligible entity
 * it contains.
 */
static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st,
                                                   bool force)
{
        struct bfq_entity *entity, *new_next_in_service = NULL;

        if (RB_EMPTY_ROOT(&st->active))
                return NULL;

        bfq_update_vtime(st);
        entity = bfq_first_active_entity(st);
        BUG_ON(bfq_gt(entity->start, st->vtime));

        /*
         * If the chosen entity does not match with the sched_data's
         * next_in_service and we are forcedly serving the IDLE priority
         * class tree, bubble up budget update.
         */
        if (unlikely(force && entity != entity->sched_data->next_in_service)) {
                new_next_in_service = entity;
                for_each_entity(new_next_in_service)
                        bfq_update_budget(new_next_in_service);
        }

        return entity;
}
/**
 * bfq_lookup_next_entity - return the first eligible entity in @sd.
 * @sd: the sched_data.
 * @extract: if true the returned entity will be also extracted from @sd.
 *
 * NOTE: since we cache the next_in_service entity at each level of the
 * hierarchy, the complexity of the lookup can be decreased with
 * absolutely no effort just returning the cached next_in_service value;
 * we prefer to do full lookups to test the consistency of the data
 * structures.
 */
static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
                                                 int extract,
                                                 struct bfq_data *bfqd)
{
        struct bfq_service_tree *st = sd->service_tree;
        struct bfq_entity *entity;
        int i = 0;

        BUG_ON(sd->in_service_entity != NULL);

        if (bfqd != NULL &&
            jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) {
                entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1,
                                                  true);
                if (entity != NULL) {
                        i = BFQ_IOPRIO_CLASSES - 1;
                        bfqd->bfq_class_idle_last_service = jiffies;
                        sd->next_in_service = entity;
                }
        }
        for (; i < BFQ_IOPRIO_CLASSES; i++) {
                entity = __bfq_lookup_next_entity(st + i, false);
                if (entity != NULL) {
                        if (extract) {
                                if (sd->next_in_service != entity)
                                        entity = __bfq_lookup_next_entity(
                                                        st + i, true);
                                bfq_check_next_in_service(sd, entity);
                                bfq_active_extract(st + i, entity);
                                sd->in_service_entity = entity;
                                sd->next_in_service = NULL;
                        }
                        break;
                }
        }

        return entity;
}
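/*
 * The for loop above scans the per-class service trees in index order,
 * so entities of lower-indexed (higher-priority) classes always win
 * over the idle class; the jiffies check before the loop is the only
 * exception, granting the idle class one dispatch every
 * BFQ_CL_IDLE_TIMEOUT so that it is not starved completely.
 */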
/*
 * Get next queue for service.
 */
static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd)
{
        struct bfq_entity *entity = NULL;
        struct bfq_sched_data *sd;
        struct bfq_queue *bfqq;

        BUG_ON(bfqd->in_service_queue != NULL);

        if (bfqd->busy_queues == 0)
                return NULL;

        sd = &bfqd->root_group->sched_data;
        for (; sd != NULL; sd = entity->my_sched_data) {
                entity = bfq_lookup_next_entity(sd, 1, bfqd);
                BUG_ON(entity == NULL);
                entity->service = 0;
        }

        bfqq = bfq_entity_to_bfqq(entity);
        BUG_ON(bfqq == NULL);

        return bfqq;
}
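/*
 * The loop above walks down the scheduling hierarchy: at each level the
 * chosen entity is extracted and becomes the in-service entity, and the
 * descent continues into its my_sched_data; a leaf entity has
 * my_sched_data == NULL, i.e., it is a bfq_queue, and ends the walk.
 */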
static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd)
{
        if (bfqd->in_service_bic != NULL) {
                put_io_context(bfqd->in_service_bic->icq.ioc);
                bfqd->in_service_bic = NULL;
        }

        bfqd->in_service_queue = NULL;
        del_timer(&bfqd->idle_slice_timer);
}
static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                                int requeue)
{
        struct bfq_entity *entity = &bfqq->entity;

        if (bfqq == bfqd->in_service_queue)
                __bfq_bfqd_reset_in_service(bfqd);

        bfq_deactivate_entity(entity, requeue);
}
static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
        struct bfq_entity *entity = &bfqq->entity;

        bfq_activate_entity(entity);
}
/*
 * Called when the bfqq no longer has requests pending; remove it from
 * the service tree.
 */
static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq,
                              int requeue)
{
        BUG_ON(!bfq_bfqq_busy(bfqq));
        BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list));

        bfq_log_bfqq(bfqd, bfqq, "del from busy");

        bfq_clear_bfqq_busy(bfqq);

        BUG_ON(bfqd->busy_queues == 0);
        bfqd->busy_queues--;

        if (!bfqq->dispatched) {
                bfq_weights_tree_remove(bfqd, &bfqq->entity,
                                        &bfqd->queue_weights_tree);
                if (!blk_queue_nonrot(bfqd->queue)) {
                        BUG_ON(!bfqd->busy_in_flight_queues);
                        bfqd->busy_in_flight_queues--;
                        if (bfq_bfqq_constantly_seeky(bfqq)) {
                                BUG_ON(!bfqd->const_seeky_busy_in_flight_queues);
                                bfqd->const_seeky_busy_in_flight_queues--;
                        }
                }
        }
        if (bfqq->wr_coeff > 1)
                bfqd->wr_busy_queues--;

        bfq_deactivate_bfqq(bfqd, bfqq, requeue);
}
/*
 * Called when an inactive queue receives a new request.
 */
static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{
        BUG_ON(bfq_bfqq_busy(bfqq));
        BUG_ON(bfqq == bfqd->in_service_queue);

        bfq_log_bfqq(bfqd, bfqq, "add to busy");

        bfq_activate_bfqq(bfqd, bfqq);

        bfq_mark_bfqq_busy(bfqq);
        bfqd->busy_queues++;

        if (!bfqq->dispatched) {
                if (bfqq->wr_coeff == 1)
                        bfq_weights_tree_add(bfqd, &bfqq->entity,
                                             &bfqd->queue_weights_tree);
                if (!blk_queue_nonrot(bfqd->queue)) {
                        bfqd->busy_in_flight_queues++;
                        if (bfq_bfqq_constantly_seeky(bfqq))
                                bfqd->const_seeky_busy_in_flight_queues++;
                }
        }
        if (bfqq->wr_coeff > 1)
                bfqd->wr_busy_queues++;
}