blk-cgroup.c

/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 * 	              Nauman Rafique <nauman@google.com>
 */
#include <linux/ioprio.h>
#include <linux/seq_file.h>
#include <linux/kdev_t.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/genhd.h>
#include <linux/delay.h>
#include "blk-cgroup.h"

#define MAX_KEY_LEN 100

static DEFINE_SPINLOCK(blkio_list_lock);
static LIST_HEAD(blkio_list);

struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
EXPORT_SYMBOL_GPL(blkio_root_cgroup);

/* for encoding cft->private value on file */
#define BLKIOFILE_PRIVATE(x, val)	(((x) << 16) | (val))
/* What policy owns the file, proportional or throttle */
#define BLKIOFILE_POLICY(val)		(((val) >> 16) & 0xffff)
#define BLKIOFILE_ATTR(val)		((val) & 0xffff)
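
/*
 * For example, BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
 * BLKIO_THROTL_read_bps_device) packs the policy id into the upper 16 bits
 * of cft->private and the file attribute into the lower 16 bits;
 * BLKIOFILE_POLICY() and BLKIOFILE_ATTR() recover the two halves when a
 * cgroup file handler runs.
 */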

static inline void blkio_policy_insert_node(struct blkio_cgroup *blkcg,
					    struct blkio_policy_node *pn)
{
	list_add(&pn->node, &blkcg->policy_list);
}

static inline bool cftype_blkg_same_policy(struct cftype *cft,
			struct blkio_group *blkg)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);

	if (blkg->plid == plid)
		return 1;

	return 0;
}

/* Determines if policy node matches cgroup file being accessed */
static inline bool pn_matches_cftype(struct cftype *cft,
			struct blkio_policy_node *pn)
{
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	return (plid == pn->plid && fileid == pn->fileid);
}

/* Must be called with blkcg->lock held */
static inline void blkio_policy_delete_node(struct blkio_policy_node *pn)
{
	list_del(&pn->node);
}

/* Must be called with blkcg->lock held */
static struct blkio_policy_node *
blkio_policy_search_node(const struct blkio_cgroup *blkcg, dev_t dev,
		enum blkio_policy_id plid, int fileid)
{
	struct blkio_policy_node *pn;

	list_for_each_entry(pn, &blkcg->policy_list, node) {
		if (pn->dev == dev && pn->plid == plid && pn->fileid == fileid)
			return pn;
	}

	return NULL;
}

struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
{
	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(cgroup_to_blkio_cgroup);

struct blkio_cgroup *task_blkio_cgroup(struct task_struct *tsk)
{
	return container_of(task_subsys_state(tsk, blkio_subsys_id),
			    struct blkio_cgroup, css);
}
EXPORT_SYMBOL_GPL(task_blkio_cgroup);
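
/*
 * Fan a weight change out to whichever registered policy owns this
 * blkio_group; policies whose plid does not match blkg->plid are skipped.
 */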
static inline void
blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {
		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;
		if (blkiop->ops.blkio_update_group_weight_fn)
			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
							blkg, weight);
	}
}

static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
				int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_bps_device
		    && blkiop->ops.blkio_update_group_read_bps_fn)
			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
								blkg, bps);

		if (fileid == BLKIO_THROTL_write_bps_device
		    && blkiop->ops.blkio_update_group_write_bps_fn)
			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
								blkg, bps);
	}
}

static inline void blkio_update_group_iops(struct blkio_group *blkg,
			unsigned int iops, int fileid)
{
	struct blkio_policy_type *blkiop;

	list_for_each_entry(blkiop, &blkio_list, list) {

		/* If this policy does not own the blkg, do not send updates */
		if (blkiop->plid != blkg->plid)
			continue;

		if (fileid == BLKIO_THROTL_read_iops_device
		    && blkiop->ops.blkio_update_group_read_iops_fn)
			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
								blkg, iops);

		if (fileid == BLKIO_THROTL_write_iops_device
		    && blkiop->ops.blkio_update_group_write_iops_fn)
			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
								blkg, iops);
	}
}

/*
 * Add to the appropriate stat variable depending on the request type.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_add_stat(uint64_t *stat, uint64_t add, bool direction,
				bool sync)
{
	if (direction)
		stat[BLKIO_STAT_WRITE] += add;
	else
		stat[BLKIO_STAT_READ] += add;
	if (sync)
		stat[BLKIO_STAT_SYNC] += add;
	else
		stat[BLKIO_STAT_ASYNC] += add;
}

/*
 * Decrements the appropriate stat variable if non-zero depending on the
 * request type. Panics on value being zero.
 * This should be called with the blkg->stats_lock held.
 */
static void blkio_check_and_dec_stat(uint64_t *stat, bool direction, bool sync)
{
	if (direction) {
		BUG_ON(stat[BLKIO_STAT_WRITE] == 0);
		stat[BLKIO_STAT_WRITE]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_READ] == 0);
		stat[BLKIO_STAT_READ]--;
	}
	if (sync) {
		BUG_ON(stat[BLKIO_STAT_SYNC] == 0);
		stat[BLKIO_STAT_SYNC]--;
	} else {
		BUG_ON(stat[BLKIO_STAT_ASYNC] == 0);
		stat[BLKIO_STAT_ASYNC]--;
	}
}

#ifdef CONFIG_DEBUG_BLK_CGROUP
/* This should be called with the blkg->stats_lock held. */
static void blkio_set_start_group_wait_time(struct blkio_group *blkg,
						struct blkio_group *curr_blkg)
{
	if (blkio_blkg_waiting(&blkg->stats))
		return;
	if (blkg == curr_blkg)
		return;
	blkg->stats.start_group_wait_time = sched_clock();
	blkio_mark_blkg_waiting(&blkg->stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_update_group_wait_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_waiting(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_group_wait_time))
		stats->group_wait_time += now - stats->start_group_wait_time;
	blkio_clear_blkg_waiting(stats);
}

/* This should be called with the blkg->stats_lock held. */
static void blkio_end_empty_time(struct blkio_group_stats *stats)
{
	unsigned long long now;

	if (!blkio_blkg_empty(stats))
		return;

	now = sched_clock();
	if (time_after64(now, stats->start_empty_time))
		stats->empty_time += now - stats->start_empty_time;
	blkio_clear_blkg_empty(stats);
}

void blkiocg_update_set_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	BUG_ON(blkio_blkg_idling(&blkg->stats));
	blkg->stats.start_idle_time = sched_clock();
	blkio_mark_blkg_idling(&blkg->stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_set_idle_time_stats);

void blkiocg_update_idle_time_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	unsigned long long now;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (blkio_blkg_idling(stats)) {
		now = sched_clock();
		if (time_after64(now, stats->start_idle_time))
			stats->idle_time += now - stats->start_idle_time;
		blkio_clear_blkg_idling(stats);
	}
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_idle_time_stats);

void blkiocg_update_avg_queue_size_stats(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	stats->avg_queue_size_sum +=
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] +
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE];
	stats->avg_queue_size_samples++;
	blkio_update_group_wait_time(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_avg_queue_size_stats);

void blkiocg_set_start_empty_time(struct blkio_group *blkg)
{
	unsigned long flags;
	struct blkio_group_stats *stats;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;

	if (stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_READ] ||
			stats->stat_arr[BLKIO_STAT_QUEUED][BLKIO_STAT_WRITE]) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	/*
	 * group is already marked empty. This can happen if cfqq got new
	 * request in parent group and moved to this group while being added
	 * to service tree. Just ignore the event and move on.
	 */
	if (blkio_blkg_empty(stats)) {
		spin_unlock_irqrestore(&blkg->stats_lock, flags);
		return;
	}

	stats->start_empty_time = sched_clock();
	blkio_mark_blkg_empty(stats);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_set_start_empty_time);

void blkiocg_update_dequeue_stats(struct blkio_group *blkg,
			unsigned long dequeue)
{
	blkg->stats.dequeue += dequeue;
}
EXPORT_SYMBOL_GPL(blkiocg_update_dequeue_stats);
#else
static inline void blkio_set_start_group_wait_time(struct blkio_group *blkg,
					struct blkio_group *curr_blkg) {}
static inline void blkio_end_empty_time(struct blkio_group_stats *stats) {}
#endif
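
/*
 * Account a request being queued on @blkg: bump the QUEUED counters and,
 * in debug builds, close the empty period and start group wait tracking.
 */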
void blkiocg_update_io_add_stats(struct blkio_group *blkg,
			struct blkio_group *curr_blkg, bool direction,
			bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_add_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED], 1, direction,
			sync);
	blkio_end_empty_time(&blkg->stats);
	blkio_set_start_group_wait_time(blkg, curr_blkg);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_add_stats);

void blkiocg_update_io_remove_stats(struct blkio_group *blkg,
						bool direction, bool sync)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkio_check_and_dec_stat(blkg->stats.stat_arr[BLKIO_STAT_QUEUED],
					direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_remove_stats);

void blkiocg_update_timeslice_used(struct blkio_group *blkg, unsigned long time,
				unsigned long unaccounted_time)
{
	unsigned long flags;

	spin_lock_irqsave(&blkg->stats_lock, flags);
	blkg->stats.time += time;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	blkg->stats.unaccounted_time += unaccounted_time;
#endif
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_timeslice_used);

/*
 * should be called under rcu read lock or queue lock to make sure blkg pointer
 * is valid.
 */
void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
				uint64_t bytes, bool direction, bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	stats_cpu->sectors += bytes >> 9;
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
			1, direction, sync);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
			bytes, direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);

void blkiocg_update_completion_stats(struct blkio_group *blkg,
	uint64_t start_time, uint64_t io_start_time, bool direction, bool sync)
{
	struct blkio_group_stats *stats;
	unsigned long flags;
	unsigned long long now = sched_clock();

	spin_lock_irqsave(&blkg->stats_lock, flags);
	stats = &blkg->stats;
	if (time_after64(now, io_start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_SERVICE_TIME],
				now - io_start_time, direction, sync);
	if (time_after64(io_start_time, start_time))
		blkio_add_stat(stats->stat_arr[BLKIO_STAT_WAIT_TIME],
				io_start_time - start_time, direction, sync);
	spin_unlock_irqrestore(&blkg->stats_lock, flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_completion_stats);

/* Merged stats are per cpu. */
void blkiocg_update_io_merged_stats(struct blkio_group *blkg, bool direction,
					bool sync)
{
	struct blkio_group_stats_cpu *stats_cpu;
	unsigned long flags;

	/*
	 * Disabling interrupts to provide mutual exclusion between two
	 * writes on same cpu. It probably is not needed for 64bit. Not
	 * optimizing that case yet.
	 */
	local_irq_save(flags);

	stats_cpu = this_cpu_ptr(blkg->stats_cpu);

	u64_stats_update_begin(&stats_cpu->syncp);
	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_MERGED], 1,
				direction, sync);
	u64_stats_update_end(&stats_cpu->syncp);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(blkiocg_update_io_merged_stats);

/*
 * This function allocates the per cpu stats for blkio_group. Should be called
 * from sleepable context as alloc_percpu() requires that.
 */
int blkio_alloc_blkg_stats(struct blkio_group *blkg)
{
	/* Allocate memory for per cpu stats */
	blkg->stats_cpu = alloc_percpu(struct blkio_group_stats_cpu);
	if (!blkg->stats_cpu)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(blkio_alloc_blkg_stats);

void blkiocg_add_blkio_group(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, void *key, dev_t dev,
		enum blkio_policy_id plid)
{
	unsigned long flags;

	spin_lock_irqsave(&blkcg->lock, flags);
	spin_lock_init(&blkg->stats_lock);
	rcu_assign_pointer(blkg->key, key);
	blkg->blkcg_id = css_id(&blkcg->css);
	hlist_add_head_rcu(&blkg->blkcg_node, &blkcg->blkg_list);
	blkg->plid = plid;
	spin_unlock_irqrestore(&blkcg->lock, flags);
	/* Need to take css reference ? */
	cgroup_path(blkcg->css.cgroup, blkg->path, sizeof(blkg->path));
	blkg->dev = dev;
}
EXPORT_SYMBOL_GPL(blkiocg_add_blkio_group);

static void __blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	hlist_del_init_rcu(&blkg->blkcg_node);
	blkg->blkcg_id = 0;
}

/*
 * returns 0 if blkio_group was still on cgroup list. Otherwise returns 1
 * indicating that blkio_group was unhashed by the time we got to it.
 */
int blkiocg_del_blkio_group(struct blkio_group *blkg)
{
	struct blkio_cgroup *blkcg;
	unsigned long flags;
	struct cgroup_subsys_state *css;
	int ret = 1;

	rcu_read_lock();
	css = css_lookup(&blkio_subsys, blkg->blkcg_id);
	if (css) {
		blkcg = container_of(css, struct blkio_cgroup, css);
		spin_lock_irqsave(&blkcg->lock, flags);
		if (!hlist_unhashed(&blkg->blkcg_node)) {
			__blkiocg_del_blkio_group(blkg);
			ret = 0;
		}
		spin_unlock_irqrestore(&blkcg->lock, flags);
	}

	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(blkiocg_del_blkio_group);

/* called under rcu_read_lock(). */
struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	void *__key;

	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		__key = blkg->key;
		if (__key == key)
			return blkg;
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
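
/*
 * Tear down every group attached to @q by asking each registered policy
 * to clear the queue; retries until all policies report an empty list.
 */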
void blkg_destroy_all(struct request_queue *q)
{
	struct blkio_policy_type *pol;

	while (true) {
		bool done = true;

		spin_lock(&blkio_list_lock);
		spin_lock_irq(q->queue_lock);

		/*
		 * clear_queue_fn() might return with non-empty group list
		 * if it raced cgroup removal and lost.  cgroup removal is
		 * guaranteed to make forward progress and retrying after a
		 * while is enough.  This ugliness is scheduled to be
		 * removed after locking update.
		 */
		list_for_each_entry(pol, &blkio_list, list)
			if (!pol->ops.blkio_clear_queue_fn(q))
				done = false;

		spin_unlock_irq(q->queue_lock);
		spin_unlock(&blkio_list_lock);

		if (done)
			break;

		msleep(10);	/* just some random duration I like */
	}
}

static void blkio_reset_stats_cpu(struct blkio_group *blkg)
{
	struct blkio_group_stats_cpu *stats_cpu;
	int i, j, k;

	/*
	 * Note: On 64 bit arch this should not be an issue. This has the
	 * possibility of returning some inconsistent value on 32bit arch
	 * as 64bit update on 32bit is non atomic. Taking care of this
	 * corner case makes code very complicated, like sending IPIs to
	 * cpus, taking care of stats of offline cpus etc.
	 *
	 * reset stats is anyway more of a debug feature and this sounds a
	 * corner case. So I am not complicating the code yet until and
	 * unless this becomes a real issue.
	 */
	for_each_possible_cpu(i) {
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}
}
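
/*
 * Handler for writes to blkio.reset_stats: zero every group's stats while
 * preserving the QUEUED counters and, in debug builds, the idling, waiting
 * and empty state flags with restarted timestamps.
 */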
static int
blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
{
	struct blkio_cgroup *blkcg;
	struct blkio_group *blkg;
	struct blkio_group_stats *stats;
	struct hlist_node *n;
	uint64_t queued[BLKIO_STAT_TOTAL];
	int i;
#ifdef CONFIG_DEBUG_BLK_CGROUP
	bool idling, waiting, empty;
	unsigned long long now = sched_clock();
#endif

	blkcg = cgroup_to_blkio_cgroup(cgroup);
	spin_lock_irq(&blkcg->lock);
	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		spin_lock(&blkg->stats_lock);
		stats = &blkg->stats;
#ifdef CONFIG_DEBUG_BLK_CGROUP
		idling = blkio_blkg_idling(stats);
		waiting = blkio_blkg_waiting(stats);
		empty = blkio_blkg_empty(stats);
#endif
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			queued[i] = stats->stat_arr[BLKIO_STAT_QUEUED][i];
		memset(stats, 0, sizeof(struct blkio_group_stats));
		for (i = 0; i < BLKIO_STAT_TOTAL; i++)
			stats->stat_arr[BLKIO_STAT_QUEUED][i] = queued[i];
#ifdef CONFIG_DEBUG_BLK_CGROUP
		if (idling) {
			blkio_mark_blkg_idling(stats);
			stats->start_idle_time = now;
		}
		if (waiting) {
			blkio_mark_blkg_waiting(stats);
			stats->start_group_wait_time = now;
		}
		if (empty) {
			blkio_mark_blkg_empty(stats);
			stats->start_empty_time = now;
		}
#endif
		spin_unlock(&blkg->stats_lock);

		/* Reset Per cpu stats which don't take blkg->stats_lock */
		blkio_reset_stats_cpu(blkg);
	}

	spin_unlock_irq(&blkcg->lock);
	return 0;
}
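
/*
 * Build the "major:minor [Read|Write|Sync|Async|Total]" key used by the
 * map-style stat files; with @diskname_only only the device part is kept.
 */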
static void blkio_get_key_name(enum stat_sub_type type, dev_t dev, char *str,
				int chars_left, bool diskname_only)
{
	snprintf(str, chars_left, "%d:%d", MAJOR(dev), MINOR(dev));
	chars_left -= strlen(str);
	if (chars_left <= 0) {
		printk(KERN_WARNING
			"Possibly incorrect cgroup stat display format\n");
		return;
	}
	if (diskname_only)
		return;
	switch (type) {
	case BLKIO_STAT_READ:
		strlcat(str, " Read", chars_left);
		break;
	case BLKIO_STAT_WRITE:
		strlcat(str, " Write", chars_left);
		break;
	case BLKIO_STAT_SYNC:
		strlcat(str, " Sync", chars_left);
		break;
	case BLKIO_STAT_ASYNC:
		strlcat(str, " Async", chars_left);
		break;
	case BLKIO_STAT_TOTAL:
		strlcat(str, " Total", chars_left);
		break;
	default:
		strlcat(str, " Invalid", chars_left);
	}
}

static uint64_t blkio_fill_stat(char *str, int chars_left, uint64_t val,
				struct cgroup_map_cb *cb, dev_t dev)
{
	blkio_get_key_name(0, dev, str, chars_left, true);
	cb->fill(cb, str, val);
	return val;
}

static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
			enum stat_type_cpu type, enum stat_sub_type sub_type)
{
	int cpu;
	struct blkio_group_stats_cpu *stats_cpu;
	u64 val = 0, tval;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);

		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			if (type == BLKIO_STAT_CPU_SECTORS)
				tval = stats_cpu->sectors;
			else
				tval = stats_cpu->stat_arr_cpu[type][sub_type];
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		val += tval;
	}

	return val;
}

static uint64_t blkio_get_stat_cpu(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type_cpu type)
{
	uint64_t disk_total, val;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_CPU_SECTORS) {
		val = blkio_read_stat_cpu(blkg, type, 0);
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, val, cb, dev);
	}

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		val = blkio_read_stat_cpu(blkg, type, sub_type);
		cb->fill(cb, key_str, val);
	}

	disk_total = blkio_read_stat_cpu(blkg, type, BLKIO_STAT_READ) +
			blkio_read_stat_cpu(blkg, type, BLKIO_STAT_WRITE);

	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}

/* This should be called with blkg->stats_lock held */
static uint64_t blkio_get_stat(struct blkio_group *blkg,
		struct cgroup_map_cb *cb, dev_t dev, enum stat_type type)
{
	uint64_t disk_total;
	char key_str[MAX_KEY_LEN];
	enum stat_sub_type sub_type;

	if (type == BLKIO_STAT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.time, cb, dev);
#ifdef CONFIG_DEBUG_BLK_CGROUP
	if (type == BLKIO_STAT_UNACCOUNTED_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.unaccounted_time, cb, dev);
	if (type == BLKIO_STAT_AVG_QUEUE_SIZE) {
		uint64_t sum = blkg->stats.avg_queue_size_sum;
		uint64_t samples = blkg->stats.avg_queue_size_samples;
		if (samples)
			do_div(sum, samples);
		else
			sum = 0;
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1, sum, cb, dev);
	}
	if (type == BLKIO_STAT_GROUP_WAIT_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.group_wait_time, cb, dev);
	if (type == BLKIO_STAT_IDLE_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.idle_time, cb, dev);
	if (type == BLKIO_STAT_EMPTY_TIME)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.empty_time, cb, dev);
	if (type == BLKIO_STAT_DEQUEUE)
		return blkio_fill_stat(key_str, MAX_KEY_LEN - 1,
					blkg->stats.dequeue, cb, dev);
#endif

	for (sub_type = BLKIO_STAT_READ; sub_type < BLKIO_STAT_TOTAL;
			sub_type++) {
		blkio_get_key_name(sub_type, dev, key_str, MAX_KEY_LEN, false);
		cb->fill(cb, key_str, blkg->stats.stat_arr[type][sub_type]);
	}
	disk_total = blkg->stats.stat_arr[type][BLKIO_STAT_READ] +
			blkg->stats.stat_arr[type][BLKIO_STAT_WRITE];
	blkio_get_key_name(BLKIO_STAT_TOTAL, dev, key_str, MAX_KEY_LEN, false);
	cb->fill(cb, key_str, disk_total);
	return disk_total;
}
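
/*
 * Parse a "major:minor value" rule as written to one of the per-device
 * policy files (e.g. "8:16 1000"). A value of 0 is a rule-removal request,
 * so device presence is not checked in that case.
 */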
static int blkio_policy_parse_and_set(char *buf,
	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
{
	struct gendisk *disk = NULL;
	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
	unsigned long major, minor;
	int i = 0, ret = -EINVAL;
	int part;
	dev_t dev;
	u64 temp;

	memset(s, 0, sizeof(s));

	while ((p = strsep(&buf, " ")) != NULL) {
		if (!*p)
			continue;

		s[i++] = p;

		/* Prevent input of too many things */
		if (i == 3)
			break;
	}

	if (i != 2)
		goto out;

	p = strsep(&s[0], ":");
	if (p != NULL)
		major_s = p;
	else
		goto out;

	minor_s = s[0];
	if (!minor_s)
		goto out;

	if (strict_strtoul(major_s, 10, &major))
		goto out;

	if (strict_strtoul(minor_s, 10, &minor))
		goto out;

	dev = MKDEV(major, minor);

	if (strict_strtoull(s[1], 10, &temp))
		goto out;

	/* For rule removal, do not check for device presence. */
	if (temp) {
		disk = get_gendisk(dev, &part);
		if (!disk || part) {
			ret = -ENODEV;
			goto out;
		}
	}

	newpn->dev = dev;

	switch (plid) {
	case BLKIO_POLICY_PROP:
		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
		     temp > BLKIO_WEIGHT_MAX)
			goto out;

		newpn->plid = plid;
		newpn->fileid = fileid;
		newpn->val.weight = temp;
		break;
	case BLKIO_POLICY_THROTL:
		switch (fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.bps = temp;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (temp > THROTL_IOPS_MAX)
				goto out;

			newpn->plid = plid;
			newpn->fileid = fileid;
			newpn->val.iops = (unsigned int)temp;
			break;
		}
		break;
	default:
		BUG();
	}
	ret = 0;
out:
	put_disk(disk);
	return ret;
}

unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
			      dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int weight;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device);
	if (pn)
		weight = pn->val.weight;
	else
		weight = blkcg->weight;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return weight;
}
EXPORT_SYMBOL_GPL(blkcg_get_weight);

uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	uint64_t bps = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device);
	if (pn)
		bps = pn->val.bps;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return bps;
}

unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
{
	struct blkio_policy_node *pn;
	unsigned long flags;
	unsigned int iops = -1;

	spin_lock_irqsave(&blkcg->lock, flags);
	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device);
	if (pn)
		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);

	return iops;
}

/* Checks whether user asked for deleting a policy rule */
static bool blkio_delete_rule_command(struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->val.weight == 0)
			return 1;
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			if (pn->val.bps == 0)
				return 1;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			if (pn->val.iops == 0)
				return 1;
		}
		break;
	default:
		BUG();
	}

	return 0;
}
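
/* Copy the new limit from @newpn into the existing rule @oldpn in place. */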
static void blkio_update_policy_rule(struct blkio_policy_node *oldpn,
					struct blkio_policy_node *newpn)
{
	switch (oldpn->plid) {
	case BLKIO_POLICY_PROP:
		oldpn->val.weight = newpn->val.weight;
		break;
	case BLKIO_POLICY_THROTL:
		switch (newpn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			oldpn->val.bps = newpn->val.bps;
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			oldpn->val.iops = newpn->val.iops;
		}
		break;
	default:
		BUG();
	}
}

/*
 * Some rules/values in blkg have changed. Propagate those to respective
 * policies.
 */
static void blkio_update_blkg_policy(struct blkio_cgroup *blkcg,
		struct blkio_group *blkg, struct blkio_policy_node *pn)
{
	unsigned int weight, iops;
	u64 bps;

	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		weight = pn->val.weight ? pn->val.weight :
				blkcg->weight;
		blkio_update_group_weight(blkg, weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			bps = pn->val.bps ? pn->val.bps : (-1);
			blkio_update_group_bps(blkg, bps, pn->fileid);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			iops = pn->val.iops ? pn->val.iops : (-1);
			blkio_update_group_iops(blkg, iops, pn->fileid);
			break;
		}
		break;
	default:
		BUG();
	}
}

/*
 * A policy node rule has been updated. Propagate this update to all the
 * block groups which might be affected by this update.
 */
static void blkio_update_policy_node_blkg(struct blkio_cgroup *blkcg,
				struct blkio_policy_node *pn)
{
	struct blkio_group *blkg;
	struct hlist_node *n;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (pn->dev != blkg->dev || pn->plid != blkg->plid)
			continue;
		blkio_update_blkg_policy(blkcg, blkg, pn);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
}
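
/*
 * Write handler shared by the per-device policy files: parse the rule,
 * then insert, update, or delete the matching policy node and push the
 * result out to the affected block groups.
 */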
static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
				       const char *buffer)
{
	int ret = 0;
	char *buf;
	struct blkio_policy_node *newpn, *pn;
	struct blkio_cgroup *blkcg;
	int keep_newpn = 0;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int fileid = BLKIOFILE_ATTR(cft->private);

	buf = kstrdup(buffer, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	newpn = kzalloc(sizeof(*newpn), GFP_KERNEL);
	if (!newpn) {
		ret = -ENOMEM;
		goto free_buf;
	}

	ret = blkio_policy_parse_and_set(buf, newpn, plid, fileid);
	if (ret)
		goto free_newpn;

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	spin_lock_irq(&blkcg->lock);

	pn = blkio_policy_search_node(blkcg, newpn->dev, plid, fileid);
	if (!pn) {
		if (!blkio_delete_rule_command(newpn)) {
			blkio_policy_insert_node(blkcg, newpn);
			keep_newpn = 1;
		}
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}

	if (blkio_delete_rule_command(newpn)) {
		blkio_policy_delete_node(pn);
		kfree(pn);
		spin_unlock_irq(&blkcg->lock);
		goto update_io_group;
	}
	spin_unlock_irq(&blkcg->lock);

	blkio_update_policy_rule(pn, newpn);

update_io_group:
	blkio_update_policy_node_blkg(blkcg, newpn);

free_newpn:
	if (!keep_newpn)
		kfree(newpn);
free_buf:
	kfree(buf);
	return ret;
}

static void
blkio_print_policy_node(struct seq_file *m, struct blkio_policy_node *pn)
{
	switch (pn->plid) {
	case BLKIO_POLICY_PROP:
		if (pn->fileid == BLKIO_PROP_weight_device)
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.weight);
		break;
	case BLKIO_POLICY_THROTL:
		switch (pn->fileid) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
			seq_printf(m, "%u:%u\t%llu\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.bps);
			break;
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			seq_printf(m, "%u:%u\t%u\n", MAJOR(pn->dev),
				MINOR(pn->dev), pn->val.iops);
			break;
		}
		break;
	default:
		BUG();
	}
}

/* cgroup files which read their data from policy nodes end up here */
static void blkio_read_policy_node_files(struct cftype *cft,
			struct blkio_cgroup *blkcg, struct seq_file *m)
{
	struct blkio_policy_node *pn;

	if (!list_empty(&blkcg->policy_list)) {
		spin_lock_irq(&blkcg->lock);
		list_for_each_entry(pn, &blkcg->policy_list, node) {
			if (!pn_matches_cftype(cft, pn))
				continue;
			blkio_print_policy_node(m, pn);
		}
		spin_unlock_irq(&blkcg->lock);
	}
}

static int blkiocg_file_read(struct cgroup *cgrp, struct cftype *cft,
				struct seq_file *m)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_read_bps_device:
		case BLKIO_THROTL_write_bps_device:
		case BLKIO_THROTL_read_iops_device:
		case BLKIO_THROTL_write_iops_device:
			blkio_read_policy_node_files(cft, blkcg, m);
			return 0;
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
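
/*
 * Walk every group in the cgroup and emit one stat of @type per device;
 * @pcpu selects the lockless per-cpu counters over the locked ones, and
 * @show_total appends a cgroup-wide "Total" line.
 */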
static int blkio_read_blkg_stats(struct blkio_cgroup *blkcg,
		struct cftype *cft, struct cgroup_map_cb *cb,
		enum stat_type type, bool show_total, bool pcpu)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	uint64_t cgroup_total = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
		if (blkg->dev) {
			if (!cftype_blkg_same_policy(cft, blkg))
				continue;
			if (pcpu)
				cgroup_total += blkio_get_stat_cpu(blkg, cb,
						blkg->dev, type);
			else {
				spin_lock_irq(&blkg->stats_lock);
				cgroup_total += blkio_get_stat(blkg, cb,
						blkg->dev, type);
				spin_unlock_irq(&blkg->stats_lock);
			}
		}
	}
	if (show_total)
		cb->fill(cb, "Total", cgroup_total);
	rcu_read_unlock();
	return 0;
}

/* All map kind of cgroup file get serviced by this function */
static int blkiocg_file_read_map(struct cgroup *cgrp, struct cftype *cft,
				struct cgroup_map_cb *cb)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_TIME, 0, 0);
		case BLKIO_PROP_sectors:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SECTORS, 0, 1);
		case BLKIO_PROP_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_PROP_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		case BLKIO_PROP_io_service_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_SERVICE_TIME, 1, 0);
		case BLKIO_PROP_io_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_WAIT_TIME, 1, 0);
		case BLKIO_PROP_io_merged:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_MERGED, 1, 1);
		case BLKIO_PROP_io_queued:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_QUEUED, 1, 0);
#ifdef CONFIG_DEBUG_BLK_CGROUP
		case BLKIO_PROP_unaccounted_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_UNACCOUNTED_TIME, 0, 0);
		case BLKIO_PROP_dequeue:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_DEQUEUE, 0, 0);
		case BLKIO_PROP_avg_queue_size:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_AVG_QUEUE_SIZE, 0, 0);
		case BLKIO_PROP_group_wait_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_GROUP_WAIT_TIME, 0, 0);
		case BLKIO_PROP_idle_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_IDLE_TIME, 0, 0);
		case BLKIO_PROP_empty_time:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_EMPTY_TIME, 0, 0);
#endif
		default:
			BUG();
		}
		break;
	case BLKIO_POLICY_THROTL:
		switch (name) {
		case BLKIO_THROTL_io_service_bytes:
			return blkio_read_blkg_stats(blkcg, cft, cb,
					BLKIO_STAT_CPU_SERVICE_BYTES, 1, 1);
		case BLKIO_THROTL_io_serviced:
			return blkio_read_blkg_stats(blkcg, cft, cb,
						BLKIO_STAT_CPU_SERVICED, 1, 1);
		default:
			BUG();
		}
		break;
	default:
		BUG();
	}

	return 0;
}
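
/*
 * Update the cgroup-wide default weight; groups covered by a per-device
 * weight_device rule keep their override.
 */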
static int blkio_weight_write(struct blkio_cgroup *blkcg, u64 val)
{
	struct blkio_group *blkg;
	struct hlist_node *n;
	struct blkio_policy_node *pn;

	if (val < BLKIO_WEIGHT_MIN || val > BLKIO_WEIGHT_MAX)
		return -EINVAL;

	spin_lock(&blkio_list_lock);
	spin_lock_irq(&blkcg->lock);
	blkcg->weight = (unsigned int)val;

	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
		pn = blkio_policy_search_node(blkcg, blkg->dev,
				BLKIO_POLICY_PROP, BLKIO_PROP_weight_device);
		if (pn)
			continue;

		blkio_update_group_weight(blkg, blkcg->weight);
	}

	spin_unlock_irq(&blkcg->lock);
	spin_unlock(&blkio_list_lock);
	return 0;
}

static u64 blkiocg_file_read_u64(struct cgroup *cgrp, struct cftype *cft)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return (u64)blkcg->weight;
		}
		break;
	default:
		BUG();
	}
	return 0;
}

static int
blkiocg_file_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val)
{
	struct blkio_cgroup *blkcg;
	enum blkio_policy_id plid = BLKIOFILE_POLICY(cft->private);
	int name = BLKIOFILE_ATTR(cft->private);

	blkcg = cgroup_to_blkio_cgroup(cgrp);

	switch (plid) {
	case BLKIO_POLICY_PROP:
		switch (name) {
		case BLKIO_PROP_weight:
			return blkio_weight_write(blkcg, val);
		}
		break;
	default:
		BUG();
	}

	return 0;
}

struct cftype blkio_files[] = {
	{
		.name = "weight_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "weight",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_weight),
		.read_u64 = blkiocg_file_read_u64,
		.write_u64 = blkiocg_file_write_u64,
	},
	{
		.name = "time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "sectors",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_sectors),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_service_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_service_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_merged",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_merged),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "io_queued",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_io_queued),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "reset_stats",
		.write_u64 = blkiocg_reset_stats,
	},
#ifdef CONFIG_BLK_DEV_THROTTLING
	{
		.name = "throttle.read_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_bps_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_bps_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.read_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_read_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.write_iops_device",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_write_iops_device),
		.read_seq_string = blkiocg_file_read,
		.write_string = blkiocg_file_write,
		.max_write_len = 256,
	},
	{
		.name = "throttle.io_service_bytes",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_service_bytes),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "throttle.io_serviced",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_THROTL,
				BLKIO_THROTL_io_serviced),
		.read_map = blkiocg_file_read_map,
	},
#endif /* CONFIG_BLK_DEV_THROTTLING */
#ifdef CONFIG_DEBUG_BLK_CGROUP
	{
		.name = "avg_queue_size",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_avg_queue_size),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "group_wait_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_group_wait_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "idle_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_idle_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "empty_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_empty_time),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "dequeue",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_dequeue),
		.read_map = blkiocg_file_read_map,
	},
	{
		.name = "unaccounted_time",
		.private = BLKIOFILE_PRIVATE(BLKIO_POLICY_PROP,
				BLKIO_PROP_unaccounted_time),
		.read_map = blkiocg_file_read_map,
	},
#endif
	{ }	/* terminate */
};
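
/*
 * Example usage from userspace (illustrative; paths assume the blkio
 * controller is mounted at /sys/fs/cgroup/blkio and a child group "grp"):
 *
 *   # give grp a per-device weight on disk 8:16
 *   echo "8:16 500" > /sys/fs/cgroup/blkio/grp/blkio.weight_device
 *
 *   # cap reads on 8:16 to 1MB/s; writing 0 removes the rule
 *   echo "8:16 1048576" > /sys/fs/cgroup/blkio/grp/blkio.throttle.read_bps_device
 */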

static void blkiocg_destroy(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg = cgroup_to_blkio_cgroup(cgroup);
	unsigned long flags;
	struct blkio_group *blkg;
	void *key;
	struct blkio_policy_type *blkiop;
	struct blkio_policy_node *pn, *pntmp;

	rcu_read_lock();
	do {
		spin_lock_irqsave(&blkcg->lock, flags);

		if (hlist_empty(&blkcg->blkg_list)) {
			spin_unlock_irqrestore(&blkcg->lock, flags);
			break;
		}

		blkg = hlist_entry(blkcg->blkg_list.first, struct blkio_group,
					blkcg_node);
		key = rcu_dereference(blkg->key);
		__blkiocg_del_blkio_group(blkg);

		spin_unlock_irqrestore(&blkcg->lock, flags);

		/*
		 * This blkio_group is being unlinked as associated cgroup is
		 * going away. Let all the IO controlling policies know about
		 * this event.
		 */
		spin_lock(&blkio_list_lock);
		list_for_each_entry(blkiop, &blkio_list, list) {
			if (blkiop->plid != blkg->plid)
				continue;
			blkiop->ops.blkio_unlink_group_fn(key, blkg);
		}
		spin_unlock(&blkio_list_lock);
	} while (1);

	list_for_each_entry_safe(pn, pntmp, &blkcg->policy_list, node) {
		blkio_policy_delete_node(pn);
		kfree(pn);
	}

	free_css_id(&blkio_subsys, &blkcg->css);
	rcu_read_unlock();
	if (blkcg != &blkio_root_cgroup)
		kfree(blkcg);
}
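
/*
 * Allocate a new blkio cgroup; the root cgroup uses the statically
 * allocated blkio_root_cgroup (with twice the default weight) instead.
 */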
static struct cgroup_subsys_state *blkiocg_create(struct cgroup *cgroup)
{
	struct blkio_cgroup *blkcg;
	struct cgroup *parent = cgroup->parent;

	if (!parent) {
		blkcg = &blkio_root_cgroup;
		goto done;
	}

	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
	if (!blkcg)
		return ERR_PTR(-ENOMEM);

	blkcg->weight = BLKIO_WEIGHT_DEFAULT;
done:
	spin_lock_init(&blkcg->lock);
	INIT_HLIST_HEAD(&blkcg->blkg_list);

	INIT_LIST_HEAD(&blkcg->policy_list);
	return &blkcg->css;
}

/*
 * We cannot support shared io contexts, as we have no means to support
 * two tasks with the same ioc in two different groups without major rework
 * of the main cic data structures.  For now we allow a task to change
 * its cgroup only if it's the only owner of its ioc.
 */
static int blkiocg_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;
	int ret = 0;

	/* task_lock() is needed to avoid races with exit_io_context() */
	cgroup_taskset_for_each(task, cgrp, tset) {
		task_lock(task);
		ioc = task->io_context;
		if (ioc && atomic_read(&ioc->nr_tasks) > 1)
			ret = -EINVAL;
		task_unlock(task);
		if (ret)
			break;
	}
	return ret;
}

static void blkiocg_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;
	struct io_context *ioc;

	cgroup_taskset_for_each(task, cgrp, tset) {
		/* we don't lose anything even if ioc allocation fails */
		ioc = get_task_io_context(task, GFP_ATOMIC, NUMA_NO_NODE);
		if (ioc) {
			ioc_cgroup_changed(ioc);
			put_io_context(ioc);
		}
	}
}

struct cgroup_subsys blkio_subsys = {
	.name = "blkio",
	.create = blkiocg_create,
	.can_attach = blkiocg_can_attach,
	.attach = blkiocg_attach,
	.destroy = blkiocg_destroy,
#ifdef CONFIG_BLK_CGROUP
	/* note: blkio_subsys_id is otherwise defined in blk-cgroup.h */
	.subsys_id = blkio_subsys_id,
#endif
	.base_cftypes = blkio_files,
	.use_id = 1,
	.module = THIS_MODULE,

	/*
	 * blkio subsystem is utterly broken in terms of hierarchy support.
	 * It treats all cgroups equally regardless of where they're
	 * located in the hierarchy - all cgroups are treated as if they're
	 * right below the root.  Fix it and remove the following.
	 */
	.broken_hierarchy = true,
};
EXPORT_SYMBOL_GPL(blkio_subsys);

void blkio_policy_register(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_add_tail(&blkiop->list, &blkio_list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_register);

void blkio_policy_unregister(struct blkio_policy_type *blkiop)
{
	spin_lock(&blkio_list_lock);
	list_del_init(&blkiop->list);
	spin_unlock(&blkio_list_lock);
}
EXPORT_SYMBOL_GPL(blkio_policy_unregister);

static int __init init_cgroup_blkio(void)
{
	return cgroup_load_subsys(&blkio_subsys);
}

static void __exit exit_cgroup_blkio(void)
{
	cgroup_unload_subsys(&blkio_subsys);
}

module_init(init_cgroup_blkio);
module_exit(exit_cgroup_blkio);
MODULE_LICENSE("GPL");