/* res_counter.c — hierarchical resource counter accounting */
  1. /*
  2. * resource cgroups
  3. *
  4. * Copyright 2007 OpenVZ SWsoft Inc
  5. *
  6. * Author: Pavel Emelianov <xemul@openvz.org>
  7. *
  8. */
  9. #include <linux/types.h>
  10. #include <linux/parser.h>
  11. #include <linux/fs.h>
  12. #include <linux/res_counter.h>
  13. #include <linux/uaccess.h>
  14. #include <linux/mm.h>
  15. void res_counter_init(struct res_counter *counter, struct res_counter *parent)
  16. {
  17. spin_lock_init(&counter->lock);
  18. counter->limit = RESOURCE_MAX;
  19. counter->soft_limit = RESOURCE_MAX;
  20. counter->parent = parent;
  21. }
  22. int res_counter_charge_locked(struct res_counter *counter, unsigned long val)
  23. {
  24. if (counter->usage + val > counter->limit) {
  25. counter->failcnt++;
  26. return -ENOMEM;
  27. }
  28. counter->usage += val;
  29. if (counter->usage > counter->max_usage)
  30. counter->max_usage = counter->usage;
  31. return 0;
  32. }
  33. int res_counter_charge(struct res_counter *counter, unsigned long val,
  34. struct res_counter **limit_fail_at)
  35. {
  36. int ret;
  37. unsigned long flags;
  38. struct res_counter *c, *u;
  39. *limit_fail_at = NULL;
  40. local_irq_save(flags);
  41. for (c = counter; c != NULL; c = c->parent) {
  42. spin_lock(&c->lock);
  43. ret = res_counter_charge_locked(c, val);
  44. spin_unlock(&c->lock);
  45. if (ret < 0) {
  46. *limit_fail_at = c;
  47. goto undo;
  48. }
  49. }
  50. ret = 0;
  51. goto done;
  52. undo:
  53. for (u = counter; u != c; u = u->parent) {
  54. spin_lock(&u->lock);
  55. res_counter_uncharge_locked(u, val);
  56. spin_unlock(&u->lock);
  57. }
  58. done:
  59. local_irq_restore(flags);
  60. return ret;
  61. }
  62. void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
  63. {
  64. if (WARN_ON(counter->usage < val))
  65. val = counter->usage;
  66. counter->usage -= val;
  67. }
  68. void res_counter_uncharge(struct res_counter *counter, unsigned long val)
  69. {
  70. unsigned long flags;
  71. struct res_counter *c;
  72. local_irq_save(flags);
  73. for (c = counter; c != NULL; c = c->parent) {
  74. spin_lock(&c->lock);
  75. res_counter_uncharge_locked(c, val);
  76. spin_unlock(&c->lock);
  77. }
  78. local_irq_restore(flags);
  79. }
  80. static inline unsigned long long *
  81. res_counter_member(struct res_counter *counter, int member)
  82. {
  83. switch (member) {
  84. case RES_USAGE:
  85. return &counter->usage;
  86. case RES_MAX_USAGE:
  87. return &counter->max_usage;
  88. case RES_LIMIT:
  89. return &counter->limit;
  90. case RES_FAILCNT:
  91. return &counter->failcnt;
  92. case RES_SOFT_LIMIT:
  93. return &counter->soft_limit;
  94. };
  95. BUG();
  96. return NULL;
  97. }
  98. ssize_t res_counter_read(struct res_counter *counter, int member,
  99. const char __user *userbuf, size_t nbytes, loff_t *pos,
  100. int (*read_strategy)(unsigned long long val, char *st_buf))
  101. {
  102. unsigned long long *val;
  103. char buf[64], *s;
  104. s = buf;
  105. val = res_counter_member(counter, member);
  106. if (read_strategy)
  107. s += read_strategy(*val, s);
  108. else
  109. s += sprintf(s, "%llu\n", *val);
  110. return simple_read_from_buffer((void __user *)userbuf, nbytes,
  111. pos, buf, s - buf);
  112. }
#if BITS_PER_LONG == 32
/*
 * Read one 64-bit counter member.  On a 32-bit kernel a 64-bit field
 * cannot be assumed to be loaded in a single access, so take the
 * counter lock to get a consistent snapshot.
 */
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	unsigned long flags;
	u64 ret;

	spin_lock_irqsave(&counter->lock, flags);
	ret = *res_counter_member(counter, member);
	spin_unlock_irqrestore(&counter->lock, flags);

	return ret;
}
#else
/*
 * 64-bit build: a plain load of the member is used directly —
 * presumably a single aligned access is sufficient here, so no lock
 * is taken.
 */
u64 res_counter_read_u64(struct res_counter *counter, int member)
{
	return *res_counter_member(counter, member);
}
#endif
  129. int res_counter_memparse_write_strategy(const char *buf,
  130. unsigned long long *res)
  131. {
  132. char *end;
  133. /* return RESOURCE_MAX(unlimited) if "-1" is specified */
  134. if (*buf == '-') {
  135. *res = simple_strtoull(buf + 1, &end, 10);
  136. if (*res != 1 || *end != '\0')
  137. return -EINVAL;
  138. *res = RESOURCE_MAX;
  139. return 0;
  140. }
  141. /* FIXME - make memparse() take const char* args */
  142. *res = memparse((char *)buf, &end);
  143. if (*end != '\0')
  144. return -EINVAL;
  145. *res = PAGE_ALIGN(*res);
  146. return 0;
  147. }
  148. int res_counter_write(struct res_counter *counter, int member,
  149. const char *buf, write_strategy_fn write_strategy)
  150. {
  151. char *end;
  152. unsigned long flags;
  153. unsigned long long tmp, *val;
  154. if (write_strategy) {
  155. if (write_strategy(buf, &tmp))
  156. return -EINVAL;
  157. } else {
  158. tmp = simple_strtoull(buf, &end, 10);
  159. if (*end != '\0')
  160. return -EINVAL;
  161. }
  162. spin_lock_irqsave(&counter->lock, flags);
  163. val = res_counter_member(counter, member);
  164. *val = tmp;
  165. spin_unlock_irqrestore(&counter->lock, flags);
  166. return 0;
  167. }