/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value);
static int regcache_rbtree_exit(struct regmap *map);

struct regcache_rbtree_node {
	/* block of adjacent registers */
	void *block;
	/* Which registers are present */
	long *cache_present;
	/* base register handled by this block */
	unsigned int base_reg;
	/* number of registers available in the block */
	unsigned int blklen;
	/* the actual rbtree node holding this block */
	struct rb_node node;
} __attribute__ ((packed));

struct regcache_rbtree_ctx {
	struct rb_root root;
	struct regcache_rbtree_node *cached_rbnode;
};

static inline void regcache_rbtree_get_base_top_reg(
	struct regmap *map,
	struct regcache_rbtree_node *rbnode,
	unsigned int *base, unsigned int *top)
{
	*base = rbnode->base_reg;
	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
}

static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}

static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	set_bit(idx, rbnode->cache_present);
	regcache_set_val(map, rbnode->block, idx, val);
}
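
/*
 * Find the block covering @reg.  The most recently used block is tried
 * first as a fast path; on a miss we fall back to a normal binary search
 * of the rbtree and cache whichever block we find.
 */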
static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
							   unsigned int reg)
{
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;

	rbnode = rbtree_ctx->cached_rbnode;
	if (rbnode) {
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg)
			return rbnode;
	}

	node = rbtree_ctx->root.rb_node;
	while (node) {
		rbnode = container_of(node, struct regcache_rbtree_node, node);
		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (reg >= base_reg && reg <= top_reg) {
			rbtree_ctx->cached_rbnode = rbnode;
			return rbnode;
		} else if (reg > top_reg) {
			node = node->rb_right;
		} else if (reg < base_reg) {
			node = node->rb_left;
		}
	}

	return NULL;
}
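
/*
 * Link @rbnode into the tree, ordered by register range.  Returns 1 on
 * success and 0 if an existing block already covers rbnode->base_reg,
 * in which case nothing is inserted.
 */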
static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
				  struct regcache_rbtree_node *rbnode)
{
	struct rb_node **new, *parent;
	struct regcache_rbtree_node *rbnode_tmp;
	unsigned int base_reg_tmp, top_reg_tmp;
	unsigned int base_reg;

	parent = NULL;
	new = &root->rb_node;
	while (*new) {
		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
					  node);
		/* base and top registers of the current rbnode */
		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
						 &top_reg_tmp);
		/* base register of the rbnode to be added */
		base_reg = rbnode->base_reg;
		parent = *new;
		/* if this register has already been inserted, just return */
		if (base_reg >= base_reg_tmp &&
		    base_reg <= top_reg_tmp)
			return 0;
		else if (base_reg > top_reg_tmp)
			new = &((*new)->rb_right);
		else if (base_reg < base_reg_tmp)
			new = &((*new)->rb_left);
	}

	/* insert the node into the rbtree */
	rb_link_node(&rbnode->node, parent, new);
	rb_insert_color(&rbnode->node, root);

	return 1;
}

#ifdef CONFIG_DEBUG_FS
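/*
 * Dump one "base-top (count)" line per block plus summary statistics,
 * including an estimate of the memory used by the cache.
 */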
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	map->lock(map->lock_arg);

	mem_size = sizeof(*rbtree_ctx);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);
		mem_size += BITS_TO_LONGS(n->blklen) * sizeof(long);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map->lock_arg);

	return 0;
}

static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}

static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
#endif
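
/*
 * Allocate the rbtree context and seed the cache with the driver's
 * register defaults by pushing each default through the normal write
 * path.
 */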
static int regcache_rbtree_init(struct regmap *map)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	int i;
	int ret;

	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
	if (!map->cache)
		return -ENOMEM;

	rbtree_ctx = map->cache;
	rbtree_ctx->root = RB_ROOT;
	rbtree_ctx->cached_rbnode = NULL;

	for (i = 0; i < map->num_reg_defaults; i++) {
		ret = regcache_rbtree_write(map,
					    map->reg_defaults[i].reg,
					    map->reg_defaults[i].def);
		if (ret)
			goto err;
	}

	return 0;

err:
	regcache_rbtree_exit(map);
	return ret;
}

static int regcache_rbtree_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbtree_node;

	/* if we've already been called then just return */
	rbtree_ctx = map->cache;
	if (!rbtree_ctx)
		return 0;

	/* free up the rbtree */
	next = rb_first(&rbtree_ctx->root);
	while (next) {
		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
		next = rb_next(&rbtree_node->node);
		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
		kfree(rbtree_node->cache_present);
		kfree(rbtree_node->block);
		kfree(rbtree_node);
	}

	/* release the resources */
	kfree(map->cache);
	map->cache = NULL;

	return 0;
}
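
/*
 * Read a cached value.  Returns -ENOENT if no block covers @reg or if
 * the register has never been written into the cache.
 */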
static int regcache_rbtree_read(struct regmap *map,
				unsigned int reg, unsigned int *value)
{
	struct regcache_rbtree_node *rbnode;
	unsigned int reg_tmp;

	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		if (!test_bit(reg_tmp, rbnode->cache_present))
			return -ENOENT;
		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
	} else {
		return -ENOENT;
	}

	return 0;
}
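
/*
 * Grow @rbnode so that it spans [base_reg, top_reg] and store @value
 * for @reg inside it.  The data block and the present bitmap are
 * krealloc'd; when the block grows downwards (pos == 0) the existing
 * data and bitmap are shifted up so the old contents land at their new
 * offset from the new base register.
 */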
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int base_reg,
					   unsigned int top_reg,
					   unsigned int reg,
					   unsigned int value)
{
	unsigned int blklen;
	unsigned int pos, offset;
	unsigned long *present;
	u8 *blk;

	blklen = (top_reg - base_reg) / map->reg_stride + 1;
	pos = (reg - base_reg) / map->reg_stride;
	offset = (rbnode->base_reg - base_reg) / map->reg_stride;

	blk = krealloc(rbnode->block,
		       blklen * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/*
	 * krealloc() may have freed and moved the old block, so update
	 * the node immediately; otherwise a failure below would leave
	 * rbnode->block dangling and regcache_rbtree_exit() would free
	 * it again.
	 */
	rbnode->block = blk;

	if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
		present = krealloc(rbnode->cache_present,
				   BITS_TO_LONGS(blklen) * sizeof(*present),
				   GFP_KERNEL);
		if (!present)
			return -ENOMEM;

		memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
		       (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
		       * sizeof(*present));
	} else {
		present = rbnode->cache_present;
	}

	/* insert the register value in the correct place in the rbnode block */
	if (pos == 0) {
		memmove(blk + offset * map->cache_word_size,
			blk, rbnode->blklen * map->cache_word_size);
		bitmap_shift_left(present, present, offset, blklen);
	}

	/* update the rbnode block, its size and the base register */
	rbnode->blklen = blklen;
	rbnode->base_reg = base_reg;
	rbnode->cache_present = present;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}
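
/*
 * Allocate a new block for @reg.  If a read table is available, size
 * the block to cover the whole yes-range containing @reg, which avoids
 * repeated reallocations later; otherwise start with a single-register
 * block.
 */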
static struct regcache_rbtree_node *
regcache_rbtree_node_alloc(struct regmap *map, unsigned int reg)
{
	struct regcache_rbtree_node *rbnode;
	const struct regmap_range *range;
	int i;

	rbnode = kzalloc(sizeof(*rbnode), GFP_KERNEL);
	if (!rbnode)
		return NULL;

	/* If there is a read table then use it to guess at an allocation */
	if (map->rd_table) {
		for (i = 0; i < map->rd_table->n_yes_ranges; i++) {
			if (regmap_reg_in_range(reg,
						&map->rd_table->yes_ranges[i]))
				break;
		}

		if (i != map->rd_table->n_yes_ranges) {
			range = &map->rd_table->yes_ranges[i];
			rbnode->blklen = (range->range_max - range->range_min) /
				map->reg_stride + 1;
			rbnode->base_reg = range->range_min;
		}
	}

	if (!rbnode->blklen) {
		rbnode->blklen = 1;
		rbnode->base_reg = reg;
	}

	rbnode->block = kmalloc_array(rbnode->blklen, map->cache_word_size,
				      GFP_KERNEL);
	if (!rbnode->block)
		goto err_free;

	rbnode->cache_present = kcalloc(BITS_TO_LONGS(rbnode->blklen),
					sizeof(*rbnode->cache_present),
					GFP_KERNEL);
	if (!rbnode->cache_present)
		goto err_free_block;

	return rbnode;

err_free_block:
	kfree(rbnode->block);
err_free:
	kfree(rbnode);
	return NULL;
}
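
/*
 * Update the cached value for @reg.  If no block covers it, try to
 * extend the closest nearby block rather than allocating a new one:
 * max_dist bounds how far away a block may be before the padding
 * needed to reach it would roughly exceed the memory cost of a new
 * node.  Only if no candidate is found is a fresh block allocated.
 */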
static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
				 unsigned int value)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
	struct rb_node *node;
	unsigned int reg_tmp;
	int ret;

	rbtree_ctx = map->cache;

	/* if we can't locate it in the cached rbnode we'll have
	 * to traverse the rbtree looking for it.
	 */
	rbnode = regcache_rbtree_lookup(map, reg);
	if (rbnode) {
		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
	} else {
		unsigned int base_reg, top_reg;
		unsigned int new_base_reg, new_top_reg;
		unsigned int min, max;
		unsigned int max_dist;
		unsigned int dist, best_dist = UINT_MAX;

		max_dist = map->reg_stride * sizeof(*rbnode_tmp) /
			map->cache_word_size;
		if (reg < max_dist)
			min = 0;
		else
			min = reg - max_dist;
		max = reg + max_dist;

		/* look for an adjacent register to the one we are about to add */
		node = rbtree_ctx->root.rb_node;
		while (node) {
			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
					      node);

			regcache_rbtree_get_base_top_reg(map, rbnode_tmp,
							 &base_reg, &top_reg);

			if (base_reg <= max && top_reg >= min) {
				if (reg < base_reg)
					dist = base_reg - reg;
				else if (reg > top_reg)
					dist = reg - top_reg;
				else
					dist = 0;
				if (dist < best_dist) {
					rbnode = rbnode_tmp;
					best_dist = dist;
					new_base_reg = min(reg, base_reg);
					new_top_reg = max(reg, top_reg);
				}
			}

			/*
			 * Keep looking, we want to choose the closest block,
			 * otherwise we might end up creating overlapping
			 * blocks, which breaks the rbtree.
			 */
			if (reg < base_reg)
				node = node->rb_left;
			else if (reg > top_reg)
				node = node->rb_right;
			else
				break;
		}

		if (rbnode) {
			ret = regcache_rbtree_insert_to_block(map, rbnode,
							      new_base_reg,
							      new_top_reg, reg,
							      value);
			if (ret)
				return ret;
			rbtree_ctx->cached_rbnode = rbnode;
			return 0;
		}

		/* We did not manage to find a place to insert it in
		 * an existing block so create a new rbnode.
		 */
		rbnode = regcache_rbtree_node_alloc(map, reg);
		if (!rbnode)
			return -ENOMEM;
		regcache_rbtree_set_register(map, rbnode,
					     reg - rbnode->base_reg, value);
		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
		rbtree_ctx->cached_rbnode = rbnode;
	}

	return 0;
}
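
/*
 * Write the cached registers in [min, max] back to the hardware, one
 * block at a time via regcache_sync_block(), then wait for any posted
 * asynchronous writes to complete.
 */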
static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct rb_node *node;
	struct regcache_rbtree_node *rbnode;
	unsigned int base_reg, top_reg;
	unsigned int start, end;
	int ret;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		ret = regcache_sync_block(map, rbnode->block,
					  rbnode->cache_present,
					  rbnode->base_reg, start, end);
		if (ret != 0)
			return ret;
	}

	return regmap_async_complete(map);
}
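
/*
 * Forget the cached values in [min, max] by clearing their bits in each
 * block's present bitmap; the blocks themselves stay allocated.
 */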
static int regcache_rbtree_drop(struct regmap *map, unsigned int min,
				unsigned int max)
{
	struct regcache_rbtree_ctx *rbtree_ctx;
	struct regcache_rbtree_node *rbnode;
	struct rb_node *node;
	unsigned int base_reg, top_reg;
	unsigned int start, end;

	rbtree_ctx = map->cache;
	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
		rbnode = rb_entry(node, struct regcache_rbtree_node, node);

		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
						 &top_reg);
		if (base_reg > max)
			break;
		if (top_reg < min)
			continue;

		if (min > base_reg)
			start = (min - base_reg) / map->reg_stride;
		else
			start = 0;

		if (max < top_reg)
			end = (max - base_reg) / map->reg_stride + 1;
		else
			end = rbnode->blklen;

		bitmap_clear(rbnode->cache_present, start, end - start);
	}

	return 0;
}
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = rbtree_debugfs_init,
#endif
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync,
	.drop = regcache_rbtree_drop,
};
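
/*
 * Illustrative usage (not part of this file): a driver opts into this
 * cache by setting cache_type in its regmap_config, e.g.
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 8,
 *		.max_register	= 0xff,
 *		.cache_type	= REGCACHE_RBTREE,
 *	};
 *
 * "foo_regmap_config" is a hypothetical name; the regmap core then
 * routes cache reads and writes through the ops table above.
 */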