/* regcache-rbtree.c — revision 137b833457864091610ca01d7443a67028a2b3ce */
/*
 * Register cache access API - rbtree caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12
13#include <linux/slab.h>
14#include <linux/device.h>
15#include <linux/debugfs.h>
16#include <linux/rbtree.h>
17#include <linux/seq_file.h>
18
19#include "internal.h"
20
21static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
22				 unsigned int value);
23static int regcache_rbtree_exit(struct regmap *map);
24
25struct regcache_rbtree_node {
26	/* the actual rbtree node holding this block */
27	struct rb_node node;
28	/* base register handled by this block */
29	unsigned int base_reg;
30	/* block of adjacent registers */
31	void *block;
32	/* number of registers available in the block */
33	unsigned int blklen;
34} __attribute__ ((packed));
35
/*
 * Per-map cache context: the rbtree of register blocks, a one-entry
 * lookup cache of the most recently hit node, and a lazily-grown bitmap
 * recording which registers actually hold a cached value.
 */
struct regcache_rbtree_ctx {
	struct rb_root root;
	/* most recently accessed node; probed first by lookup */
	struct regcache_rbtree_node *cached_rbnode;
	/* bitmap of registers present in the cache (may be NULL initially) */
	unsigned long *reg_present;
	/* number of valid bits in reg_present */
	unsigned int reg_present_nbits;
};
42
43static inline void regcache_rbtree_get_base_top_reg(
44	struct regmap *map,
45	struct regcache_rbtree_node *rbnode,
46	unsigned int *base, unsigned int *top)
47{
48	*base = rbnode->base_reg;
49	*top = rbnode->base_reg + ((rbnode->blklen - 1) * map->reg_stride);
50}
51
/* Fetch the cached value stored at block index @idx of @rbnode. */
static unsigned int regcache_rbtree_get_register(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val(map, rbnode->block, idx);
}
57
/*
 * Return the address of the raw cached value at block index @idx of
 * @rbnode (used for raw writes during sync).
 */
static const void *regcache_rbtree_get_reg_addr(struct regmap *map,
	struct regcache_rbtree_node *rbnode, unsigned int idx)
{
	return regcache_get_val_addr(map, rbnode->block, idx);
}
63
/* Store @val at block index @idx of @rbnode's register block. */
static void regcache_rbtree_set_register(struct regmap *map,
					 struct regcache_rbtree_node *rbnode,
					 unsigned int idx, unsigned int val)
{
	regcache_set_val(map, rbnode->block, idx, val);
}
70
71static struct regcache_rbtree_node *regcache_rbtree_lookup(struct regmap *map,
72							   unsigned int reg)
73{
74	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
75	struct rb_node *node;
76	struct regcache_rbtree_node *rbnode;
77	unsigned int base_reg, top_reg;
78
79	rbnode = rbtree_ctx->cached_rbnode;
80	if (rbnode) {
81		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
82						 &top_reg);
83		if (reg >= base_reg && reg <= top_reg)
84			return rbnode;
85	}
86
87	node = rbtree_ctx->root.rb_node;
88	while (node) {
89		rbnode = container_of(node, struct regcache_rbtree_node, node);
90		regcache_rbtree_get_base_top_reg(map, rbnode, &base_reg,
91						 &top_reg);
92		if (reg >= base_reg && reg <= top_reg) {
93			rbtree_ctx->cached_rbnode = rbnode;
94			return rbnode;
95		} else if (reg > top_reg) {
96			node = node->rb_right;
97		} else if (reg < base_reg) {
98			node = node->rb_left;
99		}
100	}
101
102	return NULL;
103}
104
105static int regcache_rbtree_insert(struct regmap *map, struct rb_root *root,
106				  struct regcache_rbtree_node *rbnode)
107{
108	struct rb_node **new, *parent;
109	struct regcache_rbtree_node *rbnode_tmp;
110	unsigned int base_reg_tmp, top_reg_tmp;
111	unsigned int base_reg;
112
113	parent = NULL;
114	new = &root->rb_node;
115	while (*new) {
116		rbnode_tmp = container_of(*new, struct regcache_rbtree_node,
117					  node);
118		/* base and top registers of the current rbnode */
119		regcache_rbtree_get_base_top_reg(map, rbnode_tmp, &base_reg_tmp,
120						 &top_reg_tmp);
121		/* base register of the rbnode to be added */
122		base_reg = rbnode->base_reg;
123		parent = *new;
124		/* if this register has already been inserted, just return */
125		if (base_reg >= base_reg_tmp &&
126		    base_reg <= top_reg_tmp)
127			return 0;
128		else if (base_reg > top_reg_tmp)
129			new = &((*new)->rb_right);
130		else if (base_reg < base_reg_tmp)
131			new = &((*new)->rb_left);
132	}
133
134	/* insert the node into the rbtree */
135	rb_link_node(&rbnode->node, parent, new);
136	rb_insert_color(&rbnode->node, root);
137
138	return 1;
139}
140
141#ifdef CONFIG_DEBUG_FS
/*
 * seq_file show callback for the debugfs "rbtree" entry: prints one
 * "base-top (nregs)" line per cached block plus a summary line with the
 * node count, register count, average registers per node and an estimate
 * of the memory consumed by the cache (context + bitmap + blocks).
 */
static int rbtree_show(struct seq_file *s, void *ignored)
{
	struct regmap *map = s->private;
	struct regcache_rbtree_ctx *rbtree_ctx = map->cache;
	struct regcache_rbtree_node *n;
	struct rb_node *node;
	unsigned int base, top;
	size_t mem_size;
	int nodes = 0;
	int registers = 0;
	int this_registers, average;

	/* hold the map lock so the tree cannot change while we walk it */
	map->lock(map);

	/* context struct plus the reg_present bitmap words */
	mem_size = sizeof(*rbtree_ctx);
	mem_size += BITS_TO_LONGS(rbtree_ctx->reg_present_nbits) * sizeof(long);

	for (node = rb_first(&rbtree_ctx->root); node != NULL;
	     node = rb_next(node)) {
		n = container_of(node, struct regcache_rbtree_node, node);
		/* each node costs its header plus its value block */
		mem_size += sizeof(*n);
		mem_size += (n->blklen * map->cache_word_size);

		regcache_rbtree_get_base_top_reg(map, n, &base, &top);
		this_registers = ((top - base) / map->reg_stride) + 1;
		seq_printf(s, "%x-%x (%d)\n", base, top, this_registers);

		nodes++;
		registers += this_registers;
	}

	if (nodes)
		average = registers / nodes;
	else
		average = 0;

	seq_printf(s, "%d nodes, %d registers, average %d registers, used %zu bytes\n",
		   nodes, registers, average, mem_size);

	map->unlock(map);

	return 0;
}
185
/* Open the seq_file, binding it to the regmap stashed in i_private. */
static int rbtree_open(struct inode *inode, struct file *file)
{
	return single_open(file, rbtree_show, inode->i_private);
}
190
/* Read-only seq_file operations for the debugfs "rbtree" entry. */
static const struct file_operations rbtree_fops = {
	.open		= rbtree_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
197
/* Create the read-only (0400) debugfs "rbtree" dump file for this map. */
static void rbtree_debugfs_init(struct regmap *map)
{
	debugfs_create_file("rbtree", 0400, map->debugfs, map, &rbtree_fops);
}
202#else
/* Stub used when debugfs support is compiled out. */
static void rbtree_debugfs_init(struct regmap *map)
{
}
206#endif
207
208static int enlarge_reg_present_bitmap(struct regmap *map, unsigned int reg)
209{
210	struct regcache_rbtree_ctx *rbtree_ctx;
211	unsigned long *reg_present;
212	unsigned int reg_present_size;
213	unsigned int nregs;
214	int i;
215
216	rbtree_ctx = map->cache;
217	nregs = reg + 1;
218	reg_present_size = BITS_TO_LONGS(nregs);
219	reg_present_size *= sizeof(long);
220
221	if (!rbtree_ctx->reg_present) {
222		reg_present = kmalloc(reg_present_size, GFP_KERNEL);
223		if (!reg_present)
224			return -ENOMEM;
225		bitmap_zero(reg_present, nregs);
226		rbtree_ctx->reg_present = reg_present;
227		rbtree_ctx->reg_present_nbits = nregs;
228		return 0;
229	}
230
231	if (nregs > rbtree_ctx->reg_present_nbits) {
232		reg_present = krealloc(rbtree_ctx->reg_present,
233				       reg_present_size, GFP_KERNEL);
234		if (!reg_present)
235			return -ENOMEM;
236		for (i = 0; i < nregs; i++)
237			if (i >= rbtree_ctx->reg_present_nbits)
238				clear_bit(i, reg_present);
239		rbtree_ctx->reg_present = reg_present;
240		rbtree_ctx->reg_present_nbits = nregs;
241	}
242
243	return 0;
244}
245
246static int regcache_rbtree_init(struct regmap *map)
247{
248	struct regcache_rbtree_ctx *rbtree_ctx;
249	int i;
250	int ret;
251
252	map->cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
253	if (!map->cache)
254		return -ENOMEM;
255
256	rbtree_ctx = map->cache;
257	rbtree_ctx->root = RB_ROOT;
258	rbtree_ctx->cached_rbnode = NULL;
259	rbtree_ctx->reg_present = NULL;
260	rbtree_ctx->reg_present_nbits = 0;
261
262	for (i = 0; i < map->num_reg_defaults; i++) {
263		ret = regcache_rbtree_write(map,
264					    map->reg_defaults[i].reg,
265					    map->reg_defaults[i].def);
266		if (ret)
267			goto err;
268	}
269
270	rbtree_debugfs_init(map);
271
272	return 0;
273
274err:
275	regcache_rbtree_exit(map);
276	return ret;
277}
278
279static int regcache_rbtree_exit(struct regmap *map)
280{
281	struct rb_node *next;
282	struct regcache_rbtree_ctx *rbtree_ctx;
283	struct regcache_rbtree_node *rbtree_node;
284
285	/* if we've already been called then just return */
286	rbtree_ctx = map->cache;
287	if (!rbtree_ctx)
288		return 0;
289
290	kfree(rbtree_ctx->reg_present);
291
292	/* free up the rbtree */
293	next = rb_first(&rbtree_ctx->root);
294	while (next) {
295		rbtree_node = rb_entry(next, struct regcache_rbtree_node, node);
296		next = rb_next(&rbtree_node->node);
297		rb_erase(&rbtree_node->node, &rbtree_ctx->root);
298		kfree(rbtree_node->block);
299		kfree(rbtree_node);
300	}
301
302	/* release the resources */
303	kfree(map->cache);
304	map->cache = NULL;
305
306	return 0;
307}
308
309static int regcache_reg_present(struct regmap *map, unsigned int reg)
310{
311	struct regcache_rbtree_ctx *rbtree_ctx;
312
313	rbtree_ctx = map->cache;
314	if (!(rbtree_ctx->reg_present[BIT_WORD(reg)] & BIT_MASK(reg)))
315		return 0;
316	return 1;
317
318}
319
320static int regcache_rbtree_read(struct regmap *map,
321				unsigned int reg, unsigned int *value)
322{
323	struct regcache_rbtree_node *rbnode;
324	unsigned int reg_tmp;
325
326	rbnode = regcache_rbtree_lookup(map, reg);
327	if (rbnode) {
328		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
329		if (!regcache_reg_present(map, reg))
330			return -ENOENT;
331		*value = regcache_rbtree_get_register(map, rbnode, reg_tmp);
332	} else {
333		return -ENOENT;
334	}
335
336	return 0;
337}
338
339
/*
 * Grow @rbnode's register block by one slot and place @value for @reg at
 * block index @pos, shifting later entries up by one.  If @pos is 0 the
 * node's base register moves down to @reg (prepend).  Returns 0 on
 * success or -ENOMEM; on failure the node is untouched (a failed
 * krealloc leaves the old block valid).
 */
static int regcache_rbtree_insert_to_block(struct regmap *map,
					   struct regcache_rbtree_node *rbnode,
					   unsigned int pos, unsigned int reg,
					   unsigned int value)
{
	u8 *blk;

	blk = krealloc(rbnode->block,
		       (rbnode->blklen + 1) * map->cache_word_size,
		       GFP_KERNEL);
	if (!blk)
		return -ENOMEM;

	/* insert the register value in the correct place in the rbnode block */
	memmove(blk + (pos + 1) * map->cache_word_size,
		blk + pos * map->cache_word_size,
		(rbnode->blklen - pos) * map->cache_word_size);

	/* update the rbnode block, its size and the base register */
	rbnode->block = blk;
	rbnode->blklen++;
	if (!pos)
		rbnode->base_reg = reg;

	regcache_rbtree_set_register(map, rbnode, pos, value);
	return 0;
}
367
368static int regcache_rbtree_write(struct regmap *map, unsigned int reg,
369				 unsigned int value)
370{
371	struct regcache_rbtree_ctx *rbtree_ctx;
372	struct regcache_rbtree_node *rbnode, *rbnode_tmp;
373	struct rb_node *node;
374	unsigned int reg_tmp;
375	unsigned int pos;
376	int i;
377	int ret;
378
379	rbtree_ctx = map->cache;
380	/* update the reg_present bitmap, make space if necessary */
381	ret = enlarge_reg_present_bitmap(map, reg);
382	if (ret < 0)
383		return ret;
384	set_bit(reg, rbtree_ctx->reg_present);
385
386	/* if we can't locate it in the cached rbnode we'll have
387	 * to traverse the rbtree looking for it.
388	 */
389	rbnode = regcache_rbtree_lookup(map, reg);
390	if (rbnode) {
391		reg_tmp = (reg - rbnode->base_reg) / map->reg_stride;
392		regcache_rbtree_set_register(map, rbnode, reg_tmp, value);
393	} else {
394		/* look for an adjacent register to the one we are about to add */
395		for (node = rb_first(&rbtree_ctx->root); node;
396		     node = rb_next(node)) {
397			rbnode_tmp = rb_entry(node, struct regcache_rbtree_node,
398					      node);
399			for (i = 0; i < rbnode_tmp->blklen; i++) {
400				reg_tmp = rbnode_tmp->base_reg +
401						(i * map->reg_stride);
402				if (abs(reg_tmp - reg) != map->reg_stride)
403					continue;
404				/* decide where in the block to place our register */
405				if (reg_tmp + map->reg_stride == reg)
406					pos = i + 1;
407				else
408					pos = i;
409				ret = regcache_rbtree_insert_to_block(map,
410								      rbnode_tmp,
411								      pos, reg,
412								      value);
413				if (ret)
414					return ret;
415				rbtree_ctx->cached_rbnode = rbnode_tmp;
416				return 0;
417			}
418		}
419		/* we did not manage to find a place to insert it in an existing
420		 * block so create a new rbnode with a single register in its block.
421		 * This block will get populated further if any other adjacent
422		 * registers get modified in the future.
423		 */
424		rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
425		if (!rbnode)
426			return -ENOMEM;
427		rbnode->blklen = sizeof(*rbnode);
428		rbnode->base_reg = reg;
429		rbnode->block = kmalloc(rbnode->blklen * map->cache_word_size,
430					GFP_KERNEL);
431		if (!rbnode->block) {
432			kfree(rbnode);
433			return -ENOMEM;
434		}
435		regcache_rbtree_set_register(map, rbnode, 0, value);
436		regcache_rbtree_insert(map, &rbtree_ctx->root, rbnode);
437		rbtree_ctx->cached_rbnode = rbnode;
438	}
439
440	return 0;
441}
442
443static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
444				unsigned int max)
445{
446	struct regcache_rbtree_ctx *rbtree_ctx;
447	struct rb_node *node;
448	struct regcache_rbtree_node *rbnode;
449	unsigned int regtmp;
450	unsigned int val;
451	const void *addr;
452	int ret;
453	int i, base, end;
454
455	rbtree_ctx = map->cache;
456	for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
457		rbnode = rb_entry(node, struct regcache_rbtree_node, node);
458
459		if (rbnode->base_reg < min)
460			continue;
461		if (rbnode->base_reg > max)
462			break;
463		if (rbnode->base_reg + rbnode->blklen < min)
464			continue;
465
466		if (min > rbnode->base_reg)
467			base = min - rbnode->base_reg;
468		else
469			base = 0;
470
471		if (max < rbnode->base_reg + rbnode->blklen)
472			end = rbnode->base_reg + rbnode->blklen - max;
473		else
474			end = rbnode->blklen;
475
476		for (i = base; i < end; i++) {
477			regtmp = rbnode->base_reg + (i * map->reg_stride);
478
479			if (!regcache_reg_present(map, regtmp))
480				continue;
481
482			val = regcache_rbtree_get_register(map, rbnode, i);
483
484			/* Is this the hardware default?  If so skip. */
485			ret = regcache_lookup_reg(map, regtmp);
486			if (ret >= 0 && val == map->reg_defaults[ret].def)
487				continue;
488
489			map->cache_bypass = 1;
490
491			if (regmap_can_raw_write(map)) {
492				addr = regcache_rbtree_get_reg_addr(map,
493								    rbnode, i);
494				ret = _regmap_raw_write(map, regtmp, addr,
495							map->format.val_bytes,
496							false);
497			} else {
498				ret = _regmap_write(map, regtmp, val);
499			}
500
501			map->cache_bypass = 0;
502			if (ret)
503				return ret;
504			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
505				regtmp, val);
506		}
507	}
508
509	return 0;
510}
511
/* rbtree cache operations table, hooked up by the regcache core. */
struct regcache_ops regcache_rbtree_ops = {
	.type = REGCACHE_RBTREE,
	.name = "rbtree",
	.init = regcache_rbtree_init,
	.exit = regcache_rbtree_exit,
	.read = regcache_rbtree_read,
	.write = regcache_rbtree_write,
	.sync = regcache_rbtree_sync
};
521