/*
 * Generic OPP Interface
 *
 * Copyright (C) 2009-2010 Texas Instruments Incorporated.
 *	Nishanth Menon
 *	Romit Dasgupta
 *	Kevin Hilman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpufreq.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/opp.h>

/*
 * Internal data structure organization within the OPP library is as
 * follows:
 * dev_opp_list (root)
 *	|- device 1 (represents voltage domain 1)
 *	|	|- opp 1 (availability, freq, voltage)
 *	|	|- opp 2 ..
 *	...	...
 *	|	`- opp n ..
 *	|- device 2 (represents the next voltage domain)
 *	...
 *	`- device m (represents mth voltage domain)
 * device 1, 2.. are represented by the device_opp structure, while each opp
 * is represented by the opp structure.
 */

/**
 * struct opp - Generic OPP description structure
 * @node:	opp list node. The nodes are maintained throughout the lifetime
 *		of boot. It is expected that only an optimal set of OPPs is
 *		added to the library by the SoC framework.
 *		RCU usage: the opp list is traversed with RCU locks. Nodes
 *		may be modified at runtime, hence the modifications are
 *		protected by the dev_opp_list_lock for integrity.
 *		IMPORTANT: the opp nodes should be maintained in increasing
 *		order of frequency.
 * @available:	true/false - marks if this OPP is available or not
 * @rate:	Frequency in hertz
 * @u_volt:	Nominal voltage in microvolts corresponding to this OPP
 * @dev_opp:	points back to the device_opp struct this opp belongs to
 *
 * This structure stores the OPP information for a given device.
 */
struct opp {
	struct list_head node;

	bool available;
	unsigned long rate;
	unsigned long u_volt;

	struct device_opp *dev_opp;
};

/**
 * struct device_opp - Device opp structure
 * @node:	list node - contains the devices with OPPs that
 *		have been registered. Nodes once added are not modified in this
 *		list.
 *		RCU usage: nodes are not modified in the list of device_opp,
 *		however addition is possible and is secured by dev_opp_list_lock
 * @dev:	device pointer
 * @head:	notifier head to notify the OPP availability changes.
 * @opp_list:	list of opps
 *
 * This is an internal data structure maintaining the link to opps attached to
 * a device. This structure is not meant to be shared with users as it is
 * meant for bookkeeping and is private to the OPP library.
 */
struct device_opp {
	struct list_head node;

	struct device *dev;
	struct srcu_notifier_head head;
	struct list_head opp_list;
};

/*
 * The root of the list of all devices. All device_opp structures branch off
 * from here, with each device_opp containing the list of opps it supports in
 * various states of availability.
 */
static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);

/**
 * find_device_opp() - find device_opp struct using device pointer
 * @dev:	device pointer used to lookup device OPPs
 *
 * Search the list of device OPPs for the one containing the matching device.
 * Does an RCU reader operation to grab the pointer needed.
 *
 * Returns pointer to 'struct device_opp' if found, otherwise ERR_PTR(-ENODEV)
 * or ERR_PTR(-EINVAL) based on the type of error.
 *
 * Locking: This function must be called under rcu_read_lock(). device_opp
 * is an RCU protected pointer. This means that device_opp is valid as long
 * as we are under RCU lock.
 */
static struct device_opp *find_device_opp(struct device *dev)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);

	if (unlikely(IS_ERR_OR_NULL(dev))) {
		pr_err("%s: Invalid parameters\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	list_for_each_entry_rcu(tmp_dev_opp, &dev_opp_list, node) {
		if (tmp_dev_opp->dev == dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}

	return dev_opp;
}

/**
 * opp_get_voltage() - Gets the voltage corresponding to an available opp
 * @opp:	opp for which the voltage has to be returned
 *
 * Returns the voltage in microvolts corresponding to the opp, else
 * returns 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that an opp which could have been fetched by
 * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used within the same RCU read-side section as this function, i.e. prior to
 * unlocking with rcu_read_unlock(), to maintain the integrity of the pointer.
 */
unsigned long opp_get_voltage(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long v = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		v = tmp_opp->u_volt;

	return v;
}

/**
 * opp_get_freq() - Gets the frequency corresponding to an available opp
 * @opp:	opp for which the frequency has to be returned
 *
 * Returns the frequency in hertz corresponding to the opp, else
 * returns 0.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. This means that an opp which could have been fetched by
 * the opp_find_freq_{exact,ceil,floor} functions is valid as long as we are
 * under RCU lock. The pointer returned by the opp_find_freq family must be
 * used within the same RCU read-side section as this function, i.e. prior to
 * unlocking with rcu_read_unlock(), to maintain the integrity of the pointer.
 */
unsigned long opp_get_freq(struct opp *opp)
{
	struct opp *tmp_opp;
	unsigned long f = 0;

	tmp_opp = rcu_dereference(opp);
	if (unlikely(IS_ERR_OR_NULL(tmp_opp)) || !tmp_opp->available)
		pr_err("%s: Invalid parameters\n", __func__);
	else
		f = tmp_opp->rate;

	return f;
}

/**
 * opp_get_opp_count() - Get number of opps available in the opp list
 * @dev:	device for which we do this operation
 *
 * This function returns the number of available opps if there are any,
 * 0 if there are none, or a corresponding error value if the device's
 * OPP list cannot be found.
 *
 * Locking: This function must be called under rcu_read_lock(). This function
 * internally references two RCU protected structures: device_opp and opp,
 * which are safe as long as we are under a common RCU locked section.
 */
int opp_get_opp_count(struct device *dev)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp;
	int count = 0;

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return r;
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available)
			count++;
	}

	return count;
}
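
/*
 * Illustrative sketch (not part of the library): a typical caller holds
 * rcu_read_lock() around the call, e.g. to size a table before building it.
 * The device pointer and variable names below are hypothetical.
 *
 *	int num_opps;
 *
 *	rcu_read_lock();
 *	num_opps = opp_get_opp_count(dev);
 *	rcu_read_unlock();
 *	if (num_opps <= 0)
 *		return num_opps ? num_opps : -ENODATA;
 */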

/**
 * opp_find_freq_exact() - search for an exact frequency
 * @dev:		device for which we do this operation
 * @freq:		frequency to search for
 * @available:		true/false - match for available opp
 *
 * Searches for an exact match in the opp list and returns a pointer to the
 * matching opp if found, else returns ERR_PTR in case of error, which should
 * be handled using IS_ERR.
 *
 * Note: available is a modifier for the search. If available=true, then the
 * match is for an exact matching frequency which is available in the stored
 * OPP table. If false, the match is for an exact frequency which is not
 * available.
 *
 * This provides a mechanism to enable an opp which is not available currently
 * or the opposite as well.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for this is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq,
				bool available)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
		return ERR_PTR(r);
	}

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available == available &&
				temp_opp->rate == freq) {
			opp = temp_opp;
			break;
		}
	}

	return opp;
}

/**
 * opp_find_freq_ceil() - Search for a rounded ceil freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching ceil *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error, which should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for this is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return opp;

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available && temp_opp->rate >= *freq) {
			opp = temp_opp;
			*freq = opp->rate;
			break;
		}
	}

	return opp;
}
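
/*
 * Illustrative sketch (not part of the library): the expected read-side
 * pattern for picking a target OPP keeps the returned pointer inside one
 * RCU section. The caller below is hypothetical and 'target_freq' is an
 * assumed variable.
 *
 *	unsigned long target_freq = ...;
 *	unsigned long volt;
 *	struct opp *opp;
 *
 *	rcu_read_lock();
 *	opp = opp_find_freq_ceil(dev, &target_freq);
 *	if (IS_ERR(opp)) {
 *		rcu_read_unlock();
 *		return PTR_ERR(opp);
 *	}
 *	volt = opp_get_voltage(opp);
 *	rcu_read_unlock();
 *
 * Both 'volt' and the (possibly rounded up) 'target_freq' may then be handed
 * to the regulator and clock frameworks outside the RCU section.
 */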

/**
 * opp_find_freq_floor() - Search for a rounded floor freq
 * @dev:	device for which we do this operation
 * @freq:	Start frequency
 *
 * Search for the matching floor *available* OPP from a starting freq
 * for a device.
 *
 * Returns matching *opp and refreshes *freq accordingly, else returns
 * ERR_PTR in case of error, which should be handled using IS_ERR.
 *
 * Locking: This function must be called under rcu_read_lock(). opp is an RCU
 * protected pointer. The reason for this is that the opp pointer which is
 * returned will remain valid for use with opp_get_{voltage, freq} only while
 * under the locked area. The pointer returned must be used prior to unlocking
 * with rcu_read_unlock() to maintain the integrity of the pointer.
 */
struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq)
{
	struct device_opp *dev_opp;
	struct opp *temp_opp, *opp = ERR_PTR(-ENODEV);

	if (!dev || !freq) {
		dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
		return ERR_PTR(-EINVAL);
	}

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp))
		return opp;

	list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
		if (temp_opp->available) {
			/* go to the next node, before choosing prev */
			if (temp_opp->rate > *freq)
				break;
			else
				opp = temp_opp;
		}
	}
	if (!IS_ERR(opp))
		*freq = opp->rate;

	return opp;
}
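
/*
 * Illustrative sketch (not part of the library): repeatedly calling the
 * floor/ceil helpers is the usual way to walk every available OPP. The
 * descending walk below assumes 'dev', 'freq' and 'opp' are caller-provided.
 *
 *	unsigned long freq;
 *	struct opp *opp;
 *
 *	rcu_read_lock();
 *	for (freq = ULONG_MAX;
 *	     !IS_ERR(opp = opp_find_freq_floor(dev, &freq)); freq--) {
 *		unsigned long volt = opp_get_voltage(opp);
 *		dev_dbg(dev, "OPP: %lu Hz at %lu uV\n", freq, volt);
 *	}
 *	rcu_read_unlock();
 */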

/**
 * opp_add() - Add an OPP to the device's OPP table
 * @dev:	device for which we do this operation
 * @freq:	Frequency in Hz for this OPP
 * @u_volt:	Voltage in uVolts for this OPP
 *
 * This function adds an opp definition to the opp list and returns status.
 * The opp is made available by default and it can be controlled using
 * the opp_enable/disable functions.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks
 * to keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex cannot be locked.
 */
int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt)
{
	struct device_opp *dev_opp = NULL;
	struct opp *opp, *new_opp;
	struct list_head *head;

	/* allocate new OPP node */
	new_opp = kzalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create new OPP node\n", __func__);
		return -ENOMEM;
	}

	/* Hold our list modification lock here */
	mutex_lock(&dev_opp_list_lock);

	/* Check for existing list for 'dev' */
	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		/*
		 * Allocate a new device OPP table. In the infrequent case
		 * where a new device is needed to be added, we pay this
		 * penalty.
		 */
		dev_opp = kzalloc(sizeof(struct device_opp), GFP_KERNEL);
		if (!dev_opp) {
			mutex_unlock(&dev_opp_list_lock);
			kfree(new_opp);
			dev_warn(dev,
				"%s: Unable to create device OPP structure\n",
				__func__);
			return -ENOMEM;
		}

		dev_opp->dev = dev;
		srcu_init_notifier_head(&dev_opp->head);
		INIT_LIST_HEAD(&dev_opp->opp_list);

		/* Secure the device list modification */
		list_add_rcu(&dev_opp->node, &dev_opp_list);
	}

	/* populate the opp table */
	new_opp->dev_opp = dev_opp;
	new_opp->rate = freq;
	new_opp->u_volt = u_volt;
	new_opp->available = true;

	/* Insert new OPP in order of increasing frequency */
	head = &dev_opp->opp_list;
	list_for_each_entry_rcu(opp, &dev_opp->opp_list, node) {
		if (new_opp->rate < opp->rate)
			break;
		else
			head = &opp->node;
	}

	list_add_rcu(&new_opp->node, head);
	mutex_unlock(&dev_opp_list_lock);

	/*
	 * Notify the changes in the availability of the operable
	 * frequency/voltage list.
	 */
	srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ADD, new_opp);
	return 0;
}
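
/*
 * Illustrative sketch (not part of the library): the SoC framework would
 * typically register its OPPs once during initialization. The device pointer
 * and the frequency/voltage pairs below are made-up example values.
 *
 *	static int __init soc_register_opps(struct device *mpu_dev)
 *	{
 *		int ret;
 *
 *		ret = opp_add(mpu_dev, 300000000, 1012500);
 *		if (!ret)
 *			ret = opp_add(mpu_dev, 600000000, 1200000);
 *		if (!ret)
 *			ret = opp_add(mpu_dev, 800000000, 1325000);
 *		return ret;
 *	}
 */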

/**
 * opp_set_availability() - helper to set the availability of an opp
 * @dev:		device for which we do this operation
 * @freq:		OPP frequency to modify availability
 * @availability_req:	availability status requested for this opp
 *
 * Set the availability of an OPP with an RCU operation; opp_{enable,disable}
 * share a common logic which is isolated here.
 *
 * Returns -EINVAL for bad pointers, -ENOMEM if no memory is available for the
 * copy operation, and returns 0 if no modification was needed OR the
 * modification was successful.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function internally uses RCU updater strategy with mutex locks to
 * keep the integrity of the internal data structures. Callers should ensure
 * that this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
static int opp_set_availability(struct device *dev, unsigned long freq,
		bool availability_req)
{
	struct device_opp *tmp_dev_opp, *dev_opp = ERR_PTR(-ENODEV);
	struct opp *new_opp, *tmp_opp, *opp = ERR_PTR(-ENODEV);
	int r = 0;

	/* keep the node allocated */
	new_opp = kmalloc(sizeof(struct opp), GFP_KERNEL);
	if (!new_opp) {
		dev_warn(dev, "%s: Unable to create OPP\n", __func__);
		return -ENOMEM;
	}

	mutex_lock(&dev_opp_list_lock);

	/* Find the device_opp */
	list_for_each_entry(tmp_dev_opp, &dev_opp_list, node) {
		if (dev == tmp_dev_opp->dev) {
			dev_opp = tmp_dev_opp;
			break;
		}
	}
	if (IS_ERR(dev_opp)) {
		r = PTR_ERR(dev_opp);
		dev_warn(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		goto unlock;
	}

	/* Do we have the frequency? */
	list_for_each_entry(tmp_opp, &dev_opp->opp_list, node) {
		if (tmp_opp->rate == freq) {
			opp = tmp_opp;
			break;
		}
	}
	if (IS_ERR(opp)) {
		r = PTR_ERR(opp);
		goto unlock;
	}

	/* Is update really needed? */
	if (opp->available == availability_req)
		goto unlock;
	/* copy the old data over */
	*new_opp = *opp;

	/* plug in new node */
	new_opp->available = availability_req;

	list_replace_rcu(&opp->node, &new_opp->node);
	mutex_unlock(&dev_opp_list_lock);
	synchronize_rcu();

	/* Notify the change of the OPP availability */
	if (availability_req)
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_ENABLE,
					 new_opp);
	else
		srcu_notifier_call_chain(&dev_opp->head, OPP_EVENT_DISABLE,
					 new_opp);

	/* clean up old opp */
	new_opp = opp;
	goto out;

unlock:
	mutex_unlock(&dev_opp_list_lock);
out:
	kfree(new_opp);
	return r;
}

/**
 * opp_enable() - Enable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to enable
 *
 * Enables a provided opp. If the operation is valid, this returns 0, else the
 * corresponding error value. It is meant to be used by users wanting to make
 * an OPP available again after it has been temporarily made unavailable with
 * opp_disable.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_enable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, true);
}

/**
 * opp_disable() - Disable a specific OPP
 * @dev:	device for which we do this operation
 * @freq:	OPP frequency to disable
 *
 * Disables a provided opp. If the operation is valid, this returns
 * 0, else the corresponding error value. It is meant to be a temporary
 * control by users to make this OPP not available until the circumstances are
 * right to make it available again (with a call to opp_enable).
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * Hence this function indirectly uses RCU and mutex locks to keep the
 * integrity of the internal data structures. Callers should ensure that
 * this function is *NOT* called under RCU protection or in contexts where
 * mutex locking or synchronize_rcu() blocking calls cannot be used.
 */
int opp_disable(struct device *dev, unsigned long freq)
{
	return opp_set_availability(dev, freq, false);
}
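
/*
 * Illustrative sketch (not part of the library): a platform might disable
 * its highest OPP when a thermal alert fires and re-enable it once the
 * alert clears. The 1 GHz frequency and 'mpu_dev' pointer are assumptions.
 *
 *	static void soc_thermal_alert(struct device *mpu_dev, bool overheating)
 *	{
 *		if (overheating)
 *			opp_disable(mpu_dev, 1000000000);
 *		else
 *			opp_enable(mpu_dev, 1000000000);
 *	}
 */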

#ifdef CONFIG_CPU_FREQ
/**
 * opp_init_cpufreq_table() - create a cpufreq table for a device
 * @dev:	device for which we do this operation
 * @table:	Cpufreq table returned back to caller
 *
 * Generate a cpufreq table for a provided device - this assumes that the
 * opp list is already initialized and ready for usage.
 *
 * This function allocates the required memory for the cpufreq table. It is
 * expected that the caller does the required maintenance such as freeing
 * the table as required.
 *
 * Returns -EINVAL for bad pointers, -ENODEV if the device is not found,
 * -ENOMEM if no memory is available for the operation (table is not
 * populated), and returns 0 if successful and the table is populated.
 *
 * WARNING: It is important for the callers to refresh their copy of the table
 * if any OPPs are added or their availability is changed (with
 * opp_add/opp_enable/opp_disable) in the interim.
 *
 * Locking: The internal device_opp and opp structures are RCU protected.
 * To simplify the logic, we pretend we are the updater and hold the relevant
 * mutex here. Callers should ensure that this function is *NOT* called under
 * RCU protection or in contexts where mutex locking cannot be used.
 */
int opp_init_cpufreq_table(struct device *dev,
			    struct cpufreq_frequency_table **table)
{
	struct device_opp *dev_opp;
	struct opp *opp;
	struct cpufreq_frequency_table *freq_table;
	int i = 0;

	/* Pretend as if I am an updater */
	mutex_lock(&dev_opp_list_lock);

	dev_opp = find_device_opp(dev);
	if (IS_ERR(dev_opp)) {
		int r = PTR_ERR(dev_opp);
		mutex_unlock(&dev_opp_list_lock);
		dev_err(dev, "%s: Device OPP not found (%d)\n", __func__, r);
		return r;
	}

	freq_table = kzalloc(sizeof(struct cpufreq_frequency_table) *
			     (opp_get_opp_count(dev) + 1), GFP_KERNEL);
	if (!freq_table) {
		mutex_unlock(&dev_opp_list_lock);
		dev_warn(dev, "%s: Unable to allocate frequency table\n",
			__func__);
		return -ENOMEM;
	}

	list_for_each_entry(opp, &dev_opp->opp_list, node) {
		if (opp->available) {
			freq_table[i].index = i;
			freq_table[i].frequency = opp->rate / 1000;
			i++;
		}
	}
	mutex_unlock(&dev_opp_list_lock);

	freq_table[i].index = i;
	freq_table[i].frequency = CPUFREQ_TABLE_END;

	*table = &freq_table[0];

	return 0;
}
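
/*
 * Illustrative sketch (not part of the library): a cpufreq driver's ->init()
 * callback would typically build its table from the OPP list and hand it to
 * the cpufreq core. 'mpu_dev' and 'freq_table' are assumed to be driver
 * globals; error handling is trimmed for brevity.
 *
 *	static int soc_cpufreq_init(struct cpufreq_policy *policy)
 *	{
 *		int ret;
 *
 *		ret = opp_init_cpufreq_table(mpu_dev, &freq_table);
 *		if (ret)
 *			return ret;
 *
 *		ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
 *		if (ret)
 *			opp_free_cpufreq_table(mpu_dev, &freq_table);
 *		return ret;
 *	}
 */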

/**
 * opp_free_cpufreq_table() - free the cpufreq table
 * @dev:	device for which we do this operation
 * @table:	table to free
 *
 * Free up the table allocated by opp_init_cpufreq_table
 */
void opp_free_cpufreq_table(struct device *dev,
				struct cpufreq_frequency_table **table)
{
	if (!table)
		return;

	kfree(*table);
	*table = NULL;
}
#endif		/* CONFIG_CPU_FREQ */

/**
 * opp_get_notifier() - find notifier_head of the device with opp
 * @dev:	device pointer used to lookup device OPPs.
 */
struct srcu_notifier_head *opp_get_notifier(struct device *dev)
{
	struct device_opp *dev_opp = find_device_opp(dev);

	if (IS_ERR(dev_opp))
		return ERR_CAST(dev_opp); /* matching type */

	return &dev_opp->head;
}
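
/*
 * Illustrative sketch (not part of the library): a consumer interested in
 * OPP_EVENT_* notifications would register a notifier_block on the head
 * returned above. The callback and device pointer below are hypothetical.
 *
 *	static int soc_opp_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		pr_info("OPP list event %lu\n", event);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block soc_opp_nb = {
 *		.notifier_call = soc_opp_notify,
 *	};
 *
 *	struct srcu_notifier_head *nh = opp_get_notifier(dev);
 *
 *	if (!IS_ERR(nh))
 *		srcu_notifier_chain_register(nh, &soc_opp_nb);
 */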