#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>

#include "../iio.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "adis16209.h"

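/*
 * Software ring buffer (triggered capture) support for the ADIS16209.
 *
 * A software ring is attached to the IIO device and filled from a pollfunc
 * driven by the device trigger: each trigger event burst-reads the output
 * registers over SPI and pushes one scan (plus timestamp) into the ring.
 */
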
/**
 * adis16209_read_ring_data() - read data registers which will be placed
 *                              into the ring
 * @dev: device associated with child of actual device (iio_dev or iio_trig)
 * @rx: somewhere to pass back the values read
 */
static int adis16209_read_ring_data(struct device *dev, u8 *rx)
{
	struct spi_message msg;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct adis16209_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[ADIS16209_OUTPUTS + 1];
	int ret;
	int i;

	mutex_lock(&st->buf_lock);

	spi_message_init(&msg);

	memset(xfers, 0, sizeof(xfers));
	/*
	 * The device returns the data for a read request during the
	 * following transfer, so ADIS16209_OUTPUTS + 1 transfers are queued:
	 * the first only issues a read command and the last only clocks out
	 * the final response, hence rx_buf lags tx_buf by one transfer.
	 */
	for (i = 0; i <= ADIS16209_OUTPUTS; i++) {
		xfers[i].bits_per_word = 8;
		xfers[i].cs_change = 1;
		xfers[i].len = 2;
		xfers[i].delay_usecs = 30;
		xfers[i].tx_buf = st->tx + 2 * i;
		st->tx[2 * i]
			= ADIS16209_READ_REG(ADIS16209_SUPPLY_OUT + 2 * i);
		st->tx[2 * i + 1] = 0;
		if (i >= 1)
			xfers[i].rx_buf = rx + 2 * (i - 1);
		spi_message_add_tail(&xfers[i], &msg);
	}

	ret = spi_sync(st->us, &msg);
	if (ret)
		dev_err(&st->us->dev, "problem when burst reading\n");

	mutex_unlock(&st->buf_lock);

	return ret;
}
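
/*
 * For reference, an illustration (not functional code) of how the pipelined
 * read above lands in rx[], following directly from the loop: the response
 * collected during transfer i belongs to the command sent in transfer i - 1.
 *
 *   xfer 0: tx = read(SUPPLY_OUT)           rx = (not captured)
 *   xfer 1: tx = read(SUPPLY_OUT + 2)       rx -> rx[0..1] (SUPPLY_OUT)
 *   xfer 2: tx = read(SUPPLY_OUT + 4)       rx -> rx[2..3] (SUPPLY_OUT + 2)
 *   ...
 *   xfer N: tx = read(SUPPLY_OUT + 2 * N)   rx -> rx[2 * (N - 1)..]
 *
 * where N = ADIS16209_OUTPUTS; the command sent in the final transfer is
 * never answered and exists only to clock out the last response.
 */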

/* Whilst this makes a lot of calls to iio_sw_ring functions, it is too
 * device specific to be rolled into the core.
 */
static irqreturn_t adis16209_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct adis16209_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;

	int i = 0;
	s16 *data;
	size_t datasize = ring->access->get_bytes_per_datum(ring);

	data = kmalloc(datasize, GFP_KERNEL);
	if (data == NULL) {
		dev_err(&st->us->dev, "memory alloc failed in ring bh\n");
		goto done;
	}

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
	    adis16209_read_ring_data(&indio_dev->dev, st->rx) >= 0)
		for (; i < bitmap_weight(indio_dev->active_scan_mask,
					 indio_dev->masklength); i++)
			data[i] = be16_to_cpup((__be16 *)&(st->rx[i * 2]));

	/*
	 * Round the number of 16-bit samples up to a multiple of four so the
	 * s64 timestamp is guaranteed to land on an 8 byte boundary.
	 */
	if (ring->scan_timestamp)
		*((s64 *)(data + ((i + 3) / 4) * 4)) = pf->timestamp;

	ring->access->store_to(ring, (u8 *)data, pf->timestamp);

	kfree(data);
done:
	iio_trigger_notify_done(indio_dev->trig);

	return IRQ_HANDLED;
}

void adis16209_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->buffer);
}
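
/*
 * Buffer setup ops: iio_sw_buffer_preenable() sizes the buffer for the
 * currently enabled scan mask (plus timestamp), while the triggered-buffer
 * post/pre hooks attach and detach the pollfunc from the device's trigger
 * around buffer enable/disable.
 */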
static const struct iio_buffer_setup_ops adis16209_ring_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

int adis16209_configure_ring(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_buffer *ring;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring)
		return -ENOMEM;

	indio_dev->buffer = ring;
	ring->scan_timestamp = true;
	indio_dev->setup_ops = &adis16209_ring_setup_ops;

	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &adis16209_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "%s_consumer%d",
						 indio_dev->name,
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->buffer);
	return ret;
}
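
/*
 * Sketch of the expected usage from the driver's probe/remove paths (the
 * channel table and trigger helpers referenced here live elsewhere in the
 * driver, so treat this as an illustration rather than the exact code):
 *
 *	ret = adis16209_configure_ring(indio_dev);
 *	if (ret)
 *		goto error_free_dev;
 *	ret = iio_buffer_register(indio_dev, adis16209_channels,
 *				  ARRAY_SIZE(adis16209_channels));
 *	...
 *	and on remove or error unwind:
 *	iio_buffer_unregister(indio_dev);
 *	adis16209_unconfigure_ring(indio_dev);
 */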