c_can.c revision 4fb6dccd13b27651998f773755e2a1db461c62f1
1/*
2 * CAN bus driver for Bosch C_CAN controller
3 *
4 * Copyright (C) 2010 ST Microelectronics
5 * Bhupesh Sharma <bhupesh.sharma@st.com>
6 *
7 * Borrowed heavily from the C_CAN driver originally written by:
8 * Copyright (C) 2007
9 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <s.hauer@pengutronix.de>
10 * - Simon Kallweit, intefo AG <simon.kallweit@intefo.ch>
11 *
12 * TX and RX NAPI implementation has been borrowed from at91 CAN driver
13 * written by:
14 * Copyright
15 * (C) 2007 by Hans J. Koch <hjk@hansjkoch.de>
16 * (C) 2008, 2009 by Marc Kleine-Budde <kernel@pengutronix.de>
17 *
18 * The Bosch C_CAN controller is compliant with CAN protocol version 2.0 part A and B.
19 * Bosch C_CAN user manual can be obtained from:
20 * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/
21 * users_manual_c_can.pdf
22 *
23 * This file is licensed under the terms of the GNU General Public
24 * License version 2. This program is licensed "as is" without any
25 * warranty of any kind, whether express or implied.
26 */
27
28#include <linux/kernel.h>
29#include <linux/module.h>
30#include <linux/interrupt.h>
31#include <linux/delay.h>
32#include <linux/netdevice.h>
33#include <linux/if_arp.h>
34#include <linux/if_ether.h>
35#include <linux/list.h>
36#include <linux/io.h>
37#include <linux/pm_runtime.h>
38
39#include <linux/can.h>
40#include <linux/can/dev.h>
41#include <linux/can/error.h>
42#include <linux/can/led.h>
43
44#include "c_can.h"
45
46/* Number of interface registers */
47#define IF_ENUM_REG_LEN		11
48#define C_CAN_IFACE(reg, iface)	(C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN)
49
50/* control extension register D_CAN specific */
51#define CONTROL_EX_PDR		BIT(8)
52
53/* control register */
54#define CONTROL_TEST		BIT(7)
55#define CONTROL_CCE		BIT(6)
56#define CONTROL_DISABLE_AR	BIT(5)
57#define CONTROL_ENABLE_AR	(0 << 5)
58#define CONTROL_EIE		BIT(3)
59#define CONTROL_SIE		BIT(2)
60#define CONTROL_IE		BIT(1)
61#define CONTROL_INIT		BIT(0)
62
63#define CONTROL_IRQMSK		(CONTROL_EIE | CONTROL_IE | CONTROL_SIE)
64
65/* test register */
66#define TEST_RX			BIT(7)
67#define TEST_TX1		BIT(6)
68#define TEST_TX2		BIT(5)
69#define TEST_LBACK		BIT(4)
70#define TEST_SILENT		BIT(3)
71#define TEST_BASIC		BIT(2)
72
73/* status register */
74#define STATUS_PDA		BIT(10)
75#define STATUS_BOFF		BIT(7)
76#define STATUS_EWARN		BIT(6)
77#define STATUS_EPASS		BIT(5)
78#define STATUS_RXOK		BIT(4)
79#define STATUS_TXOK		BIT(3)
80
81/* error counter register */
82#define ERR_CNT_TEC_MASK	0xff
83#define ERR_CNT_TEC_SHIFT	0
84#define ERR_CNT_REC_SHIFT	8
85#define ERR_CNT_REC_MASK	(0x7f << ERR_CNT_REC_SHIFT)
86#define ERR_CNT_RP_SHIFT	15
87#define ERR_CNT_RP_MASK		(0x1 << ERR_CNT_RP_SHIFT)
88
89/* bit-timing register */
90#define BTR_BRP_MASK		0x3f
91#define BTR_BRP_SHIFT		0
92#define BTR_SJW_SHIFT		6
93#define BTR_SJW_MASK		(0x3 << BTR_SJW_SHIFT)
94#define BTR_TSEG1_SHIFT		8
95#define BTR_TSEG1_MASK		(0xf << BTR_TSEG1_SHIFT)
96#define BTR_TSEG2_SHIFT		12
97#define BTR_TSEG2_MASK		(0x7 << BTR_TSEG2_SHIFT)
98
99/* brp extension register */
100#define BRP_EXT_BRPE_MASK	0x0f
101#define BRP_EXT_BRPE_SHIFT	0
102
103/* IFx command request */
104#define IF_COMR_BUSY		BIT(15)
105
106/* IFx command mask */
107#define IF_COMM_WR		BIT(7)
108#define IF_COMM_MASK		BIT(6)
109#define IF_COMM_ARB		BIT(5)
110#define IF_COMM_CONTROL		BIT(4)
111#define IF_COMM_CLR_INT_PND	BIT(3)
112#define IF_COMM_TXRQST		BIT(2)
113#define IF_COMM_CLR_NEWDAT	IF_COMM_TXRQST
114#define IF_COMM_DATAA		BIT(1)
115#define IF_COMM_DATAB		BIT(0)
116#define IF_COMM_ALL		(IF_COMM_MASK | IF_COMM_ARB | \
117				IF_COMM_CONTROL | IF_COMM_TXRQST | \
118				IF_COMM_DATAA | IF_COMM_DATAB)
119
120/* For the low buffers we clear the interrupt bit, but keep newdat */
121#define IF_COMM_RCV_LOW		(IF_COMM_MASK | IF_COMM_ARB | \
122				 IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \
123				 IF_COMM_DATAA | IF_COMM_DATAB)
124
125/* For the high buffers we clear the interrupt bit and newdat */
126#define IF_COMM_RCV_HIGH	(IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT)
127
128/* IFx arbitration */
129#define IF_ARB_MSGVAL		BIT(15)
130#define IF_ARB_MSGXTD		BIT(14)
131#define IF_ARB_TRANSMIT		BIT(13)
132
133/* IFx message control */
134#define IF_MCONT_NEWDAT		BIT(15)
135#define IF_MCONT_MSGLST		BIT(14)
136#define IF_MCONT_INTPND		BIT(13)
137#define IF_MCONT_UMASK		BIT(12)
138#define IF_MCONT_TXIE		BIT(11)
139#define IF_MCONT_RXIE		BIT(10)
140#define IF_MCONT_RMTEN		BIT(9)
141#define IF_MCONT_TXRQST		BIT(8)
142#define IF_MCONT_EOB		BIT(7)
143#define IF_MCONT_DLC_MASK	0xf
144
145/*
146 * Use IF1 for RX and IF2 for TX
147 */
148#define IF_RX			0
149#define IF_TX			1
150
151/* minimum timeout for checking BUSY status */
152#define MIN_TIMEOUT_VALUE	6
153
154/* Wait for ~1 sec for INIT bit */
155#define INIT_WAIT_MS		1000
156
157/* napi related */
158#define C_CAN_NAPI_WEIGHT	C_CAN_MSG_OBJ_RX_NUM
159
160/* c_can lec values */
161enum c_can_lec_type {
162	LEC_NO_ERROR = 0,
163	LEC_STUFF_ERROR,
164	LEC_FORM_ERROR,
165	LEC_ACK_ERROR,
166	LEC_BIT1_ERROR,
167	LEC_BIT0_ERROR,
168	LEC_CRC_ERROR,
169	LEC_UNUSED,
170	LEC_MASK = LEC_UNUSED,
171};
172
173/*
174 * c_can error types:
175 * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported
176 */
177enum c_can_bus_error_types {
178	C_CAN_NO_ERROR = 0,
179	C_CAN_BUS_OFF,
180	C_CAN_ERROR_WARNING,
181	C_CAN_ERROR_PASSIVE,
182};
183
184static const struct can_bittiming_const c_can_bittiming_const = {
185	.name = KBUILD_MODNAME,
186	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
187	.tseg1_max = 16,
188	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
189	.tseg2_max = 8,
190	.sjw_max = 4,
191	.brp_min = 1,
192	.brp_max = 1024,	/* 6-bit BRP field + 4-bit BRPE field*/
193	.brp_inc = 1,
194};
195
196static inline void c_can_pm_runtime_enable(const struct c_can_priv *priv)
197{
198	if (priv->device)
199		pm_runtime_enable(priv->device);
200}
201
202static inline void c_can_pm_runtime_disable(const struct c_can_priv *priv)
203{
204	if (priv->device)
205		pm_runtime_disable(priv->device);
206}
207
208static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv)
209{
210	if (priv->device)
211		pm_runtime_get_sync(priv->device);
212}
213
214static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv)
215{
216	if (priv->device)
217		pm_runtime_put_sync(priv->device);
218}
219
220static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable)
221{
222	if (priv->raminit)
223		priv->raminit(priv, enable);
224}
225
226static inline int get_tx_next_msg_obj(const struct c_can_priv *priv)
227{
228	return (priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) +
229			C_CAN_MSG_OBJ_TX_FIRST;
230}
231
232static inline int get_tx_echo_msg_obj(int txecho)
233{
234	return (txecho & C_CAN_NEXT_MSG_OBJ_MASK) + C_CAN_MSG_OBJ_TX_FIRST;
235}
236
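/* Read a 32-bit register pair as two 16-bit accesses (low word at index, high word at index + 1) */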
237static u32 c_can_read_reg32(struct c_can_priv *priv, enum reg index)
238{
239	u32 val = priv->read_reg(priv, index);
240	val |= ((u32) priv->read_reg(priv, index + 1)) << 16;
241	return val;
242}
243
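/* Enable or disable the module (IE), status change (SIE) and error (EIE) interrupts */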
244static void c_can_irq_control(struct c_can_priv *priv, bool enable)
245{
246	u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK;
247
248	if (enable)
249		ctrl |= CONTROL_IRQMSK;
250
251	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl);
252}
253
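/*
 * Poll the IFx command request register until the BUSY flag clears.
 * Returns 1 if the interface is still busy after MIN_TIMEOUT_VALUE polls.
 */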
254static inline int c_can_msg_obj_is_busy(struct c_can_priv *priv, int iface)
255{
256	int count = MIN_TIMEOUT_VALUE;
257
258	while (count && priv->read_reg(priv,
259				C_CAN_IFACE(COMREQ_REG, iface)) &
260				IF_COMR_BUSY) {
261		count--;
262		udelay(1);
263	}
264
265	if (!count)
266		return 1;
267
268	return 0;
269}
270
271static inline void c_can_object_get(struct net_device *dev,
272					int iface, int objno, int mask)
273{
274	struct c_can_priv *priv = netdev_priv(dev);
275
276	/*
277	 * As per the specification, after writing the message object number
278	 * to the IF command request register, the transfer between the
279	 * interface registers and message RAM must complete within 6 CAN
280	 * clock periods.
281	 */
282	priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
283			IFX_WRITE_LOW_16BIT(mask));
284	priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
285			IFX_WRITE_LOW_16BIT(objno));
286
287	if (c_can_msg_obj_is_busy(priv, iface))
288		netdev_err(dev, "timed out in object get\n");
289}
290
291static inline void c_can_object_put(struct net_device *dev,
292					int iface, int objno, int mask)
293{
294	struct c_can_priv *priv = netdev_priv(dev);
295
296	/*
297	 * As per the specification, after writing the message object number
298	 * to the IF command request register, the transfer between the
299	 * interface registers and message RAM must complete within 6 CAN
300	 * clock periods.
301	 */
302	priv->write_reg(priv, C_CAN_IFACE(COMMSK_REG, iface),
303			(IF_COMM_WR | IFX_WRITE_LOW_16BIT(mask)));
304	priv->write_reg(priv, C_CAN_IFACE(COMREQ_REG, iface),
305			IFX_WRITE_LOW_16BIT(objno));
306
307	if (c_can_msg_obj_is_busy(priv, iface))
308		netdev_err(dev, "timed out in object put\n");
309}
310
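/*
 * Load a CAN frame into the IFx registers (arbitration, data, DLC, TXRQST)
 * and transfer it to message object 'objno' in message RAM.
 */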
311static void c_can_write_msg_object(struct net_device *dev,
312			int iface, struct can_frame *frame, int objno)
313{
314	int i;
315	u16 flags = 0;
316	unsigned int id;
317	struct c_can_priv *priv = netdev_priv(dev);
318
319	if (!(frame->can_id & CAN_RTR_FLAG))
320		flags |= IF_ARB_TRANSMIT;
321
322	if (frame->can_id & CAN_EFF_FLAG) {
323		id = frame->can_id & CAN_EFF_MASK;
324		flags |= IF_ARB_MSGXTD;
325	} else
326		id = ((frame->can_id & CAN_SFF_MASK) << 18);
327
328	flags |= IF_ARB_MSGVAL;
329
330	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
331				IFX_WRITE_LOW_16BIT(id));
332	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), flags |
333				IFX_WRITE_HIGH_16BIT(id));
334
335	for (i = 0; i < frame->can_dlc; i += 2) {
336		priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2,
337				frame->data[i] | (frame->data[i + 1] << 8));
338	}
339
340	/* enable interrupt for this message object */
341	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface),
342			IF_MCONT_TXIE | IF_MCONT_TXRQST | IF_MCONT_EOB |
343			frame->can_dlc);
344	c_can_object_put(dev, iface, objno, IF_COMM_ALL);
345}
346
347static inline void c_can_activate_all_lower_rx_msg_obj(struct net_device *dev,
348						       int iface)
349{
350	int i;
351
352	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_MSG_RX_LOW_LAST; i++)
353		c_can_object_get(dev, iface, i, IF_COMM_CLR_NEWDAT);
354}
355
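/*
 * A message object was overwritten before it could be read (MSGLST set):
 * clear the flags, account for the overrun and push an error frame upstream.
 */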
356static int c_can_handle_lost_msg_obj(struct net_device *dev,
357				     int iface, int objno, u32 ctrl)
358{
359	struct net_device_stats *stats = &dev->stats;
360	struct c_can_priv *priv = netdev_priv(dev);
361	struct can_frame *frame;
362	struct sk_buff *skb;
363
364	ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT);
365	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl);
366	c_can_object_put(dev, iface, objno, IF_COMM_CONTROL);
367
368	stats->rx_errors++;
369	stats->rx_over_errors++;
370
371	/* create an error msg */
372	skb = alloc_can_err_skb(dev, &frame);
373	if (unlikely(!skb))
374		return 0;
375
376	frame->can_id |= CAN_ERR_CRTL;
377	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
378
379	netif_receive_skb(skb);
380	return 1;
381}
382
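/* Read arbitration, DLC and data from the IFx registers, build an skb and pass it to the stack */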
383static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl)
384{
385	struct net_device_stats *stats = &dev->stats;
386	struct c_can_priv *priv = netdev_priv(dev);
387	struct can_frame *frame;
388	struct sk_buff *skb;
389	u32 arb, data;
390
391	skb = alloc_can_skb(dev, &frame);
392	if (!skb) {
393		stats->rx_dropped++;
394		return -ENOMEM;
395	}
396
397	frame->can_dlc = get_can_dlc(ctrl & 0x0F);
398
399	arb = priv->read_reg(priv, C_CAN_IFACE(ARB1_REG, iface));
400	arb |= priv->read_reg(priv, C_CAN_IFACE(ARB2_REG, iface)) << 16;
401
402	if (arb & (IF_ARB_MSGXTD << 16))
403		frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG;
404	else
405		frame->can_id = (arb >> 18) & CAN_SFF_MASK;
406
407	if (arb & (IF_ARB_TRANSMIT << 16)) {
408		frame->can_id |= CAN_RTR_FLAG;
409	} else {
410		int i, dreg = C_CAN_IFACE(DATA1_REG, iface);
411
412		for (i = 0; i < frame->can_dlc; i += 2, dreg++) {
413			data = priv->read_reg(priv, dreg);
414			frame->data[i] = data;
415			frame->data[i + 1] = data >> 8;
416		}
417	}
418
419	stats->rx_packets++;
420	stats->rx_bytes += frame->can_dlc;
421
422	netif_receive_skb(skb);
423	return 0;
424}
425
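/* Configure a message object for reception with the given acceptance mask, id and control bits */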
426static void c_can_setup_receive_object(struct net_device *dev, int iface,
427					int objno, unsigned int mask,
428					unsigned int id, unsigned int mcont)
429{
430	struct c_can_priv *priv = netdev_priv(dev);
431
432	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
433			IFX_WRITE_LOW_16BIT(mask));
434
435	/* According to the C_CAN documentation, the reserved bit
436	 * in the IFx_MASK2 register is fixed to 1
437	 */
438	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
439			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
440
441	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
442			IFX_WRITE_LOW_16BIT(id));
443	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface),
444			(IF_ARB_MSGVAL | IFX_WRITE_HIGH_16BIT(id)));
445
446	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont);
447	c_can_object_put(dev, iface, objno, IF_COMM_ALL & ~IF_COMM_TXRQST);
448
449	netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
450			c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
451}
452
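/* Invalidate a message object by clearing its arbitration (MSGVAL) and control bits */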
453static void c_can_inval_msg_object(struct net_device *dev, int iface, int objno)
454{
455	struct c_can_priv *priv = netdev_priv(dev);
456
457	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface), 0);
458	priv->write_reg(priv, C_CAN_IFACE(ARB2_REG, iface), 0);
459	priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0);
460
461	c_can_object_put(dev, iface, objno, IF_COMM_ARB | IF_COMM_CONTROL);
462
463	netdev_dbg(dev, "obj no:%d, msgval:0x%08x\n", objno,
464			c_can_read_reg32(priv, C_CAN_MSGVAL1_REG));
465}
466
467static inline int c_can_is_next_tx_obj_busy(struct c_can_priv *priv, int objno)
468{
469	int val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
470
471	/*
472	 * Bit n-1 of the transmission request register corresponds to
473	 * message object n, so test bit (objno - 1).
474	 */
475	if (val & (1 << (objno - 1)))
476		return 1;
477
478	return 0;
479}
480
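/*
 * Queue a frame for transmission: write it into the next free TX message
 * object, store the echo skb and stop the queue on a wrap-around or when
 * the following TX object is still pending.
 */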
481static netdev_tx_t c_can_start_xmit(struct sk_buff *skb,
482					struct net_device *dev)
483{
484	u32 msg_obj_no;
485	struct c_can_priv *priv = netdev_priv(dev);
486	struct can_frame *frame = (struct can_frame *)skb->data;
487
488	if (can_dropped_invalid_skb(dev, skb))
489		return NETDEV_TX_OK;
490
491	spin_lock_bh(&priv->xmit_lock);
492	msg_obj_no = get_tx_next_msg_obj(priv);
493
494	/* prepare message object for transmission */
495	c_can_write_msg_object(dev, IF_TX, frame, msg_obj_no);
496	priv->dlc[msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST] = frame->can_dlc;
497	can_put_echo_skb(skb, dev, msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
498
499	/*
500	 * we have to stop the queue in case of a wrap around or
501	 * if the next TX message object is still in use
502	 */
503	priv->tx_next++;
504	if (c_can_is_next_tx_obj_busy(priv, get_tx_next_msg_obj(priv)) ||
505			(priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) == 0)
506		netif_stop_queue(dev);
507	spin_unlock_bh(&priv->xmit_lock);
508
509	return NETDEV_TX_OK;
510}
511
512static int c_can_wait_for_ctrl_init(struct net_device *dev,
513				    struct c_can_priv *priv, u32 init)
514{
515	int retry = 0;
516
517	while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) {
518		udelay(10);
519		if (retry++ > 1000) {
520			netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n");
521			return -EIO;
522		}
523	}
524	return 0;
525}
526
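/*
 * Program the bit-timing (BTR/BRPEXT) registers. The controller must be in
 * init mode with configuration change enable (CCE) set while they are
 * written; the saved control register value (with INIT cleared) is restored
 * afterwards.
 */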
527static int c_can_set_bittiming(struct net_device *dev)
528{
529	unsigned int reg_btr, reg_brpe, ctrl_save;
530	u8 brp, brpe, sjw, tseg1, tseg2;
531	u32 ten_bit_brp;
532	struct c_can_priv *priv = netdev_priv(dev);
533	const struct can_bittiming *bt = &priv->can.bittiming;
534	int res;
535
536	/* c_can provides 6-bit BRP and 4-bit BRPE fields */
537	ten_bit_brp = bt->brp - 1;
538	brp = ten_bit_brp & BTR_BRP_MASK;
539	brpe = ten_bit_brp >> 6;
540
541	sjw = bt->sjw - 1;
542	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
543	tseg2 = bt->phase_seg2 - 1;
544	reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) |
545			(tseg2 << BTR_TSEG2_SHIFT);
546	reg_brpe = brpe & BRP_EXT_BRPE_MASK;
547
548	netdev_info(dev,
549		"setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe);
550
551	ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG);
552	ctrl_save &= ~CONTROL_INIT;
553	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT);
554	res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT);
555	if (res)
556		return res;
557
558	priv->write_reg(priv, C_CAN_BTR_REG, reg_btr);
559	priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe);
560	priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save);
561
562	return c_can_wait_for_ctrl_init(dev, priv, 0);
563}
564
565/*
566 * Configure C_CAN message objects for Tx and Rx purposes:
567 * C_CAN provides a total of 32 message objects that can be configured
568 * either for Tx or Rx purposes. Here the first 16 message objects are used as
569 * a reception FIFO. The end of the reception FIFO is signified by the EOB bit
570 * being set. The remaining 16 message objects are kept aside for Tx purposes.
571 * See the user guide for further details on configuring message
572 * objects.
573 */
574static void c_can_configure_msg_objects(struct net_device *dev)
575{
576	int i;
577
578	/* first invalidate all message objects */
579	for (i = C_CAN_MSG_OBJ_RX_FIRST; i <= C_CAN_NO_OF_OBJECTS; i++)
580		c_can_inval_msg_object(dev, IF_RX, i);
581
582	/* setup receive message objects */
583	for (i = C_CAN_MSG_OBJ_RX_FIRST; i < C_CAN_MSG_OBJ_RX_LAST; i++)
584		c_can_setup_receive_object(dev, IF_RX, i, 0, 0,
585					   IF_MCONT_RXIE | IF_MCONT_UMASK);
586
587	c_can_setup_receive_object(dev, IF_RX, C_CAN_MSG_OBJ_RX_LAST, 0, 0,
588			IF_MCONT_EOB | IF_MCONT_RXIE | IF_MCONT_UMASK);
589}
590
591/*
592 * Configure C_CAN chip:
593 * - enable/disable auto-retransmission
594 * - set operating mode
595 * - configure message objects
596 */
597static int c_can_chip_config(struct net_device *dev)
598{
599	struct c_can_priv *priv = netdev_priv(dev);
600
601	/* enable automatic retransmission */
602	priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR);
603
604	if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) &&
605	    (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) {
606		/* loopback + silent mode : useful for hot self-test */
607		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
608		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT);
609	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
610		/* loopback mode : useful for self-test function */
611		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
612		priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK);
613	} else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) {
614		/* silent mode : bus-monitoring mode */
615		priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST);
616		priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT);
617	}
618
619	/* configure message objects */
620	c_can_configure_msg_objects(dev);
621
622	/* set the LEC field to 'unused' so that we can check for updates later */
623	priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
624
625	/* set bittiming params */
626	return c_can_set_bittiming(dev);
627}
628
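/*
 * Bring the controller into operation: configure the chip, select the
 * receive command word and reset the TX helper pointers and the RX mask.
 */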
629static int c_can_start(struct net_device *dev)
630{
631	struct c_can_priv *priv = netdev_priv(dev);
632	int err;
633
634	/* basic c_can configuration */
635	err = c_can_chip_config(dev);
636	if (err)
637		return err;
638
639	/* Setup the command for new messages */
640	priv->comm_rcv_high = priv->type != BOSCH_D_CAN ?
641		IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH;
642
643	priv->can.state = CAN_STATE_ERROR_ACTIVE;
644
645	/* reset tx helper pointers and the rx mask */
646	priv->tx_next = priv->tx_echo = 0;
647	priv->rxmasked = 0;
648
649	return 0;
650}
651
652static void c_can_stop(struct net_device *dev)
653{
654	struct c_can_priv *priv = netdev_priv(dev);
655
656	c_can_irq_control(priv, false);
657	priv->can.state = CAN_STATE_STOPPED;
658}
659
660static int c_can_set_mode(struct net_device *dev, enum can_mode mode)
661{
662	struct c_can_priv *priv = netdev_priv(dev);
663	int err;
664
665	switch (mode) {
666	case CAN_MODE_START:
667		err = c_can_start(dev);
668		if (err)
669			return err;
670		netif_wake_queue(dev);
671		c_can_irq_control(priv, true);
672		break;
673	default:
674		return -EOPNOTSUPP;
675	}
676
677	return 0;
678}
679
680static int __c_can_get_berr_counter(const struct net_device *dev,
681				    struct can_berr_counter *bec)
682{
683	unsigned int reg_err_counter;
684	struct c_can_priv *priv = netdev_priv(dev);
685
686	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
687	bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >>
688				ERR_CNT_REC_SHIFT;
689	bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK;
690
691	return 0;
692}
693
694static int c_can_get_berr_counter(const struct net_device *dev,
695				  struct can_berr_counter *bec)
696{
697	struct c_can_priv *priv = netdev_priv(dev);
698	int err;
699
700	c_can_pm_runtime_get_sync(priv);
701	err = __c_can_get_berr_counter(dev, bec);
702	c_can_pm_runtime_put_sync(priv);
703
704	return err;
705}
706
707/*
708 * priv->tx_echo holds the number of the oldest can_frame put for
709 * transmission into the hardware, but not yet ACKed by the CAN tx
710 * complete IRQ.
711 *
712 * We iterate from priv->tx_echo to priv->tx_next; each packet that has
713 * been transmitted is echoed back to the CAN framework. When we find a
714 * packet that has not yet been transmitted, we stop looking for more.
715 */
716static void c_can_do_tx(struct net_device *dev)
717{
718	struct c_can_priv *priv = netdev_priv(dev);
719	struct net_device_stats *stats = &dev->stats;
720	u32 val, obj, pkts = 0, bytes = 0;
721
722	spin_lock_bh(&priv->xmit_lock);
723
724	for (; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
725		obj = get_tx_echo_msg_obj(priv->tx_echo);
726		val = c_can_read_reg32(priv, C_CAN_TXRQST1_REG);
727
728		if (val & (1 << (obj - 1)))
729			break;
730
731		can_get_echo_skb(dev, obj - C_CAN_MSG_OBJ_TX_FIRST);
732		bytes += priv->dlc[obj - C_CAN_MSG_OBJ_TX_FIRST];
733		pkts++;
734		c_can_inval_msg_object(dev, IF_TX, obj);
735	}
736
737	/* restart queue if wrap-around or if queue stalled on last pkt */
738	if (((priv->tx_next & C_CAN_NEXT_MSG_OBJ_MASK) != 0) ||
739			((priv->tx_echo & C_CAN_NEXT_MSG_OBJ_MASK) == 0))
740		netif_wake_queue(dev);
741
742	spin_unlock_bh(&priv->xmit_lock);
743
744	if (pkts) {
745		stats->tx_bytes += bytes;
746		stats->tx_packets += pkts;
747		can_led_event(dev, CAN_LED_EVENT_TX);
748	}
749}
750
751/*
752 * If we have a gap in the pending bits, that means we either
753 * raced with the hardware or failed to read out all upper
754 * objects in the last run due to the quota limit.
755 */
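/*
 * Example (illustrative): pend = 0x1b (objects 1, 2, 4 and 5 pending) has
 * hweight32() = 4 but fls() = 5, so there is a gap at object 3. The walk
 * below stops at bit 3 and the function returns 0x18, i.e. objects 4 and 5
 * are read out first; objects 1 and 2 are picked up on the next round.
 */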
756static u32 c_can_adjust_pending(u32 pend)
757{
758	u32 weight, lasts;
759
760	if (pend == RECEIVE_OBJECT_BITS)
761		return pend;
762
763	/*
764	 * If the last set bit is larger than the number of pending
765	 * bits we have a gap.
766	 */
767	weight = hweight32(pend);
768	lasts = fls(pend);
769
770	/* If the bits are linear, nothing to do */
771	if (lasts == weight)
772		return pend;
773
774	/*
775	 * Find the first set bit after the gap. We walk backwards
776	 * from the last set bit.
777	 */
778	for (lasts--; pend & (1 << (lasts - 1)); lasts--);
779
780	return pend & ~((1 << lasts) - 1);
781}
782
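/*
 * Transfer a received message object into the IFx registers. With strict
 * frame ordering enabled, the low buffers keep NEWDAT set until the whole
 * low group has been read out.
 */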
783static inline void c_can_rx_object_get(struct net_device *dev,
784				       struct c_can_priv *priv, u32 obj)
785{
786#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
787	if (obj < C_CAN_MSG_RX_LOW_LAST)
788		c_can_object_get(dev, IF_RX, obj, IF_COMM_RCV_LOW);
789	else
790#endif
791		c_can_object_get(dev, IF_RX, obj, priv->comm_rcv_high);
792}
793
794static inline void c_can_rx_finalize(struct net_device *dev,
795				     struct c_can_priv *priv, u32 obj)
796{
797#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
798	if (obj < C_CAN_MSG_RX_LOW_LAST)
799		priv->rxmasked |= BIT(obj - 1);
800	else if (obj == C_CAN_MSG_RX_LOW_LAST) {
801		priv->rxmasked = 0;
802		/* activate all lower message objects */
803		c_can_activate_all_lower_rx_msg_obj(dev, IF_RX);
804	}
805#endif
806	if (priv->type != BOSCH_D_CAN)
807		c_can_object_get(dev, IF_RX, obj, IF_COMM_CLR_NEWDAT);
808}
809
810static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv,
811			      u32 pend, int quota)
812{
813	u32 pkts = 0, ctrl, obj;
814
815	while ((obj = ffs(pend)) && quota > 0) {
816		pend &= ~BIT(obj - 1);
817
818		c_can_rx_object_get(dev, priv, obj);
819		ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_RX));
820
821		if (ctrl & IF_MCONT_MSGLST) {
822			int n = c_can_handle_lost_msg_obj(dev, IF_RX, obj, ctrl);
823
824			pkts += n;
825			quota -= n;
826			continue;
827		}
828
829		/*
830		 * This really should not happen, but this covers some
831		 * odd HW behaviour. Do not remove that unless you
832		 * want to brick your machine.
833		 */
834		if (!(ctrl & IF_MCONT_NEWDAT))
835			continue;
836
837		/* read the data from the message object */
838		c_can_read_msg_object(dev, IF_RX, ctrl);
839
840		c_can_rx_finalize(dev, priv, obj);
841
842		pkts++;
843		quota--;
844	}
845
846	return pkts;
847}
848
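/*
 * Return the NEWDAT bits of the receive message objects; with strict frame
 * ordering the already handled (masked) objects are filtered out.
 */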
849static inline u32 c_can_get_pending(struct c_can_priv *priv)
850{
851	u32 pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG);
852
853#ifdef CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING
854	pend &= ~priv->rxmasked;
855#endif
856	return pend;
857}
858
859/*
860 * theory of operation:
861 *
862 * The c_can core saves a received CAN message into the first message
863 * object it finds free (starting with the lowest). Bits NEWDAT and
864 * INTPND are set for this object to indicate a new message. Since this
865 * can deliver frames out of order, we keep two groups of message
866 * objects whose partitioning is defined by C_CAN_MSG_OBJ_RX_SPLIT.
867 *
868 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = y
869 *
870 * To ensure in-order frame reception we use the following
871 * approach while re-activating a message object to receive further
872 * frames:
873 * - if the current message object number is lower than
874 *   C_CAN_MSG_RX_LOW_LAST, do not clear the NEWDAT bit while clearing
875 *   the INTPND bit.
876 * - if the current message object number is equal to
877 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of all lower
878 *   receive message objects.
879 * - if the current message object number is greater than
880 *   C_CAN_MSG_RX_LOW_LAST then clear the NEWDAT bit of
881 *   only this message object.
882 *
883 * This can cause packet loss!
884 *
885 * If CONFIG_CAN_C_CAN_STRICT_FRAME_ORDERING = n
886 *
887 * We clear the newdat bit right away.
888 *
889 * This can result in packet reordering when the readout is slow.
890 */
891static int c_can_do_rx_poll(struct net_device *dev, int quota)
892{
893	struct c_can_priv *priv = netdev_priv(dev);
894	u32 pkts = 0, pend = 0, toread, n;
895
896	/*
897	 * It is faster to read only one 16-bit register. This is only possible
898	 * for a maximum of 16 message objects.
899	 */
900	BUILD_BUG_ON_MSG(C_CAN_MSG_OBJ_RX_LAST > 16,
901			"Implementation does not support more message objects than 16");
902
903	while (quota > 0) {
904		if (!pend) {
905			pend = c_can_get_pending(priv);
906			if (!pend)
907				break;
908			/*
909			 * If the pending field has a gap, handle the
910			 * bits above the gap first.
911			 */
912			toread = c_can_adjust_pending(pend);
913		} else {
914			toread = pend;
915		}
916		/* Remove the bits from pend */
917		pend &= ~toread;
918		/* Read the objects */
919		n = c_can_read_objects(dev, priv, toread, quota);
920		pkts += n;
921		quota -= n;
922	}
923
924	if (pkts)
925		can_led_event(dev, CAN_LED_EVENT_RX);
926
927	return pkts;
928}
929
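/*
 * Update the CAN state on a warning/passive/bus-off transition and report
 * it to the stack as an error frame carrying the error counters.
 */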
930static int c_can_handle_state_change(struct net_device *dev,
931				enum c_can_bus_error_types error_type)
932{
933	unsigned int reg_err_counter;
934	unsigned int rx_err_passive;
935	struct c_can_priv *priv = netdev_priv(dev);
936	struct net_device_stats *stats = &dev->stats;
937	struct can_frame *cf;
938	struct sk_buff *skb;
939	struct can_berr_counter bec;
940
941	switch (error_type) {
942	case C_CAN_ERROR_WARNING:
943		/* error warning state */
944		priv->can.can_stats.error_warning++;
945		priv->can.state = CAN_STATE_ERROR_WARNING;
946		break;
947	case C_CAN_ERROR_PASSIVE:
948		/* error passive state */
949		priv->can.can_stats.error_passive++;
950		priv->can.state = CAN_STATE_ERROR_PASSIVE;
951		break;
952	case C_CAN_BUS_OFF:
953		/* bus-off state */
954		priv->can.state = CAN_STATE_BUS_OFF;
955		can_bus_off(dev);
956		break;
957	default:
958		break;
959	}
960
961	/* propagate the error condition to the CAN stack */
962	skb = alloc_can_err_skb(dev, &cf);
963	if (unlikely(!skb))
964		return 0;
965
966	__c_can_get_berr_counter(dev, &bec);
967	reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG);
968	rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >>
969				ERR_CNT_RP_SHIFT;
970
971	switch (error_type) {
972	case C_CAN_ERROR_WARNING:
973		/* error warning state */
974		cf->can_id |= CAN_ERR_CRTL;
975		cf->data[1] = (bec.txerr > bec.rxerr) ?
976			CAN_ERR_CRTL_TX_WARNING :
977			CAN_ERR_CRTL_RX_WARNING;
978		cf->data[6] = bec.txerr;
979		cf->data[7] = bec.rxerr;
980
981		break;
982	case C_CAN_ERROR_PASSIVE:
983		/* error passive state */
984		cf->can_id |= CAN_ERR_CRTL;
985		if (rx_err_passive)
986			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
987		if (bec.txerr > 127)
988			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
989
990		cf->data[6] = bec.txerr;
991		cf->data[7] = bec.rxerr;
992		break;
993	case C_CAN_BUS_OFF:
994		/* bus-off state */
995		cf->can_id |= CAN_ERR_BUSOFF;
996		can_bus_off(dev);
997		break;
998	default:
999		break;
1000	}
1001
1002	stats->rx_packets++;
1003	stats->rx_bytes += cf->can_dlc;
1004	netif_receive_skb(skb);
1005
1006	return 1;
1007}
1008
1009static int c_can_handle_bus_err(struct net_device *dev,
1010				enum c_can_lec_type lec_type)
1011{
1012	struct c_can_priv *priv = netdev_priv(dev);
1013	struct net_device_stats *stats = &dev->stats;
1014	struct can_frame *cf;
1015	struct sk_buff *skb;
1016
1017	/*
1018	 * Early exit if there is no LEC update or no error.
1019	 * No LEC update means that no CAN bus event has been detected
1020	 * since the CPU wrote the value 0x7 (LEC_UNUSED) to the status register.
1021	 */
1022	if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR)
1023		return 0;
1024
1025	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
1026		return 0;
1027
1028	/* common for all types of bus errors */
1029	priv->can.can_stats.bus_error++;
1030	stats->rx_errors++;
1031
1032	/* propagate the error condition to the CAN stack */
1033	skb = alloc_can_err_skb(dev, &cf);
1034	if (unlikely(!skb))
1035		return 0;
1036
1037	/*
1038	 * check for 'last error code' which tells us the
1039	 * type of the last error to occur on the CAN bus
1040	 */
1041	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
1042	cf->data[2] |= CAN_ERR_PROT_UNSPEC;
1043
1044	switch (lec_type) {
1045	case LEC_STUFF_ERROR:
1046		netdev_dbg(dev, "stuff error\n");
1047		cf->data[2] |= CAN_ERR_PROT_STUFF;
1048		break;
1049	case LEC_FORM_ERROR:
1050		netdev_dbg(dev, "form error\n");
1051		cf->data[2] |= CAN_ERR_PROT_FORM;
1052		break;
1053	case LEC_ACK_ERROR:
1054		netdev_dbg(dev, "ack error\n");
1055		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
1056				CAN_ERR_PROT_LOC_ACK_DEL);
1057		break;
1058	case LEC_BIT1_ERROR:
1059		netdev_dbg(dev, "bit1 error\n");
1060		cf->data[2] |= CAN_ERR_PROT_BIT1;
1061		break;
1062	case LEC_BIT0_ERROR:
1063		netdev_dbg(dev, "bit0 error\n");
1064		cf->data[2] |= CAN_ERR_PROT_BIT0;
1065		break;
1066	case LEC_CRC_ERROR:
1067		netdev_dbg(dev, "CRC error\n");
1068		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
1069				CAN_ERR_PROT_LOC_CRC_DEL);
1070		break;
1071	default:
1072		break;
1073	}
1074
1075	stats->rx_packets++;
1076	stats->rx_bytes += cf->can_dlc;
1077	netif_receive_skb(skb);
1078	return 1;
1079}
1080
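/*
 * NAPI poll handler: read the status register, handle state changes, bus
 * recovery and LEC errors, then process the RX and TX work. Interrupts are
 * re-enabled when the quota was not exhausted, unless the controller went
 * bus-off.
 */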
1081static int c_can_poll(struct napi_struct *napi, int quota)
1082{
1083	struct net_device *dev = napi->dev;
1084	struct c_can_priv *priv = netdev_priv(dev);
1085	u16 curr, last = priv->last_status;
1086	int work_done = 0;
1087
1088	priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG);
1089	/* Ack status on C_CAN. D_CAN is self-clearing */
1090	if (priv->type != BOSCH_D_CAN)
1091		priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED);
1092
1093	/* handle state changes */
1094	if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) {
1095		netdev_dbg(dev, "entered error warning state\n");
1096		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING);
1097	}
1098
1099	if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) {
1100		netdev_dbg(dev, "entered error passive state\n");
1101		work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE);
1102	}
1103
1104	if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) {
1105		netdev_dbg(dev, "entered bus off state\n");
1106		work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF);
1107		goto end;
1108	}
1109
1110	/* handle bus recovery events */
1111	if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) {
1112		netdev_dbg(dev, "left bus off state\n");
1113		priv->can.state = CAN_STATE_ERROR_ACTIVE;
1114	}
1115	if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) {
1116		netdev_dbg(dev, "left error passive state\n");
1117		priv->can.state = CAN_STATE_ERROR_ACTIVE;
1118	}
1119
1120	/* handle lec errors on the bus */
1121	work_done += c_can_handle_bus_err(dev, curr & LEC_MASK);
1122
1123	/* Handle Tx/Rx events. We do this unconditionally */
1124	work_done += c_can_do_rx_poll(dev, (quota - work_done));
1125	c_can_do_tx(dev);
1126
1127end:
1128	if (work_done < quota) {
1129		napi_complete(napi);
1130		/* enable all IRQs if we are not in bus off state */
1131		if (priv->can.state != CAN_STATE_BUS_OFF)
1132			c_can_irq_control(priv, true);
1133	}
1134
1135	return work_done;
1136}
1137
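/*
 * Hard interrupt handler: check the interrupt register; if an interrupt is
 * pending, mask the controller interrupts and hand the work over to NAPI.
 */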
1138static irqreturn_t c_can_isr(int irq, void *dev_id)
1139{
1140	struct net_device *dev = (struct net_device *)dev_id;
1141	struct c_can_priv *priv = netdev_priv(dev);
1142
1143	if (!priv->read_reg(priv, C_CAN_INT_REG))
1144		return IRQ_NONE;
1145
1146	/* disable all interrupts and schedule the NAPI */
1147	c_can_irq_control(priv, false);
1148	napi_schedule(&priv->napi);
1149
1150	return IRQ_HANDLED;
1151}
1152
1153static int c_can_open(struct net_device *dev)
1154{
1155	int err;
1156	struct c_can_priv *priv = netdev_priv(dev);
1157
1158	c_can_pm_runtime_get_sync(priv);
1159	c_can_reset_ram(priv, true);
1160
1161	/* open the can device */
1162	err = open_candev(dev);
1163	if (err) {
1164		netdev_err(dev, "failed to open can device\n");
1165		goto exit_open_fail;
1166	}
1167
1168	/* register interrupt handler */
1169	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
1170				dev);
1171	if (err < 0) {
1172		netdev_err(dev, "failed to request interrupt\n");
1173		goto exit_irq_fail;
1174	}
1175
1176	/* start the c_can controller */
1177	err = c_can_start(dev);
1178	if (err)
1179		goto exit_start_fail;
1180
1181	can_led_event(dev, CAN_LED_EVENT_OPEN);
1182
1183	napi_enable(&priv->napi);
1184	/* enable status change, error and module interrupts */
1185	c_can_irq_control(priv, true);
1186	netif_start_queue(dev);
1187
1188	return 0;
1189
1190exit_start_fail:
1191	free_irq(dev->irq, dev);
1192exit_irq_fail:
1193	close_candev(dev);
1194exit_open_fail:
1195	c_can_reset_ram(priv, false);
1196	c_can_pm_runtime_put_sync(priv);
1197	return err;
1198}
1199
1200static int c_can_close(struct net_device *dev)
1201{
1202	struct c_can_priv *priv = netdev_priv(dev);
1203
1204	netif_stop_queue(dev);
1205	napi_disable(&priv->napi);
1206	c_can_stop(dev);
1207	free_irq(dev->irq, dev);
1208	close_candev(dev);
1209
1210	c_can_reset_ram(priv, false);
1211	c_can_pm_runtime_put_sync(priv);
1212
1213	can_led_event(dev, CAN_LED_EVENT_STOP);
1214
1215	return 0;
1216}
1217
1218struct net_device *alloc_c_can_dev(void)
1219{
1220	struct net_device *dev;
1221	struct c_can_priv *priv;
1222
1223	dev = alloc_candev(sizeof(struct c_can_priv), C_CAN_MSG_OBJ_TX_NUM);
1224	if (!dev)
1225		return NULL;
1226
1227	priv = netdev_priv(dev);
1228	spin_lock_init(&priv->xmit_lock);
1229	netif_napi_add(dev, &priv->napi, c_can_poll, C_CAN_NAPI_WEIGHT);
1230
1231	priv->dev = dev;
1232	priv->can.bittiming_const = &c_can_bittiming_const;
1233	priv->can.do_set_mode = c_can_set_mode;
1234	priv->can.do_get_berr_counter = c_can_get_berr_counter;
1235	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
1236					CAN_CTRLMODE_LISTENONLY |
1237					CAN_CTRLMODE_BERR_REPORTING;
1238
1239	return dev;
1240}
1241EXPORT_SYMBOL_GPL(alloc_c_can_dev);
1242
1243#ifdef CONFIG_PM
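/*
 * Request hardware power down (D_CAN only): set the PDR bit, wait for the
 * PDA acknowledge and stop the controller. Does nothing if the interface
 * is down.
 */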
1244int c_can_power_down(struct net_device *dev)
1245{
1246	u32 val;
1247	unsigned long time_out;
1248	struct c_can_priv *priv = netdev_priv(dev);
1249
1250	if (!(dev->flags & IFF_UP))
1251		return 0;
1252
1253	WARN_ON(priv->type != BOSCH_D_CAN);
1254
1255	/* set the PDR bit so the device goes into power-down mode */
1256	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
1257	val |= CONTROL_EX_PDR;
1258	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
1259
1260	/* Wait for the PDA bit to get set */
1261	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
1262	while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
1263				time_after(time_out, jiffies))
1264		cpu_relax();
1265
1266	if (time_after(jiffies, time_out))
1267		return -ETIMEDOUT;
1268
1269	c_can_stop(dev);
1270
1271	c_can_reset_ram(priv, false);
1272	c_can_pm_runtime_put_sync(priv);
1273
1274	return 0;
1275}
1276EXPORT_SYMBOL_GPL(c_can_power_down);
1277
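/*
 * Leave power-down mode (D_CAN only): clear the PDR and INIT bits, wait for
 * PDA to clear and restart the controller. Does nothing if the interface
 * is down.
 */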
1278int c_can_power_up(struct net_device *dev)
1279{
1280	u32 val;
1281	unsigned long time_out;
1282	struct c_can_priv *priv = netdev_priv(dev);
1283	int ret;
1284
1285	if (!(dev->flags & IFF_UP))
1286		return 0;
1287
1288	WARN_ON(priv->type != BOSCH_D_CAN);
1289
1290	c_can_pm_runtime_get_sync(priv);
1291	c_can_reset_ram(priv, true);
1292
1293	/* Clear PDR and INIT bits */
1294	val = priv->read_reg(priv, C_CAN_CTRL_EX_REG);
1295	val &= ~CONTROL_EX_PDR;
1296	priv->write_reg(priv, C_CAN_CTRL_EX_REG, val);
1297	val = priv->read_reg(priv, C_CAN_CTRL_REG);
1298	val &= ~CONTROL_INIT;
1299	priv->write_reg(priv, C_CAN_CTRL_REG, val);
1300
1301	/* Wait for the PDA bit to clear */
1302	time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS);
1303	while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) &&
1304				time_after(time_out, jiffies))
1305		cpu_relax();
1306
1307	if (time_after(jiffies, time_out))
1308		return -ETIMEDOUT;
1309
1310	ret = c_can_start(dev);
1311	if (!ret)
1312		c_can_irq_control(priv, true);
1313
1314	return ret;
1315}
1316EXPORT_SYMBOL_GPL(c_can_power_up);
1317#endif
1318
1319void free_c_can_dev(struct net_device *dev)
1320{
1321	struct c_can_priv *priv = netdev_priv(dev);
1322
1323	netif_napi_del(&priv->napi);
1324	free_candev(dev);
1325}
1326EXPORT_SYMBOL_GPL(free_c_can_dev);
1327
1328static const struct net_device_ops c_can_netdev_ops = {
1329	.ndo_open = c_can_open,
1330	.ndo_stop = c_can_close,
1331	.ndo_start_xmit = c_can_start_xmit,
1332	.ndo_change_mtu = can_change_mtu,
1333};
1334
1335int register_c_can_dev(struct net_device *dev)
1336{
1337	struct c_can_priv *priv = netdev_priv(dev);
1338	int err;
1339
1340	c_can_pm_runtime_enable(priv);
1341
1342	dev->flags |= IFF_ECHO;	/* we support local echo */
1343	dev->netdev_ops = &c_can_netdev_ops;
1344
1345	err = register_candev(dev);
1346	if (err)
1347		c_can_pm_runtime_disable(priv);
1348	else
1349		devm_can_led_init(dev);
1350
1351	return err;
1352}
1353EXPORT_SYMBOL_GPL(register_c_can_dev);
1354
1355void unregister_c_can_dev(struct net_device *dev)
1356{
1357	struct c_can_priv *priv = netdev_priv(dev);
1358
1359	unregister_candev(dev);
1360
1361	c_can_pm_runtime_disable(priv);
1362}
1363EXPORT_SYMBOL_GPL(unregister_c_can_dev);
1364
1365MODULE_AUTHOR("Bhupesh Sharma <bhupesh.sharma@st.com>");
1366MODULE_LICENSE("GPL v2");
1367MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
1368
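/*
 * Usage sketch (illustrative only, not taken from this file): a bus glue
 * driver is expected to allocate the device, fill in the register accessors
 * and register it, roughly along these lines (my_read_reg/my_write_reg and
 * the irq/clock_freq variables are placeholders; the real glue lives in
 * files such as c_can_platform.c):
 *
 *	struct net_device *dev = alloc_c_can_dev();
 *	struct c_can_priv *priv = netdev_priv(dev);
 *
 *	dev->irq = irq;
 *	priv->read_reg = my_read_reg;
 *	priv->write_reg = my_write_reg;
 *	priv->can.clock.freq = clock_freq;
 *	register_c_can_dev(dev);
 */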