m_can.c revision 80646733f11c2e9de3b6339f7e635047e6087280
1/*
2 * CAN bus driver for Bosch M_CAN controller
3 *
4 * Copyright (C) 2014 Freescale Semiconductor, Inc.
5 *	Dong Aisheng <b29396@freescale.com>
6 *
7 * Bosch M_CAN user manual can be obtained from:
8 * http://www.bosch-semiconductors.de/media/pdf_1/ipmodules_1/m_can/
9 * mcan_users_manual_v302.pdf
10 *
11 * This file is licensed under the terms of the GNU General Public
12 * License version 2. This program is licensed "as is" without any
13 * warranty of any kind, whether express or implied.
14 */
15
16#include <linux/clk.h>
17#include <linux/delay.h>
18#include <linux/interrupt.h>
19#include <linux/io.h>
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/netdevice.h>
23#include <linux/of.h>
24#include <linux/of_device.h>
25#include <linux/platform_device.h>
26
27#include <linux/can/dev.h>
28
29/* napi related */
30#define M_CAN_NAPI_WEIGHT	64
31
32/* message ram configuration data length */
33#define MRAM_CFG_LEN	8
34
35/* registers definition */
enum m_can_reg {
	/* core release / identification and global configuration */
	M_CAN_CREL	= 0x0,
	M_CAN_ENDN	= 0x4,
	M_CAN_CUST	= 0x8,
	M_CAN_FBTP	= 0xc,
	M_CAN_TEST	= 0x10,
	M_CAN_RWD	= 0x14,
	M_CAN_CCCR	= 0x18,
	M_CAN_BTP	= 0x1c,
	M_CAN_TSCC	= 0x20,
	M_CAN_TSCV	= 0x24,
	M_CAN_TOCC	= 0x28,
	M_CAN_TOCV	= 0x2c,
	/* error counters and protocol status */
	M_CAN_ECR	= 0x40,
	M_CAN_PSR	= 0x44,
	/* interrupt status, enable and line routing */
	M_CAN_IR	= 0x50,
	M_CAN_IE	= 0x54,
	M_CAN_ILS	= 0x58,
	M_CAN_ILE	= 0x5c,
	/* acceptance filter configuration */
	M_CAN_GFC	= 0x80,
	M_CAN_SIDFC	= 0x84,
	M_CAN_XIDFC	= 0x88,
	M_CAN_XIDAM	= 0x90,
	M_CAN_HPMS	= 0x94,
	M_CAN_NDAT1	= 0x98,
	M_CAN_NDAT2	= 0x9c,
	/* rx fifo 0/1 and rx buffer configuration/status */
	M_CAN_RXF0C	= 0xa0,
	M_CAN_RXF0S	= 0xa4,
	M_CAN_RXF0A	= 0xa8,
	M_CAN_RXBC	= 0xac,
	M_CAN_RXF1C	= 0xb0,
	M_CAN_RXF1S	= 0xb4,
	M_CAN_RXF1A	= 0xb8,
	M_CAN_RXESC	= 0xbc,
	/* tx buffers and tx event fifo */
	M_CAN_TXBC	= 0xc0,
	M_CAN_TXFQS	= 0xc4,
	M_CAN_TXESC	= 0xc8,
	M_CAN_TXBRP	= 0xcc,
	M_CAN_TXBAR	= 0xd0,
	M_CAN_TXBCR	= 0xd4,
	M_CAN_TXBTO	= 0xd8,
	M_CAN_TXBCF	= 0xdc,
	M_CAN_TXBTIE	= 0xe0,
	M_CAN_TXBCIE	= 0xe4,
	M_CAN_TXEFC	= 0xf0,
	M_CAN_TXEFS	= 0xf4,
	M_CAN_TXEFA	= 0xf8,
};
84
85/* m_can lec values */
enum m_can_lec_type {
	/* values of the 3-bit PSR.LEC "last error code" field */
	LEC_NO_ERROR = 0,
	LEC_STUFF_ERROR,
	LEC_FORM_ERROR,
	LEC_ACK_ERROR,
	LEC_BIT1_ERROR,
	LEC_BIT0_ERROR,
	LEC_CRC_ERROR,
	LEC_UNUSED,	/* 7: no new error event since LEC was last read */
};
96
enum m_can_mram_cfg {
	/* indices into m_can_priv::mcfg, in Message RAM layout order */
	MRAM_SIDF = 0,	/* standard ID filters */
	MRAM_XIDF,	/* extended ID filters */
	MRAM_RXF0,	/* rx fifo 0 */
	MRAM_RXF1,	/* rx fifo 1 */
	MRAM_RXB,	/* dedicated rx buffers */
	MRAM_TXE,	/* tx event fifo */
	MRAM_TXB,	/* tx buffers */
	MRAM_CFG_NUM,
};
107
108/* Fast Bit Timing & Prescaler Register (FBTP) */
109#define FBTR_FBRP_MASK		0x1f
110#define FBTR_FBRP_SHIFT		16
111#define FBTR_FTSEG1_SHIFT	8
112#define FBTR_FTSEG1_MASK	(0xf << FBTR_FTSEG1_SHIFT)
113#define FBTR_FTSEG2_SHIFT	4
114#define FBTR_FTSEG2_MASK	(0x7 << FBTR_FTSEG2_SHIFT)
115#define FBTR_FSJW_SHIFT		0
116#define FBTR_FSJW_MASK		0x3
117
118/* Test Register (TEST) */
119#define TEST_LBCK	BIT(4)
120
/* CC Control Register(CCCR) */
#define CCCR_TEST		BIT(7)
#define CCCR_CMR_MASK		0x3
#define CCCR_CMR_SHIFT		10
#define CCCR_CMR_CANFD		0x1
#define CCCR_CMR_CANFD_BRS	0x2
#define CCCR_CMR_CAN		0x3
#define CCCR_CME_MASK		0x3
#define CCCR_CME_SHIFT		8
#define CCCR_CME_CAN		0
#define CCCR_CME_CANFD		0x1
#define CCCR_CME_CANFD_BRS	0x2
#define CCCR_MON		BIT(5)
#define CCCR_CCE		BIT(1)
#define CCCR_INIT		BIT(0)
#define CCCR_CANFD		0x10
139/* Bit Timing & Prescaler Register (BTP) */
140#define BTR_BRP_MASK		0x3ff
141#define BTR_BRP_SHIFT		16
142#define BTR_TSEG1_SHIFT		8
143#define BTR_TSEG1_MASK		(0x3f << BTR_TSEG1_SHIFT)
144#define BTR_TSEG2_SHIFT		4
145#define BTR_TSEG2_MASK		(0xf << BTR_TSEG2_SHIFT)
146#define BTR_SJW_SHIFT		0
147#define BTR_SJW_MASK		0xf
148
149/* Error Counter Register(ECR) */
150#define ECR_RP			BIT(15)
151#define ECR_REC_SHIFT		8
152#define ECR_REC_MASK		(0x7f << ECR_REC_SHIFT)
153#define ECR_TEC_SHIFT		0
154#define ECR_TEC_MASK		0xff
155
156/* Protocol Status Register(PSR) */
157#define PSR_BO		BIT(7)
158#define PSR_EW		BIT(6)
159#define PSR_EP		BIT(5)
160#define PSR_LEC_MASK	0x7
161
162/* Interrupt Register(IR) */
163#define IR_ALL_INT	0xffffffff
164#define IR_STE		BIT(31)
165#define IR_FOE		BIT(30)
166#define IR_ACKE		BIT(29)
167#define IR_BE		BIT(28)
168#define IR_CRCE		BIT(27)
169#define IR_WDI		BIT(26)
170#define IR_BO		BIT(25)
171#define IR_EW		BIT(24)
172#define IR_EP		BIT(23)
173#define IR_ELO		BIT(22)
174#define IR_BEU		BIT(21)
175#define IR_BEC		BIT(20)
176#define IR_DRX		BIT(19)
177#define IR_TOO		BIT(18)
178#define IR_MRAF		BIT(17)
179#define IR_TSW		BIT(16)
180#define IR_TEFL		BIT(15)
181#define IR_TEFF		BIT(14)
182#define IR_TEFW		BIT(13)
183#define IR_TEFN		BIT(12)
184#define IR_TFE		BIT(11)
185#define IR_TCF		BIT(10)
186#define IR_TC		BIT(9)
187#define IR_HPM		BIT(8)
188#define IR_RF1L		BIT(7)
189#define IR_RF1F		BIT(6)
190#define IR_RF1W		BIT(5)
191#define IR_RF1N		BIT(4)
192#define IR_RF0L		BIT(3)
193#define IR_RF0F		BIT(2)
194#define IR_RF0W		BIT(1)
195#define IR_RF0N		BIT(0)
196#define IR_ERR_STATE	(IR_BO | IR_EW | IR_EP)
197#define IR_ERR_LEC	(IR_STE	| IR_FOE | IR_ACKE | IR_BE | IR_CRCE)
198#define IR_ERR_BUS	(IR_ERR_LEC | IR_WDI | IR_ELO | IR_BEU | \
199			 IR_BEC | IR_TOO | IR_MRAF | IR_TSW | IR_TEFL | \
200			 IR_RF1L | IR_RF0L)
201#define IR_ERR_ALL	(IR_ERR_STATE | IR_ERR_BUS)
202
203/* Interrupt Line Select (ILS) */
204#define ILS_ALL_INT0	0x0
205#define ILS_ALL_INT1	0xFFFFFFFF
206
207/* Interrupt Line Enable (ILE) */
208#define ILE_EINT0	BIT(0)
209#define ILE_EINT1	BIT(1)
210
211/* Rx FIFO 0/1 Configuration (RXF0C/RXF1C) */
212#define RXFC_FWM_OFF	24
213#define RXFC_FWM_MASK	0x7f
214#define RXFC_FWM_1	(1 << RXFC_FWM_OFF)
215#define RXFC_FS_OFF	16
216#define RXFC_FS_MASK	0x7f
217
218/* Rx FIFO 0/1 Status (RXF0S/RXF1S) */
219#define RXFS_RFL	BIT(25)
220#define RXFS_FF		BIT(24)
221#define RXFS_FPI_OFF	16
222#define RXFS_FPI_MASK	0x3f0000
223#define RXFS_FGI_OFF	8
224#define RXFS_FGI_MASK	0x3f00
225#define RXFS_FFL_MASK	0x7f
226
227/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
228#define M_CAN_RXESC_8BYTES	0x0
229#define M_CAN_RXESC_64BYTES	0x777
230
231/* Tx Buffer Configuration(TXBC) */
232#define TXBC_NDTB_OFF		16
233#define TXBC_NDTB_MASK		0x3f
234
235/* Tx Buffer Element Size Configuration(TXESC) */
236#define TXESC_TBDS_8BYTES	0x0
237#define TXESC_TBDS_64BYTES	0x7
238
/* Tx Event FIFO Configuration (TXEFC) */
240#define TXEFC_EFS_OFF		16
241#define TXEFC_EFS_MASK		0x3f
242
243/* Message RAM Configuration (in bytes) */
244#define SIDF_ELEMENT_SIZE	4
245#define XIDF_ELEMENT_SIZE	8
246#define RXF0_ELEMENT_SIZE	72
247#define RXF1_ELEMENT_SIZE	72
248#define RXB_ELEMENT_SIZE	16
249#define TXE_ELEMENT_SIZE	8
250#define TXB_ELEMENT_SIZE	72
251
252/* Message RAM Elements */
253#define M_CAN_FIFO_ID		0x0
254#define M_CAN_FIFO_DLC		0x4
255#define M_CAN_FIFO_DATA(n)	(0x8 + ((n) << 2))
256
257/* Rx Buffer Element */
258/* R0 */
259#define RX_BUF_ESI		BIT(31)
260#define RX_BUF_XTD		BIT(30)
261#define RX_BUF_RTR		BIT(29)
262/* R1 */
263#define RX_BUF_ANMF		BIT(31)
264#define RX_BUF_EDL		BIT(21)
265#define RX_BUF_BRS		BIT(20)
266
267/* Tx Buffer Element */
268/* R0 */
269#define TX_BUF_XTD		BIT(30)
270#define TX_BUF_RTR		BIT(29)
271
272/* address offset and element number for each FIFO/Buffer in the Message RAM */
struct mram_cfg {
	u16 off;	/* byte offset of this section within the Message RAM */
	u8  num;	/* number of elements in this section */
};
277
278/* m_can private data structure */
struct m_can_priv {
	struct can_priv can;	/* must be the first member */
	struct napi_struct napi;
	struct net_device *dev;
	struct device *device;
	struct clk *hclk;	/* "hclk" clock from DT */
	struct clk *cclk;	/* "cclk" clock from DT; its rate is the CAN clock freq */
	void __iomem *base;	/* mapped M_CAN register space */
	u32 irqstatus;		/* IR snapshot saved by the ISR for the NAPI poll */

	/* message ram configuration */
	void __iomem *mram_base;
	struct mram_cfg mcfg[MRAM_CFG_NUM];
};
293
294static inline u32 m_can_read(const struct m_can_priv *priv, enum m_can_reg reg)
295{
296	return readl(priv->base + reg);
297}
298
299static inline void m_can_write(const struct m_can_priv *priv,
300			       enum m_can_reg reg, u32 val)
301{
302	writel(val, priv->base + reg);
303}
304
305static inline u32 m_can_fifo_read(const struct m_can_priv *priv,
306				  u32 fgi, unsigned int offset)
307{
308	return readl(priv->mram_base + priv->mcfg[MRAM_RXF0].off +
309		     fgi * RXF0_ELEMENT_SIZE + offset);
310}
311
312static inline void m_can_fifo_write(const struct m_can_priv *priv,
313				    u32 fpi, unsigned int offset, u32 val)
314{
315	return writel(val, priv->mram_base + priv->mcfg[MRAM_TXB].off +
316		      fpi * TXB_ELEMENT_SIZE + offset);
317}
318
319static inline void m_can_config_endisable(const struct m_can_priv *priv,
320					  bool enable)
321{
322	u32 cccr = m_can_read(priv, M_CAN_CCCR);
323	u32 timeout = 10;
324	u32 val = 0;
325
326	if (enable) {
327		/* enable m_can configuration */
328		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
329		udelay(5);
330		/* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
331		m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
332	} else {
333		m_can_write(priv, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE));
334	}
335
336	/* there's a delay for module initialization */
337	if (enable)
338		val = CCCR_INIT | CCCR_CCE;
339
340	while ((m_can_read(priv, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) {
341		if (timeout == 0) {
342			netdev_warn(priv->dev, "Failed to init module\n");
343			return;
344		}
345		timeout--;
346		udelay(1);
347	}
348}
349
350static inline void m_can_enable_all_interrupts(const struct m_can_priv *priv)
351{
352	m_can_write(priv, M_CAN_ILE, ILE_EINT0 | ILE_EINT1);
353}
354
355static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
356{
357	m_can_write(priv, M_CAN_ILE, 0x0);
358}
359
/* read one element from rx fifo 0 into an skb and pass it up the stack.
 * 'rxfs' is the current RXF0S value; the fifo is acknowledged only after
 * all words of the element have been read.
 */
static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
{
	struct net_device_stats *stats = &dev->stats;
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 id, fgi, dlc;
	int i;

	/* calculate the fifo get index for where to read data */
	fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF;
	dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
	/* EDL set in the element's R1 word means this is a CAN FD frame */
	if (dlc & RX_BUF_EDL)
		skb = alloc_canfd_skb(dev, &cf);
	else
		skb = alloc_can_skb(dev, (struct can_frame **)&cf);
	if (!skb) {
		stats->rx_dropped++;
		return;
	}

	/* DLC field sits in bits 19:16 of the R1 word */
	if (dlc & RX_BUF_EDL)
		cf->len = can_dlc2len((dlc >> 16) & 0x0F);
	else
		cf->len = get_can_dlc((dlc >> 16) & 0x0F);

	id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
	if (id & RX_BUF_XTD)
		cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		/* standard ID is left-aligned in the 29 bit field */
		cf->can_id = (id >> 18) & CAN_SFF_MASK;

	if (id & RX_BUF_ESI) {
		cf->flags |= CANFD_ESI;
		netdev_dbg(dev, "ESI Error\n");
	}

	/* RTR frames (classic CAN only) carry no payload */
	if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		if (dlc & RX_BUF_BRS)
			cf->flags |= CANFD_BRS;

		/* copy payload word by word from the message RAM */
		for (i = 0; i < cf->len; i += 4)
			*(u32 *)(cf->data + i) =
				m_can_fifo_read(priv, fgi,
						M_CAN_FIFO_DATA(i / 4));
	}

	/* acknowledge rx fifo 0 */
	m_can_write(priv, M_CAN_RXF0A, fgi);

	stats->rx_packets++;
	stats->rx_bytes += cf->len;

	netif_receive_skb(skb);
}
417
418static int m_can_do_rx_poll(struct net_device *dev, int quota)
419{
420	struct m_can_priv *priv = netdev_priv(dev);
421	u32 pkts = 0;
422	u32 rxfs;
423
424	rxfs = m_can_read(priv, M_CAN_RXF0S);
425	if (!(rxfs & RXFS_FFL_MASK)) {
426		netdev_dbg(dev, "no messages in fifo0\n");
427		return 0;
428	}
429
430	while ((rxfs & RXFS_FFL_MASK) && (quota > 0)) {
431		if (rxfs & RXFS_RFL)
432			netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
433
434		m_can_read_fifo(dev, rxfs);
435
436		quota--;
437		pkts++;
438		rxfs = m_can_read(priv, M_CAN_RXF0S);
439	}
440
441	if (pkts)
442		can_led_event(dev, CAN_LED_EVENT_RX);
443
444	return pkts;
445}
446
447static int m_can_handle_lost_msg(struct net_device *dev)
448{
449	struct net_device_stats *stats = &dev->stats;
450	struct sk_buff *skb;
451	struct can_frame *frame;
452
453	netdev_err(dev, "msg lost in rxf0\n");
454
455	stats->rx_errors++;
456	stats->rx_over_errors++;
457
458	skb = alloc_can_err_skb(dev, &frame);
459	if (unlikely(!skb))
460		return 0;
461
462	frame->can_id |= CAN_ERR_CRTL;
463	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
464
465	netif_receive_skb(skb);
466
467	return 1;
468}
469
/* translate a hardware 'last error code' into a CAN error frame and
 * deliver it; returns 1 if a frame was delivered, 0 on allocation failure
 */
static int m_can_handle_lec_err(struct net_device *dev,
				enum m_can_lec_type lec_type)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* every LEC event counts as a bus error */
	priv->can.can_stats.bus_error++;
	stats->rx_errors++;

	/* propagate the error condition to the CAN stack */
	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	/* check for 'last error code' which tells us the
	 * type of the last error to occur on the CAN bus
	 */
	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	cf->data[2] |= CAN_ERR_PROT_UNSPEC;

	/* map the LEC value onto the matching error-frame bits:
	 * protocol errors go into data[2], location errors into data[3]
	 */
	switch (lec_type) {
	case LEC_STUFF_ERROR:
		netdev_dbg(dev, "stuff error\n");
		cf->data[2] |= CAN_ERR_PROT_STUFF;
		break;
	case LEC_FORM_ERROR:
		netdev_dbg(dev, "form error\n");
		cf->data[2] |= CAN_ERR_PROT_FORM;
		break;
	case LEC_ACK_ERROR:
		netdev_dbg(dev, "ack error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_ACK |
				CAN_ERR_PROT_LOC_ACK_DEL);
		break;
	case LEC_BIT1_ERROR:
		netdev_dbg(dev, "bit1 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT1;
		break;
	case LEC_BIT0_ERROR:
		netdev_dbg(dev, "bit0 error\n");
		cf->data[2] |= CAN_ERR_PROT_BIT0;
		break;
	case LEC_CRC_ERROR:
		netdev_dbg(dev, "CRC error\n");
		cf->data[3] |= (CAN_ERR_PROT_LOC_CRC_SEQ |
				CAN_ERR_PROT_LOC_CRC_DEL);
		break;
	default:
		break;
	}

	/* error frames are accounted like received frames */
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);

	return 1;
}
529
530static int __m_can_get_berr_counter(const struct net_device *dev,
531				    struct can_berr_counter *bec)
532{
533	struct m_can_priv *priv = netdev_priv(dev);
534	unsigned int ecr;
535
536	ecr = m_can_read(priv, M_CAN_ECR);
537	bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
538	bec->txerr = ecr & ECR_TEC_MASK;
539
540	return 0;
541}
542
543static int m_can_get_berr_counter(const struct net_device *dev,
544				  struct can_berr_counter *bec)
545{
546	struct m_can_priv *priv = netdev_priv(dev);
547	int err;
548
549	err = clk_prepare_enable(priv->hclk);
550	if (err)
551		return err;
552
553	err = clk_prepare_enable(priv->cclk);
554	if (err) {
555		clk_disable_unprepare(priv->hclk);
556		return err;
557	}
558
559	__m_can_get_berr_counter(dev, bec);
560
561	clk_disable_unprepare(priv->cclk);
562	clk_disable_unprepare(priv->hclk);
563
564	return 0;
565}
566
567static int m_can_handle_state_change(struct net_device *dev,
568				     enum can_state new_state)
569{
570	struct m_can_priv *priv = netdev_priv(dev);
571	struct net_device_stats *stats = &dev->stats;
572	struct can_frame *cf;
573	struct sk_buff *skb;
574	struct can_berr_counter bec;
575	unsigned int ecr;
576
577	switch (new_state) {
578	case CAN_STATE_ERROR_ACTIVE:
579		/* error warning state */
580		priv->can.can_stats.error_warning++;
581		priv->can.state = CAN_STATE_ERROR_WARNING;
582		break;
583	case CAN_STATE_ERROR_PASSIVE:
584		/* error passive state */
585		priv->can.can_stats.error_passive++;
586		priv->can.state = CAN_STATE_ERROR_PASSIVE;
587		break;
588	case CAN_STATE_BUS_OFF:
589		/* bus-off state */
590		priv->can.state = CAN_STATE_BUS_OFF;
591		m_can_disable_all_interrupts(priv);
592		can_bus_off(dev);
593		break;
594	default:
595		break;
596	}
597
598	/* propagate the error condition to the CAN stack */
599	skb = alloc_can_err_skb(dev, &cf);
600	if (unlikely(!skb))
601		return 0;
602
603	__m_can_get_berr_counter(dev, &bec);
604
605	switch (new_state) {
606	case CAN_STATE_ERROR_ACTIVE:
607		/* error warning state */
608		cf->can_id |= CAN_ERR_CRTL;
609		cf->data[1] = (bec.txerr > bec.rxerr) ?
610			CAN_ERR_CRTL_TX_WARNING :
611			CAN_ERR_CRTL_RX_WARNING;
612		cf->data[6] = bec.txerr;
613		cf->data[7] = bec.rxerr;
614		break;
615	case CAN_STATE_ERROR_PASSIVE:
616		/* error passive state */
617		cf->can_id |= CAN_ERR_CRTL;
618		ecr = m_can_read(priv, M_CAN_ECR);
619		if (ecr & ECR_RP)
620			cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
621		if (bec.txerr > 127)
622			cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
623		cf->data[6] = bec.txerr;
624		cf->data[7] = bec.rxerr;
625		break;
626	case CAN_STATE_BUS_OFF:
627		/* bus-off state */
628		cf->can_id |= CAN_ERR_BUSOFF;
629		break;
630	default:
631		break;
632	}
633
634	stats->rx_packets++;
635	stats->rx_bytes += cf->can_dlc;
636	netif_receive_skb(skb);
637
638	return 1;
639}
640
641static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
642{
643	struct m_can_priv *priv = netdev_priv(dev);
644	int work_done = 0;
645
646	if ((psr & PSR_EW) &&
647	    (priv->can.state != CAN_STATE_ERROR_WARNING)) {
648		netdev_dbg(dev, "entered error warning state\n");
649		work_done += m_can_handle_state_change(dev,
650						       CAN_STATE_ERROR_WARNING);
651	}
652
653	if ((psr & PSR_EP) &&
654	    (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
655		netdev_dbg(dev, "entered error passive state\n");
656		work_done += m_can_handle_state_change(dev,
657						       CAN_STATE_ERROR_PASSIVE);
658	}
659
660	if ((psr & PSR_BO) &&
661	    (priv->can.state != CAN_STATE_BUS_OFF)) {
662		netdev_dbg(dev, "entered error bus off state\n");
663		work_done += m_can_handle_state_change(dev,
664						       CAN_STATE_BUS_OFF);
665	}
666
667	return work_done;
668}
669
670static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
671{
672	if (irqstatus & IR_WDI)
673		netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
674	if (irqstatus & IR_ELO)
675		netdev_err(dev, "Error Logging Overflow\n");
676	if (irqstatus & IR_BEU)
677		netdev_err(dev, "Bit Error Uncorrected\n");
678	if (irqstatus & IR_BEC)
679		netdev_err(dev, "Bit Error Corrected\n");
680	if (irqstatus & IR_TOO)
681		netdev_err(dev, "Timeout reached\n");
682	if (irqstatus & IR_MRAF)
683		netdev_err(dev, "Message RAM access failure occurred\n");
684}
685
686static inline bool is_lec_err(u32 psr)
687{
688	psr &= LEC_UNUSED;
689
690	return psr && (psr != LEC_UNUSED);
691}
692
693static int m_can_handle_bus_errors(struct net_device *dev, u32 irqstatus,
694				   u32 psr)
695{
696	struct m_can_priv *priv = netdev_priv(dev);
697	int work_done = 0;
698
699	if (irqstatus & IR_RF0L)
700		work_done += m_can_handle_lost_msg(dev);
701
702	/* handle lec errors on the bus */
703	if ((priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) &&
704	    is_lec_err(psr))
705		work_done += m_can_handle_lec_err(dev, psr & LEC_UNUSED);
706
707	/* other unproccessed error interrupts */
708	m_can_handle_other_err(dev, irqstatus);
709
710	return work_done;
711}
712
/* NAPI poll handler: process the interrupt causes saved by the ISR plus
 * anything that arrived since, within the given budget
 */
static int m_can_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	struct m_can_priv *priv = netdev_priv(dev);
	int work_done = 0;
	u32 irqstatus, psr;

	/* combine the snapshot taken in the ISR with the live status */
	irqstatus = priv->irqstatus | m_can_read(priv, M_CAN_IR);
	if (!irqstatus)
		goto end;

	psr = m_can_read(priv, M_CAN_PSR);
	if (irqstatus & IR_ERR_STATE)
		work_done += m_can_handle_state_errors(dev, psr);

	if (irqstatus & IR_ERR_BUS)
		work_done += m_can_handle_bus_errors(dev, irqstatus, psr);

	if (irqstatus & IR_RF0N)
		work_done += m_can_do_rx_poll(dev, (quota - work_done));

	/* budget not exhausted: leave polling mode and re-enable the
	 * interrupt lines that the ISR masked before scheduling us
	 */
	if (work_done < quota) {
		napi_complete(napi);
		m_can_enable_all_interrupts(priv);
	}

end:
	return work_done;
}
742
/* interrupt handler: acknowledge all causes, defer rx/error work to
 * NAPI and complete tx echo handling inline
 */
static irqreturn_t m_can_isr(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct m_can_priv *priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;
	u32 ir;

	ir = m_can_read(priv, M_CAN_IR);
	if (!ir)
		/* shared line: this interrupt was not for us */
		return IRQ_NONE;

	/* ACK all irqs */
	if (ir & IR_ALL_INT)
		m_can_write(priv, M_CAN_IR, ir);

	/* schedule NAPI in case of
	 * - rx IRQ
	 * - state change IRQ
	 * - bus error IRQ and bus error reporting
	 */
	if ((ir & IR_RF0N) || (ir & IR_ERR_ALL)) {
		/* save the causes for the poll handler; the interrupt
		 * lines stay masked until m_can_poll() finishes
		 */
		priv->irqstatus = ir;
		m_can_disable_all_interrupts(priv);
		napi_schedule(&priv->napi);
	}

	/* transmission complete interrupt */
	if (ir & IR_TC) {
		/* echo the skb queued in m_can_start_xmit() and reopen
		 * the tx queue that it stopped
		 */
		stats->tx_bytes += can_get_echo_skb(dev, 0);
		stats->tx_packets++;
		can_led_event(dev, CAN_LED_EVENT_TX);
		netif_wake_queue(dev);
	}

	return IRQ_HANDLED;
}
779
/* nominal (arbitration phase) bit timing limits, matching the BTP
 * register field widths
 */
static const struct can_bittiming_const m_can_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 64,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 16,
	.sjw_max = 16,
	.brp_min = 1,
	.brp_max = 1024,	/* BTP.BRP is a 10 bit field (BTR_BRP_MASK) */
	.brp_inc = 1,
};
791
/* data phase (CAN FD bit rate switch) timing limits, matching the FBTP
 * register field widths
 */
static const struct can_bittiming_const m_can_data_bittiming_const = {
	.name = KBUILD_MODNAME,
	.tseg1_min = 2,		/* Time segment 1 = prop_seg + phase_seg1 */
	.tseg1_max = 16,
	.tseg2_min = 1,		/* Time segment 2 = phase_seg2 */
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 32,		/* FBTP.FBRP is a 5 bit field (FBTR_FBRP_MASK) */
	.brp_inc = 1,
};
803
804static int m_can_set_bittiming(struct net_device *dev)
805{
806	struct m_can_priv *priv = netdev_priv(dev);
807	const struct can_bittiming *bt = &priv->can.bittiming;
808	const struct can_bittiming *dbt = &priv->can.data_bittiming;
809	u16 brp, sjw, tseg1, tseg2;
810	u32 reg_btp;
811
812	brp = bt->brp - 1;
813	sjw = bt->sjw - 1;
814	tseg1 = bt->prop_seg + bt->phase_seg1 - 1;
815	tseg2 = bt->phase_seg2 - 1;
816	reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) |
817			(tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT);
818	m_can_write(priv, M_CAN_BTP, reg_btp);
819
820	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
821		brp = dbt->brp - 1;
822		sjw = dbt->sjw - 1;
823		tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
824		tseg2 = dbt->phase_seg2 - 1;
825		reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) |
826				(tseg1 << FBTR_FTSEG1_SHIFT) |
827				(tseg2 << FBTR_FTSEG2_SHIFT);
828		m_can_write(priv, M_CAN_FBTP, reg_btp);
829	}
830
831	return 0;
832}
833
834/* Configure M_CAN chip:
835 * - set rx buffer/fifo element size
836 * - configure rx fifo
837 * - accept non-matching frame into fifo 0
838 * - configure tx buffer
839 * - configure mode
840 * - setup bittiming
841 */
static void m_can_chip_config(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	u32 cccr, test;

	/* all of the following registers require CCCR.INIT/CCE */
	m_can_config_endisable(priv, true);

	/* RX Buffer/FIFO Element Size 64 bytes data field */
	m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);

	/* Accept Non-matching Frames Into FIFO 0 */
	m_can_write(priv, M_CAN_GFC, 0x0);

	/* only support one Tx Buffer currently */
	m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) |
		    priv->mcfg[MRAM_TXB].off);

	/* support 64 bytes payload */
	m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);

	/* tx event fifo: a single element at the parsed offset */
	m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) |
		    priv->mcfg[MRAM_TXE].off);

	/* rx fifo configuration, blocking mode, fifo size 1 */
	m_can_write(priv, M_CAN_RXF0C,
		    (priv->mcfg[MRAM_RXF0].num << RXFC_FS_OFF) |
		    RXFC_FWM_1 | priv->mcfg[MRAM_RXF0].off);

	m_can_write(priv, M_CAN_RXF1C,
		    (priv->mcfg[MRAM_RXF1].num << RXFC_FS_OFF) |
		    RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off);

	/* start from a clean mode configuration */
	cccr = m_can_read(priv, M_CAN_CCCR);
	cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
		(CCCR_CME_MASK << CCCR_CME_SHIFT));
	test = m_can_read(priv, M_CAN_TEST);
	test &= ~TEST_LBCK;

	if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		cccr |= CCCR_MON;

	/* loopback requires test mode to be unlocked via CCCR.TEST */
	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
		cccr |= CCCR_TEST;
		test |= TEST_LBCK;
	}

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
		cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;

	m_can_write(priv, M_CAN_CCCR, cccr);
	m_can_write(priv, M_CAN_TEST, test);

	/* enable interrupts */
	m_can_write(priv, M_CAN_IR, IR_ALL_INT);
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING))
		m_can_write(priv, M_CAN_IE, IR_ALL_INT & ~IR_ERR_LEC);
	else
		m_can_write(priv, M_CAN_IE, IR_ALL_INT);

	/* route all interrupts to INT0 */
	m_can_write(priv, M_CAN_ILS, ILS_ALL_INT0);

	/* set bittiming params */
	m_can_set_bittiming(dev);

	/* leave configuration mode; the controller starts operating */
	m_can_config_endisable(priv, false);
}
909
910static void m_can_start(struct net_device *dev)
911{
912	struct m_can_priv *priv = netdev_priv(dev);
913
914	/* basic m_can configuration */
915	m_can_chip_config(dev);
916
917	priv->can.state = CAN_STATE_ERROR_ACTIVE;
918
919	m_can_enable_all_interrupts(priv);
920}
921
922static int m_can_set_mode(struct net_device *dev, enum can_mode mode)
923{
924	switch (mode) {
925	case CAN_MODE_START:
926		m_can_start(dev);
927		netif_wake_queue(dev);
928		break;
929	default:
930		return -EOPNOTSUPP;
931	}
932
933	return 0;
934}
935
static void free_m_can_dev(struct net_device *dev)
{
	/* counterpart to alloc_m_can_dev() */
	free_candev(dev);
}
940
941static struct net_device *alloc_m_can_dev(void)
942{
943	struct net_device *dev;
944	struct m_can_priv *priv;
945
946	dev = alloc_candev(sizeof(*priv), 1);
947	if (!dev)
948		return NULL;
949
950	priv = netdev_priv(dev);
951	netif_napi_add(dev, &priv->napi, m_can_poll, M_CAN_NAPI_WEIGHT);
952
953	priv->dev = dev;
954	priv->can.bittiming_const = &m_can_bittiming_const;
955	priv->can.data_bittiming_const = &m_can_data_bittiming_const;
956	priv->can.do_set_mode = m_can_set_mode;
957	priv->can.do_get_berr_counter = m_can_get_berr_counter;
958	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
959					CAN_CTRLMODE_LISTENONLY |
960					CAN_CTRLMODE_BERR_REPORTING |
961					CAN_CTRLMODE_FD;
962
963	return dev;
964}
965
/* ndo_open: enable clocks, register the IRQ and start the controller */
static int m_can_open(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	int err;

	/* ungate both clocks before touching the hardware */
	err = clk_prepare_enable(priv->hclk);
	if (err)
		return err;

	err = clk_prepare_enable(priv->cclk);
	if (err)
		goto exit_disable_hclk;

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_disable_cclk;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, m_can_isr, IRQF_SHARED, dev->name,
			  dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	/* start the m_can controller */
	m_can_start(dev);

	can_led_event(dev, CAN_LED_EVENT_OPEN);
	napi_enable(&priv->napi);
	netif_start_queue(dev);

	return 0;

	/* unwind in reverse order of acquisition */
exit_irq_fail:
	close_candev(dev);
exit_disable_cclk:
	clk_disable_unprepare(priv->cclk);
exit_disable_hclk:
	clk_disable_unprepare(priv->hclk);
	return err;
}
1011
1012static void m_can_stop(struct net_device *dev)
1013{
1014	struct m_can_priv *priv = netdev_priv(dev);
1015
1016	/* disable all interrupts */
1017	m_can_disable_all_interrupts(priv);
1018
1019	clk_disable_unprepare(priv->hclk);
1020	clk_disable_unprepare(priv->cclk);
1021
1022	/* set the state as STOPPED */
1023	priv->can.state = CAN_STATE_STOPPED;
1024}
1025
/* ndo_stop: tear down in reverse order of m_can_open() — stop the tx
 * queue and NAPI first, then the controller, IRQ and candev
 */
static int m_can_close(struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	m_can_stop(dev);
	free_irq(dev->irq, dev);
	close_candev(dev);
	can_led_event(dev, CAN_LED_EVENT_STOP);

	return 0;
}
1039
/* ndo_start_xmit: write the frame into tx buffer 0 and request its
 * transmission; the queue stays stopped until the TC interrupt fires
 * (see m_can_isr())
 */
static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
				    struct net_device *dev)
{
	struct m_can_priv *priv = netdev_priv(dev);
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 id, cccr;
	int i;

	if (can_dropped_invalid_skb(dev, skb))
		return NETDEV_TX_OK;

	/* only one tx buffer: no further frames until this one is done */
	netif_stop_queue(dev);

	/* build the element's T0 word: identifier plus XTD/RTR bits */
	if (cf->can_id & CAN_EFF_FLAG) {
		id = cf->can_id & CAN_EFF_MASK;
		id |= TX_BUF_XTD;
	} else {
		/* standard ID is left-aligned within the 29 bit field */
		id = ((cf->can_id & CAN_SFF_MASK) << 18);
	}

	if (cf->can_id & CAN_RTR_FLAG)
		id |= TX_BUF_RTR;

	/* message ram configuration */
	m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
	m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16);

	for (i = 0; i < cf->len; i += 4)
		m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4),
				 *(u32 *)(cf->data + i));

	/* queue the skb for echo on tx completion */
	can_put_echo_skb(skb, dev, 0);

	if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
		/* select the tx frame format for this frame via CCCR.CMR */
		cccr = m_can_read(priv, M_CAN_CCCR);
		cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
		if (can_is_canfd_skb(skb)) {
			if (cf->flags & CANFD_BRS)
				cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT;
			else
				cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT;
		} else {
			cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
		}
		m_can_write(priv, M_CAN_CCCR, cccr);
	}

	/* enable first TX buffer to start transfer  */
	m_can_write(priv, M_CAN_TXBTIE, 0x1);
	m_can_write(priv, M_CAN_TXBAR, 0x1);

	return NETDEV_TX_OK;
}
1093
static const struct net_device_ops m_can_netdev_ops = {
	.ndo_open = m_can_open,			/* bring the interface up */
	.ndo_stop = m_can_close,		/* bring the interface down */
	.ndo_start_xmit = m_can_start_xmit,	/* transmit one frame */
	.ndo_change_mtu = can_change_mtu,	/* CAN core MTU helper */
};
1100
static int register_m_can_dev(struct net_device *dev)
{
	/* attach the netdev ops and register with the CAN device layer */
	dev->flags |= IFF_ECHO;	/* we support local echo */
	dev->netdev_ops = &m_can_netdev_ops;

	return register_candev(dev);
}
1108
/* map the Message RAM and derive each section's offset from the element
 * counts given in the "bosch,mram-cfg" DT property; sections are laid
 * out back to back starting at the SIDF offset
 */
static int m_can_of_parse_mram(struct platform_device *pdev,
			       struct m_can_priv *priv)
{
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	void __iomem *addr;
	u32 out_val[MRAM_CFG_LEN];
	int i, start, end, ret;

	/* message ram could be shared */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
	if (!res)
		return -ENODEV;

	addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!addr)
		return -ENOMEM;

	/* get message ram configuration */
	ret = of_property_read_u32_array(np, "bosch,mram-cfg",
					 out_val, sizeof(out_val) / 4);
	if (ret) {
		dev_err(&pdev->dev, "can not get message ram configuration\n");
		return -ENODEV;
	}

	/* out_val: sidf-off, sidf-num, xidf-num, rxf0-num, rxf1-num,
	 * rxb-num, txe-num, txb-num; each offset follows the previous
	 * section
	 */
	priv->mram_base = addr;
	priv->mcfg[MRAM_SIDF].off = out_val[0];
	priv->mcfg[MRAM_SIDF].num = out_val[1];
	priv->mcfg[MRAM_XIDF].off = priv->mcfg[MRAM_SIDF].off +
			priv->mcfg[MRAM_SIDF].num * SIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_XIDF].num = out_val[2];
	priv->mcfg[MRAM_RXF0].off = priv->mcfg[MRAM_XIDF].off +
			priv->mcfg[MRAM_XIDF].num * XIDF_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF0].num = out_val[3] & RXFC_FS_MASK;
	priv->mcfg[MRAM_RXF1].off = priv->mcfg[MRAM_RXF0].off +
			priv->mcfg[MRAM_RXF0].num * RXF0_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXF1].num = out_val[4] & RXFC_FS_MASK;
	priv->mcfg[MRAM_RXB].off = priv->mcfg[MRAM_RXF1].off +
			priv->mcfg[MRAM_RXF1].num * RXF1_ELEMENT_SIZE;
	priv->mcfg[MRAM_RXB].num = out_val[5];
	priv->mcfg[MRAM_TXE].off = priv->mcfg[MRAM_RXB].off +
			priv->mcfg[MRAM_RXB].num * RXB_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXE].num = out_val[6];
	priv->mcfg[MRAM_TXB].off = priv->mcfg[MRAM_TXE].off +
			priv->mcfg[MRAM_TXE].num * TXE_ELEMENT_SIZE;
	priv->mcfg[MRAM_TXB].num = out_val[7] & TXBC_NDTB_MASK;

	dev_dbg(&pdev->dev, "mram_base %p sidf 0x%x %d xidf 0x%x %d rxf0 0x%x %d rxf1 0x%x %d rxb 0x%x %d txe 0x%x %d txb 0x%x %d\n",
		priv->mram_base,
		priv->mcfg[MRAM_SIDF].off, priv->mcfg[MRAM_SIDF].num,
		priv->mcfg[MRAM_XIDF].off, priv->mcfg[MRAM_XIDF].num,
		priv->mcfg[MRAM_RXF0].off, priv->mcfg[MRAM_RXF0].num,
		priv->mcfg[MRAM_RXF1].off, priv->mcfg[MRAM_RXF1].num,
		priv->mcfg[MRAM_RXB].off, priv->mcfg[MRAM_RXB].num,
		priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
		priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);

	/* initialize the entire Message RAM in use to avoid possible
	 * ECC/parity checksum errors when reading an uninitialized buffer
	 */
	start = priv->mcfg[MRAM_SIDF].off;
	end = priv->mcfg[MRAM_TXB].off +
		priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
	for (i = start; i < end; i += 4)
		writel(0x0, priv->mram_base + i);

	return 0;
}
1178
1179static int m_can_plat_probe(struct platform_device *pdev)
1180{
1181	struct net_device *dev;
1182	struct m_can_priv *priv;
1183	struct resource *res;
1184	void __iomem *addr;
1185	struct clk *hclk, *cclk;
1186	int irq, ret;
1187
1188	hclk = devm_clk_get(&pdev->dev, "hclk");
1189	cclk = devm_clk_get(&pdev->dev, "cclk");
1190	if (IS_ERR(hclk) || IS_ERR(cclk)) {
1191		dev_err(&pdev->dev, "no clock find\n");
1192		return -ENODEV;
1193	}
1194
1195	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "m_can");
1196	addr = devm_ioremap_resource(&pdev->dev, res);
1197	irq = platform_get_irq_byname(pdev, "int0");
1198	if (IS_ERR(addr) || irq < 0)
1199		return -EINVAL;
1200
1201	/* allocate the m_can device */
1202	dev = alloc_m_can_dev();
1203	if (!dev)
1204		return -ENOMEM;
1205
1206	priv = netdev_priv(dev);
1207	dev->irq = irq;
1208	priv->base = addr;
1209	priv->device = &pdev->dev;
1210	priv->hclk = hclk;
1211	priv->cclk = cclk;
1212	priv->can.clock.freq = clk_get_rate(cclk);
1213
1214	ret = m_can_of_parse_mram(pdev, priv);
1215	if (ret)
1216		goto failed_free_dev;
1217
1218	platform_set_drvdata(pdev, dev);
1219	SET_NETDEV_DEV(dev, &pdev->dev);
1220
1221	ret = register_m_can_dev(dev);
1222	if (ret) {
1223		dev_err(&pdev->dev, "registering %s failed (err=%d)\n",
1224			KBUILD_MODNAME, ret);
1225		goto failed_free_dev;
1226	}
1227
1228	devm_can_led_init(dev);
1229
1230	dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n",
1231		 KBUILD_MODNAME, priv->base, dev->irq);
1232
1233	return 0;
1234
1235failed_free_dev:
1236	free_m_can_dev(dev);
1237	return ret;
1238}
1239
1240static __maybe_unused int m_can_suspend(struct device *dev)
1241{
1242	struct net_device *ndev = dev_get_drvdata(dev);
1243	struct m_can_priv *priv = netdev_priv(ndev);
1244
1245	if (netif_running(ndev)) {
1246		netif_stop_queue(ndev);
1247		netif_device_detach(ndev);
1248	}
1249
1250	/* TODO: enter low power */
1251
1252	priv->can.state = CAN_STATE_SLEEPING;
1253
1254	return 0;
1255}
1256
1257static __maybe_unused int m_can_resume(struct device *dev)
1258{
1259	struct net_device *ndev = dev_get_drvdata(dev);
1260	struct m_can_priv *priv = netdev_priv(ndev);
1261
1262	/* TODO: exit low power */
1263
1264	priv->can.state = CAN_STATE_ERROR_ACTIVE;
1265
1266	if (netif_running(ndev)) {
1267		netif_device_attach(ndev);
1268		netif_start_queue(ndev);
1269	}
1270
1271	return 0;
1272}
1273
/* Remove the CAN netdevice from the networking core.  Thin wrapper
 * around unregister_candev(), kept for symmetry with
 * register_m_can_dev() used in the probe path.
 */
static void unregister_m_can_dev(struct net_device *dev)
{
	unregister_candev(dev);
}
1278
/* Unbind callback: unregister the CAN netdevice and release it.
 *
 * Note: no platform_set_drvdata(pdev, NULL) here — the driver core
 * clears drvdata automatically when the device is unbound, so doing it
 * manually is redundant.
 */
static int m_can_plat_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_m_can_dev(dev);
	free_m_can_dev(dev);

	return 0;
}
1290
/* System-sleep PM callbacks (no runtime PM).  SET_SYSTEM_SLEEP_PM_OPS()
 * wires m_can_suspend/m_can_resume into the suspend/resume slots only
 * when CONFIG_PM_SLEEP is enabled; otherwise the table stays empty,
 * which is why the callbacks are tagged __maybe_unused.
 */
static const struct dev_pm_ops m_can_pmops = {
	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
};
1294
/* Devicetree match table; .data is currently unused.  The
 * MODULE_DEVICE_TABLE() entry exports the table so udev/modprobe can
 * autoload this module when a matching DT node is present.
 */
static const struct of_device_id m_can_of_table[] = {
	{ .compatible = "bosch,m_can", .data = NULL },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, m_can_of_table);
1300
/* Platform-driver glue: probe/remove callbacks, PM ops and OF matching. */
static struct platform_driver m_can_plat_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = m_can_of_table,
		.pm     = &m_can_pmops,
	},
	.probe = m_can_plat_probe,
	.remove = m_can_plat_remove,
};

/* Registers the driver on module init and unregisters it on exit */
module_platform_driver(m_can_plat_driver);

MODULE_AUTHOR("Dong Aisheng <b29396@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller");
1316