tsi721.c revision 9a9a9a7adafe62a34de8b4fb48936c1c5f9bafa5
1/*
2 * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge
3 *
4 * Copyright 2011 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 * Chul Kim <chul.kim@idt.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16 * more details.
17 *
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
21 */
22
23#include <linux/io.h>
24#include <linux/errno.h>
25#include <linux/init.h>
26#include <linux/ioport.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/rio.h>
31#include <linux/rio_drv.h>
32#include <linux/dma-mapping.h>
33#include <linux/interrupt.h>
34#include <linux/kfifo.h>
35#include <linux/delay.h>
36
37#include "tsi721.h"
38
39#define DEBUG_PW	/* Inbound Port-Write debugging */
40
41static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
42static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
43
44/**
45 * tsi721_lcread - read from local SREP config space
46 * @mport: RapidIO master port info
47 * @index: ID of RapidIO interface
48 * @offset: Offset into configuration space
49 * @len: Length (in bytes) of the maintenance transaction
50 * @data: Location to store the read value
51 *
52 * Generates a local SREP space read. Returns %0 on
53 * success or %-EINVAL on failure.
54 */
55static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
56			 int len, u32 *data)
57{
58	struct tsi721_device *priv = mport->priv;
59
60	if (len != sizeof(u32))
61		return -EINVAL; /* only 32-bit access is supported */
62
63	*data = ioread32(priv->regs + offset);
64
65	return 0;
66}
67
68/**
69 * tsi721_lcwrite - write into local SREP config space
70 * @mport: RapidIO master port info
71 * @index: ID of RapidIO interface
72 * @offset: Offset into configuration space
73 * @len: Length (in bytes) of the maintenance transaction
74 * @data: Value to be written
75 *
76 * Generates a local write into SREP configuration space. Returns %0 on
77 * success or %-EINVAL on failure.
78 */
79static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
80			  int len, u32 data)
81{
82	struct tsi721_device *priv = mport->priv;
83
84	if (len != sizeof(u32))
85		return -EINVAL; /* only 32-bit access is supported */
86
87	iowrite32(data, priv->regs + offset);
88
89	return 0;
90}
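/*
 * Editorial example (not part of tsi721.c): the RapidIO core reaches
 * tsi721_lcread()/tsi721_lcwrite() through its generic local config-space
 * accessors. A minimal sketch, assuming a valid mport pointer:
 *
 *	u32 did;
 *
 *	if (!rio_local_read_config_32(mport, RIO_DID_CSR, &did))
 *		pr_info("local base device ID CSR: 0x%08x\n", did);
 *	rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR, 0x1);
 *
 * Only 32-bit accesses succeed; any other length yields -EINVAL.
 */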
91
92/**
93 * tsi721_maint_dma - Helper function to generate RapidIO maintenance
94 *                    transactions using designated Tsi721 DMA channel.
95 * @priv: pointer to tsi721 private data
96 * @sys_size: RapidIO transport system size
97 * @destid: Destination ID of transaction
98 * @hopcount: Number of hops to target device
99 * @offset: Offset into configuration space
100 * @len: Length (in bytes) of the maintenance transaction
101 * @data: Location to read data from or store data to
102 * @do_wr: Operation flag (1 == MAINT_WR)
103 *
104 * Generates a RapidIO maintenance transaction (Read or Write).
105 * Returns %0 on success and %-EINVAL or %-EIO on failure.
106 */
107static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
108			u16 destid, u8 hopcount, u32 offset, int len,
109			u32 *data, int do_wr)
110{
111	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
112	struct tsi721_dma_desc *bd_ptr;
113	u32 rd_count, swr_ptr, ch_stat;
114	int i, err = 0;
115	u32 op = do_wr ? MAINT_WR : MAINT_RD;
116
117	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
118		return -EINVAL;
119
120	bd_ptr = priv->mdma.bd_base;
121
122	rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
123
124	/* Initialize DMA descriptor */
125	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
126	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
127	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
128	bd_ptr[0].raddr_hi = 0;
129	if (do_wr)
130		bd_ptr[0].data[0] = cpu_to_be32p(data);
131	else
132		bd_ptr[0].data[0] = 0xffffffff;
133
134	mb();
135
136	/* Start DMA operation */
137	iowrite32(rd_count + 2,	regs + TSI721_DMAC_DWRCNT);
138	ioread32(regs + TSI721_DMAC_DWRCNT);
139	i = 0;
140
141	/* Wait until DMA transfer is finished */
142	while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
143							& TSI721_DMAC_STS_RUN) {
144		udelay(1);
145		if (++i >= 5000000) {
146			dev_dbg(&priv->pdev->dev,
147				"%s : DMA[%d] transfer timeout ch_status=%x\n",
148				__func__, priv->mdma.ch_id, ch_stat);
149			if (!do_wr)
150				*data = 0xffffffff;
151			err = -EIO;
152			goto err_out;
153		}
154	}
155
156	if (ch_stat & TSI721_DMAC_STS_ABORT) {
157		/* If DMA operation aborted due to error,
158		 * reinitialize DMA channel
159		 */
160		dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n",
161			__func__, ch_stat);
162		dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
163			do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
164		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
165		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
166		udelay(10);
167		iowrite32(0, regs + TSI721_DMAC_DWRCNT);
168		udelay(1);
169		if (!do_wr)
170			*data = 0xffffffff;
171		err = -EIO;
172		goto err_out;
173	}
174
175	if (!do_wr)
176		*data = be32_to_cpu(bd_ptr[0].data[0]);
177
178	/*
179	 * Update descriptor status FIFO RD pointer.
180	 * NOTE: Skipping check and clear FIFO entries because we are waiting
181	 * for transfer to be completed.
182	 */
183	swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
184	iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
185err_out:
186
187	return err;
188}
189
190/**
191 * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
192 *                    using Tsi721 BDMA engine.
193 * @mport: RapidIO master port control structure
194 * @index: ID of RapidIO interface
195 * @destid: Destination ID of transaction
196 * @hopcount: Number of hops to target device
197 * @offset: Offset into configuration space
198 * @len: Length (in bytes) of the maintenance transaction
199 * @data: Location to store the read value
200 *
201 * Generates a RapidIO maintenance read transaction.
202 * Returns %0 on success and %-EINVAL or %-EIO on failure.
203 */
204static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
205			u8 hopcount, u32 offset, int len, u32 *data)
206{
207	struct tsi721_device *priv = mport->priv;
208
209	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
210				offset, len, data, 0);
211}
212
213/**
214 * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
215 *                     using Tsi721 BDMA engine
216 * @mport: RapidIO master port control structure
217 * @index: ID of RapidIO interface
218 * @destid: Destination ID of transaction
219 * @hopcount: Number of hops to target device
220 * @offset: Offset into configuration space
221 * @len: Length (in bytes) of the maintenance transaction
222 * @data: Value to be written
223 *
224 * Generates a RapidIO maintenance write transaction.
225 * Returns %0 on success and %-EINVAL or %-EIO on failure.
226 */
227static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid,
228			 u8 hopcount, u32 offset, int len, u32 data)
229{
230	struct tsi721_device *priv = mport->priv;
231	u32 temp = data;
232
233	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
234				offset, len, &temp, 1);
235}
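/*
 * Editorial example (not part of tsi721.c): these two wrappers back the
 * generic maintenance accessors used during enumeration/discovery. A hedged
 * sketch reading the Device Identity CAR of a device one hop away (the
 * destid and hopcount values are hypothetical):
 *
 *	u32 result;
 *
 *	if (!rio_mport_read_config_32(mport, 0xff, 0, RIO_DEV_ID_CAR, &result))
 *		pr_info("remote Device Identity CAR: 0x%08x\n", result);
 */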
236
237/**
238 * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler
239 * @mport: RapidIO master port structure
240 *
241 * Handles inbound port-write interrupts. Copies PW message from an internal
242 * buffer into PW message FIFO and schedules deferred routine to process
243 * queued messages.
244 */
245static int
246tsi721_pw_handler(struct rio_mport *mport)
247{
248	struct tsi721_device *priv = mport->priv;
249	u32 pw_stat;
250	u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)];
251
252
253	pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT);
254
255	if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) {
256		pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0));
257		pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1));
258		pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2));
259		pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3));
260
261		/* Queue PW message (if there is room in FIFO),
262		 * otherwise discard it.
263		 */
264		spin_lock(&priv->pw_fifo_lock);
265		if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE)
266			kfifo_in(&priv->pw_fifo, pw_buf,
267						TSI721_RIO_PW_MSG_SIZE);
268		else
269			priv->pw_discard_count++;
270		spin_unlock(&priv->pw_fifo_lock);
271	}
272
273	/* Clear pending PW interrupts */
274	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
275		  priv->regs + TSI721_RIO_PW_RX_STAT);
276
277	schedule_work(&priv->pw_work);
278
279	return 0;
280}
281
282static void tsi721_pw_dpc(struct work_struct *work)
283{
284	struct tsi721_device *priv = container_of(work, struct tsi721_device,
285						    pw_work);
286	u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message
287							buffer for RIO layer */
288
289	/*
290	 * Process port-write messages
291	 */
292	while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer,
293			 TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) {
294		/* Process one message */
295#ifdef DEBUG_PW
296		{
297		u32 i;
298		pr_debug("%s : Port-Write Message:", __func__);
299		for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) {
300			pr_debug("0x%02x: %08x %08x %08x %08x", i*4,
301				msg_buffer[i], msg_buffer[i + 1],
302				msg_buffer[i + 2], msg_buffer[i + 3]);
303			i += 4;
304		}
305		pr_debug("\n");
306		}
307#endif
308		/* Pass the port-write message to RIO core for processing */
309		rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer);
310	}
311}
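/*
 * Editorial example (not part of tsi721.c): rio_inb_pwrite_handler()
 * dispatches each dequeued message to a callback registered through
 * rio_request_inb_pwrite(). A sketch with a hypothetical per-device
 * callback my_pw_cb():
 *
 *	static int my_pw_cb(struct rio_dev *rdev, union rio_pw_msg *msg,
 *			    int step)
 *	{
 *		pr_info("%s: port-write, comptag 0x%08x\n",
 *			rio_name(rdev), msg->em.comptag);
 *		return 0;
 *	}
 *
 *	rio_request_inb_pwrite(rdev, my_pw_cb);
 */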
312
313/**
314 * tsi721_pw_enable - enable/disable port-write interface
315 * @mport: Master port implementing the port write unit
316 * @enable:    1=enable; 0=disable port-write message handling
317 */
318static int tsi721_pw_enable(struct rio_mport *mport, int enable)
319{
320	struct tsi721_device *priv = mport->priv;
321	u32 rval;
322
323	rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE);
324
325	if (enable)
326		rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX;
327	else
328		rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX;
329
330	/* Clear pending PW interrupts */
331	iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL,
332		  priv->regs + TSI721_RIO_PW_RX_STAT);
333	/* Update enable bits */
334	iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE);
335
336	return 0;
337}
338
339/**
340 * tsi721_dsend - Send a RapidIO doorbell
341 * @mport: RapidIO master port info
342 * @index: ID of RapidIO interface
343 * @destid: Destination ID of target device
344 * @data: 16-bit info field of RapidIO doorbell
345 *
346 * Sends a RapidIO doorbell message. Always returns %0.
347 */
348static int tsi721_dsend(struct rio_mport *mport, int index,
349			u16 destid, u16 data)
350{
351	struct tsi721_device *priv = mport->priv;
352	u32 offset;
353
354	offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) |
355		 (destid << 2);
356
357	dev_dbg(&priv->pdev->dev,
358		"Send Doorbell 0x%04x to destID 0x%x\n", data, destid);
359	iowrite16be(data, priv->odb_base + offset);
360
361	return 0;
362}
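/*
 * Editorial example (not part of tsi721.c): client drivers normally send
 * doorbells through the core wrapper, which resolves to tsi721_dsend() for
 * a Tsi721 mport. The 16-bit info value below is arbitrary:
 *
 *	rio_send_doorbell(rdev, 0x5555);
 */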
363
364/**
365 * tsi721_dbell_handler - Tsi721 doorbell interrupt handler
366 * @mport: RapidIO master port structure
367 *
368 * Handles inbound doorbell interrupts. Disables further inbound doorbell
369 * interrupts and schedules a deferred routine that processes queued
370 * doorbell entries.
371 */
372static int
373tsi721_dbell_handler(struct rio_mport *mport)
374{
375	struct tsi721_device *priv = mport->priv;
376	u32 regval;
377
378	/* Disable IDB interrupts */
379	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
380	regval &= ~TSI721_SR_CHINT_IDBQRCV;
381	iowrite32(regval,
382		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
383
384	schedule_work(&priv->idb_work);
385
386	return 0;
387}
388
389static void tsi721_db_dpc(struct work_struct *work)
390{
391	struct tsi721_device *priv = container_of(work, struct tsi721_device,
392						    idb_work);
393	struct rio_mport *mport;
394	struct rio_dbell *dbell;
395	int found = 0;
396	u32 wr_ptr, rd_ptr;
397	u64 *idb_entry;
398	u32 regval;
399	union {
400		u64 msg;
401		u8  bytes[8];
402	} idb;
403
404	/*
405	 * Process queued inbound doorbells
406	 */
407	mport = priv->mport;
408
409	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
410	rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
411
412	while (wr_ptr != rd_ptr) {
413		idb_entry = (u64 *)(priv->idb_base +
414					(TSI721_IDB_ENTRY_SIZE * rd_ptr));
415		rd_ptr++;
416		rd_ptr %= IDB_QSIZE;
417		idb.msg = *idb_entry;
418		*idb_entry = 0;
419
420		/* Process one doorbell */
421		list_for_each_entry(dbell, &mport->dbells, node) {
422			if ((dbell->res->start <= DBELL_INF(idb.bytes)) &&
423			    (dbell->res->end >= DBELL_INF(idb.bytes))) {
424				found = 1;
425				break;
426			}
427		}
428
429		if (found) {
430			dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes),
431				    DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
432		} else {
433			dev_dbg(&priv->pdev->dev,
434				"spurious inb doorbell, sid %2.2x tid %2.2x"
435				" info %4.4x\n", DBELL_SID(idb.bytes),
436				DBELL_TID(idb.bytes), DBELL_INF(idb.bytes));
437		}
438
439		wr_ptr = ioread32(priv->regs +
440				  TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
441	}
442
443	iowrite32(rd_ptr & (IDB_QSIZE - 1),
444		priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
445
446	/* Re-enable IDB interrupts */
447	regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
448	regval |= TSI721_SR_CHINT_IDBQRCV;
449	iowrite32(regval,
450		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
451
452	wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
453	if (wr_ptr != rd_ptr)
454		schedule_work(&priv->idb_work);
455}
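/*
 * Editorial example (not part of tsi721.c): the dinb callback invoked above
 * is installed with rio_request_inb_dbell(). A sketch with a hypothetical
 * handler servicing the info range 0x0000-0x00ff:
 *
 *	static void my_dbell_cb(struct rio_mport *mport, void *dev_id,
 *				u16 src, u16 dst, u16 info)
 *	{
 *		pr_info("doorbell 0x%04x from destid 0x%04x\n", info, src);
 *	}
 *
 *	rio_request_inb_dbell(mport, NULL, 0x0000, 0x00ff, my_dbell_cb);
 */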
456
457/**
458 * tsi721_irqhandler - Tsi721 interrupt handler
459 * @irq: Linux interrupt number
460 * @ptr: Pointer to interrupt-specific data (mport structure)
461 *
462 * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported
463 * interrupt events and calls the event-specific handlers.
464 */
465static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
466{
467	struct rio_mport *mport = (struct rio_mport *)ptr;
468	struct tsi721_device *priv = mport->priv;
469	u32 dev_int;
470	u32 dev_ch_int;
471	u32 intval;
472	u32 ch_inte;
473
474	dev_int = ioread32(priv->regs + TSI721_DEV_INT);
475	if (!dev_int)
476		return IRQ_NONE;
477
478	dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT);
479
480	if (dev_int & TSI721_DEV_INT_SR2PC_CH) {
481		/* Service SR2PC Channel interrupts */
482		if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) {
483			/* Service Inbound Doorbell interrupt */
484			intval = ioread32(priv->regs +
485						TSI721_SR_CHINT(IDB_QUEUE));
486			if (intval & TSI721_SR_CHINT_IDBQRCV)
487				tsi721_dbell_handler(mport);
488			else
489				dev_info(&priv->pdev->dev,
490					"Unsupported SR_CH_INT %x\n", intval);
491
492			/* Clear interrupts */
493			iowrite32(intval,
494				priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
495			ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
496		}
497	}
498
499	if (dev_int & TSI721_DEV_INT_SMSG_CH) {
500		int ch;
501
502		/*
503		 * Service channel interrupts from Messaging Engine
504		 */
505
506		if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */
507			/* Disable signaled IB MSG Channel interrupts */
508			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
509			ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M);
510			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
511
512			/*
513			 * Process Inbound Message interrupt for each MBOX
514			 */
515			for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) {
516				if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch)))
517					continue;
518				tsi721_imsg_handler(priv, ch);
519			}
520		}
521
522		if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */
523			/* Disable signaled OB MSG Channel interrupts */
524			ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
525			ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M);
526			iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
527
528			/*
529			 * Process Outbound Message interrupts for each MBOX
530			 */
531
532			for (ch = 0; ch < RIO_MAX_MBOX; ch++) {
533				if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch)))
534					continue;
535				tsi721_omsg_handler(priv, ch);
536			}
537		}
538	}
539
540	if (dev_int & TSI721_DEV_INT_SRIO) {
541		/* Service SRIO MAC interrupts */
542		intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
543		if (intval & TSI721_RIO_EM_INT_STAT_PW_RX)
544			tsi721_pw_handler(mport);
545	}
546
547#ifdef CONFIG_RAPIDIO_DMA_ENGINE
548	if (dev_int & TSI721_DEV_INT_BDMA_CH) {
549		int ch;
550
551		if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
552			dev_dbg(&priv->pdev->dev,
553				"IRQ from DMA channel 0x%08x\n", dev_ch_int);
554
555			for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
556				if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
557					continue;
558				tsi721_bdma_handler(&priv->bdma[ch]);
559			}
560		}
561	}
562#endif
563	return IRQ_HANDLED;
564}
565
566static void tsi721_interrupts_init(struct tsi721_device *priv)
567{
568	u32 intr;
569
570	/* Enable IDB interrupts */
571	iowrite32(TSI721_SR_CHINT_ALL,
572		priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
573	iowrite32(TSI721_SR_CHINT_IDBQRCV,
574		priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
575
576	/* Enable SRIO MAC interrupts */
577	iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
578		priv->regs + TSI721_RIO_EM_DEV_INT_EN);
579
580	/* Enable interrupts from channels in use */
581#ifdef CONFIG_RAPIDIO_DMA_ENGINE
582	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
583		(TSI721_INT_BDMA_CHAN_M &
584		 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
585#else
586	intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
587#endif
588	iowrite32(intr,	priv->regs + TSI721_DEV_CHAN_INTE);
589
590	if (priv->flags & TSI721_USING_MSIX)
591		intr = TSI721_DEV_INT_SRIO;
592	else
593		intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
594			TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
595
596	iowrite32(intr, priv->regs + TSI721_DEV_INTE);
597	ioread32(priv->regs + TSI721_DEV_INTE);
598}
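/*
 * Editorial note: Tsi721 gates interrupts at two levels. A source fires
 * only if it is enabled both in its per-channel enable register and in the
 * device-level masks written above. A hedged sketch of enabling one more
 * BDMA channel at run time (the channel number is hypothetical):
 *
 *	u32 val = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
 *
 *	iowrite32(val | TSI721_INT_BDMA_CHAN(2),
 *		  priv->regs + TSI721_DEV_CHAN_INTE);
 */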
599
600#ifdef CONFIG_PCI_MSI
601/**
602 * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging
603 * @irq: Linux interrupt number
604 * @ptr: Pointer to interrupt-specific data (mport structure)
605 *
606 * Handles outbound messaging interrupts signaled using MSI-X.
607 */
608static irqreturn_t tsi721_omsg_msix(int irq, void *ptr)
609{
610	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
611	int mbox;
612
613	mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX;
614	tsi721_omsg_handler(priv, mbox);
615	return IRQ_HANDLED;
616}
617
618/**
619 * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging
620 * @irq: Linux interrupt number
621 * @ptr: Pointer to interrupt-specific data (mport structure)
622 *
623 * Handles inbound messaging interrupts signaled using MSI-X.
624 */
625static irqreturn_t tsi721_imsg_msix(int irq, void *ptr)
626{
627	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
628	int mbox;
629
630	mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX;
631	tsi721_imsg_handler(priv, mbox + 4);
632	return IRQ_HANDLED;
633}
634
635/**
636 * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler
637 * @irq: Linux interrupt number
638 * @ptr: Pointer to interrupt-specific data (mport structure)
639 *
640 * Handles Tsi721 interrupts from SRIO MAC.
641 */
642static irqreturn_t tsi721_srio_msix(int irq, void *ptr)
643{
644	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
645	u32 srio_int;
646
647	/* Service SRIO MAC interrupts */
648	srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT);
649	if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX)
650		tsi721_pw_handler((struct rio_mport *)ptr);
651
652	return IRQ_HANDLED;
653}
654
655/**
656 * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler
657 * @irq: Linux interrupt number
658 * @ptr: Pointer to interrupt-specific data (mport structure)
659 *
660 * Handles Tsi721 interrupts from SR2PC Channel.
661 * NOTE: At this moment it services only the SR2PC channel associated with
662 * inbound doorbells.
663 */
664static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr)
665{
666	struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv;
667	u32 sr_ch_int;
668
669	/* Service Inbound DB interrupt from SR2PC channel */
670	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
671	if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV)
672		tsi721_dbell_handler((struct rio_mport *)ptr);
673
674	/* Clear interrupts */
675	iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
676	/* Read back to ensure that interrupt was cleared */
677	sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
678
679	return IRQ_HANDLED;
680}
681
682/**
683 * tsi721_request_msix - register interrupt service for MSI-X mode.
684 * @mport: RapidIO master port structure
685 *
686 * Registers MSI-X interrupt service routines for interrupts that are active
687 * immediately after mport initialization. Messaging interrupt service routines
688 * should be registered during corresponding open requests.
689 */
690static int tsi721_request_msix(struct rio_mport *mport)
691{
692	struct tsi721_device *priv = mport->priv;
693	int err = 0;
694
695	err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
696			tsi721_sr2pc_ch_msix, 0,
697			priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport);
698	if (err)
699		goto out;
700
701	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
702			tsi721_srio_msix, 0,
703			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport);
704	if (err)
705		free_irq(
706			priv->msix[TSI721_VECT_IDB].vector,
707			(void *)mport);
708out:
709	return err;
710}
711
712/**
713 * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
714 * @priv: pointer to tsi721 private data
715 *
716 * Configures MSI-X support for Tsi721. Supports only an exact number
717 * of requested vectors.
718 */
719static int tsi721_enable_msix(struct tsi721_device *priv)
720{
721	struct msix_entry entries[TSI721_VECT_MAX];
722	int err;
723	int i;
724
725	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
726	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;
727
728	/*
729	 * Initialize MSI-X entries for Messaging Engine:
730	 * this driver supports four RIO mailboxes (inbound and outbound).
731	 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
732	 * offset +4 is added to the IB MBOX number.
733	 */
734	for (i = 0; i < RIO_MAX_MBOX; i++) {
735		entries[TSI721_VECT_IMB0_RCV + i].entry =
736					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
737		entries[TSI721_VECT_IMB0_INT + i].entry =
738					TSI721_MSIX_IMSG_INT(i + 4);
739		entries[TSI721_VECT_OMB0_DONE + i].entry =
740					TSI721_MSIX_OMSG_DONE(i);
741		entries[TSI721_VECT_OMB0_INT + i].entry =
742					TSI721_MSIX_OMSG_INT(i);
743	}
744
745#ifdef CONFIG_RAPIDIO_DMA_ENGINE
746	/*
747	 * Initialize MSI-X entries for Block DMA Engine:
748	 * this driver supports up to TSI721_DMA_CHNUM DMA channels
749	 * (one is reserved for SRIO maintenance transactions)
750	 */
751	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
752		entries[TSI721_VECT_DMA0_DONE + i].entry =
753					TSI721_MSIX_DMACH_DONE(i);
754		entries[TSI721_VECT_DMA0_INT + i].entry =
755					TSI721_MSIX_DMACH_INT(i);
756	}
757#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
758
759	err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
760	if (err) {
761		if (err > 0)
762			dev_info(&priv->pdev->dev,
763				 "Only %d MSI-X vectors available, "
764				 "not using MSI-X\n", err);
765		else
766			dev_err(&priv->pdev->dev,
767				"Failed to enable MSI-X (err=%d)\n", err);
768		return err;
769	}
770
771	/*
772	 * Copy MSI-X vector information into tsi721 private structure
773	 */
774	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
775	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
776		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
777	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
778	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
779		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));
780
781	for (i = 0; i < RIO_MAX_MBOX; i++) {
782		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
783				entries[TSI721_VECT_IMB0_RCV + i].vector;
784		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
785			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
786			 i, pci_name(priv->pdev));
787
788		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
789				entries[TSI721_VECT_IMB0_INT + i].vector;
790		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
791			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
792			 i, pci_name(priv->pdev));
793
794		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
795				entries[TSI721_VECT_OMB0_DONE + i].vector;
796		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
797			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
798			 i, pci_name(priv->pdev));
799
800		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
801				entries[TSI721_VECT_OMB0_INT + i].vector;
802		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
803			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
804			 i, pci_name(priv->pdev));
805	}
806
807#ifdef CONFIG_RAPIDIO_DMA_ENGINE
808	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
809		priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
810				entries[TSI721_VECT_DMA0_DONE + i].vector;
811		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
812			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
813			 i, pci_name(priv->pdev));
814
815		priv->msix[TSI721_VECT_DMA0_INT + i].vector =
816				entries[TSI721_VECT_DMA0_INT + i].vector;
817		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
818			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
819			 i, pci_name(priv->pdev));
820	}
821#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
822
823	return 0;
824}
825#endif /* CONFIG_PCI_MSI */
826
827static int tsi721_request_irq(struct rio_mport *mport)
828{
829	struct tsi721_device *priv = mport->priv;
830	int err;
831
832#ifdef CONFIG_PCI_MSI
833	if (priv->flags & TSI721_USING_MSIX)
834		err = tsi721_request_msix(mport);
835	else
836#endif
837		err = request_irq(priv->pdev->irq, tsi721_irqhandler,
838			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
839			  DRV_NAME, (void *)mport);
840
841	if (err)
842		dev_err(&priv->pdev->dev,
843			"Unable to allocate interrupt, Error: %d\n", err);
844
845	return err;
846}
847
848/**
849 * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
850 * translation regions.
851 * @priv: pointer to tsi721 private data
852 *
853 * Disables SREP translation regions.
854 */
855static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
856{
857	int i;
858
859	/* Disable all PC2SR translation windows */
860	for (i = 0; i < TSI721_OBWIN_NUM; i++)
861		iowrite32(0, priv->regs + TSI721_OBWINLB(i));
862}
863
864/**
865 * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe)
866 * translation regions.
867 * @priv: pointer to tsi721 private data
868 *
869 * Disables inbound windows.
870 */
871static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv)
872{
873	int i;
874
875	/* Disable all SR2PC inbound windows */
876	for (i = 0; i < TSI721_IBWIN_NUM; i++)
877		iowrite32(0, priv->regs + TSI721_IBWINLB(i));
878}
879
880/**
881 * tsi721_port_write_init - Inbound port write interface init
882 * @priv: pointer to tsi721 private data
883 *
884 * Initializes inbound port write handler.
885 * Returns %0 on success or %-ENOMEM on failure.
886 */
887static int tsi721_port_write_init(struct tsi721_device *priv)
888{
889	priv->pw_discard_count = 0;
890	INIT_WORK(&priv->pw_work, tsi721_pw_dpc);
891	spin_lock_init(&priv->pw_fifo_lock);
892	if (kfifo_alloc(&priv->pw_fifo,
893			TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) {
894		dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n");
895		return -ENOMEM;
896	}
897
898	/* Use reliable port-write capture mode */
899	iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL);
900	return 0;
901}
902
903static int tsi721_doorbell_init(struct tsi721_device *priv)
904{
905	/* Outbound Doorbells do not require any setup.
906	 * Tsi721 uses dedicated PCI BAR1 to generate doorbells.
907	 * That BAR1 was mapped during the probe routine.
908	 */
909
910	/* Initialize Inbound Doorbell processing DPC and queue */
911	priv->db_discard_count = 0;
912	INIT_WORK(&priv->idb_work, tsi721_db_dpc);
913
914	/* Allocate buffer for inbound doorbells queue */
915	priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev,
916				IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
917				&priv->idb_dma, GFP_KERNEL);
918	if (!priv->idb_base)
919		return -ENOMEM;
920
921	dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n",
922		priv->idb_base, (unsigned long long)priv->idb_dma);
923
924	iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE),
925		priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE));
926	iowrite32(((u64)priv->idb_dma >> 32),
927		priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE));
928	iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR),
929		priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE));
930	/* Enable accepting all inbound doorbells */
931	iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE));
932
933	iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE));
934
935	iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
936
937	return 0;
938}
939
940static void tsi721_doorbell_free(struct tsi721_device *priv)
941{
942	if (priv->idb_base == NULL)
943		return;
944
945	/* Free buffer allocated for inbound doorbell queue */
946	dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE,
947			  priv->idb_base, priv->idb_dma);
948	priv->idb_base = NULL;
949}
950
951/**
952 * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
953 * @priv: pointer to tsi721 private data
954 *
955 * Initialize BDMA channel allocated for RapidIO maintenance read/write
956 * request generation.
957 * Returns %0 on success or %-ENOMEM on failure.
958 */
959static int tsi721_bdma_maint_init(struct tsi721_device *priv)
960{
961	struct tsi721_dma_desc *bd_ptr;
962	u64		*sts_ptr;
963	dma_addr_t	bd_phys, sts_phys;
964	int		sts_size;
965	int		bd_num = 2;
966	void __iomem	*regs;
967
968	dev_dbg(&priv->pdev->dev,
969		"Init Block DMA Engine for Maintenance requests, CH%d\n",
970		TSI721_DMACH_MAINT);
971
972	/*
973	 * Initialize DMA channel for maintenance requests
974	 */
975
976	priv->mdma.ch_id = TSI721_DMACH_MAINT;
977	regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
978
979	/* Allocate space for DMA descriptors */
980	bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
981					bd_num * sizeof(struct tsi721_dma_desc),
982					&bd_phys, GFP_KERNEL);
983	if (!bd_ptr)
984		return -ENOMEM;
985
986	priv->mdma.bd_num = bd_num;
987	priv->mdma.bd_phys = bd_phys;
988	priv->mdma.bd_base = bd_ptr;
989
990	dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
991		bd_ptr, (unsigned long long)bd_phys);
992
993	/* Allocate space for descriptor status FIFO */
994	sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
995					bd_num : TSI721_DMA_MINSTSSZ;
996	sts_size = roundup_pow_of_two(sts_size);
997	sts_ptr = dma_zalloc_coherent(&priv->pdev->dev,
998				     sts_size * sizeof(struct tsi721_dma_sts),
999				     &sts_phys, GFP_KERNEL);
1000	if (!sts_ptr) {
1001		/* Free space allocated for DMA descriptors */
1002		dma_free_coherent(&priv->pdev->dev,
1003				  bd_num * sizeof(struct tsi721_dma_desc),
1004				  bd_ptr, bd_phys);
1005		priv->mdma.bd_base = NULL;
1006		return -ENOMEM;
1007	}
1008
1009	priv->mdma.sts_phys = sts_phys;
1010	priv->mdma.sts_base = sts_ptr;
1011	priv->mdma.sts_size = sts_size;
1012
1013	dev_dbg(&priv->pdev->dev,
1014		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
1015		sts_ptr, (unsigned long long)sts_phys, sts_size);
1016
1017	/* Initialize DMA descriptors ring */
1018	bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
1019	bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
1020						 TSI721_DMAC_DPTRL_MASK);
1021	bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
1022
1023	/* Setup DMA descriptor pointers */
1024	iowrite32(((u64)bd_phys >> 32),	regs + TSI721_DMAC_DPTRH);
1025	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
1026		regs + TSI721_DMAC_DPTRL);
1027
1028	/* Setup descriptor status FIFO */
1029	iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
1030	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
1031		regs + TSI721_DMAC_DSBL);
1032	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
1033		regs + TSI721_DMAC_DSSZ);
1034
1035	/* Clear interrupt bits */
1036	iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
1037
1038	ioread32(regs + TSI721_DMAC_INT);
1039
1040	/* Toggle DMA channel initialization */
1041	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
1042	ioread32(regs + TSI721_DMAC_CTL);
1043	udelay(10);
1044
1045	return 0;
1046}
1047
1048static int tsi721_bdma_maint_free(struct tsi721_device *priv)
1049{
1050	u32 ch_stat;
1051	struct tsi721_bdma_maint *mdma = &priv->mdma;
1052	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
1053
1054	if (mdma->bd_base == NULL)
1055		return 0;
1056
1057	/* Check if DMA channel still running */
1058	ch_stat = ioread32(regs + TSI721_DMAC_STS);
1059	if (ch_stat & TSI721_DMAC_STS_RUN)
1060		return -EFAULT;
1061
1062	/* Put DMA channel into init state */
1063	iowrite32(TSI721_DMAC_CTL_INIT,	regs + TSI721_DMAC_CTL);
1064
1065	/* Free space allocated for DMA descriptors */
1066	dma_free_coherent(&priv->pdev->dev,
1067		mdma->bd_num * sizeof(struct tsi721_dma_desc),
1068		mdma->bd_base, mdma->bd_phys);
1069	mdma->bd_base = NULL;
1070
1071	/* Free space allocated for status FIFO */
1072	dma_free_coherent(&priv->pdev->dev,
1073		mdma->sts_size * sizeof(struct tsi721_dma_sts),
1074		mdma->sts_base, mdma->sts_phys);
1075	mdma->sts_base = NULL;
1076	return 0;
1077}
1078
1079/* Enable Inbound Messaging Interrupts */
1080static void
1081tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
1082				  u32 inte_mask)
1083{
1084	u32 rval;
1085
1086	if (!inte_mask)
1087		return;
1088
1089	/* Clear pending Inbound Messaging interrupts */
1090	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1091
1092	/* Enable Inbound Messaging interrupts */
1093	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1094	iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch));
1095
1096	if (priv->flags & TSI721_USING_MSIX)
1097		return; /* Finished if we are in MSI-X mode */
1098
1099	/*
1100	 * For MSI and INTA interrupt signalling we need to enable next levels
1101	 */
1102
1103	/* Enable Device Channel Interrupt */
1104	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1105	iowrite32(rval | TSI721_INT_IMSG_CHAN(ch),
1106		  priv->regs + TSI721_DEV_CHAN_INTE);
1107}
1108
1109/* Disable Inbound Messaging Interrupts */
1110static void
1111tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch,
1112				   u32 inte_mask)
1113{
1114	u32 rval;
1115
1116	if (!inte_mask)
1117		return;
1118
1119	/* Clear pending Inbound Messaging interrupts */
1120	iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch));
1121
1122	/* Disable Inbound Messaging interrupts */
1123	rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch));
1124	rval &= ~inte_mask;
1125	iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch));
1126
1127	if (priv->flags & TSI721_USING_MSIX)
1128		return; /* Finished if we are in MSI-X mode */
1129
1130	/*
1131	 * For MSI and INTA interrupt signalling we need to disable next levels
1132	 */
1133
1134	/* Disable Device Channel Interrupt */
1135	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1136	rval &= ~TSI721_INT_IMSG_CHAN(ch);
1137	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1138}
1139
1140/* Enable Outbound Messaging interrupts */
1141static void
1142tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch,
1143				  u32 inte_mask)
1144{
1145	u32 rval;
1146
1147	if (!inte_mask)
1148		return;
1149
1150	/* Clear pending Outbound Messaging interrupts */
1151	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1152
1153	/* Enable Outbound Messaging channel interrupts */
1154	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1155	iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch));
1156
1157	if (priv->flags & TSI721_USING_MSIX)
1158		return; /* Finished if we are in MSI-X mode */
1159
1160	/*
1161	 * For MSI and INTA interrupt signalling we need to enable next levels
1162	 */
1163
1164	/* Enable Device Channel Interrupt */
1165	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1166	iowrite32(rval | TSI721_INT_OMSG_CHAN(ch),
1167		  priv->regs + TSI721_DEV_CHAN_INTE);
1168}
1169
1170/* Disable Outbound Messaging interrupts */
1171static void
1172tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch,
1173				   u32 inte_mask)
1174{
1175	u32 rval;
1176
1177	if (!inte_mask)
1178		return;
1179
1180	/* Clear pending Outbound Messaging interrupts */
1181	iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch));
1182
1183	/* Disable Outbound Messaging interrupts */
1184	rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch));
1185	rval &= ~inte_mask;
1186	iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch));
1187
1188	if (priv->flags & TSI721_USING_MSIX)
1189		return; /* Finished if we are in MSI-X mode */
1190
1191	/*
1192	 * For MSI and INTA interrupt signalling we need to disable next levels
1193	 */
1194
1195	/* Disable Device Channel Interrupt */
1196	rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1197	rval &= ~TSI721_INT_OMSG_CHAN(ch);
1198	iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE);
1199}
1200
1201/**
1202 * tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
1203 * @mport: Master port with outbound message queue
1204 * @rdev: Target of outbound message
1205 * @mbox: Outbound mailbox
1206 * @buffer: Message to add to outbound queue
1207 * @len: Length of message
1208 */
1209static int
1210tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
1211			void *buffer, size_t len)
1212{
1213	struct tsi721_device *priv = mport->priv;
1214	struct tsi721_omsg_desc *desc;
1215	u32 tx_slot;
1216
1217	if (!priv->omsg_init[mbox] ||
1218	    len > TSI721_MSG_MAX_SIZE || len < 8)
1219		return -EINVAL;
1220
1221	tx_slot = priv->omsg_ring[mbox].tx_slot;
1222
1223	/* Copy message into transfer buffer */
1224	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
1225
1226	if (len & 0x7)
1227		len += 8;
1228
1229	/* Build descriptor associated with buffer */
1230	desc = priv->omsg_ring[mbox].omd_base;
1231	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
1232	if (tx_slot % 4 == 0)
1233		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
1234
1235	desc[tx_slot].msg_info =
1236		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
1237			    (0xe << 12) | (len & 0xff8));
1238	desc[tx_slot].bufptr_lo =
1239		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
1240			    0xffffffff);
1241	desc[tx_slot].bufptr_hi =
1242		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
1243
1244	priv->omsg_ring[mbox].wr_count++;
1245
1246	/* Go to next descriptor */
1247	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
1248		priv->omsg_ring[mbox].tx_slot = 0;
1249		/* Move through the ring link descriptor at the end */
1250		priv->omsg_ring[mbox].wr_count++;
1251	}
1252
1253	mb();
1254
1255	/* Set new write count value */
1256	iowrite32(priv->omsg_ring[mbox].wr_count,
1257		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1258	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1259
1260	return 0;
1261}
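/*
 * Editorial example (not part of tsi721.c): clients enqueue outbound
 * messages through the core wrapper, which lands here for a Tsi721 mport.
 * A sketch assuming mailbox 0 is already open and a hypothetical payload:
 *
 *	char buf[256] = "hello";
 *
 *	rio_add_outb_message(mport, rdev, 0, buf, sizeof(buf));
 *
 * Valid lengths are 8..TSI721_MSG_MAX_SIZE bytes; a length that is not a
 * multiple of 8 is rounded up before the descriptor is built.
 */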
1262
1263/**
1264 * tsi721_omsg_handler - Outbound Message Interrupt Handler
1265 * @priv: pointer to tsi721 private data
1266 * @ch:   number of OB MSG channel to service
1267 *
1268 * Services channel interrupts from outbound messaging engine.
1269 */
1270static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
1271{
1272	u32 omsg_int;
1273
1274	spin_lock(&priv->omsg_ring[ch].lock);
1275
1276	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
1277
1278	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
1279		dev_info(&priv->pdev->dev,
1280			"OB MBOX%d: Status FIFO is full\n", ch);
1281
1282	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
1283		u32 srd_ptr;
1284		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
1285		int i, j;
1286		u32 tx_slot;
1287
1288		/*
1289		 * Find last successfully processed descriptor
1290		 */
1291
1292		/* Check and clear descriptor status FIFO entries */
1293		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
1294		sts_ptr = priv->omsg_ring[ch].sts_base;
1295		j = srd_ptr * 8;
1296		while (sts_ptr[j]) {
1297			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
1298				prev_ptr = last_ptr;
1299				last_ptr = le64_to_cpu(sts_ptr[j]);
1300				sts_ptr[j] = 0;
1301			}
1302
1303			++srd_ptr;
1304			srd_ptr %= priv->omsg_ring[ch].sts_size;
1305			j = srd_ptr * 8;
1306		}
1307
1308		if (last_ptr == 0)
1309			goto no_sts_update;
1310
1311		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
1312		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
1313
1314		if (!priv->mport->outb_msg[ch].mcback)
1315			goto no_sts_update;
1316
1317		/* Inform upper layer about transfer completion */
1318
1319		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
1320						sizeof(struct tsi721_omsg_desc);
1321
1322		/*
1323		 * Check if this is a Link Descriptor (LD).
1324		 * If yes, ignore LD and use descriptor processed
1325		 * before LD.
1326		 */
1327		if (tx_slot == priv->omsg_ring[ch].size) {
1328			if (prev_ptr)
1329				tx_slot = (prev_ptr -
1330					(u64)priv->omsg_ring[ch].omd_phys)/
1331						sizeof(struct tsi721_omsg_desc);
1332			else
1333				goto no_sts_update;
1334		}
1335
1336		/* Move slot index to the next message to be sent */
1337		++tx_slot;
1338		if (tx_slot == priv->omsg_ring[ch].size)
1339			tx_slot = 0;
1340		BUG_ON(tx_slot >= priv->omsg_ring[ch].size);
1341		priv->mport->outb_msg[ch].mcback(priv->mport,
1342				priv->omsg_ring[ch].dev_id, ch,
1343				tx_slot);
1344	}
1345
1346no_sts_update:
1347
1348	if (omsg_int & TSI721_OBDMAC_INT_ERROR) {
1349		/*
1350		 * Outbound message operation aborted due to error,
1351		 * reinitialize OB MSG channel
1352		 */
1353
1354		dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n",
1355			ioread32(priv->regs + TSI721_OBDMAC_STS(ch)));
1356
1357		iowrite32(TSI721_OBDMAC_INT_ERROR,
1358				priv->regs + TSI721_OBDMAC_INT(ch));
1359		iowrite32(TSI721_OBDMAC_CTL_INIT,
1360				priv->regs + TSI721_OBDMAC_CTL(ch));
1361		ioread32(priv->regs + TSI721_OBDMAC_CTL(ch));
1362
1363		/* Inform upper level to clear all pending tx slots */
1364		if (priv->mport->outb_msg[ch].mcback)
1365			priv->mport->outb_msg[ch].mcback(priv->mport,
1366					priv->omsg_ring[ch].dev_id, ch,
1367					priv->omsg_ring[ch].tx_slot);
1368		/* Sync tx_slot tracking */
1369		iowrite32(priv->omsg_ring[ch].tx_slot,
1370			priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1371		ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch));
1372		priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot;
1373		priv->omsg_ring[ch].sts_rdptr = 0;
1374	}
1375
1376	/* Clear channel interrupts */
1377	iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch));
1378
1379	if (!(priv->flags & TSI721_USING_MSIX)) {
1380		u32 ch_inte;
1381
1382		/* Re-enable channel interrupts */
1383		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1384		ch_inte |= TSI721_INT_OMSG_CHAN(ch);
1385		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
1386	}
1387
1388	spin_unlock(&priv->omsg_ring[ch].lock);
1389}
1390
1391/**
1392 * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox
1393 * @mport: Master port implementing Outbound Messaging Engine
1394 * @dev_id: Device specific pointer to pass on event
1395 * @mbox: Mailbox to open
1396 * @entries: Number of entries in the outbound mailbox ring
1397 */
1398static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id,
1399				 int mbox, int entries)
1400{
1401	struct tsi721_device *priv = mport->priv;
1402	struct tsi721_omsg_desc *bd_ptr;
1403	int i, rc = 0;
1404
1405	if ((entries < TSI721_OMSGD_MIN_RING_SIZE) ||
1406	    (entries > (TSI721_OMSGD_RING_SIZE)) ||
1407	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
1408		rc = -EINVAL;
1409		goto out;
1410	}
1411
1412	priv->omsg_ring[mbox].dev_id = dev_id;
1413	priv->omsg_ring[mbox].size = entries;
1414	priv->omsg_ring[mbox].sts_rdptr = 0;
1415	spin_lock_init(&priv->omsg_ring[mbox].lock);
1416
1417	/* Outbound Msg Buffer allocation based on
1418	   the maximum number of descriptor entries */
1419	for (i = 0; i < entries; i++) {
1420		priv->omsg_ring[mbox].omq_base[i] =
1421			dma_alloc_coherent(
1422				&priv->pdev->dev, TSI721_MSG_BUFFER_SIZE,
1423				&priv->omsg_ring[mbox].omq_phys[i],
1424				GFP_KERNEL);
1425		if (priv->omsg_ring[mbox].omq_base[i] == NULL) {
1426			dev_dbg(&priv->pdev->dev,
1427				"Unable to allocate OB MSG data buffer for"
1428				" MBOX%d\n", mbox);
1429			rc = -ENOMEM;
1430			goto out_buf;
1431		}
1432	}
1433
1434	/* Outbound message descriptor allocation */
1435	priv->omsg_ring[mbox].omd_base = dma_alloc_coherent(
1436				&priv->pdev->dev,
1437				(entries + 1) * sizeof(struct tsi721_omsg_desc),
1438				&priv->omsg_ring[mbox].omd_phys, GFP_KERNEL);
1439	if (priv->omsg_ring[mbox].omd_base == NULL) {
1440		dev_dbg(&priv->pdev->dev,
1441			"Unable to allocate OB MSG descriptor memory "
1442			"for MBOX%d\n", mbox);
1443		rc = -ENOMEM;
1444		goto out_buf;
1445	}
1446
1447	priv->omsg_ring[mbox].tx_slot = 0;
1448
1449	/* Outbound message descriptor status FIFO allocation */
1450	priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1);
1451	priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev,
1452			priv->omsg_ring[mbox].sts_size *
1453						sizeof(struct tsi721_dma_sts),
1454			&priv->omsg_ring[mbox].sts_phys, GFP_KERNEL);
1455	if (priv->omsg_ring[mbox].sts_base == NULL) {
1456		dev_dbg(&priv->pdev->dev,
1457			"Unable to allocate OB MSG descriptor status FIFO "
1458			"for MBOX%d\n", mbox);
1459		rc = -ENOMEM;
1460		goto out_desc;
1461	}
1462
1463	/*
1464	 * Configure Outbound Messaging Engine
1465	 */
1466
1467	/* Setup Outbound Message descriptor pointer */
1468	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32),
1469			priv->regs + TSI721_OBDMAC_DPTRH(mbox));
1470	iowrite32(((u64)priv->omsg_ring[mbox].omd_phys &
1471					TSI721_OBDMAC_DPTRL_MASK),
1472			priv->regs + TSI721_OBDMAC_DPTRL(mbox));
1473
1474	/* Setup Outbound Message descriptor status FIFO */
1475	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32),
1476			priv->regs + TSI721_OBDMAC_DSBH(mbox));
1477	iowrite32(((u64)priv->omsg_ring[mbox].sts_phys &
1478					TSI721_OBDMAC_DSBL_MASK),
1479			priv->regs + TSI721_OBDMAC_DSBL(mbox));
1480	iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size),
1481		priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox));
1482
1483	/* Enable interrupts */
1484
1485#ifdef CONFIG_PCI_MSI
1486	if (priv->flags & TSI721_USING_MSIX) {
1487		/* Request interrupt service if we are in MSI-X mode */
1488		rc = request_irq(
1489			priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
1490			tsi721_omsg_msix, 0,
1491			priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name,
1492			(void *)mport);
1493
1494		if (rc) {
1495			dev_dbg(&priv->pdev->dev,
1496				"Unable to allocate MSI-X interrupt for "
1497				"MBOX%d-DONE\n", mbox);
1498			goto out_stat;
1499		}
1500
1501		rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
1502			tsi721_omsg_msix, 0,
1503			priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name,
1504			(void *)mport);
1505
1506		if (rc)	{
1507			dev_dbg(&priv->pdev->dev,
1508				"Unable to allocate MSI-X interrupt for "
1509				"MBOX%d-INT\n", mbox);
1510			free_irq(
1511				priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
1512				(void *)mport);
1513			goto out_stat;
1514		}
1515	}
1516#endif /* CONFIG_PCI_MSI */
1517
1518	tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL);
1519
1520	/* Initialize Outbound Message descriptors ring */
1521	bd_ptr = priv->omsg_ring[mbox].omd_base;
1522	bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29);
1523	bd_ptr[entries].msg_info = 0;
1524	bd_ptr[entries].next_lo =
1525		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys &
1526		TSI721_OBDMAC_DPTRL_MASK);
1527	bd_ptr[entries].next_hi =
1528		cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32);
1529	priv->omsg_ring[mbox].wr_count = 0;
1530	mb();
1531
1532	/* Initialize Outbound Message engine */
1533	iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox));
1534	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
1535	udelay(10);
1536
1537	priv->omsg_init[mbox] = 1;
1538
1539	return 0;
1540
1541#ifdef CONFIG_PCI_MSI
1542out_stat:
1543	dma_free_coherent(&priv->pdev->dev,
1544		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
1545		priv->omsg_ring[mbox].sts_base,
1546		priv->omsg_ring[mbox].sts_phys);
1547
1548	priv->omsg_ring[mbox].sts_base = NULL;
1549#endif /* CONFIG_PCI_MSI */
1550
1551out_desc:
1552	dma_free_coherent(&priv->pdev->dev,
1553		(entries + 1) * sizeof(struct tsi721_omsg_desc),
1554		priv->omsg_ring[mbox].omd_base,
1555		priv->omsg_ring[mbox].omd_phys);
1556
1557	priv->omsg_ring[mbox].omd_base = NULL;
1558
1559out_buf:
1560	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
1561		if (priv->omsg_ring[mbox].omq_base[i]) {
1562			dma_free_coherent(&priv->pdev->dev,
1563				TSI721_MSG_BUFFER_SIZE,
1564				priv->omsg_ring[mbox].omq_base[i],
1565				priv->omsg_ring[mbox].omq_phys[i]);
1566
1567			priv->omsg_ring[mbox].omq_base[i] = NULL;
1568		}
1569	}
1570
1571out:
1572	return rc;
1573}
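/*
 * Editorial example (not part of tsi721.c): this routine runs when a client
 * requests an outbound mailbox from the core. A sketch with a hypothetical
 * completion callback and a 32-entry ring (entries must be a power of two
 * within the TSI721_OMSGD_*_RING_SIZE limits):
 *
 *	static void my_omsg_event(struct rio_mport *mport, void *dev_id,
 *				  int mbox, int slot)
 *	{
 *		pr_debug("OB MBOX%d done, next slot %d\n", mbox, slot);
 *	}
 *
 *	rio_request_outb_mbox(mport, NULL, 0, 32, my_omsg_event);
 */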
1574
1575/**
1576 * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox
1577 * @mport: Master port implementing the outbound message unit
1578 * @mbox: Mailbox to close
1579 */
1580static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox)
1581{
1582	struct tsi721_device *priv = mport->priv;
1583	u32 i;
1584
1585	if (!priv->omsg_init[mbox])
1586		return;
1587	priv->omsg_init[mbox] = 0;
1588
1589	/* Disable Interrupts */
1590
1591	tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL);
1592
1593#ifdef CONFIG_PCI_MSI
1594	if (priv->flags & TSI721_USING_MSIX) {
1595		free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector,
1596			 (void *)mport);
1597		free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector,
1598			 (void *)mport);
1599	}
1600#endif /* CONFIG_PCI_MSI */
1601
1602	/* Free OMSG Descriptor Status FIFO */
1603	dma_free_coherent(&priv->pdev->dev,
1604		priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts),
1605		priv->omsg_ring[mbox].sts_base,
1606		priv->omsg_ring[mbox].sts_phys);
1607
1608	priv->omsg_ring[mbox].sts_base = NULL;
1609
1610	/* Free OMSG descriptors */
1611	dma_free_coherent(&priv->pdev->dev,
1612		(priv->omsg_ring[mbox].size + 1) *
1613			sizeof(struct tsi721_omsg_desc),
1614		priv->omsg_ring[mbox].omd_base,
1615		priv->omsg_ring[mbox].omd_phys);
1616
1617	priv->omsg_ring[mbox].omd_base = NULL;
1618
1619	/* Free message buffers */
1620	for (i = 0; i < priv->omsg_ring[mbox].size; i++) {
1621		if (priv->omsg_ring[mbox].omq_base[i]) {
1622			dma_free_coherent(&priv->pdev->dev,
1623				TSI721_MSG_BUFFER_SIZE,
1624				priv->omsg_ring[mbox].omq_base[i],
1625				priv->omsg_ring[mbox].omq_phys[i]);
1626
1627			priv->omsg_ring[mbox].omq_base[i] = NULL;
1628		}
1629	}
1630}
1631
1632/**
1633 * tsi721_imsg_handler - Inbound Message Interrupt Handler
1634 * @priv: pointer to tsi721 private data
1635 * @ch: inbound message channel number to service
1636 *
1637 * Services channel interrupts from inbound messaging engine.
1638 */
1639static void tsi721_imsg_handler(struct tsi721_device *priv, int ch)
1640{
1641	u32 mbox = ch - 4;
1642	u32 imsg_int;
1643
1644	spin_lock(&priv->imsg_ring[mbox].lock);
1645
1646	imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch));
1647
1648	if (imsg_int & TSI721_IBDMAC_INT_SRTO)
1649		dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n",
1650			mbox);
1651
1652	if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR)
1653		dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n",
1654			mbox);
1655
1656	if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW)
1657		dev_info(&priv->pdev->dev,
1658			"IB MBOX%d IB free queue low\n", mbox);
1659
1660	/* Clear IB channel interrupts */
1661	iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch));
1662
1663	/* If an IB Msg is received notify the upper layer */
1664	if ((imsg_int & TSI721_IBDMAC_INT_DQ_RCV) &&
1665		priv->mport->inb_msg[mbox].mcback)
1666		priv->mport->inb_msg[mbox].mcback(priv->mport,
1667				priv->imsg_ring[mbox].dev_id, mbox, -1);
1668
1669	if (!(priv->flags & TSI721_USING_MSIX)) {
1670		u32 ch_inte;
1671
1672		/* Re-enable channel interrupts */
1673		ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE);
1674		ch_inte |= TSI721_INT_IMSG_CHAN(ch);
1675		iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE);
1676	}
1677
1678	spin_unlock(&priv->imsg_ring[mbox].lock);
1679}
1680
1681/**
1682 * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox
1683 * @mport: Master port implementing the Inbound Messaging Engine
1684 * @dev_id: Device specific pointer to pass on event
1685 * @mbox: Mailbox to open
1686 * @entries: Number of entries in the inbound mailbox ring
1687 */
1688static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id,
1689				int mbox, int entries)
1690{
1691	struct tsi721_device *priv = mport->priv;
1692	int ch = mbox + 4;
1693	int i;
1694	u64 *free_ptr;
1695	int rc = 0;
1696
1697	if ((entries < TSI721_IMSGD_MIN_RING_SIZE) ||
1698	    (entries > TSI721_IMSGD_RING_SIZE) ||
1699	    (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) {
1700		rc = -EINVAL;
1701		goto out;
1702	}
1703
1704	/* Initialize IB Messaging Ring */
1705	priv->imsg_ring[mbox].dev_id = dev_id;
1706	priv->imsg_ring[mbox].size = entries;
1707	priv->imsg_ring[mbox].rx_slot = 0;
1708	priv->imsg_ring[mbox].desc_rdptr = 0;
1709	priv->imsg_ring[mbox].fq_wrptr = 0;
1710	for (i = 0; i < priv->imsg_ring[mbox].size; i++)
1711		priv->imsg_ring[mbox].imq_base[i] = NULL;
1712	spin_lock_init(&priv->imsg_ring[mbox].lock);
1713
1714	/* Allocate buffers for incoming messages */
1715	priv->imsg_ring[mbox].buf_base =
1716		dma_alloc_coherent(&priv->pdev->dev,
1717				   entries * TSI721_MSG_BUFFER_SIZE,
1718				   &priv->imsg_ring[mbox].buf_phys,
1719				   GFP_KERNEL);
1720
1721	if (priv->imsg_ring[mbox].buf_base == NULL) {
1722		dev_err(&priv->pdev->dev,
1723			"Failed to allocate buffers for IB MBOX%d\n", mbox);
1724		rc = -ENOMEM;
1725		goto out;
1726	}
1727
1728	/* Allocate memory for circular free list */
1729	priv->imsg_ring[mbox].imfq_base =
1730		dma_alloc_coherent(&priv->pdev->dev,
1731				   entries * 8,
1732				   &priv->imsg_ring[mbox].imfq_phys,
1733				   GFP_KERNEL);
1734
1735	if (priv->imsg_ring[mbox].imfq_base == NULL) {
1736		dev_err(&priv->pdev->dev,
1737			"Failed to allocate free queue for IB MBOX%d\n", mbox);
1738		rc = -ENOMEM;
1739		goto out_buf;
1740	}
1741
1742	/* Allocate memory for Inbound message descriptors */
1743	priv->imsg_ring[mbox].imd_base =
1744		dma_alloc_coherent(&priv->pdev->dev,
1745				   entries * sizeof(struct tsi721_imsg_desc),
1746				   &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL);
1747
1748	if (priv->imsg_ring[mbox].imd_base == NULL) {
1749		dev_err(&priv->pdev->dev,
1750			"Failed to allocate descriptor memory for IB MBOX%d\n",
1751			mbox);
1752		rc = -ENOMEM;
1753		goto out_dma;
1754	}
1755
1756	/* Fill free buffer pointer list */
1757	free_ptr = priv->imsg_ring[mbox].imfq_base;
1758	for (i = 0; i < entries; i++)
1759		free_ptr[i] = cpu_to_le64(
1760				(u64)(priv->imsg_ring[mbox].buf_phys) +
1761				i * TSI721_MSG_BUFFER_SIZE);
1762
1763	mb();
1764
	/*
	 * To steer inbound SRIO messages into the appropriate queues, the
	 * messaging engine's Inbound Device ID register must be set. Do it
	 * once, when the first inbound mailbox is opened.
	 */
1770	if (!(priv->flags & TSI721_IMSGID_SET)) {
1771		iowrite32((u32)priv->mport->host_deviceid,
1772			priv->regs + TSI721_IB_DEVID);
1773		priv->flags |= TSI721_IMSGID_SET;
1774	}
1775
1776	/*
1777	 * Configure Inbound Messaging channel (ch = mbox + 4)
1778	 */
1779
1780	/* Setup Inbound Message free queue */
1781	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32),
1782		priv->regs + TSI721_IBDMAC_FQBH(ch));
1783	iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys &
1784			TSI721_IBDMAC_FQBL_MASK),
1785		priv->regs+TSI721_IBDMAC_FQBL(ch));
1786	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
1787		priv->regs + TSI721_IBDMAC_FQSZ(ch));
1788
1789	/* Setup Inbound Message descriptor queue */
1790	iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32),
1791		priv->regs + TSI721_IBDMAC_DQBH(ch));
1792	iowrite32(((u32)priv->imsg_ring[mbox].imd_phys &
1793		   (u32)TSI721_IBDMAC_DQBL_MASK),
1794		priv->regs+TSI721_IBDMAC_DQBL(ch));
1795	iowrite32(TSI721_DMAC_DSSZ_SIZE(entries),
1796		priv->regs + TSI721_IBDMAC_DQSZ(ch));
1797
1798	/* Enable interrupts */
1799
1800#ifdef CONFIG_PCI_MSI
1801	if (priv->flags & TSI721_USING_MSIX) {
1802		/* Request interrupt service if we are in MSI-X mode */
1803		rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
1804			tsi721_imsg_msix, 0,
1805			priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name,
1806			(void *)mport);
1807
1808		if (rc) {
1809			dev_dbg(&priv->pdev->dev,
1810				"Unable to allocate MSI-X interrupt for "
1811				"IBOX%d-DONE\n", mbox);
1812			goto out_desc;
1813		}
1814
1815		rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
1816			tsi721_imsg_msix, 0,
1817			priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name,
1818			(void *)mport);
1819
1820		if (rc)	{
1821			dev_dbg(&priv->pdev->dev,
1822				"Unable to allocate MSI-X interrupt for "
1823				"IBOX%d-INT\n", mbox);
1824			free_irq(
1825				priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
1826				(void *)mport);
1827			goto out_desc;
1828		}
1829	}
1830#endif /* CONFIG_PCI_MSI */
1831
1832	tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL);
1833
1834	/* Initialize Inbound Message Engine */
1835	iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch));
1836	ioread32(priv->regs + TSI721_IBDMAC_CTL(ch));
1837	udelay(10);
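	/*
	 * Hand all but one free-list entry to the hardware; keeping the
	 * write pointer one slot behind the read pointer keeps a full
	 * queue distinguishable from an empty one.
	 */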
1838	priv->imsg_ring[mbox].fq_wrptr = entries - 1;
1839	iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch));
1840
1841	priv->imsg_init[mbox] = 1;
1842	return 0;
1843
1844#ifdef CONFIG_PCI_MSI
1845out_desc:
1846	dma_free_coherent(&priv->pdev->dev,
1847		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
1848		priv->imsg_ring[mbox].imd_base,
1849		priv->imsg_ring[mbox].imd_phys);
1850
1851	priv->imsg_ring[mbox].imd_base = NULL;
1852#endif /* CONFIG_PCI_MSI */
1853
1854out_dma:
1855	dma_free_coherent(&priv->pdev->dev,
1856		priv->imsg_ring[mbox].size * 8,
1857		priv->imsg_ring[mbox].imfq_base,
1858		priv->imsg_ring[mbox].imfq_phys);
1859
1860	priv->imsg_ring[mbox].imfq_base = NULL;
1861
1862out_buf:
1863	dma_free_coherent(&priv->pdev->dev,
1864		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
1865		priv->imsg_ring[mbox].buf_base,
1866		priv->imsg_ring[mbox].buf_phys);
1867
1868	priv->imsg_ring[mbox].buf_base = NULL;
1869
1870out:
1871	return rc;
1872}
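
/*
 * Note: this handler is not called directly; the RapidIO core invokes it
 * through mport->ops when a client requests the mailbox. A minimal usage
 * sketch (rx_handler and bufs[] are hypothetical client-side names):
 *
 *	if (!rio_request_inb_mbox(mport, dev_id, 0, 64, rx_handler)) {
 *		int i;
 *
 *		for (i = 0; i < 64; i++)
 *			rio_add_inb_buffer(mport, 0, bufs[i]);
 *	}
 */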
1873
1874/**
1875 * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox
1876 * @mport: Master port implementing the Inbound Messaging Engine
1877 * @mbox: Mailbox to close
1878 */
1879static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox)
1880{
1881	struct tsi721_device *priv = mport->priv;
1882	u32 rx_slot;
1883	int ch = mbox + 4;
1884
1885	if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */
1886		return;
1887	priv->imsg_init[mbox] = 0;
1888
1889	/* Disable Inbound Messaging Engine */
1890
1891	/* Disable Interrupts */
	tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_MASK);
1893
1894#ifdef CONFIG_PCI_MSI
1895	if (priv->flags & TSI721_USING_MSIX) {
1896		free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
1897				(void *)mport);
1898		free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
1899				(void *)mport);
1900	}
1901#endif /* CONFIG_PCI_MSI */
1902
1903	/* Clear Inbound Buffer Queue */
1904	for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
1905		priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
1906
1907	/* Free memory allocated for message buffers */
1908	dma_free_coherent(&priv->pdev->dev,
1909		priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
1910		priv->imsg_ring[mbox].buf_base,
1911		priv->imsg_ring[mbox].buf_phys);
1912
1913	priv->imsg_ring[mbox].buf_base = NULL;
1914
	/* Free memory allocated for free pointer list */
1916	dma_free_coherent(&priv->pdev->dev,
1917		priv->imsg_ring[mbox].size * 8,
1918		priv->imsg_ring[mbox].imfq_base,
1919		priv->imsg_ring[mbox].imfq_phys);
1920
1921	priv->imsg_ring[mbox].imfq_base = NULL;
1922
1923	/* Free memory allocated for RX descriptors */
1924	dma_free_coherent(&priv->pdev->dev,
1925		priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
1926		priv->imsg_ring[mbox].imd_base,
1927		priv->imsg_ring[mbox].imd_phys);
1928
1929	priv->imsg_ring[mbox].imd_base = NULL;
1930}
1931
/**
 * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
 * @mport: Master port implementing the Inbound Messaging Engine
 * @mbox: Inbound mailbox number
 * @buf: Buffer to add to inbound queue
 *
 * Returns %0 on success or %-EINVAL if the next ring slot is still
 * occupied.
 */
1938static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
1939{
1940	struct tsi721_device *priv = mport->priv;
1941	u32 rx_slot;
1942	int rc = 0;
1943
1944	rx_slot = priv->imsg_ring[mbox].rx_slot;
1945	if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
1946		dev_err(&priv->pdev->dev,
1947			"Error adding inbound buffer %d, buffer exists\n",
1948			rx_slot);
1949		rc = -EINVAL;
1950		goto out;
1951	}
1952
1953	priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
1954
1955	if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
1956		priv->imsg_ring[mbox].rx_slot = 0;
1957
1958out:
1959	return rc;
1960}
1961
1962/**
1963 * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
1964 * @mport: Master port implementing the Inbound Messaging Engine
1965 * @mbox: Inbound mailbox number
1966 *
1967 * Returns pointer to the message on success or NULL on failure.
1968 */
1969static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox)
1970{
1971	struct tsi721_device *priv = mport->priv;
1972	struct tsi721_imsg_desc *desc;
1973	u32 rx_slot;
1974	void *rx_virt = NULL;
1975	u64 rx_phys;
1976	void *buf = NULL;
1977	u64 *free_ptr;
1978	int ch = mbox + 4;
1979	int msg_size;
1980
1981	if (!priv->imsg_init[mbox])
1982		return NULL;
1983
1984	desc = priv->imsg_ring[mbox].imd_base;
1985	desc += priv->imsg_ring[mbox].desc_rdptr;
1986
1987	if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO))
1988		goto out;
1989
1990	rx_slot = priv->imsg_ring[mbox].rx_slot;
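	/*
	 * Find the next slot that holds a client buffer added via
	 * tsi721_add_inb_buffer(). At least one buffer must be pending,
	 * otherwise this scan would never terminate.
	 */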
1991	while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) {
1992		if (++rx_slot == priv->imsg_ring[mbox].size)
1993			rx_slot = 0;
1994	}
1995
1996	rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) |
1997			le32_to_cpu(desc->bufptr_lo);
1998
1999	rx_virt = priv->imsg_ring[mbox].buf_base +
2000		  (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys);
2001
2002	buf = priv->imsg_ring[mbox].imq_base[rx_slot];
2003	msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT;
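	/* a zero BCOUNT field denotes a maximum-size message */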
2004	if (msg_size == 0)
2005		msg_size = RIO_MAX_MSG_SIZE;
2006
2007	memcpy(buf, rx_virt, msg_size);
2008	priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
2009
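	/* Release the descriptor: clear its HO flag, advance the read pointer */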
2010	desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO);
2011	if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size)
2012		priv->imsg_ring[mbox].desc_rdptr = 0;
2013
2014	iowrite32(priv->imsg_ring[mbox].desc_rdptr,
2015		priv->regs + TSI721_IBDMAC_DQRP(ch));
2016
2017	/* Return free buffer into the pointer list */
2018	free_ptr = priv->imsg_ring[mbox].imfq_base;
2019	free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys);
2020
2021	if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size)
2022		priv->imsg_ring[mbox].fq_wrptr = 0;
2023
2024	iowrite32(priv->imsg_ring[mbox].fq_wrptr,
2025		priv->regs + TSI721_IBDMAC_FQWP(ch));
2026out:
2027	return buf;
2028}
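
/*
 * Sketch of a matching client receive callback (process() is a
 * hypothetical consumer): drain every pending message and return each
 * buffer to the ring so reception can continue.
 *
 *	static void rx_handler(struct rio_mport *mport, void *dev_id,
 *			       int mbox, int slot)
 *	{
 *		void *msg;
 *
 *		while ((msg = rio_get_inb_message(mport, mbox)) != NULL) {
 *			process(msg);
 *			rio_add_inb_buffer(mport, mbox, msg);
 *		}
 *	}
 */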
2029
2030/**
2031 * tsi721_messages_init - Initialization of Messaging Engine
2032 * @priv: pointer to tsi721 private data
2033 *
2034 * Configures Tsi721 messaging engine.
2035 */
2036static int tsi721_messages_init(struct tsi721_device *priv)
2037{
2038	int	ch;
2039
2040	iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG);
2041	iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT);
2042	iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT);
2043
2044	/* Set SRIO Message Request/Response Timeout */
2045	iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO);
2046
2047	/* Initialize Inbound Messaging Engine Registers */
2048	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) {
2049		/* Clear interrupt bits */
2050		iowrite32(TSI721_IBDMAC_INT_MASK,
2051			priv->regs + TSI721_IBDMAC_INT(ch));
2052		/* Clear Status */
2053		iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch));
2054
2055		iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK,
2056				priv->regs + TSI721_SMSG_ECC_COR_LOG(ch));
2057		iowrite32(TSI721_SMSG_ECC_NCOR_MASK,
2058				priv->regs + TSI721_SMSG_ECC_NCOR(ch));
2059	}
2060
2061	return 0;
2062}
2063
2064/**
2065 * tsi721_disable_ints - disables all device interrupts
2066 * @priv: pointer to tsi721 private data
2067 */
2068static void tsi721_disable_ints(struct tsi721_device *priv)
2069{
2070	int ch;
2071
2072	/* Disable all device level interrupts */
2073	iowrite32(0, priv->regs + TSI721_DEV_INTE);
2074
2075	/* Disable all Device Channel interrupts */
2076	iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE);
2077
2078	/* Disable all Inbound Msg Channel interrupts */
2079	for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++)
2080		iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch));
2081
2082	/* Disable all Outbound Msg Channel interrupts */
2083	for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++)
2084		iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch));
2085
2086	/* Disable all general messaging interrupts */
2087	iowrite32(0, priv->regs + TSI721_SMSG_INTE);
2088
2089	/* Disable all BDMA Channel interrupts */
2090	for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
2091		iowrite32(0,
2092			priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
2093
2094	/* Disable all general BDMA interrupts */
2095	iowrite32(0, priv->regs + TSI721_BDMA_INTE);
2096
2097	/* Disable all SRIO Channel interrupts */
2098	for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++)
2099		iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch));
2100
2101	/* Disable all general SR2PC interrupts */
2102	iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE);
2103
2104	/* Disable all PC2SR interrupts */
2105	iowrite32(0, priv->regs + TSI721_PC2SR_INTE);
2106
2107	/* Disable all I2C interrupts */
2108	iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE);
2109
2110	/* Disable SRIO MAC interrupts */
2111	iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE);
2112	iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN);
2113}
2114
/**
 * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port
 * @priv: pointer to tsi721 private data
 *
 * Configures Tsi721 as a RapidIO master port. Returns %0 on success,
 * %-ENOMEM if an allocation fails, or the IRQ request error code.
 */
2121static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2122{
2123	struct pci_dev *pdev = priv->pdev;
2124	int err = 0;
2125	struct rio_ops *ops;
2126
2127	struct rio_mport *mport;
2128
2129	ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL);
2130	if (!ops) {
2131		dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n");
2132		return -ENOMEM;
2133	}
2134
2135	ops->lcread = tsi721_lcread;
2136	ops->lcwrite = tsi721_lcwrite;
2137	ops->cread = tsi721_cread_dma;
2138	ops->cwrite = tsi721_cwrite_dma;
2139	ops->dsend = tsi721_dsend;
2140	ops->open_inb_mbox = tsi721_open_inb_mbox;
2141	ops->close_inb_mbox = tsi721_close_inb_mbox;
2142	ops->open_outb_mbox = tsi721_open_outb_mbox;
2143	ops->close_outb_mbox = tsi721_close_outb_mbox;
2144	ops->add_outb_message = tsi721_add_outb_message;
2145	ops->add_inb_buffer = tsi721_add_inb_buffer;
2146	ops->get_inb_message = tsi721_get_inb_message;
2147
2148	mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL);
2149	if (!mport) {
2150		kfree(ops);
2151		dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n");
2152		return -ENOMEM;
2153	}
2154
2155	mport->ops = ops;
2156	mport->index = 0;
2157	mport->sys_size = 0; /* small system */
2158	mport->phy_type = RIO_PHY_SERIAL;
2159	mport->priv = (void *)priv;
2160	mport->phys_efptr = 0x100;
2161	priv->mport = mport;
2162
2163	INIT_LIST_HEAD(&mport->dbells);
2164
2165	rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff);
2166	rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3);
2167	rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3);
2168	strcpy(mport->name, "Tsi721 mport");
2169
2170	/* Hook up interrupt handler */
2171
2172#ifdef CONFIG_PCI_MSI
2173	if (!tsi721_enable_msix(priv))
2174		priv->flags |= TSI721_USING_MSIX;
2175	else if (!pci_enable_msi(pdev))
2176		priv->flags |= TSI721_USING_MSI;
2177	else
2178		dev_info(&pdev->dev,
2179			 "MSI/MSI-X is not available. Using legacy INTx.\n");
2180#endif /* CONFIG_PCI_MSI */
2181
2182	err = tsi721_request_irq(mport);
2183
2184	if (!err) {
2185		tsi721_interrupts_init(priv);
2186		ops->pwenable = tsi721_pw_enable;
2187	} else {
2188		dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
2189			"vector %02X err=0x%x\n", pdev->irq, err);
2190		goto err_exit;
2191	}
2192
2193#ifdef CONFIG_RAPIDIO_DMA_ENGINE
2194	tsi721_register_dma(priv);
2195#endif
2196	/* Enable SRIO link */
2197	iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
2198		  TSI721_DEVCTL_SRBOOT_CMPL,
2199		  priv->regs + TSI721_DEVCTL);
2200
2201	rio_register_mport(mport);
2202
2203	if (mport->host_deviceid >= 0)
2204		iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
2205			  RIO_PORT_GEN_DISCOVERED,
2206			  priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2207	else
2208		iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2209
2210	return 0;
2211
2212err_exit:
2213	kfree(mport);
2214	kfree(ops);
2215	return err;
2216}
2217
2218static int __devinit tsi721_probe(struct pci_dev *pdev,
2219				  const struct pci_device_id *id)
2220{
2221	struct tsi721_device *priv;
2222	int cap;
2223	int err;
2224	u32 regval;
2225
2226	priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL);
2227	if (priv == NULL) {
2228		dev_err(&pdev->dev, "Failed to allocate memory for device\n");
2229		err = -ENOMEM;
2230		goto err_exit;
2231	}
2232
2233	err = pci_enable_device(pdev);
2234	if (err) {
2235		dev_err(&pdev->dev, "Failed to enable PCI device\n");
2236		goto err_clean;
2237	}
2238
2239	priv->pdev = pdev;
2240
2241#ifdef DEBUG
2242	{
2243	int i;
2244	for (i = 0; i <= PCI_STD_RESOURCE_END; i++) {
2245		dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n",
2246			i, (unsigned long long)pci_resource_start(pdev, i),
2247			(unsigned long)pci_resource_len(pdev, i),
2248			pci_resource_flags(pdev, i));
2249	}
2250	}
2251#endif
2252	/*
2253	 * Verify BAR configuration
2254	 */
2255
2256	/* BAR_0 (registers) must be 512KB+ in 32-bit address space */
2257	if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) ||
2258	    pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 ||
2259	    pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) {
2260		dev_err(&pdev->dev,
2261			"Missing or misconfigured CSR BAR0, aborting.\n");
2262		err = -ENODEV;
2263		goto err_disable_pdev;
2264	}
2265
2266	/* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */
2267	if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) ||
2268	    pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 ||
2269	    pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) {
2270		dev_err(&pdev->dev,
2271			"Missing or misconfigured Doorbell BAR1, aborting.\n");
2272		err = -ENODEV;
2273		goto err_disable_pdev;
2274	}
2275
2276	/*
2277	 * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address
2278	 * space.
2279	 * NOTE: BAR_2 and BAR_4 are not used by this version of driver.
2280	 * It may be a good idea to keep them disabled using HW configuration
2281	 * to save PCI memory space.
2282	 */
2283	if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) &&
2284	    (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) {
2285		dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n");
2286	}
2287
2288	if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) &&
2289	    (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) {
2290		dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n");
2291	}
2292
2293	err = pci_request_regions(pdev, DRV_NAME);
2294	if (err) {
2295		dev_err(&pdev->dev, "Cannot obtain PCI resources, "
2296			"aborting.\n");
2297		goto err_disable_pdev;
2298	}
2299
2300	pci_set_master(pdev);
2301
2302	priv->regs = pci_ioremap_bar(pdev, BAR_0);
2303	if (!priv->regs) {
2304		dev_err(&pdev->dev,
			"Unable to map device register space, aborting\n");
2306		err = -ENOMEM;
2307		goto err_free_res;
2308	}
2309
2310	priv->odb_base = pci_ioremap_bar(pdev, BAR_1);
2311	if (!priv->odb_base) {
2312		dev_err(&pdev->dev,
			"Unable to map outbound doorbell space, aborting\n");
2314		err = -ENOMEM;
2315		goto err_unmap_bars;
2316	}
2317
	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_info(&pdev->dev, "Unable to set DMA mask\n");
			goto err_unmap_bars;
		}

		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err)
			dev_info(&pdev->dev, "Unable to set consistent DMA mask\n");
	}
2332
2333	cap = pci_pcie_cap(pdev);
	BUG_ON(cap == 0); /* Tsi721 is a PCIe device; capability must exist */
2335
2336	/* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */
2337	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL, &regval);
2338	regval &= ~(PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN |
2339		    PCI_EXP_DEVCTL_NOSNOOP_EN);
	regval |= 0x2 << MAX_READ_REQUEST_SZ_SHIFT; /* MRRS = 512 bytes */
2341	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL, regval);
2342
	/* Adjust PCIe completion timeout (select the 1 ms - 10 ms range). */
	pci_read_config_dword(pdev, cap + PCI_EXP_DEVCTL2, &regval);
	regval &= ~(0x0f);
	pci_write_config_dword(pdev, cap + PCI_EXP_DEVCTL2, regval | 0x2);
2347
2348	/*
2349	 * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block
2350	 */
2351	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01);
2352	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL,
2353						TSI721_MSIXTBL_OFFSET);
2354	pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA,
2355						TSI721_MSIXPBA_OFFSET);
2356	pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0);
2357	/* End of FIXUP */
2358
2359	tsi721_disable_ints(priv);
2360
2361	tsi721_init_pc2sr_mapping(priv);
2362	tsi721_init_sr2pc_mapping(priv);
2363
2364	if (tsi721_bdma_maint_init(priv)) {
2365		dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
2366		err = -ENOMEM;
2367		goto err_unmap_bars;
2368	}
2369
2370	err = tsi721_doorbell_init(priv);
2371	if (err)
2372		goto err_free_bdma;
2373
2374	tsi721_port_write_init(priv);
2375
2376	err = tsi721_messages_init(priv);
2377	if (err)
2378		goto err_free_consistent;
2379
2380	err = tsi721_setup_mport(priv);
2381	if (err)
2382		goto err_free_consistent;
2383
2384	return 0;
2385
2386err_free_consistent:
2387	tsi721_doorbell_free(priv);
2388err_free_bdma:
2389	tsi721_bdma_maint_free(priv);
2390err_unmap_bars:
2391	if (priv->regs)
2392		iounmap(priv->regs);
2393	if (priv->odb_base)
2394		iounmap(priv->odb_base);
2395err_free_res:
2396	pci_release_regions(pdev);
2397	pci_clear_master(pdev);
2398err_disable_pdev:
2399	pci_disable_device(pdev);
2400err_clean:
2401	kfree(priv);
2402err_exit:
2403	return err;
2404}
2405
2406static DEFINE_PCI_DEVICE_TABLE(tsi721_pci_tbl) = {
2407	{ PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) },
2408	{ 0, }	/* terminate list */
2409};
2410
2411MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl);
2412
2413static struct pci_driver tsi721_driver = {
2414	.name		= "tsi721",
2415	.id_table	= tsi721_pci_tbl,
2416	.probe		= tsi721_probe,
2417};
2418
2419static int __init tsi721_init(void)
2420{
2421	return pci_register_driver(&tsi721_driver);
2422}
2423
2424static void __exit tsi721_exit(void)
2425{
2426	pci_unregister_driver(&tsi721_driver);
2427}
2428
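/*
 * The driver registers at device-initcall time and defines no .remove
 * handler, so it is intended to be built into the kernel; tsi721_exit()
 * is not wired up via module_exit() and is never called.
 */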
2429device_initcall(tsi721_init);
2430