Lines matching refs: dd (drivers/infiniband/hw/qib/qib_tx.c, qlogic_ib PIO send buffer handling)
Each entry below is the source line number followed by the matching line.

54  * @dd: the qlogic_ib device
61 void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first, unsigned cnt)
68 spin_lock_irqsave(&dd->pioavail_lock, flags);
70 __clear_bit(i, dd->pio_need_disarm);
71 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
73 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
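qib_disarm_piobufs() (lines 54-73): the whole disarm loop runs under
pioavail_lock so the pio_need_disarm bitmap and the chip sendctrl write stay
consistent. A sketch reconstructed around the matched lines; the loop bound
and locals are interpolated, and all sketches below assume the driver's own
qib.h declarations (spinlocks, bitops, struct fields):

        void qib_disarm_piobufs(struct qib_devdata *dd, unsigned first,
                                unsigned cnt)
        {
                unsigned long flags;
                unsigned i, last = first + cnt;  /* interpolated bound */

                spin_lock_irqsave(&dd->pioavail_lock, flags);
                for (i = first; i < last; i++) {
                        /* Drop any deferred request, then disarm on-chip. */
                        __clear_bit(i, dd->pio_need_disarm);
                        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
                }
                spin_unlock_irqrestore(&dd->pioavail_lock, flags);
        }
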
82 struct qib_devdata *dd = rcd->dd;
103 spin_lock_irq(&dd->pioavail_lock);
105 if (__test_and_clear_bit(i, dd->pio_need_disarm)) {
107 dd->f_sendctrl(rcd->ppd, QIB_SENDCTRL_DISARM_BUF(i));
110 spin_unlock_irq(&dd->pioavail_lock);
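qib_disarm_piobufs_ifneeded() (lines 82-110): the per-context variant walks
one receive context's buffer range and issues the chip disarm only for
buffers whose pio_need_disarm bit was set earlier. A sketch; the
rcd->pio_base/rcd->piocnt range is assumed from the driver's context layout,
not shown by the matches:

        void qib_disarm_piobufs_ifneeded(struct qib_ctxtdata *rcd)
        {
                struct qib_devdata *dd = rcd->dd;
                unsigned i, last = rcd->pio_base + rcd->piocnt; /* assumed */

                spin_lock_irq(&dd->pioavail_lock);
                for (i = rcd->pio_base; i < last; i++) {
                        /* Only previously flagged buffers get the write. */
                        if (__test_and_clear_bit(i, dd->pio_need_disarm))
                                dd->f_sendctrl(rcd->ppd,
                                               QIB_SENDCTRL_DISARM_BUF(i));
                }
                spin_unlock_irq(&dd->pioavail_lock);
        }
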
114 static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd, unsigned i)
119 for (pidx = 0; pidx < dd->num_pports; pidx++) {
120 ppd = dd->pport + pidx;
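is_sdma_buf() (lines 114-120): returns the port whose send-DMA buffer window
contains buffer i, or NULL. A sketch; the window test and the
sdma_state.first_sendbuf/last_sendbuf field names are assumptions:

        static struct qib_pportdata *is_sdma_buf(struct qib_devdata *dd,
                                                 unsigned i)
        {
                struct qib_pportdata *ppd;
                unsigned pidx;

                for (pidx = 0; pidx < dd->num_pports; pidx++) {
                        ppd = dd->pport + pidx;
                        /* Assumed check against this port's SDMA window. */
                        if (i >= ppd->sdma_state.first_sendbuf &&
                            i < ppd->sdma_state.last_sendbuf)
                                return ppd;
                }
                return NULL;
        }
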
132 static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
138 spin_lock(&dd->uctxt_lock);
139 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
140 rcd = dd->rcd[ctxt];
159 spin_unlock(&dd->uctxt_lock);
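find_ctxt() (lines 132-159): scans the user contexts under uctxt_lock for
the one owning buffer bufn; the driver also raises a disarm event toward the
owning process, which this sketch elides. The pio_base/piocnt range fields
are assumptions:

        static int find_ctxt(struct qib_devdata *dd, unsigned bufn)
        {
                struct qib_ctxtdata *rcd;
                unsigned ctxt;
                int ret = 0;

                spin_lock(&dd->uctxt_lock);
                for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
                     ctxt++) {
                        rcd = dd->rcd[ctxt];
                        if (!rcd || bufn < rcd->pio_base ||  /* assumed */
                            bufn >= rcd->pio_base + rcd->piocnt)
                                continue;
                        ret = 1;  /* owning user context found */
                        break;
                }
                spin_unlock(&dd->uctxt_lock);
                return ret;
        }
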
171 void qib_disarm_piobufs_set(struct qib_devdata *dd, unsigned long *mask,
178 for (i = 0; i < dd->num_pports; i++)
189 ppd = is_sdma_buf(dd, i);
198 spin_lock_irqsave(&dd->pioavail_lock, flags);
199 if (test_bit(i, dd->pio_writing) ||
200 (!test_bit(i << 1, dd->pioavailkernel) &&
201 find_ctxt(dd, i))) {
202 __set_bit(i, dd->pio_need_disarm);
206 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
208 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
212 for (i = 0; i < dd->num_pports; i++)
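qib_disarm_piobufs_set() (lines 171-212): per flagged buffer, SDMA-owned
buffers are routed to their port for a cancel (line 189), while a buffer the
kernel is still writing (pio_writing), or one handed to a user context
(kernel bit clear in pioavailkernel, which keeps two bits per buffer, hence
i << 1), only gets pio_need_disarm set for a deferred disarm. A condensed
sketch of that decision; disarm_one_buf is a hypothetical helper, the real
logic is inline in the mask loop:

        /* Hypothetical helper: handle one buffer i flagged in error. */
        static void disarm_one_buf(struct qib_devdata *dd, unsigned i)
        {
                unsigned long flags;

                spin_lock_irqsave(&dd->pioavail_lock, flags);
                if (test_bit(i, dd->pio_writing) ||
                    (!test_bit(i << 1, dd->pioavailkernel) &&
                     find_ctxt(dd, i)))
                        __set_bit(i, dd->pio_need_disarm);  /* defer */
                else
                        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(i));
                spin_unlock_irqrestore(&dd->pioavail_lock, flags);
        }
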
219 * @dd: the qlogic_ib device
223 static void update_send_bufs(struct qib_devdata *dd)
227 const unsigned piobregs = dd->pioavregs;
247 if (!dd->pioavailregs_dma)
249 spin_lock_irqsave(&dd->pioavail_lock, flags);
253 piov = le64_to_cpu(dd->pioavailregs_dma[i]);
254 pchg = dd->pioavailkernel[i] &
255 ~(dd->pioavailshadow[i] ^ piov);
257 if (pchg && (pchbusy & dd->pioavailshadow[i])) {
258 pnew = dd->pioavailshadow[i] & ~pchbusy;
260 dd->pioavailshadow[i] = pnew;
263 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
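update_send_bufs() (lines 223-263): folds the chip's DMA'd PIO-avail
registers into the software shadow. For each 64-bit register it finds the
kernel-owned buffers whose generation bit in the DMA copy matches the shadow
(pchg), and adopts the DMA copy's busy bits for those buffers. A sketch; the
pchbusy shift line is an assumption consistent with the two-bit
busy/generation encoding:

        static void update_send_bufs(struct qib_devdata *dd)
        {
                unsigned long flags;
                unsigned i;
                const unsigned piobregs = dd->pioavregs;

                if (!dd->pioavailregs_dma)  /* DMA area not yet set up */
                        return;
                spin_lock_irqsave(&dd->pioavail_lock, flags);
                for (i = 0; i < piobregs; i++) {
                        u64 pchbusy, pchg, piov, pnew;

                        piov = le64_to_cpu(dd->pioavailregs_dma[i]);
                        /* kernel-owned bits where DMA and shadow agree */
                        pchg = dd->pioavailkernel[i] &
                                ~(dd->pioavailshadow[i] ^ piov);
                        pchbusy = pchg << QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT;
                        if (pchg && (pchbusy & dd->pioavailshadow[i])) {
                                pnew = dd->pioavailshadow[i] & ~pchbusy;
                                pnew |= piov & pchbusy; /* DMA busy bits */
                                dd->pioavailshadow[i] = pnew;
                        }
                }
                spin_unlock_irqrestore(&dd->pioavail_lock, flags);
        }
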
269 static noinline void no_send_bufs(struct qib_devdata *dd)
271 dd->upd_pio_shadow = 1;
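no_send_bufs() (lines 269-271) records an allocation failure by setting
dd->upd_pio_shadow, so the next qib_getsendbuf_range() call (line 297)
refreshes the shadow from the DMA copy before scanning again.
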
284 u32 __iomem *qib_getsendbuf_range(struct qib_devdata *dd, u32 *pbufnum,
290 unsigned long *shadow = dd->pioavailshadow;
293 if (!(dd->flags & QIB_PRESENT))
297 if (dd->upd_pio_shadow) {
304 update_send_bufs(dd);
313 spin_lock_irqsave(&dd->pioavail_lock, flags);
314 if (dd->last_pio >= first && dd->last_pio <= last)
315 i = dd->last_pio + 1;
318 nbufs = last - dd->min_kernel_pio + 1;
321 i = !first ? dd->min_kernel_pio : first;
327 __set_bit(i, dd->pio_writing);
329 dd->last_pio = i;
332 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
341 no_send_bufs(dd);
344 if (i < dd->piobcnt2k)
345 buf = (u32 __iomem *)(dd->pio2kbase +
346 i * dd->palign);
347 else if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
348 buf = (u32 __iomem *)(dd->pio4kbase +
349 (i - dd->piobcnt2k) * dd->align4k);
351 buf = (u32 __iomem *)(dd->piovl15base +
352 (i - (dd->piobcnt2k + dd->piobcnt4k)) *
353 dd->align4k);
356 dd->upd_pio_shadow = 0;
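qib_getsendbuf_range() (lines 284-356): claims a free PIO buffer by scanning
the shadow under pioavail_lock, resuming after last_pio, and marks the claim
in pio_writing (line 327) before computing the buffer's mapped address. The
index-to-address step, lines 344-353 above, restated as a sketch
(pio_buf_addr is a hypothetical helper name; in the driver this is inline):

        /* Hypothetical helper restating lines 344-353: index -> address. */
        static u32 __iomem *pio_buf_addr(struct qib_devdata *dd, u32 i)
        {
                if (i < dd->piobcnt2k)  /* 2K-buffer region */
                        return (u32 __iomem *)(dd->pio2kbase +
                                               i * dd->palign);
                if (i < dd->piobcnt2k + dd->piobcnt4k || !dd->piovl15base)
                        return (u32 __iomem *)(dd->pio4kbase +
                                               (i - dd->piobcnt2k) *
                                               dd->align4k);
                /* remaining indices: dedicated VL15 buffers, when present */
                return (u32 __iomem *)(dd->piovl15base +
                                       (i - (dd->piobcnt2k +
                                             dd->piobcnt4k)) *
                                       dd->align4k);
        }
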
366 void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
370 spin_lock_irqsave(&dd->pioavail_lock, flags);
371 __clear_bit(n, dd->pio_writing);
372 if (__test_and_clear_bit(n, dd->pio_need_disarm))
373 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
374 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
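qib_sendbuf_done() (lines 366-374) is the release side of the pio_writing
handshake; every body line appears in the matches. Clear the writing bit,
then perform any disarm that qib_disarm_piobufs_set() had to defer:

        void qib_sendbuf_done(struct qib_devdata *dd, unsigned n)
        {
                unsigned long flags;

                spin_lock_irqsave(&dd->pioavail_lock, flags);
                __clear_bit(n, dd->pio_writing);
                /* Run a disarm deferred while the write was in flight. */
                if (__test_and_clear_bit(n, dd->pio_need_disarm))
                        dd->f_sendctrl(dd->pport, QIB_SENDCTRL_DISARM_BUF(n));
                spin_unlock_irqrestore(&dd->pioavail_lock, flags);
        }
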
379 * @dd: the qlogic_ib device
384 void qib_chg_pioavailkernel(struct qib_devdata *dd, unsigned start,
395 spin_lock_irqsave(&dd->pioavail_lock, flags);
417 dd->pioavailshadow);
419 le64_to_cpu(dd->pioavailregs_dma[i]);
423 start, dd->pioavailshadow);
426 + start, dd->pioavailshadow);
427 __set_bit(start, dd->pioavailkernel);
428 if ((start >> 1) < dd->min_kernel_pio)
429 dd->min_kernel_pio = start >> 1;
432 dd->pioavailshadow);
433 __clear_bit(start, dd->pioavailkernel);
434 if ((start >> 1) > dd->min_kernel_pio)
435 dd->min_kernel_pio = start >> 1;
440 if (dd->min_kernel_pio > 0 && dd->last_pio < dd->min_kernel_pio - 1)
441 dd->last_pio = dd->min_kernel_pio - 1;
442 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
444 dd->f_txchk_change(dd, ostart, len, avail, rcd);
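qib_chg_pioavailkernel() (lines 384-444): moves a range of buffers between
kernel and user ownership. The shadow and pioavailkernel bitmaps carry two
bit positions per buffer, so the loop index start is the doubled buffer
number and min_kernel_pio is kept as start >> 1. A condensed sketch of the
ownership flip for one buffer; chg_one_buf is a hypothetical helper, and the
generation-bit resync from pioavailregs_dma (lines 417-427) is elided:

        /* Hypothetical helper: flip ownership of buffer (start / 2);
         * start is the doubled bitmap index (two bits per buffer). */
        static void chg_one_buf(struct qib_devdata *dd, unsigned start,
                                u32 avail)
        {
                if (avail) {  /* buffer returns to kernel ownership */
                        __clear_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
                                    dd->pioavailshadow);
                        __set_bit(start, dd->pioavailkernel);
                        if ((start >> 1) < dd->min_kernel_pio)
                                dd->min_kernel_pio = start >> 1;
                } else {      /* buffer handed to a user context */
                        __set_bit(start + QLOGIC_IB_SENDPIOAVAIL_BUSY_SHIFT,
                                  dd->pioavailshadow);
                        __clear_bit(start, dd->pioavailkernel);
                        if ((start >> 1) > dd->min_kernel_pio)
                                dd->min_kernel_pio = start >> 1;
                }
        }
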
458 struct qib_devdata *dd = ppd->dd;
473 for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts; ctxt++) {
474 spin_lock_irqsave(&dd->uctxt_lock, flags);
475 rcd = dd->rcd[ctxt];
491 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
492 spin_lock_irqsave(&dd->pioavail_lock, flags);
494 __set_bit(i, dd->pio_need_disarm);
495 spin_unlock_irqrestore(&dd->pioavail_lock, flags);
497 spin_unlock_irqrestore(&dd->uctxt_lock, flags);
500 if (!(dd->flags & QIB_HAS_SEND_DMA))
501 dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
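qib_cancel_sends() (lines 458 on): for each user context on the port, flag
the context's entire buffer range in pio_need_disarm (uctxt_lock is taken
first, then dropped before pioavail_lock), and on chips without send DMA
finish with a global disarm-and-flush. A sketch; the context range fields,
the rcd->ppd test, and the second flag cut off in the line-501 match are
assumptions:

        void qib_cancel_sends(struct qib_pportdata *ppd)
        {
                struct qib_devdata *dd = ppd->dd;
                struct qib_ctxtdata *rcd;
                unsigned long flags;
                unsigned ctxt, i, last;

                for (ctxt = dd->first_user_ctxt; ctxt < dd->cfgctxts;
                     ctxt++) {
                        spin_lock_irqsave(&dd->uctxt_lock, flags);
                        rcd = dd->rcd[ctxt];
                        if (rcd && rcd->ppd == ppd) {  /* assumed test */
                                i = rcd->pio_base;     /* assumed fields */
                                last = rcd->pio_base + rcd->piocnt;
                                spin_unlock_irqrestore(&dd->uctxt_lock,
                                                       flags);
                                spin_lock_irqsave(&dd->pioavail_lock, flags);
                                for (; i < last; i++)
                                        __set_bit(i, dd->pio_need_disarm);
                                spin_unlock_irqrestore(&dd->pioavail_lock,
                                                       flags);
                        } else
                                spin_unlock_irqrestore(&dd->uctxt_lock,
                                                       flags);
                }

                if (!(dd->flags & QIB_HAS_SEND_DMA))
                        dd->f_sendctrl(ppd, QIB_SENDCTRL_DISARM_ALL |
                                            QIB_SENDCTRL_FLUSH);
        }
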
512 void qib_force_pio_avail_update(struct qib_devdata *dd)
514 dd->f_sendctrl(dd->pport, QIB_SENDCTRL_AVAIL_BLIP);
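qib_force_pio_avail_update() (lines 512-514) is a one-call wrapper: the
QIB_SENDCTRL_AVAIL_BLIP operation prompts the chip to push a fresh copy of
the PIO-avail state to the DMA'd registers that update_send_bufs() reads.
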
559 if (!(ppd->dd->flags & QIB_INITTED))
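The final match, line 559, is an init guard in a later function (likely the
head-of-line timer path, qib_hol_event()): bail out unless the device has
finished initialization (QIB_INITTED).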