Lines matching refs:ioc (every reference to the identifier `ioc` in the SBA IOMMU driver)

191  * @ioc: IO MMU structure which owns the pdir we are interested in.
198 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
201 u64 *ptr = &(ioc->pdir_base[pide & (~0U * BITS_PER_LONG)]);
202 unsigned long *rptr = (unsigned long *) &(ioc->res_map[(pide >>3) & ~(sizeof(unsigned long) - 1)]);
224 * @ioc: IO MMU structure which owns the pdir we are interested in.
230 sba_check_pdir(struct ioc *ioc, char *msg)
232 u32 *rptr_end = (u32 *) &(ioc->res_map[ioc->res_size]);
233 u32 *rptr = (u32 *) ioc->res_map; /* resource map ptr */
234 u64 *pptr = ioc->pdir_base; /* pdir ptr */
250 sba_dump_pdir_entry(ioc, msg, pide);
267 * @ioc: IO MMU structure which owns the pdir we are interested in.
274 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
306 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((ioc->ibase) | (iovp) | (offset))
307 #define SBA_IOVP(ioc,iova) ((iova) & (ioc)->iovp_mask)
310 #define SBA_IOVA(ioc,iovp,offset,hint_reg) ((iovp) | (offset))
311 #define SBA_IOVP(ioc,iova) (iova)
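
The two macro pairs at lines 306-311 are the whole address scheme: with SBA_HINT_SUPPORT the bus-visible IOVA is the IOC's window base OR'd with the IO virtual page address (iovp) and the in-page offset, and SBA_IOVP() masks the base bits back off; without hint support, ibase drops out entirely. A minimal sketch of the composition, using made-up values for ibase and IOVP_SHIFT (the real base comes from the IOC_IBASE register, and the real reverse mask is ioc->iovp_mask):

    #include <stdio.h>
    #include <stdint.h>

    #define IOVP_SHIFT 12                     /* assumed 4 KB IO pages */

    int main(void)
    {
        uint64_t ibase  = 0xfc000000ULL;      /* made-up window base */
        unsigned pide   = 42;                 /* index from the bitmap search */
        uint64_t iovp   = (uint64_t)pide << IOVP_SHIFT;
        uint64_t offset = 0x123;              /* byte offset within the page */

        /* SBA_IOVA(): the bus address is just ibase | iovp | offset */
        uint64_t iova = ibase | iovp | offset;

        /* SBA_IOVP(): strip the base bits back off; works because ibase
         * is aligned above the window bits (cf. the BUG_ON at line 350) */
        uint64_t iovp_back = iova & ~ibase;
        unsigned pide_back = (unsigned)(iovp_back >> IOVP_SHIFT);  /* PDIR_INDEX */

        printf("iova 0x%llx -> pide %u, offset 0x%llx\n",
               (unsigned long long)iova, pide_back,
               (unsigned long long)(iovp_back & ((1u << IOVP_SHIFT) - 1)));
        return 0;
    }
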
319 static unsigned long ptr_to_pide(struct ioc *ioc, unsigned long *res_ptr,
322 return (((unsigned long)res_ptr - (unsigned long)ioc->res_map) << 3)
328 * @ioc: IO MMU structure which owns the pdir we are interested in.
336 sba_search_bitmap(struct ioc *ioc, struct device *dev,
339 unsigned long *res_ptr = ioc->res_hint;
340 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
350 BUG_ON(ioc->ibase & ~IOVP_MASK);
351 shift = ioc->ibase >> IOVP_SHIFT;
359 tpide = ptr_to_pide(ioc, res_ptr, 0);
371 ioc->res_bitshift = 0;
380 uint bitshiftcnt = ALIGN(ioc->res_bitshift, o);
394 tpide = ptr_to_pide(ioc, res_ptr, bitshiftcnt);
412 ioc->res_bitshift = bitshiftcnt + bits_wanted;
417 ioc->res_hint = (unsigned long *) ioc->res_map;
418 ioc->res_bitshift = 0;
420 ioc->res_hint = res_ptr;
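
sba_search_bitmap() (line 336) walks the resource map, one bit per pdir entry, looking for a run of free bits; it starts at ioc->res_hint and wraps to the start of the map once before giving up, and ptr_to_pide() (line 319) turns the word pointer plus bit offset back into an entry index (the << 3 converts bytes to bits). A simplified first-fit sketch over a plain byte array; note the hedges: the driver scans an unsigned long at a time, MSB-first, and aligns the run to the request's natural boundary, none of which this toy version does:

    #include <stdio.h>
    #include <string.h>

    #define RES_SIZE 16                       /* 16 map bytes = 128 pdir entries */

    /* First-fit search for `want` contiguous clear bits; returns the first
     * bit index (the pide) or -1 when no run is found. */
    static int search_bitmap(unsigned char *map, int nbits, int want)
    {
        int run = 0;
        for (int i = 0; i < nbits; i++) {
            if (map[i >> 3] & (1u << (i & 7))) {
                run = 0;                      /* busy bit: restart the run */
            } else if (++run == want) {
                int start = i - want + 1;
                for (int j = start; j <= i; j++)
                    map[j >> 3] |= 1u << (j & 7);   /* mark allocated */
                return start;
            }
        }
        return -1;                            /* caller retries once, then panics
                                               * (cf. lines 443-448) */
    }

    int main(void)
    {
        unsigned char res_map[RES_SIZE];
        memset(res_map, 0, sizeof(res_map));
        res_map[0] |= 1;                      /* reserve pdir entry 0, as the driver does */

        printf("pide = %d\n", search_bitmap(res_map, RES_SIZE << 3, 4));
        return 0;
    }

The `RES_SIZE << 3` mirrors the `ioc->res_size << 3` bound checked at lines 444 and 446: the map size in bytes times eight is the total number of allocatable pdir entries.
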
428 * @ioc: IO MMU structure which owns the pdir we are interested in.
435 sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
443 pide = sba_search_bitmap(ioc, dev, pages_needed);
444 if (pide >= (ioc->res_size << 3)) {
445 pide = sba_search_bitmap(ioc, dev, pages_needed);
446 if (pide >= (ioc->res_size << 3))
448 __FILE__, ioc->ioc_hpa);
453 if(0x00 != ((u8 *) ioc->pdir_base)[pide*sizeof(u64) + 7]) {
454 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
460 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
461 ioc->res_bitshift );
470 ioc->avg_search[ioc->avg_idx++] = cr_start;
471 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
473 ioc->used_pages += pages_needed;
482 * @ioc: IO MMU structure which owns the pdir we are interested in.
486 * clear bits in the ioc's resource map
489 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
491 unsigned long iovp = SBA_IOVP(ioc, iova);
494 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
506 ioc->used_pages -= bits_not_wanted;
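
sba_free_range() (line 489) reverses the allocation: SBA_IOVP() recovers the window-relative address, the pide becomes a byte index into the resource map, that index is rounded down to an unsigned long boundary (ridx & ~RESMAP_IDX_MASK, line 494), and the bits are cleared. A byte-level companion sketch to the allocator above, again LSB-first where the driver is MSB-first and word-at-a-time:

    #include <stdio.h>
    #include <string.h>

    /* Clear `count` bits starting at bit index `pide`. */
    static void free_range(unsigned char *map, int pide, int count)
    {
        for (int i = pide; i < pide + count; i++)
            map[i >> 3] &= ~(1u << (i & 7));   /* entry is free again */
    }

    int main(void)
    {
        unsigned char res_map[16];
        memset(res_map, 0xff, sizeof(res_map));   /* pretend all entries are busy */
        free_range(res_map, 12, 4);               /* release 4 entries at pide 12 */
        printf("byte 1 is now 0x%02x\n", res_map[1]);   /* bits 12..15 cleared -> 0x0f */
        return 0;
    }
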
520 #define SBA_DMA_HINT(ioc, val) ((val) << (ioc)->hint_shift_pdir)
595 * @ioc: IO MMU structure which owns the pdir we are interested in.
610 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
612 u32 iovp = (u32) SBA_IOVP(ioc,iova);
613 u64 *pdir_ptr = &ioc->pdir_base[PDIR_INDEX(iovp)];
623 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
665 WRITE_REG( SBA_IOVA(ioc, iovp, 0, 0), ioc->ioc_hpa+IOC_PCOM);
677 struct ioc *ioc;
693 ioc = GET_IOC(dev);
699 return((int)(mask >= (ioc->ibase - 1 +
700 (ioc->pdir_size / sizeof(u64) * IOVP_SIZE) )));
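
sba_dma_supported() (lines 699-700) just checks that the device's DMA mask reaches the top of the IOVA window: one u64 pdir entry maps one IO page, so the window ends at ibase + (pdir_size / sizeof(u64)) * IOVP_SIZE - 1. A worked example with made-up geometry:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Made-up geometry: ibase 0 (the Astro/Ike init path, line 1448),
         * a 512 KB pdir of u64 entries, 4 KB IO pages. */
        uint64_t ibase     = 0;
        uint64_t pdir_size = 512 * 1024;
        uint64_t iovp_size = 4096;

        /* Highest bus address the IOC can produce; the `- 1 + ...` ordering
         * mirrors line 699 and relies on well-defined unsigned wraparound
         * when ibase is 0. */
        uint64_t top = ibase - 1 + (pdir_size / sizeof(uint64_t)) * iovp_size;

        uint64_t dma_mask = 0xffffffffULL;     /* a 32-bit capable device */
        printf("window top 0x%llx, supported = %d\n",
               (unsigned long long)top, dma_mask >= top);   /* 0xfffffff, 1 */
        return 0;
    }
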
717 struct ioc *ioc;
724 ioc = GET_IOC(dev);
732 spin_lock_irqsave(&ioc->res_lock, flags);
734 sba_check_pdir(ioc,"Check before sba_map_single()");
738 ioc->msingle_calls++;
739 ioc->msingle_pages += size >> IOVP_SHIFT;
741 pide = sba_alloc_range(ioc, dev, size);
747 pdir_start = &(ioc->pdir_base[pide]);
774 sba_check_pdir(ioc,"Check after sba_map_single()");
776 spin_unlock_irqrestore(&ioc->res_lock, flags);
779 return SBA_IOVA(ioc, iovp, offset, DEFAULT_DMA_HINT_REG);
796 struct ioc *ioc;
805 ioc = GET_IOC(dev);
811 spin_lock_irqsave(&ioc->res_lock, flags);
814 ioc->usingle_calls++;
815 ioc->usingle_pages += size >> IOVP_SHIFT;
818 sba_mark_invalid(ioc, iova, size);
824 d = &(ioc->saved[ioc->saved_cnt]);
827 if (++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT) {
828 int cnt = ioc->saved_cnt;
830 sba_free_range(ioc, d->iova, d->size);
833 ioc->saved_cnt = 0;
835 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
838 sba_free_range(ioc, iova, size);
844 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
847 spin_unlock_irqrestore(&ioc->res_lock, flags);
938 struct ioc *ioc;
944 ioc = GET_IOC(dev);
955 spin_lock_irqsave(&ioc->res_lock, flags);
958 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
960 sba_dump_sg(ioc, sglist, nents);
966 ioc->msg_calls++;
977 coalesced = iommu_coalesce_chunks(ioc, dev, sglist, nents, sba_alloc_range);
987 filled = iommu_fill_pdir(ioc, sglist, nents, 0, sba_io_pdir_entry);
994 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
996 sba_dump_sg(ioc, sglist, nents);
1001 spin_unlock_irqrestore(&ioc->res_lock, flags);
1022 struct ioc *ioc;
1030 ioc = GET_IOC(dev);
1033 ioc->usg_calls++;
1037 spin_lock_irqsave(&ioc->res_lock, flags);
1038 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1039 spin_unlock_irqrestore(&ioc->res_lock, flags);
1046 ioc->usg_pages += ((sg_dma_address(sglist) & ~IOVP_MASK) + sg_dma_len(sglist) + IOVP_SIZE - 1) >> PAGE_SHIFT;
1047 ioc->usingle_calls--; /* kluge since call is unmap_sg() */
1055 spin_lock_irqsave(&ioc->res_lock, flags);
1056 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1057 spin_unlock_irqrestore(&ioc->res_lock, flags);
1101 PAT_MOD(mod)->mod_info.ioc = PAT_GET_IOC(temp);
1212 struct ioc *ioc;
1224 lba_set_iregs(lba, ibd->ioc->ibase, ibd->ioc->imask);
1230 setup_ibase_imask(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1233 .ioc = ioc,
1255 sba_ioc_init_pluto(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1268 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE);
1269 iova_space_size = ~(READ_REG(ioc->ioc_hpa + IOC_IMASK) & 0xFFFFFFFFUL) + 1;
1271 if ((ioc->ibase < 0xfed00000UL) && ((ioc->ibase + iova_space_size) > 0xfee00000UL)) {
1281 ioc->pdir_size = (iova_space_size / IOVP_SIZE) * sizeof(u64);
1284 __func__, ioc->ioc_hpa, iova_space_size >> 20,
1287 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1288 get_order(ioc->pdir_size));
1289 if (!ioc->pdir_base)
1292 memset(ioc->pdir_base, 0, ioc->pdir_size);
1295 __func__, ioc->pdir_base, ioc->pdir_size);
1298 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1299 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1302 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1305 WARN_ON((((unsigned long) ioc->pdir_base) & PAGE_MASK) != (unsigned long) ioc->pdir_base);
1306 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1311 ioc->imask = iova_space_mask;
1313 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1315 sba_dump_tlb(ioc->ioc_hpa);
1317 setup_ibase_imask(sba, ioc, ioc_num);
1319 WRITE_REG(ioc->imask, ioc->ioc_hpa + IOC_IMASK);
1326 ioc->imask |= 0xFFFFFFFF00000000UL;
1340 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1346 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1352 WRITE_REG(ioc->ibase | 31, ioc->ioc_hpa + IOC_PCOM);
1369 ioc->pdir_size /= 2;
1370 ioc->pdir_base[PDIR_INDEX(iova_space_size/2)] = SBA_AGPGART_COOKIE;
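
The Pluto init path reads its geometry back from hardware: the low 32 bits of IOC_IMASK form a contiguous mask of the window, so ~mask + 1 (line 1269) is the window size in bytes, and the pdir needs one u64 per IO page (line 1281). A worked example with a made-up IMASK value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Made-up register contents; the real value is
         * READ_REG(ioc->ioc_hpa + IOC_IMASK). */
        uint64_t imask = 0xfffffffff0000000ULL;

        /* ~(IMASK & 0xFFFFFFFF) + 1; the kernel stores this in a u32,
         * so the high bits truncate away. */
        uint32_t iova_space_size = ~(uint32_t)(imask & 0xffffffffULL) + 1;

        /* One u64 pdir entry per 4 KB IO page. */
        uint32_t iovp_size = 4096;
        uint64_t pdir_size = ((uint64_t)iova_space_size / iovp_size)
                                 * sizeof(uint64_t);

        printf("window %u MB, pdir %llu KB\n",          /* 256 MB, 512 KB */
               iova_space_size >> 20,
               (unsigned long long)(pdir_size >> 10));
        return 0;
    }
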
1376 sba_ioc_init(struct parisc_device *sba, struct ioc *ioc, int ioc_num)
1415 ioc->pdir_size = pdir_size = (iova_space_size/IOVP_SIZE) * sizeof(u64);
1419 ioc->ioc_hpa,
1424 ioc->pdir_base = sba_alloc_pdir(pdir_size);
1427 __func__, ioc->pdir_base, pdir_size);
1431 ioc->hint_shift_pdir = iov_order + PAGE_SHIFT;
1432 ioc->hint_mask_pdir = ~(0x3 << (iov_order + PAGE_SHIFT));
1435 ioc->hint_shift_pdir, ioc->hint_mask_pdir);
1438 WRITE_REG64(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1448 ioc->ibase = 0;
1449 ioc->imask = iova_space_mask; /* save it */
1451 ioc->iovp_mask = ~(iova_space_mask + PAGE_SIZE - 1);
1455 __func__, ioc->ibase, ioc->imask);
1463 setup_ibase_imask(sba, ioc, ioc_num);
1468 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa+IOC_IBASE);
1469 WRITE_REG(ioc->imask, ioc->ioc_hpa+IOC_IMASK);
1472 WRITE_REG(0, ioc->ioc_hpa+IOC_TCNFG);
1478 WRITE_REG(0 | 31, ioc->ioc_hpa+IOC_PCOM);
1480 ioc->ibase = 0; /* used by SBA_IOVA and related macros */
1572 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, ASTRO_IOC_OFFSET);
1584 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, PLUTO_IOC_OFFSET);
1600 sba_dev->ioc[0].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(0));
1601 sba_dev->ioc[1].ioc_hpa = ioc_remap(sba_dev, IKE_IOC_OFFSET(1));
1610 void __iomem *ioc_hpa = sba_dev->ioc[i].ioc_hpa;
1638 READ_REG(sba_dev->ioc[i].ioc_hpa + ROPE7_CTL);
1640 DBG_INIT(" ioc[%d] ROPE_CFG 0x%Lx ROPE_DBG 0x%Lx\n",
1642 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x40),
1643 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x50)
1646 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x108),
1647 READ_REG(sba_dev->ioc[i].ioc_hpa + 0x400)
1651 sba_ioc_init_pluto(sba_dev->dev, &(sba_dev->ioc[i]), i);
1653 sba_ioc_init(sba_dev->dev, &(sba_dev->ioc[i]), i);
1678 res_size = sba_dev->ioc[i].pdir_size/sizeof(u64); /* entries */
1689 sba_dev->ioc[i].res_size = res_size;
1690 sba_dev->ioc[i].res_map = (char *) __get_free_pages(GFP_KERNEL, get_order(res_size));
1693 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1697 if (NULL == sba_dev->ioc[i].res_map)
1703 memset(sba_dev->ioc[i].res_map, 0, res_size);
1705 sba_dev->ioc[i].res_hint = (unsigned long *)
1706 &(sba_dev->ioc[i].res_map[L1_CACHE_BYTES]);
1710 sba_dev->ioc[i].res_map[0] = 0x80;
1711 sba_dev->ioc[i].pdir_base[0] = 0xeeffc0addbba0080ULL;
1720 long *p_start = (long *) &(sba_dev->ioc[i].res_map[idx_start]);
1721 long *p_end = (long *) &(sba_dev->ioc[i].res_map[idx_end]);
1730 iterate_pages( sba_dev->ioc[i].res_map, res_size,
1732 iterate_pages( sba_dev->ioc[i].pdir_base, sba_dev->ioc[i].pdir_size,
1737 __func__, i, res_size, sba_dev->ioc[i].res_map);
1761 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
1762 int total_pages = (int) (ioc->res_size << 3); /* 8 bits per byte */
1774 (int) ((ioc->res_size << 3) * sizeof(u64)), /* 8 bits/byte */
1778 ioc->res_size, ioc->res_size << 3); /* 8 bits per byte */
1795 total_pages - ioc->used_pages, ioc->used_pages,
1796 (int) (ioc->used_pages * 100 / total_pages));
1798 min = max = ioc->avg_search[0];
1800 avg += ioc->avg_search[i];
1801 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1802 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1809 ioc->msingle_calls, ioc->msingle_pages,
1810 (int) ((ioc->msingle_pages * 1000)/ioc->msingle_calls));
1813 min = ioc->usingle_calls;
1814 max = ioc->usingle_pages - ioc->usg_pages;
1819 ioc->msg_calls, ioc->msg_pages,
1820 (int) ((ioc->msg_pages * 1000)/ioc->msg_calls));
1823 ioc->usg_calls, ioc->usg_pages,
1824 (int) ((ioc->usg_pages * 1000)/ioc->usg_calls));
1848 struct ioc *ioc = &sba_dev->ioc[0]; /* FIXME: Multi-IOC support! */
1849 unsigned int *res_ptr = (unsigned int *)ioc->res_map;
1852 for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
1961 spin_lock_init(&(sba_dev->ioc[i].res_lock));
2021 return &(sba->ioc[iocnum]);