Searched refs:nent (Results 1 - 25 of 26) sorted by relevance


/drivers/net/ethernet/mellanox/mlx5/core/
eq.c
112 struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));
114 return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;
326 for (i = 0; i < eq->nent; i++) {
333 int nent, u64 mask, const char *name, struct mlx5_uar *uar)
341 eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
342 err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
360 in->ctx.log_sz_usr_page = cpu_to_be32(ilog2(eq->nent) << 24 | uar->index);
332 mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, int nent, u64 mask, const char *name, struct mlx5_uar *uar) argument
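
The eq.c hits above (112/114, and the matching mlx4 lines further down) are the ConnectX event-queue idiom in full: nent is rounded to a power of two, so cons_index & (nent - 1) picks the ring slot while cons_index & nent flips once per lap, and an entry is ready only when its ownership bit matches that lap parity; the MLX5_NUM_SPARE_EQE padding on line 341 keeps some headroom in the ring before the rounding. A minimal standalone sketch of the same arithmetic, with an illustrative simulated producer in place of the hardware:

/* ring.c - EQ ring-buffer ownership sketch (illustrative, not driver API) */
#include <stdio.h>
#include <stdint.h>

#define NENT 8                        /* power of two, as the drivers enforce */

struct eqe { uint8_t owner; };        /* only the ownership bit matters here */

static struct eqe ring[NENT];

/* Entry is valid when its owner bit matches the consumer's lap parity:
 * (cons_index & NENT) flips every time the consumer wraps the ring. */
static struct eqe *next_eqe(uint32_t cons_index)
{
    struct eqe *e = &ring[cons_index & (NENT - 1)];

    return ((e->owner & 1) ^ !!(cons_index & NENT)) ? NULL : e;
}

int main(void)
{
    uint32_t cons = 0;
    int i;

    for (i = 0; i < NENT; i++)        /* simulated producer: publish 3 */
        ring[i].owner = (i < 3) ? 0 : 1;

    while (next_eqe(cons)) {
        printf("consumed slot %u\n", cons & (NENT - 1));
        cons++;
    }
    printf("stopped at cons_index %u: next entry still hardware-owned\n", cons);
    return 0;
}
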
/drivers/infiniband/hw/mthca/
mthca_allocator.c
160 int mthca_array_init(struct mthca_array *array, int nent) argument
162 int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;
177 void mthca_array_cleanup(struct mthca_array *array, int nent) argument
181 for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
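
The two mthca_allocator.c hits are plain round-up division: nent pointers occupy DIV_ROUND_UP(nent * sizeof(void *), PAGE_SIZE) pages. A standalone check of the arithmetic, assuming 4 KiB pages and 8-byte pointers:

/* npages.c - page-count arithmetic from mthca_array_init()/cleanup() */
#include <stdio.h>

#define PAGE_SIZE 4096                /* assumed page size, for illustration */

static int npages_for(int nent)
{
    return (nent * (int)sizeof(void *) + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
    /* with 8-byte pointers, 512 fit one page exactly; 513 spill into two */
    printf("512 entries -> %d page(s)\n", npages_for(512));
    printf("513 entries -> %d page(s)\n", npages_for(513));
    return 0;
}
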
mthca_eq.c
184 mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),
230 unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;
466 int nent,
479 eq->nent = roundup_pow_of_two(max(nent, 2));
480 npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;
511 for (i = 0; i < eq->nent; ++i)
535 eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);
560 eq->eqn, eq->nent);
593 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE
465 mthca_create_eq(struct mthca_dev *dev, int nent, u8 intr, struct mthca_eq *eq) argument
[all...]
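
mthca_create_eq sizes its ring the same way: line 479 rounds the request up to a power of two (with a floor of 2), and line 535 stores the log2 of the result in the top byte of the EQ context. For a power of two, ffs(nent) - 1 equals ilog2(nent), which is why this encoding agrees with the mlx5 ilog2() form on line 360 above. A userspace sketch, with a hand-rolled stand-in for the kernel's roundup_pow_of_two():

/* eqsize.c - ring sizing and log2 encoding from mthca_create_eq() */
#include <stdio.h>
#include <strings.h>                  /* ffs() */

/* userspace stand-in for the kernel's roundup_pow_of_two() */
static unsigned int roundup_pow_of_two(unsigned int n)
{
    unsigned int r = 1;

    while (r < n)
        r <<= 1;
    return r;
}

int main(void)
{
    unsigned int want;

    for (want = 1; want <= 9; want++) {
        unsigned int nent = roundup_pow_of_two(want < 2 ? 2 : want);

        printf("requested %u -> nent %u, log size %d\n",
               want, nent, ffs((int)nent) - 1);
    }
    return 0;
}
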
mthca_cq.c
353 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent) argument
358 ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,
365 for (i = 0; i < nent; ++i)
776 int mthca_init_cq(struct mthca_dev *dev, int nent, argument
784 cq->ibcq.cqe = nent - 1;
820 err = mthca_alloc_cq_buf(dev, &cq->buf, nent);
834 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
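
The CQ side follows suit: mthca_init_cq reports nent - 1 back through ibcq.cqe (line 784) and packs log2(nent) into the top byte of a big-endian context word (line 834). A standalone sketch of that packing, using htonl() as a userspace stand-in for cpu_to_be32():

/* cqctx.c - logsize packing from mthca_init_cq() */
#include <stdio.h>
#include <stdint.h>
#include <strings.h>                  /* ffs() */
#include <arpa/inet.h>                /* htonl(), stand-in for cpu_to_be32() */

int main(void)
{
    int nent = 256;                   /* CQ depth, already a power of two */
    uint32_t host = (uint32_t)(ffs(nent) - 1) << 24;  /* log2 in top byte */
    uint32_t wire = htonl(host);      /* device expects big-endian */

    printf("nent %d -> ibcq.cqe %d, context field 0x%08x\n",
           nent, nent - 1, host);
    (void)wire;                       /* would be written to the context */
    return 0;
}
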
mthca_dev.h
422 int mthca_array_init(struct mthca_array *array, int nent);
423 void mthca_array_cleanup(struct mthca_array *array, int nent);
498 int mthca_init_cq(struct mthca_dev *dev, int nent,
509 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
mthca_provider.h
113 int nent; member in struct:mthca_eq
mthca_cmd.c
662 int nent = 0; local
692 pages[nent * 2] = cpu_to_be64(virt);
696 pages[nent * 2 + 1] =
702 if (++nent == MTHCA_MAILBOX_SIZE / 16) {
703 err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
707 nent = 0;
712 if (nent)
713 err = mthca_cmd(dev, mailbox->dma, nent, 0, op,
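
The mthca_cmd.c hits (and the near-identical mlx4 fw.c hits below) batch page addresses to firmware: virt/DMA pairs are appended two 64-bit words per entry, the mailbox is flushed each time nent reaches MTHCA_MAILBOX_SIZE / 16, and a trailing partial batch is flushed after the loop. A standalone sketch with illustrative addresses and a send_chunk() stand-in for mthca_cmd():

/* mailbox.c - page-batching loop from mthca_cmd.c (illustrative addresses) */
#include <stdio.h>
#include <stdint.h>

#define MAILBOX_SIZE 4096
#define CHUNK (MAILBOX_SIZE / 16)     /* 16 bytes per entry -> 256 per batch */

static void send_chunk(int nent)      /* stand-in for mthca_cmd() */
{
    printf("command carries %d entries\n", nent);
}

int main(void)
{
    uint64_t pages[2 * CHUNK];
    int nent = 0;
    int i, total = 600;               /* illustrative page count */

    for (i = 0; i < total; i++) {
        pages[nent * 2]     = 0x1000ull * i;               /* fake virt */
        pages[nent * 2 + 1] = 0x100000ull + 0x1000ull * i; /* fake DMA  */
        if (++nent == CHUNK) {        /* mailbox full: flush and restart */
            send_chunk(nent);
            nent = 0;
        }
    }
    if (nent)                         /* trailing partial mailbox */
        send_chunk(nent);
    return 0;
}
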
mthca_provider.c
651 int nent; local
686 for (nent = 1; nent <= entries; nent <<= 1)
689 err = mthca_init_cq(to_mdev(ibdev), nent,
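
The doubling loop in mthca_provider.c (line 686) finds the smallest power of two strictly greater than the requested depth, so the nent - 1 usable entries that mthca_init_cq reports still cover the request. A standalone check:

/* cqround.c - depth rounding from mthca_create_cq() */
#include <stdio.h>

int main(void)
{
    int entries;

    for (entries = 1; entries <= 9; entries++) {
        int nent;

        for (nent = 1; nent <= entries; nent <<= 1)
            ;                         /* empty body, as in the driver */
        printf("requested %d -> nent %d, usable cqe %d\n",
               entries, nent, nent - 1);
    }
    return 0;
}
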
/drivers/iommu/
omap-iommu.c
722 int nent = 1; local
733 nent *= 16;
737 bytes *= nent;
738 memset(iopte, 0, nent * sizeof(*iopte));
739 flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte));
750 nent = 1; /* for the next L1 entry */
754 nent *= 16;
758 bytes *= nent;
760 memset(iopgd, 0, nent * sizeof(*iopgd));
761 flush_iopgd_range(iopgd, iopgd + (nent
[all...]
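
The omap-iommu hits show ARM short-descriptor teardown: a large page (second level) or supersection (first level) occupies 16 consecutive table slots, so nent widens from 1 to 16, the index is rewound to the start of the run, and the whole run is cleared and flushed. A standalone sketch with illustrative constants:

/* ioclear.c - 16-slot run teardown, as in omap-iommu (constants illustrative) */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define RUN 16                        /* slots per large page / supersection */

int main(void)
{
    uint32_t table[256];
    int idx = 37;                     /* any slot inside the mapping */
    int nent = 1;                     /* section/small page: one slot */
    int is_large = 1;                 /* large page or supersection */

    memset(table, 0xAA, sizeof(table));
    if (is_large) {
        nent *= RUN;                  /* widen to the full 16-slot run */
        idx &= ~(RUN - 1);            /* rewind to the run's first slot */
    }
    memset(&table[idx], 0, nent * sizeof(table[0]));
    printf("cleared %d slot(s) from index %d\n", nent, idx);
    return 0;
}
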
/drivers/tty/serial/
pch_uart.c
253 int nent; member in struct:eg20t_port
801 for (i = 0; i < priv->nent; i++, sg++) {
807 dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE);
809 priv->nent = 0;
950 int nent; local
1029 nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE);
1030 if (!nent) {
1034 priv->nent = nent;
1036 for (i = 0; i < nent;
[all...]
atmel_serial.c
825 int ret, nent; local
844 nent = dma_map_sg(port->dev,
849 if (!nent) {
987 int ret, nent; local
1008 nent = dma_map_sg(port->dev,
1013 if (!nent) {
sh-sci.c
1626 int nent; local
1652 nent = dma_map_sg(port->dev, &s->sg_tx, 1, DMA_TO_DEVICE);
1653 if (!nent)
1661 s->sg_len_tx = nent;
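
pch_uart, atmel_serial and sh-sci all exercise the same dma_map_sg() contract: the return value is the number of segments actually mapped, which an IOMMU may coalesce below the count passed in, and 0 signals failure, hence the !nent checks above. A kernel-context sketch of the idiom (illustrative names; builds only inside a module, not standalone):

/* kernel-context sketch, not a standalone program; names are illustrative */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

struct demo_port {
    struct device *dev;
    struct scatterlist sg[4];
    int num;                          /* entries handed to dma_map_sg() */
    int nent;                         /* entries actually mapped; 0 = unmapped */
};

static int demo_start_tx(struct demo_port *p)
{
    int nent = dma_map_sg(p->dev, p->sg, p->num, DMA_TO_DEVICE);

    if (!nent)                        /* 0 is the failure return */
        return -ENOMEM;

    /* nent <= num after coalescing: program nent DMA descriptors, but
     * per the DMA-API documentation, the eventual dma_unmap_sg() must
     * be passed the original num, not the coalesced count. */
    p->nent = nent;
    return 0;
}

static void demo_stop_tx(struct demo_port *p)
{
    dma_unmap_sg(p->dev, p->sg, p->num, DMA_TO_DEVICE);
    p->nent = 0;
}
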
/drivers/infiniband/hw/mlx5/
cq.c
77 static u8 sw_ownership_bit(int n, int nent) argument
79 return (n & nent) ? 1 : 0;
585 int nent, int cqe_size)
589 err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
595 buf->nent = nent;
680 for (i = 0; i < buf->nent; i++) {
1040 (i + 1) & (cq->resize_buf->nent),
1043 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);
584 alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf, int nent, int cqe_size) argument
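
sw_ownership_bit() in the mlx5 cq.c hits is the software half of the ownership scheme from the eq.c sketch earlier: with nent a power of two, n & nent is the lap parity of index n, letting the resize path (lines 1040-1043) stamp copied CQEs exactly as hardware would have. A standalone check:

/* swown.c - lap-parity ownership stamp from mlx5 sw_ownership_bit() */
#include <stdio.h>

static unsigned char sw_ownership_bit(int n, int nent)
{
    return (n & nent) ? 1 : 0;        /* nent is a power of two */
}

int main(void)
{
    int nent = 4, n;

    for (n = 0; n < 3 * nent; n++)
        printf("index %2d -> slot %d, owner %d\n",
               n, n & (nent - 1), sw_ownership_bit(n, nent));
    return 0;
}
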
main.c
91 int nent; local
97 nent = MLX5_COMP_EQ_SIZE;
107 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
mlx5_ib.h
201 int nent; member in struct:mlx5_ib_cq_buf
/drivers/net/ethernet/mellanox/mlx4/
eq.c
107 /* (entry & (eq->nent - 1)) gives us a cyclic array */
108 unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;
122 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;
716 mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
718 eq->cons_index, eqe->owner, eq->nent,
721 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
733 mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
735 eq->cons_index, eqe->owner, eq->nent,
738 !!(eq->cons_index & eq->nent) ? "HW" : "SW");
886 static int mlx4_create_eq(struct mlx4_dev *dev, int nent, argument
[all...]
cq.c
238 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, argument
276 cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
fw.c
1072 int nent = 0; local
1102 pages[nent * 2] = cpu_to_be64(virt);
1106 pages[nent * 2 + 1] =
1112 if (++nent == MLX4_MAILBOX_SIZE / 16) {
1113 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
1118 nent = 0;
1123 if (nent)
1124 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op,
main.c
1796 int nent; local
1801 nent = dev->caps.max_counters;
1802 return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
/drivers/infiniband/hw/qib/
qib_pcie.c
273 int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, argument
288 if (nent && *nent && pos) {
289 qib_msix_setup(dd, pos, nent, entry);
/drivers/infiniband/hw/mlx4/
cq.c
100 static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent) argument
104 err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
126 mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);
/drivers/spi/
spi-topcliff-pch.c
129 int nent; member in struct:pch_spi_dma_ctrl
807 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
810 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
1057 dma->nent = num;
1116 dma->nent = num;
/drivers/xen/
efi.c
342 efi_systab_xen.nr_tables = info->cfg.nent;
/drivers/target/
target_core_transport.c
2207 unsigned int nent; local
2210 nent = DIV_ROUND_UP(length, PAGE_SIZE);
2211 sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
2215 sg_init_table(sg, nent);
2228 *nents = nent;
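
target_core_transport.c sizes a scatterlist straight from the buffer length: DIV_ROUND_UP(length, PAGE_SIZE) entries, allocated and then initialized by sg_init_table(), which zeroes the table and sets the end marker. A kernel-context sketch (illustrative function name; kmalloc_array() is the overflow-safe spelling of the sizeof * nent allocation in the hit):

/* kernel-context sketch, not a standalone program; demo_alloc_sgl() is
 * an illustrative name */
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

static struct scatterlist *demo_alloc_sgl(u32 length, unsigned int *nents)
{
    unsigned int nent = DIV_ROUND_UP(length, PAGE_SIZE);
    struct scatterlist *sg;

    sg = kmalloc_array(nent, sizeof(*sg), GFP_KERNEL);
    if (!sg)
        return NULL;

    sg_init_table(sg, nent);          /* zeroes the table, sets end marker */
    *nents = nent;
    return sg;
}
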
/drivers/scsi/pm8001/
pm80xx_hwi.h
859 __le32 nent; member in struct:dek_mgmt_req

Completed in 2837 milliseconds
