Searched defs:pd (Results 1 - 25 of 214) sorted by relevance

/drivers/infiniband/hw/amso1100/
c2_pd.c
43 int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd) argument
55 pd->pd_id = obj;
66 void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd) argument
69 __clear_bit(pd->pd_id, c2dev->pd_table.table);
/drivers/infiniband/hw/mlx5/
ah.c
54 struct ib_ah *mlx5_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) argument
/drivers/infiniband/hw/mthca/
mthca_pd.c
39 int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd) argument
43 pd->privileged = privileged;
45 atomic_set(&pd->sqp_count, 0);
46 pd->pd_num = mthca_alloc(&dev->pd_table.alloc);
47 if (pd->pd_num == -1)
51 err = mthca_mr_alloc_notrans(dev, pd->pd_num,
54 &pd->ntmr);
56 mthca_free(&dev->pd_table.alloc, pd->pd_num);
62 void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd) argument
64 if (pd
[all...]
mthca_allocator.c
195 union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
260 err = mthca_mr_alloc_phys(dev, pd->pd_num,
194 mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, int hca_write, struct mthca_mr *mr) argument
/drivers/staging/ozwpan/
ozusbsvc.h
18 struct oz_pd *pd; member in struct:oz_usb_ctx
25 int oz_usb_start(struct oz_pd *pd, int resume);
26 void oz_usb_stop(struct oz_pd *pd, int pause);
27 void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt);
28 int oz_usb_heartbeat(struct oz_pd *pd);
29 void oz_usb_farewell(struct oz_pd *pd, u8 ep_num, u8 *data, u8 len);
ozusbsvc.c
52 int oz_usb_start(struct oz_pd *pd, int resume) argument
70 usb_ctx->pd = pd;
76 spin_lock_bh(&pd->app_lock[OZ_APPID_USB]);
77 old_ctx = pd->app_ctx[OZ_APPID_USB];
79 pd->app_ctx[OZ_APPID_USB] = usb_ctx;
80 oz_usb_get(pd->app_ctx[OZ_APPID_USB]);
81 spin_unlock_bh(&pd->app_lock[OZ_APPID_USB]);
90 oz_pd_get(pd);
102 spin_lock_bh(&pd
117 oz_usb_stop(struct oz_pd *pd, int pause) argument
186 oz_usb_heartbeat(struct oz_pd *pd) argument
214 struct oz_pd *pd = usb_ctx->pd; local
238 struct oz_pd *pd = usb_ctx->pd; local
[all...]
/drivers/infiniband/hw/ehca/
ehca_pd.c
51 struct ehca_pd *pd; local
54 pd = kmem_cache_zalloc(pd_cache, GFP_KERNEL);
55 if (!pd) {
62 INIT_LIST_HEAD(&pd->free[i]);
63 INIT_LIST_HEAD(&pd->full[i]);
65 mutex_init(&pd->lock);
78 pd->fw_pd.value = shca->pd->fw_pd.value;
80 pd->fw_pd.value = (u64)pd;
85 ehca_dealloc_pd(struct ib_pd *pd) argument
[all...]
ehca_av.c
88 struct ib_ah *ehca_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) argument
92 struct ehca_shca *shca = container_of(pd->device, struct ehca_shca,
97 ehca_err(pd->device, "Out of memory pd=%p ah_attr=%p",
98 pd, ah_attr);
132 rc = ehca_query_port(pd->device, ah_attr->port_num,
136 ehca_err(pd->device, "Invalid port number "
138 "pd=%p ah_attr=%p", rc, pd, ah_attr);
142 rc = ehca_query_gid(pd
[all...]
ipz_pt_fn.c
127 static int alloc_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd) argument
133 mutex_lock(&pd->lock);
135 if (!list_empty(&pd->free[order]))
136 page = list_entry(pd->free[order].next,
149 list_add(&page->list, &pd->free[order]);
157 list_move(&page->list, &pd->full[order]);
159 mutex_unlock(&pd->lock);
167 ehca_err(pd->ib_pd.device, "failed to allocate small queue page");
168 mutex_unlock(&pd->lock);
172 static void free_small_queue_page(struct ipz_queue *queue, struct ehca_pd *pd) argument
204 ipz_queue_ctor(struct ehca_pd *pd, struct ipz_queue *queue, const u32 nr_of_pages, const u32 pagesize, const u32 qe_size, const u32 nr_of_sg, int is_small) argument
256 ipz_queue_dtor(struct ehca_pd *pd, struct ipz_queue *queue) argument
[all...]
/drivers/infiniband/hw/mlx4/
ah.c
42 static struct ib_ah *create_ib_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, argument
45 struct mlx4_dev *dev = to_mdev(pd->device)->dev;
47 ah->av.ib.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
71 static struct ib_ah *create_iboe_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, argument
74 struct mlx4_ib_dev *ibdev = to_mdev(pd->device);
90 ah->av.eth.port_pd = cpu_to_be32(to_mpd(pd)->pdn | (ah_attr->port_num << 24));
112 struct ib_ah *mlx4_ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr) argument
121 if (rdma_port_get_link_layer(pd->device, ah_attr->port_num) == IB_LINK_LAYER_ETHERNET) {
133 ret = create_iboe_ah(pd, ah_attr, ah);
141 return create_ib_ah(pd, ah_att
[all...]
srq.c
71 struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, argument
75 struct mlx4_ib_dev *dev = to_mdev(pd->device);
108 if (pd->uobject) {
116 srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
132 err = mlx4_ib_db_map_user(to_mucontext(pd->uobject->context),
185 err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt,
193 if (pd->uobject)
204 if (pd->uobject)
205 mlx4_ib_db_unmap_user(to_mucontext(pd->uobject->context), &srq->db);
213 if (pd
[all...]
/drivers/leds/
leds-s3c24xx.c
46 struct s3c24xx_led_platdata *pd = led->pdata; local
47 int state = (value ? 1 : 0) ^ (pd->flags & S3C24XX_LEDF_ACTLOW);
52 gpio_set_value(pd->gpio, state);
54 if (pd->flags & S3C24XX_LEDF_TRISTATE) {
56 gpio_direction_output(pd->gpio, state);
58 gpio_direction_input(pd->gpio);
/drivers/gpu/drm/nouveau/core/subdev/fb/
gddr5.c
39 int pd, lf, xd, vh, vr, vo, l3; local
45 pd = ram->next->bios.ramcfg_11_01_80;
105 pd = 1; /* binary driver does this.. bug? */
108 ram->mr[6] |= (pd & 0x01) << 0;
/drivers/base/power/
domain_governor.c
98 * @pd: PM domain to check.
102 static bool default_power_down_ok(struct dev_pm_domain *pd) argument
104 struct generic_pm_domain *genpd = pd_to_genpd(pd);
/drivers/gpu/drm/gma500/
mmu.h
19 /* protects driver- and pd structures. Always take in read mode
43 struct psb_mmu_pd *pd; member in struct:psb_mmu_pt
72 extern void psb_mmu_free_pagedir(struct psb_mmu_pd *pd);
74 extern void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
77 extern int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd,
81 extern int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
83 extern void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context);
84 extern int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
88 extern void psb_mmu_remove_pages(struct psb_mmu_pd *pd,
/drivers/i2c/busses/
i2c-simtec.c
44 struct simtec_i2c_data *pd = pw; local
45 writeb(CMD_SET_SDA | (state ? STATE_SDA : 0), pd->reg);
50 struct simtec_i2c_data *pd = pw; local
51 writeb(CMD_SET_SCL | (state ? STATE_SCL : 0), pd->reg);
56 struct simtec_i2c_data *pd = pw; local
57 return readb(pd->reg) & STATE_SDA ? 1 : 0;
62 struct simtec_i2c_data *pd = pw; local
63 return readb(pd->reg) & STATE_SCL ? 1 : 0;
70 struct simtec_i2c_data *pd; local
75 pd
140 struct simtec_i2c_data *pd = platform_get_drvdata(dev); local
[all...]
i2c-pca-isa.c
46 static void pca_isa_writebyte(void *pd, int reg, int val) argument
56 static int pca_isa_readbyte(void *pd, int reg) argument
68 static int pca_isa_waitforcompletion(void *pd) argument
75 pca_isa_readbyte(pd, I2C_PCA_CON)
82 if (pca_isa_readbyte(pd, I2C_PCA_CON)
92 static void pca_isa_resetchip(void *pd) argument
i2c-pca-platform.c
42 static int i2c_pca_pf_readbyte8(void *pd, int reg) argument
44 struct i2c_pca_pf_data *i2c = pd;
48 static int i2c_pca_pf_readbyte16(void *pd, int reg) argument
50 struct i2c_pca_pf_data *i2c = pd;
54 static int i2c_pca_pf_readbyte32(void *pd, int reg) argument
56 struct i2c_pca_pf_data *i2c = pd;
60 static void i2c_pca_pf_writebyte8(void *pd, int reg, int val) argument
62 struct i2c_pca_pf_data *i2c = pd;
66 static void i2c_pca_pf_writebyte16(void *pd, int reg, int val) argument
68 struct i2c_pca_pf_data *i2c = pd;
72 i2c_pca_pf_writebyte32(void *pd, int reg, int val) argument
79 i2c_pca_pf_waitforcompletion(void *pd) argument
104 i2c_pca_pf_dummyreset(void *pd) argument
111 i2c_pca_pf_resetchip(void *pd) argument
[all...]
/drivers/infiniband/hw/ipath/
ipath_keys.c
136 struct ipath_pd *pd = to_ipd(qp->ibqp.pd); local
138 if (pd->user) {
151 qp->ibqp.pd != mr->pd)) {
216 struct ipath_pd *pd = to_ipd(qp->ibqp.pd); local
218 if (pd->user) {
234 qp->ibqp.pd != mr->pd)) {
[all...]
ipath_stats.c
144 struct ipath_portdata *pd = dd->ipath_pd[0]; local
150 if (pd->port_hdrqfull != dd->ipath_p0_hdrqfull) {
152 pd->port_hdrqfull -
154 dd->ipath_p0_hdrqfull = pd->port_hdrqfull;
186 hdrqtail = ipath_get_hdrqtail(pd);
187 if (pd->port_head != hdrqtail) {
193 pd->port_head, hdrqtail,
197 dd->ipath_rhdrhead_intr_off, pd->port_port);
337 struct ipath_portdata *pd = dd->ipath_pd[i]; local
339 if (pd
[all...]
/drivers/net/wireless/ath/ath9k/
dfs.c
180 struct dfs_pattern_detector *pd = sc->dfs_detector; local
188 if (pd != NULL && pd->add_pulse(pd, &pe)) {
/drivers/pcmcia/
bcm63xx_pcmcia.h
25 struct bcm63xx_pcmcia_platform_data *pd; member in struct:bcm63xx_pcmcia_socket
/drivers/infiniband/hw/ocrdma/
ocrdma_ah.c
96 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd); local
122 if (pd->uctx &&
133 status = set_av_attr(dev, ah, attr, &sgid, pd->id);
137 /* if pd is for the user process, pass the ah_id to user space */
138 if ((pd->uctx) && (pd->uctx->ah_tbl.va)) {
139 ahid_addr = pd->uctx->ah_tbl.va + attr->dlid;
/drivers/infiniband/hw/qib/
qib_keys.c
55 struct qib_ibdev *dev = to_idev(mr->pd->device);
118 struct qib_ibdev *dev = to_idev(mr->pd->device);
139 * @pd: protection domain
151 int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, argument
164 struct qib_ibdev *dev = to_idev(pd->ibpd.device);
166 if (pd->user)
185 if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
262 struct qib_pd *pd = to_ipd(qp->ibqp.pd); local
340 struct qib_pd *pd = to_ipd(qp->ibqp.pd); local
[all...]
/drivers/infiniband/hw/usnic/
usnic_ib.h
77 struct usnic_ib_pd *pd; member in struct:usnic_ib_vf

Completed in 5772 milliseconds
