/drivers/gpu/drm/msm/
msm_ringbuffer.c
    23: struct msm_ringbuffer *ring;  (local)
    28: ring = kzalloc(sizeof(*ring), GFP_KERNEL);
    29: if (!ring) {
    34: ring->gpu = gpu;
    35: ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
    36: if (IS_ERR(ring->bo)) {
    37: ret = PTR_ERR(ring->bo);
    38: ring->bo = NULL;
    42: ring ...
    56: msm_ringbuffer_destroy(struct msm_ringbuffer *ring)  (argument)
    ...

msm_ringbuffer.h
    31: void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
    36: OUT_RING(struct msm_ringbuffer *ring, uint32_t data)  (argument)
    38: if (ring->cur == ring->end)
    39: ring->cur = ring->start;
    40: *(ring->cur++) = data;

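The OUT_RING hits above show the write-with-wrap pattern the MSM ring buffer uses: when the write cursor reaches the end of the buffer it wraps back to the start before storing the next dword. A minimal userspace sketch of the same pattern, using an illustrative struct ringbuf rather than the driver's actual types:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the driver's msm_ringbuffer: start/end bound
     * the buffer, cur is the next write position. */
    struct ringbuf {
            uint32_t *start;
            uint32_t *end;
            uint32_t *cur;
    };

    /* Same wrap-then-write pattern as the OUT_RING hits above. */
    static inline void ring_out(struct ringbuf *ring, uint32_t data)
    {
            if (ring->cur == ring->end)
                    ring->cur = ring->start;
            *(ring->cur++) = data;
    }

    int main(void)
    {
            uint32_t buf[4] = { 0 };
            struct ringbuf rb = { buf, buf + 4, buf };

            for (uint32_t i = 0; i < 6; i++)  /* 6 writes into 4 slots: wraps */
                    ring_out(&rb, i);

            printf("%u %u %u %u\n", (unsigned)buf[0], (unsigned)buf[1],
                   (unsigned)buf[2], (unsigned)buf[3]);  /* prints: 4 5 2 3 */
            return 0;
    }
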
/drivers/gpu/drm/radeon/
uvd_v3_1.c
    34: * @ring: radeon_ring pointer
    38: * Emit a semaphore command (either wait or signal) to the UVD ring.
    40: uvd_v3_1_semaphore_emit(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_semaphore *semaphore, bool emit_wait)  (argument)
    41: struct radeon_ring *ring,
    47: radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
    48: radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
    50: radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
    51: radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
    53: radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
    54: radeon_ring_write(ring, 0x80 | (emit_wait ? 1 : 0));

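The writes at lines 48 and 51 split the semaphore's GPU address into two 20-bit register fields: the address is 8-byte aligned, so bits 3..22 go into the LOW register and bits 23..42 into the HIGH one. A small standalone check of that split and its inverse (plain C, not driver code; the example address is made up):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t addr = 0x123456780ull;          /* 8-byte aligned example */
            uint32_t lo = (addr >> 3) & 0x000FFFFF;  /* bits 3..22  */
            uint32_t hi = (addr >> 23) & 0x000FFFFF; /* bits 23..42 */

            /* reassemble the address and make sure nothing was lost */
            uint64_t back = ((uint64_t)hi << 23) | ((uint64_t)lo << 3);
            assert(back == addr);
            return 0;
    }
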
uvd_v2_2.c
    37: * Write a fence and a trap command to the ring.
    42: struct radeon_ring *ring = &rdev->ring[fence->ring];  (local)
    43: uint64_t addr = rdev->fence_drv[fence->ring].gpu_addr;
    45: radeon_ring_write(ring, PACKET0(UVD_CONTEXT_ID, 0));
    46: radeon_ring_write(ring, fence->seq);
    47: radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_DATA0, 0));
    48: radeon_ring_write(ring, lower_32_bits(addr));
    49: radeon_ring_write(ring, PACKET ...
    ...

rv770_dma.c
    50: struct radeon_ring *ring = &rdev->ring[ring_index];  (local)
    63: r = radeon_ring_lock(rdev, ring, num_loops * 5 + 8);
    71: radeon_semaphore_sync_rings(rdev, sem, ring->idx);
    78: radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_COPY, 0, 0, cur_size_in_dw));
    79: radeon_ring_write(ring, dst_offset & 0xfffffffc);
    80: radeon_ring_write(ring, src_offset & 0xfffffffc);
    81: radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xff);
    82: radeon_ring_write(ring, upper_32_bits(src_offset) & 0xff);
    87: r = radeon_fence_emit(rdev, &fence, ring ...
    ...

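The reservation at line 63 budgets ring space before the copy is emitted: each chunk of the copy costs five dwords (the COPY header plus dst low, src low, dst high, src high, as written at lines 78-82), and eight extra dwords are reserved for the surrounding semaphore sync and fence. A rough sketch of that accounting, where MAX_CHUNK_DW is an assumed per-packet limit, not a value taken from the driver:

    #include <stdio.h>

    #define MAX_CHUNK_DW 0xFFFFFu   /* assumed max dwords per COPY packet */
    #define OVERHEAD_DW  8u         /* sync + fence overhead, as at line 63 */

    /* Dwords of ring space needed to copy size_in_dw dwords in chunks. */
    static unsigned int dma_copy_ring_dwords(unsigned int size_in_dw)
    {
            unsigned int num_loops =
                    (size_in_dw + MAX_CHUNK_DW - 1) / MAX_CHUNK_DW;

            return num_loops * 5 + OVERHEAD_DW;
    }

    int main(void)
    {
            /* e.g. a 1 MiB copy expressed in 32-bit dwords */
            printf("reserve %u dwords\n",
                   dma_copy_ring_dwords((1u << 20) / 4));
            return 0;
    }
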
vce_v1_0.c
    38: * @ring: radeon_ring pointer
    42: vce_v1_0_get_rptr(struct radeon_device *rdev, struct radeon_ring *ring)  (argument)
    43: struct radeon_ring *ring)
    45: if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
    55: * @ring: radeon_ring pointer
    59: vce_v1_0_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring)  (argument)
    60: struct radeon_ring *ring)
    62: if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
    72: * @ring: radeon_ring pointer
    76: vce_v1_0_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring)  (argument)
    77: struct radeon_ring *ring)
    79: if (ring->idx == TN_RING_TYPE_VCE1_INDEX)
    80: WREG32(VCE_RB_WPTR, ring ...
    94: struct radeon_ring *ring;  (local)
    161: struct radeon_ring *ring;  (local)
    ...

evergreen_dma.c
    32: * evergreen_dma_fence_ring_emit - emit a fence on the DMA ring
    37: * Add a DMA fence packet to the ring to write
    44: struct radeon_ring *ring = &rdev->ring[fence->ring];  (local)
    45: u64 addr = rdev->fence_drv[fence->ring].gpu_addr;
    47: radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_FENCE, 0, 0));
    48: radeon_ring_write(ring, addr & 0xfffffffc);
    49: radeon_ring_write(ring, (upper_32_bits(addr) & 0xff));
    50: radeon_ring_write(ring, fenc ...
    70: struct radeon_ring *ring = &rdev->ring[ib->ring];  (local)
    116: struct radeon_ring *ring = &rdev->ring[ring_index];  (local)
    175: evergreen_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)  (argument)
    ...

radeon_ib.c
    36: * command ring and the hw will fetch the commands from the IB
    39: * put in IBs for execution by the requested ring.
    47: * @ring: ring index the IB is associated with
    55: int radeon_ib_get(struct radeon_device *rdev, int ring,  (argument)
    72: ib->ring = ring;
    105: * radeon_ib_schedule - schedule an IB (Indirect Buffer) on the ring
    112: * Schedule an IB on the associated ring (all asics).
    115: * On SI, there are two parallel engines fed from the primary ring,
    128: struct radeon_ring *ring = &rdev->ring[ib->ring];  (local)
    265: struct radeon_ring *ring = &rdev->ring[i];  (local)
    ...

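The comments in these hits describe indirect buffers (IBs): instead of streaming every command through the main ring, the driver fills a separate buffer and the ring only carries a short packet telling the hardware where to fetch those commands from. A rough illustration of that idea; the packet opcode and structure below are made up for the example and are not the radeon API:

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical indirect buffer: the IB holds the real command stream,
     * the ring only gets a pointer/length packet. */
    struct ib {
            uint64_t gpu_addr;   /* where the hardware will fetch from */
            uint32_t length_dw;  /* number of valid dwords in the buffer */
    };

    #define FAKE_PKT_INDIRECT_BUFFER 0x32u  /* made-up opcode */

    static void ring_write(uint32_t dw)
    {
            printf("ring <- 0x%08x\n", (unsigned)dw);
    }

    /* "Schedule" the IB: one small packet on the ring instead of the
     * whole command stream. */
    static void ib_schedule(const struct ib *ib)
    {
            ring_write(FAKE_PKT_INDIRECT_BUFFER);
            ring_write((uint32_t)(ib->gpu_addr & 0xffffffffu));
            ring_write((uint32_t)(ib->gpu_addr >> 32));
            ring_write(ib->length_dw);
    }

    int main(void)
    {
            struct ib ib = { .gpu_addr = 0x100000, .length_dw = 64 };

            ib_schedule(&ib);
            return 0;
    }
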
radeon_semaphore.c
    67: struct radeon_ring *ring = &rdev->ring[ridx];  (local)
    71: if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, false)) {
    75: ring->last_semaphore_signal_addr = semaphore->gpu_addr;
    84: struct radeon_ring *ring = &rdev->ring[ridx];  (local)
    88: if (radeon_semaphore_ring_emit(rdev, ridx, ring, semaphore, true)) {
    92: ring->last_semaphore_wait_addr = semaphore->gpu_addr;
    114: other = semaphore->sync_to[fence->ring];
    115: semaphore->sync_to[fence->ring] ...
    175: radeon_semaphore_sync_rings(struct radeon_device *rdev, struct radeon_semaphore *semaphore, int ring)  (argument)
    ...

ni_dma.c
    36: * to the 3D engine (ring buffer, IBs, etc.), but the
    49: * @ring: radeon ring pointer
    53: cayman_dma_get_rptr(struct radeon_device *rdev, struct radeon_ring *ring)  (argument)
    54: struct radeon_ring *ring)
    59: rptr = rdev->wb.wb[ring->rptr_offs/4];
    61: if (ring->idx == R600_RING_TYPE_DMA_INDEX)
    76: * @ring: radeon ring pointer
    80: cayman_dma_get_wptr(struct radeon_device *rdev, struct radeon_ring *ring)  (argument)
    81: struct radeon_ring *ring)
    85: if (ring ...
    101: cayman_dma_set_wptr(struct radeon_device *rdev, struct radeon_ring *ring)  (argument)
    125: struct radeon_ring *ring = &rdev->ring[ib->ring];  (local)
    188: struct radeon_ring *ring;  (local)
    286: cayman_dma_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)  (argument)
    451: struct radeon_ring *ring = &rdev->ring[ridx];  (local)
    ...

r200.c
    89: struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  (local)
    99: r = radeon_ring_lock(rdev, ring, num_loops * 4 + 64);
    105: radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0));
    106: radeon_ring_write(ring, (1 << 16));
    113: radeon_ring_write(ring, PACKET0(0x720, 2));
    114: radeon_ring_write(ring, src_offset);
    115: radeon_ring_write(ring, dst_offset);
    116: radeon_ring_write(ring, cur_size | (1 << 31) | (1 << 30));
    120: radeon_ring_write(ring, PACKET ...
    ...

r420.c
    209: struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  (local)
    218: radeon_ring_lock(rdev, ring, 8);
    219: radeon_ring_write(ring, PACKET0(R300_CP_RESYNC_ADDR, 1));
    220: radeon_ring_write(ring, rdev->config.r300.resync_scratch);
    221: radeon_ring_write(ring, 0xDEADBEEF);
    222: radeon_ring_unlock_commit(rdev, ring, false);
    227: struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];  (local)
    232: radeon_ring_lock(rdev, ring, ...
    ...

radeon_irq_kms.c
    316: * @ring: ring whose interrupt you want to enable
    318: * Enables the software interrupt for a specific ring (all asics).
    320: * a particular ring.
    322: void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev, int ring)  (argument)
    329: if (atomic_inc_return(&rdev->irq.ring_int[ring]) == 1) {
    340: * @ring: ring whose interrupt you want to enable
    342: * Enables the software interrupt for a specific ring (all asics).
    344: * a particular ring
    346: radeon_irq_kms_sw_irq_get_delayed(struct radeon_device *rdev, int ring)  (argument)
    361: radeon_irq_kms_sw_irq_put(struct radeon_device *rdev, int ring)  (argument)
    ...

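The check at line 329 is a reference-counted interrupt enable: the first caller of the sw_irq_get path switches the interrupt source on, and the last caller of the matching put switches it off again. A minimal sketch of the same pattern with C11 atomics; the enable/disable hooks here are placeholders, not the driver's functions:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int ring_irq_refcount;

    static void enable_hw_irq(void)  { puts("hw irq on"); }
    static void disable_hw_irq(void) { puts("hw irq off"); }

    static void sw_irq_get(void)
    {
            /* first reference enables the interrupt */
            if (atomic_fetch_add(&ring_irq_refcount, 1) + 1 == 1)
                    enable_hw_irq();
    }

    static void sw_irq_put(void)
    {
            /* last reference disables it again */
            if (atomic_fetch_sub(&ring_irq_refcount, 1) - 1 == 0)
                    disable_hw_irq();
    }

    int main(void)
    {
            sw_irq_get();
            sw_irq_get();   /* already enabled, refcount only */
            sw_irq_put();
            sw_irq_put();   /* last user: disabled again */
            return 0;
    }
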
radeon_test.c
    40: int i, r, ring;  (local)
    44: ring = radeon_copy_dma_ring_index(rdev);
    47: ring = radeon_copy_blit_ring_index(rdev);
    57: * (Total GTT - IB pool - writeback page - ring buffers) / test size
    119: if (ring == R600_RING_TYPE_DMA_INDEX)
    170: if (ring == R600_RING_TYPE_DMA_INDEX)
    261: radeon_test_create_and_emit_fence(struct radeon_device *rdev, struct radeon_ring *ring, struct radeon_fence **fence)  (argument)
    262: struct radeon_ring *ring,
    265: uint32_t handle = ring->idx ^ 0xdeafbeef;
    268: if (ring->idx == R600_RING_TYPE_UVD_INDEX) {
    269: r = radeon_uvd_get_create_msg(rdev, ring ...
    ...

/drivers/gpu/drm/i915/
i915_gem_render_state.c
    129: int i915_gem_render_state_prepare(struct intel_engine_cs *ring,  (argument)
    134: if (WARN_ON(ring->id != RCS))
    137: ret = render_state_init(so, ring->dev);
    153: int i915_gem_render_state_init(struct intel_engine_cs *ring)  (argument)
    158: ret = i915_gem_render_state_prepare(ring, &so);
    165: ret = ring->dispatch_execbuffer(ring,
    172: i915_vma_move_to_active(i915_gem_obj_to_ggtt(so.obj), ring);
    174: ret = __i915_add_request(ring, NULL, so.obj, NULL);

intel_lrc.h
    28: #define RING_ELSP(ring) ((ring)->mmio_base+0x230)
    29: #define RING_EXECLIST_STATUS(ring) ((ring)->mmio_base+0x234)
    30: #define RING_CONTEXT_CONTROL(ring) ((ring)->mmio_base+0x244)
    31: #define RING_CONTEXT_STATUS_BUF(ring) ((ring)->mmio_base+0x370)
    32: #define RING_CONTEXT_STATUS_PTR(ring) ((ring) ...
    103: struct intel_engine_cs *ring;  (member in struct intel_ctx_submit_request)
    ...

/drivers/crypto/qat/qat_dh895xcc/
adf_hw_arbiter.c
    98: * ring flow control check enabled. */
    107: /* Setup ring response ordering */
    127: void adf_update_ring_arb_enable(struct adf_etr_ring_data *ring)  (argument)
    129: WRITE_CSR_ARB_RINGSRVARBEN(ring->bank->csr_addr,
    130: ring->bank->bank_number,
    131: ring->bank->ring_mask & 0xFF);

/drivers/gpu/drm/msm/adreno/
a3xx_gpu.c
    44: struct msm_ringbuffer *ring = gpu->rb;  (local)
    46: OUT_PKT3(ring, CP_ME_INIT, 17);
    47: OUT_RING(ring, 0x000003f7);
    48: OUT_RING(ring, 0x00000000);
    49: OUT_RING(ring, 0x00000000);
    50: OUT_RING(ring, 0x00000000);
    51: OUT_RING(ring, 0x00000080);
    52: OUT_RING(ring, 0x00000100);
    53: OUT_RING(ring, 0x00000180);
    54: OUT_RING(ring, ...
    ...

/drivers/net/ethernet/mellanox/mlx4/
en_cq.c
    46: mlx4_en_create_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq, int entries, int ring, enum cq_type mode, int node)  (argument)
    48: int entries, int ring, enum cq_type mode,
    67: cq->ring = ring;
    122: cq->ring);
    123: /* Set IRQ for specific name (per ring) */
    126: cq->vector = (cq->ring + 1 + priv->port)
    134: cq->vector = (cq->ring + 1 + priv->port) %
    143: ring we assigned for the RX */
    152: cq->size = priv->rx_ring[cq->ring]->actual_size;
    171: struct mlx4_en_rx_ring *ring ...  (local)
    ...

en_port.c
    157: const struct mlx4_en_tx_ring *ring = priv->tx_ring[i];  (local)
    159: stats->tx_packets += ring->packets;
    160: stats->tx_bytes += ring->bytes;
    161: priv->port_stats.tx_chksum_offload += ring->tx_csum;
    162: priv->port_stats.queue_stopped += ring->queue_stopped;
    163: priv->port_stats.wake_queue += ring->wake_queue;
    164: priv->port_stats.tso_packets += ring->tso_packets;
    165: priv->port_stats.xmit_more += ring->xmit_more;

/drivers/net/wireless/ath/carl9170/
debug.h
    124: struct carl9170_debug_mem_rbe ring[CARL9170_DEBUG_RING_SIZE];  (member in struct carl9170_debug)

/drivers/net/wireless/rtlwifi/rtl8192se/
sw.c
    243: struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];  (local)
    244: u8 *entry = (u8 *)(&ring->desc[ring->idx]);

/drivers/scsi/fnic/
vnic_wq_copy.h
    31: struct vnic_dev_ring ring;  (member in struct vnic_wq_copy)
    38: return wq->ring.desc_avail;
    43: return wq->ring.desc_count - 1 - wq->ring.desc_avail;
    48: struct fcpio_host_req *desc = wq->ring.descs;
    55: ((wq->to_use_index + 1) == wq->ring.desc_count) ?
    57: wq->ring.desc_avail--;
    76: cnt = wq->ring.desc_count - wq->to_clean_index + index + 1;
    78: wq->to_clean_index = ((index + 1) % wq->ring.desc_count);
    79: wq->ring ...
    ...

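These hits sketch the copy work queue's descriptor accounting: desc_avail counts free slots, the used count is desc_count - 1 - desc_avail (one slot stays unused), posting a request advances to_use_index with wrap-around and decrements desc_avail, and completion advances to_clean_index and gives the slots back. A standalone sketch of that bookkeeping, with names mirroring the driver but not taken from its API:

    #include <assert.h>
    #include <stdio.h>

    struct wq_ring {
            unsigned int desc_count;     /* total slots in the ring */
            unsigned int desc_avail;     /* free slots */
            unsigned int to_use_index;   /* next slot to post into */
            unsigned int to_clean_index; /* oldest outstanding slot */
    };

    static unsigned int wq_in_use(const struct wq_ring *wq)
    {
            return wq->desc_count - 1 - wq->desc_avail;
    }

    /* Post one descriptor: advance the use index with wrap-around. */
    static void wq_post(struct wq_ring *wq)
    {
            assert(wq->desc_avail > 0);
            wq->to_use_index = (wq->to_use_index + 1) % wq->desc_count;
            wq->desc_avail--;
    }

    /* Complete everything up to and including 'index', freeing those slots. */
    static void wq_clean(struct wq_ring *wq, unsigned int index)
    {
            unsigned int cnt;

            if (wq->to_clean_index <= index)
                    cnt = index - wq->to_clean_index + 1;
            else    /* completion wrapped past the end of the ring */
                    cnt = wq->desc_count - wq->to_clean_index + index + 1;

            wq->to_clean_index = (index + 1) % wq->desc_count;
            wq->desc_avail += cnt;
    }

    int main(void)
    {
            struct wq_ring wq = { .desc_count = 8, .desc_avail = 7 };

            wq_post(&wq);
            wq_post(&wq);
            printf("in use: %u\n", wq_in_use(&wq));  /* 2 */
            wq_clean(&wq, 1);
            printf("in use: %u\n", wq_in_use(&wq));  /* 0 */
            return 0;
    }
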
/drivers/crypto/qat/qat_common/
adf_transport_debug.c
    59: struct adf_etr_ring_data *ring = sfile->private;  (local)
    65: if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
    66: ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
    69: return ring->base_addr +
    70: (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) * (*pos)++);
    75: struct adf_etr_ring_data *ring = sfile->private;  (local)
    77: if (*pos >= (ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size) /
    78: ADF_MSG_SIZE_TO_BYTES(ring->msg_size)))
    81: return ring->base_addr +
    82: (ADF_MSG_SIZE_TO_BYTES(ring ...
    87: struct adf_etr_ring_data *ring = sfile->private;  (local)
    160: adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name)  (argument)
    185: adf_ring_debugfs_rm(struct adf_etr_ring_data *ring)  (argument)
    223: struct adf_etr_ring_data *ring = &bank->rings[ring_id];  (local)
    ...

adf_transport_internal.h
    63: spinlock_t lock; /* protects ring data struct */
    100: int adf_ring_debugfs_add(struct adf_etr_ring_data *ring, const char *name);
    101: void adf_ring_debugfs_rm(struct adf_etr_ring_data *ring);
    110: static inline int adf_ring_debugfs_add(struct adf_etr_ring_data *ring,  (argument)
    116: #define adf_ring_debugfs_rm(ring) do {} while (0)