Lines matching refs:rdev in drivers/gpu/drm/radeon/radeon_uvd.c. Each match is prefixed with its line number in that file; lines that do not reference rdev (including statement continuations) are omitted.

63 int radeon_uvd_init(struct radeon_device *rdev)
69 INIT_DELAYED_WORK(&rdev->uvd.idle_work, radeon_uvd_idle_work_handler);
71 switch (rdev->family) {
133 r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
135 dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
140 bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
143 r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
145 NULL, &rdev->uvd.vcpu_bo);
147 dev_err(rdev->dev, "(%d) failed to allocate UVD bo\n", r);
151 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
153 radeon_bo_unref(&rdev->uvd.vcpu_bo);
154 dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
158 r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
159 &rdev->uvd.gpu_addr);
161 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
162 radeon_bo_unref(&rdev->uvd.vcpu_bo);
163 dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
167 r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
169 dev_err(rdev->dev, "(%d) UVD map failed\n", r);
173 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
176 atomic_set(&rdev->uvd.handles[i], 0);
177 rdev->uvd.filp[i] = NULL;
178 rdev->uvd.img_size[i] = 0;
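
The listing omits the error checks between the calls above. A sketch of the buffer bring-up in radeon_uvd_init() with the elided unwind paths filled in (an assumption based on the usual radeon_bo reserve/pin/kmap pattern; the firmware-name switch at line 71 is left out):

        /* reserve, pin into VRAM, and CPU-map the UVD VCPU buffer,
         * unwinding on each failure */
        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (r) {
                radeon_bo_unref(&rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) failed to reserve UVD bo\n", r);
                return r;
        }

        r = radeon_bo_pin(rdev->uvd.vcpu_bo, RADEON_GEM_DOMAIN_VRAM,
                          &rdev->uvd.gpu_addr);
        if (r) {
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
                radeon_bo_unref(&rdev->uvd.vcpu_bo);
                dev_err(rdev->dev, "(%d) UVD bo pin failed\n", r);
                return r;
        }

        r = radeon_bo_kmap(rdev->uvd.vcpu_bo, &rdev->uvd.cpu_addr);
        if (r) {
                dev_err(rdev->dev, "(%d) UVD map failed\n", r);
                return r;
        }

        radeon_bo_unreserve(rdev->uvd.vcpu_bo);

        /* no decode sessions exist yet: clear every handle slot */
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                atomic_set(&rdev->uvd.handles[i], 0);
                rdev->uvd.filp[i] = NULL;
                rdev->uvd.img_size[i] = 0;
        }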
184 void radeon_uvd_fini(struct radeon_device *rdev)
188 if (rdev->uvd.vcpu_bo == NULL)
191 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
193 radeon_bo_kunmap(rdev->uvd.vcpu_bo);
194 radeon_bo_unpin(rdev->uvd.vcpu_bo);
195 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
198 radeon_bo_unref(&rdev->uvd.vcpu_bo);
200 radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
202 release_firmware(rdev->uvd_fw);
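
Stitched back together, the teardown is the reverse of init (a reconstruction; the if (!r) guard is an assumption tied to the reserve result at line 191):

        if (rdev->uvd.vcpu_bo == NULL)
                return;

        r = radeon_bo_reserve(rdev->uvd.vcpu_bo, false);
        if (!r) {
                /* only unmap/unpin if we actually hold the reservation */
                radeon_bo_kunmap(rdev->uvd.vcpu_bo);
                radeon_bo_unpin(rdev->uvd.vcpu_bo);
                radeon_bo_unreserve(rdev->uvd.vcpu_bo);
        }

        radeon_bo_unref(&rdev->uvd.vcpu_bo);
        radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
        release_firmware(rdev->uvd_fw);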
205 int radeon_uvd_suspend(struct radeon_device *rdev)
211 if (rdev->uvd.vcpu_bo == NULL)
215 if (atomic_read(&rdev->uvd.handles[i]))
221 size = radeon_bo_size(rdev->uvd.vcpu_bo);
222 size -= rdev->uvd_fw->size;
224 ptr = rdev->uvd.cpu_addr;
225 ptr += rdev->uvd_fw->size;
227 rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
228 memcpy(rdev->uvd.saved_bo, ptr, size);
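
Suspend saves only the region behind the firmware image, and only while a session handle is still live. A sketch with the elided control flow restored (the early-out loop is an assumption consistent with the atomic_read at line 215):

        unsigned size;
        void *ptr;
        int i;

        if (rdev->uvd.vcpu_bo == NULL)
                return 0;

        /* nothing to save unless some decode session is still open */
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
                if (atomic_read(&rdev->uvd.handles[i]))
                        break;
        if (i == RADEON_MAX_UVD_HANDLES)
                return 0;

        /* skip the firmware image; save the session state behind it */
        size = radeon_bo_size(rdev->uvd.vcpu_bo);
        size -= rdev->uvd_fw->size;

        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;

        rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
        /* note: the matched code shows no NULL check on this allocation */
        memcpy(rdev->uvd.saved_bo, ptr, size);

        return 0;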
233 int radeon_uvd_resume(struct radeon_device *rdev)
238 if (rdev->uvd.vcpu_bo == NULL)
241 memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
243 size = radeon_bo_size(rdev->uvd.vcpu_bo);
244 size -= rdev->uvd_fw->size;
246 ptr = rdev->uvd.cpu_addr;
247 ptr += rdev->uvd_fw->size;
249 if (rdev->uvd.saved_bo != NULL) {
250 memcpy(ptr, rdev->uvd.saved_bo, size);
251 kfree(rdev->uvd.saved_bo);
252 rdev->uvd.saved_bo = NULL;
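
Resume is the mirror image: the firmware blob is always copied back in, and the saved session area is restored when present (the else branch clearing the area is an assumption taken from the mainline driver):

        if (rdev->uvd.vcpu_bo == NULL)
                return -EINVAL;

        /* the firmware image is reloaded from the request_firmware() blob */
        memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);

        size = radeon_bo_size(rdev->uvd.vcpu_bo);
        size -= rdev->uvd_fw->size;

        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;

        if (rdev->uvd.saved_bo != NULL) {
                memcpy(ptr, rdev->uvd.saved_bo, size);
                kfree(rdev->uvd.saved_bo);
                rdev->uvd.saved_bo = NULL;
        } else
                memset(ptr, 0, size);   /* fresh start when nothing was saved */

        return 0;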
285 void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
289 uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
290 if (handle != 0 && rdev->uvd.filp[i] == filp) {
293 radeon_uvd_note_usage(rdev);
295 r = radeon_uvd_get_destroy_msg(rdev,
305 rdev->uvd.filp[i] = NULL;
306 atomic_set(&rdev->uvd.handles[i], 0);
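
The loop around these matches sends a destroy message for every handle still owned by the closing file and waits for it to complete before recycling the slot. A sketch (the error text and fence handling follow the mainline driver, so treat them as assumptions):

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
                if (handle != 0 && rdev->uvd.filp[i] == filp) {
                        struct radeon_fence *fence;

                        radeon_uvd_note_usage(rdev);

                        r = radeon_uvd_get_destroy_msg(rdev,
                                R600_RING_TYPE_UVD_INDEX, handle, &fence);
                        if (r) {
                                DRM_ERROR("Error destroying UVD (%d)!\n", r);
                                continue;
                        }

                        /* wait until the VCPU has processed the destroy
                         * message before freeing the slot */
                        radeon_fence_wait(fence, false);
                        radeon_fence_unref(&fence);

                        rdev->uvd.filp[i] = NULL;
                        atomic_set(&rdev->uvd.handles[i], 0);
                }
        }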
451 atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
469 if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
475 if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
476 p->rdev->uvd.filp[i] = p->filp;
477 p->rdev->uvd.img_size[i] = img_size;
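
Lines 451-477 are the session-handle lifecycle inside command-stream validation: destroy (line 451) swaps the handle back to 0, decode (line 469) checks that the handle exists, and create (lines 475-477) claims a free slot. A sketch of the create path, assuming the surrounding loop over RADEON_MAX_UVD_HANDLES:

        /* refuse duplicate handles, then claim the first free slot;
         * atomic_cmpxchg() returns the old value, so 0 means the slot
         * was free and now belongs to us */
        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
                        DRM_ERROR("Handle 0x%x already in use!\n", handle);
                        return -EINVAL;
                }

                if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
                        p->rdev->uvd.filp[i] = p->filp;
                        p->rdev->uvd.img_size[i] = img_size;
                        return 0;
                }
        }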
539 (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
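
Line 539 enforces UVD's addressing limit: a buffer named in the command stream must sit in the same 256MB window (1 << 28 bytes) as the firmware BO. A sketch of the surrounding check (the cmd values, 0 for the message buffer and 0x3 for the feedback buffer, are an assumption from the mainline driver):

        if ((cmd == 0 || cmd == 0x3) &&
            (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
                DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
                          start, end);
                return -EINVAL;
        }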
653 static int radeon_uvd_send_msg(struct radeon_device *rdev,
660 r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
674 r = radeon_ib_schedule(rdev, &ib, NULL, false);
679 radeon_ib_free(rdev, &ib);
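
Between the radeon_ib_get() and radeon_ib_schedule() matches, the function fills a 16-dword IB that hands the message address to the UVD VCPU. A reconstruction; the register names and the no-op padding are taken from the mainline driver and vary slightly across kernel versions:

        struct radeon_ib ib;
        int i, r;

        r = radeon_ib_get(rdev, ring, &ib, NULL, 64);
        if (r)
                return r;

        /* tell the VCPU where the message body lives, then kick it */
        ib.ptr[0] = PACKET0(UVD_GPCOM_VCPU_DATA0, 0);
        ib.ptr[1] = addr;
        ib.ptr[2] = PACKET0(UVD_GPCOM_VCPU_DATA1, 0);
        ib.ptr[3] = addr >> 32;
        ib.ptr[4] = PACKET0(UVD_GPCOM_VCPU_CMD, 0);
        ib.ptr[5] = 0;
        for (i = 6; i < 16; ++i)
                ib.ptr[i] = PACKET2(0); /* pad to 16 dwords with no-ops */
        ib.length_dw = 16;

        r = radeon_ib_schedule(rdev, &ib, NULL, false);

        if (fence)
                *fence = radeon_fence_ref(ib.fence);

        radeon_ib_free(rdev, &ib);
        return r;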
686 int radeon_uvd_get_create_msg(struct radeon_device *rdev, int ring,
690 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
693 uint32_t *msg = rdev->uvd.cpu_addr + offs;
694 uint64_t addr = rdev->uvd.gpu_addr + offs;
698 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
717 r = radeon_uvd_send_msg(rdev, ring, addr, fence);
718 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
722 int radeon_uvd_get_destroy_msg(struct radeon_device *rdev, int ring,
726 uint64_t offs = radeon_bo_size(rdev->uvd.vcpu_bo) -
729 uint32_t *msg = rdev->uvd.cpu_addr + offs;
730 uint64_t addr = rdev->uvd.gpu_addr + offs;
734 r = radeon_bo_reserve(rdev->uvd.vcpu_bo, true);
746 r = radeon_uvd_send_msg(rdev, ring, addr, fence);
747 radeon_bo_unreserve(rdev->uvd.vcpu_bo);
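
Both helpers reuse the last GPU page of the vcpu BO as scratch for the message; the continuation elided at lines 690 and 726 subtracts RADEON_GPU_PAGE_SIZE in the mainline driver. A sketch of the destroy-message body written between the reserve and send matches (the constants follow mainline and should be treated as assumptions):

        /* stitch together a UVD destroy msg in the scratch page */
        msg[0] = cpu_to_le32(0x00000de4);       /* message size */
        msg[1] = cpu_to_le32(0x00000002);       /* message type: destroy */
        msg[2] = cpu_to_le32(handle);           /* session handle */
        msg[3] = cpu_to_le32(0x00000000);
        for (i = 4; i < 1024; ++i)
                msg[i] = cpu_to_le32(0x0);

        r = radeon_uvd_send_msg(rdev, ring, addr, fence);
        radeon_bo_unreserve(rdev->uvd.vcpu_bo);
        return r;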
754 * @rdev: radeon_device pointer
760 static void radeon_uvd_count_handles(struct radeon_device *rdev,
769 if (!atomic_read(&rdev->uvd.handles[i]))
772 if (rdev->uvd.img_size[i] >= 720*576)
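
The counter classifies every open session by image size: anything at or above 720*576 pixels (PAL SD) counts as an HD stream, which the DPM code uses to pick power states. Reconstructed:

        *sd = 0;
        *hd = 0;

        for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
                if (!atomic_read(&rdev->uvd.handles[i]))
                        continue;

                if (rdev->uvd.img_size[i] >= 720*576)
                        ++(*hd);        /* PAL SD or larger: count as HD */
                else
                        ++(*sd);
        }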
781 struct radeon_device *rdev =
784 if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
785 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
786 radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
787 &rdev->pm.dpm.hd);
788 radeon_dpm_enable_uvd(rdev, false);
790 radeon_set_uvd_clocks(rdev, 0, 0);
793 schedule_delayed_work(&rdev->uvd.idle_work,
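
The idle worker powers UVD down only once no UVD fences are outstanding; otherwise it re-arms itself. Reconstructed around the matches (UVD_IDLE_TIMEOUT_MS is the driver's idle constant):

        struct radeon_device *rdev =
                container_of(work, struct radeon_device, uvd.idle_work.work);

        if (radeon_fence_count_emitted(rdev, R600_RING_TYPE_UVD_INDEX) == 0) {
                if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
                        /* remember the stream mix for DPM, then gate the block */
                        radeon_uvd_count_handles(rdev, &rdev->pm.dpm.sd,
                                                 &rdev->pm.dpm.hd);
                        radeon_dpm_enable_uvd(rdev, false);
                } else {
                        radeon_set_uvd_clocks(rdev, 0, 0);
                }
        } else {
                /* still busy: check again after the idle timeout */
                schedule_delayed_work(&rdev->uvd.idle_work,
                                      msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));
        }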
798 void radeon_uvd_note_usage(struct radeon_device *rdev)
801 bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
802 set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
805 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
807 radeon_uvd_count_handles(rdev, &sd, &hd);
808 if ((rdev->pm.dpm.sd != sd) ||
809 (rdev->pm.dpm.hd != hd)) {
810 rdev->pm.dpm.sd = sd;
811 rdev->pm.dpm.hd = hd;
818 if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
819 radeon_dpm_enable_uvd(rdev, true);
821 radeon_set_uvd_clocks(rdev, 53300, 40000);
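
note_usage runs on every submission. Clocks are raised only when the idle worker had already fired (so cancel_delayed_work_sync() returns false) or when the SD/HD stream mix changed; 53300/40000 appear to be in the driver's usual 10 kHz units, i.e. 533 MHz vclk and 400 MHz dclk. Reconstructed:

        bool streams_changed = false;
        bool set_clocks = !cancel_delayed_work_sync(&rdev->uvd.idle_work);
        set_clocks &= schedule_delayed_work(&rdev->uvd.idle_work,
                                            msecs_to_jiffies(UVD_IDLE_TIMEOUT_MS));

        if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
                unsigned hd = 0, sd = 0;
                radeon_uvd_count_handles(rdev, &sd, &hd);
                if ((rdev->pm.dpm.sd != sd) ||
                    (rdev->pm.dpm.hd != hd)) {
                        rdev->pm.dpm.sd = sd;
                        rdev->pm.dpm.hd = hd;
                        streams_changed = true;
                }
        }

        if (set_clocks || streams_changed) {
                if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
                        radeon_dpm_enable_uvd(rdev, true);
                else
                        radeon_set_uvd_clocks(rdev, 53300, 40000);
        }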
851 * @rdev: radeon_device pointer
868 int radeon_uvd_calc_upll_dividers(struct radeon_device *rdev,
878 unsigned vco_freq, ref_freq = rdev->clock.spll.reference_freq;
931 int radeon_uvd_send_upll_ctlreq(struct radeon_device *rdev,
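
Lines 868-931 are the UPLL programming helpers: the divider search derives vco_freq from ref_freq (line 878) and a feedback divider, then picks post dividers so that vclk = vco_freq / vclk_div (and likewise for dclk), while radeon_uvd_send_upll_ctlreq issues the change request and polls for the PLL to lock. A sketch of the post-divider rule, modeled on the driver's radeon_uvd_calc_upll_post_div (the helper name here is illustrative):

static unsigned upll_post_div(unsigned vco_freq, unsigned target_freq,
                              unsigned pd_min, unsigned pd_even)
{
        unsigned post_div = vco_freq / target_freq;

        /* respect the hardware minimum */
        if (post_div < pd_min)
                post_div = pd_min;

        /* never end up above the target frequency */
        if ((vco_freq / post_div) > target_freq)
                post_div += 1;

        /* post dividers past a threshold must be even */
        if ((post_div > pd_even) && (post_div % 2))
                post_div += 1;

        return post_div;
}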