Searched refs:gpu (Results 1 - 25 of 148) sorted by relevance


/external/autotest/client/site_tests/graphics_dEQP/
diff.sh
5 for gpu in ${gpus[*]}
7 rm expectations/${gpu}/*.json
8 cat expectations/${gpu}/* | sort > /tmp/${gpu}.sorted
9 cat expectations/${gpu}/* | sort | uniq > /tmp/${gpu}.sorted_uniq
10 diff /tmp/${gpu}.sorted /tmp/${gpu}.sorted_uniq > ${gpu}.diff
/external/skia/src/gpu/vk/
GrVkUniformBuffer.h
19 static GrVkUniformBuffer* Create(GrVkGpu* gpu, size_t size, bool dynamic);
21 void* map(const GrVkGpu* gpu) { argument
22 return this->vkMap(gpu);
24 void unmap(const GrVkGpu* gpu) { argument
25 this->vkUnmap(gpu);
27 bool updateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) { argument
28 return this->vkUpdateData(gpu, src, srcSizeInBytes);
30 void release(const GrVkGpu* gpu) { argument
31 this->vkRelease(gpu);
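Taken together, the entry points above cover the whole lifecycle of the uniform-buffer wrapper: Create, map/unmap or updateData, and release. A minimal usage sketch, assuming only the signatures listed here; fGpu, uniData, and uniSize are placeholder names, not identifiers from the file:

    // Sketch only: fGpu, uniData, and uniSize are hypothetical; Skia's
    // GrVkUniformBuffer.h and <cstring> are assumed to be included.
    GrVkUniformBuffer* buffer = GrVkUniformBuffer::Create(fGpu, uniSize, /*dynamic=*/true);
    if (buffer) {
        // Copy through the convenience call...
        buffer->updateData(fGpu, uniData, uniSize);
        // ...or map, write, and unmap explicitly.
        void* mapped = buffer->map(fGpu);
        memcpy(mapped, uniData, uniSize);
        buffer->unmap(fGpu);
        // Release against the same GrVkGpu the buffer was created with.
        buffer->release(fGpu);
    }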
GrVkBuffer.cpp
21 const GrVkBuffer::Resource* GrVkBuffer::Create(const GrVkGpu* gpu, const Desc& desc) { argument
54 err = VK_CALL(gpu, CreateBuffer(gpu->device(), &bufInfo, nullptr, &buffer));
62 if (!GrVkMemory::AllocAndBindBufferMemory(gpu,
66 VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
72 VK_CALL(gpu, DestroyBuffer(gpu->device(), buffer, nullptr));
73 VK_CALL(gpu, FreeMemory(gpu
81 addMemoryBarrier(const GrVkGpu* gpu, VkAccessFlags srcAccessMask, VkAccessFlags dstAccesMask, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion) const argument
110 vkRelease(const GrVkGpu* gpu) argument
124 vkMap(const GrVkGpu* gpu) argument
137 vkUnmap(const GrVkGpu* gpu) argument
151 vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes) argument
[all...]
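The GrVkBuffer.cpp matches follow the standard Vulkan resource pattern: create the VkBuffer, allocate and bind device memory, and unwind (DestroyBuffer, then FreeMemory) if a later step fails. A sketch of that underlying pattern in raw Vulkan, without the Skia VK_CALL macros; the device handle and memory-type index are assumed to be supplied by the caller:

    #include <vulkan/vulkan.h>

    // Sketch of the create/allocate/bind/cleanup pattern that GrVkBuffer::Create
    // wraps. `memoryTypeIndex` must be a type allowed by reqs.memoryTypeBits;
    // all parameters are assumptions provided by the caller.
    VkBuffer createBoundBuffer(VkDevice device, VkDeviceSize size, VkBufferUsageFlags usage,
                               uint32_t memoryTypeIndex, VkDeviceMemory* outMemory) {
        VkBufferCreateInfo bufInfo = {};
        bufInfo.sType = VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO;
        bufInfo.size = size;
        bufInfo.usage = usage;
        bufInfo.sharingMode = VK_SHARING_MODE_EXCLUSIVE;

        VkBuffer buffer;
        if (vkCreateBuffer(device, &bufInfo, nullptr, &buffer) != VK_SUCCESS) {
            return VK_NULL_HANDLE;
        }

        VkMemoryRequirements reqs;
        vkGetBufferMemoryRequirements(device, buffer, &reqs);

        VkMemoryAllocateInfo allocInfo = {};
        allocInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO;
        allocInfo.allocationSize = reqs.size;
        allocInfo.memoryTypeIndex = memoryTypeIndex;

        // Mirror the failure paths visible above: destroy the buffer if allocation
        // fails, and also free the memory if only the bind fails.
        if (vkAllocateMemory(device, &allocInfo, nullptr, outMemory) != VK_SUCCESS) {
            vkDestroyBuffer(device, buffer, nullptr);
            return VK_NULL_HANDLE;
        }
        if (vkBindBufferMemory(device, buffer, *outMemory, 0) != VK_SUCCESS) {
            vkDestroyBuffer(device, buffer, nullptr);
            vkFreeMemory(device, *outMemory, nullptr);
            return VK_NULL_HANDLE;
        }
        return buffer;
    }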
GrVkCommandBuffer.h
23 static GrVkCommandBuffer* Create(const GrVkGpu* gpu, VkCommandPool cmdPool);
26 void begin(const GrVkGpu* gpu);
27 void end(const GrVkGpu* gpu);
33 void beginRenderPass(const GrVkGpu* gpu,
36 void endRenderPass(const GrVkGpu* gpu);
38 void submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync);
39 bool finished(const GrVkGpu* gpu) const;
45 void bindPipeline(const GrVkGpu* gpu) const;
46 void bindDynamicState(const GrVkGpu* gpu) const;
47 void bindDescriptorSet(const GrVkGpu* gpu) cons
66 bindVertexBuffer(GrVkGpu* gpu, GrVkVertexBuffer* vbuffer) argument
81 bindIndexBuffer(GrVkGpu* gpu, GrVkIndexBuffer* ibuffer) argument
94 bindPipeline(const GrVkGpu* gpu, const GrVkPipeline* pipeline) argument
[all...]
GrVkCommandBuffer.cpp
18 GrVkCommandBuffer* GrVkCommandBuffer::Create(const GrVkGpu* gpu, VkCommandPool cmdPool) { argument
28 VkResult err = GR_VK_CALL(gpu->vkInterface(), AllocateCommandBuffers(gpu->device(),
49 void GrVkCommandBuffer::freeGPUData(const GrVkGpu* gpu) const {
53 fTrackedResources[i]->unref(gpu);
58 GR_VK_CALL(gpu->vkInterface(), DestroyFence(gpu->device(), fSubmitFence, nullptr));
61 GR_VK_CALL(gpu->vkInterface(), FreeCommandBuffers(gpu->device(), gpu
71 begin(const GrVkGpu* gpu) argument
85 end(const GrVkGpu* gpu) argument
95 beginRenderPass(const GrVkGpu* gpu, const GrVkRenderPass* renderPass, const GrVkRenderTarget& target) argument
110 endRenderPass(const GrVkGpu* gpu) argument
117 submitToQueue(const GrVkGpu* gpu, VkQueue queue, GrVkGpu::SyncQueue sync) argument
181 pipelineBarrier(const GrVkGpu* gpu, VkPipelineStageFlags srcStageMask, VkPipelineStageFlags dstStageMask, bool byRegion, BarrierType barrierType, void* barrier) const argument
226 copyImage(const GrVkGpu* gpu, GrVkImage* srcImage, VkImageLayout srcLayout, GrVkImage* dstImage, VkImageLayout dstLayout, uint32_t copyRegionCount, const VkImageCopy* copyRegions) argument
246 copyImageToBuffer(const GrVkGpu* gpu, GrVkImage* srcImage, VkImageLayout srcLayout, GrVkTransferBuffer* dstBuffer, uint32_t copyRegionCount, const VkBufferImageCopy* copyRegions) argument
264 copyBufferToImage(const GrVkGpu* gpu, GrVkTransferBuffer* srcBuffer, GrVkImage* dstImage, VkImageLayout dstLayout, uint32_t copyRegionCount, const VkBufferImageCopy* copyRegions) argument
282 clearColorImage(const GrVkGpu* gpu, GrVkImage* image, const VkClearColorValue* color, uint32_t subRangeCount, const VkImageSubresourceRange* subRanges) argument
298 clearAttachments(const GrVkGpu* gpu, int numAttachments, const VkClearAttachment* attachments, int numRects, const VkClearRect* clearRects) const argument
323 bindDescriptorSets(const GrVkGpu* gpu, GrVkProgram* program, VkPipelineLayout layout, uint32_t firstSet, uint32_t setCount, const VkDescriptorSet* descriptorSets, uint32_t dynamicOffsetCount, const uint32_t* dynamicOffsets) argument
343 drawIndexed(const GrVkGpu* gpu, uint32_t indexCount, uint32_t instanceCount, uint32_t firstIndex, int32_t vertexOffset, uint32_t firstInstance) const argument
359 draw(const GrVkGpu* gpu, uint32_t vertexCount, uint32_t instanceCount, uint32_t firstVertex, uint32_t firstInstance) const argument
[all...]
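These definitions trace the full lifecycle that GrVkCommandBuffer manages: allocation from a pool, begin/end, an optional render pass, recorded commands, and submission against a fence. A usage sketch based only on the member functions shown in these results; gpu, cmdPool, queue, renderPass, renderTarget, and sync are placeholders:

    // Sketch of one command buffer's lifetime, using only the signatures above.
    GrVkCommandBuffer* cmdBuffer = GrVkCommandBuffer::Create(gpu, cmdPool);

    cmdBuffer->begin(gpu);
    cmdBuffer->beginRenderPass(gpu, renderPass, renderTarget);
    // ... record bindPipeline / bindDescriptorSets / draw or drawIndexed here ...
    cmdBuffer->endRenderPass(gpu);
    cmdBuffer->end(gpu);

    // Submission takes a sync policy (GrVkGpu::SyncQueue) and signals an internal fence.
    cmdBuffer->submitToQueue(gpu, queue, sync);

    // finished() checks that fence so tracked resources can be released safely.
    bool done = cmdBuffer->finished(gpu);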
GrVkImageView.cpp
12 const GrVkImageView* GrVkImageView::Create(GrVkGpu* gpu, VkImage image, VkFormat format, argument
36 VkResult err = GR_VK_CALL(gpu->vkInterface(), CreateImageView(gpu->device(), &viewInfo,
45 void GrVkImageView::freeGPUData(const GrVkGpu* gpu) const {
46 GR_VK_CALL(gpu->vkInterface(), DestroyImageView(gpu->device(), fImageView, nullptr));
GrVkImage.cpp
15 void GrVkImage::setImageLayout(const GrVkGpu* gpu, VkImageLayout newLayout, argument
42 gpu->addImageMemoryBarrier(srcStageMask, dstStageMask, byRegion, &imageMemoryBarrier);
47 const GrVkImage::Resource* GrVkImage::CreateResource(const GrVkGpu* gpu, argument
81 err = VK_CALL(gpu, CreateImage(gpu->device(), &imageCreateInfo, nullptr, &image));
84 if (!GrVkMemory::AllocAndBindImageMemory(gpu, image, imageDesc.fMemProps, &alloc)) {
85 VK_CALL(gpu, DestroyImage(gpu->device(), image, nullptr));
101 void GrVkImage::releaseImage(const GrVkGpu* gpu) { argument
103 fResource->unref(gpu);
[all...]
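setImageLayout (line 15 above) builds an image memory barrier and hands it to gpu->addImageMemoryBarrier, which is how layout transitions are expressed in Vulkan. A sketch of the equivalent raw call, vkCmdPipelineBarrier with a single VkImageMemoryBarrier; the command buffer, image, and stage/access masks are placeholders chosen by the caller:

    #include <vulkan/vulkan.h>

    // Transition `image` from oldLayout to newLayout by recording a pipeline
    // barrier; access and stage masks must match how the image was and will be used.
    void transitionImageLayout(VkCommandBuffer cmd, VkImage image,
                               VkImageLayout oldLayout, VkImageLayout newLayout,
                               VkAccessFlags srcAccess, VkAccessFlags dstAccess,
                               VkPipelineStageFlags srcStage, VkPipelineStageFlags dstStage) {
        VkImageMemoryBarrier barrier = {};
        barrier.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER;
        barrier.srcAccessMask = srcAccess;
        barrier.dstAccessMask = dstAccess;
        barrier.oldLayout = oldLayout;
        barrier.newLayout = newLayout;
        barrier.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED;
        barrier.image = image;
        barrier.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 };

        vkCmdPipelineBarrier(cmd, srcStage, dstStage, 0 /*dependencyFlags*/,
                             0, nullptr,   // no global memory barriers
                             0, nullptr,   // no buffer barriers
                             1, &barrier); // one image barrier
    }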
GrVkTexture.cpp
16 GrVkTexture::GrVkTexture(GrVkGpu* gpu, argument
21 : GrSurface(gpu, lifeCycle, desc)
23 , INHERITED(gpu, lifeCycle, desc)
29 GrVkTexture::GrVkTexture(GrVkGpu* gpu, argument
35 : GrSurface(gpu, lifeCycle, desc)
37 , INHERITED(gpu, lifeCycle, desc)
41 GrVkTexture* GrVkTexture::Create(GrVkGpu* gpu, argument
47 const GrVkImageView* imageView = GrVkImageView::Create(gpu, image, format,
53 return new GrVkTexture(gpu, desc, lifeCycle, imageResource, imageView);
56 GrVkTexture* GrVkTexture::CreateNewTexture(GrVkGpu* gpu, cons argument
73 CreateWrappedTexture(GrVkGpu* gpu, const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle, VkFormat format, const GrVkImage::Resource* imageResource) argument
[all...]
GrVkRenderTarget.cpp
22 GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu, argument
29 : GrSurface(gpu, lifeCycle, desc)
32 , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
41 this->createFramebuffer(gpu);
48 GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu, argument
56 : GrSurface(gpu, lifeCycle, desc)
59 , GrRenderTarget(gpu, lifeCycle, desc, kUnified_SampleConfig)
68 this->createFramebuffer(gpu);
74 GrVkRenderTarget::GrVkRenderTarget(GrVkGpu* gpu, argument
79 : GrSurface(gpu, lifeCycl
95 GrVkRenderTarget(GrVkGpu* gpu, const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle, const GrVkImage::Resource* imageResource, const GrVkImageView* colorAttachmentView, Derived) argument
115 Create(GrVkGpu* gpu, const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle, const GrVkImage::Resource* imageResource) argument
185 CreateNewRenderTarget(GrVkGpu* gpu, const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle, const GrVkImage::ImageDesc& imageDesc) argument
204 CreateWrappedRenderTarget(GrVkGpu* gpu, const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle, const GrVkImage::Resource* imageResource) argument
220 createFramebuffer(GrVkGpu* gpu) argument
294 GrVkGpu* gpu = this->getVkGpu(); local
[all...]
GrVkMemory.h
17 * Allocates vulkan device memory and binds it to the gpu's device for the given object.
20 bool AllocAndBindBufferMemory(const GrVkGpu* gpu,
25 bool AllocAndBindImageMemory(const GrVkGpu* gpu,
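AllocAndBindBufferMemory and AllocAndBindImageMemory hide the one fiddly step of Vulkan allocation: picking a memory type whose bit is set in the resource's memoryTypeBits and whose property flags satisfy the caller. A sketch of that selection step in raw Vulkan, not Skia's actual implementation:

    #include <cstdint>
    #include <vulkan/vulkan.h>

    // Pick a memory type index compatible with `memoryTypeBits` (from
    // vkGetBufferMemoryRequirements / vkGetImageMemoryRequirements) that also
    // has the requested property flags. Returns UINT32_MAX if none exists.
    uint32_t findMemoryType(VkPhysicalDevice physicalDevice,
                            uint32_t memoryTypeBits,
                            VkMemoryPropertyFlags requiredFlags) {
        VkPhysicalDeviceMemoryProperties props;
        vkGetPhysicalDeviceMemoryProperties(physicalDevice, &props);

        for (uint32_t i = 0; i < props.memoryTypeCount; ++i) {
            bool allowedType = (memoryTypeBits & (1u << i)) != 0;
            bool hasFlags =
                (props.memoryTypes[i].propertyFlags & requiredFlags) == requiredFlags;
            if (allowedType && hasFlags) {
                return i;
            }
        }
        return UINT32_MAX;
    }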
GrVkTextureRenderTarget.cpp
18 GrVkTextureRenderTarget::Create(GrVkGpu* gpu, argument
26 const GrVkImageView* imageView = GrVkImageView::Create(gpu, image, format,
52 msaaImageResource = GrVkImage::CreateResource(gpu, msImageDesc);
55 imageView->unref(gpu);
68 resolveAttachmentView = GrVkImageView::Create(gpu, image, pixelFormat,
71 msaaImageResource->unref(gpu);
72 imageView->unref(gpu);
89 colorAttachmentView = GrVkImageView::Create(gpu, colorImage, pixelFormat,
93 resolveAttachmentView->unref(gpu);
94 msaaImageResource->unref(gpu);
117 CreateNewTextureRenderTarget(GrVkGpu* gpu, const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle, const GrVkImage::ImageDesc& imageDesc) argument
139 CreateWrappedTextureRenderTarget(GrVkGpu* gpu, const GrSurfaceDesc& desc, GrGpuResource::LifeCycle lifeCycle, VkFormat format, GrVkImage::Resource* imageRsrc) argument
[all...]
GrVkBuffer.h
32 void addMemoryBarrier(const GrVkGpu* gpu,
61 void freeGPUData(const GrVkGpu* gpu) const;
67 static const Resource* Create(const GrVkGpu* gpu,
74 void* vkMap(const GrVkGpu* gpu);
75 void vkUnmap(const GrVkGpu* gpu);
76 bool vkUpdateData(const GrVkGpu* gpu, const void* src, size_t srcSizeInBytes);
79 void vkRelease(const GrVkGpu* gpu);
GrVkDescriptorPool.cpp
14 GrVkDescriptorPool::GrVkDescriptorPool(const GrVkGpu* gpu, const DescriptorTypeCounts& typeCounts) argument
38 GR_VK_CALL_ERRCHECK(gpu->vkInterface(), CreateDescriptorPool(gpu->device(),
48 void GrVkDescriptorPool::reset(const GrVkGpu* gpu) { argument
49 GR_VK_CALL_ERRCHECK(gpu->vkInterface(), ResetDescriptorPool(gpu->device(), fDescPool, 0));
52 void GrVkDescriptorPool::freeGPUData(const GrVkGpu* gpu) const {
55 GR_VK_CALL(gpu->vkInterface(), DestroyDescriptorPool(gpu->device(), fDescPool, nullptr));
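GrVkDescriptorPool is a thin wrapper over the three raw Vulkan calls visible in these matches: CreateDescriptorPool, ResetDescriptorPool, and DestroyDescriptorPool. A minimal sketch of the same sequence without the GR_VK_CALL macros; the device handle and pool sizes are illustrative, not Skia's actual DescriptorTypeCounts:

    #include <vulkan/vulkan.h>

    // Create, reset, and destroy a descriptor pool sized for a single
    // descriptor type; the counts here are placeholders.
    void descriptorPoolLifecycle(VkDevice device) {
        VkDescriptorPoolSize poolSize = {};
        poolSize.type = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
        poolSize.descriptorCount = 16;

        VkDescriptorPoolCreateInfo createInfo = {};
        createInfo.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
        createInfo.maxSets = 16;
        createInfo.poolSizeCount = 1;
        createInfo.pPoolSizes = &poolSize;

        VkDescriptorPool pool;
        if (vkCreateDescriptorPool(device, &createInfo, nullptr, &pool) != VK_SUCCESS) {
            return;
        }

        // reset() returns every set allocated from the pool in one call...
        vkResetDescriptorPool(device, pool, 0);

        // ...and freeGPUData() ultimately destroys the pool itself.
        vkDestroyDescriptorPool(device, pool, nullptr);
    }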
GrVkStencilAttachment.cpp
16 GrVkStencilAttachment::GrVkStencilAttachment(GrVkGpu* gpu, argument
22 : INHERITED(gpu, lifeCycle, desc.fWidth, desc.fHeight, format.fStencilBits, desc.fSamples)
31 GrVkStencilAttachment* GrVkStencilAttachment::Create(GrVkGpu* gpu, argument
48 const GrVkImage::Resource* imageResource = GrVkImage::CreateResource(gpu, imageDesc);
53 const GrVkImageView* imageView = GrVkImageView::Create(gpu, imageResource->fImage,
57 imageResource->unref(gpu);
61 GrVkStencilAttachment* stencil = new GrVkStencilAttachment(gpu, lifeCycle, format, imageDesc,
63 imageResource->unref(gpu);
64 imageView->unref(gpu);
84 GrVkGpu* gpu local
[all...]
GrVkUniformBuffer.cpp
12 GrVkUniformBuffer* GrVkUniformBuffer::Create(GrVkGpu* gpu, size_t size, bool dynamic) { argument
21 const GrVkBuffer::Resource* bufferResource = GrVkBuffer::Create(gpu, desc);
28 bufferResource->unref(gpu);
GrVkSampler.h
21 static GrVkSampler* Create(const GrVkGpu* gpu, const GrTextureAccess& textureAccess);
28 void freeGPUData(const GrVkGpu* gpu) const override;
GrVkTextureRenderTarget.h
48 GrVkTextureRenderTarget(GrVkGpu* gpu, argument
56 : GrSurface(gpu, lifeCycle, desc)
58 , GrVkTexture(gpu, desc, lifeCycle, imageResource, texView, GrVkTexture::kDerived)
59 , GrVkRenderTarget(gpu, desc, lifeCycle, imageResource, msaaResource, colorAttachmentView,
64 GrVkTextureRenderTarget(GrVkGpu* gpu, argument
70 : GrSurface(gpu, lifeCycle, desc)
72 , GrVkTexture(gpu, desc, lifeCycle, imageResource, texView, GrVkTexture::kDerived)
73 , GrVkRenderTarget(gpu, desc, lifeCycle, imageResource, colorAttachmentView,
/external/skia/src/gpu/
GrBatchFlushState.cpp
13 GrBatchFlushState::GrBatchFlushState(GrGpu* gpu, GrResourceProvider* resourceProvider) argument
14 : fGpu(gpu)
15 , fUploader(gpu)
17 , fVertexPool(gpu)
18 , fIndexPool(gpu)
/external/chromium-trace/catapult/telemetry/telemetry/internal/platform/
system_info_unittest.py
16 'gpu': {
25 self.assertTrue(isinstance(info.gpu, gpu_info.GPUInfo))
27 self.assertTrue(len(info.gpu.devices) == 1)
28 self.assertTrue(isinstance(info.gpu.devices[0], gpu_device.GPUDevice))
29 self.assertEquals(info.gpu.devices[0].vendor_id, 1000)
30 self.assertEquals(info.gpu.devices[0].device_id, 2000)
31 self.assertEquals(info.gpu.devices[0].vendor_string, 'a')
32 self.assertEquals(info.gpu.devices[0].device_string, 'b')
37 'gpu': {
/external/skia/src/gpu/gl/
GrGLPathRange.cpp
14 GrGLPathRange::GrGLPathRange(GrGLGpu* gpu, PathGenerator* pathGenerator, const GrStrokeInfo& stroke) argument
15 : INHERITED(gpu, pathGenerator),
17 fBasePathID(gpu->glPathRendering()->genPaths(this->getNumPaths())),
23 GrGLPathRange::GrGLPathRange(GrGLGpu* gpu, argument
28 : INHERITED(gpu, numPaths),
54 GrGLGpu* gpu = static_cast<GrGLGpu*>(this->getGpu()); local
55 if (nullptr == gpu) {
62 GR_GL_CALL_RET(gpu->glInterface(), isPath, IsPath(fBasePathID + index)));
66 GrGLPath::InitPathObjectEmptyPath(gpu, fBasePathID + index);
68 GrGLPath::InitPathObjectPathData(gpu, fBasePathI
[all...]
GrGLBufferImpl.h
45 void release(GrGLGpu* gpu);
51 void* map(GrGLGpu* gpu);
52 void unmap(GrGLGpu* gpu);
54 bool updateData(GrGLGpu* gpu, const void* src, size_t srcSizeInBytes);
GrGLTextureRenderTarget.h
28 GrGLTextureRenderTarget(GrGLGpu* gpu, argument
32 : GrSurface(gpu, texIDDesc.fLifeCycle, desc)
33 , GrGLTexture(gpu, desc, texIDDesc, GrGLTexture::kDerived)
34 , GrGLRenderTarget(gpu, desc, rtIDDesc, GrGLRenderTarget::kDerived) {
/external/chromium-trace/catapult/tracing/tracing/ui/extras/chrome/gpu/
state_view.css
6 .tr-ui-e-chrome-gpu-state-snapshot-view {
12 .tr-ui-e-chrome-gpu-state-snapshot-view img {
/external/skia/experimental/fiddle/
fiddle_main.h
19 , gpu(g)
25 bool gpu; member in struct:DrawOptions
/external/skia/
Android.mk
371 src/gpu/GrAuditTrail.cpp \
372 src/gpu/GrBatchAtlas.cpp \
373 src/gpu/GrBatchFlushState.cpp \
374 src/gpu/GrBatchTest.cpp \
375 src/gpu/GrBlend.cpp \
376 src/gpu/GrBlurUtils.cpp \
377 src/gpu/GrBufferAllocPool.cpp \
378 src/gpu/GrCaps.cpp \
379 src/gpu/GrClip.cpp \
380 src/gpu/GrClipMaskManage
[all...]

Completed in 469 milliseconds
