// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CONTENT_COMMON_GPU_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_
#define CONTENT_COMMON_GPU_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_

#include <map>
#include <queue>
#include <string>

#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/containers/hash_tables.h"
#include "base/containers/scoped_ptr_hash_map.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/observer_list.h"
#include "gpu/command_buffer/client/gpu_control.h"
#include "gpu/command_buffer/common/command_buffer.h"
#include "gpu/command_buffer/common/command_buffer_shared.h"
#include "gpu/command_buffer/common/gpu_memory_allocation.h"
#include "ipc/ipc_listener.h"
#include "ui/events/latency_info.h"

struct GPUCommandBufferConsoleMessage;

namespace base {
class SharedMemory;
}

namespace gpu {
struct Mailbox;
}

namespace media {
class VideoDecodeAccelerator;
class VideoEncodeAccelerator;
}

namespace content {
class GpuChannelHost;

// Client side proxy that forwards messages synchronously to a
// CommandBufferStub.
class CommandBufferProxyImpl
    : public gpu::CommandBuffer,
      public gpu::GpuControl,
      public IPC::Listener,
      public base::SupportsWeakPtr<CommandBufferProxyImpl> {
 public:
  class DeletionObserver {
   public:
    // Called during the destruction of the CommandBufferProxyImpl.
    virtual void OnWillDeleteImpl() = 0;

   protected:
    virtual ~DeletionObserver() {}
  };
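
  // Purely illustrative sketch (not part of this header's contract): a client
  // may subclass DeletionObserver and register it via AddDeletionObserver()
  // declared below, e.g. to drop raw pointers before the proxy goes away.
  // ProxyWatcher and |proxy_| are hypothetical names.
  //
  //   class ProxyWatcher : public CommandBufferProxyImpl::DeletionObserver {
  //    public:
  //     virtual void OnWillDeleteImpl() OVERRIDE { proxy_ = NULL; }
  //    private:
  //     CommandBufferProxyImpl* proxy_;  // Hypothetical unowned pointer.
  //   };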

  typedef base::Callback<void(
      const std::string& msg, int id)> GpuConsoleMessageCallback;

  CommandBufferProxyImpl(GpuChannelHost* channel, int route_id);
  virtual ~CommandBufferProxyImpl();

  // Sends an IPC message to create a GpuVideoDecodeAccelerator. Creates and
  // returns it as an owned pointer to a media::VideoDecodeAccelerator.  Returns
  // NULL on failure to create the GpuVideoDecodeAcceleratorHost.
  // Note that the GpuVideoDecodeAccelerator may still fail to be created in
  // the GPU process, even if this returns non-NULL. In this case the VDA client
  // is notified of an error later, after Initialize().
  scoped_ptr<media::VideoDecodeAccelerator> CreateVideoDecoder();

  // Sends an IPC message to create a GpuVideoEncodeAccelerator. Creates and
  // returns it as an owned pointer to a media::VideoEncodeAccelerator.  Returns
  // NULL on failure to create the GpuVideoEncodeAcceleratorHost.
  // Note that the GpuVideoEncodeAccelerator may still fail to be created in
  // the GPU process, even if this returns non-NULL. In this case the VEA client
  // is notified of an error later, after Initialize().
  scoped_ptr<media::VideoEncodeAccelerator> CreateVideoEncoder();
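
  // Usage sketch for the accelerator factories above (|proxy| is assumed to
  // be a live CommandBufferProxyImpl; the fallback path is hypothetical):
  //
  //   scoped_ptr<media::VideoDecodeAccelerator> vda =
  //       proxy->CreateVideoDecoder();
  //   if (!vda) {
  //     // GpuVideoDecodeAcceleratorHost creation failed; fall back to
  //     // software decode.
  //   }
  //   // Even on success, the GPU process may still fail to create the
  //   // accelerator; the client learns of that after Initialize().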

  // IPC::Listener implementation:
  virtual bool OnMessageReceived(const IPC::Message& message) OVERRIDE;
  virtual void OnChannelError() OVERRIDE;

  // CommandBuffer implementation:
  virtual bool Initialize() OVERRIDE;
  virtual State GetLastState() OVERRIDE;
  virtual int32 GetLastToken() OVERRIDE;
  virtual void Flush(int32 put_offset) OVERRIDE;
  virtual void WaitForTokenInRange(int32 start, int32 end) OVERRIDE;
  virtual void WaitForGetOffsetInRange(int32 start, int32 end) OVERRIDE;
  virtual void SetGetBuffer(int32 shm_id) OVERRIDE;
  virtual scoped_refptr<gpu::Buffer> CreateTransferBuffer(size_t size,
                                                          int32* id) OVERRIDE;
  virtual void DestroyTransferBuffer(int32 id) OVERRIDE;

  // gpu::GpuControl implementation:
  virtual gpu::Capabilities GetCapabilities() OVERRIDE;
  virtual gfx::GpuMemoryBuffer* CreateGpuMemoryBuffer(size_t width,
                                                      size_t height,
                                                      unsigned internalformat,
                                                      unsigned usage,
                                                      int32* id) OVERRIDE;
  virtual void DestroyGpuMemoryBuffer(int32 id) OVERRIDE;
  virtual uint32 InsertSyncPoint() OVERRIDE;
  virtual uint32_t InsertFutureSyncPoint() OVERRIDE;
  virtual void RetireSyncPoint(uint32_t sync_point) OVERRIDE;
  virtual void SignalSyncPoint(uint32 sync_point,
                               const base::Closure& callback) OVERRIDE;
  virtual void SignalQuery(uint32 query,
                           const base::Closure& callback) OVERRIDE;
  virtual void SetSurfaceVisible(bool visible) OVERRIDE;
  virtual void Echo(const base::Closure& callback) OVERRIDE;
  virtual uint32 CreateStreamTexture(uint32 texture_id) OVERRIDE;
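
  // Usage sketch for the sync point methods above (|proxy| is assumed to be a
  // live CommandBufferProxyImpl; Client, OnSyncPointPassed() and
  // |weak_factory_| are hypothetical client-side names):
  //
  //   uint32 sync_point = proxy->InsertSyncPoint();
  //   proxy->SignalSyncPoint(
  //       sync_point,
  //       base::Bind(&Client::OnSyncPointPassed,
  //                  weak_factory_.GetWeakPtr()));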

  int GetRouteID() const;
  bool ProduceFrontBuffer(const gpu::Mailbox& mailbox);
  void SetChannelErrorCallback(const base::Closure& callback);

  typedef base::Callback<void(const gpu::MemoryAllocation&)>
      MemoryAllocationChangedCallback;
  void SetMemoryAllocationChangedCallback(
      const MemoryAllocationChangedCallback& callback);
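
  // Sketch of wiring up the callback above (|client| is a hypothetical object
  // with an OnMemoryAllocationChanged(const gpu::MemoryAllocation&) method
  // that outlives the proxy):
  //
  //   proxy->SetMemoryAllocationChangedCallback(
  //       base::Bind(&Client::OnMemoryAllocationChanged,
  //                  base::Unretained(client)));
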
  void AddDeletionObserver(DeletionObserver* observer);
  void RemoveDeletionObserver(DeletionObserver* observer);

  bool EnsureBackbuffer();

  void SetOnConsoleMessageCallback(
      const GpuConsoleMessageCallback& callback);

  void SetLatencyInfo(const std::vector<ui::LatencyInfo>& latency_info);

  // TODO(apatrick): this is a temporary optimization while skia is calling
  // ContentGLContext::MakeCurrent prior to every GL call. It saves returning 6
  // ints redundantly when only the error is needed for the
  // CommandBufferProxyImpl implementation.
  virtual gpu::error::Error GetLastError() OVERRIDE;

  GpuChannelHost* channel() const { return channel_; }

  base::SharedMemoryHandle GetSharedStateHandle() const {
    return shared_state_shm_->handle();
  }

 private:
  typedef std::map<int32, scoped_refptr<gpu::Buffer> > TransferBufferMap;
  typedef base::hash_map<uint32, base::Closure> SignalTaskMap;
  typedef base::ScopedPtrHashMap<int32, gfx::GpuMemoryBuffer>
      GpuMemoryBufferMap;

  // Send an IPC message over the GPU channel. This is private to fully
  // encapsulate the channel; all callers of this function must explicitly
  // verify that the context has not been lost.
  bool Send(IPC::Message* msg);

  // Message handlers:
  void OnUpdateState(const gpu::CommandBuffer::State& state);
  void OnDestroyed(gpu::error::ContextLostReason reason);
  void OnEchoAck();
  void OnConsoleMessage(const GPUCommandBufferConsoleMessage& message);
  void OnSetMemoryAllocation(const gpu::MemoryAllocation& allocation);
  void OnSignalSyncPointAck(uint32 id);

  // Try to read an updated copy of the state from shared memory.
  void TryUpdateState();

  // The shared memory area used to update state.
  gpu::CommandBufferSharedState* shared_state() const;

  // Unowned list of DeletionObservers.
  ObserverList<DeletionObserver> deletion_observers_;

  // The last cached state received from the service.
  State last_state_;

  // The shared memory area used to update state.
  scoped_ptr<base::SharedMemory> shared_state_shm_;

  // |*this| is owned by |*channel_| and so is always outlived by it, so using a
  // raw pointer is ok.
  GpuChannelHost* channel_;
  int route_id_;
  unsigned int flush_count_;
  int32 last_put_offset_;

  // Tasks to be invoked in echo responses.
  std::queue<base::Closure> echo_tasks_;

  base::Closure channel_error_callback_;

  MemoryAllocationChangedCallback memory_allocation_changed_callback_;

  GpuConsoleMessageCallback console_message_callback_;

  // Tasks to be invoked in SignalSyncPoint responses.
  uint32 next_signal_id_;
  SignalTaskMap signal_tasks_;

  // Local cache of id to gpu memory buffer mapping.
  GpuMemoryBufferMap gpu_memory_buffers_;

  gpu::Capabilities capabilities_;

  std::vector<ui::LatencyInfo> latency_info_;

  DISALLOW_COPY_AND_ASSIGN(CommandBufferProxyImpl);
};

}  // namespace content

#endif  // CONTENT_COMMON_GPU_CLIENT_COMMAND_BUFFER_PROXY_IMPL_H_