// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/gl/gl_context_cgl.h"

#include <OpenGL/CGLRenderers.h>
#include <OpenGL/CGLTypes.h>
#include <vector>

#include "base/bind.h"
#include "base/debug/trace_event.h"
#include "base/logging.h"
#include "base/memory/scoped_ptr.h"
#include "base/message_loop/message_loop.h"
#include "base/time/time.h"
#include "ui/gl/gl_bindings.h"
#include "ui/gl/gl_implementation.h"
#include "ui/gl/gl_surface_cgl.h"
#include "ui/gl/gpu_switching_manager.h"

namespace gfx {

namespace {

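// True when the pixel format chosen by GetPixelFormat() allows contexts to
// switch between renderers (GPUs); set there and consulted by MakeCurrent().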
bool g_support_renderer_switching;

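// Deleter used with scoped_ptr to destroy a CGLRendererInfoObj.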
struct CGLRendererInfoObjDeleter {
  void operator()(CGLRendererInfoObj* x) {
    if (x)
      CGLDestroyRendererInfo(*x);
  }
};

}  // namespace

static CGLPixelFormatObj GetPixelFormat() {
  static CGLPixelFormatObj format;
  if (format)
    return format;
  std::vector<CGLPixelFormatAttribute> attribs;
  // If the system supports dual GPUs, then allow offline renderers for every
  // context, so that they can all be in the same share group.
  if (ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus()) {
    attribs.push_back(kCGLPFAAllowOfflineRenderers);
    g_support_renderer_switching = true;
  }
  if (GetGLImplementation() == kGLImplementationAppleGL) {
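    // kCGLRendererGenericFloatID pins the context to Apple's software
    // renderer, so renderer switching is not possible.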
    attribs.push_back(kCGLPFARendererID);
    attribs.push_back((CGLPixelFormatAttribute) kCGLRendererGenericFloatID);
    g_support_renderer_switching = false;
  }
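  // CGLChoosePixelFormat expects a zero-terminated attribute list.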
  attribs.push_back((CGLPixelFormatAttribute) 0);

  GLint num_virtual_screens;
  if (CGLChoosePixelFormat(&attribs.front(),
                           &format,
                           &num_virtual_screens) != kCGLNoError) {
    LOG(ERROR) << "Error choosing pixel format.";
    return NULL;
  }
  if (!format) {
    LOG(ERROR) << "format == 0.";
    return NULL;
  }
  DCHECK_NE(num_virtual_screens, 0);
  return format;
}

GLContextCGL::GLContextCGL(GLShareGroup* share_group)
  : GLContextReal(share_group),
    context_(NULL),
    gpu_preference_(PreferIntegratedGpu),
    discrete_pixelformat_(NULL),
    screen_(-1),
    renderer_id_(-1),
    safe_to_force_gpu_switch_(false) {
}

bool GLContextCGL::Initialize(GLSurface* compatible_surface,
                              GpuPreference gpu_preference) {
  DCHECK(compatible_surface);

  gpu_preference = ui::GpuSwitchingManager::GetInstance()->AdjustGpuPreference(
      gpu_preference);

  GLContextCGL* share_context = share_group() ?
      static_cast<GLContextCGL*>(share_group()->GetContext()) : NULL;

  CGLPixelFormatObj format = GetPixelFormat();
  if (!format)
    return false;

  // If using the discrete GPU, create a pixel format requiring it before we
  // create the context.
  if (!ui::GpuSwitchingManager::GetInstance()->SupportsDualGpus() ||
      gpu_preference == PreferDiscreteGpu) {
    std::vector<CGLPixelFormatAttribute> discrete_attribs;
    discrete_attribs.push_back((CGLPixelFormatAttribute) 0);
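    // Without kCGLPFAAllowOfflineRenderers in the attribute list, this format
    // can only match the online (discrete) renderer, and holding the format
    // object keeps the discrete GPU active.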
    GLint num_pixel_formats;
    if (CGLChoosePixelFormat(&discrete_attribs.front(),
                             &discrete_pixelformat_,
                             &num_pixel_formats) != kCGLNoError) {
      LOG(ERROR) << "Error choosing pixel format.";
      return false;
    }
    // The renderer might be switched after this, so ignore the saved ID.
    share_group()->SetRendererID(-1);
  }

  CGLError res = CGLCreateContext(
      format,
      share_context ?
          static_cast<CGLContextObj>(share_context->GetHandle()) : NULL,
      reinterpret_cast<CGLContextObj*>(&context_));
  if (res != kCGLNoError) {
    LOG(ERROR) << "Error creating context.";
    Destroy();
    return false;
  }

  gpu_preference_ = gpu_preference;
  return true;
}

void GLContextCGL::Destroy() {
  if (discrete_pixelformat_) {
    // Delay releasing the pixel format for 10 seconds to reduce the number of
    // unnecessary GPU switches.
    base::MessageLoop::current()->PostDelayedTask(
        FROM_HERE,
        base::Bind(&CGLReleasePixelFormat, discrete_pixelformat_),
        base::TimeDelta::FromSeconds(10));
    discrete_pixelformat_ = NULL;
  }
  if (context_) {
    CGLDestroyContext(static_cast<CGLContextObj>(context_));
    context_ = NULL;
  }
}

bool GLContextCGL::MakeCurrent(GLSurface* surface) {
  DCHECK(context_);

  // The call to CGLSetVirtualScreen can hang on some AMD drivers:
  // http://crbug.com/227228
  if (safe_to_force_gpu_switch_) {
    int renderer_id = share_group()->GetRendererID();
    int screen;
    CGLGetVirtualScreen(static_cast<CGLContextObj>(context_), &screen);

    if (g_support_renderer_switching &&
        !discrete_pixelformat_ && renderer_id != -1 &&
        (screen != screen_ || renderer_id != renderer_id_)) {
      // Attempt to find a virtual screen that's using the requested renderer,
      // and switch the context to use that screen. Don't attempt to switch if
      // the context requires the discrete GPU.
      CGLPixelFormatObj format = GetPixelFormat();
      int virtual_screen_count;
      if (CGLDescribePixelFormat(format, 0, kCGLPFAVirtualScreenCount,
                                 &virtual_screen_count) != kCGLNoError)
        return false;

      for (int i = 0; i < virtual_screen_count; ++i) {
        int screen_renderer_id;
        if (CGLDescribePixelFormat(format, i, kCGLPFARendererID,
                                   &screen_renderer_id) != kCGLNoError)
          return false;

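        // Compare using only the ID bits that CGL treats as significant when
        // matching renderers.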
        screen_renderer_id &= kCGLRendererIDMatchingMask;
        if (screen_renderer_id == renderer_id) {
          CGLSetVirtualScreen(static_cast<CGLContextObj>(context_), i);
          screen_ = i;
          break;
        }
      }
      renderer_id_ = renderer_id;
    }
  }

  if (IsCurrent(surface))
    return true;

  TRACE_EVENT0("gpu", "GLContextCGL::MakeCurrent");

  if (CGLSetCurrentContext(
      static_cast<CGLContextObj>(context_)) != kCGLNoError) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  // Set this as soon as the context is current, since we might call into GL.
  SetRealGLApi();

  SetCurrent(surface);
  if (!InitializeExtensionBindings()) {
    ReleaseCurrent(surface);
    return false;
  }

  if (!surface->OnMakeCurrent(this)) {
    LOG(ERROR) << "Unable to make gl context current.";
    return false;
  }

  return true;
}

void GLContextCGL::ReleaseCurrent(GLSurface* surface) {
  if (!IsCurrent(surface))
    return;

  SetCurrent(NULL);
  CGLSetCurrentContext(NULL);
}

bool GLContextCGL::IsCurrent(GLSurface* surface) {
  bool native_context_is_current = CGLGetCurrentContext() == context_;

  // If our context is current then our notion of which GLContext is
  // current must be correct. On the other hand, third-party code
  // using OpenGL might change the current context.
  DCHECK(!native_context_is_current || (GetRealCurrent() == this));

  if (!native_context_is_current)
    return false;

  return true;
}

void* GLContextCGL::GetHandle() {
  return context_;
}

void GLContextCGL::SetSwapInterval(int interval) {
  DCHECK(IsCurrent(NULL));
  LOG(WARNING) << "GLContext: GLContextCGL::SetSwapInterval is ignored.";
}

bool GLContextCGL::GetTotalGpuMemory(size_t* bytes) {
  DCHECK(bytes);
  *bytes = 0;

  CGLContextObj context = reinterpret_cast<CGLContextObj>(context_);
  if (!context)
    return false;

  // Retrieve the current renderer ID.
  GLint current_renderer_id = 0;
  if (CGLGetParameter(context,
                      kCGLCPCurrentRendererID,
                      &current_renderer_id) != kCGLNoError)
    return false;

  // Iterate through the list of all renderers.
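  // An all-ones display mask requests renderer info for every display.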
  GLuint display_mask = static_cast<GLuint>(-1);
  CGLRendererInfoObj renderer_info = NULL;
  GLint num_renderers = 0;
  if (CGLQueryRendererInfo(display_mask,
                           &renderer_info,
                           &num_renderers) != kCGLNoError)
    return false;

  scoped_ptr<CGLRendererInfoObj,
      CGLRendererInfoObjDeleter> scoper(&renderer_info);

  for (GLint renderer_index = 0;
       renderer_index < num_renderers;
       ++renderer_index) {
    // Skip this renderer if it is not the current renderer.
    GLint renderer_id = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPRendererID,
                            &renderer_id) != kCGLNoError)
      continue;
    if (renderer_id != current_renderer_id)
      continue;
    // Retrieve the video memory for the renderer.
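    // Note: kCGLRPVideoMemory reports bytes in a GLint, so renderers with
    // 2GB or more of VRAM may not be reported accurately.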
    GLint video_memory = 0;
    if (CGLDescribeRenderer(renderer_info,
                            renderer_index,
                            kCGLRPVideoMemory,
                            &video_memory) != kCGLNoError)
      continue;
    *bytes = video_memory;
    return true;
  }

  return false;
}

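// MakeCurrent() only attempts a renderer switch after this has been called,
// since CGLSetVirtualScreen can hang on some drivers (crbug.com/227228).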
void GLContextCGL::SetSafeToForceGpuSwitch() {
  safe_to_force_gpu_switch_ = true;
}

GLContextCGL::~GLContextCGL() {
  Destroy();
}

GpuPreference GLContextCGL::GetGpuPreference() {
  return gpu_preference_;
}

}  // namespace gfx