1/*
2 * Copyright (C) 2015 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "hwc-platform-nv"
18
19#include "drmresources.h"
20#include "platform.h"
21#include "platformnv.h"
22
23#include <cinttypes>
24#include <stdatomic.h>
25#include <drm/drm_fourcc.h>
26#include <xf86drm.h>
27#include <xf86drmMode.h>
28
29#include <cutils/log.h>
30#include <hardware/gralloc.h>
31
32#ifndef EGL_NATIVE_HANDLE_ANDROID_NVX
33#define EGL_NATIVE_HANDLE_ANDROID_NVX 0x322A
34#endif
35
36namespace android {
37
38#ifdef USE_NVIDIA_IMPORTER
39// static
40Importer *Importer::CreateInstance(DrmResources *drm) {
41  NvImporter *importer = new NvImporter(drm);
42  if (!importer)
43    return NULL;
44
45  int ret = importer->Init();
46  if (ret) {
47    ALOGE("Failed to initialize the nv importer %d", ret);
48    delete importer;
49    return NULL;
50  }
51  return importer;
52}
53#endif
54
// Stores the DRM resources handle; real setup happens in Init().
NvImporter::NvImporter(DrmResources *drm) : drm_(drm) {
}
57
// Buffers are released individually via ReleaseBuffer()/NvGrallocRelease(),
// so there is nothing to tear down here.
NvImporter::~NvImporter() {
}
60
61int NvImporter::Init() {
62  int ret = hw_get_module(GRALLOC_HARDWARE_MODULE_ID,
63                          (const hw_module_t **)&gralloc_);
64  if (ret) {
65    ALOGE("Failed to open gralloc module %d", ret);
66    return ret;
67  }
68
69  if (strcasecmp(gralloc_->common.author, "NVIDIA"))
70    ALOGW("Using non-NVIDIA gralloc module: %s/%s\n", gralloc_->common.name,
71          gralloc_->common.author);
72
73  return 0;
74}
75
76
// Wraps a gralloc buffer handle in an EGLImage using NVIDIA's
// EGL_NATIVE_HANDLE_ANDROID_NVX target (defined at the top of this file).
// Returns EGL_NO_IMAGE_KHR on failure, per eglCreateImageKHR semantics.
EGLImageKHR NvImporter::ImportImage(EGLDisplay egl_display, buffer_handle_t handle) {
  return eglCreateImageKHR(
      egl_display, EGL_NO_CONTEXT, EGL_NATIVE_HANDLE_ANDROID_NVX,
      (EGLClientBuffer)handle, NULL /* no attribs */);
}
82
// Imports a gralloc buffer into DRM, producing a framebuffer id in *bo.
// Results are cached in NV gralloc's per-buffer "importer private" slot so a
// repeated import of the same handle only bumps a refcount.
// Returns 0 on success or a negative errno / perform() error code.
int NvImporter::ImportBuffer(buffer_handle_t handle, hwc_drm_bo_t *bo) {
  memset(bo, 0, sizeof(hwc_drm_bo_t));
  // Fast path: this handle was imported before and gralloc still holds our
  // NvBuffer_t; just take another reference and return the cached bo.
  NvBuffer_t *buf = GrallocGetNvBuffer(handle);
  if (buf) {
    atomic_fetch_add(&buf->ref, 1);
    *bo = buf->bo;
    return 0;
  }

  buf = new NvBuffer_t();
  if (!buf) {
    ALOGE("Failed to allocate new NvBuffer_t");
    return -ENOMEM;
  }
  // Back-pointers used by ReleaseBuffer()/NvGrallocRelease() to find this
  // wrapper again from either a bo or a raw private pointer.
  buf->bo.priv = buf;
  buf->importer = this;

  // We initialize the reference count to 2 since NvGralloc is still using this
  // buffer (will be cleared in the NvGrallocRelease), and the other
  // reference is for HWC (this ImportBuffer call).
  atomic_init(&buf->ref, 2);

  // NVIDIA gralloc extension: import the buffer's planes as DRM gem handles
  // and fill in width/height/format/pitches/offsets on buf->bo.
  int ret = gralloc_->perform(gralloc_, GRALLOC_MODULE_PERFORM_DRM_IMPORT,
                              drm_->fd(), handle, &buf->bo);
  if (ret) {
    ALOGE("GRALLOC_MODULE_PERFORM_DRM_IMPORT failed %d", ret);
    delete buf;
    return ret;
  }

  ret = drmModeAddFB2(drm_->fd(), buf->bo.width, buf->bo.height, buf->bo.format,
                      buf->bo.gem_handles, buf->bo.pitches, buf->bo.offsets,
                      &buf->bo.fb_id, 0);
  if (ret) {
    ALOGE("Failed to add fb %d", ret);
    // Close the gem handles opened by the DRM_IMPORT perform() above.
    ReleaseBufferImpl(&buf->bo);
    delete buf;
    return ret;
  }

  // Register buf (and our release callback) in gralloc's private slot so
  // future imports hit the fast path and gralloc can drop its ref on free.
  ret = GrallocSetNvBuffer(handle, buf);
  if (ret) {
    /* This will happen is persist.tegra.gpu_mapping_cache is 0/off,
     * or if NV gralloc runs out of "priv slots" (currently 3 per buffer,
     * only one of which should be used by drm_hwcomposer). */
    ALOGE("Failed to register free callback for imported buffer %d", ret);
    ReleaseBufferImpl(&buf->bo);
    delete buf;
    return ret;
  }
  *bo = buf->bo;
  return 0;
}
136
// Drops one reference to the buffer wrapped in bo; the last owner to release
// tears down the framebuffer, gem handles, and the NvBuffer_t itself.
int NvImporter::ReleaseBuffer(hwc_drm_bo_t *bo) {
  NvBuffer_t *buf = static_cast<NvBuffer_t *>(bo->priv);
  if (!buf) {
    ALOGE("Freeing bo %" PRIu32 ", buf is NULL!", bo->fb_id);
    return 0;
  }

  // atomic_fetch_sub returns the value *before* the decrement, so a prior
  // value of 1 means we just released the final reference.
  if (atomic_fetch_sub(&buf->ref, 1) <= 1) {
    ReleaseBufferImpl(bo);
    delete buf;
  }
  return 0;
}
150
151// static
152void NvImporter::NvGrallocRelease(void *nv_buffer) {
153  NvBuffer_t *buf = (NvBuffer *)nv_buffer;
154  buf->importer->ReleaseBuffer(&buf->bo);
155}
156
157void NvImporter::ReleaseBufferImpl(hwc_drm_bo_t *bo) {
158  if (bo->fb_id) {
159    int ret = drmModeRmFB(drm_->fd(), bo->fb_id);
160    if (ret)
161      ALOGE("Failed to rm fb %d", ret);
162  }
163
164  struct drm_gem_close gem_close;
165  memset(&gem_close, 0, sizeof(gem_close));
166  int num_gem_handles = sizeof(bo->gem_handles) / sizeof(bo->gem_handles[0]);
167  for (int i = 0; i < num_gem_handles; i++) {
168    if (!bo->gem_handles[i])
169      continue;
170
171    gem_close.handle = bo->gem_handles[i];
172    int ret = drmIoctl(drm_->fd(), DRM_IOCTL_GEM_CLOSE, &gem_close);
173    if (ret) {
174      ALOGE("Failed to close gem handle %d %d", i, ret);
175    } else {
176      /* Clear any duplicate gem handle as well but don't close again */
177      for (int j = i + 1; j < num_gem_handles; j++)
178        if (bo->gem_handles[j] == bo->gem_handles[i])
179          bo->gem_handles[j] = 0;
180      bo->gem_handles[i] = 0;
181    }
182  }
183}
184
185NvImporter::NvBuffer_t *NvImporter::GrallocGetNvBuffer(buffer_handle_t handle) {
186  void *priv = NULL;
187  int ret =
188      gralloc_->perform(gralloc_, GRALLOC_MODULE_PERFORM_GET_IMPORTER_PRIVATE,
189                        handle, NvGrallocRelease, &priv);
190  return ret ? NULL : (NvBuffer_t *)priv;
191}
192
// Stores buf in NV gralloc's importer private slot for this handle and
// registers NvGrallocRelease to be called when gralloc frees the buffer.
// Returns the gralloc perform() error code (0 on success).
int NvImporter::GrallocSetNvBuffer(buffer_handle_t handle, NvBuffer_t *buf) {
  return gralloc_->perform(gralloc_,
                           GRALLOC_MODULE_PERFORM_SET_IMPORTER_PRIVATE, handle,
                           NvGrallocRelease, buf);
}
198
199#ifdef USE_NVIDIA_IMPORTER
200// static
// static
// Builds the NV plane-assignment pipeline. Stage order matters: layers are
// filtered/claimed by earlier stages before the greedy fallback runs last.
std::unique_ptr<Planner> Planner::CreateInstance(DrmResources *) {
  std::unique_ptr<Planner> planner(new Planner);
  planner->AddStage<PlanStageNvLimits>();
  planner->AddStage<PlanStageProtectedRotated>();
  planner->AddStage<PlanStageProtected>();
  planner->AddStage<PlanStagePrecomp>();
  planner->AddStage<PlanStageGreedy>();
  return planner;
}
210#endif
211
212static DrmPlane *GetCrtcPrimaryPlane(DrmCrtc *crtc,
213                                     std::vector<DrmPlane *> *planes) {
214  for (auto i = planes->begin(); i != planes->end(); ++i) {
215    if ((*i)->GetCrtcSupported(*crtc)) {
216      DrmPlane *plane = *i;
217      planes->erase(i);
218      return plane;
219    }
220  }
221  return NULL;
222}
223
224int PlanStageProtectedRotated::ProvisionPlanes(
225    std::vector<DrmCompositionPlane> *composition,
226    std::map<size_t, DrmHwcLayer *> &layers, DrmCrtc *crtc,
227    std::vector<DrmPlane *> *planes) {
228  int ret;
229  int protected_zorder = -1;
230  for (auto i = layers.begin(); i != layers.end();) {
231    if (!i->second->protected_usage() || !i->second->transform) {
232      ++i;
233      continue;
234    }
235
236    auto primary_iter = planes->begin();
237    for (; primary_iter != planes->end(); ++primary_iter) {
238      if ((*primary_iter)->type() == DRM_PLANE_TYPE_PRIMARY)
239        break;
240    }
241
242    // We cheat a little here. Since there can only be one primary plane per
243    // crtc, we know we'll only hit this case once. So we blindly insert the
244    // protected content at the beginning of the composition, knowing this path
245    // won't be taken a second time during the loop.
246    if (primary_iter != planes->end()) {
247      composition->emplace(composition->begin(),
248                           DrmCompositionPlane::Type::kLayer, *primary_iter,
249                           crtc, i->first);
250      planes->erase(primary_iter);
251      protected_zorder = i->first;
252    } else {
253      ALOGE("Could not provision primary plane for protected/rotated layer");
254    }
255    i = layers.erase(i);
256  }
257
258  if (protected_zorder == -1)
259    return 0;
260
261  // Add any layers below the protected content to the precomposition since we
262  // need to punch a hole through them.
263  for (auto i = layers.begin(); i != layers.end();) {
264    // Skip layers above the z-order of the protected content
265    if (i->first > static_cast<size_t>(protected_zorder)) {
266      ++i;
267      continue;
268    }
269
270    // If there's no precomp layer already queued, queue one now.
271    DrmCompositionPlane *precomp = GetPrecomp(composition);
272    if (precomp) {
273      precomp->source_layers().emplace_back(i->first);
274    } else {
275      if (planes->size()) {
276        DrmPlane *precomp_plane = planes->back();
277        planes->pop_back();
278        composition->emplace_back(DrmCompositionPlane::Type::kPrecomp,
279                                  precomp_plane, crtc, i->first);
280      } else {
281        ALOGE("Not enough planes to reserve for precomp fb");
282      }
283    }
284    i = layers.erase(i);
285  }
286  return 0;
287}
288
// Returns true if the tegra display hardware can scan out this layer
// directly, based on format, blending, and downscale-factor limits.
// NOTE(review): assumes dst_w/dst_h are nonzero for visible layers — a
// zero-sized display frame would make the downscale division degenerate;
// confirm upstream validation guarantees this.
bool PlanStageNvLimits::CheckLayer(size_t zorder, DrmHwcLayer *layer) {
    auto src_w = layer->source_crop.width();
    auto src_h = layer->source_crop.height();
    auto dst_w = layer->display_frame.width();
    auto dst_h = layer->display_frame.height();
    // Max supported horizontal downscale factor is 4 for all formats; the
    // vertical limit depends on the format (set below).
    int h_limit = 4;
    int v_limit;

    switch (layer->buffer->format) {
      case DRM_FORMAT_ARGB8888:
      case DRM_FORMAT_ABGR8888:
      case DRM_FORMAT_XBGR8888:
      case DRM_FORMAT_XRGB8888:
        // tegra driver assumes any layer with alpha channel has premult
        // blending, avoid handling it this is not the case. This is not an
        // issue for bottom-most layer since there's nothing to blend with
        if (zorder > 0 && layer->blending != DrmHwcBlending::kPreMult)
          return false;

        v_limit = 2;
        break;
      case DRM_FORMAT_YVU420:
      case DRM_FORMAT_YUV420:
      case DRM_FORMAT_YUV422:
      case DRM_FORMAT_UYVY:
      case DRM_FORMAT_YUYV:
      case DRM_FORMAT_NV12:
      case DRM_FORMAT_NV21:
      case DRM_FORMAT_RGB565:
      case DRM_FORMAT_BGR565:
        v_limit = 4;
        break;
      default:
        v_limit = 2;
        break;
    }

    // 90/270 rotation swaps the axes the scaler operates on, so compare the
    // source against the transposed destination.
    if (layer->transform &
        (DrmHwcTransform::kRotate90 | DrmHwcTransform::kRotate270))
      std::swap(dst_w, dst_h);

    // check for max supported down scaling
    if (((src_w / dst_w) > h_limit) || ((src_h / dst_h) > v_limit))
      return false;

    return true;
}
336
337int PlanStageNvLimits::ProvisionPlanes(
338    std::vector<DrmCompositionPlane> *composition,
339    std::map<size_t, DrmHwcLayer *> &layers, DrmCrtc *crtc,
340    std::vector<DrmPlane *> *planes) {
341  int ret;
342
343  for (auto i = layers.begin(); i != layers.end();) {
344    // Skip layer if supported
345    if (CheckLayer(i->first, i->second)) {
346      i++;
347      continue;
348    }
349
350    if (i->second->protected_usage()) {
351      // Drop the layer if unsupported and protected, this will just display
352      // black in the area of this layer but it's better than failing miserably
353      i = layers.erase(i);
354      continue;
355    }
356
357    // If there's no precomp layer already queued, queue one now.
358    DrmCompositionPlane *precomp = GetPrecomp(composition);
359    if (precomp) {
360      precomp->source_layers().emplace_back(i->first);
361    } else if (!planes->empty()) {
362      DrmPlane *precomp_plane = planes->back();
363      planes->pop_back();
364      composition->emplace_back(DrmCompositionPlane::Type::kPrecomp,
365                                precomp_plane, crtc, i->first);
366    } else {
367      ALOGE("Not enough planes to reserve for precomp fb");
368    }
369    i = layers.erase(i);
370  }
371
372  return 0;
373}
374}
375