/*
 * Copyright (C) 2010 The Android Open Source Project
 * Copyright (c) 2011-2012 Code Aurora Forum. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <cutils/properties.h>
#include <sys/mman.h>

#include <genlock.h>

#include "gr.h"
#include "gpu.h"
#include "memalloc.h"
#include "alloc_controller.h"

using namespace gralloc;

gpu_context_t::gpu_context_t(const private_module_t* module,
                             IAllocController* alloc_ctrl) :
    mAllocCtrl(alloc_ctrl)
{
    // Zero out the alloc_device_t
    memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));

    // Initialize the procs
    common.tag     = HARDWARE_DEVICE_TAG;
    common.version = 0;
    common.module  = const_cast<hw_module_t*>(&module->base.common);
    common.close   = gralloc_close;
    alloc          = gralloc_alloc;
    free           = gralloc_free;
}

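// Hands out a buffer backed by the framebuffer device for page-flipping, or
// falls back to a regular allocation when only a single framebuffer is
// available. Must be called with m->lock held; gralloc_alloc_framebuffer()
// below is the locking wrapper.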
int gpu_context_t::gralloc_alloc_framebuffer_locked(size_t size, int usage,
                                                    buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // We don't support framebuffer allocations with graphics heap flags
    if (usage & GRALLOC_HEAP_MASK) {
        return -EINVAL;
    }

    if (m->framebuffer == NULL) {
        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
        return -EINVAL;
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    size_t bufferSize = m->finfo.line_length * m->info.yres;

    // Adreno needs the FB size to be page aligned
    bufferSize = roundUpToPageSize(bufferSize);

    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
                                    m->fbFormat, m->info.xres, m->info.yres);
    }

    if (bufferMask >= ((1LU << numBuffers) - 1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // Create a "fake" handle for it. Set the PMEM flag as well,
    // since adreno treats the FB memory as pmem.
    intptr_t vaddr = intptr_t(m->framebuffer->base);
    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), bufferSize,
                                                 private_handle_t::PRIV_FLAGS_USES_PMEM |
                                                 private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
                                                 BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
                                                 m->info.yres);

    // Find a free slot
    for (uint32_t i = 0; i < numBuffers; i++) {
        if ((bufferMask & (1LU << i)) == 0) {
            m->bufferMask |= (1LU << i);
            break;
        }
        vaddr += bufferSize;
    }

    hnd->base = vaddr;
    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
    *pHandle = hnd;
    return 0;
}

int gpu_context_t::gralloc_alloc_framebuffer(size_t size, int usage,
                                             buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    pthread_mutex_lock(&m->lock);
    int err = gralloc_alloc_framebuffer_locked(size, usage, pHandle);
    pthread_mutex_unlock(&m->lock);
    return err;
}

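// Allocates a regular (non-framebuffer) buffer through the memory allocator
// selected by the usage flags, and wraps the result in a private_handle_t
// carrying the private flags derived from that usage.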
int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
                                        buffer_handle_t* pHandle, int bufferType,
                                        int format, int width, int height)
{
    int err = 0;
    int flags = 0;
    size = roundUpToPageSize(size);
    alloc_data data;
    data.offset = 0;
    data.fd = -1;
    data.base = 0;
    data.size = size;
    if (format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
        data.align = 8192;
    else
        data.align = getpagesize();
    data.pHandle = (unsigned int) pHandle;
    err = mAllocCtrl->allocate(data, usage);

    if (usage & GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED) {
        flags |= private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED;
    }

    if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_ONLY) {
        flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
        // The EXTERNAL_BLOCK flag is always an add-on
        if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_BLOCK) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_BLOCK;
        }
        if (usage & GRALLOC_USAGE_PRIVATE_EXTERNAL_CC) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_CC;
        }
    }

    if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER) {
        flags |= private_handle_t::PRIV_FLAGS_VIDEO_ENCODER;
    }

    if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE) {
        flags |= private_handle_t::PRIV_FLAGS_CAMERA_WRITE;
    }

    if (usage & GRALLOC_USAGE_HW_CAMERA_READ) {
        flags |= private_handle_t::PRIV_FLAGS_CAMERA_READ;
    }

    if (err == 0) {
        flags |= data.allocType;
        private_handle_t* hnd = new private_handle_t(data.fd, size, flags,
                                                     bufferType, format, width,
                                                     height);

        hnd->offset = data.offset;
        hnd->base = int(data.base) + data.offset;
        *pHandle = hnd;
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));
    return err;
}

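// Classifies a pixel format as a UI (RGB) or video (YUV) buffer. Values below
// 0x7 correspond to the RGB HAL formats; R_8 and RG_88 are also treated as UI.
// Everything else defaults to a video buffer.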
void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
                                                    int *bufferType)
{
    *bufferType = BUFFER_TYPE_VIDEO;

    if (inputFormat < 0x7) {
        // RGB formats
        *bufferType = BUFFER_TYPE_UI;
    } else if ((inputFormat == HAL_PIXEL_FORMAT_R_8) ||
               (inputFormat == HAL_PIXEL_FORMAT_RG_88)) {
        *bufferType = BUFFER_TYPE_UI;
    }
}

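// Common allocation path for gralloc_alloc() and gralloc_alloc_size():
// resolves the effective format, computes the aligned dimensions and size,
// routes the request to the framebuffer or regular allocator, and attaches a
// genlock lock to the new handle.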
int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
                              buffer_handle_t* pHandle, int* pStride,
                              size_t bufferSize) {
    if (!pHandle || !pStride)
        return -EINVAL;

    size_t size;
    int alignedw, alignedh;
    int grallocFormat = format;
    int bufferType;

    // If the input format is HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED, gralloc
    // assigns a format based on the usage bits.
    if (format == HAL_PIXEL_FORMAT_IMPLEMENTATION_DEFINED) {
        if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)
            grallocFormat = HAL_PIXEL_FORMAT_YCbCr_420_SP; //NV12
        else if (usage & GRALLOC_USAGE_HW_CAMERA_READ)
            grallocFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; //NV21
        else if (usage & GRALLOC_USAGE_HW_CAMERA_WRITE)
            grallocFormat = HAL_PIXEL_FORMAT_YCrCb_420_SP; //NV21
    }

    getGrallocInformationFromFormat(grallocFormat, &bufferType);
    size = getBufferSizeAndDimensions(w, h, grallocFormat, alignedw, alignedh);

    if ((ssize_t)size <= 0)
        return -EINVAL;
    size = (bufferSize >= size) ? bufferSize : size;

    // All buffers marked as protected or for external
    // display need to go to overlay
    if ((usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
        (usage & GRALLOC_USAGE_PROTECTED) ||
        (usage & GRALLOC_USAGE_PRIVATE_CP_BUFFER)) {
        bufferType = BUFFER_TYPE_VIDEO;
    }
    int err;
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    uint32_t bufferMask = m->bufferMask;
    uint32_t numBuffers = m->numBuffers;
    if ((usage & GRALLOC_USAGE_HW_FB) && (bufferMask < ((1LU << numBuffers) - 1))) {
        err = gralloc_alloc_framebuffer(size, usage, pHandle);
    } else {
        err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
                                   grallocFormat, alignedw, alignedh);
    }

    if (err < 0) {
        return err;
    }

    // Create a genlock lock for this buffer handle.
    err = genlock_create_lock((native_handle_t*)(*pHandle));
    if (err) {
        ALOGE("%s: genlock_create_lock failed", __FUNCTION__);
        // Free the buffer that was just allocated
        free_impl(reinterpret_cast<private_handle_t const*>(*pHandle));
        return err;
    }
    *pStride = alignedw;
    return 0;
}

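// Releases a buffer: framebuffer slots are returned to the buffer mask, other
// buffers are unmapped and handed back to their allocator, and the genlock
// lock attached at allocation time is released before the handle is deleted.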
int gpu_context_t::free_impl(private_handle_t const* hnd) {
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
        // free this buffer
        const size_t bufferSize = m->finfo.line_length * m->info.yres;
        int index = (hnd->base - m->framebuffer->base) / bufferSize;
        m->bufferMask &= ~(1 << index);
    } else {
        terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
        IMemAlloc* memalloc = mAllocCtrl->getAllocator(hnd->flags);
        int err = memalloc->free_buffer((void*)hnd->base, (size_t) hnd->size,
                                        hnd->offset, hnd->fd);
        if (err)
            return err;
    }

    // Release the genlock
    int err = genlock_release_lock((native_handle_t*)hnd);
    if (err) {
        ALOGE("%s: genlock_release_lock failed", __FUNCTION__);
    }

    delete hnd;
    return 0;
}

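// HAL entry points: thin wrappers that recover the gpu_context_t from the
// device argument and forward to the implementation above.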
int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
                                 int usage, buffer_handle_t* pHandle,
                                 int* pStride)
{
    if (!dev) {
        return -EINVAL;
    }
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
}

int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h,
                                      int format, int usage,
                                      buffer_handle_t* pHandle, int* pStride,
                                      int bufferSize)
{
    if (!dev) {
        return -EINVAL;
    }
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
}

int gpu_context_t::gralloc_free(alloc_device_t* dev,
                                buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;

    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->free_impl(hnd);
}

/*****************************************************************************/

int gpu_context_t::gralloc_close(struct hw_device_t *dev)
{
    gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
    if (ctx) {
        /* TODO: keep a list of all buffer_handle_t created, and free them
         * all here.
         */
        delete ctx;
    }
    return 0;
}