// rsdAllocation.cpp, revision ba24d08b4228fd1e35cd79319f15fddc11f24a49
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rsdCore.h"
#include "rsdAllocation.h"

#include "rsAllocation.h"

#ifndef RS_SERVER
#include "system/window.h"
#include "ui/Rect.h"
#include "ui/GraphicBufferMapper.h"
#endif

#ifndef RS_COMPATIBILITY_LIB
#include "rsdFrameBufferObj.h"
#include "gui/GLConsumer.h"
#include "gui/CpuConsumer.h"
#include "gui/Surface.h"
#include "hardware/gralloc.h"

#include <GLES/gl.h>
#include <GLES2/gl2.h>
#include <GLES/glext.h>
#endif

#ifdef RS_SERVER
// server requires malloc.h for memalign
#include <malloc.h>
#endif

using namespace android;
using namespace android::renderscript;


#ifndef RS_COMPATIBILITY_LIB
const static GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

GLenum rsdTypeToGLType(RsDataType t) {
    switch (t) {
    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;

    //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
    case RS_TYPE_FLOAT_32:      return GL_FLOAT;
    case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
    case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
    case RS_TYPE_SIGNED_8:      return GL_BYTE;
    case RS_TYPE_SIGNED_16:     return GL_SHORT;
    default:    break;
    }
    return 0;
}

GLenum rsdKindToGLFormat(RsDataKind k) {
    switch (k) {
    case RS_KIND_PIXEL_L: return GL_LUMINANCE;
    case RS_KIND_PIXEL_A: return GL_ALPHA;
    case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
    case RS_KIND_PIXEL_RGB: return GL_RGB;
    case RS_KIND_PIXEL_RGBA: return GL_RGBA;
    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
    default: break;
    }
    return 0;
}
#endif

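// Compute the CPU-side address of element (xoff, yoff) for the given LOD and
// cubemap face, using the per-LOD stride and face offset recorded in drvState.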
uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
                      uint32_t xoff, uint32_t yoff, uint32_t lod,
                      RsAllocationCubemapFace face) {
    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
    ptr += face * alloc->mHal.drvState.faceOffset;
    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
    ptr += xoff * alloc->mHal.state.elementSizeBytes;
    return ptr;
}


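// Copy a sub-rectangle of host memory into the allocation's GL texture with
// glTexSubImage2D (a no-op when built as the compatibility library).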
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        t = gFaceOrder[face];
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}


#ifndef RS_COMPATIBILITY_LIB
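// Push every face and LOD of the allocation's backing store into the bound GL
// texture: glTexImage2D on the first upload, glTexSubImage2D afterwards, and
// optionally regenerate mipmaps when mipmapControl requests it.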
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face ++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                             alloc->mHal.state.type->getLODDimX(lod),
                             alloc->mHal.state.type->getLODDimY(lod),
                             0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                                alloc->mHal.state.type->getLODDimX(lod),
                                alloc->mHal.state.type->getLODDimY(lod),
                                drv->glFormat, drv->glType, p);
            }
        }
    }

    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
#endif

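// Sync an allocation's contents to its GL texture. Lazily creates the texture
// name, skips allocations without a valid GL format or malloc pointer, and
// frees the host copy when the allocation is not also used by scripts.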
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    if (!drv->glType || !drv->glFormat) {
        return;
    }

    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = NULL;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}

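// Create and size the GL renderbuffer backing a render-target allocation if it
// does not exist yet.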
static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}

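// Copy a 1D allocation into its GL vertex buffer object, creating the buffer
// name on first use; marks the upload deferred again if creation fails.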
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        drv->uploadDeferred = true;
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget, alloc->mHal.state.type->getSizeBytes(),
                 alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}


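// Fill in the chroma-plane dimensions, strides and pointers for a YUV
// allocation (YV12 or NV21), reusing the lod[] slots as plane descriptors.
// Returns the number of bytes occupied by the U/V planes.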
static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
    // YUV only supports basic 2d
    // so we can stash the plane pointers in the mipmap levels.
    size_t uvSize = 0;
#ifndef RS_SERVER
    switch(yuv) {
    case HAL_PIXEL_FORMAT_YV12:
        state->lod[1].dimX = state->lod[0].dimX / 2;
        state->lod[1].dimY = state->lod[0].dimY / 2;
        state->lod[1].stride = rsRound(state->lod[0].stride >> 1, 16);
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[1].stride * state->lod[1].dimY;

        state->lod[2].dimX = state->lod[1].dimX;
        state->lod[2].dimY = state->lod[1].dimY;
        state->lod[2].stride = state->lod[1].stride;
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[1].mallocPtr) +
                (state->lod[1].stride * state->lod[1].dimY);
        uvSize += state->lod[2].stride * state->lod[2].dimY;

        state->lodCount = 3;
        break;
    case HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
        state->lod[1].dimX = state->lod[0].dimX;
        state->lod[1].dimY = state->lod[0].dimY / 2;
        state->lod[1].stride = state->lod[0].stride;
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[1].stride * state->lod[1].dimY;
        state->lodCount = 2;
        break;
    default:
        rsAssert(0);
    }
#endif
    return uvSize;
}


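// Compute the per-LOD dimensions, 16-byte-aligned strides and pointer offsets
// for the allocation described by 'type', record them in drvState relative to
// 'ptr', and return the total size in bytes (times six for cubemaps).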
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
        const Type *type, uint8_t *ptr) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    // Stride needs to be 16-byte aligned too!
    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lod[0].stride = rsRound(stride, 16);
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if(alloc->mHal.drvState.lodCount > 1) {
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride =
                    rsRound(tx * type->getElementSizeBytes(), 16);
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    } else if (alloc->mHal.state.yuv) {
        o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);

        for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
            offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
        }
    }

    alloc->mHal.drvState.faceOffset = o;

    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if(alloc->mHal.drvState.faceCount) {
        allocSize *= 6;
    }

    return allocSize;
}

static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero) {
    // We align all allocations to a 16-byte boundary.
    uint8_t* ptr = (uint8_t *)memalign(16, allocSize);
    if (!ptr) {
        return NULL;
    }
    if (forceZero) {
        memset(ptr, 0, allocSize);
    }
    return ptr;
}

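// Driver entry point: allocate the DrvAllocation bookkeeping struct, reserve
// (or adopt) the backing memory, build the pointer table, and derive the GL
// target, type and format for the allocation.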
bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    if (!drv) {
        return false;
    }
    alloc->mHal.drv = drv;

    // Calculate the object size.
    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), NULL);

    uint8_t * ptr = NULL;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {
        // IO_OUTPUT allocations are backed by buffers dequeued from the
        // attached surface, so nothing is allocated here.
    } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        // The backing memory is allocated when the surface is created
        // in getSurface.
    } else if (alloc->mHal.state.userProvidedPtr != NULL) {
        // User-provided allocation.
        // Limitations: no faces, no LODs, and the usage must be
        // USAGE_SCRIPT | USAGE_SHARED, optionally with USAGE_GRAPHICS_TEXTURE.
        if (!(alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED) ||
              alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED | RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE))) {
            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT | USAGE_SHARED or USAGE_SCRIPT | USAGE_SHARED | USAGE_GRAPHICS_TEXTURE");
            return false;
        }
        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
            ALOGE("User-allocated buffers must not have multiple faces or LODs");
            return false;
        }

        // Rows must be 16-byte aligned. Validate that here; otherwise fall back
        // to a separate, driver-owned allocation.
        if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % 16) != 0) {
            ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
            drv->useUserProvidedPtr = false;

            ptr = allocAlignedMemory(allocSize, forceZero);
            if (!ptr) {
                alloc->mHal.drv = NULL;
                free(drv);
                return false;
            }

        } else {
            drv->useUserProvidedPtr = true;
            ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
        }
    } else {
        ptr = allocAlignedMemory(allocSize, forceZero);
        if (!ptr) {
            alloc->mHal.drv = NULL;
            free(drv);
            return false;
        }
    }
    // Build the pointer tables
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
    if(allocSize != verifySize) {
        rsAssert(!"Size mismatch");
    }

#ifndef RS_SERVER
    drv->glTarget = GL_NONE;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        if (alloc->mHal.state.hasFaces) {
            drv->glTarget = GL_TEXTURE_CUBE_MAP;
        } else {
            drv->glTarget = GL_TEXTURE_2D;
        }
    } else {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
            drv->glTarget = GL_ARRAY_BUFFER;
        }
    }
#endif

#ifndef RS_COMPATIBILITY_LIB
    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
#else
    drv->glType = 0;
    drv->glFormat = 0;
#endif

    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
        drv->uploadDeferred = true;
    }


    drv->readBackFBO = NULL;

    // fill out the initial state of the buffer if we couldn't use the
    // user-provided ptr and USAGE_SHARED was accepted
    if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
        rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X,
                            alloc->getType()->getDimX(), alloc->getType()->getDimY(),
                            alloc->mHal.state.userProvidedPtr, allocSize, 0);
    }

    return true;
}

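// Release the driver resources for an allocation: GL objects, the malloc'd
// backing store (unless user-provided), the readback FBO, and any pending
// IO_OUTPUT window buffer.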
void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

#ifndef RS_COMPATIBILITY_LIB
    if (drv->bufferID) {
        // Causes a SW crash....
        //ALOGV(" mBufferID %i", mBufferID);
        //glDeleteBuffers(1, &mBufferID);
        //mBufferID = 0;
    }
    if (drv->textureID) {
        RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
        drv->textureID = 0;
    }
    if (drv->renderTargetID) {
        RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
        drv->renderTargetID = 0;
    }
#endif

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        // don't free user-allocated ptrs
        if (!(drv->useUserProvidedPtr)) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
        }
        alloc->mHal.drvState.lod[0].mallocPtr = NULL;
    }

#ifndef RS_COMPATIBILITY_LIB
    if (drv->readBackFBO != NULL) {
        delete drv->readBackFBO;
        drv->readBackFBO = NULL;
    }

    if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
        (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {

        DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
        ANativeWindow *nw = drv->wndSurface;
        if (nw) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
        }
    }
#endif

    free(drv);
    alloc->mHal.drv = NULL;
}

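// Resize a 1D allocation in place: rebuild the pointer table for the new type,
// realloc the backing store, and zero any newly added elements.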
void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
                         const Type *newType, bool zeroNew) {
    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
    const uint32_t dimX = newType->getDimX();

    // can't resize Allocations with user-allocated buffers
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
        return;
    }
    void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
    // Calculate the object size
    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, NULL);
    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
    // Build the relative pointer tables.
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
    if(s != verifySize) {
        rsAssert(!"Size mismatch");
    }


    if (dimX > oldDimX) {
        size_t stride = alloc->mHal.state.elementSizeBytes;
        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
                 0, stride * (dimX - oldDimX));
    }
}

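// Read back the contents of a render-target allocation from the GPU by
// attaching it to a temporary FBO and calling glReadPixels into the host copy.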
static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    if (drv->readBackFBO == NULL) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Revert framebuffer to its original
    lastFbo->setActive(rsc);
#endif
}


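// Sync an allocation between its script-visible memory and its graphics
// resources (texture, render target, or vertex buffer), depending on the
// direction requested by 'src' and the allocation's usage flags.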
void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
                         RsAllocationUsageType src) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        if(!alloc->getIsRenderTarget()) {
            rsc->setError(RS_ERROR_FATAL_DRIVER,
                          "Attempting to sync from render target on a "
                          "non-render-target allocation");
        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
            rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from an RGBA "
                                                 "render target");
        } else {
            rsdAllocationSyncFromFBO(rsc, alloc);
        }
        return;
    }

    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        UploadToTexture(rsc, alloc);
    } else {
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
            AllocateRenderTarget(rsc, alloc);
        }
    }
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
        UploadToBufferObject(rsc, alloc);
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        // NOP in CPU driver for now
    }

    drv->uploadDeferred = false;
}

void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    drv->uploadDeferred = true;
}

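// Create the CpuConsumer used for USAGE_IO_INPUT and return its
// IGraphicBufferProducer (with an extra strong reference) to the caller.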
void* rsdAllocationGetSurface(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    // Configure CpuConsumer to be in asynchronous mode
    drv->cpuConsumer = new CpuConsumer(2, false);
    sp<IGraphicBufferProducer> bp = drv->cpuConsumer->getProducerInterface();
    bp->incStrong(NULL);
    return bp.get();
#else
    return NULL;
#endif
}

#ifndef RS_COMPATIBILITY_LIB
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
        return false;
    }

    // Must lock the whole surface
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);

    void *dst = NULL;
    mapper.lock(drv->wndBuffer->handle,
            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
            bounds, &dst);
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);

    return true;
}
#endif

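// Attach (or detach) an ANativeWindow to an IO_OUTPUT allocation: configure
// the window's buffer count, usage, dimensions and format, then dequeue and
// map the first buffer so scripts can write into it.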
void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (nw) {
        nw->incStrong(NULL);
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        //TODO finish support for render target + script
        drv->wnd = nw;
        return;
    }

    // Cleanup old surface if there is one.
    if (drv->wndSurface) {
        ANativeWindow *old = drv->wndSurface;
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        old->cancelBuffer(old, drv->wndBuffer, -1);
        drv->wndSurface = NULL;
        old->decStrong(NULL);
    }

    if (nw != NULL) {
        int32_t r;
        uint32_t flags = 0;
        r = native_window_set_buffer_count(nw, 3);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer count.");
            goto error;
        }

        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
        }
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
            flags |= GRALLOC_USAGE_HW_RENDER;
        }

        r = native_window_set_usage(nw, flags);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
            goto error;
        }

        r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
                                                 alloc->mHal.drvState.lod[0].dimY);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
            goto error;
        }

        int format = 0;
        const Element *e = alloc->mHal.state.type->getElement();
        switch(e->getType()) {
        case RS_TYPE_UNSIGNED_8:
            switch (e->getVectorSize()) {
            case 1:
                rsAssert(e->getKind() == RS_KIND_PIXEL_A);
                format = PIXEL_FORMAT_A_8;
                break;
            case 4:
                rsAssert(e->getKind() == RS_KIND_PIXEL_RGBA);
                format = PIXEL_FORMAT_RGBA_8888;
                break;
            default:
                rsAssert(0);
            }
            break;
        default:
            rsAssert(0);
        }

        r = native_window_set_buffers_format(nw, format);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer format.");
            goto error;
        }

        IoGetBuffer(rsc, alloc, nw);
        drv->wndSurface = nw;
    }

    return;

 error:

    if (nw) {
        nw->decStrong(NULL);
    }


#endif
}

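// Queue the currently mapped buffer to the window (or swap EGL buffers for a
// render target) and immediately dequeue the next buffer for script output.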
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = drv->wndSurface;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }
    if (nw) {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
                return;
            }

            IoGetBuffer(rsc, alloc, nw);
        }
    } else {
        rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
        return;
    }
#endif
}

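// Latch the newest frame from the CpuConsumer (for script usage) or update the
// GL texture image (for graphics-only usage), repointing lod[0] at the locked
// buffer and re-deriving the YUV plane layout when needed.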
void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
        CpuConsumer::LockedBuffer lb;
        status_t ret = drv->cpuConsumer->lockNextBuffer(&lb);
        if (ret == OK) {
            if (drv->lb.data != NULL) {
                drv->cpuConsumer->unlockBuffer(drv->lb);
            }
            drv->lb = lb;
            alloc->mHal.drvState.lod[0].mallocPtr = drv->lb.data;
            alloc->mHal.drvState.lod[0].stride = drv->lb.stride *
                    alloc->mHal.state.elementSizeBytes;

            if (alloc->mHal.state.yuv) {
                DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);
            }
        } else if (ret == BAD_VALUE) {
            // No new frame, don't do anything
        } else {
            rsc->setError(RS_ERROR_DRIVER, "Error receiving IO input buffer.");
        }

    } else {
        drv->surfaceTexture->updateTexImage();
    }


#endif
}


void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         const void *data, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    size_t size = count * eSize;

    if (ptr != data) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        if (alloc->mHal.state.hasReferences) {
            alloc->incRefs(data, count);
            alloc->decRefs(ptr, count);
        }
        memcpy(ptr, data, size);
    }
    drv->uploadDeferred = true;
}

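// Copy a 2D rectangle of host data into the allocation. For malloc-backed
// allocations this is a row-by-row memcpy (plus the chroma planes for YUV
// sources); otherwise the data is pushed straight into the GL texture.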
void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            if (alloc->mHal.state.hasReferences) {
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        if (alloc->mHal.state.yuv) {
            int lod = 1;
            while (alloc->mHal.drvState.lod[lod].mallocPtr) {
                size_t lineSize = alloc->mHal.drvState.lod[lod].dimX;
                uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, lod, face);

                for (uint32_t line=(yoff >> 1); line < ((yoff+h)>>1); line++) {
                    memcpy(dst, src, lineSize);
                    src += lineSize;
                    dst += alloc->mHal.drvState.lod[lod].stride;
                }
                lod++;
            }

        }
        drv->uploadDeferred = true;
    } else {
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}

void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, uint32_t d, const void *data, size_t sizeBytes) {

}

void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         void *data, size_t sizeBytes) {
    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    if (data != ptr) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        memcpy(data, ptr, count * eSize);
    }
}

void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
                                uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                                uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            memcpy(dst, src, lineSize);
            dst += stride;
            src += alloc->mHal.drvState.lod[lod].stride;
        }
    } else {
        ALOGE("Add code to readback from non-script memory");
    }
}


void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes) {

}

void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {
    return alloc->mHal.drvState.lod[0].mallocPtr;
}

void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {

}

void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, size_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
}


void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                                      RsAllocationCubemapFace srcFace) {
    size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t i = 0; i < h; i ++) {
        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstLod, dstFace);
        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcLod, srcFace);
        memcpy(dstPtr, srcPtr, w * elementSize);

        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
        //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
    }
}

void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                               RsAllocationCubemapFace srcFace) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
                                     dstLod, dstFace, w, h, srcAlloc,
                                     srcXoff, srcYoff, srcLod, srcFace);
}

void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
                               uint32_t dstLod, RsAllocationCubemapFace dstFace,
                               uint32_t w, uint32_t h, uint32_t d,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
                               uint32_t srcLod, RsAllocationCubemapFace srcFace) {
}

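// Write a single field (sub-element cIdx) of one element at x (and y for the
// 2D variant), updating reference counts when the element contains object
// references.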
void rsdAllocationElementData1D(const Context *rsc, const Allocation *alloc,
                                uint32_t x,
                                const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

void rsdAllocationElementData2D(const Context *rsc, const Allocation *alloc,
                                uint32_t x, uint32_t y,
                                const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, y, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

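// The mip* helpers downsample one LOD into the next by averaging 2x2 pixel
// blocks: a 565 box filter, an 8888 box filter, and a single-channel average.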
static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

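// Generate the full mipmap chain on the CPU for each face of the allocation,
// choosing the filter by element size (32, 16, or 8 bits per pixel).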
void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
    if(!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }
    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
    for (uint32_t face = 0; face < numFaces; face ++) {
        for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
            switch (alloc->getType()->getElement()->getSizeBits()) {
            case 32:
                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 16:
                mip565(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 8:
                mip8(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            }
        }
    }
}