rsdAllocation.cpp revision 0d44f12aaeb34f7ed6d2a9acafac459646773e47
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rsdCore.h"
#include "rsdAllocation.h"

#include "rsAllocation.h"

#ifndef RS_SERVER
#include "system/window.h"
#include "ui/Rect.h"
#include "ui/GraphicBufferMapper.h"
#endif

#ifndef RS_COMPATIBILITY_LIB
#include "rsdFrameBufferObj.h"
#include "gui/GLConsumer.h"
#include "gui/CpuConsumer.h"
#include "gui/Surface.h"
#include "hardware/gralloc.h"

#include <GLES/gl.h>
#include <GLES2/gl2.h>
#include <GLES/glext.h>
#endif

#ifdef RS_SERVER
// server requires malloc.h for memalign
#include <malloc.h>
#endif

using namespace android;
using namespace android::renderscript;

#ifndef RS_COMPATIBILITY_LIB
const static GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

GLenum rsdTypeToGLType(RsDataType t) {
    switch (t) {
    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;

    //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
    case RS_TYPE_FLOAT_32:      return GL_FLOAT;
    case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
    case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
    case RS_TYPE_SIGNED_8:      return GL_BYTE;
    case RS_TYPE_SIGNED_16:     return GL_SHORT;
    default:    break;
    }
    return 0;
}

GLenum rsdKindToGLFormat(RsDataKind k) {
    switch (k) {
    case RS_KIND_PIXEL_L: return GL_LUMINANCE;
    case RS_KIND_PIXEL_A: return GL_ALPHA;
    case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
    case RS_KIND_PIXEL_RGB: return GL_RGB;
    case RS_KIND_PIXEL_RGBA: return GL_RGBA;
    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
    default: break;
    }
    return 0;
}
#endif

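// Returns the address of the element at (xoff, yoff) for the given LOD and
// cubemap face, using the per-LOD stride and face offset stored in drvState.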
uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
                      uint32_t xoff, uint32_t yoff, uint32_t lod,
                      RsAllocationCubemapFace face) {
    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
    ptr += face * alloc->mHal.drvState.faceOffset;
    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
    ptr += xoff * alloc->mHal.state.elementSizeBytes;
    return ptr;
}


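// Pushes a w x h sub-rectangle of host memory into the GL texture that backs
// this allocation (compiled out for the compatibility library).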
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        t = gFaceOrder[face];
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}


#ifndef RS_COMPATIBILITY_LIB
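// Uploads every face and LOD of the allocation to its texture. The first
// upload defines storage with glTexImage2D; later syncs use glTexSubImage2D.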
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face ++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                             alloc->mHal.state.type->getLODDimX(lod),
                             alloc->mHal.state.type->getLODDimY(lod),
                             0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                                alloc->mHal.state.type->getLODDimX(lod),
                                alloc->mHal.state.type->getLODDimY(lod),
                                drv->glFormat, drv->glType, p);
            }
        }
    }

    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
#endif

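// Syncs script-side memory into the allocation's texture, generating the
// texture object on first use. IO_INPUT allocations only get a texture name
// here; their contents arrive through the consumer path instead. If the
// allocation has no SCRIPT usage, the system memory copy is freed after upload.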
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    if (!drv->glType || !drv->glFormat) {
        return;
    }

    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = NULL;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}

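// Lazily creates and sizes the renderbuffer backing a render-target allocation.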
static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}

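// Copies a one-dimensional allocation into its GL buffer object (vertex data).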
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        drv->uploadDeferred = true;
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget, alloc->mHal.state.type->getSizeBytes(),
                 alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}


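// Describes the chroma planes of a YV12 or NV21 buffer by reusing the otherwise
// unused mipmap LOD slots, and returns the extra bytes those planes occupy.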
static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
    // YUV only supports basic 2d
    // so we can stash the plane pointers in the mipmap levels.
    size_t uvSize = 0;
#ifndef RS_SERVER
    switch(yuv) {
    case HAL_PIXEL_FORMAT_YV12:
        state->lod[1].dimX = state->lod[0].dimX / 2;
        state->lod[1].dimY = state->lod[0].dimY / 2;
        state->lod[1].stride = rsRound(state->lod[0].stride >> 1, 16);
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[1].stride * state->lod[1].dimY;

        state->lod[2].dimX = state->lod[1].dimX;
        state->lod[2].dimY = state->lod[1].dimY;
        state->lod[2].stride = state->lod[1].stride;
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[1].mallocPtr) +
                (state->lod[1].stride * state->lod[1].dimY);
        uvSize += state->lod[2].stride * state->lod[2].dimY;

        state->lodCount = 3;
        break;
    case HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
        state->lod[1].dimX = state->lod[0].dimX;
        state->lod[1].dimY = state->lod[0].dimY / 2;
        state->lod[1].stride = state->lod[0].stride;
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[1].stride * state->lod[1].dimY;
        state->lodCount = 2;
        break;
    default:
        rsAssert(0);
    }
#endif
    return uvSize;
}


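// Computes per-LOD dimensions, strides and pointers for an allocation of the
// given type, with 'ptr' as the base address (may be NULL when only the size
// is needed), and returns the total number of bytes the allocation requires.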
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
        const Type *type, uint8_t *ptr) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    // Stride needs to be 16-byte aligned too!
    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lod[0].stride = rsRound(stride, 16);
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if(alloc->mHal.drvState.lodCount > 1) {
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride =
                    rsRound(tx * type->getElementSizeBytes(), 16);
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    } else if (alloc->mHal.state.yuv) {
        o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);

        for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
            offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
        }
    }

    alloc->mHal.drvState.faceOffset = o;

    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if(alloc->mHal.drvState.faceCount) {
        allocSize *= 6;
    }

    return allocSize;
}

static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero) {
    // We align all allocations to a 16-byte boundary.
    uint8_t* ptr = (uint8_t *)memalign(16, allocSize);
    if (!ptr) {
        return NULL;
    }
    if (forceZero) {
        memset(ptr, 0, allocSize);
    }
    return ptr;
}

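// Driver-side allocation setup: builds the pointer table, then either adopts a
// user-provided USAGE_SHARED buffer, defers to the IO surface machinery, or
// allocates its own 16-byte-aligned memory.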
bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    if (!drv) {
        return false;
    }
    alloc->mHal.drv = drv;

    // Calculate the object size.
    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), NULL);

    uint8_t * ptr = NULL;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {
        // Memory is mapped from the ANativeWindow buffer in rsdAllocationSetSurface.
    } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        // Allocation is allocated when the surface is created
        // in getSurface
    } else if (alloc->mHal.state.userProvidedPtr != NULL) {
        // user-provided allocation
        // limitations: no faces, no LODs, USAGE_SCRIPT | USAGE_SHARED only
        if (alloc->mHal.state.usageFlags != (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED)) {
            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT and USAGE_SHARED");
            return false;
        }
        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
            ALOGE("User-allocated buffers must not have multiple faces or LODs");
            return false;
        }

        // Rows must be 16-byte aligned; validate that here, otherwise fall back
        // to a separate allocation instead of using the user-backed one.
        if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % 16) != 0) {
            ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
            drv->useUserProvidedPtr = false;

            ptr = allocAlignedMemory(allocSize, forceZero);
            if (!ptr) {
                alloc->mHal.drv = NULL;
                free(drv);
                return false;
            }

        } else {
            drv->useUserProvidedPtr = true;
            ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
        }
    } else {
        ptr = allocAlignedMemory(allocSize, forceZero);
        if (!ptr) {
            alloc->mHal.drv = NULL;
            free(drv);
            return false;
        }
    }
    // Build the pointer tables
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
    if(allocSize != verifySize) {
        rsAssert(!"Size mismatch");
    }

#ifndef RS_SERVER
    drv->glTarget = GL_NONE;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        if (alloc->mHal.state.hasFaces) {
            drv->glTarget = GL_TEXTURE_CUBE_MAP;
        } else {
            drv->glTarget = GL_TEXTURE_2D;
        }
    } else {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
            drv->glTarget = GL_ARRAY_BUFFER;
        }
    }
#endif

#ifndef RS_COMPATIBILITY_LIB
    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
#else
    drv->glType = 0;
    drv->glFormat = 0;
#endif

    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
        drv->uploadDeferred = true;
    }


    drv->readBackFBO = NULL;

    // fill out the initial state of the buffer if we couldn't use the user-provided ptr and USAGE_SHARED was accepted
    if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
        rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X, alloc->getType()->getDimX(), alloc->getType()->getDimY(), alloc->mHal.state.userProvidedPtr, allocSize, 0);
    }

    return true;
}

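// Tears down the driver state: deletes GL objects, frees non-user-provided
// memory, and returns any outstanding IO output buffer to its window.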
void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

#ifndef RS_COMPATIBILITY_LIB
    if (drv->bufferID) {
        // Causes a SW crash....
        //ALOGV(" mBufferID %i", mBufferID);
        //glDeleteBuffers(1, &mBufferID);
        //mBufferID = 0;
    }
    if (drv->textureID) {
        RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
        drv->textureID = 0;
    }
    if (drv->renderTargetID) {
        RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
        drv->renderTargetID = 0;
    }
#endif

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        // don't free user-allocated ptrs
        if (!(drv->useUserProvidedPtr)) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
        }
        alloc->mHal.drvState.lod[0].mallocPtr = NULL;
    }

#ifndef RS_COMPATIBILITY_LIB
    if (drv->readBackFBO != NULL) {
        delete drv->readBackFBO;
        drv->readBackFBO = NULL;
    }

    if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
        (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {

        DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
        ANativeWindow *nw = drv->wndSurface;
        if (nw) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
        }
    }
#endif

    free(drv);
    alloc->mHal.drv = NULL;
}

void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
                         const Type *newType, bool zeroNew) {
    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
    const uint32_t dimX = newType->getDimX();

    // can't resize Allocations with user-allocated buffers
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
        return;
    }
    void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
    // Calculate the object size
    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, NULL);
    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
    // Build the relative pointer tables.
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
    if(s != verifySize) {
        rsAssert(!"Size mismatch");
    }


    if (dimX > oldDimX) {
        size_t stride = alloc->mHal.state.elementSizeBytes;
        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
                 0, stride * (dimX - oldDimX));
    }
}

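// Reads the contents of a render-target allocation back into system memory by
// attaching it to a read-back FBO and calling glReadPixels.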
static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    if (drv->readBackFBO == NULL) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Revert framebuffer to its original
    lastFbo->setActive(rsc);
#endif
}


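// Propagates allocation contents between the script and graphics domains:
// uploads to textures/buffer objects, or reads back from a render target.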
void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
                         RsAllocationUsageType src) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        if(!alloc->getIsRenderTarget()) {
            rsc->setError(RS_ERROR_FATAL_DRIVER,
                          "Attempting to sync allocation from render target, "
                          "for non-render target allocation");
        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
            rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from RGBA "
                                                 "render target");
        } else {
            rsdAllocationSyncFromFBO(rsc, alloc);
        }
        return;
    }

    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        UploadToTexture(rsc, alloc);
    } else {
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
            AllocateRenderTarget(rsc, alloc);
        }
    }
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
        UploadToBufferObject(rsc, alloc);
    }

    drv->uploadDeferred = false;
}

void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    drv->uploadDeferred = true;
}

void* rsdAllocationGetSurface(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    // Configure CpuConsumer to be in asynchronous mode
    drv->cpuConsumer = new CpuConsumer(2, false);
    sp<IGraphicBufferProducer> bp = drv->cpuConsumer->getProducerInterface();
    bp->incStrong(NULL);
    return bp.get();
#else
    return NULL;
#endif
}

#ifndef RS_COMPATIBILITY_LIB
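// Dequeues the next buffer from the window, locks it for CPU writes, and
// points lod[0] at the mapped memory.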
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
        return false;
    }

    // Must lock the whole surface
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);

    void *dst = NULL;
    mapper.lock(drv->wndBuffer->handle,
            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
            bounds, &dst);
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);

    return true;
}
#endif

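// Attaches (or detaches) an ANativeWindow for IO output: configures buffer
// count, gralloc usage, dimensions and pixel format, then grabs the first buffer.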
void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *old = drv->wndSurface;

    if (nw) {
        nw->incStrong(NULL);
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        //TODO finish support for render target + script
        drv->wnd = nw;
        return;
    }

    // Cleanup old surface if there is one.
    if (drv->wndSurface) {
        ANativeWindow *old = drv->wndSurface;
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        old->cancelBuffer(old, drv->wndBuffer, -1);
        drv->wndSurface = NULL;
        old->decStrong(NULL);
    }

    if (nw != NULL) {
        int32_t r;
        uint32_t flags = 0;
        r = native_window_set_buffer_count(nw, 3);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer count.");
            goto error;
        }

        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
        }
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
            flags |= GRALLOC_USAGE_HW_RENDER;
        }

        r = native_window_set_usage(nw, flags);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
            goto error;
        }

        r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
                                                 alloc->mHal.drvState.lod[0].dimY);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
            goto error;
        }

        int format = 0;
        const Element *e = alloc->mHal.state.type->getElement();
        switch(e->getType()) {
        case RS_TYPE_UNSIGNED_8:
            switch (e->getVectorSize()) {
            case 1:
                rsAssert(e->getKind() == RS_KIND_PIXEL_A);
                format = PIXEL_FORMAT_A_8;
                break;
            case 4:
                rsAssert(e->getKind() == RS_KIND_PIXEL_RGBA);
                format = PIXEL_FORMAT_RGBA_8888;
                break;
            default:
                rsAssert(0);
            }
            break;
        default:
            rsAssert(0);
        }

        r = native_window_set_buffers_format(nw, format);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer format.");
            goto error;
        }

        IoGetBuffer(rsc, alloc, nw);
        drv->wndSurface = nw;
    }

    return;

 error:

    if (nw) {
        nw->decStrong(NULL);
    }


#endif
}

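// Sends the current IO buffer to the consumer: unlocks and queues the window
// buffer (or swaps the EGL surface for render targets), then dequeues the next.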
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = drv->wndSurface;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }
    if (nw) {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
                return;
            }

            IoGetBuffer(rsc, alloc, nw);
        }
    } else {
        rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
        return;
    }
#endif
}

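// Receives the newest frame from the producer: locks the next CpuConsumer
// buffer for script access, or updates the GLConsumer texture otherwise.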
void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
        CpuConsumer::LockedBuffer lb;
        status_t ret = drv->cpuConsumer->lockNextBuffer(&lb);
        if (ret == OK) {
            if (drv->lb.data != NULL) {
                drv->cpuConsumer->unlockBuffer(drv->lb);
            }
            drv->lb = lb;
            alloc->mHal.drvState.lod[0].mallocPtr = drv->lb.data;
            alloc->mHal.drvState.lod[0].stride = drv->lb.stride *
                    alloc->mHal.state.elementSizeBytes;

            if (alloc->mHal.state.yuv) {
                DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);
            }
        } else if (ret == BAD_VALUE) {
            // No new frame, don't do anything
        } else {
            rsc->setError(RS_ERROR_DRIVER, "Error receiving IO input buffer.");
        }

    } else {
        drv->surfaceTexture->updateTexImage();
    }


#endif
}


void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         const void *data, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    size_t size = count * eSize;

    if (ptr != data) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        if (alloc->mHal.state.hasReferences) {
            alloc->incRefs(data, count);
            alloc->decRefs(ptr, count);
        }
        memcpy(ptr, data, size);
    }
    drv->uploadDeferred = true;
}

void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            if (alloc->mHal.state.hasReferences) {
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        if (alloc->mHal.state.yuv) {
            int lod = 1;
            while (alloc->mHal.drvState.lod[lod].mallocPtr) {
                size_t lineSize = alloc->mHal.drvState.lod[lod].dimX;
                uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, lod, face);

                for (uint32_t line=(yoff >> 1); line < ((yoff+h)>>1); line++) {
                    memcpy(dst, src, lineSize);
                    src += lineSize;
                    dst += alloc->mHal.drvState.lod[lod].stride;
                }
                lod++;
            }

        }
        drv->uploadDeferred = true;
    } else {
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}

void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, uint32_t d, const void *data, size_t sizeBytes) {

}

void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         void *data, size_t sizeBytes) {
    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    if (data != ptr) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        memcpy(data, ptr, count * eSize);
    }
}

void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
                                uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                                uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            memcpy(dst, src, lineSize);
            dst += stride;
            src += alloc->mHal.drvState.lod[lod].stride;
        }
    } else {
        ALOGE("Add code to readback from non-script memory");
    }
}


void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes) {

}

void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {
    return alloc->mHal.drvState.lod[0].mallocPtr;
}

void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {

}

void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, size_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
}


void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                                      RsAllocationCubemapFace srcFace) {
    size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t i = 0; i < h; i ++) {
        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstLod, dstFace);
        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcLod, srcFace);
        memcpy(dstPtr, srcPtr, w * elementSize);

        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
        //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
    }
}

void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                               RsAllocationCubemapFace srcFace) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
                                     dstLod, dstFace, w, h, srcAlloc,
                                     srcXoff, srcYoff, srcLod, srcFace);
}

void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
                               uint32_t dstLod, RsAllocationCubemapFace dstFace,
                               uint32_t w, uint32_t h, uint32_t d,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
                               uint32_t srcLod, RsAllocationCubemapFace srcFace) {
}

void rsdAllocationElementData1D(const Context *rsc, const Allocation *alloc,
                                uint32_t x,
                                const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

void rsdAllocationElementData2D(const Context *rsc, const Allocation *alloc,
                                uint32_t x, uint32_t y,
                                const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, y, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

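// 2x2 box-filter downsampling helpers used to build the next mip level for
// 565, 8888 and single-channel 8-bit formats.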
static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

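// Generates the full mip chain in software by box-filtering each face level
// by level, dispatching on the element size.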
void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
    if(!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }
    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
    for (uint32_t face = 0; face < numFaces; face ++) {
        for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
            switch (alloc->getType()->getElement()->getSizeBits()) {
            case 32:
                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 16:
                mip565(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 8:
                mip8(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            }
        }
    }
}
