rsdAllocation.cpp revision b8a94e26c0a5e8f58d5b6ed04e46b411e95b77a4
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rsdCore.h"
#include "rsdAllocation.h"

#include "rsAllocation.h"

#if !defined(RS_SERVER) && !defined(RS_COMPATIBILITY_LIB)
#include "system/window.h"
#include "ui/Rect.h"
#include "ui/GraphicBufferMapper.h"
#endif

#ifdef RS_COMPATIBILITY_LIB
#include "rsCompatibilityLib.h"
#else
#include "rsdFrameBufferObj.h"
#include "gui/GLConsumer.h"
#include "gui/CpuConsumer.h"
#include "gui/Surface.h"
#include "hardware/gralloc.h"

#include <GLES/gl.h>
#include <GLES2/gl2.h>
#include <GLES/glext.h>
#endif

#ifdef RS_SERVER
// server requires malloc.h for memalign
#include <malloc.h>
#endif

using namespace android;
using namespace android::renderscript;


#ifndef RS_COMPATIBILITY_LIB
const static GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

GLenum rsdTypeToGLType(RsDataType t) {
    switch (t) {
    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;

    //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
    case RS_TYPE_FLOAT_32:      return GL_FLOAT;
    case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
    case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
    case RS_TYPE_SIGNED_8:      return GL_BYTE;
    case RS_TYPE_SIGNED_16:     return GL_SHORT;
    default:    break;
    }
    return 0;
}

GLenum rsdKindToGLFormat(RsDataKind k) {
    switch (k) {
    case RS_KIND_PIXEL_L: return GL_LUMINANCE;
    case RS_KIND_PIXEL_A: return GL_ALPHA;
    case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
    case RS_KIND_PIXEL_RGB: return GL_RGB;
    case RS_KIND_PIXEL_RGBA: return GL_RGBA;
    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
    default: break;
    }
    return 0;
}
#endif

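// Computes the CPU-side address of the element at (xoff, yoff, zoff) for the
// given LOD and cubemap face, using the per-LOD stride and the face offset
// recorded in drvState by AllocationBuildPointerTable().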
uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
                      uint32_t xoff, uint32_t yoff, uint32_t zoff,
                      uint32_t lod, RsAllocationCubemapFace face) {
    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
    ptr += face * alloc->mHal.drvState.faceOffset;
    ptr += zoff * alloc->mHal.drvState.lod[lod].dimY * alloc->mHal.drvState.lod[lod].stride;
    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
    ptr += xoff * alloc->mHal.state.elementSizeBytes;
    return ptr;
}


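// Pushes a sub-rectangle of host data into the backing GL texture with
// glTexSubImage2D. For cubemap allocations the target face is selected from
// gFaceOrder; this is a no-op when building the compatibility library.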
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        t = gFaceOrder[face];
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}


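// Uploads every face and LOD of the allocation to the bound GL texture.
// glTexImage2D is used for the first upload (which also defines the texture
// storage) and glTexSubImage2D for subsequent syncs.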
#ifndef RS_COMPATIBILITY_LIB
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face ++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                            alloc->mHal.state.type->getLODDimX(lod),
                            alloc->mHal.state.type->getLODDimY(lod),
                            0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                            alloc->mHal.state.type->getLODDimX(lod),
                            alloc->mHal.state.type->getLODDimY(lod),
                            drv->glFormat, drv->glType, p);
            }
        }
    }

    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
#endif

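// Syncs script-side memory into the GL texture. IO_INPUT allocations only get
// a texture name here (their contents presumably arrive through the GL
// consumer path instead), and the malloc'd backing store is released once the
// allocation is no longer used from scripts.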
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    if (!drv->glType || !drv->glFormat) {
        return;
    }

    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = NULL;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}

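// Lazily creates and sizes a GL renderbuffer for allocations used as render
// targets; does nothing if one already exists or no GL format is known.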
static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}

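// Copies a 1D allocation into a GL buffer object (vertex data). On failure
// the upload is marked deferred so it can be retried on the next sync.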
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        drv->uploadDeferred = true;
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget,
                alloc->mHal.state.type->getPackedSizeBytes(),
                alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}


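// Fills in the chroma-plane layout for a YUV allocation by reusing the LOD
// slots: lod[0] holds the Y plane and lod[1]/lod[2] hold the half-resolution
// chroma planes. Returns the number of bytes the chroma planes add beyond the
// Y plane; for YCbCr_420_888 the pointers are filled in later by ioReceive().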
static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
    // YUV only supports basic 2d
    // so we can stash the plane pointers in the mipmap levels.
    size_t uvSize = 0;
    state->lod[1].dimX = state->lod[0].dimX / 2;
    state->lod[1].dimY = state->lod[0].dimY / 2;
    state->lod[2].dimX = state->lod[0].dimX / 2;
    state->lod[2].dimY = state->lod[0].dimY / 2;
    state->yuv.shift = 1;
    state->yuv.step = 1;
    state->lodCount = 3;

#ifndef RS_SERVER
    switch (yuv) {
    case HAL_PIXEL_FORMAT_YV12:
        state->lod[2].stride = rsRound(state->lod[0].stride >> 1, 16);
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[2].stride * state->lod[2].dimY;

        state->lod[1].stride = state->lod[2].stride;
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) +
                (state->lod[2].stride * state->lod[2].dimY);
        uvSize += state->lod[1].stride * state->lod[2].dimY;
        break;
    case HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
        //state->lod[1].dimX = state->lod[0].dimX;
        state->lod[1].stride = state->lod[0].stride;
        state->lod[2].stride = state->lod[0].stride;
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) + 1;
        uvSize += state->lod[1].stride * state->lod[1].dimY;
        state->yuv.step = 2;
        break;
#ifndef RS_COMPATIBILITY_LIB
    case HAL_PIXEL_FORMAT_YCbCr_420_888:
        // This will be filled in by ioReceive()
        break;
#endif
    default:
        rsAssert(0);
    }
#endif
    return uvSize;
}


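// Populates drvState (dimensions, strides, per-LOD pointers, face offset) for
// the given Type and base pointer, and returns the total size in bytes the
// allocation requires. Called once with ptr == NULL to size the allocation,
// and again after the memory is obtained to build the real pointer table.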
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
        const Type *type, uint8_t *ptr) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    // Stride needs to be 16-byte aligned too!
    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lod[0].stride = rsRound(stride, 16);
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if (alloc->mHal.drvState.lodCount > 1) {
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride =
                    rsRound(tx * type->getElementSizeBytes(), 16);
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    } else if (alloc->mHal.state.yuv) {
        o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);

        for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
            offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
        }
    }

    alloc->mHal.drvState.faceOffset = o;

    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if (alloc->mHal.drvState.faceCount) {
        allocSize *= 6;
    }

    return allocSize;
}

static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero) {
    // We align all allocations to a 16-byte boundary.
    uint8_t* ptr = (uint8_t *)memalign(16, allocSize);
    if (!ptr) {
        return NULL;
    }
    if (forceZero) {
        memset(ptr, 0, allocSize);
    }
    return ptr;
}

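// Driver-side allocation setup: sizes the allocation, obtains backing memory
// (a 16-byte-aligned malloc, a user-provided pointer, or nothing for
// IO-backed allocations), builds the pointer table, and records the GL
// target/format used when the allocation is later synced to graphics.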
bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    if (!drv) {
        return false;
    }
    alloc->mHal.drv = drv;

    // Calculate the object size.
    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), NULL);

    uint8_t * ptr = NULL;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {

    } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        // Allocation is allocated when the surface is created
        // in getSurface
    } else if (alloc->mHal.state.userProvidedPtr != NULL) {
        // user-provided allocation
        // limitations: no faces, no LOD, USAGE_SCRIPT or SCRIPT+TEXTURE only
        if (!(alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED) ||
              alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED |
                                               RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE))) {
            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT | USAGE_SHARED or "
                  "USAGE_SCRIPT | USAGE_SHARED | USAGE_GRAPHICS_TEXTURE");
            return false;
        }
        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
            ALOGE("User-allocated buffers must not have multiple faces or LODs");
            return false;
        }

        // rows must be 16-byte aligned
        // validate that here, otherwise fall back to not use the user-backed allocation
        if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % 16) != 0) {
            ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
            drv->useUserProvidedPtr = false;

            ptr = allocAlignedMemory(allocSize, forceZero);
            if (!ptr) {
                alloc->mHal.drv = NULL;
                free(drv);
                return false;
            }

        } else {
            drv->useUserProvidedPtr = true;
            ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
        }
    } else {
        ptr = allocAlignedMemory(allocSize, forceZero);
        if (!ptr) {
            alloc->mHal.drv = NULL;
            free(drv);
            return false;
        }
    }
    // Build the pointer tables
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
    if (allocSize != verifySize) {
        rsAssert(!"Size mismatch");
    }

#ifndef RS_SERVER
    drv->glTarget = GL_NONE;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        if (alloc->mHal.state.hasFaces) {
            drv->glTarget = GL_TEXTURE_CUBE_MAP;
        } else {
            drv->glTarget = GL_TEXTURE_2D;
        }
    } else {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
            drv->glTarget = GL_ARRAY_BUFFER;
        }
    }
#endif

#ifndef RS_COMPATIBILITY_LIB
    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
#else
    drv->glType = 0;
    drv->glFormat = 0;
#endif

    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
        drv->uploadDeferred = true;
    }


    drv->readBackFBO = NULL;

    // Fill out the initial state of the buffer if we couldn't use the
    // user-provided ptr and USAGE_SHARED was accepted.
    if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
        rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X,
                            alloc->getType()->getDimX(), alloc->getType()->getDimY(),
                            alloc->mHal.state.userProvidedPtr, allocSize, 0);
    }

    return true;
}

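// Releases the driver state: GL objects, the malloc'd backing store (unless
// it is user-provided or owned by an IO surface), the read-back FBO, and any
// buffer still dequeued from an IO_OUTPUT window.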
void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

#ifndef RS_COMPATIBILITY_LIB
    if (drv->bufferID) {
        // Causes a SW crash....
        //ALOGV(" mBufferID %i", mBufferID);
        //glDeleteBuffers(1, &mBufferID);
        //mBufferID = 0;
    }
    if (drv->textureID) {
        RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
        drv->textureID = 0;
    }
    if (drv->renderTargetID) {
        RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
        drv->renderTargetID = 0;
    }
#endif

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        // don't free user-allocated ptrs or IO_OUTPUT buffers
        if (!(drv->useUserProvidedPtr) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
                free(alloc->mHal.drvState.lod[0].mallocPtr);
        }
        alloc->mHal.drvState.lod[0].mallocPtr = NULL;
    }

#ifndef RS_COMPATIBILITY_LIB
    if (drv->readBackFBO != NULL) {
        delete drv->readBackFBO;
        drv->readBackFBO = NULL;
    }

    if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
        (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {

        DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
        ANativeWindow *nw = drv->wndSurface;
        if (nw) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
        }
    }
#endif

    free(drv);
    alloc->mHal.drv = NULL;
}

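// Resizes a 1D allocation in place with realloc, rebuilds the pointer table
// for the new Type, and zero-fills any newly added elements. USAGE_SHARED
// (user-backed) allocations cannot be resized.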
void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
                         const Type *newType, bool zeroNew) {
    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
    const uint32_t dimX = newType->getDimX();

    // can't resize Allocations with user-allocated buffers
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
        return;
    }
    void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
    // Calculate the object size
    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, NULL);
    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
    // Build the relative pointer tables.
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
    if (s != verifySize) {
        rsAssert(!"Size mismatch");
    }


    if (dimX > oldDimX) {
        size_t stride = alloc->mHal.state.elementSizeBytes;
        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
                 0, stride * (dimX - oldDimX));
    }
}

static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    if (drv->readBackFBO == NULL) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Revert framebuffer to its original
    lastFbo->setActive(rsc);
#endif
}


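// Propagates the allocation's contents to whichever destinations its usage
// flags require: read back from an FBO when syncing from a render target,
// upload to a texture or vertex buffer object, or just issue a memory fence
// for SHARED (user-backed) memory. Clears the deferred-upload flag.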
void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
                         RsAllocationUsageType src) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        if (!alloc->getIsRenderTarget()) {
            rsc->setError(RS_ERROR_FATAL_DRIVER,
                          "Attempting to sync from a render target on a "
                          "non-render target allocation");
        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
            rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from an RGBA "
                                                 "render target");
        } else {
            rsdAllocationSyncFromFBO(rsc, alloc);
        }
        return;
    }

    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT || src == RS_ALLOCATION_USAGE_SHARED);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        UploadToTexture(rsc, alloc);
    } else {
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
            AllocateRenderTarget(rsc, alloc);
        }
    }
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
        UploadToBufferObject(rsc, alloc);
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {

        if (src == RS_ALLOCATION_USAGE_SHARED) {
            // just a memory fence for the CPU driver
            // vendor drivers probably want to flush any dirty cachelines for
            // this particular Allocation
            __sync_synchronize();
        }
    }

    drv->uploadDeferred = false;
}

void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    drv->uploadDeferred = true;
}

#ifndef RS_COMPATIBILITY_LIB
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
        return false;
    }

    // Must lock the whole surface
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);

    void *dst = NULL;
    mapper.lock(drv->wndBuffer->handle,
            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
            bounds, &dst);
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);

    return true;
}
#endif

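// Attaches (or detaches) an ANativeWindow as the IO_OUTPUT sink for this
// allocation: releases any previously locked buffer, configures the window's
// usage, dimensions, and RGBA_8888 format, then dequeues and maps the first
// buffer via IoGetBuffer().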
void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *old = drv->wndSurface;

    if (nw) {
        nw->incStrong(NULL);
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        //TODO finish support for render target + script
        drv->wnd = nw;
        return;
    }

    // Cleanup old surface if there is one.
    if (drv->wndSurface) {
        ANativeWindow *old = drv->wndSurface;
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        old->cancelBuffer(old, drv->wndBuffer, -1);
        drv->wndSurface = NULL;

        native_window_api_disconnect(old, NATIVE_WINDOW_API_CPU);
        old->decStrong(NULL);
    }

    if (nw != NULL) {
        int32_t r;
        uint32_t flags = 0;

        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
        }
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
            flags |= GRALLOC_USAGE_HW_RENDER;
        }

        r = native_window_api_connect(nw, NATIVE_WINDOW_API_CPU);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error connecting to IO output surface.");
            goto error;
        }

        r = native_window_set_usage(nw, flags);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
            goto error;
        }

        r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
                                                 alloc->mHal.drvState.lod[0].dimY);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
            goto error;
        }

        int format = 0;
        const Element *e = alloc->mHal.state.type->getElement();
        rsAssert(e->getType() == RS_TYPE_UNSIGNED_8);
        rsAssert(e->getVectorSize() == 4);
        rsAssert(e->getKind() == RS_KIND_PIXEL_RGBA);
        format = PIXEL_FORMAT_RGBA_8888;

        r = native_window_set_buffers_format(nw, format);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer format.");
            goto error;
        }

        IoGetBuffer(rsc, alloc, nw);
        drv->wndSurface = nw;
    }

    return;

 error:

    if (nw) {
        nw->decStrong(NULL);
    }


#endif
}

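// Queues the currently mapped buffer to the output window (or swaps the EGL
// surface for render-target allocations) and immediately dequeues the next
// buffer so scripts can keep writing.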
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = drv->wndSurface;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }
    if (nw) {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
                return;
            }

            IoGetBuffer(rsc, alloc, nw);
        }
    } else {
        rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
        return;
    }
#endif
}

void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        drv->surfaceTexture->updateTexImage();
    }
#endif
}


void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         const void *data, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    size_t size = count * eSize;
    if (ptr != data) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        if (alloc->mHal.state.hasReferences) {
            alloc->incRefs(data, count);
            alloc->decRefs(ptr, count);
        }
        memcpy(ptr, data, size);
    }
    drv->uploadDeferred = true;
}

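// Copies a 2D rectangle of user data into the allocation line by line,
// honoring the caller's stride and the allocation's per-LOD stride; for YUV
// allocations the chroma planes that follow the Y data are copied as well.
// Allocations without a malloc'd backing store are updated directly in the
// GL texture instead.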
void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            if (alloc->mHal.state.hasReferences) {
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        if (alloc->mHal.state.yuv) {
            size_t clineSize = lineSize;
            int lod = 1;
            int maxLod = 2;
            if (alloc->mHal.state.yuv == HAL_PIXEL_FORMAT_YV12) {
                maxLod = 3;
                clineSize >>= 1;
            } else if (alloc->mHal.state.yuv == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
                lod = 2;
                maxLod = 3;
            }

            while (lod < maxLod) {
                uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);

                for (uint32_t line=(yoff >> 1); line < ((yoff+h)>>1); line++) {
                    memcpy(dst, src, clineSize);
                    src += alloc->mHal.drvState.lod[lod].stride;
                    dst += alloc->mHal.drvState.lod[lod].stride;
                }
                lod++;
            }

        }
        drv->uploadDeferred = true;
    } else {
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}


void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod,
                         uint32_t w, uint32_t h, uint32_t d, const void *data,
                         size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        for (uint32_t z = zoff; z < d; z++) {
            uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, z, lod,
                                        RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            if (dst == src) {
                // Skip the copy if we are the same allocation. This can arise from
                // our Bitmap optimization, where we share the same storage.
                drv->uploadDeferred = true;
                return;
            }

            for (uint32_t line=yoff; line < (yoff+h); line++) {
                if (alloc->mHal.state.hasReferences) {
                    alloc->incRefs(src, w);
                    alloc->decRefs(dst, w);
                }
                memcpy(dst, src, lineSize);
                src += stride;
                dst += alloc->mHal.drvState.lod[lod].stride;
            }
        }
        drv->uploadDeferred = true;
    }
}

void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         void *data, size_t sizeBytes) {
    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    if (data != ptr) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        memcpy(data, ptr, count * eSize);
    }
}

void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
                                uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                                uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            memcpy(dst, src, lineSize);
            dst += stride;
            src += alloc->mHal.drvState.lod[lod].stride;
        }
    } else {
        ALOGE("Add code to readback from non-script memory");
    }
}



void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod,
                         uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes, size_t stride) {
    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        for (uint32_t z = zoff; z < d; z++) {
            const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, z, lod,
                                              RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            if (dst == src) {
                // Skip the copy if we are the same allocation. This can arise from
                // our Bitmap optimization, where we share the same storage.
                return;
            }

            for (uint32_t line=yoff; line < (yoff+h); line++) {
                memcpy(dst, src, lineSize);
                dst += stride;
                src += alloc->mHal.drvState.lod[lod].stride;
            }
        }
    }
}

void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {
    return alloc->mHal.drvState.lod[0].mallocPtr;
}

void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {

}

void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, size_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
}


void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                                      RsAllocationCubemapFace srcFace) {
    size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t i = 0; i < h; i ++) {
        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, 0, dstLod, dstFace);
        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, 0, srcLod, srcFace);
        memcpy(dstPtr, srcPtr, w * elementSize);

        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
        //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
    }
}

void rsdAllocationData3D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff, uint32_t dstLod,
                                      uint32_t w, uint32_t h, uint32_t d,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff, uint32_t srcLod) {
    uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t j = 0; j < d; j++) {
        for (uint32_t i = 0; i < h; i ++) {
            uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstZoff + j,
                                           dstLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcZoff + j,
                                           srcLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            memcpy(dstPtr, srcPtr, w * elementSize);

            //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
            //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
        }
    }
}

void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                               RsAllocationCubemapFace srcFace) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
                                     dstLod, dstFace, w, h, srcAlloc,
                                     srcXoff, srcYoff, srcLod, srcFace);
}

void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
                               uint32_t dstLod,
                               uint32_t w, uint32_t h, uint32_t d,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
                               uint32_t srcLod) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData3D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff, dstZoff,
                                     dstLod, w, h, d, srcAlloc,
                                     srcXoff, srcYoff, srcZoff, srcLod);
}


void rsdAllocationElementData1D(const Context *rsc, const Allocation *alloc,
                                uint32_t x,
                                const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

void rsdAllocationElementData2D(const Context *rsc, const Allocation *alloc,
                                uint32_t x, uint32_t y,
                                const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, y, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}


static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

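// Software mipmap generation: for each face, each successive LOD is produced
// by box-filtering the previous one with the 8/16/32-bit helper that matches
// the element size.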
void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }
    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
    for (uint32_t face = 0; face < numFaces; face ++) {
        for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
            switch (alloc->getType()->getElement()->getSizeBits()) {
            case 32:
                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 16:
                mip565(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 8:
                mip8(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            }
        }
    }
}

uint32_t rsdAllocationGrallocBits(const android::renderscript::Context *rsc,
                                  android::renderscript::Allocation *alloc)
{
    return 0;
}
