rsdAllocation.cpp revision 9dae48ed019a7bc4e1bd31471540cefcc667fab4
1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "rsdCore.h"
18#include "rsdAllocation.h"
19
20#include "rsAllocation.h"
21
22#if !defined(RS_SERVER) && !defined(RS_COMPATIBILITY_LIB)
23#include "system/window.h"
24#include "ui/Rect.h"
25#include "ui/GraphicBufferMapper.h"
26#endif
27
28#ifdef RS_COMPATIBILITY_LIB
29#include "rsCompatibilityLib.h"
30#else
31#include "rsdFrameBufferObj.h"
32#include "gui/GLConsumer.h"
33#include "gui/CpuConsumer.h"
34#include "gui/Surface.h"
35#include "hardware/gralloc.h"
36
37#include <GLES/gl.h>
38#include <GLES2/gl2.h>
39#include <GLES/glext.h>
40#endif
41
42#ifdef RS_SERVER
43// server requires malloc.h for memalign
44#include <malloc.h>
45#endif
46
47using namespace android;
48using namespace android::renderscript;
49
50
51#ifndef RS_COMPATIBILITY_LIB
52const static GLenum gFaceOrder[] = {
53    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
54    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
55    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
56    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
57    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
58    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
59};
60
61GLenum rsdTypeToGLType(RsDataType t) {
62    switch (t) {
63    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
64    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
65    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;
66
67    //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
68    case RS_TYPE_FLOAT_32:      return GL_FLOAT;
69    case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
70    case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
71    case RS_TYPE_SIGNED_8:      return GL_BYTE;
72    case RS_TYPE_SIGNED_16:     return GL_SHORT;
73    default:    break;
74    }
75    return 0;
76}
77
78GLenum rsdKindToGLFormat(RsDataKind k) {
79    switch (k) {
80    case RS_KIND_PIXEL_L: return GL_LUMINANCE;
81    case RS_KIND_PIXEL_A: return GL_ALPHA;
82    case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
83    case RS_KIND_PIXEL_RGB: return GL_RGB;
84    case RS_KIND_PIXEL_RGBA: return GL_RGBA;
85    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
86    default: break;
87    }
88    return 0;
89}
90#endif
91
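// Computes the CPU address of element (xoff, yoff, zoff) for a given mip
// level and cubemap face, using the per-LOD stride table built by
// AllocationBuildPointerTable() below.  Worked example with hypothetical
// sizes: a 100x100 RGBA_8888 allocation has elementSizeBytes == 4 and a
// 16-byte-aligned stride of 400, so on face +X, LOD 0:
//     GetOffsetPtr(alloc, 3, 2, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X)
// returns mallocPtr + 2*400 + 3*4 = mallocPtr + 812.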
92uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
93                      uint32_t xoff, uint32_t yoff, uint32_t zoff,
94                      uint32_t lod, RsAllocationCubemapFace face) {
95    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
96    ptr += face * alloc->mHal.drvState.faceOffset;
97    ptr += zoff * alloc->mHal.drvState.lod[lod].dimY * alloc->mHal.drvState.lod[lod].stride;
98    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
99    ptr += xoff * alloc->mHal.state.elementSizeBytes;
100    return ptr;
101}
102
103
104static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
105                            uint32_t xoff, uint32_t yoff, uint32_t lod,
106                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
107#ifndef RS_COMPATIBILITY_LIB
108    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
109
110    rsAssert(drv->textureID);
111    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
112    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
113    GLenum t = GL_TEXTURE_2D;
114    if (alloc->mHal.state.hasFaces) {
115        t = gFaceOrder[face];
116    }
117    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
118#endif
119}
120
121
122#ifndef RS_COMPATIBILITY_LIB
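// Pushes every face/LOD of the malloc'd backing store into the bound GL
// texture.  The first upload must define the texture storage with
// glTexImage2D; subsequent syncs only rewrite the existing storage with
// glTexSubImage2D.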
123static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
124    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
125
126    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
127    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
128
129    uint32_t faceCount = 1;
130    if (alloc->mHal.state.hasFaces) {
131        faceCount = 6;
132    }
133
134    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
135    for (uint32_t face = 0; face < faceCount; face ++) {
136        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
137            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, 0, lod, (RsAllocationCubemapFace)face);
138
139            GLenum t = GL_TEXTURE_2D;
140            if (alloc->mHal.state.hasFaces) {
141                t = gFaceOrder[face];
142            }
143
144            if (isFirstUpload) {
145                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
146                             alloc->mHal.state.type->getLODDimX(lod),
147                             alloc->mHal.state.type->getLODDimY(lod),
148                             0, drv->glFormat, drv->glType, p);
149            } else {
150                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
151                                alloc->mHal.state.type->getLODDimX(lod),
152                                alloc->mHal.state.type->getLODDimY(lod),
153                                drv->glFormat, drv->glType, p);
154            }
155        }
156    }
157
158    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
159        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
160    }
161    rsdGLCheckError(rsc, "Upload2DTexture");
162}
163#endif
164
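// Syncs script-side memory into the allocation's GL texture.  IO_INPUT
// allocations only need a texture name here (their pixels arrive through the
// consumer/surface path), and allocations without SCRIPT usage free their
// malloc'd copy once the texture owns the data.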
165static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
166#ifndef RS_COMPATIBILITY_LIB
167    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
168
169    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
170        if (!drv->textureID) {
171            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
172        }
173        return;
174    }
175
176    if (!drv->glType || !drv->glFormat) {
177        return;
178    }
179
180    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
181        return;
182    }
183
184    bool isFirstUpload = false;
185
186    if (!drv->textureID) {
187        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
188        isFirstUpload = true;
189    }
190
191    Upload2DTexture(rsc, alloc, isFirstUpload);
192
193    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
194        if (alloc->mHal.drvState.lod[0].mallocPtr) {
195            free(alloc->mHal.drvState.lod[0].mallocPtr);
196            alloc->mHal.drvState.lod[0].mallocPtr = NULL;
197        }
198    }
199    rsdGLCheckError(rsc, "UploadToTexture");
200#endif
201}
202
203static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
204#ifndef RS_COMPATIBILITY_LIB
205    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
206
207    if (!drv->glFormat) {
208        return;
209    }
210
211    if (!drv->renderTargetID) {
212        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);
213
214        if (!drv->renderTargetID) {
215            // This should generally not happen
216            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
217            rsc->dumpDebug();
218            return;
219        }
220        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
221        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
222                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
223    }
224    rsdGLCheckError(rsc, "AllocateRenderTarget");
225#endif
226}
227
228static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
229#ifndef RS_COMPATIBILITY_LIB
230    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
231
232    rsAssert(!alloc->mHal.state.type->getDimY());
233    rsAssert(!alloc->mHal.state.type->getDimZ());
234
235    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;
236
237    if (!drv->bufferID) {
238        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
239    }
240    if (!drv->bufferID) {
241        ALOGE("Upload to buffer object failed");
242        drv->uploadDeferred = true;
243        return;
244    }
245    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
246    RSD_CALL_GL(glBufferData, drv->glTarget,
247                alloc->mHal.state.type->getPackedSizeBytes(),
248                alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
249    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
250    rsdGLCheckError(rsc, "UploadToBufferObject");
251#endif
252}
253
254
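// Describes the chroma planes of a YUV allocation by reusing the LOD slots:
// lod[0] is Y, lod[1] is U and lod[2] is V, each chroma plane at half the Y
// resolution.  Worked example for YV12 with a hypothetical 320x240 Y plane
// and a 320-byte Y stride: V starts at mallocPtr + 320*240 with stride
// rsRound(320 >> 1, 16) == 160, U follows 160*120 bytes after V, and the
// returned uvSize is 2 * 160 * 120 == 38400 bytes.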
255static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
256    // YUV only supports basic 2d
257    // so we can stash the plane pointers in the mipmap levels.
258    size_t uvSize = 0;
259    state->lod[1].dimX = state->lod[0].dimX / 2;
260    state->lod[1].dimY = state->lod[0].dimY / 2;
261    state->lod[2].dimX = state->lod[0].dimX / 2;
262    state->lod[2].dimY = state->lod[0].dimY / 2;
263    state->yuv.shift = 1;
264    state->yuv.step = 1;
265    state->lodCount = 3;
266
267#ifndef RS_SERVER
268    switch(yuv) {
269    case HAL_PIXEL_FORMAT_YV12:
270        state->lod[2].stride = rsRound(state->lod[0].stride >> 1, 16);
271        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
272                (state->lod[0].stride * state->lod[0].dimY);
273        uvSize += state->lod[2].stride * state->lod[2].dimY;
274
275        state->lod[1].stride = state->lod[2].stride;
276        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) +
277                (state->lod[2].stride * state->lod[2].dimY);
278        uvSize += state->lod[1].stride * state->lod[1].dimY;
279        break;
280    case HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
281        //state->lod[1].dimX = state->lod[0].dimX;
282        state->lod[1].stride = state->lod[0].stride;
283        state->lod[2].stride = state->lod[0].stride;
284        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
285                (state->lod[0].stride * state->lod[0].dimY);
286        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) + 1;
287        uvSize += state->lod[1].stride * state->lod[1].dimY;
288        state->yuv.step = 2;
289        break;
290#ifndef RS_COMPATIBILITY_LIB
291    case HAL_PIXEL_FORMAT_YCbCr_420_888:
292        // This will be filled in by ioReceive()
293        break;
294#endif
295    default:
296        rsAssert(0);
297    }
298#endif
299    return uvSize;
300}
301
302
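// Fills in drvState (dimensions, strides and per-LOD pointers) for the given
// Type and returns the total allocation size in bytes (multiplied by 6 when
// the type has cubemap faces).  rsdAllocationInit() calls it twice: first
// with ptr == NULL just to size the buffer, then with the real base pointer
// to build the final table.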
303static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
304        const Type *type, uint8_t *ptr) {
305    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
306    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
307    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
308    alloc->mHal.drvState.lod[0].mallocPtr = 0;
309    // Stride needs to be 16-byte aligned too!
310    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
311    alloc->mHal.drvState.lod[0].stride = rsRound(stride, 16);
312    alloc->mHal.drvState.lodCount = type->getLODCount();
313    alloc->mHal.drvState.faceCount = type->getDimFaces();
314
315    size_t offsets[Allocation::MAX_LOD];
316    memset(offsets, 0, sizeof(offsets));
317
318    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
319            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
320    if(alloc->mHal.drvState.lodCount > 1) {
321        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
322        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
323        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
324        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
325            alloc->mHal.drvState.lod[lod].dimX = tx;
326            alloc->mHal.drvState.lod[lod].dimY = ty;
327            alloc->mHal.drvState.lod[lod].dimZ = tz;
328            alloc->mHal.drvState.lod[lod].stride =
329                    rsRound(tx * type->getElementSizeBytes(), 16);
330            offsets[lod] = o;
331            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
332            if (tx > 1) tx >>= 1;
333            if (ty > 1) ty >>= 1;
334            if (tz > 1) tz >>= 1;
335        }
336    } else if (alloc->mHal.state.yuv) {
337        o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);
338
339        for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
340            offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
341        }
342    }
343
344    alloc->mHal.drvState.faceOffset = o;
345
346    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
347    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
348        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
349    }
350
351    size_t allocSize = alloc->mHal.drvState.faceOffset;
352    if(alloc->mHal.drvState.faceCount) {
353        allocSize *= 6;
354    }
355
356    return allocSize;
357}
358
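// Backing stores are allocated on a 16-byte boundary so the stride math above
// and vectorized element access stay aligned; memalign() is used because
// plain malloc() typically guarantees less than 16-byte alignment on 32-bit
// targets.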
359static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero) {
360    // We align all allocations to a 16-byte boundary.
361    uint8_t* ptr = (uint8_t *)memalign(16, allocSize);
362    if (!ptr) {
363        return NULL;
364    }
365    if (forceZero) {
366        memset(ptr, 0, allocSize);
367    }
368    return ptr;
369}
370
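// Driver-side allocation setup.  Four cases are handled below: IO_OUTPUT and
// IO_INPUT allocations defer their backing store to the surface machinery,
// user-provided (USAGE_SHARED) pointers are adopted directly when the row
// stride is 16-byte aligned (otherwise we fall back to a private copy), and
// everything else gets a fresh 16-byte-aligned buffer.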
371bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
372    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
373    if (!drv) {
374        return false;
375    }
376    alloc->mHal.drv = drv;
377
378    // Calculate the object size.
379    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), NULL);
380
381    uint8_t * ptr = NULL;
382    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {
383
384    } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
385        // Allocation is allocated when the surface is created
386        // in getSurface
387    } else if (alloc->mHal.state.userProvidedPtr != NULL) {
388        // user-provided allocation
389        // limitations: no faces, no LOD, USAGE_SCRIPT or SCRIPT+TEXTURE only
390        if (!(alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED) ||
391              alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED | RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE))) {
392            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT | USAGE_SHARED or USAGE_SCRIPT | USAGE_SHARED | USAGE_GRAPHICS_TEXTURE");
393            return false;
394        }
395        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
396            ALOGE("User-allocated buffers must not have multiple faces or LODs");
397            return false;
398        }
399
400        // rows must be 16-byte aligned
401        // validate that here, otherwise fall back to not use the user-backed allocation
402        if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % 16) != 0) {
403            ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
404            drv->useUserProvidedPtr = false;
405
406            ptr = allocAlignedMemory(allocSize, forceZero);
407            if (!ptr) {
408                alloc->mHal.drv = NULL;
409                free(drv);
410                return false;
411            }
412
413        } else {
414            drv->useUserProvidedPtr = true;
415            ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
416        }
417    } else {
418        ptr = allocAlignedMemory(allocSize, forceZero);
419        if (!ptr) {
420            alloc->mHal.drv = NULL;
421            free(drv);
422            return false;
423        }
424    }
425    // Build the pointer tables
426    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
427    if(allocSize != verifySize) {
428        rsAssert(!"Size mismatch");
429    }
430
431#ifndef RS_SERVER
432    drv->glTarget = GL_NONE;
433    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
434        if (alloc->mHal.state.hasFaces) {
435            drv->glTarget = GL_TEXTURE_CUBE_MAP;
436        } else {
437            drv->glTarget = GL_TEXTURE_2D;
438        }
439    } else {
440        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
441            drv->glTarget = GL_ARRAY_BUFFER;
442        }
443    }
444#endif
445
446#ifndef RS_COMPATIBILITY_LIB
447    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
448    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
449#else
450    drv->glType = 0;
451    drv->glFormat = 0;
452#endif
453
454    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
455        drv->uploadDeferred = true;
456    }
457
458
459    drv->readBackFBO = NULL;
460
461    // If the user-provided pointer could not be adopted directly, seed the new buffer with its contents.
462    if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
463        rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X, alloc->getType()->getDimX(), alloc->getType()->getDimY(), alloc->mHal.state.userProvidedPtr, allocSize, 0);
464    }
465
466    return true;
467}
468
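// Tears down the GL objects and the malloc'd backing store.  User-provided
// and IO-backed pointers are not freed here, and an in-flight IO_OUTPUT
// buffer is unlocked and queued back to its window before the driver struct
// is released.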
469void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
470    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
471
472#ifndef RS_COMPATIBILITY_LIB
473    if (drv->bufferID) {
474        // Causes a SW crash....
475        //ALOGV(" mBufferID %i", mBufferID);
476        //glDeleteBuffers(1, &mBufferID);
477        //mBufferID = 0;
478    }
479    if (drv->textureID) {
480        RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
481        drv->textureID = 0;
482    }
483    if (drv->renderTargetID) {
484        RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
485        drv->renderTargetID = 0;
486    }
487#endif
488
489    if (alloc->mHal.drvState.lod[0].mallocPtr) {
490        // don't free user-allocated ptrs or IO_OUTPUT buffers
491        if (!(drv->useUserProvidedPtr) &&
492            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) &&
493            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
494                free(alloc->mHal.drvState.lod[0].mallocPtr);
495        }
496        alloc->mHal.drvState.lod[0].mallocPtr = NULL;
497    }
498
499#ifndef RS_COMPATIBILITY_LIB
500    if (drv->readBackFBO != NULL) {
501        delete drv->readBackFBO;
502        drv->readBackFBO = NULL;
503    }
504
505    if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
506        (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
507
509        ANativeWindow *nw = drv->wndSurface;
510        if (nw) {
511            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
512            mapper.unlock(drv->wndBuffer->handle);
513            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
514        }
515    }
516#endif
517
518    free(drv);
519    alloc->mHal.drv = NULL;
520}
521
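// Resizes the backing store in place with realloc() and zeroes any newly
// exposed cells when the X dimension grows.  Allocations that adopted a
// user-provided (USAGE_SHARED) pointer cannot be resized.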
522void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
523                         const Type *newType, bool zeroNew) {
524    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
525    const uint32_t dimX = newType->getDimX();
526
527    // can't resize Allocations with user-allocated buffers
528    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
529        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
530        return;
531    }
532    void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
533    // Calculate the object size
534    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, NULL);
535    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
536    // Build the relative pointer tables.
537    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
538    if(s != verifySize) {
539        rsAssert(!"Size mismatch");
540    }
541
542
543    if (dimX > oldDimX) {
544        size_t stride = alloc->mHal.state.elementSizeBytes;
545        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
546                 0, stride * (dimX - oldDimX));
547    }
548}
549
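// Reads rendered pixels back into script-visible memory: the allocation is
// bound as the color target of a scratch FBO, glReadPixels fills
// lod[0].mallocPtr, and the previously bound framebuffer is restored.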
550static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
551#ifndef RS_COMPATIBILITY_LIB
552    if (!alloc->getIsScript()) {
553        return; // nothing to sync
554    }
555
556    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
557    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;
558
559    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
560    if (!drv->textureID && !drv->renderTargetID) {
561        return; // nothing was rendered here yet, so nothing to sync
562    }
563    if (drv->readBackFBO == NULL) {
564        drv->readBackFBO = new RsdFrameBufferObj();
565        drv->readBackFBO->setColorTarget(drv, 0);
566        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
567                                        alloc->getType()->getDimY());
568    }
569
570    // Bind the framebuffer object so we can read back from it
571    drv->readBackFBO->setActive(rsc);
572
573    // Do the readback
574    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
575                alloc->mHal.drvState.lod[0].dimY,
576                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);
577
578    // Revert framebuffer to its original
579    lastFbo->setActive(rsc);
580#endif
581}
582
583
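// Propagates script-side changes to the GL objects that mirror this
// allocation (texture, renderbuffer or vertex buffer), or, when src is
// GRAPHICS_RENDER_TARGET, pulls rendered pixels back into script memory.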
584void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
585                         RsAllocationUsageType src) {
586    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
587
588    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
589        if(!alloc->getIsRenderTarget()) {
590            rsc->setError(RS_ERROR_FATAL_DRIVER,
591                          "Attempting to sync from render target on a "
592                          "non-render-target allocation");
593        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
594            rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from an RGBA "
595                                                 "render target");
596        } else {
597            rsdAllocationSyncFromFBO(rsc, alloc);
598        }
599        return;
600    }
601
602    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT);
603
604    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
605        UploadToTexture(rsc, alloc);
606    } else {
607        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
608            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
609            AllocateRenderTarget(rsc, alloc);
610        }
611    }
612    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
613        UploadToBufferObject(rsc, alloc);
614    }
615
616    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
617        // NOP in CPU driver for now
618    }
619
620    drv->uploadDeferred = false;
621}
622
623void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
624    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
625    drv->uploadDeferred = true;
626}
627
628#ifndef RS_COMPATIBILITY_LIB
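// Dequeues the next ANativeWindow buffer and locks it for CPU writes; the
// gralloc-provided pointer and stride become lod[0]'s pointer and stride, so
// scripts write straight into the window buffer.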
629static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
630    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
631
632    int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
633    if (r) {
634        rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
635        return false;
636    }
637
638    // Must lock the whole surface
639    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
640    Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);
641
642    void *dst = NULL;
643    mapper.lock(drv->wndBuffer->handle,
644            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
645            bounds, &dst);
646    alloc->mHal.drvState.lod[0].mallocPtr = dst;
647    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
648    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);
649
650    return true;
651}
652#endif
653
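// Attaches (or detaches) an ANativeWindow for IO_OUTPUT: any previous surface
// is unlocked, its buffer cancelled and the window disconnected, then the new
// window is connected as a CPU client and configured with the allocation's
// dimensions and RGBA_8888 format before the first buffer is dequeued.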
654void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
655#ifndef RS_COMPATIBILITY_LIB
656    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
658
659    if (nw) {
660        nw->incStrong(NULL);
661    }
662
663    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
664        //TODO finish support for render target + script
665        drv->wnd = nw;
666        return;
667    }
668
669    // Cleanup old surface if there is one.
670    if (drv->wndSurface) {
671        ANativeWindow *old = drv->wndSurface;
672        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
673        mapper.unlock(drv->wndBuffer->handle);
674        old->cancelBuffer(old, drv->wndBuffer, -1);
675        drv->wndSurface = NULL;
676
677        native_window_api_disconnect(old, NATIVE_WINDOW_API_CPU);
678        old->decStrong(NULL);
679    }
680
681    if (nw != NULL) {
682        int32_t r;
683        uint32_t flags = 0;
684
685        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
686            flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
687        }
688        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
689            flags |= GRALLOC_USAGE_HW_RENDER;
690        }
691
692        r = native_window_api_connect(nw, NATIVE_WINDOW_API_CPU);
693        if (r) {
694            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
695            goto error;
696        }
697
698        r = native_window_set_usage(nw, flags);
699        if (r) {
700            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
701            goto error;
702        }
703
704        r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
705                                                 alloc->mHal.drvState.lod[0].dimY);
706        if (r) {
707            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
708            goto error;
709        }
710
711        int format = 0;
712        const Element *e = alloc->mHal.state.type->getElement();
713        rsAssert(e->getType() == RS_TYPE_UNSIGNED_8);
714        rsAssert(e->getVectorSize() == 4);
715        rsAssert(e->getKind() == RS_KIND_PIXEL_RGBA);
716        format = PIXEL_FORMAT_RGBA_8888;
717
718        r = native_window_set_buffers_format(nw, format);
719        if (r) {
720            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer format.");
721            goto error;
722        }
723
724        IoGetBuffer(rsc, alloc, nw);
725        drv->wndSurface = nw;
726    }
727
728    return;
729
730 error:
731
732    if (nw) {
733        nw->decStrong(NULL);
734    }
735
736
737#endif
738}
739
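// Hands the current buffer to the consumer.  Render-target allocations swap
// through EGL; script-written allocations unlock and queue the gralloc
// buffer, then immediately dequeue the next one so the script always has a
// writable buffer.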
740void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
741#ifndef RS_COMPATIBILITY_LIB
742    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
743    ANativeWindow *nw = drv->wndSurface;
744    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
745        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
746        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
747        return;
748    }
749    if (nw) {
750        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
751            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
752            mapper.unlock(drv->wndBuffer->handle);
753            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
754            if (r) {
755                rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
756                return;
757            }
758
759            IoGetBuffer(rsc, alloc, nw);
760        }
761    } else {
762        rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
763        return;
764    }
765#endif
766}
767
768void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
769#ifndef RS_COMPATIBILITY_LIB
770    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
771    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
772        drv->surfaceTexture->updateTexImage();
773    }
774#endif
775}
776
777
778void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
779                         uint32_t xoff, uint32_t lod, size_t count,
780                         const void *data, size_t sizeBytes) {
781    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
782
783    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
784    uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
785    size_t size = count * eSize;
786    if (ptr != data) {
787        // Skip the copy if we are the same allocation. This can arise from
788        // our Bitmap optimization, where we share the same storage.
789        if (alloc->mHal.state.hasReferences) {
790            alloc->incRefs(data, count);
791            alloc->decRefs(ptr, count);
792        }
793        memcpy(ptr, data, size);
794    }
795    drv->uploadDeferred = true;
796}
797
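// Copies a 2D rectangle into the allocation one row at a time so the caller's
// stride and the allocation's stride may differ.  For YUV allocations the
// chroma planes that follow the Y data in the source are copied as well; for
// texture-only allocations with no malloc'd backing the data goes straight to
// glTexSubImage2D.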
798void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
799                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
800                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
801    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
802
803    size_t eSize = alloc->mHal.state.elementSizeBytes;
804    size_t lineSize = eSize * w;
805    if (!stride) {
806        stride = lineSize;
807    }
808
809    if (alloc->mHal.drvState.lod[0].mallocPtr) {
810        const uint8_t *src = static_cast<const uint8_t *>(data);
811        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
812        if (dst == src) {
813            // Skip the copy if we are the same allocation. This can arise from
814            // our Bitmap optimization, where we share the same storage.
815            drv->uploadDeferred = true;
816            return;
817        }
818
819        for (uint32_t line=yoff; line < (yoff+h); line++) {
820            if (alloc->mHal.state.hasReferences) {
821                alloc->incRefs(src, w);
822                alloc->decRefs(dst, w);
823            }
824            memcpy(dst, src, lineSize);
825            src += stride;
826            dst += alloc->mHal.drvState.lod[lod].stride;
827        }
828        if (alloc->mHal.state.yuv) {
829            size_t clineSize = lineSize;
830            int lod = 1;
831            int maxLod = 2;
832            if (alloc->mHal.state.yuv == HAL_PIXEL_FORMAT_YV12) {
833                maxLod = 3;
834                clineSize >>= 1;
835            } else if (alloc->mHal.state.yuv == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
836                lod = 2;
837                maxLod = 3;
838            }
839
840            while (lod < maxLod) {
841                uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
842
843                for (uint32_t line=(yoff >> 1); line < ((yoff+h)>>1); line++) {
844                    memcpy(dst, src, clineSize);
845                    src += alloc->mHal.drvState.lod[lod].stride;
846                    dst += alloc->mHal.drvState.lod[lod].stride;
847                }
848                lod++;
849            }
850
851        }
852        drv->uploadDeferred = true;
853    } else {
854        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
855    }
856}
857
858void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
859                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
860                         uint32_t lod,
861                         uint32_t w, uint32_t h, uint32_t d, const void *data,
862                         size_t sizeBytes, size_t stride) {
863    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
864
865    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
866    uint32_t lineSize = eSize * w;
867    if (!stride) {
868        stride = lineSize;
869    }
870
871    if (alloc->mHal.drvState.lod[0].mallocPtr) {
872        const uint8_t *src = static_cast<const uint8_t *>(data);
873        for (uint32_t z = zoff; z < (d + zoff); z++) {
874            uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, z, lod,
875                                        RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
876            if (dst == src) {
877                // Skip the copy if we are the same allocation. This can arise from
878                // our Bitmap optimization, where we share the same storage.
879                drv->uploadDeferred = true;
880                return;
881            }
882
883            for (uint32_t line=yoff; line < (yoff+h); line++) {
884                if (alloc->mHal.state.hasReferences) {
885                    alloc->incRefs(src, w);
886                    alloc->decRefs(dst, w);
887                }
888                memcpy(dst, src, lineSize);
889                src += stride;
890                dst += alloc->mHal.drvState.lod[lod].stride;
891            }
892        }
893        drv->uploadDeferred = true;
894    }
895}
896
897void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
898                         uint32_t xoff, uint32_t lod, size_t count,
899                         void *data, size_t sizeBytes) {
900    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
901    const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
902    if (data != ptr) {
903        // Skip the copy if we are the same allocation. This can arise from
904        // our Bitmap optimization, where we share the same storage.
905        memcpy(data, ptr, count * eSize);
906    }
907}
908
909void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
910                                uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
911                                uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
912    size_t eSize = alloc->mHal.state.elementSizeBytes;
913    size_t lineSize = eSize * w;
914    if (!stride) {
915        stride = lineSize;
916    }
917
918    if (alloc->mHal.drvState.lod[0].mallocPtr) {
919        uint8_t *dst = static_cast<uint8_t *>(data);
920        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
921        if (dst == src) {
922            // Skip the copy if we are the same allocation. This can arise from
923            // our Bitmap optimization, where we share the same storage.
924            return;
925        }
926
927        for (uint32_t line=yoff; line < (yoff+h); line++) {
928            memcpy(dst, src, lineSize);
929            dst += stride;
930            src += alloc->mHal.drvState.lod[lod].stride;
931        }
932    } else {
933        ALOGE("Add code to readback from non-script memory");
934    }
935}
936
937
938void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
939                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
940                         uint32_t lod,
941                         uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes, size_t stride) {
942    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
943    uint32_t lineSize = eSize * w;
944    if (!stride) {
945        stride = lineSize;
946    }
947
948    if (alloc->mHal.drvState.lod[0].mallocPtr) {
949        uint8_t *dst = static_cast<uint8_t *>(data);
950        for (uint32_t z = zoff; z < (d + zoff); z++) {
951            const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, z, lod,
952                                              RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
953            if (dst == src) {
954                // Skip the copy if we are the same allocation. This can arise from
955                // our Bitmap optimization, where we share the same storage.
956                return;
957            }
958
959            for (uint32_t line=yoff; line < (yoff+h); line++) {
960                memcpy(dst, src, lineSize);
961                dst += stride;
962                src += alloc->mHal.drvState.lod[lod].stride;
963            }
964        }
965    }
966}
967
968void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
969                          const android::renderscript::Allocation *alloc) {
970    return alloc->mHal.drvState.lod[0].mallocPtr;
971}
972
973void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
974                          const android::renderscript::Allocation *alloc) {
975
976}
977
978void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
979                               const android::renderscript::Allocation *dstAlloc,
980                               uint32_t dstXoff, uint32_t dstLod, size_t count,
981                               const android::renderscript::Allocation *srcAlloc,
982                               uint32_t srcXoff, uint32_t srcLod) {
983}
984
985
986void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
987                                      const android::renderscript::Allocation *dstAlloc,
988                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
989                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
990                                      const android::renderscript::Allocation *srcAlloc,
991                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
992                                      RsAllocationCubemapFace srcFace) {
993    size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
994    for (uint32_t i = 0; i < h; i ++) {
995        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, 0, dstLod, dstFace);
996        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, 0, srcLod, srcFace);
997        memcpy(dstPtr, srcPtr, w * elementSize);
998
999        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
1000        //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
1001    }
1002}
1003
1004void rsdAllocationData3D_alloc_script(const android::renderscript::Context *rsc,
1005                                      const android::renderscript::Allocation *dstAlloc,
1006                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff, uint32_t dstLod,
1007                                      uint32_t w, uint32_t h, uint32_t d,
1008                                      const android::renderscript::Allocation *srcAlloc,
1009                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff, uint32_t srcLod) {
1010    uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
1011    for (uint32_t j = 0; j < d; j++) {
1012        for (uint32_t i = 0; i < h; i ++) {
1013            uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstZoff + j,
1014                                           dstLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1015            uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcZoff + j,
1016                                           srcLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1017            memcpy(dstPtr, srcPtr, w * elementSize);
1018
1019            //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
1020            //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
1021        }
1022    }
1023}
1024
1025void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
1026                               const android::renderscript::Allocation *dstAlloc,
1027                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
1028                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
1029                               const android::renderscript::Allocation *srcAlloc,
1030                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
1031                               RsAllocationCubemapFace srcFace) {
1032    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
1033        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
1034                                             "yet implemented.");
1035        return;
1036    }
1037    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
1038                                     dstLod, dstFace, w, h, srcAlloc,
1039                                     srcXoff, srcYoff, srcLod, srcFace);
1040}
1041
1042void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
1043                               const android::renderscript::Allocation *dstAlloc,
1044                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
1045                               uint32_t dstLod,
1046                               uint32_t w, uint32_t h, uint32_t d,
1047                               const android::renderscript::Allocation *srcAlloc,
1048                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
1049                               uint32_t srcLod) {
1050    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
1051        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
1052                                             "yet implemented.");
1053        return;
1054    }
1055    rsdAllocationData3D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff, dstZoff,
1056                                     dstLod, w, h, d, srcAlloc,
1057                                     srcXoff, srcYoff, srcZoff, srcLod);
1058}
1059
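// Writes a single sub-element: field cIdx of the element at position x.  The
// field's byte offset within the element comes from getFieldOffsetBytes().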
1060void rsdAllocationElementData1D(const Context *rsc, const Allocation *alloc,
1061                                uint32_t x,
1062                                const void *data, uint32_t cIdx, size_t sizeBytes) {
1063    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
1064
1065    size_t eSize = alloc->mHal.state.elementSizeBytes;
1066    uint8_t * ptr = GetOffsetPtr(alloc, x, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1067
1068    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
1069    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);
1070
1071    if (alloc->mHal.state.hasReferences) {
1072        e->incRefs(data);
1073        e->decRefs(ptr);
1074    }
1075
1076    memcpy(ptr, data, sizeBytes);
1077    drv->uploadDeferred = true;
1078}
1079
1080void rsdAllocationElementData2D(const Context *rsc, const Allocation *alloc,
1081                                uint32_t x, uint32_t y,
1082                                const void *data, uint32_t cIdx, size_t sizeBytes) {
1083    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
1084
1085    size_t eSize = alloc->mHal.state.elementSizeBytes;
1086    uint8_t * ptr = GetOffsetPtr(alloc, x, y, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1087
1088    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
1089    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);
1090
1091    if (alloc->mHal.state.hasReferences) {
1092        e->incRefs(data);
1093        e->decRefs(ptr);
1094    }
1095
1096    memcpy(ptr, data, sizeBytes);
1097    drv->uploadDeferred = true;
1098}
1099
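// CPU mipmap generation helpers: each one averages a 2x2 block of the parent
// LOD into one pixel of the child LOD (a simple box filter), specialized for
// 16-bit 565, 32-bit 8888 and 8-bit single-channel formats.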
1100static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1101    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1102    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1103
1104    for (uint32_t y=0; y < h; y++) {
1105        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1106        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
1107        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
1108
1109        for (uint32_t x=0; x < w; x++) {
1110            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
1111            oPtr ++;
1112            i1 += 2;
1113            i2 += 2;
1114        }
1115    }
1116}
1117
1118static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1119    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1120    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1121
1122    for (uint32_t y=0; y < h; y++) {
1123        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1124        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
1125        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
1126
1127        for (uint32_t x=0; x < w; x++) {
1128            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
1129            oPtr ++;
1130            i1 += 2;
1131            i2 += 2;
1132        }
1133    }
1134}
1135
1136static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1137    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1138    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1139
1140    for (uint32_t y=0; y < h; y++) {
1141        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1142        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
1143        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
1144
1145        for (uint32_t x=0; x < w; x++) {
1146            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
1147            oPtr ++;
1148            i1 += 2;
1149            i2 += 2;
1150        }
1151    }
1152}
1153
1154void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
1155    if(!alloc->mHal.drvState.lod[0].mallocPtr) {
1156        return;
1157    }
1158    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
1159    for (uint32_t face = 0; face < numFaces; face ++) {
1160        for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
1161            switch (alloc->getType()->getElement()->getSizeBits()) {
1162            case 32:
1163                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
1164                break;
1165            case 16:
1166                mip565(alloc, lod, (RsAllocationCubemapFace)face);
1167                break;
1168            case 8:
1169                mip8(alloc, lod, (RsAllocationCubemapFace)face);
1170                break;
1171            }
1172        }
1173    }
1174}
1175
1176uint32_t rsdAllocationGrallocBits(const android::renderscript::Context *rsc,
1177                                  android::renderscript::Allocation *alloc) {
1179    return 0;
1180}
1181
1182