rsdAllocation.cpp revision 4961cceab2b71bf0ab59e1b66a7559f67ed28781
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "rsdCore.h"
#include "rsdAllocation.h"

#include "rsAllocation.h"

#ifndef RS_SERVER
#include "system/window.h"
#include "ui/Rect.h"
#include "ui/GraphicBufferMapper.h"
#endif

#ifndef RS_COMPATIBILITY_LIB
#include "rsdFrameBufferObj.h"
#include "gui/GLConsumer.h"
#include "gui/CpuConsumer.h"
#include "gui/Surface.h"
#include "hardware/gralloc.h"

#include <GLES/gl.h>
#include <GLES2/gl2.h>
#include <GLES/glext.h>
#endif

#ifdef RS_SERVER
// server requires malloc.h for memalign
#include <malloc.h>
#endif

using namespace android;
using namespace android::renderscript;


#ifndef RS_COMPATIBILITY_LIB
const static GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

GLenum rsdTypeToGLType(RsDataType t) {
    switch (t) {
    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;

    //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
    case RS_TYPE_FLOAT_32:      return GL_FLOAT;
    case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
    case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
    case RS_TYPE_SIGNED_8:      return GL_BYTE;
    case RS_TYPE_SIGNED_16:     return GL_SHORT;
    default:    break;
    }
    return 0;
}

GLenum rsdKindToGLFormat(RsDataKind k) {
    switch (k) {
    case RS_KIND_PIXEL_L: return GL_LUMINANCE;
    case RS_KIND_PIXEL_A: return GL_ALPHA;
    case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
    case RS_KIND_PIXEL_RGB: return GL_RGB;
    case RS_KIND_PIXEL_RGBA: return GL_RGBA;
    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
    default: break;
    }
    return 0;
}
#endif

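// Returns the CPU address of the element at (xoff, yoff, zoff) for the given
// LOD and cubemap face, computed from the per-LOD stride/dimension table and
// face offset kept in drvState.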
uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
                      uint32_t xoff, uint32_t yoff, uint32_t zoff,
                      uint32_t lod, RsAllocationCubemapFace face) {
    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
    ptr += face * alloc->mHal.drvState.faceOffset;
    ptr += zoff * alloc->mHal.drvState.lod[lod].dimY * alloc->mHal.drvState.lod[lod].stride;
    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
    ptr += xoff * alloc->mHal.state.elementSizeBytes;
    return ptr;
}


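// Copies a sub-rectangle of host memory into the allocation's GL texture via
// glTexSubImage2D. Compiled out of the compatibility library build.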
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        t = gFaceOrder[face];
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}


#ifndef RS_COMPATIBILITY_LIB
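// Uploads every face and LOD of the allocation to its GL texture. The first
// upload defines the texture storage with glTexImage2D; later uploads reuse
// the existing storage via glTexSubImage2D.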
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face ++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                             alloc->mHal.state.type->getLODDimX(lod),
                             alloc->mHal.state.type->getLODDimY(lod),
                             0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                                alloc->mHal.state.type->getLODDimX(lod),
                                alloc->mHal.state.type->getLODDimY(lod),
                                drv->glFormat, drv->glType, p);
            }
        }
    }

    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
#endif

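// Syncs the allocation's backing store to its GL texture, generating the
// texture name on first use. If the allocation is not script-accessible, the
// malloc'd copy is released once the texture owns the data.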
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    if (!drv->glType || !drv->glFormat) {
        return;
    }

    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = NULL;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}

static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}

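// Syncs a 1D allocation to its GL vertex buffer object, generating the buffer
// name on first use.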
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        drv->uploadDeferred = true;
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget, alloc->mHal.state.type->getSizeBytes(),
                 alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}


static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
    // YUV only supports basic 2d
    // so we can stash the plane pointers in the mipmap levels.
    size_t uvSize = 0;
#ifndef RS_SERVER
    switch(yuv) {
    case HAL_PIXEL_FORMAT_YV12:
        state->lod[2].dimX = state->lod[0].dimX / 2;
        state->lod[2].dimY = state->lod[0].dimY / 2;
        state->lod[2].stride = rsRound(state->lod[0].stride >> 1, 16);
        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[2].stride * state->lod[2].dimY;

        state->lod[1].dimX = state->lod[2].dimX;
        state->lod[1].dimY = state->lod[2].dimY;
        state->lod[1].stride = state->lod[2].stride;
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) +
                (state->lod[2].stride * state->lod[2].dimY);
        uvSize += state->lod[1].stride * state->lod[2].dimY;

        state->lodCount = 3;
        break;
    case HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
        state->lod[1].dimX = state->lod[0].dimX;
        state->lod[1].dimY = state->lod[0].dimY / 2;
        state->lod[1].stride = state->lod[0].stride;
        state->lod[1].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
                (state->lod[0].stride * state->lod[0].dimY);
        uvSize += state->lod[1].stride * state->lod[1].dimY;
        state->lodCount = 2;
        break;
    default:
        rsAssert(0);
    }
#endif
    return uvSize;
}


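// Fills in the per-LOD dimensions, strides, and pointers for the allocation
// (including the YUV plane layout when applicable) and returns the total size
// in bytes of the backing store. Called once with ptr == NULL to size the
// buffer, then again with the real pointer to finalize the table.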
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
        const Type *type, uint8_t *ptr) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    // Stride needs to be 16-byte aligned too!
    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lod[0].stride = rsRound(stride, 16);
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if(alloc->mHal.drvState.lodCount > 1) {
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride =
                    rsRound(tx * type->getElementSizeBytes(), 16);
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    } else if (alloc->mHal.state.yuv) {
        o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);

        for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
            offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
        }
    }

    alloc->mHal.drvState.faceOffset = o;

    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if(alloc->mHal.drvState.faceCount) {
        allocSize *= 6;
    }

    return allocSize;
}

static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero) {
    // We align all allocations to a 16-byte boundary.
    uint8_t* ptr = (uint8_t *)memalign(16, allocSize);
    if (!ptr) {
        return NULL;
    }
    if (forceZero) {
        memset(ptr, 0, allocSize);
    }
    return ptr;
}

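// Creates the driver-side state for an allocation: sizes the backing store,
// allocates or adopts host memory depending on the usage flags and any
// user-provided pointer, and records the GL target/type/format for later
// graphics syncs.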
bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    if (!drv) {
        return false;
    }
    alloc->mHal.drv = drv;

    // Calculate the object size.
    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), NULL);

    uint8_t * ptr = NULL;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {

    } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        // Allocation is allocated when the surface is created
        // in getSurface
    } else if (alloc->mHal.state.userProvidedPtr != NULL) {
        // User-provided allocation.
        // Limitations: no faces, no LODs; usage must be exactly
        // USAGE_SCRIPT | USAGE_SHARED, optionally plus USAGE_GRAPHICS_TEXTURE.
        if (!(alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED) ||
              alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED | RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE))) {
            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT | USAGE_SHARED or USAGE_SCRIPT | USAGE_SHARED | USAGE_GRAPHICS_TEXTURE");
            return false;
        }
        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
            ALOGE("User-allocated buffers must not have multiple faces or LODs");
            return false;
        }

        // Rows must be 16-byte aligned.
        // Validate that here; otherwise fall back to a driver-allocated buffer
        // instead of using the user-backed one.
        if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % 16) != 0) {
            ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
            drv->useUserProvidedPtr = false;

            ptr = allocAlignedMemory(allocSize, forceZero);
            if (!ptr) {
                alloc->mHal.drv = NULL;
                free(drv);
                return false;
            }

        } else {
            drv->useUserProvidedPtr = true;
            ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
        }
    } else {
        ptr = allocAlignedMemory(allocSize, forceZero);
        if (!ptr) {
            alloc->mHal.drv = NULL;
            free(drv);
            return false;
        }
    }
    // Build the pointer tables
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
    if(allocSize != verifySize) {
        rsAssert(!"Size mismatch");
    }

#ifndef RS_SERVER
    drv->glTarget = GL_NONE;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        if (alloc->mHal.state.hasFaces) {
            drv->glTarget = GL_TEXTURE_CUBE_MAP;
        } else {
            drv->glTarget = GL_TEXTURE_2D;
        }
    } else {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
            drv->glTarget = GL_ARRAY_BUFFER;
        }
    }
#endif

#ifndef RS_COMPATIBILITY_LIB
    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
#else
    drv->glType = 0;
    drv->glFormat = 0;
#endif

    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
        drv->uploadDeferred = true;
    }


    drv->readBackFBO = NULL;

    // fill out the initial state of the buffer if we couldn't use the user-provided ptr and USAGE_SHARED was accepted
    if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
        rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X, alloc->getType()->getDimX(), alloc->getType()->getDimY(), alloc->mHal.state.userProvidedPtr, allocSize, 0);
    }

    return true;
}

void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

#ifndef RS_COMPATIBILITY_LIB
    if (drv->bufferID) {
        // Causes a SW crash....
        //ALOGV(" mBufferID %i", mBufferID);
        //glDeleteBuffers(1, &mBufferID);
        //mBufferID = 0;
    }
    if (drv->textureID) {
        RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
        drv->textureID = 0;
    }
    if (drv->renderTargetID) {
        RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
        drv->renderTargetID = 0;
    }
#endif

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        // don't free user-allocated ptrs
        if (!(drv->useUserProvidedPtr)) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
        }
        alloc->mHal.drvState.lod[0].mallocPtr = NULL;
    }

#ifndef RS_COMPATIBILITY_LIB
    if (drv->readBackFBO != NULL) {
        delete drv->readBackFBO;
        drv->readBackFBO = NULL;
    }

    if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
        (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {

        DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
        ANativeWindow *nw = drv->wndSurface;
        if (nw) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
        }
    }
#endif

    free(drv);
    alloc->mHal.drv = NULL;
}

void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
                         const Type *newType, bool zeroNew) {
    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
    const uint32_t dimX = newType->getDimX();

    // can't resize Allocations with user-allocated buffers
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
        return;
    }
    void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
    // Calculate the object size
    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, NULL);
    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
    // Build the relative pointer tables.
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
    if(s != verifySize) {
        rsAssert(!"Size mismatch");
    }


    if (dimX > oldDimX) {
        size_t stride = alloc->mHal.state.elementSizeBytes;
        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
                 0, stride * (dimX - oldDimX));
    }
}

static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    if (drv->readBackFBO == NULL) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Revert framebuffer to its original
    lastFbo->setActive(rsc);
#endif
}


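// Propagates the allocation's contents between its script-side backing store
// and its graphics-side objects (texture, render target, vertex buffer),
// depending on the sync source and usage flags.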
void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
                         RsAllocationUsageType src) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        if(!alloc->getIsRenderTarget()) {
            rsc->setError(RS_ERROR_FATAL_DRIVER,
                          "Attempting to sync allocation from render target, "
                          "for non-render target allocation");
        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
            rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from an RGBA "
                                                 "render target");
        } else {
            rsdAllocationSyncFromFBO(rsc, alloc);
        }
        return;
    }

    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        UploadToTexture(rsc, alloc);
    } else {
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
            AllocateRenderTarget(rsc, alloc);
        }
    }
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
        UploadToBufferObject(rsc, alloc);
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        // NOP in CPU driver for now
    }

    drv->uploadDeferred = false;
}

void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    drv->uploadDeferred = true;
}

#ifndef RS_COMPATIBILITY_LIB
void DrvAllocation::NewBufferListener::onFrameAvailable() {
    intptr_t ip = (intptr_t)alloc;
    rsc->sendMessageToClient(NULL, RS_MESSAGE_TO_CLIENT_NEW_BUFFER, ip, 0, true);
}
#endif

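// Creates the CpuConsumer used for USAGE_IO_INPUT allocations and returns its
// IGraphicBufferProducer, with a frame-available listener that notifies the
// client when a new buffer arrives.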
void* rsdAllocationGetSurface(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    // Configure CpuConsumer to be in asynchronous mode
    drv->cpuConsumer = new CpuConsumer(2, false);
    sp<IGraphicBufferProducer> bp = drv->cpuConsumer->getProducerInterface();
    bp->incStrong(NULL);

    drv->mBufferListener = new DrvAllocation::NewBufferListener();
    drv->mBufferListener->rsc = rsc;
    drv->mBufferListener->alloc = alloc;

    drv->cpuConsumer->setFrameAvailableListener(drv->mBufferListener);

    return bp.get();
#else
    return NULL;
#endif
}

#ifndef RS_COMPATIBILITY_LIB
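// Dequeues the next buffer from the IO output surface, locks it for CPU
// writes, and points the allocation's level-0 storage at the mapped memory.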
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
        return false;
    }

    // Must lock the whole surface
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);

    void *dst = NULL;
    mapper.lock(drv->wndBuffer->handle,
            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
            bounds, &dst);
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);

    return true;
}
#endif

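// Attaches (or detaches) the ANativeWindow used for USAGE_IO_OUTPUT: releases
// any previously locked buffer, then configures the window's buffer count,
// usage, dimensions, and pixel format before dequeuing the first buffer.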
void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *old = drv->wndSurface;

    if (nw) {
        nw->incStrong(NULL);
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        //TODO finish support for render target + script
        drv->wnd = nw;
        return;
    }

    // Cleanup old surface if there is one.
    if (drv->wndSurface) {
        ANativeWindow *old = drv->wndSurface;
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        old->cancelBuffer(old, drv->wndBuffer, -1);
        drv->wndSurface = NULL;
        old->decStrong(NULL);
    }

    if (nw != NULL) {
        int32_t r;
        uint32_t flags = 0;
        r = native_window_set_buffer_count(nw, 3);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer count.");
            goto error;
        }

        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
        }
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
            flags |= GRALLOC_USAGE_HW_RENDER;
        }

        r = native_window_set_usage(nw, flags);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
            goto error;
        }

        r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
                                                 alloc->mHal.drvState.lod[0].dimY);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
            goto error;
        }

        int format = 0;
        const Element *e = alloc->mHal.state.type->getElement();
        switch(e->getType()) {
        case RS_TYPE_UNSIGNED_8:
            switch (e->getVectorSize()) {
            case 1:
                rsAssert(e->getKind() == RS_KIND_PIXEL_A);
                format = PIXEL_FORMAT_A_8;
                break;
            case 4:
                rsAssert(e->getKind() == RS_KIND_PIXEL_RGBA);
                format = PIXEL_FORMAT_RGBA_8888;
                break;
            default:
                rsAssert(0);
            }
            break;
        default:
            rsAssert(0);
        }

        r = native_window_set_buffers_format(nw, format);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer format.");
            goto error;
        }

        IoGetBuffer(rsc, alloc, nw);
        drv->wndSurface = nw;
    }

    return;

 error:

    if (nw) {
        nw->decStrong(NULL);
    }


#endif
}

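// Queues the currently locked buffer to the output surface (or swaps the EGL
// surface for render-target allocations) and immediately dequeues the next
// buffer for the script to write into.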
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = drv->wndSurface;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }
    if (nw) {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
                return;
            }

            IoGetBuffer(rsc, alloc, nw);
        }
    } else {
        rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
        return;
    }
#endif
}

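// Latches the newest frame from the CpuConsumer for USAGE_IO_INPUT
// allocations, releasing the previously held buffer and re-deriving the YUV
// plane layout if needed; non-script consumers just update the GL texture.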
void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
        CpuConsumer::LockedBuffer lb;
        status_t ret = drv->cpuConsumer->lockNextBuffer(&lb);
        if (ret == OK) {
            if (drv->lb.data != NULL) {
                drv->cpuConsumer->unlockBuffer(drv->lb);
            }
            drv->lb = lb;
            alloc->mHal.drvState.lod[0].mallocPtr = drv->lb.data;
            alloc->mHal.drvState.lod[0].stride = drv->lb.stride *
                    alloc->mHal.state.elementSizeBytes;

            if (alloc->mHal.state.yuv) {
                DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);
            }
        } else if (ret == BAD_VALUE) {
            // No new frame, don't do anything
        } else {
            rsc->setError(RS_ERROR_DRIVER, "Error receiving IO input buffer.");
        }

    } else {
        drv->surfaceTexture->updateTexImage();
    }


#endif
}


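// The Data*/Read* entry points below copy between user memory and the
// allocation's backing store, honoring per-LOD strides and, for element types
// that hold object references, adjusting reference counts on copy-in.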
void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         const void *data, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    size_t size = count * eSize;

    if (ptr != data) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        if (alloc->mHal.state.hasReferences) {
            alloc->incRefs(data, count);
            alloc->decRefs(ptr, count);
        }
        memcpy(ptr, data, size);
    }
    drv->uploadDeferred = true;
}

void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            if (alloc->mHal.state.hasReferences) {
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        if (alloc->mHal.state.yuv) {
            int lod = 1;
            while (alloc->mHal.drvState.lod[lod].mallocPtr) {
                size_t lineSize = alloc->mHal.drvState.lod[lod].dimX;
                uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);

                for (uint32_t line=(yoff >> 1); line < ((yoff+h)>>1); line++) {
                    memcpy(dst, src, lineSize);
                    src += lineSize;
                    dst += alloc->mHal.drvState.lod[lod].stride;
                }
                lod++;
            }

        }
        drv->uploadDeferred = true;
    } else {
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}

void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod,
                         uint32_t w, uint32_t h, uint32_t d, const void *data,
                         size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        for (uint32_t z = zoff; z < (zoff + d); z++) {
            uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, z, lod,
                                        RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            if (dst == src) {
                // Skip the copy if we are the same allocation. This can arise from
                // our Bitmap optimization, where we share the same storage.
                drv->uploadDeferred = true;
                return;
            }

            for (uint32_t line=yoff; line < (yoff+h); line++) {
                if (alloc->mHal.state.hasReferences) {
                    alloc->incRefs(src, w);
                    alloc->decRefs(dst, w);
                }
                memcpy(dst, src, lineSize);
                src += stride;
                dst += alloc->mHal.drvState.lod[lod].stride;
            }
        }
        drv->uploadDeferred = true;
    }
}

void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         void *data, size_t sizeBytes) {
    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    if (data != ptr) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        memcpy(data, ptr, count * eSize);
    }
}

void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
                                uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                                uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            memcpy(dst, src, lineSize);
            dst += stride;
            src += alloc->mHal.drvState.lod[lod].stride;
        }
    } else {
        ALOGE("Add code to readback from non-script memory");
    }
}


void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod,
                         uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes, size_t stride) {
    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        for (uint32_t z = zoff; z < (zoff + d); z++) {
            const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, z, lod,
                                              RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            if (dst == src) {
                // Skip the copy if we are the same allocation. This can arise from
                // our Bitmap optimization, where we share the same storage.
                return;
            }

            for (uint32_t line=yoff; line < (yoff+h); line++) {
                memcpy(dst, src, lineSize);
                dst += stride;
                src += alloc->mHal.drvState.lod[lod].stride;
            }
        }
    }
}

void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {
    return alloc->mHal.drvState.lod[0].mallocPtr;
}

void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {

}

void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, size_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
}


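// Allocation-to-allocation copies: the *_alloc_script helpers copy row by row
// between two malloc-backed allocations, while the public *_alloc wrappers
// reject copies where neither allocation is script-backed.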
void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                                      RsAllocationCubemapFace srcFace) {
    size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t i = 0; i < h; i ++) {
        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, 0, dstLod, dstFace);
        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, 0, srcLod, srcFace);
        memcpy(dstPtr, srcPtr, w * elementSize);

        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
        //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
    }
}

void rsdAllocationData3D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff, uint32_t dstLod,
                                      uint32_t w, uint32_t h, uint32_t d,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff, uint32_t srcLod) {
    uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t j = 0; j < d; j++) {
        for (uint32_t i = 0; i < h; i ++) {
            uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstZoff + j,
                                           dstLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcZoff + j,
                                           srcLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
            memcpy(dstPtr, srcPtr, w * elementSize);

            //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
            //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
        }
    }
}

void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                               RsAllocationCubemapFace srcFace) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
                                     dstLod, dstFace, w, h, srcAlloc,
                                     srcXoff, srcYoff, srcLod, srcFace);
}

void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
                               uint32_t dstLod,
                               uint32_t w, uint32_t h, uint32_t d,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
                               uint32_t srcLod) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData3D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff, dstZoff,
                                     dstLod, w, h, d, srcAlloc,
                                     srcXoff, srcYoff, srcZoff, srcLod);
}

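// ElementData writes a single field (sub-element cIdx) of one element at the
// given coordinates, updating reference counts when the field holds objects.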
void rsdAllocationElementData1D(const Context *rsc, const Allocation *alloc,
                                uint32_t x,
                                const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

void rsdAllocationElementData2D(const Context *rsc, const Allocation *alloc,
                                uint32_t x, uint32_t y,
                                const void *data, uint32_t cIdx, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, y, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

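// Software box-filter downsampling helpers used by
// rsdAllocationGenerateMipmaps: each produces LOD (lod + 1) by averaging 2x2
// blocks of the parent LOD, for 565, 8888, and single-channel 8-bit elements.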
static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
    if(!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }
    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
    for (uint32_t face = 0; face < numFaces; face ++) {
        for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
            switch (alloc->getType()->getElement()->getSizeBits()) {
            case 32:
                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 16:
                mip565(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 8:
                mip8(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            }
        }
    }
}