rsdAllocation.cpp revision cc8cea7477352898921044483a6c803e25d02665
1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "rsdCore.h"
18#include "rsdAllocation.h"
19
20#include "rsAllocation.h"
21
22#if !defined(RS_SERVER) && !defined(RS_COMPATIBILITY_LIB)
23#include "system/window.h"
24#include "ui/Rect.h"
25#include "ui/GraphicBufferMapper.h"
26#endif
27
28#ifdef RS_COMPATIBILITY_LIB
29#include "rsCompatibilityLib.h"
30#else
31#include "rsdFrameBufferObj.h"
32#include "gui/GLConsumer.h"
33#include "gui/CpuConsumer.h"
34#include "gui/Surface.h"
35#include "hardware/gralloc.h"
36
37#include <GLES/gl.h>
38#include <GLES2/gl2.h>
39#include <GLES/glext.h>
40#endif
41
42#ifdef RS_SERVER
43// server requires malloc.h for memalign
44#include <malloc.h>
45#endif
46
47using namespace android;
48using namespace android::renderscript;
49
50#ifndef RS_COMPATIBILITY_LIB
// GL cubemap face targets, indexed by RsAllocationCubemapFace value.
// NOTE(review): relies on the face enum being 0..5 in +X,-X,+Y,-Y,+Z,-Z
// order - confirm against rsDefines.h before reordering.
const static GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};
59
60GLenum rsdTypeToGLType(RsDataType t) {
61    switch (t) {
62    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
63    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
64    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;
65
66    //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
67    case RS_TYPE_FLOAT_32:      return GL_FLOAT;
68    case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
69    case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
70    case RS_TYPE_SIGNED_8:      return GL_BYTE;
71    case RS_TYPE_SIGNED_16:     return GL_SHORT;
72    default:    break;
73    }
74    return 0;
75}
76
77GLenum rsdKindToGLFormat(RsDataKind k) {
78    switch (k) {
79    case RS_KIND_PIXEL_L: return GL_LUMINANCE;
80    case RS_KIND_PIXEL_A: return GL_ALPHA;
81    case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
82    case RS_KIND_PIXEL_RGB: return GL_RGB;
83    case RS_KIND_PIXEL_RGBA: return GL_RGBA;
84    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
85    default: break;
86    }
87    return 0;
88}
89#endif
90
// Compute the CPU address of element (xoff, yoff, zoff) at the given mip
// level and cubemap face inside an allocation's backing store.
// Layout (built by AllocationBuildPointerTable): faces are spaced
// faceOffset bytes apart, a Z slice spans dimY rows, and each row is
// 'stride' bytes.
uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
                      uint32_t xoff, uint32_t yoff, uint32_t zoff,
                      uint32_t lod, RsAllocationCubemapFace face) {
    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
    ptr += face * alloc->mHal.drvState.faceOffset;  // select cubemap face
    ptr += zoff * alloc->mHal.drvState.lod[lod].dimY * alloc->mHal.drvState.lod[lod].stride;  // Z slice
    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;  // row
    ptr += xoff * alloc->mHal.state.elementSizeBytes;    // column
    return ptr;
}
101
102
// Copy a (w x h) region of host memory into the allocation's GL texture at
// (xoff, yoff) of the given mip level / cubemap face. Compiled out (no-op)
// in the compatibility library, which has no GL path.
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);  // texture object must already exist
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    // Source rows are tightly packed; disable GL's default 4-byte row align.
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        t = gFaceOrder[face];  // cubemap: use the per-face target
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}
119
120
121#ifndef RS_COMPATIBILITY_LIB
// Upload every face and mip level of the allocation's host backing store to
// its GL texture. When isFirstUpload is set, the texture has no storage yet
// so glTexImage2D defines it; otherwise glTexSubImage2D overwrites the
// existing storage in place.
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);  // rows are tightly packed

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;  // cubemap: upload each face separately
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face ++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            // Host pointer for this face/level.
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                             alloc->mHal.state.type->getLODDimX(lod),
                             alloc->mHal.state.type->getLODDimY(lod),
                             0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                                alloc->mHal.state.type->getLODDimX(lod),
                                alloc->mHal.state.type->getLODDimY(lod),
                                drv->glFormat, drv->glType, p);
            }
        }
    }

    // Let the GPU regenerate mips when the allocation asked for automatic
    // mipmaps on sync.
    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
162#endif
163
// Sync an allocation's host data into its GL texture, creating the texture
// object on first use. IO_INPUT allocations only get a texture name here;
// their contents arrive through the SurfaceTexture/ioReceive path.
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    // Element type/kind has no GL representation; nothing to upload.
    if (!drv->glType || !drv->glFormat) {
        return;
    }

    // No host backing store (e.g. it was already released below).
    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;  // storage must first be defined via glTexImage2D
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    // A texture-only allocation no longer needs its CPU copy once uploaded;
    // free it to save memory.
    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = nullptr;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}
201
// Lazily create the GL renderbuffer backing a render-target allocation,
// sized to LOD 0. No-op if the element has no GL format or the renderbuffer
// already exists.
static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}
226
// Sync a 1D allocation's host data into its GL buffer object (vertex data),
// creating the buffer on first use. On failure the upload is re-queued via
// uploadDeferred so a later sync can retry.
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    // Buffer objects are 1D only.
    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        drv->uploadDeferred = true;  // retry on the next sync
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget,
                alloc->mHal.state.type->getPackedSizeBytes(),
                alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}
252
253
254static size_t DeriveYUVLayout(int yuv, Allocation::Hal::DrvState *state) {
255    // YUV only supports basic 2d
256    // so we can stash the plane pointers in the mipmap levels.
257    size_t uvSize = 0;
258    state->lod[1].dimX = state->lod[0].dimX / 2;
259    state->lod[1].dimY = state->lod[0].dimY / 2;
260    state->lod[2].dimX = state->lod[0].dimX / 2;
261    state->lod[2].dimY = state->lod[0].dimY / 2;
262    state->yuv.shift = 1;
263    state->yuv.step = 1;
264    state->lodCount = 3;
265
266#ifndef RS_SERVER
267    switch(yuv) {
268    case HAL_PIXEL_FORMAT_YV12:
269        state->lod[2].stride = rsRound(state->lod[0].stride >> 1, 16);
270        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
271                (state->lod[0].stride * state->lod[0].dimY);
272        uvSize += state->lod[2].stride * state->lod[2].dimY;
273
274        state->lod[1].stride = state->lod[2].stride;
275        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) +
276                (state->lod[2].stride * state->lod[2].dimY);
277        uvSize += state->lod[1].stride * state->lod[2].dimY;
278        break;
279    case HAL_PIXEL_FORMAT_YCrCb_420_SP:  // NV21
280        //state->lod[1].dimX = state->lod[0].dimX;
281        state->lod[1].stride = state->lod[0].stride;
282        state->lod[2].stride = state->lod[0].stride;
283        state->lod[2].mallocPtr = ((uint8_t *)state->lod[0].mallocPtr) +
284                (state->lod[0].stride * state->lod[0].dimY);
285        state->lod[1].mallocPtr = ((uint8_t *)state->lod[2].mallocPtr) + 1;
286        uvSize += state->lod[1].stride * state->lod[1].dimY;
287        state->yuv.step = 2;
288        break;
289#ifndef RS_COMPATIBILITY_LIB
290    case HAL_PIXEL_FORMAT_YCbCr_420_888:
291        // This will be filled in by ioReceive()
292        break;
293#endif
294    default:
295        rsAssert(0);
296    }
297#endif
298    return uvSize;
299}
300
301
// Populate alloc->mHal.drvState (dimensions, strides, per-LOD pointers) for
// 'type' on top of the backing store 'ptr', and return the total byte size
// required. Called twice during init: first with ptr == nullptr as a pure
// sizing pass, then again with the real pointer to build the final table.
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
        const Type *type, uint8_t *ptr) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    // Stride needs to be 16-byte aligned too!
    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lod[0].stride = rsRound(stride, 16);
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    // Byte offset of each LOD from the start of the backing store.
    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    // Running size: LOD 0 occupies stride * dimY * dimZ bytes (dims clamped
    // to at least 1 so 1D/2D types still size correctly).
    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if (alloc->mHal.state.yuv) {
        // DeriveYUVLayout stashes chroma-plane pointers in lod[1]/lod[2],
        // relative to lod[0].mallocPtr == 0; capture them as byte offsets.
        o += DeriveYUVLayout(alloc->mHal.state.yuv, &alloc->mHal.drvState);

        for (uint32_t ct = 1; ct < alloc->mHal.drvState.lodCount; ct++) {
            offsets[ct] = (size_t)alloc->mHal.drvState.lod[ct].mallocPtr;
        }
    } else if(alloc->mHal.drvState.lodCount > 1) {
        // Standard mip chain: each level halves every dimension (min 1).
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride =
                    rsRound(tx * type->getElementSizeBytes(), 16);
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    }

    alloc->mHal.drvState.faceOffset = o;  // bytes per cubemap face

    // Apply the offsets to the real base pointer (nullptr in sizing pass).
    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if(alloc->mHal.drvState.faceCount) {
        allocSize *= 6;  // six cubemap faces
    }

    return allocSize;
}
357
// Allocate 'allocSize' bytes on a 16-byte boundary, optionally zero-filled.
// Returns nullptr when the underlying allocation fails.
static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero) {
    // All allocation backing stores are 16-byte aligned.
    uint8_t* buf = static_cast<uint8_t*>(memalign(16, allocSize));
    if (buf != nullptr && forceZero) {
        memset(buf, 0, allocSize);
    }
    return buf;
}
369
370bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
371    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
372    if (!drv) {
373        return false;
374    }
375    alloc->mHal.drv = drv;
376
377    // Calculate the object size.
378    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), nullptr);
379
380    uint8_t * ptr = nullptr;
381    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {
382
383    } else if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
384        // Allocation is allocated when the surface is created
385        // in getSurface
386    } else if (alloc->mHal.state.userProvidedPtr != nullptr) {
387        // user-provided allocation
388        // limitations: no faces, no LOD, USAGE_SCRIPT or SCRIPT+TEXTURE only
389        if (!(alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED) ||
390              alloc->mHal.state.usageFlags == (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED | RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE))) {
391            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT | USAGE_SHARED or USAGE_SCRIPT | USAGE_SHARED | USAGE_GRAPHICS_TEXTURE");
392            return false;
393        }
394        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
395            ALOGE("User-allocated buffers must not have multiple faces or LODs");
396            return false;
397        }
398
399        // rows must be 16-byte aligned
400        // validate that here, otherwise fall back to not use the user-backed allocation
401        if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % 16) != 0) {
402            ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
403            drv->useUserProvidedPtr = false;
404
405            ptr = allocAlignedMemory(allocSize, forceZero);
406            if (!ptr) {
407                alloc->mHal.drv = nullptr;
408                free(drv);
409                return false;
410            }
411
412        } else {
413            drv->useUserProvidedPtr = true;
414            ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
415        }
416    } else {
417        ptr = allocAlignedMemory(allocSize, forceZero);
418        if (!ptr) {
419            alloc->mHal.drv = nullptr;
420            free(drv);
421            return false;
422        }
423    }
424    // Build the pointer tables
425    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
426    if(allocSize != verifySize) {
427        rsAssert(!"Size mismatch");
428    }
429
430#ifndef RS_SERVER
431    drv->glTarget = GL_NONE;
432    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
433        if (alloc->mHal.state.hasFaces) {
434            drv->glTarget = GL_TEXTURE_CUBE_MAP;
435        } else {
436            drv->glTarget = GL_TEXTURE_2D;
437        }
438    } else {
439        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
440            drv->glTarget = GL_ARRAY_BUFFER;
441        }
442    }
443#endif
444
445#ifndef RS_COMPATIBILITY_LIB
446    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
447    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
448#else
449    drv->glType = 0;
450    drv->glFormat = 0;
451#endif
452
453    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
454        drv->uploadDeferred = true;
455    }
456
457
458    drv->readBackFBO = nullptr;
459
460    // fill out the initial state of the buffer if we couldn't use the user-provided ptr and USAGE_SHARED was accepted
461    if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
462        rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X, alloc->getType()->getDimX(), alloc->getType()->getDimY(), alloc->mHal.state.userProvidedPtr, allocSize, 0);
463    }
464
465
466#ifdef RS_FIND_OFFSETS
467    ALOGE("pointer for allocation: %p", alloc);
468    ALOGE("pointer for allocation.drv: %p", &alloc->mHal.drv);
469#endif
470
471
472    return true;
473}
474
// Re-derive an adapter Allocation's per-LOD pointer table so it aliases the
// window selected by originX/originY/originLOD inside its base Allocation's
// storage. No-op when there is no base allocation.
void rsdAllocationAdapterOffset(const Context *rsc, const Allocation *alloc) {
    //ALOGE("rsdAllocationAdapterOffset");

    // Get a base pointer to the new LOD
    const Allocation *base = alloc->mHal.state.baseAlloc;
    const Type *type = alloc->mHal.state.type;  // NOTE(review): unused here
    if (base == nullptr) {
        return;
    }

    // (ptrA - ptrB) is the byte offset of the adapter's 2D origin within
    // the base allocation's storage.
    uint8_t * ptrA = (uint8_t *)base->getPointerUnchecked(alloc->mHal.state.originX, alloc->mHal.state.originY);
    uint8_t * ptrB = (uint8_t *)base->getPointerUnchecked(0, 0);

    //ALOGE("rsdAllocationAdapterOffset  %p  %p", ptrA, ptrB);
    //ALOGE("rsdAllocationAdapterOffset  lodCount %i", alloc->mHal.drvState.lodCount);

    // Copy each LOD record from the base (shifted by originLOD), then slide
    // its pointer by the origin offset.
    const int lodBias = alloc->mHal.state.originLOD;
    uint32_t lodCount = rsMax(alloc->mHal.drvState.lodCount, (uint32_t)1);
    for (uint32_t lod=0; lod < lodCount; lod++) {
        alloc->mHal.drvState.lod[lod] = base->mHal.drvState.lod[lod + lodBias];
        alloc->mHal.drvState.lod[lod].mallocPtr =
                ((uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr + (ptrA - ptrB));
        //ALOGE("rsdAllocationAdapterOffset  lod  %p  %i %i", alloc->mHal.drvState.lod[lod].mallocPtr, alloc->mHal.drvState.lod[lod].dimX, alloc->mHal.drvState.lod[lod].dimY);
    }
}
500
501bool rsdAllocationAdapterInit(const Context *rsc, Allocation *alloc) {
502    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
503    if (!drv) {
504        return false;
505    }
506    alloc->mHal.drv = drv;
507
508    // We need to build an allocation that looks like a subset of the parent allocation
509    rsdAllocationAdapterOffset(rsc, alloc);
510
511    return true;
512}
513
514void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
515    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
516
517    if (alloc->mHal.state.baseAlloc == nullptr) {
518#ifndef RS_COMPATIBILITY_LIB
519        if (drv->bufferID) {
520            // Causes a SW crash....
521            //ALOGV(" mBufferID %i", mBufferID);
522            //glDeleteBuffers(1, &mBufferID);
523            //mBufferID = 0;
524        }
525        if (drv->textureID) {
526            RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
527            drv->textureID = 0;
528        }
529        if (drv->renderTargetID) {
530            RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
531            drv->renderTargetID = 0;
532        }
533#endif
534
535        if (alloc->mHal.drvState.lod[0].mallocPtr) {
536            // don't free user-allocated ptrs or IO_OUTPUT buffers
537            if (!(drv->useUserProvidedPtr) &&
538                !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) &&
539                !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
540                    free(alloc->mHal.drvState.lod[0].mallocPtr);
541            }
542            alloc->mHal.drvState.lod[0].mallocPtr = nullptr;
543        }
544
545#ifndef RS_COMPATIBILITY_LIB
546        if (drv->readBackFBO != nullptr) {
547            delete drv->readBackFBO;
548            drv->readBackFBO = nullptr;
549        }
550
551        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
552            (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
553
554            DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
555            ANativeWindow *nw = drv->wndSurface;
556            if (nw) {
557                GraphicBufferMapper &mapper = GraphicBufferMapper::get();
558                mapper.unlock(drv->wndBuffer->handle);
559                int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
560
561                drv->wndSurface = nullptr;
562                native_window_api_disconnect(nw, NATIVE_WINDOW_API_CPU);
563                nw->decStrong(nullptr);
564            }
565        }
566#endif
567    }
568
569    free(drv);
570    alloc->mHal.drv = nullptr;
571}
572
573void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
574                         const Type *newType, bool zeroNew) {
575    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
576    const uint32_t dimX = newType->getDimX();
577
578    // can't resize Allocations with user-allocated buffers
579    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
580        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
581        return;
582    }
583    void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
584    // Calculate the object size
585    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, nullptr);
586    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
587    // Build the relative pointer tables.
588    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
589    if(s != verifySize) {
590        rsAssert(!"Size mismatch");
591    }
592
593
594    if (dimX > oldDimX) {
595        size_t stride = alloc->mHal.state.elementSizeBytes;
596        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
597                 0, stride * (dimX - oldDimX));
598    }
599}
600
// Read the GPU contents of a render-target allocation back into its host
// backing store via a lazily created read-back FBO and glReadPixels, then
// restore the previously active framebuffer.
static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;  // restored below

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    // Lazily build an FBO wrapping this allocation as its color target.
    if (drv->readBackFBO == nullptr) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Revert framebuffer to its original
    lastFbo->setActive(rsc);
#endif
}
633
634
635void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
636                         RsAllocationUsageType src) {
637    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
638
639    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
640        if(!alloc->getIsRenderTarget()) {
641            rsc->setError(RS_ERROR_FATAL_DRIVER,
642                          "Attempting to sync allocation from render target, "
643                          "for non-render target allocation");
644        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
645            rsc->setError(RS_ERROR_FATAL_DRIVER, "Cannot only sync from RGBA"
646                                                 "render target");
647        } else {
648            rsdAllocationSyncFromFBO(rsc, alloc);
649        }
650        return;
651    }
652
653    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT || src == RS_ALLOCATION_USAGE_SHARED);
654
655    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
656        UploadToTexture(rsc, alloc);
657    } else {
658        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
659            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
660            AllocateRenderTarget(rsc, alloc);
661        }
662    }
663    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
664        UploadToBufferObject(rsc, alloc);
665    }
666
667    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
668
669        if (src == RS_ALLOCATION_USAGE_SHARED) {
670            // just a memory fence for the CPU driver
671            // vendor drivers probably want to flush any dirty cachelines for
672            // this particular Allocation
673            __sync_synchronize();
674        }
675    }
676
677    drv->uploadDeferred = false;
678}
679
680void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
681    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
682    drv->uploadDeferred = true;
683}
684
685#ifndef RS_COMPATIBILITY_LIB
// Dequeue the next buffer from the IO_OUTPUT window, lock it for CPU
// writes, and point the allocation's LOD 0 at the mapped pixels.
// Returns false if no buffer could be dequeued.
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
        return false;
    }

    // Must lock the whole surface
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);

    void *dst = nullptr;
    // NOTE(review): the lock result is not checked; on failure dst would
    // stay nullptr and the allocation would have no backing store - confirm.
    mapper.lock(drv->wndBuffer->handle,
            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
            bounds, &dst);
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    // gralloc stride is in pixels; convert to bytes for the pointer table.
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);  // keep rows 16-byte aligned

    return true;
}
709#endif
710
711void rsdAllocationSetSurface(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
712#ifndef RS_COMPATIBILITY_LIB
713    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
714    ANativeWindow *old = drv->wndSurface;
715
716    if (nw) {
717        nw->incStrong(nullptr);
718    }
719
720    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
721        //TODO finish support for render target + script
722        drv->wnd = nw;
723        return;
724    }
725
726    // Cleanup old surface if there is one.
727    if (drv->wndSurface) {
728        ANativeWindow *old = drv->wndSurface;
729        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
730        mapper.unlock(drv->wndBuffer->handle);
731        old->cancelBuffer(old, drv->wndBuffer, -1);
732        drv->wndSurface = nullptr;
733
734        native_window_api_disconnect(old, NATIVE_WINDOW_API_CPU);
735        old->decStrong(nullptr);
736    }
737
738    if (nw != nullptr) {
739        int32_t r;
740        uint32_t flags = 0;
741
742        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
743            flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
744        }
745        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
746            flags |= GRALLOC_USAGE_HW_RENDER;
747        }
748
749        r = native_window_api_connect(nw, NATIVE_WINDOW_API_CPU);
750        if (r) {
751            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
752            goto error;
753        }
754
755        r = native_window_set_usage(nw, flags);
756        if (r) {
757            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
758            goto error;
759        }
760
761        r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
762                                                 alloc->mHal.drvState.lod[0].dimY);
763        if (r) {
764            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
765            goto error;
766        }
767
768        int format = 0;
769        const Element *e = alloc->mHal.state.type->getElement();
770        rsAssert(e->getType() == RS_TYPE_UNSIGNED_8);
771        rsAssert(e->getVectorSize() == 4);
772        rsAssert(e->getKind() == RS_KIND_PIXEL_RGBA);
773        format = PIXEL_FORMAT_RGBA_8888;
774
775        r = native_window_set_buffers_format(nw, format);
776        if (r) {
777            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer format.");
778            goto error;
779        }
780
781        IoGetBuffer(rsc, alloc, nw);
782        drv->wndSurface = nw;
783    }
784
785    return;
786
787 error:
788
789    if (nw) {
790        nw->decStrong(nullptr);
791    }
792
793
794#endif
795}
796
// Hand the current IO_OUTPUT frame to its consumer. Render-target
// allocations swap the EGL surface; otherwise the locked gralloc buffer is
// unlocked and queued on the window, then the next buffer is immediately
// dequeued so the script keeps a valid backing pointer.
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = drv->wndSurface;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }
    if (nw) {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            GraphicBufferMapper &mapper = GraphicBufferMapper::get();
            mapper.unlock(drv->wndBuffer->handle);
            int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
            if (r) {
                rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
                return;
            }

            // Re-arm: dequeue and lock the next buffer for the script.
            IoGetBuffer(rsc, alloc, nw);
        }
    } else {
        rsc->setError(RS_ERROR_DRIVER, "Sent IO buffer with no attached surface.");
        return;
    }
#endif
}
824
825void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
826#ifndef RS_COMPATIBILITY_LIB
827    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
828    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
829        drv->surfaceTexture->updateTexImage();
830    }
831#endif
832}
833
834
// Copy 'count' elements of host data into the allocation starting at
// element xoff, keeping object refcounts balanced for refcounted elements,
// then mark the GL copy stale.
// NOTE(review): the 'lod' and 'sizeBytes' parameters are ignored - the copy
// always targets LOD 0 and moves count * elementSize bytes; confirm callers
// never pass a non-zero lod.
void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, size_t count,
                         const void *data, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    size_t size = count * eSize;
    if (ptr != data) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        if (alloc->mHal.state.hasReferences) {
            // Ref the incoming objects, deref the ones being overwritten.
            alloc->incRefs(data, count);
            alloc->decRefs(ptr, count);
        }
        memcpy(ptr, data, size);
    }
    drv->uploadDeferred = true;  // GL copy is now stale
}
854
void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    // Copies a w x h rectangle of elements from 'data' into the allocation at
    // (xoff, yoff) for the given mip level and cubemap face. 'stride' is the
    // source row pitch in bytes; 0 means tightly packed (w * element size).
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    size_t eSize = alloc->mHal.state.elementSizeBytes;
    size_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        // CPU-visible backing store: copy row by row.
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            if (alloc->mHal.state.hasReferences) {
                // Keep object refcounts balanced when overwriting elements
                // that hold references.
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        if (alloc->mHal.state.yuv) {
            // YUV allocations keep their chroma planes in higher drvState.lod
            // slots; copy those planes too. Chroma rows are vertically
            // subsampled by 2 (see the >>1 loop bounds below).
            size_t clineSize = lineSize;
            int lod = 1;
            int maxLod = 2;
            if (alloc->mHal.state.yuv == HAL_PIXEL_FORMAT_YV12) {
                // YV12: two separate chroma planes (lods 1 and 2), each at
                // half the luma row width.
                maxLod = 3;
                clineSize >>= 1;
            } else if (alloc->mHal.state.yuv == HAL_PIXEL_FORMAT_YCrCb_420_SP) {
                // NV21: interleaved CrCb plane stored in lod 2 only.
                lod = 2;
                maxLod = 3;
            }

            while (lod < maxLod) {
                uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);

                for (uint32_t line=(yoff >> 1); line < ((yoff+h)>>1); line++) {
                    memcpy(dst, src, clineSize);
                    // NOTE(review): src advances by the destination plane's
                    // stride rather than the caller's 'stride' — this assumes
                    // the source chroma rows are laid out with the same pitch
                    // as the plane; confirm against callers.
                    src += alloc->mHal.drvState.lod[lod].stride;
                    dst += alloc->mHal.drvState.lod[lod].stride;
                }
                lod++;
            }

        }
        drv->uploadDeferred = true;
    } else {
        // No CPU pointer (texture-only allocation): push straight to GL.
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}
914
915void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
916                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
917                         uint32_t lod,
918                         uint32_t w, uint32_t h, uint32_t d, const void *data,
919                         size_t sizeBytes, size_t stride) {
920    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
921
922    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
923    uint32_t lineSize = eSize * w;
924    if (!stride) {
925        stride = lineSize;
926    }
927
928    if (alloc->mHal.drvState.lod[0].mallocPtr) {
929        const uint8_t *src = static_cast<const uint8_t *>(data);
930        for (uint32_t z = zoff; z < d; z++) {
931            uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, z, lod,
932                                        RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
933            if (dst == src) {
934                // Skip the copy if we are the same allocation. This can arise from
935                // our Bitmap optimization, where we share the same storage.
936                drv->uploadDeferred = true;
937                return;
938            }
939
940            for (uint32_t line=yoff; line < (yoff+h); line++) {
941                if (alloc->mHal.state.hasReferences) {
942                    alloc->incRefs(src, w);
943                    alloc->decRefs(dst, w);
944                }
945                memcpy(dst, src, lineSize);
946                src += stride;
947                dst += alloc->mHal.drvState.lod[lod].stride;
948            }
949        }
950        drv->uploadDeferred = true;
951    }
952}
953
954void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
955                         uint32_t xoff, uint32_t lod, size_t count,
956                         void *data, size_t sizeBytes) {
957    const size_t eSize = alloc->mHal.state.type->getElementSizeBytes();
958    const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
959    if (data != ptr) {
960        // Skip the copy if we are the same allocation. This can arise from
961        // our Bitmap optimization, where we share the same storage.
962        memcpy(data, ptr, count * eSize);
963    }
964}
965
966void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
967                                uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
968                                uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
969    size_t eSize = alloc->mHal.state.elementSizeBytes;
970    size_t lineSize = eSize * w;
971    if (!stride) {
972        stride = lineSize;
973    }
974
975    if (alloc->mHal.drvState.lod[0].mallocPtr) {
976        uint8_t *dst = static_cast<uint8_t *>(data);
977        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, 0, lod, face);
978        if (dst == src) {
979            // Skip the copy if we are the same allocation. This can arise from
980            // our Bitmap optimization, where we share the same storage.
981            return;
982        }
983
984        for (uint32_t line=yoff; line < (yoff+h); line++) {
985            memcpy(dst, src, lineSize);
986            dst += stride;
987            src += alloc->mHal.drvState.lod[lod].stride;
988        }
989    } else {
990        ALOGE("Add code to readback from non-script memory");
991    }
992}
993
994
995void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
996                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
997                         uint32_t lod,
998                         uint32_t w, uint32_t h, uint32_t d, void *data, size_t sizeBytes, size_t stride) {
999    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
1000    uint32_t lineSize = eSize * w;
1001    if (!stride) {
1002        stride = lineSize;
1003    }
1004
1005    if (alloc->mHal.drvState.lod[0].mallocPtr) {
1006        uint8_t *dst = static_cast<uint8_t *>(data);
1007        for (uint32_t z = zoff; z < d; z++) {
1008            const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, z, lod,
1009                                              RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1010            if (dst == src) {
1011                // Skip the copy if we are the same allocation. This can arise from
1012                // our Bitmap optimization, where we share the same storage.
1013                return;
1014            }
1015
1016            for (uint32_t line=yoff; line < (yoff+h); line++) {
1017                memcpy(dst, src, lineSize);
1018                dst += stride;
1019                src += alloc->mHal.drvState.lod[lod].stride;
1020            }
1021        }
1022    }
1023}
1024
1025void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
1026                          const android::renderscript::Allocation *alloc) {
1027    return alloc->mHal.drvState.lod[0].mallocPtr;
1028}
1029
void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {
    // Intentionally empty: rsdAllocationLock1D returns the persistent malloc
    // pointer directly, so there is nothing to release on unlock.
}
1034
void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, size_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
    // Intentionally empty: 1D allocation-to-allocation copy is not implemented
    // in this driver. Presumably kept to satisfy the driver's HAL function
    // table — confirm against rsdCore's table setup.
}
1041
1042
1043void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
1044                                      const android::renderscript::Allocation *dstAlloc,
1045                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
1046                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
1047                                      const android::renderscript::Allocation *srcAlloc,
1048                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
1049                                      RsAllocationCubemapFace srcFace) {
1050    size_t elementSize = dstAlloc->getType()->getElementSizeBytes();
1051    for (uint32_t i = 0; i < h; i ++) {
1052        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, 0, dstLod, dstFace);
1053        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, 0, srcLod, srcFace);
1054        memcpy(dstPtr, srcPtr, w * elementSize);
1055
1056        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
1057        //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
1058    }
1059}
1060
1061void rsdAllocationData3D_alloc_script(const android::renderscript::Context *rsc,
1062                                      const android::renderscript::Allocation *dstAlloc,
1063                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff, uint32_t dstLod,
1064                                      uint32_t w, uint32_t h, uint32_t d,
1065                                      const android::renderscript::Allocation *srcAlloc,
1066                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff, uint32_t srcLod) {
1067    uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
1068    for (uint32_t j = 0; j < d; j++) {
1069        for (uint32_t i = 0; i < h; i ++) {
1070            uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstZoff + j,
1071                                           dstLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1072            uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcZoff + j,
1073                                           srcLod, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1074            memcpy(dstPtr, srcPtr, w * elementSize);
1075
1076            //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
1077            //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
1078        }
1079    }
1080}
1081
1082void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
1083                               const android::renderscript::Allocation *dstAlloc,
1084                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
1085                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
1086                               const android::renderscript::Allocation *srcAlloc,
1087                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
1088                               RsAllocationCubemapFace srcFace) {
1089    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
1090        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
1091                                             "yet implemented.");
1092        return;
1093    }
1094    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
1095                                     dstLod, dstFace, w, h, srcAlloc,
1096                                     srcXoff, srcYoff, srcLod, srcFace);
1097}
1098
1099void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
1100                               const android::renderscript::Allocation *dstAlloc,
1101                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
1102                               uint32_t dstLod,
1103                               uint32_t w, uint32_t h, uint32_t d,
1104                               const android::renderscript::Allocation *srcAlloc,
1105                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
1106                               uint32_t srcLod) {
1107    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
1108        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
1109                                             "yet implemented.");
1110        return;
1111    }
1112    rsdAllocationData3D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff, dstZoff,
1113                                     dstLod, w, h, d, srcAlloc,
1114                                     srcXoff, srcYoff, srcZoff, srcLod);
1115}
1116
1117void rsdAllocationElementData(const Context *rsc, const Allocation *alloc,
1118                              uint32_t x, uint32_t y, uint32_t z,
1119                              const void *data, uint32_t cIdx, size_t sizeBytes) {
1120    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
1121
1122    uint8_t * ptr = GetOffsetPtr(alloc, x, y, z, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1123
1124    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
1125    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);
1126
1127    if (alloc->mHal.state.hasReferences) {
1128        e->incRefs(data);
1129        e->decRefs(ptr);
1130    }
1131
1132    memcpy(ptr, data, sizeBytes);
1133    drv->uploadDeferred = true;
1134}
1135
1136void rsdAllocationElementRead(const Context *rsc, const Allocation *alloc,
1137                              uint32_t x, uint32_t y, uint32_t z,
1138                              void *data, uint32_t cIdx, size_t sizeBytes) {
1139    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
1140
1141    uint8_t * ptr = GetOffsetPtr(alloc, x, y, z, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
1142
1143    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
1144    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);
1145
1146    memcpy(data, ptr, sizeBytes);
1147}
1148
1149static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1150    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1151    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1152
1153    for (uint32_t y=0; y < h; y++) {
1154        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1155        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, 0, y*2, lod, face);
1156        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, 0, y*2+1, lod, face);
1157
1158        for (uint32_t x=0; x < w; x++) {
1159            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
1160            oPtr ++;
1161            i1 += 2;
1162            i2 += 2;
1163        }
1164    }
1165}
1166
1167static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1168    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1169    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1170
1171    for (uint32_t y=0; y < h; y++) {
1172        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1173        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
1174        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
1175
1176        for (uint32_t x=0; x < w; x++) {
1177            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
1178            oPtr ++;
1179            i1 += 2;
1180            i2 += 2;
1181        }
1182    }
1183}
1184
1185static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
1186    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
1187    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;
1188
1189    for (uint32_t y=0; y < h; y++) {
1190        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, 0, lod + 1, face);
1191        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, 0, lod, face);
1192        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, 0, lod, face);
1193
1194        for (uint32_t x=0; x < w; x++) {
1195            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
1196            oPtr ++;
1197            i1 += 2;
1198            i2 += 2;
1199        }
1200    }
1201}
1202
1203void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
1204    if(!alloc->mHal.drvState.lod[0].mallocPtr) {
1205        return;
1206    }
1207    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
1208    for (uint32_t face = 0; face < numFaces; face ++) {
1209        for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
1210            switch (alloc->getType()->getElement()->getSizeBits()) {
1211            case 32:
1212                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
1213                break;
1214            case 16:
1215                mip565(alloc, lod, (RsAllocationCubemapFace)face);
1216                break;
1217            case 8:
1218                mip8(alloc, lod, (RsAllocationCubemapFace)face);
1219                break;
1220            }
1221        }
1222    }
1223}
1224
uint32_t rsdAllocationGrallocBits(const android::renderscript::Context *rsc,
                                  android::renderscript::Allocation *alloc)
{
    // This driver requests no additional gralloc usage bits; always 0.
    return 0;
}
1230
1231void rsdAllocationUpdateCachedObject(const Context *rsc,
1232                                     const Allocation *alloc,
1233                                     rs_allocation *obj)
1234{
1235    obj->p = alloc;
1236#ifdef __LP64__
1237    if (alloc != nullptr) {
1238        obj->r = alloc->mHal.drvState.lod[0].mallocPtr;
1239        obj->v1 = alloc->mHal.drv;
1240        obj->v2 = (void *)alloc->mHal.drvState.lod[0].stride;
1241    } else {
1242        obj->r = nullptr;
1243        obj->v1 = nullptr;
1244        obj->v2 = nullptr;
1245    }
1246#endif
1247}
1248