rsdAllocation.cpp revision c2cfe6abfc74befbaa4d2ca09024a27fbfb1f515
/*
 * Copyright (C) 2011-2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


#include "rsdCore.h"
#include "rsdAllocation.h"

#include "rsAllocation.h"

#include "system/window.h"
#include "ui/Rect.h"
#include "ui/GraphicBufferMapper.h"

#ifndef RS_COMPATIBILITY_LIB
#include "rsdFrameBufferObj.h"
#include "gui/GLConsumer.h"
#include "hardware/gralloc.h"

#include <GLES/gl.h>
#include <GLES2/gl2.h>
#include <GLES/glext.h>
#endif

using namespace android;
using namespace android::renderscript;


#ifndef RS_COMPATIBILITY_LIB
const static GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

GLenum rsdTypeToGLType(RsDataType t) {
    switch (t) {
    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;

    //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
    case RS_TYPE_FLOAT_32:      return GL_FLOAT;
    case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
    case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
    case RS_TYPE_SIGNED_8:      return GL_BYTE;
    case RS_TYPE_SIGNED_16:     return GL_SHORT;
    default:    break;
    }
    return 0;
}

GLenum rsdKindToGLFormat(RsDataKind k) {
    switch (k) {
    case RS_KIND_PIXEL_L: return GL_LUMINANCE;
    case RS_KIND_PIXEL_A: return GL_ALPHA;
    case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
    case RS_KIND_PIXEL_RGB: return GL_RGB;
    case RS_KIND_PIXEL_RGBA: return GL_RGBA;
    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
    default: break;
    }
    return 0;
}
#endif

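// Returns a pointer to element (xoff, yoff) of the given LOD and cubemap face,
// using the per-LOD base pointers and strides precomputed in drvState.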
uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
                      uint32_t xoff, uint32_t yoff, uint32_t lod,
                      RsAllocationCubemapFace face) {
    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
    ptr += face * alloc->mHal.drvState.faceOffset;
    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
    ptr += xoff * alloc->mHal.state.elementSizeBytes;
    return ptr;
}


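// Uploads a sub-rectangle of host memory into the GL texture that backs the
// allocation (glTexSubImage2D). Compiled out for the compatibility library.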
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        t = gFaceOrder[face];
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}


#ifndef RS_COMPATIBILITY_LIB
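// Pushes every face and LOD of the allocation's backing store into GL.
// glTexImage2D is used for the first upload (which also allocates texture
// storage); later uploads reuse glTexSubImage2D. Mipmaps are regenerated when
// the allocation uses RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE.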
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face ++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                             alloc->mHal.state.type->getLODDimX(lod),
                             alloc->mHal.state.type->getLODDimY(lod),
                             0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                                alloc->mHal.state.type->getLODDimX(lod),
                                alloc->mHal.state.type->getLODDimY(lod),
                                drv->glFormat, drv->glType, p);
            }
        }
    }

    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
#endif

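// Syncs script-side memory into a GL texture, creating the texture name on
// first use. IO_INPUT allocations only get a texture name here; their contents
// arrive through the SurfaceTexture path (see rsdAllocationIoReceive). If the
// allocation is not also used by scripts, the malloc'd copy is freed after the
// upload.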
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    if (!drv->glType || !drv->glFormat) {
        return;
    }

    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = NULL;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}

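// Lazily creates the GL renderbuffer used when the allocation is a
// non-texture render target, sized to the base LOD dimensions.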
static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}

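// Copies a 1D allocation into a GL vertex buffer object, creating the buffer
// name on first use. Only 1D allocations may be used as vertex data, which the
// asserts below enforce.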
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        drv->uploadDeferred = true;
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget, alloc->mHal.state.type->getSizeBytes(),
                 alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}

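// Fills in the per-LOD dimensions, 16-byte-aligned strides, and base pointers
// for an allocation, and returns the total number of bytes required (times six
// for cubemap allocations). Passing ptr == NULL computes sizes only; the
// routine is called again with the real pointer to build the pointer table.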
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
        const Type *type, uint8_t *ptr) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    // Stride needs to be 16-byte aligned too!
    size_t stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lod[0].stride = rsRound(stride, 16);
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if (alloc->mHal.drvState.lodCount > 1) {
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride =
                    rsRound(tx * type->getElementSizeBytes(), 16);
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    }
    alloc->mHal.drvState.faceOffset = o;

    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if (alloc->mHal.drvState.faceCount) {
        allocSize *= 6;
    }

    return allocSize;
}

static uint8_t* allocAlignedMemory(size_t allocSize, bool forceZero) {
    // We align all allocations to a 16-byte boundary.
    uint8_t* ptr = (uint8_t *)memalign(16, allocSize);
    if (!ptr) {
        return NULL;
    }
    if (forceZero) {
        memset(ptr, 0, allocSize);
    }
    return ptr;
}

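// Driver-side allocation constructor. Sizes the backing store, then either
// defers allocation (IO_OUTPUT), adopts a user-provided pointer (USAGE_SHARED
// with a 16-byte-aligned row stride), or memaligns fresh memory. Also derives
// the GL target, type, and format used by the graphics paths.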
bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    if (!drv) {
        return false;
    }
    alloc->mHal.drv = drv;

    // Calculate the object size.
    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), NULL);

    uint8_t * ptr = NULL;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {
        // IO output allocations are backed by buffers dequeued from the
        // native window, so nothing is allocated here.
    } else if (alloc->mHal.state.userProvidedPtr != NULL) {
        // user-provided allocation
        // limitations: no faces, no LOD, USAGE_SCRIPT | USAGE_SHARED only
        if (alloc->mHal.state.usageFlags != (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED)) {
            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT and USAGE_SHARED");
            alloc->mHal.drv = NULL;
            free(drv);
            return false;
        }
        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
            ALOGE("User-allocated buffers must not have multiple faces or LODs");
            alloc->mHal.drv = NULL;
            free(drv);
            return false;
        }

        // rows must be 16-byte aligned
        // validate that here, otherwise fall back to a driver-owned allocation
        if (((alloc->getType()->getDimX() * alloc->getType()->getElement()->getSizeBytes()) % 16) != 0) {
            ALOGV("User-backed allocation failed stride requirement, falling back to separate allocation");
            drv->useUserProvidedPtr = false;

            ptr = allocAlignedMemory(allocSize, forceZero);
            if (!ptr) {
                alloc->mHal.drv = NULL;
                free(drv);
                return false;
            }

        } else {
            drv->useUserProvidedPtr = true;
            ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
        }
    } else {
        ptr = allocAlignedMemory(allocSize, forceZero);
        if (!ptr) {
            alloc->mHal.drv = NULL;
            free(drv);
            return false;
        }
    }
    // Build the pointer tables
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
    if (allocSize != verifySize) {
        rsAssert(!"Size mismatch");
    }

    drv->glTarget = GL_NONE;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        if (alloc->mHal.state.hasFaces) {
            drv->glTarget = GL_TEXTURE_CUBE_MAP;
        } else {
            drv->glTarget = GL_TEXTURE_2D;
        }
    } else {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
            drv->glTarget = GL_ARRAY_BUFFER;
        }
    }

#ifndef RS_COMPATIBILITY_LIB
    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
#else
    drv->glType = 0;
    drv->glFormat = 0;
#endif

    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
        drv->uploadDeferred = true;
    }


    drv->readBackFBO = NULL;

    // Fill out the initial contents of the buffer if we could not adopt the
    // user-provided pointer even though USAGE_SHARED was requested.
    if ((alloc->mHal.state.userProvidedPtr != 0) && (drv->useUserProvidedPtr == false)) {
        rsdAllocationData2D(rsc, alloc, 0, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X,
                            alloc->getType()->getDimX(), alloc->getType()->getDimY(),
                            alloc->mHal.state.userProvidedPtr, allocSize, 0);
    }

    return true;
}

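// Releases driver-side state: GL texture/renderbuffer names, the readback FBO,
// the malloc'd backing store (unless it is user-provided), and any IO output
// buffer still locked from the native window.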
void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

#ifndef RS_COMPATIBILITY_LIB
    if (drv->bufferID) {
        // Causes a SW crash....
        //ALOGV(" mBufferID %i", mBufferID);
        //glDeleteBuffers(1, &mBufferID);
        //mBufferID = 0;
    }
    if (drv->textureID) {
        RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
        drv->textureID = 0;
    }
    if (drv->renderTargetID) {
        RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
        drv->renderTargetID = 0;
    }
#endif

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        // don't free user-allocated ptrs
        if (!(drv->useUserProvidedPtr)) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
        }
        alloc->mHal.drvState.lod[0].mallocPtr = NULL;
    }

#ifndef RS_COMPATIBILITY_LIB
    if (drv->readBackFBO != NULL) {
        delete drv->readBackFBO;
        drv->readBackFBO = NULL;
    }

    if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
        (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {

        DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
        ANativeWindow *nw = alloc->mHal.state.wndSurface;

        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
    }
#endif

    free(drv);
    alloc->mHal.drv = NULL;
}

void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
                         const Type *newType, bool zeroNew) {
    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
    const uint32_t dimX = newType->getDimX();

    // can't resize Allocations with user-allocated buffers
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
        return;
    }
    void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
    // Calculate the object size
    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, NULL);
    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
    // Build the relative pointer tables.
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
    if (s != verifySize) {
        rsAssert(!"Size mismatch");
    }


    if (dimX > oldDimX) {
        uint32_t stride = alloc->mHal.state.elementSizeBytes;
        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
                 0, stride * (dimX - oldDimX));
    }
}

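// Reads the pixels of an allocation that has been rendered to (as an FBO color
// target) back into its script-visible backing store via glReadPixels, then
// restores the previously bound framebuffer.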
static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    if (drv->readBackFBO == NULL) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Restore the previously bound framebuffer
    lastFbo->setActive(rsc);
#endif
}


void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
                         RsAllocationUsageType src) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        if (!alloc->getIsRenderTarget()) {
            rsc->setError(RS_ERROR_FATAL_DRIVER,
                          "Attempting to sync from render target "
                          "on a non-render target allocation");
        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
            rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from RGBA "
                                                 "render target");
        } else {
            rsdAllocationSyncFromFBO(rsc, alloc);
        }
        return;
    }

    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        UploadToTexture(rsc, alloc);
    } else {
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
            AllocateRenderTarget(rsc, alloc);
        }
    }
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
        UploadToBufferObject(rsc, alloc);
    }

    drv->uploadDeferred = false;
}

void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    drv->uploadDeferred = true;
}

int32_t rsdAllocationInitSurfaceTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    UploadToTexture(rsc, alloc);
    return drv->textureID;
#else
    return 0;
#endif
}

#ifndef RS_COMPATIBILITY_LIB
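// Dequeues the next buffer from the ANativeWindow, locks it for CPU access,
// and points the allocation's level-0 backing store and stride at it.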
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
        return false;
    }

    // Must lock the whole surface
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);

    void *dst = NULL;
    mapper.lock(drv->wndBuffer->handle,
            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
            bounds, &dst);
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;
    rsAssert((alloc->mHal.drvState.lod[0].stride & 0xf) == 0);

    return true;
}
#endif

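// Attaches (or detaches, when nw is NULL) a native window to an IO output
// allocation: releases any buffer still held on the previous surface,
// configures the new surface's usage, dimensions, and buffer count, and
// dequeues the first buffer.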
void rsdAllocationSetSurfaceTexture(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    //ALOGE("rsdAllocationSetSurfaceTexture %p  %p", alloc, nw);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        //TODO finish support for render target + script
        drv->wnd = nw;
        return;
    }


    // Cleanup old surface if there is one.
    if (alloc->mHal.state.wndSurface) {
        ANativeWindow *old = alloc->mHal.state.wndSurface;
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        old->queueBuffer(old, drv->wndBuffer, -1);
    }

    if (nw != NULL) {
        int32_t r;
        uint32_t flags = 0;
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
        }
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
            flags |= GRALLOC_USAGE_HW_RENDER;
        }

        r = native_window_set_usage(nw, flags);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
            return;
        }

        r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
                                                 alloc->mHal.drvState.lod[0].dimY);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
            return;
        }

        r = native_window_set_buffer_count(nw, 3);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer count.");
            return;
        }

        IoGetBuffer(rsc, alloc, nw);
    }
#endif
}

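// Queues the current buffer to the consumer side of the native window and
// dequeues a fresh one for the script to write into. Render-target IO output
// goes through eglSwapBuffers instead.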
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = alloc->mHal.state.wndSurface;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
            return;
        }

        IoGetBuffer(rsc, alloc, nw);
    }
#endif
}

void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    alloc->mHal.state.surfaceTexture->updateTexImage();
#endif
}


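// CPU copy entry points. These copy between user memory and the allocation's
// malloc'd backing store, maintain reference counts for object elements, and
// mark the allocation for deferred re-upload to the GPU.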
void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, uint32_t count,
                         const void *data, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    const uint32_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    uint32_t size = count * eSize;

    if (ptr != data) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        if (alloc->mHal.state.hasReferences) {
            alloc->incRefs(data, count);
            alloc->decRefs(ptr, count);
        }
        memcpy(ptr, data, size);
    }
    drv->uploadDeferred = true;
}

void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            if (alloc->mHal.state.hasReferences) {
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        drv->uploadDeferred = true;
    } else {
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}

void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, uint32_t d, const void *data, uint32_t sizeBytes) {
    // 3D copies are not implemented in this revision of the driver.
}
733
void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, uint32_t count,
                         void *data, size_t sizeBytes) {
    const uint32_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    if (data != ptr) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        memcpy(data, ptr, count * eSize);
    }
}

void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
                                uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                                uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            memcpy(dst, src, lineSize);
            dst += stride;
            src += alloc->mHal.drvState.lod[lod].stride;
        }
    } else {
        ALOGE("Add code to readback from non-script memory");
    }
}


void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, uint32_t d, void *data, uint32_t sizeBytes) {
    // 3D reads are not implemented in this revision of the driver.
}

void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {
    return alloc->mHal.drvState.lod[0].mallocPtr;
}

void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {

}

void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, uint32_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
}


void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                                      RsAllocationCubemapFace srcFace) {
    uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t i = 0; i < h; i ++) {
        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstLod, dstFace);
        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcLod, srcFace);
        memcpy(dstPtr, srcPtr, w * elementSize);

        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
        //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
    }
}

void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                               RsAllocationCubemapFace srcFace) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
                                     dstLod, dstFace, w, h, srcAlloc,
                                     srcXoff, srcYoff, srcLod, srcFace);
}

void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
                               uint32_t dstLod, RsAllocationCubemapFace dstFace,
                               uint32_t w, uint32_t h, uint32_t d,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
                               uint32_t srcLod, RsAllocationCubemapFace srcFace) {
}

void rsdAllocationElementData1D(const Context *rsc, const Allocation *alloc,
                                uint32_t x,
                                const void *data, uint32_t cIdx, uint32_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

void rsdAllocationElementData2D(const Context *rsc, const Allocation *alloc,
                                uint32_t x, uint32_t y,
                                const void *data, uint32_t cIdx, uint32_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, y, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

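// Software mipmap generation helpers: each produces one LOD level by box
// filtering 2x2 blocks of the level above, specialized for 16-bit 565,
// 32-bit 8888, and 8-bit single-channel formats.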
static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }
    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
    for (uint32_t face = 0; face < numFaces; face ++) {
        for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() - 1); lod++) {
            switch (alloc->getType()->getElement()->getSizeBits()) {
            case 32:
                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 16:
                mip565(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 8:
                mip8(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            }
        }
    }
}