rsdAllocation.cpp revision 10c9dd709f8b52213c6792961afa9c5de807db5c
/*
 * Copyright (C) 2011-2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */


#include "rsdCore.h"
#include "rsdAllocation.h"

#include "rsAllocation.h"

#include "system/window.h"
#include "ui/Rect.h"
#include "ui/GraphicBufferMapper.h"

#ifndef RS_COMPATIBILITY_LIB
#include "rsdFrameBufferObj.h"
#include "gui/GLConsumer.h"
#include "hardware/gralloc.h"

#include <GLES/gl.h>
#include <GLES2/gl2.h>
#include <GLES/glext.h>
#endif

using namespace android;
using namespace android::renderscript;


#ifndef RS_COMPATIBILITY_LIB
const static GLenum gFaceOrder[] = {
    GL_TEXTURE_CUBE_MAP_POSITIVE_X,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_X,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Y,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Y,
    GL_TEXTURE_CUBE_MAP_POSITIVE_Z,
    GL_TEXTURE_CUBE_MAP_NEGATIVE_Z
};

GLenum rsdTypeToGLType(RsDataType t) {
    switch (t) {
    case RS_TYPE_UNSIGNED_5_6_5:    return GL_UNSIGNED_SHORT_5_6_5;
    case RS_TYPE_UNSIGNED_5_5_5_1:  return GL_UNSIGNED_SHORT_5_5_5_1;
    case RS_TYPE_UNSIGNED_4_4_4_4:  return GL_UNSIGNED_SHORT_4_4_4_4;

    //case RS_TYPE_FLOAT_16:      return GL_HALF_FLOAT;
    case RS_TYPE_FLOAT_32:      return GL_FLOAT;
    case RS_TYPE_UNSIGNED_8:    return GL_UNSIGNED_BYTE;
    case RS_TYPE_UNSIGNED_16:   return GL_UNSIGNED_SHORT;
    case RS_TYPE_SIGNED_8:      return GL_BYTE;
    case RS_TYPE_SIGNED_16:     return GL_SHORT;
    default:    break;
    }
    return 0;
}

GLenum rsdKindToGLFormat(RsDataKind k) {
    switch (k) {
    case RS_KIND_PIXEL_L: return GL_LUMINANCE;
    case RS_KIND_PIXEL_A: return GL_ALPHA;
    case RS_KIND_PIXEL_LA: return GL_LUMINANCE_ALPHA;
    case RS_KIND_PIXEL_RGB: return GL_RGB;
    case RS_KIND_PIXEL_RGBA: return GL_RGBA;
    case RS_KIND_PIXEL_DEPTH: return GL_DEPTH_COMPONENT16;
    default: break;
    }
    return 0;
}
#endif

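// Return a pointer to the element at (xoff, yoff) within the given mip level
// and cubemap face of the allocation's host-side backing store.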
uint8_t *GetOffsetPtr(const android::renderscript::Allocation *alloc,
                      uint32_t xoff, uint32_t yoff, uint32_t lod,
                      RsAllocationCubemapFace face) {
    uint8_t *ptr = (uint8_t *)alloc->mHal.drvState.lod[lod].mallocPtr;
    ptr += face * alloc->mHal.drvState.faceOffset;
    ptr += yoff * alloc->mHal.drvState.lod[lod].stride;
    ptr += xoff * alloc->mHal.state.elementSizeBytes;
    return ptr;
}


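// Copy a sub-rectangle of host data into the allocation's GL texture with
// glTexSubImage2D (no-op when built as the compatibility library).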
static void Update2DTexture(const Context *rsc, const Allocation *alloc, const void *ptr,
                            uint32_t xoff, uint32_t yoff, uint32_t lod,
                            RsAllocationCubemapFace face, uint32_t w, uint32_t h) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(drv->textureID);
    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);
    GLenum t = GL_TEXTURE_2D;
    if (alloc->mHal.state.hasFaces) {
        t = gFaceOrder[face];
    }
    RSD_CALL_GL(glTexSubImage2D, t, lod, xoff, yoff, w, h, drv->glFormat, drv->glType, ptr);
#endif
}


#ifndef RS_COMPATIBILITY_LIB
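// Upload every mip level (and cubemap face, if present) of the allocation to
// the bound GL texture; the first upload defines storage with glTexImage2D,
// later syncs reuse it via glTexSubImage2D.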
static void Upload2DTexture(const Context *rsc, const Allocation *alloc, bool isFirstUpload) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    RSD_CALL_GL(glBindTexture, drv->glTarget, drv->textureID);
    RSD_CALL_GL(glPixelStorei, GL_UNPACK_ALIGNMENT, 1);

    uint32_t faceCount = 1;
    if (alloc->mHal.state.hasFaces) {
        faceCount = 6;
    }

    rsdGLCheckError(rsc, "Upload2DTexture 1 ");
    for (uint32_t face = 0; face < faceCount; face ++) {
        for (uint32_t lod = 0; lod < alloc->mHal.state.type->getLODCount(); lod++) {
            const uint8_t *p = GetOffsetPtr(alloc, 0, 0, lod, (RsAllocationCubemapFace)face);

            GLenum t = GL_TEXTURE_2D;
            if (alloc->mHal.state.hasFaces) {
                t = gFaceOrder[face];
            }

            if (isFirstUpload) {
                RSD_CALL_GL(glTexImage2D, t, lod, drv->glFormat,
                             alloc->mHal.state.type->getLODDimX(lod),
                             alloc->mHal.state.type->getLODDimY(lod),
                             0, drv->glFormat, drv->glType, p);
            } else {
                RSD_CALL_GL(glTexSubImage2D, t, lod, 0, 0,
                                alloc->mHal.state.type->getLODDimX(lod),
                                alloc->mHal.state.type->getLODDimY(lod),
                                drv->glFormat, drv->glType, p);
            }
        }
    }

    if (alloc->mHal.state.mipmapControl == RS_ALLOCATION_MIPMAP_ON_SYNC_TO_TEXTURE) {
        RSD_CALL_GL(glGenerateMipmap, drv->glTarget);
    }
    rsdGLCheckError(rsc, "Upload2DTexture");
}
#endif

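// Sync host data to the allocation's GL texture, generating the texture name
// on first use. IO_INPUT allocations only get a texture name here; allocations
// without script usage free their host copy once the upload completes.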
static void UploadToTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_INPUT) {
        if (!drv->textureID) {
            RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        }
        return;
    }

    if (!drv->glType || !drv->glFormat) {
        return;
    }

    if (!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }

    bool isFirstUpload = false;

    if (!drv->textureID) {
        RSD_CALL_GL(glGenTextures, 1, &drv->textureID);
        isFirstUpload = true;
    }

    Upload2DTexture(rsc, alloc, isFirstUpload);

    if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {
        if (alloc->mHal.drvState.lod[0].mallocPtr) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
            alloc->mHal.drvState.lod[0].mallocPtr = NULL;
        }
    }
    rsdGLCheckError(rsc, "UploadToTexture");
#endif
}

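// Lazily create and size the renderbuffer backing an allocation used as a
// render target.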
static void AllocateRenderTarget(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (!drv->glFormat) {
        return;
    }

    if (!drv->renderTargetID) {
        RSD_CALL_GL(glGenRenderbuffers, 1, &drv->renderTargetID);

        if (!drv->renderTargetID) {
            // This should generally not happen
            ALOGE("allocateRenderTarget failed to gen mRenderTargetID");
            rsc->dumpDebug();
            return;
        }
        RSD_CALL_GL(glBindRenderbuffer, GL_RENDERBUFFER, drv->renderTargetID);
        RSD_CALL_GL(glRenderbufferStorage, GL_RENDERBUFFER, drv->glFormat,
                    alloc->mHal.drvState.lod[0].dimX, alloc->mHal.drvState.lod[0].dimY);
    }
    rsdGLCheckError(rsc, "AllocateRenderTarget");
#endif
}

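// Copy a 1D allocation into its GL buffer object (vertex data), creating the
// buffer name on first use.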
static void UploadToBufferObject(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    rsAssert(!alloc->mHal.state.type->getDimY());
    rsAssert(!alloc->mHal.state.type->getDimZ());

    //alloc->mHal.state.usageFlags |= RS_ALLOCATION_USAGE_GRAPHICS_VERTEX;

    if (!drv->bufferID) {
        RSD_CALL_GL(glGenBuffers, 1, &drv->bufferID);
    }
    if (!drv->bufferID) {
        ALOGE("Upload to buffer object failed");
        drv->uploadDeferred = true;
        return;
    }
    RSD_CALL_GL(glBindBuffer, drv->glTarget, drv->bufferID);
    RSD_CALL_GL(glBufferData, drv->glTarget, alloc->mHal.state.type->getSizeBytes(),
                 alloc->mHal.drvState.lod[0].mallocPtr, GL_DYNAMIC_DRAW);
    RSD_CALL_GL(glBindBuffer, drv->glTarget, 0);
    rsdGLCheckError(rsc, "UploadToBufferObject");
#endif
}

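// Populate the per-LOD dimensions, strides, and mallocPtr table for an
// allocation backed by ptr (pass NULL to just compute sizes). Returns the
// total number of bytes needed, multiplied by six for cubemaps.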
static size_t AllocationBuildPointerTable(const Context *rsc, const Allocation *alloc,
        const Type *type, uint8_t *ptr) {
    alloc->mHal.drvState.lod[0].dimX = type->getDimX();
    alloc->mHal.drvState.lod[0].dimY = type->getDimY();
    alloc->mHal.drvState.lod[0].dimZ = type->getDimZ();
    alloc->mHal.drvState.lod[0].mallocPtr = 0;
    alloc->mHal.drvState.lod[0].stride = alloc->mHal.drvState.lod[0].dimX * type->getElementSizeBytes();
    alloc->mHal.drvState.lodCount = type->getLODCount();
    alloc->mHal.drvState.faceCount = type->getDimFaces();

    size_t offsets[Allocation::MAX_LOD];
    memset(offsets, 0, sizeof(offsets));

    size_t o = alloc->mHal.drvState.lod[0].stride * rsMax(alloc->mHal.drvState.lod[0].dimY, 1u) *
            rsMax(alloc->mHal.drvState.lod[0].dimZ, 1u);
    if(alloc->mHal.drvState.lodCount > 1) {
        uint32_t tx = alloc->mHal.drvState.lod[0].dimX;
        uint32_t ty = alloc->mHal.drvState.lod[0].dimY;
        uint32_t tz = alloc->mHal.drvState.lod[0].dimZ;
        for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
            alloc->mHal.drvState.lod[lod].dimX = tx;
            alloc->mHal.drvState.lod[lod].dimY = ty;
            alloc->mHal.drvState.lod[lod].dimZ = tz;
            alloc->mHal.drvState.lod[lod].stride = tx * type->getElementSizeBytes();
            offsets[lod] = o;
            o += alloc->mHal.drvState.lod[lod].stride * rsMax(ty, 1u) * rsMax(tz, 1u);
            if (tx > 1) tx >>= 1;
            if (ty > 1) ty >>= 1;
            if (tz > 1) tz >>= 1;
        }
    }
    alloc->mHal.drvState.faceOffset = o;

    alloc->mHal.drvState.lod[0].mallocPtr = ptr;
    for (uint32_t lod=1; lod < alloc->mHal.drvState.lodCount; lod++) {
        alloc->mHal.drvState.lod[lod].mallocPtr = ptr + offsets[lod];
    }

    size_t allocSize = alloc->mHal.drvState.faceOffset;
    if(alloc->mHal.drvState.faceCount) {
        allocSize *= 6;
    }

    return allocSize;
}

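// Driver-side allocation setup: compute the backing-store size, accept a
// user-provided pointer or allocate 16-byte-aligned memory (IO_OUTPUT
// allocations get their memory from the window buffer instead), then select
// the GL target/format and flag deferred upload for graphics usages.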
bool rsdAllocationInit(const Context *rsc, Allocation *alloc, bool forceZero) {
    DrvAllocation *drv = (DrvAllocation *)calloc(1, sizeof(DrvAllocation));
    if (!drv) {
        return false;
    }
    alloc->mHal.drv = drv;

    // Calculate the object size.
    size_t allocSize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), NULL);

    uint8_t * ptr = NULL;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) {
    } else if (alloc->mHal.state.userProvidedPtr != NULL) {
        // user-provided allocation
        // limitations: no faces, no LOD, USAGE_SCRIPT only
        if (alloc->mHal.state.usageFlags != (RS_ALLOCATION_USAGE_SCRIPT | RS_ALLOCATION_USAGE_SHARED)) {
            ALOGE("Can't use user-allocated buffers if usage is not USAGE_SCRIPT and USAGE_SHARED");
            return false;
        }
        if (alloc->getType()->getDimLOD() || alloc->getType()->getDimFaces()) {
            ALOGE("User-allocated buffers must not have multiple faces or LODs");
            return false;
        }
        ptr = (uint8_t*)alloc->mHal.state.userProvidedPtr;
    } else {
        // We align all allocations to a 16-byte boundary.
        ptr = (uint8_t *)memalign(16, allocSize);
        if (!ptr) {
            alloc->mHal.drv = NULL;
            free(drv);
            return false;
        }
        if (forceZero) {
            memset(ptr, 0, allocSize);
        }
    }
    // Build the pointer tables
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, alloc->getType(), ptr);
    if(allocSize != verifySize) {
        rsAssert(!"Size mismatch");
    }

    drv->glTarget = GL_NONE;
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        if (alloc->mHal.state.hasFaces) {
            drv->glTarget = GL_TEXTURE_CUBE_MAP;
        } else {
            drv->glTarget = GL_TEXTURE_2D;
        }
    } else {
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
            drv->glTarget = GL_ARRAY_BUFFER;
        }
    }

#ifndef RS_COMPATIBILITY_LIB
    drv->glType = rsdTypeToGLType(alloc->mHal.state.type->getElement()->getComponent().getType());
    drv->glFormat = rsdKindToGLFormat(alloc->mHal.state.type->getElement()->getComponent().getKind());
#else
    drv->glType = 0;
    drv->glFormat = 0;
#endif

    if (alloc->mHal.state.usageFlags & ~RS_ALLOCATION_USAGE_SCRIPT) {
        drv->uploadDeferred = true;
    }


    drv->readBackFBO = NULL;

    return true;
}

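// Release driver-side state: delete GL objects, free the host backing store
// unless it is user provided (USAGE_SHARED), and queue back any window buffer
// still held by a script-writable IO_OUTPUT allocation.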
void rsdAllocationDestroy(const Context *rsc, Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

#ifndef RS_COMPATIBILITY_LIB
    if (drv->bufferID) {
        // Causes a SW crash....
        //ALOGV(" mBufferID %i", mBufferID);
        //glDeleteBuffers(1, &mBufferID);
        //mBufferID = 0;
    }
    if (drv->textureID) {
        RSD_CALL_GL(glDeleteTextures, 1, &drv->textureID);
        drv->textureID = 0;
    }
    if (drv->renderTargetID) {
        RSD_CALL_GL(glDeleteRenderbuffers, 1, &drv->renderTargetID);
        drv->renderTargetID = 0;
    }
#endif

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        // don't free user-allocated ptrs
        if (!(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED)) {
            free(alloc->mHal.drvState.lod[0].mallocPtr);
        }
        alloc->mHal.drvState.lod[0].mallocPtr = NULL;
    }

#ifndef RS_COMPATIBILITY_LIB
    if (drv->readBackFBO != NULL) {
        delete drv->readBackFBO;
        drv->readBackFBO = NULL;
    }

    if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT) &&
        (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT)) {

        DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
        ANativeWindow *nw = alloc->mHal.state.wndSurface;

        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
    }
#endif

    free(drv);
    alloc->mHal.drv = NULL;
}

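// Resize the backing store with realloc and rebuild the pointer table for the
// new Type; elements added at the end are zeroed.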
void rsdAllocationResize(const Context *rsc, const Allocation *alloc,
                         const Type *newType, bool zeroNew) {
    const uint32_t oldDimX = alloc->mHal.drvState.lod[0].dimX;
    const uint32_t dimX = newType->getDimX();

    // can't resize Allocations with user-allocated buffers
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SHARED) {
        ALOGE("Resize cannot be called on a USAGE_SHARED allocation");
        return;
    }
    void * oldPtr = alloc->mHal.drvState.lod[0].mallocPtr;
    // Calculate the object size
    size_t s = AllocationBuildPointerTable(rsc, alloc, newType, NULL);
    uint8_t *ptr = (uint8_t *)realloc(oldPtr, s);
    // Build the relative pointer tables.
    size_t verifySize = AllocationBuildPointerTable(rsc, alloc, newType, ptr);
    if(s != verifySize) {
        rsAssert(!"Size mismatch");
    }


    if (dimX > oldDimX) {
        uint32_t stride = alloc->mHal.state.elementSizeBytes;
        memset(((uint8_t *)alloc->mHal.drvState.lod[0].mallocPtr) + stride * oldDimX,
                 0, stride * (dimX - oldDimX));
    }
}

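// Read an allocation's render-target contents back into host memory by
// binding a read-back FBO and calling glReadPixels.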
static void rsdAllocationSyncFromFBO(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    if (!alloc->getIsScript()) {
        return; // nothing to sync
    }

    RsdHal *dc = (RsdHal *)rsc->mHal.drv;
    RsdFrameBufferObj *lastFbo = dc->gl.currentFrameBuffer;

    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    if (!drv->textureID && !drv->renderTargetID) {
        return; // nothing was rendered here yet, so nothing to sync
    }
    if (drv->readBackFBO == NULL) {
        drv->readBackFBO = new RsdFrameBufferObj();
        drv->readBackFBO->setColorTarget(drv, 0);
        drv->readBackFBO->setDimensions(alloc->getType()->getDimX(),
                                        alloc->getType()->getDimY());
    }

    // Bind the framebuffer object so we can read back from it
    drv->readBackFBO->setActive(rsc);

    // Do the readback
    RSD_CALL_GL(glReadPixels, 0, 0, alloc->mHal.drvState.lod[0].dimX,
                alloc->mHal.drvState.lod[0].dimY,
                drv->glFormat, drv->glType, alloc->mHal.drvState.lod[0].mallocPtr);

    // Revert framebuffer to its original
    lastFbo->setActive(rsc);
#endif
}


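// Propagate changes for the requested source: syncing from a render target
// reads pixels back to host memory, while syncing from the script uploads to
// the texture and/or vertex buffer object as the usage flags require.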
void rsdAllocationSyncAll(const Context *rsc, const Allocation *alloc,
                         RsAllocationUsageType src) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    if (src == RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        if(!alloc->getIsRenderTarget()) {
            rsc->setError(RS_ERROR_FATAL_DRIVER,
                          "Attempting to sync allocation from render target "
                          "for a non-render target allocation");
        } else if (alloc->getType()->getElement()->getKind() != RS_KIND_PIXEL_RGBA) {
            rsc->setError(RS_ERROR_FATAL_DRIVER, "Can only sync from an RGBA "
                                                 "render target");
        } else {
            rsdAllocationSyncFromFBO(rsc, alloc);
        }
        return;
    }

    rsAssert(src == RS_ALLOCATION_USAGE_SCRIPT);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_TEXTURE) {
        UploadToTexture(rsc, alloc);
    } else {
        if ((alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) &&
            !(alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_IO_OUTPUT)) {
            AllocateRenderTarget(rsc, alloc);
        }
    }
    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_VERTEX) {
        UploadToBufferObject(rsc, alloc);
    }

    drv->uploadDeferred = false;
}

void rsdAllocationMarkDirty(const Context *rsc, const Allocation *alloc) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    drv->uploadDeferred = true;
}

int32_t rsdAllocationInitSurfaceTexture(const Context *rsc, const Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    UploadToTexture(rsc, alloc);
    return drv->textureID;
#else
    return 0;
#endif
}

#ifndef RS_COMPATIBILITY_LIB
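// Dequeue the next ANativeWindow buffer, lock it for CPU write access, and
// point the allocation's level-0 storage at the mapped pixels.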
static bool IoGetBuffer(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    int32_t r = native_window_dequeue_buffer_and_wait(nw, &drv->wndBuffer);
    if (r) {
        rsc->setError(RS_ERROR_DRIVER, "Error getting next IO output buffer.");
        return false;
    }

    // Must lock the whole surface
    GraphicBufferMapper &mapper = GraphicBufferMapper::get();
    Rect bounds(drv->wndBuffer->width, drv->wndBuffer->height);

    void *dst = NULL;
    mapper.lock(drv->wndBuffer->handle,
            GRALLOC_USAGE_SW_READ_NEVER | GRALLOC_USAGE_SW_WRITE_OFTEN,
            bounds, &dst);
    alloc->mHal.drvState.lod[0].mallocPtr = dst;
    alloc->mHal.drvState.lod[0].stride = drv->wndBuffer->stride * alloc->mHal.state.elementSizeBytes;

    return true;
}
#endif

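// Attach (or replace) the ANativeWindow used for IO_OUTPUT: queue back any
// buffer held on the old surface, configure the new window's usage flags,
// dimensions, and buffer count, then acquire the first buffer.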
void rsdAllocationSetSurfaceTexture(const Context *rsc, Allocation *alloc, ANativeWindow *nw) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    //ALOGE("rsdAllocationSetSurfaceTexture %p  %p", alloc, nw);

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        //TODO finish support for render target + script
        drv->wnd = nw;
        return;
    }


    // Cleanup old surface if there is one.
    if (alloc->mHal.state.wndSurface) {
        ANativeWindow *old = alloc->mHal.state.wndSurface;
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        old->queueBuffer(old, drv->wndBuffer, -1);
    }

    if (nw != NULL) {
        int32_t r;
        uint32_t flags = 0;
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
            flags |= GRALLOC_USAGE_SW_READ_RARELY | GRALLOC_USAGE_SW_WRITE_OFTEN;
        }
        if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
            flags |= GRALLOC_USAGE_HW_RENDER;
        }

        r = native_window_set_usage(nw, flags);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer usage.");
            return;
        }

        r = native_window_set_buffers_dimensions(nw, alloc->mHal.drvState.lod[0].dimX,
                                                 alloc->mHal.drvState.lod[0].dimY);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer dimensions.");
            return;
        }

        r = native_window_set_buffer_count(nw, 3);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error setting IO output buffer count.");
            return;
        }

        IoGetBuffer(rsc, alloc, nw);
    }
#endif
}

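// Send the current IO_OUTPUT frame to the consumer: swap the EGL surface for
// render-target allocations, otherwise unlock and queue the window buffer and
// dequeue a fresh one for the script to write into.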
void rsdAllocationIoSend(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    ANativeWindow *nw = alloc->mHal.state.wndSurface;

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_GRAPHICS_RENDER_TARGET) {
        RsdHal *dc = (RsdHal *)rsc->mHal.drv;
        RSD_CALL_GL(eglSwapBuffers, dc->gl.egl.display, dc->gl.egl.surface);
        return;
    }

    if (alloc->mHal.state.usageFlags & RS_ALLOCATION_USAGE_SCRIPT) {
        GraphicBufferMapper &mapper = GraphicBufferMapper::get();
        mapper.unlock(drv->wndBuffer->handle);
        int32_t r = nw->queueBuffer(nw, drv->wndBuffer, -1);
        if (r) {
            rsc->setError(RS_ERROR_DRIVER, "Error sending IO output buffer.");
            return;
        }

        IoGetBuffer(rsc, alloc, nw);
    }
#endif
}

void rsdAllocationIoReceive(const Context *rsc, Allocation *alloc) {
#ifndef RS_COMPATIBILITY_LIB
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;
    alloc->mHal.state.surfaceTexture->updateTexImage();
#endif
}


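// Host-side copy entry points: the Data* routines copy caller memory into the
// allocation (updating reference counts for object elements) and mark it for
// deferred re-upload; the Read* routines copy allocation contents back out.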
void rsdAllocationData1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, uint32_t count,
                         const void *data, size_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    const uint32_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    uint32_t size = count * eSize;

    if (ptr != data) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        if (alloc->mHal.state.hasReferences) {
            alloc->incRefs(data, count);
            alloc->decRefs(ptr, count);
        }
        memcpy(ptr, data, size);
    }
    drv->uploadDeferred = true;
}

void rsdAllocationData2D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, const void *data, size_t sizeBytes, size_t stride) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        const uint8_t *src = static_cast<const uint8_t *>(data);
        uint8_t *dst = GetOffsetPtr(alloc, xoff, yoff, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            drv->uploadDeferred = true;
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            if (alloc->mHal.state.hasReferences) {
                alloc->incRefs(src, w);
                alloc->decRefs(dst, w);
            }
            memcpy(dst, src, lineSize);
            src += stride;
            dst += alloc->mHal.drvState.lod[lod].stride;
        }
        drv->uploadDeferred = true;
    } else {
        Update2DTexture(rsc, alloc, data, xoff, yoff, lod, face, w, h);
    }
}

void rsdAllocationData3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, uint32_t d, const void *data, uint32_t sizeBytes) {

}

void rsdAllocationRead1D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t lod, uint32_t count,
                         void *data, size_t sizeBytes) {
    const uint32_t eSize = alloc->mHal.state.type->getElementSizeBytes();
    const uint8_t * ptr = GetOffsetPtr(alloc, xoff, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);
    if (data != ptr) {
        // Skip the copy if we are the same allocation. This can arise from
        // our Bitmap optimization, where we share the same storage.
        memcpy(data, ptr, count * eSize);
    }
}

void rsdAllocationRead2D(const Context *rsc, const Allocation *alloc,
                                uint32_t xoff, uint32_t yoff, uint32_t lod, RsAllocationCubemapFace face,
                                uint32_t w, uint32_t h, void *data, size_t sizeBytes, size_t stride) {
    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint32_t lineSize = eSize * w;
    if (!stride) {
        stride = lineSize;
    }

    if (alloc->mHal.drvState.lod[0].mallocPtr) {
        uint8_t *dst = static_cast<uint8_t *>(data);
        const uint8_t *src = GetOffsetPtr(alloc, xoff, yoff, lod, face);
        if (dst == src) {
            // Skip the copy if we are the same allocation. This can arise from
            // our Bitmap optimization, where we share the same storage.
            return;
        }

        for (uint32_t line=yoff; line < (yoff+h); line++) {
            memcpy(dst, src, lineSize);
            dst += stride;
            src += alloc->mHal.drvState.lod[lod].stride;
        }
    } else {
        ALOGE("Add code to readback from non-script memory");
    }
}


void rsdAllocationRead3D(const Context *rsc, const Allocation *alloc,
                         uint32_t xoff, uint32_t yoff, uint32_t zoff,
                         uint32_t lod, RsAllocationCubemapFace face,
                         uint32_t w, uint32_t h, uint32_t d, void *data, uint32_t sizeBytes) {

}

void * rsdAllocationLock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {
    return alloc->mHal.drvState.lod[0].mallocPtr;
}

void rsdAllocationUnlock1D(const android::renderscript::Context *rsc,
                          const android::renderscript::Allocation *alloc) {

}

void rsdAllocationData1D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstLod, uint32_t count,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcLod) {
}


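// Row-by-row copy between two script-backed allocations; rsdAllocationData2D_alloc
// below reports an error when neither allocation is script-backed.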
void rsdAllocationData2D_alloc_script(const android::renderscript::Context *rsc,
                                      const android::renderscript::Allocation *dstAlloc,
                                      uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                                      RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                                      const android::renderscript::Allocation *srcAlloc,
                                      uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                                      RsAllocationCubemapFace srcFace) {
    uint32_t elementSize = dstAlloc->getType()->getElementSizeBytes();
    for (uint32_t i = 0; i < h; i ++) {
        uint8_t *dstPtr = GetOffsetPtr(dstAlloc, dstXoff, dstYoff + i, dstLod, dstFace);
        uint8_t *srcPtr = GetOffsetPtr(srcAlloc, srcXoff, srcYoff + i, srcLod, srcFace);
        memcpy(dstPtr, srcPtr, w * elementSize);

        //ALOGE("COPIED dstXoff(%u), dstYoff(%u), dstLod(%u), dstFace(%u), w(%u), h(%u), srcXoff(%u), srcYoff(%u), srcLod(%u), srcFace(%u)",
        //     dstXoff, dstYoff, dstLod, dstFace, w, h, srcXoff, srcYoff, srcLod, srcFace);
    }
}

void rsdAllocationData2D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstLod,
                               RsAllocationCubemapFace dstFace, uint32_t w, uint32_t h,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcLod,
                               RsAllocationCubemapFace srcFace) {
    if (!dstAlloc->getIsScript() && !srcAlloc->getIsScript()) {
        rsc->setError(RS_ERROR_FATAL_DRIVER, "Non-script allocation copies not "
                                             "yet implemented.");
        return;
    }
    rsdAllocationData2D_alloc_script(rsc, dstAlloc, dstXoff, dstYoff,
                                     dstLod, dstFace, w, h, srcAlloc,
                                     srcXoff, srcYoff, srcLod, srcFace);
}

void rsdAllocationData3D_alloc(const android::renderscript::Context *rsc,
                               const android::renderscript::Allocation *dstAlloc,
                               uint32_t dstXoff, uint32_t dstYoff, uint32_t dstZoff,
                               uint32_t dstLod, RsAllocationCubemapFace dstFace,
                               uint32_t w, uint32_t h, uint32_t d,
                               const android::renderscript::Allocation *srcAlloc,
                               uint32_t srcXoff, uint32_t srcYoff, uint32_t srcZoff,
                               uint32_t srcLod, RsAllocationCubemapFace srcFace) {
}

void rsdAllocationElementData1D(const Context *rsc, const Allocation *alloc,
                                uint32_t x,
                                const void *data, uint32_t cIdx, uint32_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, 0, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

void rsdAllocationElementData2D(const Context *rsc, const Allocation *alloc,
                                uint32_t x, uint32_t y,
                                const void *data, uint32_t cIdx, uint32_t sizeBytes) {
    DrvAllocation *drv = (DrvAllocation *)alloc->mHal.drv;

    uint32_t eSize = alloc->mHal.state.elementSizeBytes;
    uint8_t * ptr = GetOffsetPtr(alloc, x, y, 0, RS_ALLOCATION_CUBEMAP_FACE_POSITIVE_X);

    const Element * e = alloc->mHal.state.type->getElement()->getField(cIdx);
    ptr += alloc->mHal.state.type->getElement()->getFieldOffsetBytes(cIdx);

    if (alloc->mHal.state.hasReferences) {
        e->incRefs(data);
        e->decRefs(ptr);
    }

    memcpy(ptr, data, sizeBytes);
    drv->uploadDeferred = true;
}

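// Box-filter downsampling helpers: each produces one mip level from the level
// above it for 32-bit (8888), 16-bit (565), and 8-bit elements respectively.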
static void mip565(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint16_t *oPtr = (uint16_t *)GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint16_t *i1 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint16_t *i2 = (uint16_t *)GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter565(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8888(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint32_t *oPtr = (uint32_t *)GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint32_t *i1 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint32_t *i2 = (uint32_t *)GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = rsBoxFilter8888(i1[0], i1[1], i2[0], i2[1]);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

static void mip8(const Allocation *alloc, int lod, RsAllocationCubemapFace face) {
    uint32_t w = alloc->mHal.drvState.lod[lod + 1].dimX;
    uint32_t h = alloc->mHal.drvState.lod[lod + 1].dimY;

    for (uint32_t y=0; y < h; y++) {
        uint8_t *oPtr = GetOffsetPtr(alloc, 0, y, lod + 1, face);
        const uint8_t *i1 = GetOffsetPtr(alloc, 0, y*2, lod, face);
        const uint8_t *i2 = GetOffsetPtr(alloc, 0, y*2+1, lod, face);

        for (uint32_t x=0; x < w; x++) {
            *oPtr = (uint8_t)(((uint32_t)i1[0] + i1[1] + i2[0] + i2[1]) * 0.25f);
            oPtr ++;
            i1 += 2;
            i2 += 2;
        }
    }
}

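// Generate the full mip chain on the CPU for each face, picking the box
// filter that matches the element size in bits.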
void rsdAllocationGenerateMipmaps(const Context *rsc, const Allocation *alloc) {
    if(!alloc->mHal.drvState.lod[0].mallocPtr) {
        return;
    }
    uint32_t numFaces = alloc->getType()->getDimFaces() ? 6 : 1;
    for (uint32_t face = 0; face < numFaces; face ++) {
        for (uint32_t lod=0; lod < (alloc->getType()->getLODCount() -1); lod++) {
            switch (alloc->getType()->getElement()->getSizeBits()) {
            case 32:
                mip8888(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 16:
                mip565(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            case 8:
                mip8(alloc, lod, (RsAllocationCubemapFace)face);
                break;
            }
        }
    }
}