SharedBufferStack.cpp revision 57d89899c9fb978a1c097f298aa94c5db1f61bb6
/*
 * Copyright (C) 2007 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "SharedBufferStack"

#include <stdint.h>
#include <sys/types.h>

#include <utils/Debug.h>
#include <utils/Log.h>
#include <utils/threads.h>

#include <private/surfaceflinger/SharedBufferStack.h>

#include <ui/Rect.h>
#include <ui/Region.h>

#define DEBUG_ATOMICS 0

namespace android {
// ----------------------------------------------------------------------------

SharedClient::SharedClient()
    : lock(Mutex::SHARED), cv(Condition::SHARED)
{
}

SharedClient::~SharedClient() {
}


// these functions are used by the clients
status_t SharedClient::validate(size_t i) const {
    if (uint32_t(i) >= uint32_t(SharedBufferStack::NUM_LAYERS_MAX))
        return BAD_INDEX;
    return surfaces[i].status;
}

uint32_t SharedClient::getIdentity(size_t token) const {
    return uint32_t(surfaces[token].identity);
}

// ----------------------------------------------------------------------------


SharedBufferStack::SharedBufferStack()
{
}

void SharedBufferStack::init(int32_t i)
{
    inUse = -1;
    status = NO_ERROR;
    identity = i;
}

status_t SharedBufferStack::setCrop(int buffer, const Rect& crop)
{
    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
        return BAD_INDEX;

    buffers[buffer].crop.l = uint16_t(crop.left);
    buffers[buffer].crop.t = uint16_t(crop.top);
    buffers[buffer].crop.r = uint16_t(crop.right);
    buffers[buffer].crop.b = uint16_t(crop.bottom);
    return NO_ERROR;
}

status_t SharedBufferStack::setDirtyRegion(int buffer, const Region& dirty)
{
    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
        return BAD_INDEX;

    FlatRegion& reg(buffers[buffer].dirtyRegion);
    if (dirty.isEmpty()) {
        reg.count = 0;
        return NO_ERROR;
    }

    size_t count;
    Rect const* r = dirty.getArray(&count);
    if (count > FlatRegion::NUM_RECT_MAX) {
        const Rect bounds(dirty.getBounds());
        reg.count = 1;
        reg.rects[0].l = uint16_t(bounds.left);
        reg.rects[0].t = uint16_t(bounds.top);
        reg.rects[0].r = uint16_t(bounds.right);
        reg.rects[0].b = uint16_t(bounds.bottom);
    } else {
        reg.count = count;
        for (size_t i=0 ; i<count ; i++) {
            reg.rects[i].l = uint16_t(r[i].left);
            reg.rects[i].t = uint16_t(r[i].top);
            reg.rects[i].r = uint16_t(r[i].right);
            reg.rects[i].b = uint16_t(r[i].bottom);
        }
    }
    return NO_ERROR;
}
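
// Illustrative note (an addition, not in the original file): setDirtyRegion()
// flattens a Region into shared memory lossily. When the region carries more
// rectangles than FlatRegion::NUM_RECT_MAX, only its bounding box is stored
// (reg.count == 1). For example, if NUM_RECT_MAX were 5 and a client passed a
// 6-rect region covering two opposite corners, the stored dirty region would
// degrade to the single enclosing rectangle; getDirtyRegion() below would then
// return a region that is larger than, but contains, the original one.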

Region SharedBufferStack::getDirtyRegion(int buffer) const
{
    Region res;
    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
        return res;

    const FlatRegion& reg(buffers[buffer].dirtyRegion);
    if (reg.count > FlatRegion::NUM_RECT_MAX)
        return res;

    if (reg.count == 1) {
        const Rect r(
                reg.rects[0].l,
                reg.rects[0].t,
                reg.rects[0].r,
                reg.rects[0].b);
        res.set(r);
    } else {
        for (size_t i=0 ; i<reg.count ; i++) {
            const Rect r(
                    reg.rects[i].l,
                    reg.rects[i].t,
                    reg.rects[i].r,
                    reg.rects[i].b);
            res.orSelf(r);
        }
    }
    return res;
}

// ----------------------------------------------------------------------------

SharedBufferBase::SharedBufferBase(SharedClient* sharedClient,
        int surface, int32_t identity)
    : mSharedClient(sharedClient),
      mSharedStack(sharedClient->surfaces + surface),
      mIdentity(identity)
{
}

SharedBufferBase::~SharedBufferBase()
{
}

uint32_t SharedBufferBase::getIdentity()
{
    SharedBufferStack& stack( *mSharedStack );
    return stack.identity;
}

status_t SharedBufferBase::getStatus() const
{
    SharedBufferStack& stack( *mSharedStack );
    return stack.status;
}

size_t SharedBufferBase::getFrontBuffer() const
{
    SharedBufferStack& stack( *mSharedStack );
    return size_t( stack.head );
}

String8 SharedBufferBase::dump(char const* prefix) const
{
    const size_t SIZE = 1024;
    char buffer[SIZE];
    String8 result;
    SharedBufferStack& stack( *mSharedStack );
    snprintf(buffer, SIZE,
            "%s[ head=%2d, available=%2d, queued=%2d ] "
            "reallocMask=%08x, inUse=%2d, identity=%d, status=%d",
            prefix, stack.head, stack.available, stack.queued,
            stack.reallocMask, stack.inUse, stack.identity, stack.status);
    result.append(buffer);
    result.append("\n");
    return result;
}

status_t SharedBufferBase::waitForCondition(const ConditionBase& condition)
{
    const SharedBufferStack& stack( *mSharedStack );
    SharedClient& client( *mSharedClient );
    const nsecs_t TIMEOUT = s2ns(1);
    const int identity = mIdentity;

    Mutex::Autolock _l(client.lock);
    while ((condition()==false) &&
            (stack.identity == identity) &&
            (stack.status == NO_ERROR))
    {
        status_t err = client.cv.waitRelative(client.lock, TIMEOUT);
        // handle errors and timeouts
        if (CC_UNLIKELY(err != NO_ERROR)) {
            if (err == TIMED_OUT) {
                if (condition()) {
                    LOGE("waitForCondition(%s) timed out (identity=%d), "
                        "but condition is true! We recovered but it "
                        "shouldn't happen.", condition.name(), stack.identity);
                    break;
                } else {
                    LOGW("waitForCondition(%s) timed out "
                        "(identity=%d, status=%d). "
                        "CPU may be pegged. trying again.", condition.name(),
                        stack.identity, stack.status);
                }
            } else {
                LOGE("waitForCondition(%s) error (%s) ",
                        condition.name(), strerror(-err));
                return err;
            }
        }
    }
    return (stack.identity != mIdentity) ? status_t(BAD_INDEX) : stack.status;
}
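
// Illustrative sketch (an addition, not in the original file): a condition is
// a small functor derived from ConditionBase. Judging from the uses below, it
// needs a constructor forwarding the SharedBufferBase*, a side-effect-free
// operator()(), and a name() used for logging. A hypothetical condition that
// waits for at least one queued buffer, mirroring DequeueCondition below,
// might look like:
//
//     struct QueuedCondition : public ConditionBase {
//         QueuedCondition(SharedBufferBase* sbb) : ConditionBase(sbb) { }
//         bool operator()() const { return stack.queued > 0; }
//         const char* name() const { return "QueuedCondition"; }
//     };
//
// waitForCondition() polls such a functor under client.lock, waking up on
// client.cv signals or after the one-second timeout.
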
// ============================================================================
// conditions and updates
// ============================================================================

SharedBufferClient::DequeueCondition::DequeueCondition(
        SharedBufferClient* sbc) : ConditionBase(sbc) {
}
bool SharedBufferClient::DequeueCondition::operator()() const {
    return stack.available > 0;
}

SharedBufferClient::LockCondition::LockCondition(
        SharedBufferClient* sbc, int buf) : ConditionBase(sbc), buf(buf) {
}
bool SharedBufferClient::LockCondition::operator()() const {
    // NOTE: if stack.head is messed up, we could crash the client
    // or cause some drawing artifacts. This is okay, as long as it is
    // limited to the client.
    return (buf != stack.index[stack.head] ||
            (stack.queued > 0 && stack.inUse != buf));
}

SharedBufferServer::ReallocateCondition::ReallocateCondition(
        SharedBufferBase* sbb, int buf) : ConditionBase(sbb), buf(buf) {
}
bool SharedBufferServer::ReallocateCondition::operator()() const {
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX) {
        // if stack.head is messed up, we cannot allow the server to
        // crash (since stack.head is mapped on the client side)
        stack.status = BAD_VALUE;
        return false;
    }
    // TODO: we should also check that buf has been dequeued
    return (buf != stack.index[head]);
}

// ----------------------------------------------------------------------------

SharedBufferClient::QueueUpdate::QueueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
ssize_t SharedBufferClient::QueueUpdate::operator()() {
    android_atomic_inc(&stack.queued);
    return NO_ERROR;
}

SharedBufferClient::UndoDequeueUpdate::UndoDequeueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
ssize_t SharedBufferClient::UndoDequeueUpdate::operator()() {
    android_atomic_inc(&stack.available);
    return NO_ERROR;
}

SharedBufferServer::UnlockUpdate::UnlockUpdate(
        SharedBufferBase* sbb, int lockedBuffer)
    : UpdateBase(sbb), lockedBuffer(lockedBuffer) {
}
ssize_t SharedBufferServer::UnlockUpdate::operator()() {
    if (stack.inUse != lockedBuffer) {
        LOGE("unlocking %d, but currently locked buffer is %d",
                lockedBuffer, stack.inUse);
        return BAD_VALUE;
    }
    android_atomic_write(-1, &stack.inUse);
    return NO_ERROR;
}

SharedBufferServer::RetireUpdate::RetireUpdate(
        SharedBufferBase* sbb, int numBuffers)
    : UpdateBase(sbb), numBuffers(numBuffers) {
}
ssize_t SharedBufferServer::RetireUpdate::operator()() {
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    // Preventively lock the current buffer before updating queued.
    android_atomic_write(stack.index[head], &stack.inUse);

    // Decrement the number of queued buffers
    int32_t queued;
    do {
        queued = stack.queued;
        if (queued == 0) {
            return NOT_ENOUGH_DATA;
        }
    } while (android_atomic_cmpxchg(queued, queued-1, &stack.queued));

    // lock the buffer before advancing head, which automatically unlocks
    // the buffer we preventively locked upon entering this function

    head = (head + 1) % numBuffers;
    android_atomic_write(stack.index[head], &stack.inUse);

    // head is only modified here, so we don't need to use cmpxchg
    android_atomic_write(head, &stack.head);

    // now that head has moved, we can increment the number of available buffers
    android_atomic_inc(&stack.available);
    return head;
}
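
// Illustrative note (an addition): the do/while above is the classic
// compare-and-swap retry loop. android_atomic_cmpxchg(old, new, addr)
// returns 0 only when *addr still held 'old' and was replaced by 'new', so
// the loop re-reads stack.queued and retries whenever a client queued another
// buffer between the read and the swap. The same pattern in isolation,
// assuming only that cmpxchg returns 0 on success:
//
//     int32_t observed;
//     do {
//         observed = shared_counter;        // snapshot the current value
//         if (observed == 0)                // nothing to consume
//             return NOT_ENOUGH_DATA;
//     } while (android_atomic_cmpxchg(observed, observed - 1,
//             &shared_counter));            // retry if another thread raced us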

SharedBufferServer::StatusUpdate::StatusUpdate(
        SharedBufferBase* sbb, status_t status)
    : UpdateBase(sbb), status(status) {
}

ssize_t SharedBufferServer::StatusUpdate::operator()() {
    android_atomic_write(status, &stack.status);
    return NO_ERROR;
}

// ============================================================================

SharedBufferClient::SharedBufferClient(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, identity),
      mNumBuffers(num), tail(0), undoDequeueTail(0)
{
    SharedBufferStack& stack( *mSharedStack );
    tail = computeTail();
    queued_head = stack.head;
}

int32_t SharedBufferClient::computeTail() const
{
    SharedBufferStack& stack( *mSharedStack );
    return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
}
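
// Worked example (an addition): with mNumBuffers == 2, a freshly initialized
// stack has head == 1 and available == 2 (see the SharedBufferServer
// constructor below), so computeTail() yields (2 + 1 - 2 + 1) % 2 == 0: the
// tail points at the slot that dequeue() will hand out next. The leading
// mNumBuffers term only keeps the left-hand side of '%' non-negative.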

ssize_t SharedBufferClient::dequeue()
{
    SharedBufferStack& stack( *mSharedStack );

    if (stack.head == tail && stack.available == mNumBuffers) {
        LOGW("dequeue: tail=%d, head=%d, avail=%d, queued=%d",
                tail, stack.head, stack.available, stack.queued);
    }

    RWLock::AutoRLock _rd(mLock);

    const nsecs_t dequeueTime = systemTime(SYSTEM_TIME_THREAD);

    //LOGD("[%d] about to dequeue a buffer",
    //        mSharedStack->identity);
    DequeueCondition condition(this);
    status_t err = waitForCondition(condition);
    if (err != NO_ERROR)
        return ssize_t(err);

    // NOTE: 'stack.available' is part of the conditions, however
    // decrementing it never changes any conditions, so we don't need
    // to do this as part of an update.
    if (android_atomic_dec(&stack.available) == 0) {
        LOGW("dequeue probably called from multiple threads!");
    }

    undoDequeueTail = tail;
    int dequeued = stack.index[tail];
    tail = ((tail+1 >= mNumBuffers) ? 0 : tail+1);
    LOGD_IF(DEBUG_ATOMICS, "dequeued=%d, tail++=%d, %s",
            dequeued, tail, dump("").string());

    mDequeueTime[dequeued] = dequeueTime;

    return dequeued;
}

status_t SharedBufferClient::undoDequeue(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    // TODO: we can only undo the previous dequeue, we should
    // enforce that in the api
    UndoDequeueUpdate update(this);
    status_t err = updateCondition( update );
    if (err == NO_ERROR) {
        tail = undoDequeueTail;
    }
    return err;
}

status_t SharedBufferClient::lock(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    SharedBufferStack& stack( *mSharedStack );
    LockCondition condition(this, buf);
    status_t err = waitForCondition(condition);
    return err;
}

status_t SharedBufferClient::queue(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    SharedBufferStack& stack( *mSharedStack );

    queued_head = (queued_head + 1) % mNumBuffers;
    stack.index[queued_head] = buf;

    QueueUpdate update(this);
    status_t err = updateCondition( update );
    LOGD_IF(DEBUG_ATOMICS, "queued=%d, %s", buf, dump("").string());

    const nsecs_t now = systemTime(SYSTEM_TIME_THREAD);
    stack.stats.totalTime = ns2us(now - mDequeueTime[buf]);
    return err;
}
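
// Illustrative usage (an addition): from the client's side, the calls above
// compose into the usual swap loop. A hedged sketch, assuming 'sbc' is a
// connected SharedBufferClient and rendering happens elsewhere:
//
//     ssize_t buf = sbc->dequeue();       // block until a buffer is free
//     if (buf >= 0) {
//         sbc->lock(buf);                 // wait until the server released it
//         // ... render into buffer 'buf' ...
//         sbc->setDirtyRegion(buf, dirtyRegion);
//         sbc->queue(buf);                // hand it to the server
//     }
//
// undoDequeue(buf) covers the error path between dequeue() and queue().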

bool SharedBufferClient::needNewBuffer(int buf) const
{
    SharedBufferStack& stack( *mSharedStack );
    const uint32_t mask = 1<<(31-buf);
    return (android_atomic_and(~mask, &stack.reallocMask) & mask) != 0;
}
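
// Illustrative note (an addition): reallocMask keeps one bit per buffer with
// buffer 0 at the most significant bit (1 << 31). android_atomic_and returns
// the value the word held before the AND, so the expression above atomically
// clears this buffer's bit and reports whether it had been set: for buf == 2
// the mask is 0x20000000, and a reallocate() that set that bit makes
// needNewBuffer(2) return true exactly once.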

status_t SharedBufferClient::setCrop(int buf, const Rect& crop)
{
    SharedBufferStack& stack( *mSharedStack );
    return stack.setCrop(buf, crop);
}

status_t SharedBufferClient::setDirtyRegion(int buf, const Region& reg)
{
    SharedBufferStack& stack( *mSharedStack );
    return stack.setDirtyRegion(buf, reg);
}

status_t SharedBufferClient::setBufferCount(
        int bufferCount, const SetBufferCountCallback& ipc)
{
    SharedBufferStack& stack( *mSharedStack );
    if (uint32_t(bufferCount) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    if (uint32_t(bufferCount) < SharedBufferStack::NUM_BUFFER_MIN)
        return BAD_VALUE;

    RWLock::AutoWLock _wr(mLock);

    status_t err = ipc(bufferCount);
    if (err == NO_ERROR) {
        mNumBuffers = bufferCount;
        queued_head = (stack.head + stack.queued) % mNumBuffers;
    }
    return err;
}

// ----------------------------------------------------------------------------

SharedBufferServer::SharedBufferServer(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, identity),
      mNumBuffers(num)
{
    mSharedStack->init(identity);
    mSharedStack->head = num-1;
    mSharedStack->available = num;
    mSharedStack->queued = 0;
    mSharedStack->reallocMask = 0;
    memset(mSharedStack->buffers, 0, sizeof(mSharedStack->buffers));
    for (int i=0 ; i<num ; i++) {
        mBufferList.add(i);
        mSharedStack->index[i] = i;
    }
}
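
// Worked example (an addition): for num == 2 the constructor leaves the stack
// as head == 1, available == 2, queued == 0 and index == {0, 1}. Every buffer
// starts out available to the client, and with the head on the last slot the
// first dequeue() (tail == 0, per computeTail() above) hands out buffer 0.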

ssize_t SharedBufferServer::retireAndLock()
{
    RWLock::AutoRLock _l(mLock);

    RetireUpdate update(this, mNumBuffers);
    ssize_t buf = updateCondition( update );
    if (buf >= 0) {
        if (uint32_t(buf) >= SharedBufferStack::NUM_BUFFER_MAX)
            return BAD_VALUE;
        SharedBufferStack& stack( *mSharedStack );
        buf = stack.index[buf];
        LOGD_IF(DEBUG_ATOMICS && buf>=0, "retire=%d, %s",
                int(buf), dump("").string());
    }
    return buf;
}

status_t SharedBufferServer::unlock(int buf)
{
    UnlockUpdate update(this, buf);
    status_t err = updateCondition( update );
    return err;
}

void SharedBufferServer::setStatus(status_t status)
{
    if (status < NO_ERROR) {
        StatusUpdate update(this, status);
        updateCondition( update );
    }
}

status_t SharedBufferServer::reallocate()
{
    RWLock::AutoRLock _l(mLock);

    SharedBufferStack& stack( *mSharedStack );
    uint32_t mask = mBufferList.getMask();
    android_atomic_or(mask, &stack.reallocMask);
    return NO_ERROR;
}

int32_t SharedBufferServer::getQueuedCount() const
{
    SharedBufferStack& stack( *mSharedStack );
    return stack.queued;
}

status_t SharedBufferServer::assertReallocate(int buf)
{
    /*
     * NOTE: it's safe to hold mLock for read while waiting for
     * the ReallocateCondition because that condition is not updated
     * by the thread that holds mLock for write.
     */
    RWLock::AutoRLock _l(mLock);

    // TODO: need to validate "buf"
    ReallocateCondition condition(this, buf);
    status_t err = waitForCondition(condition);
    return err;
}

Region SharedBufferServer::getDirtyRegion(int buf) const
{
    SharedBufferStack& stack( *mSharedStack );
    return stack.getDirtyRegion(buf);
}

/*
 * NOTE: this is not thread-safe on the server-side, meaning
 * 'head' cannot move during this operation. The client-side
 * can safely operate as usual.
 */
status_t SharedBufferServer::resize(int newNumBuffers)
{
    if (uint32_t(newNumBuffers) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    RWLock::AutoWLock _l(mLock);

    // for now we're not supporting shrinking
    const int numBuffers = mNumBuffers;
    if (newNumBuffers < numBuffers)
        return BAD_VALUE;

    SharedBufferStack& stack( *mSharedStack );
    const int extra = newNumBuffers - numBuffers;

    // read the head, make sure it's valid
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    int base = numBuffers;
    int32_t avail = stack.available;
    int tail = head - avail + 1;

    if (tail >= 0) {
        int8_t* const index = const_cast<int8_t*>(stack.index);
        const int nb = numBuffers - head;
        memmove(&index[head + extra], &index[head], nb);
        base = head;
        // move head 'extra' slots ahead; this doesn't affect stack.index[head]
        stack.head = head + extra;
    }
    stack.available += extra;

    // fill the new free space with unused buffers
    BufferList::const_iterator curr(mBufferList.free_begin());
    for (int i=0 ; i<extra ; i++) {
        stack.index[base+i] = *curr;
        mBufferList.add(*curr);
        ++curr;
    }

    mNumBuffers = newNumBuffers;
    return NO_ERROR;
}
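
// Worked example (an addition): growing from 3 to 4 buffers with
// index == {0, 1, 2}, head == 2 and all three buffers available gives
// tail == head - avail + 1 == 0, so the branch above shifts index[2] one
// slot to the right (index becomes {0, 1, 2, 2}), sets base == 2 and
// head == 3, and the fill loop then overwrites index[2] with a buffer id
// taken from the free list. 'available' grows to 4.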

SharedBufferStack::Statistics SharedBufferServer::getStats() const
{
    SharedBufferStack& stack( *mSharedStack );
    return stack.stats;
}

// ---------------------------------------------------------------------------
status_t SharedBufferServer::BufferList::add(int value)
{
    if (uint32_t(value) >= mCapacity)
        return BAD_VALUE;
    uint32_t mask = 1<<(31-value);
    if (mList & mask)
        return ALREADY_EXISTS;
    mList |= mask;
    return NO_ERROR;
}

status_t SharedBufferServer::BufferList::remove(int value)
{
    if (uint32_t(value) >= mCapacity)
        return BAD_VALUE;
    uint32_t mask = 1<<(31-value);
    if (!(mList & mask))
        return NAME_NOT_FOUND;
    mList &= ~mask;
    return NO_ERROR;
}
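
// Illustrative note (an addition): BufferList tracks the allocated buffer ids
// in a single 32-bit word, using the same MSB-first convention as
// reallocMask: add(0) sets bit 31, add(1) sets bit 30. After the server
// constructor runs with num == 2, the mask reads 0xC0000000, which is exactly
// the value reallocate() ORs into stack.reallocMask via getMask().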


// ---------------------------------------------------------------------------
}; // namespace android
