// SharedBufferStack.cpp — revision 5e14010b1fc066dfcbc0a577d59492687c99667d
1/*
2 * Copyright (C) 2007 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "SharedBufferStack"
18
19#include <stdint.h>
20#include <sys/types.h>
21
22#include <utils/Debug.h>
23#include <utils/Log.h>
24#include <utils/threads.h>
25
26#include <private/surfaceflinger/SharedBufferStack.h>
27
28#include <ui/Rect.h>
29#include <ui/Region.h>
30
31#define DEBUG_ATOMICS 0
32
33namespace android {
34// ----------------------------------------------------------------------------
35
// SharedClient lives in memory shared between processes: its mutex and
// condition variable are created process-SHARED so the client and the
// server (SurfaceFlinger) can synchronize through them.
SharedClient::SharedClient()
    : lock(Mutex::SHARED), cv(Condition::SHARED)
{
}

SharedClient::~SharedClient() {
}
43
44
45// these functions are used by the clients
46status_t SharedClient::validate(size_t i) const {
47    if (uint32_t(i) >= uint32_t(SharedBufferStack::NUM_LAYERS_MAX))
48        return BAD_INDEX;
49    return surfaces[i].status;
50}
51
52// ----------------------------------------------------------------------------
53
54
55SharedBufferStack::SharedBufferStack()
56{
57}
58
59void SharedBufferStack::init(int32_t i)
60{
61    inUse = -2;
62    status = NO_ERROR;
63    identity = i;
64}
65
66status_t SharedBufferStack::setCrop(int buffer, const Rect& crop)
67{
68    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
69        return BAD_INDEX;
70
71    buffers[buffer].crop.l = uint16_t(crop.left);
72    buffers[buffer].crop.t = uint16_t(crop.top);
73    buffers[buffer].crop.r = uint16_t(crop.right);
74    buffers[buffer].crop.b = uint16_t(crop.bottom);
75    return NO_ERROR;
76}
77
78status_t SharedBufferStack::setDirtyRegion(int buffer, const Region& dirty)
79{
80    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
81        return BAD_INDEX;
82
83    FlatRegion& reg(buffers[buffer].dirtyRegion);
84    if (dirty.isEmpty()) {
85        reg.count = 0;
86        return NO_ERROR;
87    }
88
89    size_t count;
90    Rect const* r = dirty.getArray(&count);
91    if (count > FlatRegion::NUM_RECT_MAX) {
92        const Rect bounds(dirty.getBounds());
93        reg.count = 1;
94        reg.rects[0].l = uint16_t(bounds.left);
95        reg.rects[0].t = uint16_t(bounds.top);
96        reg.rects[0].r = uint16_t(bounds.right);
97        reg.rects[0].b = uint16_t(bounds.bottom);
98    } else {
99        reg.count = count;
100        for (size_t i=0 ; i<count ; i++) {
101            reg.rects[i].l = uint16_t(r[i].left);
102            reg.rects[i].t = uint16_t(r[i].top);
103            reg.rects[i].r = uint16_t(r[i].right);
104            reg.rects[i].b = uint16_t(r[i].bottom);
105        }
106    }
107    return NO_ERROR;
108}
109
110Region SharedBufferStack::getDirtyRegion(int buffer) const
111{
112    Region res;
113    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
114        return res;
115
116    const FlatRegion& reg(buffers[buffer].dirtyRegion);
117    if (reg.count > FlatRegion::NUM_RECT_MAX)
118        return res;
119
120    if (reg.count == 1) {
121        const Rect r(
122                reg.rects[0].l,
123                reg.rects[0].t,
124                reg.rects[0].r,
125                reg.rects[0].b);
126        res.set(r);
127    } else {
128        for (size_t i=0 ; i<reg.count ; i++) {
129            const Rect r(
130                    reg.rects[i].l,
131                    reg.rects[i].t,
132                    reg.rects[i].r,
133                    reg.rects[i].b);
134            res.orSelf(r);
135        }
136    }
137    return res;
138}
139
140// ----------------------------------------------------------------------------
141
// Base class shared by the client and server endpoints of one surface.
// 'surface' indexes into the SharedClient's per-layer stacks;
// 'identity' is the token used to detect that the surface was
// destroyed/recreated while we were blocked.
SharedBufferBase::SharedBufferBase(SharedClient* sharedClient,
        int surface, int32_t identity)
    : mSharedClient(sharedClient),
      mSharedStack(sharedClient->surfaces + surface),
      mIdentity(identity)
{
}

SharedBufferBase::~SharedBufferBase()
{
}
153
154status_t SharedBufferBase::getStatus() const
155{
156    SharedBufferStack& stack( *mSharedStack );
157    return stack.status;
158}
159
160int32_t SharedBufferBase::getIdentity() const
161{
162    SharedBufferStack& stack( *mSharedStack );
163    return stack.identity;
164}
165
166size_t SharedBufferBase::getFrontBuffer() const
167{
168    SharedBufferStack& stack( *mSharedStack );
169    return size_t( stack.head );
170}
171
172String8 SharedBufferBase::dump(char const* prefix) const
173{
174    const size_t SIZE = 1024;
175    char buffer[SIZE];
176    String8 result;
177    SharedBufferStack& stack( *mSharedStack );
178    snprintf(buffer, SIZE,
179            "%s[ head=%2d, available=%2d, queued=%2d ] "
180            "reallocMask=%08x, inUse=%2d, identity=%d, status=%d",
181            prefix, stack.head, stack.available, stack.queued,
182            stack.reallocMask, stack.inUse, stack.identity, stack.status);
183    result.append(buffer);
184    result.append("\n");
185    return result;
186}
187
// Blocks on the shared condition variable until 'condition' becomes
// true, the surface's identity changes, or the stack enters an error
// state. Returns:
//  - stack.status (NO_ERROR when the condition was satisfied)
//  - BAD_INDEX if the surface identity changed while waiting
//  - the wait error for failures other than a timeout
status_t SharedBufferBase::waitForCondition(const ConditionBase& condition)
{
    const SharedBufferStack& stack( *mSharedStack );
    SharedClient& client( *mSharedClient );
    // wake up periodically so a missed signal cannot hang us forever
    const nsecs_t TIMEOUT = s2ns(1);
    const int identity = mIdentity;

    Mutex::Autolock _l(client.lock);
    while ((condition()==false) &&
            (stack.identity == identity) &&
            (stack.status == NO_ERROR))
    {
        status_t err = client.cv.waitRelative(client.lock, TIMEOUT);
        // handle errors and timeouts
        if (CC_UNLIKELY(err != NO_ERROR)) {
            if (err == TIMED_OUT) {
                if (condition()) {
                    // condition turned true but we missed the signal;
                    // recover and proceed
                    LOGE("waitForCondition(%s) timed out (identity=%d), "
                        "but condition is true! We recovered but it "
                        "shouldn't happen." , condition.name(), stack.identity);
                    break;
                } else {
                    // plain timeout: log and retry the wait
                    LOGW("waitForCondition(%s) timed out "
                        "(identity=%d, status=%d). "
                        "CPU may be pegged. trying again.", condition.name(),
                        stack.identity, stack.status);
                }
            } else {
                LOGE("waitForCondition(%s) error (%s) ",
                        condition.name(), strerror(-err));
                return err;
            }
        }
    }
    // re-check identity: a changed identity means the surface was
    // destroyed/recreated behind our back
    return (stack.identity != mIdentity) ? status_t(BAD_INDEX) : stack.status;
}
224// ============================================================================
225// conditions and updates
226// ============================================================================
227
SharedBufferClient::DequeueCondition::DequeueCondition(
        SharedBufferClient* sbc) : ConditionBase(sbc)  {
}
// True when at least one buffer is available for the client to dequeue.
bool SharedBufferClient::DequeueCondition::operator()() const {
    return stack.available > 0;
}

SharedBufferClient::LockCondition::LockCondition(
        SharedBufferClient* sbc, int buf) : ConditionBase(sbc), buf(buf) {
}
// True when 'buf' may be locked by the client: it is not the current
// front buffer, or buffers are queued and 'buf' is not the one the
// server currently holds in use.
bool SharedBufferClient::LockCondition::operator()() const {
    // NOTE: if stack.head is messed up, we could crash the client
    // or cause some drawing artifacts. This is okay, as long as it is
    // limited to the client.
    return (buf != stack.index[stack.head] ||
            (stack.queued > 0 && stack.inUse != buf));
}
245
SharedBufferServer::ReallocateCondition::ReallocateCondition(
        SharedBufferBase* sbb, int buf) : ConditionBase(sbb), buf(buf) {
}
// Server-side: true once 'buf' is no longer the front buffer (so it
// can safely be reallocated). Validates head first because it is
// written by the (untrusted) client side.
bool SharedBufferServer::ReallocateCondition::operator()() const {
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX) {
        // if stack.head is messed up, we cannot allow the server to
        // crash (since stack.head is mapped on the client side)
        stack.status = BAD_VALUE;
        return false;
    }
    // TODO: we should also check that buf has been dequeued
    return (buf != stack.index[head]);
}
260
261// ----------------------------------------------------------------------------
262
SharedBufferClient::QueueUpdate::QueueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
// Publishes one more buffer to the server by bumping 'queued'.
ssize_t SharedBufferClient::QueueUpdate::operator()() {
    android_atomic_inc(&stack.queued);
    return NO_ERROR;
}

SharedBufferClient::UndoDequeueUpdate::UndoDequeueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
// Reverses a dequeue by returning the buffer to the available pool.
ssize_t SharedBufferClient::UndoDequeueUpdate::operator()() {
    android_atomic_inc(&stack.available);
    return NO_ERROR;
}
278
279SharedBufferServer::UnlockUpdate::UnlockUpdate(
280        SharedBufferBase* sbb, int lockedBuffer)
281    : UpdateBase(sbb), lockedBuffer(lockedBuffer) {
282}
283ssize_t SharedBufferServer::UnlockUpdate::operator()() {
284    if (stack.inUse != lockedBuffer) {
285        LOGE("unlocking %d, but currently locked buffer is %d "
286             "(identity=%d, token=%d)",
287                lockedBuffer, stack.inUse,
288                stack.identity, stack.token);
289        return BAD_VALUE;
290    }
291    android_atomic_write(-1, &stack.inUse);
292    return NO_ERROR;
293}
294
SharedBufferServer::RetireUpdate::RetireUpdate(
        SharedBufferBase* sbb, int numBuffers)
    : UpdateBase(sbb), numBuffers(numBuffers) {
}
// Advances the head to the next queued buffer and locks it for the
// server. Returns the new head position, NOT_ENOUGH_DATA when nothing
// is queued, or BAD_VALUE if the (client-writable) head is corrupted.
ssize_t SharedBufferServer::RetireUpdate::operator()() {
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    // Preventively lock the current buffer before updating queued.
    android_atomic_write(stack.index[head], &stack.inUse);

    // Decrement the number of queued buffers
    int32_t queued;
    do {
        queued = stack.queued;
        if (queued == 0) {
            return NOT_ENOUGH_DATA;
        }
        // retry until the compare-and-swap succeeds (a concurrent
        // queue() may have changed stack.queued underneath us)
    } while (android_atomic_cmpxchg(queued, queued-1, &stack.queued));

    // lock the buffer before advancing head, which automatically unlocks
    // the buffer we preventively locked upon entering this function

    head = (head + 1) % numBuffers;
    android_atomic_write(stack.index[head], &stack.inUse);

    // head is only modified here, so we don't need to use cmpxchg
    android_atomic_write(head, &stack.head);

    // now that head has moved, we can increment the number of available buffers
    android_atomic_inc(&stack.available);
    return head;
}
329
SharedBufferServer::StatusUpdate::StatusUpdate(
        SharedBufferBase* sbb, status_t status)
    : UpdateBase(sbb), status(status) {
}

// Atomically publishes 'status' into the shared stack so that
// waiting clients observe it.
ssize_t SharedBufferServer::StatusUpdate::operator()() {
    android_atomic_write(status, &stack.status);
    return NO_ERROR;
}
339
340// ============================================================================
341
// Client-side endpoint of a surface's buffer stack. 'num' is the
// number of buffers in rotation; 'identity' is the token expected to
// match the one the server stored in the shared stack.
SharedBufferClient::SharedBufferClient(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, identity),
      mNumBuffers(num), tail(0), undoDequeueTail(0)
{
    SharedBufferStack& stack( *mSharedStack );
    tail = computeTail();
    queued_head = stack.head;
}

// Derives the client-side tail (the next dequeue position) from the
// shared head and available count, modulo the number of buffers.
int32_t SharedBufferClient::computeTail() const
{
    SharedBufferStack& stack( *mSharedStack );
    return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
}
357
// Blocks until a buffer is available, claims it, and returns its
// index, or a negative error from waitForCondition(). Also records
// the dequeue time so queue() can compute per-buffer statistics.
ssize_t SharedBufferClient::dequeue()
{
    SharedBufferStack& stack( *mSharedStack );

    // sanity log: head caught up with tail while everything is
    // still marked available
    if (stack.head == tail && stack.available == mNumBuffers) {
        LOGW("dequeue: tail=%d, head=%d, avail=%d, queued=%d",
                tail, stack.head, stack.available, stack.queued);
    }

    RWLock::AutoRLock _rd(mLock);

    const nsecs_t dequeueTime = systemTime(SYSTEM_TIME_THREAD);

    //LOGD("[%d] about to dequeue a buffer",
    //        mSharedStack->identity);
    DequeueCondition condition(this);
    status_t err = waitForCondition(condition);
    if (err != NO_ERROR)
        return ssize_t(err);

    // NOTE: 'stack.available' is part of the conditions, however
    // decrementing it, never changes any conditions, so we don't need
    // to do this as part of an update.
    if (android_atomic_dec(&stack.available) == 0) {
        LOGW("dequeue probably called from multiple threads!");
    }

    // remember where we were so undoDequeue() can roll back, then
    // advance the local tail
    undoDequeueTail = tail;
    int dequeued = stack.index[tail];
    tail = ((tail+1 >= mNumBuffers) ? 0 : tail+1);
    LOGD_IF(DEBUG_ATOMICS, "dequeued=%d, tail++=%d, %s",
            dequeued, tail, dump("").string());

    mDequeueTime[dequeued] = dequeueTime;

    return dequeued;
}
395
// Rolls back the most recent dequeue(): returns the buffer to the
// available pool and restores the local tail.
status_t SharedBufferClient::undoDequeue(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    // TODO: we can only undo the previous dequeue, we should
    // enforce that in the api
    UndoDequeueUpdate update(this);
    status_t err = updateCondition( update );
    if (err == NO_ERROR) {
        tail = undoDequeueTail;
    }
    return err;
}
409
410status_t SharedBufferClient::lock(int buf)
411{
412    RWLock::AutoRLock _rd(mLock);
413
414    SharedBufferStack& stack( *mSharedStack );
415    LockCondition condition(this, buf);
416    status_t err = waitForCondition(condition);
417    return err;
418}
419
// Hands buffer 'buf' to the server: records it at the next queued
// slot, bumps the shared 'queued' count, and updates timing stats.
status_t SharedBufferClient::queue(int buf)
{
    RWLock::AutoRLock _rd(mLock);

    SharedBufferStack& stack( *mSharedStack );

    // place the buffer at the client-tracked queue position
    queued_head = (queued_head + 1) % mNumBuffers;
    stack.index[queued_head] = buf;

    QueueUpdate update(this);
    status_t err = updateCondition( update );
    LOGD_IF(DEBUG_ATOMICS, "queued=%d, %s", buf, dump("").string());

    // record how long this buffer was held since dequeue() (in us)
    const nsecs_t now = systemTime(SYSTEM_TIME_THREAD);
    stack.stats.totalTime = ns2us(now - mDequeueTime[buf]);
    return err;
}
437
// Atomically tests and clears this buffer's bit in reallocMask.
// Returns true if the server had requested a reallocation of 'buf'.
bool SharedBufferClient::needNewBuffer(int buf) const
{
    SharedBufferStack& stack( *mSharedStack );
    const uint32_t mask = 1<<(31-buf);
    return (android_atomic_and(~mask, &stack.reallocMask) & mask) != 0;
}
444
445status_t SharedBufferClient::setCrop(int buf, const Rect& crop)
446{
447    SharedBufferStack& stack( *mSharedStack );
448    return stack.setCrop(buf, crop);
449}
450
451status_t SharedBufferClient::setDirtyRegion(int buf, const Region& reg)
452{
453    SharedBufferStack& stack( *mSharedStack );
454    return stack.setDirtyRegion(buf, reg);
455}
456
457status_t SharedBufferClient::setBufferCount(
458        int bufferCount, const SetBufferCountCallback& ipc)
459{
460    SharedBufferStack& stack( *mSharedStack );
461    if (uint32_t(bufferCount) >= SharedBufferStack::NUM_BUFFER_MAX)
462        return BAD_VALUE;
463
464    if (uint32_t(bufferCount) < SharedBufferStack::NUM_BUFFER_MIN)
465        return BAD_VALUE;
466
467    RWLock::AutoWLock _wr(mLock);
468
469    status_t err = ipc(bufferCount);
470    if (err == NO_ERROR) {
471        mNumBuffers = bufferCount;
472        queued_head = (stack.head + stack.queued) % mNumBuffers;
473    }
474    return err;
475}
476
477// ----------------------------------------------------------------------------
478
// Server-side endpoint: initializes the shared stack for 'num'
// buffers, all initially available and indexed 0..num-1.
// With head = num-1 and available = num, the client's computeTail()
// evaluates to 0, so the first dequeue returns buffer index 0.
SharedBufferServer::SharedBufferServer(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, identity),
      mNumBuffers(num)
{
    mSharedStack->init(identity);
    mSharedStack->token = surface;
    mSharedStack->head = num-1;
    mSharedStack->available = num;
    mSharedStack->queued = 0;
    mSharedStack->reallocMask = 0;
    memset(mSharedStack->buffers, 0, sizeof(mSharedStack->buffers));
    for (int i=0 ; i<num ; i++) {
        // track every buffer in the server's list and set up the
        // identity mapping slot -> buffer index
        mBufferList.add(i);
        mSharedStack->index[i] = i;
    }
}

SharedBufferServer::~SharedBufferServer()
{
}
500
// Retires the current front buffer and locks the next queued one.
// Returns the retired buffer's index, or a negative error
// (NOT_ENOUGH_DATA when nothing is queued, BAD_VALUE on a corrupted
// head position — which is client-writable and must be re-validated).
ssize_t SharedBufferServer::retireAndLock()
{
    RWLock::AutoRLock _l(mLock);

    RetireUpdate update(this, mNumBuffers);
    ssize_t buf = updateCondition( update );
    if (buf >= 0) {
        // 'buf' is a head position here; re-check its range before
        // using it to index the shared table
        if (uint32_t(buf) >= SharedBufferStack::NUM_BUFFER_MAX)
            return BAD_VALUE;
        SharedBufferStack& stack( *mSharedStack );
        buf = stack.index[buf];
        LOGD_IF(DEBUG_ATOMICS && buf>=0, "retire=%d, %s",
                int(buf), dump("").string());
    }
    return buf;
}
517
518status_t SharedBufferServer::unlock(int buf)
519{
520    UnlockUpdate update(this, buf);
521    status_t err = updateCondition( update );
522    return err;
523}
524
525void SharedBufferServer::setStatus(status_t status)
526{
527    if (status < NO_ERROR) {
528        StatusUpdate update(this, status);
529        updateCondition( update );
530    }
531}
532
533status_t SharedBufferServer::reallocateAll()
534{
535    RWLock::AutoRLock _l(mLock);
536
537    SharedBufferStack& stack( *mSharedStack );
538    uint32_t mask = mBufferList.getMask();
539    android_atomic_or(mask, &stack.reallocMask);
540    return NO_ERROR;
541}
542
543status_t SharedBufferServer::reallocateAllExcept(int buffer)
544{
545    RWLock::AutoRLock _l(mLock);
546
547    SharedBufferStack& stack( *mSharedStack );
548    BufferList temp(mBufferList);
549    temp.remove(buffer);
550    uint32_t mask = temp.getMask();
551    android_atomic_or(mask, &stack.reallocMask);
552    return NO_ERROR;
553}
554
555int32_t SharedBufferServer::getQueuedCount() const
556{
557    SharedBufferStack& stack( *mSharedStack );
558    return stack.queued;
559}
560
// Blocks until buffer 'buf' is no longer the front buffer, i.e. until
// it is safe to reallocate it (see ReallocateCondition).
status_t SharedBufferServer::assertReallocate(int buf)
{
    /*
     * NOTE: it's safe to hold mLock for read while waiting for
     * the ReallocateCondition because that condition is not updated
     * by the thread that holds mLock for write.
     */
    RWLock::AutoRLock _l(mLock);

    // TODO: need to validate "buf"
    ReallocateCondition condition(this, buf);
    status_t err = waitForCondition(condition);
    return err;
}
575
576Region SharedBufferServer::getDirtyRegion(int buf) const
577{
578    SharedBufferStack& stack( *mSharedStack );
579    return stack.getDirtyRegion(buf);
580}
581
/*
 * Grows the stack to 'newNumBuffers' buffers.
 *
 * NOTE: this is not thread-safe on the server-side, meaning
 * 'head' cannot move during this operation. The client-side
 * can safely operate as usual.
 *
 */
status_t SharedBufferServer::resize(int newNumBuffers)
{
    if (uint32_t(newNumBuffers) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    RWLock::AutoWLock _l(mLock);

    // for now we're not supporting shrinking
    const int numBuffers = mNumBuffers;
    if (newNumBuffers < numBuffers)
        return BAD_VALUE;

    SharedBufferStack& stack( *mSharedStack );
    const int extra = newNumBuffers - numBuffers;

    // read the head, make sure it's valid
    int32_t head = stack.head;
    if (uint32_t(head) >= SharedBufferStack::NUM_BUFFER_MAX)
        return BAD_VALUE;

    // 'base' is where the new (unused) index entries will be written;
    // defaults to the end of the current index table
    int base = numBuffers;
    int32_t avail = stack.available;
    int tail = head - avail + 1;

    if (tail >= 0) {
        // the queue does not wrap: open a gap of 'extra' slots at
        // 'head' by shifting the entries [head, numBuffers) upward
        // (index entries are int8_t, so the element count is the
        // byte count for memmove)
        int8_t* const index = const_cast<int8_t*>(stack.index);
        const int nb = numBuffers - head;
        memmove(&index[head + extra], &index[head], nb);
        base = head;
        // move head 'extra' ahead, this doesn't impact stack.index[head];
        stack.head = head + extra;
    }
    stack.available += extra;

    // fill the new free space with unused buffers
    BufferList::const_iterator curr(mBufferList.free_begin());
    for (int i=0 ; i<extra ; i++) {
        stack.index[base+i] = *curr;
        mBufferList.add(*curr);
        ++curr;
    }

    mNumBuffers = newNumBuffers;
    return NO_ERROR;
}
633
634SharedBufferStack::Statistics SharedBufferServer::getStats() const
635{
636    SharedBufferStack& stack( *mSharedStack );
637    return stack.stats;
638}
639
640// ---------------------------------------------------------------------------
641status_t SharedBufferServer::BufferList::add(int value)
642{
643    if (uint32_t(value) >= mCapacity)
644        return BAD_VALUE;
645    uint32_t mask = 1<<(31-value);
646    if (mList & mask)
647        return ALREADY_EXISTS;
648    mList |= mask;
649    return NO_ERROR;
650}
651
652status_t SharedBufferServer::BufferList::remove(int value)
653{
654    if (uint32_t(value) >= mCapacity)
655        return BAD_VALUE;
656    uint32_t mask = 1<<(31-value);
657    if (!(mList & mask))
658        return NAME_NOT_FOUND;
659    mList &= ~mask;
660    return NO_ERROR;
661}
662
663
664// ---------------------------------------------------------------------------
665}; // namespace android
666