SharedBufferStack.cpp revision bfe7f0b12165a1ad4a73b6d8f013cb9e115a3c60
1/*
2 * Copyright (C) 2007 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "SharedBufferStack"
18
19#include <stdint.h>
20#include <sys/types.h>
21
22#include <utils/Debug.h>
23#include <utils/Log.h>
24#include <utils/threads.h>
25
26#include <private/surfaceflinger/SharedBufferStack.h>
27
28#include <ui/Rect.h>
29#include <ui/Region.h>
30
31#define DEBUG_ATOMICS 0
32
33namespace android {
34// ----------------------------------------------------------------------------
35
// SharedClient lives in memory mapped into both the client process and
// SurfaceFlinger; the mutex and condition variable are created with the
// SHARED attribute so both processes can synchronize on them.
SharedClient::SharedClient()
    : lock(Mutex::SHARED), cv(Condition::SHARED)
{
}

SharedClient::~SharedClient() {
}
43
44
45// these functions are used by the clients
// Returns the per-surface status for slot 'i', or BAD_INDEX when the
// slot is outside the shared surface table.
status_t SharedClient::validate(size_t i) const {
    if (uint32_t(i) >= uint32_t(NUM_LAYERS_MAX))
        return BAD_INDEX;
    return surfaces[i].status;
}

// Returns the identity of the surface at slot 'token'.
// NOTE(review): unlike validate(), no bounds check is performed here —
// presumably callers must validate 'token' first; confirm at call sites.
uint32_t SharedClient::getIdentity(size_t token) const {
    return uint32_t(surfaces[token].identity);
}
55
56// ----------------------------------------------------------------------------
57
58
SharedBufferStack::SharedBufferStack()
{
}

// Resets the bookkeeping for a (re)initialized surface: no buffer is
// locked by the server (inUse == -1), the status is cleared, and the
// surface identity 'i' is recorded.
void SharedBufferStack::init(int32_t i)
{
    inUse = -1;
    status = NO_ERROR;
    identity = i;
}
69
70status_t SharedBufferStack::setCrop(int buffer, const Rect& crop)
71{
72    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
73        return BAD_INDEX;
74
75    buffers[buffer].crop.l = uint16_t(crop.left);
76    buffers[buffer].crop.t = uint16_t(crop.top);
77    buffers[buffer].crop.r = uint16_t(crop.right);
78    buffers[buffer].crop.b = uint16_t(crop.bottom);
79    return NO_ERROR;
80}
81
82status_t SharedBufferStack::setDirtyRegion(int buffer, const Region& dirty)
83{
84    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
85        return BAD_INDEX;
86
87    FlatRegion& reg(buffers[buffer].dirtyRegion);
88    if (dirty.isEmpty()) {
89        reg.count = 0;
90        return NO_ERROR;
91    }
92
93    size_t count;
94    Rect const* r = dirty.getArray(&count);
95    if (count > FlatRegion::NUM_RECT_MAX) {
96        const Rect bounds(dirty.getBounds());
97        reg.count = 1;
98        reg.rects[0].l = uint16_t(bounds.left);
99        reg.rects[0].t = uint16_t(bounds.top);
100        reg.rects[0].r = uint16_t(bounds.right);
101        reg.rects[0].b = uint16_t(bounds.bottom);
102    } else {
103        reg.count = count;
104        for (size_t i=0 ; i<count ; i++) {
105            reg.rects[i].l = uint16_t(r[i].left);
106            reg.rects[i].t = uint16_t(r[i].top);
107            reg.rects[i].r = uint16_t(r[i].right);
108            reg.rects[i].b = uint16_t(r[i].bottom);
109        }
110    }
111    return NO_ERROR;
112}
113
114Region SharedBufferStack::getDirtyRegion(int buffer) const
115{
116    Region res;
117    if (uint32_t(buffer) >= NUM_BUFFER_MAX)
118        return res;
119
120    const FlatRegion& reg(buffers[buffer].dirtyRegion);
121    if (reg.count > FlatRegion::NUM_RECT_MAX)
122        return res;
123
124    if (reg.count == 1) {
125        const Rect r(
126                reg.rects[0].l,
127                reg.rects[0].t,
128                reg.rects[0].r,
129                reg.rects[0].b);
130        res.set(r);
131    } else {
132        for (size_t i=0 ; i<reg.count ; i++) {
133            const Rect r(
134                    reg.rects[i].l,
135                    reg.rects[i].t,
136                    reg.rects[i].r,
137                    reg.rects[i].b);
138            res.orSelf(r);
139        }
140    }
141    return res;
142}
143
144// ----------------------------------------------------------------------------
145
// Binds one endpoint (client or server side) to a single surface slot of
// the shared control block. 'surface' indexes the per-process surface
// table, 'num' is the depth of the buffer stack, and 'identity' names
// this surface instance.
SharedBufferBase::SharedBufferBase(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : mSharedClient(sharedClient),
      mSharedStack(sharedClient->surfaces + surface),
      mNumBuffers(num), mIdentity(identity)
{
}

SharedBufferBase::~SharedBufferBase()
{
}
157
158uint32_t SharedBufferBase::getIdentity()
159{
160    SharedBufferStack& stack( *mSharedStack );
161    return stack.identity;
162}
163
164status_t SharedBufferBase::getStatus() const
165{
166    SharedBufferStack& stack( *mSharedStack );
167    return stack.status;
168}
169
170size_t SharedBufferBase::getFrontBuffer() const
171{
172    SharedBufferStack& stack( *mSharedStack );
173    return size_t( stack.head );
174}
175
176String8 SharedBufferBase::dump(char const* prefix) const
177{
178    const size_t SIZE = 1024;
179    char buffer[SIZE];
180    String8 result;
181    SharedBufferStack& stack( *mSharedStack );
182    int tail = computeTail();
183    snprintf(buffer, SIZE,
184            "%s[ head=%2d, available=%2d, queued=%2d, tail=%2d ] "
185            "reallocMask=%08x, inUse=%2d, identity=%d, status=%d\n",
186            prefix, stack.head, stack.available, stack.queued, tail,
187            stack.reallocMask, stack.inUse, stack.identity, stack.status);
188    result.append(buffer);
189    return result;
190}
191
192int32_t SharedBufferBase::computeTail() const
193{
194    SharedBufferStack& stack( *mSharedStack );
195    return (mNumBuffers + stack.head - stack.available + 1) % mNumBuffers;
196}
197
// Blocks until 'condition' becomes true for this surface, the surface's
// identity changes, or an error status is published into the stack.
//
// Returns:
//  - BAD_INDEX when the stack identity no longer matches ours (the
//    surface slot was re-used while we waited),
//  - otherwise the stack status (NO_ERROR when the condition was met),
//  - or the raw error from the condition-variable wait.
//
// The 1-second timeout acts as a watchdog: a timeout with the condition
// still false only logs a warning and retries instead of failing.
status_t SharedBufferBase::waitForCondition(const ConditionBase& condition)
{
    const SharedBufferStack& stack( *mSharedStack );
    SharedClient& client( *mSharedClient );
    const nsecs_t TIMEOUT = s2ns(1);
    const int identity = mIdentity;

    Mutex::Autolock _l(client.lock);
    while ((condition()==false) &&
            (stack.identity == identity) &&
            (stack.status == NO_ERROR))
    {
        status_t err = client.cv.waitRelative(client.lock, TIMEOUT);
        // handle errors and timeouts
        if (CC_UNLIKELY(err != NO_ERROR)) {
            if (err == TIMED_OUT) {
                if (condition()) {
                    // Condition is true but we never saw the signal;
                    // recover and continue.
                    LOGE("waitForCondition(%s) timed out (identity=%d), "
                        "but condition is true! We recovered but it "
                        "shouldn't happen." , condition.name(), stack.identity);
                    break;
                } else {
                    LOGW("waitForCondition(%s) timed out "
                        "(identity=%d, status=%d). "
                        "CPU may be pegged. trying again.", condition.name(),
                        stack.identity, stack.status);
                }
            } else {
                LOGE("waitForCondition(%s) error (%s) ",
                        condition.name(), strerror(-err));
                return err;
            }
        }
    }
    return (stack.identity != mIdentity) ? status_t(BAD_INDEX) : stack.status;
}
234// ============================================================================
235// conditions and updates
236// ============================================================================
237
SharedBufferClient::DequeueCondition::DequeueCondition(
        SharedBufferClient* sbc) : ConditionBase(sbc)  {
}
// True when at least one buffer is available for the client to dequeue.
bool SharedBufferClient::DequeueCondition::operator()() const {
    return stack.available > 0;
}
244
SharedBufferClient::LockCondition::LockCondition(
        SharedBufferClient* sbc, int buf) : ConditionBase(sbc), buf(buf) {
}
// True when the client may safely write to buffer 'buf': either it is
// not the current front buffer (stack.index[stack.head] holds the front
// buffer's id), or something is queued and the server does not have
// 'buf' locked (stack.inUse).
bool SharedBufferClient::LockCondition::operator()() const {
    return (buf != stack.index[stack.head] ||
            (stack.queued > 0 && stack.inUse != buf));
}
252
SharedBufferServer::ReallocateCondition::ReallocateCondition(
        SharedBufferBase* sbb, int buf) : ConditionBase(sbb), buf(buf) {
}
// True once 'buf' is no longer the front buffer, i.e. the server may
// reallocate it without disturbing what is displayed.
bool SharedBufferServer::ReallocateCondition::operator()() const {
    // TODO: we should also check that buf has been dequeued
    return (buf != stack.index[stack.head]);
}
260
261// ----------------------------------------------------------------------------
262
SharedBufferClient::QueueUpdate::QueueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
// Atomically bumps the count of buffers queued for the server.
ssize_t SharedBufferClient::QueueUpdate::operator()() {
    android_atomic_inc(&stack.queued);
    return NO_ERROR;
}

SharedBufferClient::UndoDequeueUpdate::UndoDequeueUpdate(SharedBufferBase* sbb)
    : UpdateBase(sbb) {
}
// Returns a dequeued buffer to the pool by re-incrementing 'available';
// the tail rollback is done by the caller (undoDequeue).
ssize_t SharedBufferClient::UndoDequeueUpdate::operator()() {
    android_atomic_inc(&stack.available);
    return NO_ERROR;
}
278
SharedBufferServer::UnlockUpdate::UnlockUpdate(
        SharedBufferBase* sbb, int lockedBuffer)
    : UpdateBase(sbb), lockedBuffer(lockedBuffer) {
}
// Releases the server-side lock on 'lockedBuffer' (inUse goes back to
// -1, meaning "no buffer locked"). Fails with BAD_VALUE when that
// buffer is not the one currently marked in-use.
ssize_t SharedBufferServer::UnlockUpdate::operator()() {
    if (stack.inUse != lockedBuffer) {
        LOGE("unlocking %d, but currently locked buffer is %d",
                lockedBuffer, stack.inUse);
        return BAD_VALUE;
    }
    android_atomic_write(-1, &stack.inUse);
    return NO_ERROR;
}
292
SharedBufferServer::RetireUpdate::RetireUpdate(
        SharedBufferBase* sbb, int numBuffers)
    : UpdateBase(sbb), numBuffers(numBuffers) {
}
// Advances the front buffer (head) by one slot and locks the new front
// buffer for the server. Returns the new head slot index, or
// NOT_ENOUGH_DATA when nothing is queued. The precise ordering of the
// atomic writes below is what keeps clients from touching a buffer the
// server is about to display.
ssize_t SharedBufferServer::RetireUpdate::operator()() {
    // head is only written in this function, which is single-thread.
    int32_t head = stack.head;

    // Preventively lock the current buffer before updating queued.
    android_atomic_write(stack.index[head], &stack.inUse);

    // Decrement the number of queued buffers (CAS loop because the
    // client increments it concurrently); bail out if none are queued.
    int32_t queued;
    do {
        queued = stack.queued;
        if (queued == 0) {
            return NOT_ENOUGH_DATA;
        }
    } while (android_atomic_cmpxchg(queued, queued-1, &stack.queued));

    // update the head pointer
    head = ((head+1 >= numBuffers) ? 0 : head+1);

    // lock the buffer before advancing head, which automatically unlocks
    // the buffer we preventively locked upon entering this function
    android_atomic_write(stack.index[head], &stack.inUse);

    // advance head
    android_atomic_write(head, &stack.head);

    // now that head has moved, we can increment the number of available buffers
    android_atomic_inc(&stack.available);
    return head;
}
327
SharedBufferServer::StatusUpdate::StatusUpdate(
        SharedBufferBase* sbb, status_t status)
    : UpdateBase(sbb), status(status) {
}

// Publishes 'status' into the shared stack so that waiting clients
// (see waitForCondition) observe it and stop blocking.
ssize_t SharedBufferServer::StatusUpdate::operator()() {
    android_atomic_write(status, &stack.status);
    return NO_ERROR;
}
337
338// ============================================================================
339
340SharedBufferClient::SharedBufferClient(SharedClient* sharedClient,
341        int surface, int num, int32_t identity)
342    : SharedBufferBase(sharedClient, surface, num, identity),
343      tail(0), undoDequeueTail(0)
344{
345    SharedBufferStack& stack( *mSharedStack );
346    tail = computeTail();
347    queued_head = stack.head;
348}
349
// Dequeues the next free buffer for the client to render into. Blocks
// (via DequeueCondition) until one is available. Returns the dequeued
// buffer id, or a negative status on failure.
ssize_t SharedBufferClient::dequeue()
{
    SharedBufferStack& stack( *mSharedStack );

    // Sanity log: tail equals head while every buffer is still marked
    // available, which should not normally happen.
    if (stack.head == tail && stack.available == mNumBuffers) {
        LOGW("dequeue: tail=%d, head=%d, avail=%d, queued=%d",
                tail, stack.head, stack.available, stack.queued);
    }

    // Timestamp consumed by queue() to compute per-buffer cycle time.
    const nsecs_t dequeueTime = systemTime(SYSTEM_TIME_THREAD);

    //LOGD("[%d] about to dequeue a buffer",
    //        mSharedStack->identity);
    DequeueCondition condition(this);
    status_t err = waitForCondition(condition);
    if (err != NO_ERROR)
        return ssize_t(err);

    // NOTE: 'stack.available' is part of the conditions, however
    // decrementing it, never changes any conditions, so we don't need
    // to do this as part of an update.
    if (android_atomic_dec(&stack.available) == 0) {
        LOGW("dequeue probably called from multiple threads!");
    }

    // Remember the pre-advance tail so undoDequeue() can restore it.
    undoDequeueTail = tail;
    int dequeued = stack.index[tail];
    tail = ((tail+1 >= mNumBuffers) ? 0 : tail+1);
    LOGD_IF(DEBUG_ATOMICS, "dequeued=%d, tail++=%d, %s",
            dequeued, tail, dump("").string());

    mDequeueTime[dequeued] = dequeueTime;

    return dequeued;
}
385
// Undoes the most recent dequeue(): gives the buffer back (available++)
// and restores the saved tail position.
// NOTE(review): the 'buf' argument is unused — only the immediately
// preceding dequeue can be undone, as the TODO below acknowledges.
status_t SharedBufferClient::undoDequeue(int buf)
{
    // TODO: we can only undo the previous dequeue, we should
    // enforce that in the api
    UndoDequeueUpdate update(this);
    status_t err = updateCondition( update );
    if (err == NO_ERROR) {
        tail = undoDequeueTail;
    }
    return err;
}
397
398status_t SharedBufferClient::lock(int buf)
399{
400    SharedBufferStack& stack( *mSharedStack );
401    LockCondition condition(this, buf);
402    status_t err = waitForCondition(condition);
403    return err;
404}
405
// Hands a rendered buffer back to the server: records its id at the
// next queued_head slot and atomically bumps the queued count, then
// updates the per-buffer timing statistic.
status_t SharedBufferClient::queue(int buf)
{
    SharedBufferStack& stack( *mSharedStack );

    // Advance queued_head circularly and publish the buffer id there.
    queued_head = ((queued_head+1 >= mNumBuffers) ? 0 : queued_head+1);
    stack.index[queued_head] = buf;

    QueueUpdate update(this);
    status_t err = updateCondition( update );
    LOGD_IF(DEBUG_ATOMICS, "queued=%d, %s", buf, dump("").string());

    // Record how long this buffer spent between dequeue() and queue().
    const nsecs_t now = systemTime(SYSTEM_TIME_THREAD);
    stack.stats.totalTime = ns2us(now - mDequeueTime[buf]);
    return err;
}
421
422bool SharedBufferClient::needNewBuffer(int buf) const
423{
424    SharedBufferStack& stack( *mSharedStack );
425    const uint32_t mask = 1<<buf;
426    return (android_atomic_and(~mask, &stack.reallocMask) & mask) != 0;
427}
428
429status_t SharedBufferClient::setCrop(int buf, const Rect& crop)
430{
431    SharedBufferStack& stack( *mSharedStack );
432    return stack.setCrop(buf, crop);
433}
434
435status_t SharedBufferClient::setDirtyRegion(int buf, const Region& reg)
436{
437    SharedBufferStack& stack( *mSharedStack );
438    return stack.setDirtyRegion(buf, reg);
439}
440
441// ----------------------------------------------------------------------------
442
// Server-side endpoint: (re)initializes the shared stack for this
// surface. All 'num' buffers start out available with nothing queued,
// head points at the last slot, the realloc mask is clear, and the
// index table starts as the identity mapping slot -> buffer id.
SharedBufferServer::SharedBufferServer(SharedClient* sharedClient,
        int surface, int num, int32_t identity)
    : SharedBufferBase(sharedClient, surface, num, identity)
{
    mSharedStack->init(identity);
    mSharedStack->head = num-1;
    mSharedStack->available = num;
    mSharedStack->queued = 0;
    mSharedStack->reallocMask = 0;
    memset(mSharedStack->buffers, 0, sizeof(mSharedStack->buffers));
    for (int i=0 ; i<num ; i++) {
        mSharedStack->index[i] = i;
    }
}
457
// Retires the current front buffer and locks the next one for display.
// Returns the buffer id of the new front buffer, or a negative status
// (e.g. NOT_ENOUGH_DATA when nothing is queued).
ssize_t SharedBufferServer::retireAndLock()
{
    RetireUpdate update(this, mNumBuffers);
    ssize_t buf = updateCondition( update );
    if (buf >= 0) {
        // RetireUpdate returns a slot number; translate it to the
        // buffer id stored in the shared index table.
        SharedBufferStack& stack( *mSharedStack );
        buf = stack.index[buf];
        LOGD_IF(DEBUG_ATOMICS && buf>=0, "retire=%d, %s",
                int(buf), dump("").string());
    }
    return buf;
}
470
471status_t SharedBufferServer::unlock(int buf)
472{
473    UnlockUpdate update(this, buf);
474    status_t err = updateCondition( update );
475    return err;
476}
477
478void SharedBufferServer::setStatus(status_t status)
479{
480    if (status < NO_ERROR) {
481        StatusUpdate update(this, status);
482        updateCondition( update );
483    }
484}
485
// Requests that the client reallocate all of its buffers by setting
// every buffer's bit in reallocMask; bits are cleared one at a time by
// the client via needNewBuffer().
// NOTE(review): (1<<mNumBuffers)-1 assumes mNumBuffers < 32 — confirm
// this is guaranteed by NUM_BUFFER_MAX.
status_t SharedBufferServer::reallocate()
{
    SharedBufferStack& stack( *mSharedStack );
    uint32_t mask = (1<<mNumBuffers)-1;
    android_atomic_or(mask, &stack.reallocMask);
    return NO_ERROR;
}
493
494int32_t SharedBufferServer::getQueuedCount() const
495{
496    SharedBufferStack& stack( *mSharedStack );
497    return stack.queued;
498}
499
500status_t SharedBufferServer::assertReallocate(int buf)
501{
502    ReallocateCondition condition(this, buf);
503    status_t err = waitForCondition(condition);
504    return err;
505}
506
507Region SharedBufferServer::getDirtyRegion(int buf) const
508{
509    SharedBufferStack& stack( *mSharedStack );
510    return stack.getDirtyRegion(buf);
511}
512
513SharedBufferStack::Statistics SharedBufferServer::getStats() const
514{
515    SharedBufferStack& stack( *mSharedStack );
516    return stack.stats;
517}
518
519
520// ---------------------------------------------------------------------------
521}; // namespace android
522