// SkScan_AntiPath.cpp revision f4e5995ac70d4614e0a05b92a8a03e2b7d76bd9c

/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "SkScanPriv.h"
#include "SkPath.h"
#include "SkMatrix.h"
#include "SkBlitter.h"
#include "SkRegion.h"
#include "SkAntiRun.h"

#define SHIFT   2
#define SCALE   (1 << SHIFT)
#define MASK    (SCALE - 1)

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records an RLE-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE

    Enabling SK_USE_LEGACY_AA_COVERAGE keeps the aa coverage calculations as
    they were before the fix that unified the output of the RLE and MASK
    supersamplers.
 */
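
/*  A worked example of the two coordinate systems, using the SHIFT == 2
    configured above: SCALE is 4 and MASK is 3, so destination pixel x = 10
    covers supersampled x values 40..43, and a supersampled coordinate maps
    back to a destination coordinate with x >> SHIFT. Each destination
    scanline is accumulated from SCALE supersampled scanlines.
*/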

//#define FORCE_SUPERMASK
//#define FORCE_RLE
//#define SK_USE_LEGACY_AA_COVERAGE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip, bool isInverse);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) SK_OVERRIDE {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    virtual void blitV(int x, int y, int height, SkAlpha alpha) SK_OVERRIDE {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter*  fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int         fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int         fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int         fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int         fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;

    SkIRect fSectBounds;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlit, const SkIRect& ir, const SkRegion& clip,
                                   bool isInverse) {
    fRealBlitter = realBlit;

    SkIRect sectBounds;
    if (isInverse) {
        // We use the clip bounds instead of the ir, since we may be asked to
        // draw outside of the rect when we're an inverse filltype.
        sectBounds = clip.getBounds();
    } else {
        if (!sectBounds.intersect(ir, clip.getBounds())) {
            sectBounds.setEmpty();
        }
    }

    const int left = sectBounds.left();
    const int right = sectBounds.right();

    fLeft = left;
    fSuperLeft = left << SHIFT;
    fWidth = right - left;
    fTop = sectBounds.top();
    fCurrIY = fTop - 1;
    fCurrY = (fTop << SHIFT) - 1;

    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip, bool isInverse);

    virtual ~SuperBlitter() {
        this->flush();
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    virtual void blitH(int x, int y, int width) SK_OVERRIDE;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    virtual void blitRect(int x, int y, int width, int height) SK_OVERRIDE;

private:
    // The next three variables are used to track a circular buffer that
    // contains the values used in SkAlphaRuns. These variables should only
    // ever be updated in advanceRuns(), and fRuns should always point to
    // a valid SkAlphaRuns...
    int         fRunsToBuffer;
    void*       fRunsBuffer;
    int         fCurrentRun;
    SkAlphaRuns fRuns;

    // extra one to store the zero at the end
    int getRunsSz() const { return (fWidth + 1 + (fWidth + 2)/2) * sizeof(int16_t); }
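    // Each buffered run set occupies getRunsSz() bytes: (fWidth + 1) int16_t
    // run values followed by the alpha bytes, with the alpha storage rounded
    // up to an int16_t boundary (advanceRuns() below places fAlpha right
    // after the runs).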

    // This function updates the fRuns variable to point to the next buffer space
    // with adequate storage for a SkAlphaRuns. It mostly just advances fCurrentRun
    // and resets fRuns to point to an empty scanline.
    void advanceRuns() {
        const size_t kRunsSz = this->getRunsSz();
        fCurrentRun = (fCurrentRun + 1) % fRunsToBuffer;
        fRuns.fRuns = reinterpret_cast<int16_t*>(
            reinterpret_cast<uint8_t*>(fRunsBuffer) + fCurrentRun * kRunsSz);
        fRuns.fAlpha = reinterpret_cast<SkAlpha*>(fRuns.fRuns + fWidth + 1);
        fRuns.reset(fWidth);
    }

    int         fOffsetX;
};

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
                           bool isInverse)
        : BaseSuperBlitter(realBlitter, ir, clip, isInverse)
{
    fRunsToBuffer = realBlitter->requestRowsPreserved();
    fRunsBuffer = realBlitter->allocBlitMemory(fRunsToBuffer * this->getRunsSz());
    fCurrentRun = -1;

    this->advanceRuns();

    fOffsetX = 0;
}

void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {

        SkASSERT(fCurrentRun < fRunsToBuffer);
        if (!fRuns.empty()) {
            // SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            this->advanceRuns();
            fOffsetX = 0;
        }

        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}

/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
    *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
    to produce a final value in [0, 255] and handles clamping 256->255
    itself, with the same (alpha - (alpha >> 8)) correction as
    coverage_to_exact_alpha().
*/
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
#ifdef SK_USE_LEGACY_AA_COVERAGE
    aa -= aa >> (8 - SHIFT - 1);
#endif
    return aa;
}

/** coverage_to_exact_alpha() is being used by our blitter, which wants
    a final value in [0, 255].
*/
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
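
/*  Worked example with SHIFT == 2 (SCALE == 4): for a fully covered
    destination pixel, coverage_to_exact_alpha(4) == 64 * 4 == 256, clamped
    to 255. coverage_to_partial_alpha(4) == 4 << 4 == 64; SkAlphaRuns
    accumulates one such value per supersampled row, so the SCALE rows sum
    to 256 and are clamped to 255, matching the exact path.
*/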

void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

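    // fb and fe are the sub-pixel positions of start and stop within their
    // destination pixels. The adjustment below reworks them so that fb is the
    // number of covered supersamples in the leftmost partial pixel, n is the
    // number of fully covered destination pixels, and fe is the number of
    // covered supersamples in the rightmost partial pixel; n < 0 means the
    // entire span falls within a single destination pixel.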
    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }

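    // The fifth argument to add() is the per-row alpha used for the fully
    // covered middle pixels: with SHIFT == 2 it is 64 for the first three
    // supersampled rows of a destination row and 63 for the last one, so the
    // SCALE rows sum to exactly 255.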
    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}

#if 0 // UNUSED
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}
#endif

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            xrite = 0;
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion&, bool isInverse);
    virtual ~MaskSuperBlitter() {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    virtual void blitH(int x, int y, int width) SK_OVERRIDE;

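    // A quick sizing example for the check below, assuming the default limits
    // (kMAX_WIDTH = 32, kMAX_STORAGE = 1024): a 32x32 bounds needs
    // SkAlign4(32) * 32 = 1024 bytes and is accepted, while a 20x60 bounds
    // needs 20 * 60 = 1200 bytes and falls back to the RLE SuperBlitter.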
    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask      fMask;
    SkIRect     fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t    fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir, const SkRegion& clip,
                                   bool isInverse)
    : BaseSuperBlitter(realBlitter, ir, clip, isInverse)
{
    SkASSERT(CanHandleRect(ir));
    SkASSERT(!isInverse);

    fMask.fImage    = (uint8_t*)fStorage;
    fMask.fBounds   = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat   = SkMask::kA8_Format;

    fClipRect = ir;
    if (!fClipRect.intersect(clip.getBounds())) {
        SkASSERT(0);
        fClipRect.setEmpty();
    }

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /*  I should be able to just add alpha[x] + startAlpha.
        However, if the trailing edge of the previous span and the leading
        edge of the current span round to the same super-sampled x value,
        I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}
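
// For example, quadplicate_byte(0x40) == 0x40404040. The quad loop in
// add_aa_span() below adds this to four alpha bytes at once; the byte lanes
// cannot carry into each other because a fully covered pixel accumulates at
// most 255 over the SCALE supersampled rows (64 + 64 + 64 + 63 with SHIFT == 2).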

// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}

// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP  16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (SkTCast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*> (qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra
    // space (see the + 1 comment in fStorage[]).
    saturated_add(alpha, stopAlpha);
}

void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does.  Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= (fMask.fBounds.fLeft << SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;


    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row,  coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static bool fitsInsideLimit(const SkRect& r, SkScalar max) {
    const SkScalar min = -max;
    return  r.fLeft > min && r.fTop > min &&
            r.fRight < max && r.fBottom < max;
}

static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (value << s >> s) - value;
}
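
/*  A note on the trick above: (value << s >> s) sign-extends value from its
    low (16 - shift) bits, so the result is nonzero exactly when
    value << shift does not fit in an int16_t. With SHIFT == 2 the largest
    coordinate that survives is 8191 (8191 << 2 == 32764), while
    8192 << 2 == 32768 overflows; the asserts in rect_overflows_short_shift()
    below check exactly this.
*/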

/**
  Would any of the coordinates of this rectangle not fit in a short,
  when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, SHIFT));
    SkASSERT(overflows_short_shift(8192, SHIFT));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, SHIFT) |
           overflows_short_shift(rect.fRight, SHIFT) |
           overflows_short_shift(rect.fTop, SHIFT) |
           overflows_short_shift(rect.fBottom, SHIFT);
}

static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) {
    const SkScalar maxScalar = SkIntToScalar(maxInt);

    if (fitsInsideLimit(src, maxScalar)) {
        src.roundOut(dst);
        return true;
    }
    return false;
}

void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    const bool isInverse = path.isInverseFillType();
    SkIRect ir;

    if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
#if 0
        const SkRect& r = path.getBounds();
        SkDebugf("--- bounds can't fit in SkIRect [%g %g %g %g]\n", r.fLeft, r.fTop, r.fRight, r.fBottom);
#endif
        return;
    }
    if (ir.isEmpty()) {
        if (isInverse) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (isInverse) {
       // If the path is an inverse fill, it's going to fill the entire
       // clip, and we care whether the entire clip exceeds our limits.
       clippedIR = origClip.getBounds();
    } else {
       if (!clippedIR.intersect(ir, origClip.getBounds())) {
           return;
       }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds; see overflows_short_shift) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper   clipper(blitter, clipRgn, ir);
    const SkIRect*  clipRect = clipper.getClipRect();

    if (clipper.getBlitter() == NULL) { // clipped out
        if (isInverse) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (isInverse) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkIRect superRect, *superClipRect = NULL;

    if (clipRect) {
        superRect.set(  clipRect->fLeft << SHIFT, clipRect->fTop << SHIFT,
                        clipRect->fRight << SHIFT, clipRect->fBottom << SHIFT);
        superClipRect = &superRect;
    }

    SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!isInverse && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter    superBlit(blitter, ir, *clipRgn, isInverse);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    } else {
        SuperBlitter    superBlit(blitter, ir, *clipRgn, isInverse);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    }

    if (isInverse) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}

///////////////////////////////////////////////////////////////////////////////

#include "SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::AntiFillPath(path, tmp, &aaBlitter, true);
    }
}