SkScan_AntiPath.cpp revision 5b7d603e179a38735056db5f9092d18c187e76a0

/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "SkScanPriv.h"
#include "SkPath.h"
#include "SkMatrix.h"
#include "SkBlitter.h"
#include "SkRegion.h"
#include "SkAntiRun.h"

#define SHIFT   2
#define SCALE   (1 << SHIFT)
#define MASK    (SCALE - 1)
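// With SHIFT == 2: SCALE == 4 and MASK == 3, so each destination pixel is
// covered by a 4x4 grid of supersamples (4 supersampled rows and 4 columns).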

/** @file
    We have two techniques for capturing the output of the supersampler:
    - SUPERMASK, which records a large mask-bitmap
        this is often faster for small, complex objects
    - RLE, which records an RLE-encoded scanline
        this is often faster for large objects with big spans

    These blitters use two coordinate systems:
    - destination coordinates, scale equal to the output - often
        abbreviated with 'i' or 'I' in variable names
    - supersampled coordinates, scale equal to the output * SCALE

    Enabling SK_USE_LEGACY_AA_COVERAGE keeps the aa coverage calculations as
    they were before the fix that unified the output of the RLE and MASK
    supersamplers.
 */
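// For example, with SHIFT == 2 a destination x of 10 corresponds to the
// supersampled x range [10 << SHIFT, 11 << SHIFT) == [40, 44).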

//#define FORCE_SUPERMASK
//#define FORCE_RLE
#define SK_USE_LEGACY_AA_COVERAGE

///////////////////////////////////////////////////////////////////////////////

/// Base class for a single-pass supersampled blitter.
class BaseSuperBlitter : public SkBlitter {
public:
    BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip);

    /// Must be explicitly defined on subclasses.
    virtual void blitAntiH(int x, int y, const SkAlpha antialias[],
                           const int16_t runs[]) SK_OVERRIDE {
        SkDEBUGFAIL("How did I get here?");
    }
    /// May not be called on BaseSuperBlitter because it blits out of order.
    virtual void blitV(int x, int y, int height, SkAlpha alpha) SK_OVERRIDE {
        SkDEBUGFAIL("How did I get here?");
    }

protected:
    SkBlitter*  fRealBlitter;
    /// Current y coordinate, in destination coordinates.
    int         fCurrIY;
    /// Widest row of region to be blitted, in destination coordinates.
    int         fWidth;
    /// Leftmost x coordinate in any row, in destination coordinates.
    int         fLeft;
    /// Leftmost x coordinate in any row, in supersampled coordinates.
    int         fSuperLeft;

    SkDEBUGCODE(int fCurrX;)
    /// Current y coordinate in supersampled coordinates.
    int fCurrY;
    /// Initial y coordinate (top of bounds).
    int fTop;
};

BaseSuperBlitter::BaseSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkRegion& clip) {
    fRealBlitter = realBlitter;

    /*
     *  We use the clip bounds instead of the ir, since we may be asked to
     *  draw outside of the rect if we're an inverse filltype
     */
    const int left = clip.getBounds().fLeft;
    const int right = clip.getBounds().fRight;

    fLeft = left;
    fSuperLeft = left << SHIFT;
    fWidth = right - left;
#if 0
    fCurrIY = -1;
    fCurrY = -1;
#else
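    // Start one (destination and supersampled) row above the top of the bounds
    // so the first blitH() is treated as the start of a new scanline.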
    fTop = ir.fTop;
    fCurrIY = ir.fTop - 1;
    fCurrY = (ir.fTop << SHIFT) - 1;
#endif
    SkDEBUGCODE(fCurrX = -1;)
}

/// Run-length-encoded supersampling antialiased blitter.
class SuperBlitter : public BaseSuperBlitter {
public:
    SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                 const SkRegion& clip);

    virtual ~SuperBlitter() {
        this->flush();
        sk_free(fRuns.fRuns);
    }

    /// Once fRuns contains a complete supersampled row, flush() blits
    /// it out through the wrapped blitter.
    void flush();

    /// Blits a row of pixels, with location and width specified
    /// in supersampled coordinates.
    virtual void blitH(int x, int y, int width) SK_OVERRIDE;
    /// Blits a rectangle of pixels, with location and size specified
    /// in supersampled coordinates.
    virtual void blitRect(int x, int y, int width, int height) SK_OVERRIDE;

private:
    SkAlphaRuns fRuns;
    int         fOffsetX;
};

SuperBlitter::SuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                           const SkRegion& clip)
        : BaseSuperBlitter(realBlitter, ir, clip) {
    const int width = fWidth;

    // extra one to store the zero at the end
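    // One allocation backs both arrays: (width + 1) int16_t of runs, followed
    // by the alpha bytes; the extra (width + 2)/2 int16_t provide at least
    // (width + 1) bytes of storage for them.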
    fRuns.fRuns = (int16_t*)sk_malloc_throw((width + 1 + (width + 2)/2) * sizeof(int16_t));
    fRuns.fAlpha = (uint8_t*)(fRuns.fRuns + width + 1);
    fRuns.reset(width);

    fOffsetX = 0;
}

void SuperBlitter::flush() {
    if (fCurrIY >= fTop) {
        if (!fRuns.empty()) {
        //  SkDEBUGCODE(fRuns.dump();)
            fRealBlitter->blitAntiH(fLeft, fCurrIY, fRuns.fAlpha, fRuns.fRuns);
            fRuns.reset(fWidth);
            fOffsetX = 0;
        }
        fCurrIY = fTop - 1;
        SkDEBUGCODE(fCurrX = -1;)
    }
}

/** coverage_to_partial_alpha() is being used by SkAlphaRuns, which
    *accumulates* SCALE pixels worth of "alpha" in [0,(256/SCALE)]
    to produce a final value in [0, 255] and handles clamping 256->255
    itself, with the same (alpha - (alpha >> 8)) correction as
    coverage_to_exact_alpha().
*/
static inline int coverage_to_partial_alpha(int aa) {
    aa <<= 8 - 2*SHIFT;
#ifdef SK_USE_LEGACY_AA_COVERAGE
    aa -= aa >> (8 - SHIFT - 1);
#endif
    return aa;
}

/** coverage_to_exact_alpha() is being used by our blitter, which wants
    a final value in [0, 255].
*/
static inline int coverage_to_exact_alpha(int aa) {
    int alpha = (256 >> SHIFT) * aa;
    // clamp 256->255
    return alpha - (alpha >> 8);
}
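// With SHIFT == 2, the mapping above is aa = 0..4  ->  0, 64, 128, 192, 255.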

void SuperBlitter::blitH(int x, int y, int width) {
    SkASSERT(width > 0);

    int iy = y >> SHIFT;
    SkASSERT(iy >= fCurrIY);

    x -= fSuperLeft;
    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

#ifdef SK_DEBUG
    SkASSERT(y != fCurrY || x >= fCurrX);
#endif
    SkASSERT(y >= fCurrY);
    if (fCurrY != y) {
        fOffsetX = 0;
        fCurrY = y;
    }

    if (iy != fCurrIY) {  // new scanline
        this->flush();
        fCurrIY = iy;
    }

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    // integer-pixel-aligned ends of blit, rounded out
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;

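    // fb: supersamples in the leftmost destination pixel that lie to the left
    // of the span (so SCALE - fb of them are covered); fe: supersamples covered
    // in the rightmost pixel; n: fully covered destination pixels in between.
    // When the span begins and ends inside one destination pixel (n < 0),
    // collapse it to a single partial contribution of fe - fb supersamples.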
    if (n < 0) {
        fb = fe - fb;
        n = 0;
        fe = 0;
    } else {
        if (fb == 0) {
            n += 1;
        } else {
            fb = SCALE - fb;
        }
    }

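    // The maxValue argument, (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
    // is the per-row contribution of a fully covered pixel: with SHIFT == 2 it
    // is 64 for the first three supersampled rows and 63 for the last, so
    // accumulating SCALE rows yields 255 rather than overflowing to 256.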
    fOffsetX = fRuns.add(x >> SHIFT, coverage_to_partial_alpha(fb),
                         n, coverage_to_partial_alpha(fe),
                         (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT),
                         fOffsetX);

#ifdef SK_DEBUG
    fRuns.assertValid(y & MASK, (1 << (8 - SHIFT)));
    fCurrX = x + width;
#endif
}

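// Fill a row of (run, alpha) pairs describing: an optional transparent lead-in
// of ileft pixels, one left-edge pixel with alpha leftA, n fully opaque pixels,
// one right-edge pixel with alpha riteA, and a terminating zero run.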
static void set_left_rite_runs(SkAlphaRuns& runs, int ileft, U8CPU leftA,
                               int n, U8CPU riteA) {
    SkASSERT(leftA <= 0xFF);
    SkASSERT(riteA <= 0xFF);

    int16_t* run = runs.fRuns;
    uint8_t* aa = runs.fAlpha;

    if (ileft > 0) {
        run[0] = ileft;
        aa[0] = 0;
        run += ileft;
        aa += ileft;
    }

    SkASSERT(leftA < 0xFF);
    if (leftA > 0) {
        *run++ = 1;
        *aa++ = leftA;
    }

    if (n > 0) {
        run[0] = n;
        aa[0] = 0xFF;
        run += n;
        aa += n;
    }

    SkASSERT(riteA < 0xFF);
    if (riteA > 0) {
        *run++ = 1;
        *aa++ = riteA;
    }
    run[0] = 0;
}

void SuperBlitter::blitRect(int x, int y, int width, int height) {
    SkASSERT(width > 0);
    SkASSERT(height > 0);

    // blit leading rows
    while ((y & MASK)) {
        this->blitH(x, y++, width);
        if (--height <= 0) {
            return;
        }
    }
    SkASSERT(height > 0);

    // Since this is a rect, instead of blitting supersampled rows one at a
    // time and then resolving to the destination canvas, we can blit
    // directly to the destination canvas one row per SCALE supersampled rows.
    int start_y = y >> SHIFT;
    int stop_y = (y + height) >> SHIFT;
    int count = stop_y - start_y;
    if (count > 0) {
        y += count << SHIFT;
        height -= count << SHIFT;

        // save original X for our tail blitH() loop at the bottom
        int origX = x;

        x -= fSuperLeft;
        // hack, until I figure out why my cubics (I think) go beyond the bounds
        if (x < 0) {
            width += x;
            x = 0;
        }

        // There is always a left column, a middle, and a right column.
        // ileft is the destination x of the first pixel of the entire rect.
        // xleft is (SCALE - # of covered supersampled pixels) in that
        // destination pixel.
        int ileft = x >> SHIFT;
        int xleft = x & MASK;
        // irite is the destination x of the last pixel of the OPAQUE section.
        // xrite is the number of supersampled pixels extending beyond irite;
        // xrite/SCALE should give us alpha.
        int irite = (x + width) >> SHIFT;
        int xrite = (x + width) & MASK;
        if (!xrite) {
            xrite = SCALE;
            irite--;
        }

        // Need to call flush() to clean up pending draws before we
        // even consider blitV(), since otherwise it can look nonmonotonic.
        SkASSERT(start_y > fCurrIY);
        this->flush();

        int n = irite - ileft - 1;
        if (n < 0) {
            // If n < 0, we'll only have a single partially-transparent column
            // of pixels to render.
            xleft = xrite - xleft;
            SkASSERT(xleft <= SCALE);
            SkASSERT(xleft > 0);
            xrite = 0;
            fRealBlitter->blitV(ileft + fLeft, start_y, count,
                coverage_to_exact_alpha(xleft));
        } else {
            // With n = 0, we have two possibly-transparent columns of pixels
            // to render; with n > 0, we have opaque columns between them.

            xleft = SCALE - xleft;

            // Using coverage_to_exact_alpha is not consistent with blitH()
            const int coverageL = coverage_to_exact_alpha(xleft);
            const int coverageR = coverage_to_exact_alpha(xrite);

            SkASSERT(coverageL > 0 || n > 0 || coverageR > 0);
            SkASSERT((coverageL != 0) + n + (coverageR != 0) <= fWidth);

            fRealBlitter->blitAntiRect(ileft + fLeft, start_y, n, count,
                                       coverageL, coverageR);
        }

        // preamble for our next call to blitH()
        fCurrIY = stop_y - 1;
        fOffsetX = 0;
        fCurrY = y - 1;
        fRuns.reset(fWidth);
        x = origX;
    }

    // catch any remaining few rows
    SkASSERT(height <= MASK);
    while (--height >= 0) {
        this->blitH(x, y++, width);
    }
}

///////////////////////////////////////////////////////////////////////////////

/// Masked supersampling antialiased blitter.
class MaskSuperBlitter : public BaseSuperBlitter {
public:
    MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                     const SkRegion& clip);
    virtual ~MaskSuperBlitter() {
        fRealBlitter->blitMask(fMask, fClipRect);
    }

    virtual void blitH(int x, int y, int width) SK_OVERRIDE;

    static bool CanHandleRect(const SkIRect& bounds) {
#ifdef FORCE_RLE
        return false;
#endif
        int width = bounds.width();
        int64_t rb = SkAlign4(width);
        // use 64bits to detect overflow
        int64_t storage = rb * bounds.height();

        return (width <= MaskSuperBlitter::kMAX_WIDTH) &&
               (storage <= MaskSuperBlitter::kMAX_STORAGE);
    }

private:
    enum {
#ifdef FORCE_SUPERMASK
        kMAX_WIDTH = 2048,
        kMAX_STORAGE = 1024 * 1024 * 2
#else
        kMAX_WIDTH = 32,    // so we don't try to do very wide things, where the RLE blitter would be faster
        kMAX_STORAGE = 1024
#endif
    };

    SkMask      fMask;
    SkIRect     fClipRect;
    // we add 1 because add_aa_span can write (unchanged) 1 extra byte at the end, rather than
    // perform a test to see if stopAlpha != 0
    uint32_t    fStorage[(kMAX_STORAGE >> 2) + 1];
};

MaskSuperBlitter::MaskSuperBlitter(SkBlitter* realBlitter, const SkIRect& ir,
                                   const SkRegion& clip)
        : BaseSuperBlitter(realBlitter, ir, clip) {
    SkASSERT(CanHandleRect(ir));

    fMask.fImage    = (uint8_t*)fStorage;
    fMask.fBounds   = ir;
    fMask.fRowBytes = ir.width();
    fMask.fFormat   = SkMask::kA8_Format;

    fClipRect = ir;
    fClipRect.intersect(clip.getBounds());

    // For valgrind, write 1 extra byte at the end so we don't read
    // uninitialized memory. See comment in add_aa_span and fStorage[].
    memset(fStorage, 0, fMask.fBounds.height() * fMask.fRowBytes + 1);
}

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha) {
    /*  I should be able to just add alpha[x] + startAlpha.
        However, if the trailing edge of the previous span and the leading
        edge of the current span round to the same super-sampled x value,
        I might overflow to 256 with this add, hence the funny subtract.
    */
    unsigned tmp = *alpha + startAlpha;
    SkASSERT(tmp <= 256);
    *alpha = SkToU8(tmp - (tmp >> 8));
}

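// Replicate an 8-bit value into all four bytes of a 32-bit word, so the inner
// loop of add_aa_span() below can add maxValue to four alpha bytes at once
// (the per-row contributions are small enough that no byte carries into its
// neighbor).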
static inline uint32_t quadplicate_byte(U8CPU value) {
    uint32_t pair = (value << 8) | value;
    return (pair << 16) | pair;
}

// Perform this tricky subtract, to avoid overflowing to 256. Our caller should
// only ever call us with at most enough to hit 256 (never larger), so it is
// enough to just subtract the high-bit. Actually clamping with a branch would
// be slower (e.g. if (tmp > 255) tmp = 255;)
//
static inline void saturated_add(uint8_t* ptr, U8CPU add) {
    unsigned tmp = *ptr + add;
    SkASSERT(tmp <= 256);
    *ptr = SkToU8(tmp - (tmp >> 8));
}

// minimum count before we want to setup an inner loop, adding 4-at-a-time
#define MIN_COUNT_FOR_QUAD_LOOP  16

static void add_aa_span(uint8_t* alpha, U8CPU startAlpha, int middleCount,
                        U8CPU stopAlpha, U8CPU maxValue) {
    SkASSERT(middleCount >= 0);

    saturated_add(alpha, startAlpha);
    alpha += 1;

    if (middleCount >= MIN_COUNT_FOR_QUAD_LOOP) {
        // loop until we're quad-byte aligned
        while (SkTCast<intptr_t>(alpha) & 0x3) {
            alpha[0] = SkToU8(alpha[0] + maxValue);
            alpha += 1;
            middleCount -= 1;
        }

        int bigCount = middleCount >> 2;
        uint32_t* qptr = reinterpret_cast<uint32_t*>(alpha);
        uint32_t qval = quadplicate_byte(maxValue);
        do {
            *qptr++ += qval;
        } while (--bigCount > 0);

        middleCount &= 3;
        alpha = reinterpret_cast<uint8_t*> (qptr);
        // fall through to the following while-loop
    }

    while (--middleCount >= 0) {
        alpha[0] = SkToU8(alpha[0] + maxValue);
        alpha += 1;
    }

    // potentially this can be off the end of our "legal" alpha values, but that
    // only happens if stopAlpha is also 0. Rather than test for stopAlpha != 0
    // every time (slow), we just do it, and ensure that we've allocated extra space
    // (see the + 1 comment in fStorage[])
    saturated_add(alpha, stopAlpha);
}

void MaskSuperBlitter::blitH(int x, int y, int width) {
    int iy = (y >> SHIFT);

    SkASSERT(iy >= fMask.fBounds.fTop && iy < fMask.fBounds.fBottom);
    iy -= fMask.fBounds.fTop;   // make it relative to 0

    // This should never happen, but it does.  Until the true cause is
    // discovered, let's skip this span instead of crashing.
    // See http://crbug.com/17569.
    if (iy < 0) {
        return;
    }

#ifdef SK_DEBUG
    {
        int ix = x >> SHIFT;
        SkASSERT(ix >= fMask.fBounds.fLeft && ix < fMask.fBounds.fRight);
    }
#endif

    x -= (fMask.fBounds.fLeft << SHIFT);

    // hack, until I figure out why my cubics (I think) go beyond the bounds
    if (x < 0) {
        width += x;
        x = 0;
    }

    uint8_t* row = fMask.fImage + iy * fMask.fRowBytes + (x >> SHIFT);

    int start = x;
    int stop = x + width;

    SkASSERT(start >= 0 && stop > start);
    int fb = start & MASK;
    int fe = stop & MASK;
    int n = (stop >> SHIFT) - (start >> SHIFT) - 1;


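    // Same decomposition as SuperBlitter::blitH(): fb and fe are the partial
    // coverages at the two ends and n is the count of fully covered destination
    // pixels between them; n < 0 means the span lies within a single pixel.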
    if (n < 0) {
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row, coverage_to_partial_alpha(fe - fb));
    } else {
        fb = SCALE - fb;
        SkASSERT(row >= fMask.fImage);
        SkASSERT(row + n + 1 < fMask.fImage + kMAX_STORAGE + 1);
        add_aa_span(row,  coverage_to_partial_alpha(fb),
                    n, coverage_to_partial_alpha(fe),
                    (1 << (8 - SHIFT)) - (((y & MASK) + 1) >> SHIFT));
    }

#ifdef SK_DEBUG
    fCurrX = x + width;
#endif
}

///////////////////////////////////////////////////////////////////////////////

static bool fitsInsideLimit(const SkRect& r, SkScalar max) {
    const SkScalar min = -max;
    return  r.fLeft > min && r.fTop > min &&
            r.fRight < max && r.fBottom < max;
}

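// Returns 0 iff (value << shift) still fits in a signed 16-bit value: the
// double shift sign-extends the low (16 - shift) bits and the subtraction
// compares that round-trip against the original.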
static int overflows_short_shift(int value, int shift) {
    const int s = 16 + shift;
    return (value << s >> s) - value;
}

/**
  Would any of the coordinates of this rectangle not fit in a short,
  when left-shifted by shift?
*/
static int rect_overflows_short_shift(SkIRect rect, int shift) {
    SkASSERT(!overflows_short_shift(8191, SHIFT));
    SkASSERT(overflows_short_shift(8192, SHIFT));
    SkASSERT(!overflows_short_shift(32767, 0));
    SkASSERT(overflows_short_shift(32768, 0));

    // Since we expect these to succeed, we bit-or together
    // for a tiny extra bit of speed.
    return overflows_short_shift(rect.fLeft, SHIFT) |
           overflows_short_shift(rect.fRight, SHIFT) |
           overflows_short_shift(rect.fTop, SHIFT) |
           overflows_short_shift(rect.fBottom, SHIFT);
}

static bool safeRoundOut(const SkRect& src, SkIRect* dst, int32_t maxInt) {
#ifdef SK_SCALAR_IS_FIXED
    // the max-int (shifted) is exactly what we want to compare against, to know
    // if we can survive shifting our fixed-point coordinates
    const SkFixed maxScalar = maxInt;
#else
    const SkScalar maxScalar = SkIntToScalar(maxInt);
#endif
    if (fitsInsideLimit(src, maxScalar)) {
        src.roundOut(dst);
        return true;
    }
    return false;
}

void SkScan::AntiFillPath(const SkPath& path, const SkRegion& origClip,
                          SkBlitter* blitter, bool forceRLE) {
    if (origClip.isEmpty()) {
        return;
    }

    SkIRect ir;

    if (!safeRoundOut(path.getBounds(), &ir, SK_MaxS32 >> SHIFT)) {
#if 0
        const SkRect& r = path.getBounds();
        SkDebugf("--- bounds can't fit in SkIRect\n", r.fLeft, r.fTop, r.fRight, r.fBottom);
#endif
        return;
    }
    if (ir.isEmpty()) {
        if (path.isInverseFillType()) {
            blitter->blitRegion(origClip);
        }
        return;
    }

    // If the intersection of the path bounds and the clip bounds
    // will overflow 32767 when << by SHIFT, we can't supersample,
    // so draw without antialiasing.
    SkIRect clippedIR;
    if (path.isInverseFillType()) {
       // If the path is an inverse fill, it's going to fill the entire
       // clip, and we care whether the entire clip exceeds our limits.
       clippedIR = origClip.getBounds();
    } else {
       if (!clippedIR.intersect(ir, origClip.getBounds())) {
           return;
       }
    }
    if (rect_overflows_short_shift(clippedIR, SHIFT)) {
        SkScan::FillPath(path, origClip, blitter);
        return;
    }

    // Our antialiasing can't handle a clip larger than 32767, so we restrict
    // the clip to that limit here. (the runs[] uses int16_t for its index).
    //
    // A more general solution (one that could also eliminate the need to
    // disable aa based on ir bounds (see overflows_short_shift)) would be
    // to tile the clip/target...
    SkRegion tmpClipStorage;
    const SkRegion* clipRgn = &origClip;
    {
        static const int32_t kMaxClipCoord = 32767;
        const SkIRect& bounds = origClip.getBounds();
        if (bounds.fRight > kMaxClipCoord || bounds.fBottom > kMaxClipCoord) {
            SkIRect limit = { 0, 0, kMaxClipCoord, kMaxClipCoord };
            tmpClipStorage.op(origClip, limit, SkRegion::kIntersect_Op);
            clipRgn = &tmpClipStorage;
        }
    }
    // for here down, use clipRgn, not origClip

    SkScanClipper   clipper(blitter, clipRgn, ir);
    const SkIRect*  clipRect = clipper.getClipRect();

    if (clipper.getBlitter() == NULL) { // clipped out
        if (path.isInverseFillType()) {
            blitter->blitRegion(*clipRgn);
        }
        return;
    }

    // now use the (possibly wrapped) blitter
    blitter = clipper.getBlitter();

    if (path.isInverseFillType()) {
        sk_blit_above(blitter, ir, *clipRgn);
    }

    SkIRect superRect, *superClipRect = NULL;

    if (clipRect) {
        superRect.set(  clipRect->fLeft << SHIFT, clipRect->fTop << SHIFT,
                        clipRect->fRight << SHIFT, clipRect->fBottom << SHIFT);
        superClipRect = &superRect;
    }

    SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);

    // MaskSuperBlitter can't handle drawing outside of ir, so we can't use it
    // if we're an inverse filltype
    if (!path.isInverseFillType() && MaskSuperBlitter::CanHandleRect(ir) && !forceRLE) {
        MaskSuperBlitter    superBlit(blitter, ir, *clipRgn);
        SkASSERT(SkIntToScalar(ir.fTop) <= path.getBounds().fTop);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    } else {
        SuperBlitter    superBlit(blitter, ir, *clipRgn);
        sk_fill_path(path, superClipRect, &superBlit, ir.fTop, ir.fBottom, SHIFT, *clipRgn);
    }

    if (path.isInverseFillType()) {
        sk_blit_below(blitter, ir, *clipRgn);
    }
}

///////////////////////////////////////////////////////////////////////////////

#include "SkRasterClip.h"

void SkScan::FillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        FillPath(path, clip.bwRgn(), blitter);
    } else {
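        // AA clip: wrap the blitter with SkAAClipBlitter so every span gets
        // modulated by the clip's coverage, then scan using the clip's bounds
        // as a plain rectangular region.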
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::FillPath(path, tmp, &aaBlitter);
    }
}

void SkScan::AntiFillPath(const SkPath& path, const SkRasterClip& clip,
                          SkBlitter* blitter) {
    if (clip.isEmpty()) {
        return;
    }

    if (clip.isBW()) {
        AntiFillPath(path, clip.bwRgn(), blitter);
    } else {
        SkRegion        tmp;
        SkAAClipBlitter aaBlitter;

        tmp.setRect(clip.getBounds());
        aaBlitter.init(blitter, &clip.aaRgn());
        SkScan::AntiFillPath(path, tmp, &aaBlitter, true);
    }
}
