fontchain_lint.py revision fe952f3a0b0ad5c481fa3e52385866f777a4d6e2
#!/usr/bin/env python
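"""Sanity checks for the Android font fallback chain.

Summary (inferred from the code below, not stated elsewhere in the file):
the script parses fonts.xml from a target output directory, verifies that
every script with hyphenation patterns has fonts covering HYPHEN-MINUS
(U+002D) or HYPHEN (U+2010), and, when enabled, checks emoji coverage
against the Unicode Character Database.

Usage, as read from main() (argument names here are descriptive only):

    fontchain_lint.py <target_out> <ucd_dir>

where <target_out> contains fonts/, etc/fonts.xml and usr/hyphen-data/,
and <ucd_dir> contains emoji-data.txt and DerivedAge.txt.
"""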

import collections
import glob
from os import path
import sys
from xml.etree import ElementTree

from fontTools import ttLib

LANG_TO_SCRIPT = {
    'as': 'Beng',
    'bn': 'Beng',
    'cy': 'Latn',
    'da': 'Latn',
    'de': 'Latn',
    'en': 'Latn',
    'es': 'Latn',
    'et': 'Latn',
    'eu': 'Latn',
    'fr': 'Latn',
    'ga': 'Latn',
    'gu': 'Gujr',
    'hi': 'Deva',
    'hr': 'Latn',
    'hu': 'Latn',
    'hy': 'Armn',
    'ja': 'Jpan',
    'kn': 'Knda',
    'ko': 'Kore',
    'ml': 'Mlym',
    'mn': 'Cyrl',
    'mr': 'Deva',
    'nb': 'Latn',
    'nn': 'Latn',
    'or': 'Orya',
    'pa': 'Guru',
    'pt': 'Latn',
    'sl': 'Latn',
    'ta': 'Taml',
    'te': 'Telu',
    'tk': 'Latn',
}

def lang_to_script(lang_code):
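    """Map a language tag such as 'en-US' to a four-letter script code.

    Unknown tags fall back by stripping subtags from the right; a trailing
    four-letter alphabetic subtag is treated as an explicit script. For
    example, 'en-US' resolves to 'Latn' via 'en', while 'und-Ethi' resolves
    to 'Ethi' directly from its script subtag.
    """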
    lang = lang_code.lower()
    while lang not in LANG_TO_SCRIPT:
        hyphen_idx = lang.rfind('-')
        assert hyphen_idx != -1, (
            'We do not know what script the "%s" language is written in.'
            % lang_code)
        assumed_script = lang[hyphen_idx+1:]
        if len(assumed_script) == 4 and assumed_script.isalpha():
            # This is actually the script
            return assumed_script.title()
        lang = lang[:hyphen_idx]
    return LANG_TO_SCRIPT[lang]


def get_best_cmap(font):
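    """Return the best cmap of a font as a {code point: glyph name} dict.

    `font` is a (file name, collection index) pair, where the index is None
    unless fonts.xml specifies one (typically for .ttc collections). The
    format 12 (UCS-4) cmap is preferred when present; otherwise the
    format 4 (BMP) cmap is used.
    """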
    font_file, index = font
    font_path = path.join(_fonts_dir, font_file)
    if index is not None:
        ttfont = ttLib.TTFont(font_path, fontNumber=index)
    else:
        ttfont = ttLib.TTFont(font_path)
    all_unicode_cmap = None
    bmp_cmap = None
    for cmap in ttfont['cmap'].tables:
        specifier = (cmap.format, cmap.platformID, cmap.platEncID)
        if specifier == (4, 3, 1):
            assert bmp_cmap is None, 'More than one BMP cmap in %s' % (font, )
            bmp_cmap = cmap
        elif specifier == (12, 3, 10):
            assert all_unicode_cmap is None, (
                'More than one UCS-4 cmap in %s' % (font, ))
            all_unicode_cmap = cmap

    return all_unicode_cmap.cmap if all_unicode_cmap else bmp_cmap.cmap


def assert_font_supports_any_of_chars(font, chars):
    best_cmap = get_best_cmap(font)
    for char in chars:
        if char in best_cmap:
            return
    sys.exit('None of the characters in %s were found in %s' % (chars, font))


def assert_font_supports_all_of_chars(font, chars):
    best_cmap = get_best_cmap(font)
    for char in chars:
        assert char in best_cmap, (
            'U+%04X was not found in %s' % (char, font))


def assert_font_supports_none_of_chars(font, chars):
    best_cmap = get_best_cmap(font)
    for char in chars:
        assert char not in best_cmap, (
            'U+%04X was found in %s' % (char, font))


def check_hyphens(hyphens_dir):
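    """Check that every script with hyphenation patterns can render hyphens.

    Each hyph-<lang>.hyb file under hyphens_dir is mapped to a script, and
    every font serving that script must support HYPHEN-MINUS (U+002D) or
    HYPHEN (U+2010).
    """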
    # Find all the scripts that need automatic hyphenation
    scripts = set()
    for hyb_file in glob.iglob(path.join(hyphens_dir, '*.hyb')):
        hyb_file = path.basename(hyb_file)
        assert hyb_file.startswith('hyph-'), (
            'Unknown hyphenation file %s' % hyb_file)
        lang_code = hyb_file[hyb_file.index('-')+1:hyb_file.index('.')]
        scripts.add(lang_to_script(lang_code))

    HYPHENS = {0x002D, 0x2010}
    for script in scripts:
        fonts = _script_to_font_map[script]
        assert fonts, 'No fonts found for the "%s" script' % script
        for font in fonts:
            assert_font_supports_any_of_chars(font, HYPHENS)


def parse_fonts_xml(fonts_xml_path):
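    """Parse fonts.xml into the module-level lookup structures.

    The expected element shape, as implied by the parsing below (the
    attribute values in this snippet are made up):

        <family lang="und-Ethi" variant="elegant">
            <font weight="400" style="normal">NotoSansEthiopic-Regular.ttf</font>
        </family>

    Populates _fallback_chain, a list of (name, scripts, variant, weight,
    style, (file, index)) tuples in fallback order, and _script_to_font_map,
    mapping each script code to the set of (file, index) fonts serving it.
    """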
    global _script_to_font_map, _fallback_chain
    _script_to_font_map = collections.defaultdict(set)
    _fallback_chain = []
    tree = ElementTree.parse(fonts_xml_path)
    for family in tree.findall('family'):
        name = family.get('name')
        variant = family.get('variant')
        langs = family.get('lang')
        if name:
            assert variant is None, (
                'No variant expected for LGC font %s.' % name)
            assert langs is None, (
                'No language expected for LGC font %s.' % name)
        else:
            assert variant in {None, 'elegant', 'compact'}, (
                'Unexpected value for variant: %s' % variant)

        if langs:
            langs = langs.split()
            scripts = {lang_to_script(lang) for lang in langs}
        else:
            scripts = set()

        for child in family:
            assert child.tag == 'font', (
                'Unknown tag <%s>' % child.tag)
            font_file = child.text
            weight = int(child.get('weight'))
            assert weight % 100 == 0, (
                'Font weight "%d" is not a multiple of 100.' % weight)

            style = child.get('style')
            assert style in {'normal', 'italic'}, (
                'Unknown style "%s"' % style)

            index = child.get('index')
            if index:
                index = int(index)

            _fallback_chain.append((
                name,
                frozenset(scripts),
                variant,
                weight,
                style,
                (font_file, index)))

            if name:  # non-empty names are used for default LGC fonts
                map_scripts = {'Latn', 'Grek', 'Cyrl'}
            else:
                map_scripts = scripts
            for script in map_scripts:
                _script_to_font_map[script].add((font_file, index))


def check_emoji_availability():
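    """Check that every font tagged with the Zsye (emoji) script supports
    all characters that have the Emoji property in the UCD."""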
    emoji_fonts = [font[5] for font in _fallback_chain if 'Zsye' in font[1]]
    emoji_chars = _emoji_properties['Emoji']
    for emoji_font in emoji_fonts:
        assert_font_supports_all_of_chars(emoji_font, emoji_chars)


def check_emoji_defaults():
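    """Check the text-vs-emoji presentation split in the fallback chain.

    Fonts other than the emoji font (skipping script-less fonts that come
    after it) must not cover characters whose default presentation is emoji,
    and every other Emoji character must get a text-style glyph from some
    font that precedes the emoji font.
    """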
    default_emoji_chars = _emoji_properties['Emoji_Presentation']
    missing_text_chars = _emoji_properties['Emoji'] - default_emoji_chars
    emoji_font_seen = False
    for name, scripts, variant, weight, style, font in _fallback_chain:
        if 'Zsye' in scripts:
            emoji_font_seen = True
            # No need to check the emoji font
            continue
        # For later fonts, we only check them if they have a script
        # defined, since the defined script may get them to a higher
        # score even if they appear after the emoji font.
        if emoji_font_seen and not scripts:
            continue

        # Check default emoji-style characters
        assert_font_supports_none_of_chars(font, sorted(default_emoji_chars))

        # Mark default text-style characters appearing in fonts above the emoji
        # font as seen
        if not emoji_font_seen:
            missing_text_chars -= set(get_best_cmap(font))

    # Noto does not have monochrome symbols for Unicode 7.0 wingdings and
    # webdings
    missing_text_chars -= _chars_by_age['7.0']
    # TODO: Remove these after b/26113320 is fixed
    missing_text_chars -= {
        0x263A,  # WHITE SMILING FACE
        0x270C,  # VICTORY HAND
        0x2744,  # SNOWFLAKE
        0x2764,  # HEAVY BLACK HEART
    }
    assert missing_text_chars == set(), (
        'Text-style versions of some emoji characters are missing.')


# With reverse=True, return a dictionary that maps property values to sets of
# characters, which is useful for binary properties. Otherwise, return a
# dictionary that maps each character to its property value, assuming every
# character carries only one property in the file.
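# Illustrative example (the data line is hypothetical but follows the format
# parsed below): with reverse=True, a line such as
#     1F600..1F64F ; Emoji
# adds code points 0x1F600 through 0x1F64F to output_dict['Emoji'].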
def parse_unicode_datafile(file_path, reverse=False):
    if reverse:
        output_dict = collections.defaultdict(set)
    else:
        output_dict = {}
    with open(file_path) as datafile:
        for line in datafile:
            if '#' in line:
                line = line[:line.index('#')]
            line = line.strip()
            if not line:
                continue
            char_range, prop = line.split(';')
            char_range = char_range.strip()
            prop = prop.strip()
            if '..' in char_range:
                char_start, char_end = char_range.split('..')
            else:
                char_start = char_end = char_range
            char_start = int(char_start, 16)
            char_end = int(char_end, 16)
            # NOTE: xrange is Python 2 only; this would be range under Python 3.
            char_range = xrange(char_start, char_end+1)
            if reverse:
                output_dict[prop].update(char_range)
            else:
                for char in char_range:
                    assert char not in output_dict
                    output_dict[char] = prop
    return output_dict


def parse_ucd(ucd_path):
    global _emoji_properties, _chars_by_age
    _emoji_properties = parse_unicode_datafile(
        path.join(ucd_path, 'emoji-data.txt'), reverse=True)
    _chars_by_age = parse_unicode_datafile(
        path.join(ucd_path, 'DerivedAge.txt'), reverse=True)


def main():
    target_out = sys.argv[1]
    global _fonts_dir
    _fonts_dir = path.join(target_out, 'fonts')

    fonts_xml_path = path.join(target_out, 'etc', 'fonts.xml')
    parse_fonts_xml(fonts_xml_path)

    hyphens_dir = path.join(target_out, 'usr', 'hyphen-data')
    check_hyphens(hyphens_dir)

    ucd_path = sys.argv[2]
    parse_ucd(ucd_path)
    # Temporarily disable emoji checks for Bug 27785690
    # check_emoji_availability()
    # check_emoji_defaults()


if __name__ == '__main__':
    main()