otBase.py revision b7a2d797a40fb658d1e6dca6c08c9d2e1d83e78a
from .DefaultTable import DefaultTable
import struct
from fontTools.misc.py23 import *

class OverflowErrorRecord:
	def __init__(self, overflowTuple):
		self.tableType = overflowTuple[0]
		self.LookupListIndex = overflowTuple[1]
		self.SubTableIndex = overflowTuple[2]
		self.itemName = overflowTuple[3]
		self.itemIndex = overflowTuple[4]

	def __repr__(self):
		return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))

class OTLOffsetOverflowError(Exception):
	def __init__(self, overflowErrorRecord):
		self.value = overflowErrorRecord

	def __str__(self):
		return repr(self.value)


class BaseTTXConverter(DefaultTable):

	"""Generic base class for TTX table converters. It functions as an
	adapter between the TTX (ttLib actually) table model and the model
	we use for OpenType tables, which is necessarily subtly different.
	"""
	def decompile(self, data, font):
		from . import otTables
		# Debugging aid: change this to {} to collect statistics on how often
		# each subtable offset is re-read via getSubReader().
		cachingStats = None if True else {}
		class GlobalState:
			def __init__(self, tableType, cachingStats):
				self.tableType = tableType
				self.cachingStats = cachingStats
		globalState = GlobalState(tableType=self.tableTag,
					  cachingStats=cachingStats)
		reader = OTTableReader(data, globalState)
		tableClass = getattr(otTables, self.tableTag)
		self.table = tableClass()
		self.table.decompile(reader, font)
		if cachingStats:
			stats = sorted([(v, k) for k, v in cachingStats.items()])
			stats.reverse()
			print("cachingStats for ", self.tableTag)
			for v, k in stats:
				if v < 2:
					break
				print(v, k)
			print("---", len(stats))

	def compile(self, font):
		"""Create a top-level OTTableWriter for the GPOS/GSUB table.
			Call the compile method for the table
				for each 'converter' record in the table converter list
					call converter's write method for each item in the value.
						- For simple items, the write method adds a string to the
						writer's self.items list.
						- For Struct/Table/Subtable items, it first adds a new writer
						to the writer's self.items, then calls the item's compile method.
						This creates a tree of writers, rooted at the GSUB/GPOS writer, with
						each writer representing a table, and the writer.items list containing
						the child data strings and writers.
			call the getAllData method
				call _doneWriting, which removes duplicates
				call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
				Traverse the flat list of tables, calling getDataLength on each to update their position
				Traverse the flat list of tables again, calling getData on each to get the data in the table, now that
				positions and offsets are known.

				If a lookup subtable overflows an offset, we have to start all over.

			A minimal illustrative sketch of such a writer tree follows this class.
		"""
		class GlobalState:
			def __init__(self, tableType):
				self.tableType = tableType
		globalState = GlobalState(tableType=self.tableTag)
		writer = OTTableWriter(globalState)
		writer.parent = None
		self.table.compile(writer, font)
		return writer.getAllData()

	def toXML(self, writer, font):
		self.table.toXML2(writer, font)

	def fromXML(self, name, attrs, content, font):
		from . import otTables
		if not hasattr(self, "table"):
			tableClass = getattr(otTables, self.tableTag)
			self.table = tableClass()
		self.table.fromXML(name, attrs, content, font)


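# Illustrative sketch (not part of the original module): a minimal, hypothetical
# demonstration of the writer tree that BaseTTXConverter.compile() builds. A parent
# OTTableWriter holds packed data strings and child writers in .items; getAllData()
# later flattens the tree and resolves the offsets. The converters in otConverters.py
# normally set .longOffset on sub-writers; it is set by hand here.
def _demoWriterTree():
	class _GlobalState:
		def __init__(self, tableType):
			self.tableType = tableType
	writer = OTTableWriter(_GlobalState(tableType="GSUB"))
	writer.parent = None
	sub = writer.getSubWriter()
	sub.longOffset = False         # 16-bit offset from parent to child
	writer.writeSubTable(sub)      # placeholder item; the offset is computed in getData()
	writer.writeUShort(1)          # simple item: packed immediately
	sub.writeUShort(2)
	# Two-pass assembly, as described in compile() above:
	# -> '\x00\x04\x00\x01\x00\x02' (offset 4, UShort 1, UShort 2)
	return writer.getAllData()
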
class OTTableReader(object):

	"""Helper class to retrieve data from an OpenType table."""

	__slots__ = ('data', 'offset', 'pos', 'globalState', 'localState')

	def __init__(self, data, globalState={}, localState=None, offset=0):
		self.data = data
		self.offset = offset
		self.pos = offset
		self.globalState = globalState
		self.localState = localState

	def getSubReader(self, offset):
		offset = self.offset + offset
		cachingStats = self.globalState.cachingStats
		if cachingStats is not None:
			cachingStats[offset] = cachingStats.get(offset, 0) + 1
		return self.__class__(self.data, self.globalState, self.localState, offset)

	def readUShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">H", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">h", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readLong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">l", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readUInt24(self):
		pos = self.pos
		newpos = pos + 3
		# assemble the big-endian, unsigned 24-bit value by hand;
		# struct has no 3-byte format code.
		value = (ord(self.data[pos]) << 16) | (ord(self.data[pos+1]) << 8) | ord(self.data[pos+2])
		self.pos = newpos
		return value

	def readULong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">L", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readTag(self):
		pos = self.pos
		newpos = pos + 4
		value = self.data[pos:newpos]
		assert len(value) == 4
		self.pos = newpos
		return value

	def __setitem__(self, name, value):
		state = self.localState.copy() if self.localState else dict()
		state[name] = value
		self.localState = state

	def __getitem__(self, name):
		return self.localState[name]


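# Illustrative sketch (not part of the original module): following a 16-bit offset
# with a sub-reader, roughly the way the converters in otConverters.py consume
# table data. The data blob and tableType below are made up for the example.
def _demoReader():
	class _GlobalState:
		def __init__(self, tableType, cachingStats):
			self.tableType = tableType
			self.cachingStats = cachingStats
	data = b"\x00\x04\xde\xad\x00\x2a"
	reader = OTTableReader(data, _GlobalState("GSUB", None))
	offset = reader.readUShort()           # 4
	subReader = reader.getSubReader(offset)
	return subReader.readUShort()          # 0x002A == 42
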
class OTTableWriter(object):

	"""Helper class to gather and assemble data for OpenType tables."""

	def __init__(self, globalState, localState=None):
		self.items = []
		self.pos = None
		self.globalState = globalState
		self.localState = localState

	def __setitem__(self, name, value):
		state = self.localState.copy() if self.localState else dict()
		state[name] = value
		self.localState = state

	def __getitem__(self, name):
		return self.localState[name]

	# assembler interface

	def getAllData(self):
		"""Assemble all data, including all subtables."""
		self._doneWriting()
		tables, extTables = self._gatherTables()
		tables.reverse()
		extTables.reverse()
		# Gather all data in two passes: the absolute positions of all
		# subtables are needed before the actual data can be assembled.
		pos = 0
		for table in tables:
			table.pos = pos
			pos = pos + table.getDataLength()

		for table in extTables:
			table.pos = pos
			pos = pos + table.getDataLength()


		data = []
		for table in tables:
			tableData = table.getData()
			data.append(tableData)

		for table in extTables:
			tableData = table.getData()
			data.append(tableData)

		return "".join(data)

	def getDataLength(self):
		"""Return the length of this table in bytes, without subtables."""
		l = 0
		for item in self.items:
			if hasattr(item, "getData") or hasattr(item, "getCountData"):
				if item.longOffset:
					l = l + 4  # sizeof(ULong)
				else:
					l = l + 2  # sizeof(UShort)
			else:
				l = l + len(item)
		return l

	def getData(self):
		"""Assemble the data for this writer/table, without subtables."""
		items = list(self.items)  # make a shallow copy
		pos = self.pos
		numItems = len(items)
		for i in range(numItems):
			item = items[i]

			if hasattr(item, "getData"):
				if item.longOffset:
					items[i] = packULong(item.pos - pos)
				else:
					try:
						items[i] = packUShort(item.pos - pos)
					except AssertionError:
						# provide data to fix the overflow problem.
						# If the overflow is to a lookup, or from a lookup to a subtable,
						# just report the current item.
						if self.name in [ 'LookupList', 'Lookup']:
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						else:
							# Overflow is within a subTable. Life is more complicated.
							# If we split the sub-table just before the current item, we may still suffer overflow.
							# This is because duplicate table merging is done only within an Extension subTable tree;
							# when we split the subtable in two, some items may no longer be duplicates.
							# Get the worst case by adding up all the item lengths in a depth-first traversal,
							# and then report the first item that overflows a short.
							def getDeepItemLength(table):
								if hasattr(table, "getDataLength"):
									length = 0
									for item in table.items:
										length = length + getDeepItemLength(item)
								else:
									length = len(table)
								return length

							length = self.getDataLength()
							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
								# Coverage is first in the item list, but last in the table list.
								# The original overflow is really in the item list. Skip the Coverage
								# table in the following test.
								items = items[i+1:]

							for j in range(len(items)):
								item = items[j]
								length = length + getDeepItemLength(item)
								if length > 65535:
									break
						overflowErrorRecord = self.getOverflowErrorRecord(item)


						raise OTLOffsetOverflowError(overflowErrorRecord)

		return "".join(items)

	def __hash__(self):
		# only works after self._doneWriting() has been called
		return hash(self.items)

	def __cmp__(self, other):
		if not isinstance(self, type(other)): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.items, other.items)

	def _doneWriting(self, internedTables=None):
		# Convert CountData references to data string items;
		# collapse duplicate table references to a unique entry.
		# "tables" are OTTableWriter objects.

		# For Extension Lookup types, we can
		# eliminate duplicates only within the tree under the Extension Lookup,
		# as offsets may exceed 64K even between Extension LookupTable subtables.
		if internedTables is None:
			internedTables = {}
		items = self.items
		iRange = list(range(len(items)))

		if hasattr(self, "Extension"):
			newTree = 1
		else:
			newTree = 0
		for i in iRange:
			item = items[i]
			if hasattr(item, "getCountData"):
				items[i] = item.getCountData()
			elif hasattr(item, "getData"):
				if newTree:
					item._doneWriting()
				else:
					item._doneWriting(internedTables)
					if item in internedTables:
						items[i] = item = internedTables[item]
					else:
						internedTables[item] = item
		self.items = tuple(items)

	def _gatherTables(self, tables=None, extTables=None, done=None):
		# Convert table references in the self.items tree to a flat
		# list of tables in depth-first traversal order.
		# "tables" are OTTableWriter objects.
		# We do the traversal in reverse order at each level, in order to
		# resolve duplicate references to be the last reference in the list of tables.
		# For extension lookups, duplicate references can be merged only within the
		# writer tree under the extension lookup.
		if tables is None: # init call for first time.
			tables = []
			extTables = []
			done = {}

		done[self] = 1

		numItems = len(self.items)
		iRange = list(range(numItems))
		iRange.reverse()

		if hasattr(self, "Extension"):
			appendExtensions = 1
		else:
			appendExtensions = 0

		# add Coverage table if it is sorted last.
		sortCoverageLast = 0
		if hasattr(self, "sortCoverageLast"):
			# Find coverage table
			for i in range(numItems):
				item = self.items[i]
				if hasattr(item, "name") and (item.name == "Coverage"):
					sortCoverageLast = 1
					break
			if item not in done:
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		saveItem = None
		for i in iRange:
			item = self.items[i]
			if not hasattr(item, "getData"):
				continue

			if sortCoverageLast and (i==1) and item.name == 'Coverage':
				# we've already 'gathered' it above
				continue

			if appendExtensions:
				assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extension subtables"
				newDone = {}
				item._gatherTables(extTables, None, newDone)

			elif item not in done:
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self


		tables.append(self)
		return tables, extTables

	# interface for gathering data, as used by table.compile()

	def getSubWriter(self):
		subwriter = self.__class__(self.globalState, self.localState)
		subwriter.parent = {0:self} # because some subtables have identical values, we discard
									# the duplicates under the getAllData method. Hence some
									# subtable writers can have more than one parent writer.
		return subwriter

	def writeUShort(self, value):
		assert 0 <= value < 0x10000
		self.items.append(struct.pack(">H", value))

	def writeShort(self, value):
		self.items.append(struct.pack(">h", value))

	def writeUInt24(self, value):
		assert 0 <= value < 0x1000000
		self.items.append(''.join(bytechr(v) for v in (value>>16, (value>>8)&0xFF, value&0xff)))

	def writeLong(self, value):
		self.items.append(struct.pack(">l", value))

	def writeULong(self, value):
		self.items.append(struct.pack(">L", value))

	def writeTag(self, tag):
		assert len(tag) == 4
		self.items.append(tag)

	def writeSubTable(self, subWriter):
		self.items.append(subWriter)

	def writeCountReference(self, table, name):
		ref = CountReference(table, name)
		self.items.append(ref)
		return ref

	def writeStruct(self, format, values):
		data = struct.pack(*(format,) + values)
		self.items.append(data)

	def writeData(self, data):
		self.items.append(data)

	def getOverflowErrorRecord(self, item):
		LookupListIndex = SubTableIndex = itemName = itemIndex = None
		if self.name == 'LookupList':
			LookupListIndex = item.repeatIndex
		elif self.name == 'Lookup':
			LookupListIndex = self.repeatIndex
			SubTableIndex = item.repeatIndex
		else:
			itemName = item.name
			if hasattr(item, 'repeatIndex'):
				itemIndex = item.repeatIndex
			if self.name == 'SubTable':
				LookupListIndex = self.parent[0].repeatIndex
				SubTableIndex = self.repeatIndex
			elif self.name == 'ExtSubTable':
				LookupListIndex = self.parent[0].parent[0].repeatIndex
				SubTableIndex = self.parent[0].repeatIndex
			else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
				itemName = ".".join([self.name, item.name])
				p1 = self.parent[0]
				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
					itemName = ".".join([p1.name, itemName])
					p1 = p1.parent[0]
				if p1:
					if p1.name == 'ExtSubTable':
						LookupListIndex = self.parent[0].parent[0].repeatIndex
						SubTableIndex = self.parent[0].repeatIndex
					else:
						LookupListIndex = self.parent[0].repeatIndex
						SubTableIndex = self.repeatIndex

		return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) )


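# Illustrative sketch (not part of the original module): callers such as ttLib's
# compile loop typically catch OTLOffsetOverflowError and use the attached
# OverflowErrorRecord to decide how to restructure the offending GSUB/GPOS table
# (for example by promoting lookups to Extension lookups) before recompiling.
# The helper name and retry strategy here are hypothetical.
def _demoCompileWithRetry(converter, font):
	try:
		return converter.compile(font)
	except OTLOffsetOverflowError as e:
		overflowRecord = e.value  # an OverflowErrorRecord instance
		# ... restructure the table based on overflowRecord here ...
		return converter.compile(font)
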
class CountReference:
	"""A reference to a Count value, not a count of references."""
	def __init__(self, table, name):
		self.table = table
		self.name = name
	def setValue(self, value):
		table = self.table
		name = self.name
		if table[name] is None:
			table[name] = value
		else:
			assert table[name] == value, (name, table[name], value)
	def getCountData(self):
		return packUShort(self.table[self.name])


def packUShort(value):
	assert 0 <= value < 0x10000, value
	return struct.pack(">H", value)


def packULong(value):
	assert 0 <= value < 0x100000000, value
	return struct.pack(">L", value)


class BaseTable(object):
	def __init__(self):
		# compileStatus: 0 means the table was created;
		# 1 means table.read() was called by a table which is subject to delayed decompilation;
		# 2 means the table was subject to delayed decompilation and has now been decompiled.
		self.compileStatus = 0

		self.recurse = 0

	def __getattr__(self, attr):
		# We get here only when the table does not have the attribute.
		# This method override exists so that we can try to decompile
		# a table which is subject to delayed decompilation, and then try
		# to get the value again after decompilation.
		self.recurse += 1
		if self.recurse > 2:
			# Shouldn't ever get here - we should only get to two levels of recursion.
			# This guards against self.decompile NOT setting compileStatus to other than 1.
			raise AttributeError(attr)
		if self.compileStatus == 1:
			self.ensureDecompiled()
			val = getattr(self, attr)
			self.recurse -= 1
			return val

		raise AttributeError(attr)


	"""Generic base class for all OpenType (sub)tables."""

	def getConverters(self):
		return self.converters

	def getConverterByName(self, name):
		return self.convertersByName[name]

	def decompile(self, reader, font):
		self.compileStatus = 2 # table has been decompiled.
		self.readFormat(reader)
		table = {}
		self.__rawTable = table  # for debugging
		converters = self.getConverters()
		for conv in converters:
			if conv.name == "SubTable":
				conv = conv.getConverter(reader.globalState.tableType,
						table["LookupType"])
			if conv.name == "ExtSubTable":
				conv = conv.getConverter(reader.globalState.tableType,
						table["ExtensionLookupType"])
			if conv.name == "FeatureParams":
				conv = conv.getConverter(reader["FeatureTag"])
			if conv.repeat:
				l = []
				if conv.repeat in table:
					countValue = table[conv.repeat]
				else:
					# conv.repeat is a propagated count
					countValue = reader[conv.repeat]
				for i in range(countValue + conv.aux):
					l.append(conv.read(reader, font, table))
				table[conv.name] = l
			else:
				if conv.aux and not eval(conv.aux, None, table):
					continue
				table[conv.name] = conv.read(reader, font, table)
				if conv.isPropagated:
					reader[conv.name] = table[conv.name]

		self.postRead(table, font)

		del self.__rawTable  # succeeded, get rid of debugging info

	def ensureDecompiled(self):
		if self.compileStatus != 1:
			return
		self.decompile(self.reader, self.font)
		del self.reader, self.font

	def compile(self, writer, font):
		self.ensureDecompiled()
		table = self.preWrite(font)

		if hasattr(self, 'sortCoverageLast'):
			writer.sortCoverageLast = 1

		self.writeFormat(writer)
		for conv in self.getConverters():
			value = table.get(conv.name)
			if conv.repeat:
				if value is None:
					value = []
				countValue = len(value) - conv.aux
				if conv.repeat in table:
					ref = table[conv.repeat]
					table[conv.repeat] = None
					ref.setValue(countValue)
				else:
					# conv.repeat is a propagated count
					writer[conv.repeat].setValue(countValue)
				for i in range(len(value)):
					conv.write(writer, font, table, value[i], i)
			elif conv.isCount:
				# Special-case Count values.
				# Assumption: a Count field will *always* precede
				# the actual array(s).
				# We need a default value, as it may be set later by a nested
				# table. We will later store it here.
				# We add a reference: by the time the data is assembled
				# the Count value will be filled in.
				ref = writer.writeCountReference(table, conv.name)
				if conv.isPropagated:
					table[conv.name] = None
					writer[conv.name] = ref
				else:
					table[conv.name] = ref
			else:
				if conv.aux and not eval(conv.aux, None, table):
					continue
				conv.write(writer, font, table, value)
				if conv.isPropagated:
					writer[conv.name] = value

	def readFormat(self, reader):
		pass

	def writeFormat(self, writer):
		pass

	def postRead(self, table, font):
		self.__dict__.update(table)

	def preWrite(self, font):
		return self.__dict__.copy()

	def toXML(self, xmlWriter, font, attrs=None):
		tableName = self.__class__.__name__
		if attrs is None:
			attrs = []
		if hasattr(self, "Format"):
			attrs = attrs + [("Format", self.Format)]
		xmlWriter.begintag(tableName, attrs)
		xmlWriter.newline()
		self.toXML2(xmlWriter, font)
		xmlWriter.endtag(tableName)
		xmlWriter.newline()

	def toXML2(self, xmlWriter, font):
		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
		# This is because in TTX our parent writes our main tag, and in otBase.py we
		# do it ourselves. I think I'm getting schizophrenic...
		for conv in self.getConverters():
			if conv.repeat:
				value = getattr(self, conv.name)
				for i in range(len(value)):
					item = value[i]
					conv.xmlWrite(xmlWriter, font, item, conv.name,
							[("index", i)])
			else:
				if conv.aux and not eval(conv.aux, None, vars(self)):
					continue
				value = getattr(self, conv.name)
				conv.xmlWrite(xmlWriter, font, value, conv.name, [])

	def fromXML(self, name, attrs, content, font):
		try:
			conv = self.getConverterByName(name)
		except KeyError:
			raise    # XXX on KeyError, raise nice error
		value = conv.xmlRead(attrs, content, font)
		if conv.repeat:
			seq = getattr(self, conv.name, None)
			if seq is None:
				seq = []
				setattr(self, conv.name, seq)
			seq.append(value)
		else:
			setattr(self, conv.name, value)

	def __cmp__(self, other):
		if not isinstance(self, type(other)): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		self.ensureDecompiled()

		return cmp(self.__dict__, other.__dict__)


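# Illustrative sketch (not part of the original module): how delayed decompilation
# is typically armed by a converter. Storing the reader and font on the table and
# setting compileStatus to 1 makes the first access to a missing attribute trigger
# ensureDecompiled() through BaseTable.__getattr__. The helper name is hypothetical.
def _armDelayedDecompile(table, reader, font):
	table.reader = reader
	table.font = font
	table.compileStatus = 1
	return table
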
class FormatSwitchingBaseTable(BaseTable):

	"""Minor specialization of BaseTable, for tables that have multiple
	formats, eg. CoverageFormat1 vs. CoverageFormat2."""

	def getConverters(self):
		return self.converters[self.Format]

	def getConverterByName(self, name):
		return self.convertersByName[self.Format][name]

	def readFormat(self, reader):
		self.Format = reader.readUShort()
		assert self.Format != 0, (self, reader.pos, len(reader.data))

	def writeFormat(self, writer):
		writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#

valueRecordFormat = [
#	Mask	 Name            isDevice  signed
	(0x0001, "XPlacement",   0,        1),
	(0x0002, "YPlacement",   0,        1),
	(0x0004, "XAdvance",     0,        1),
	(0x0008, "YAdvance",     0,        1),
	(0x0010, "XPlaDevice",   1,        0),
	(0x0020, "YPlaDevice",   1,        0),
	(0x0040, "XAdvDevice",   1,        0),
	(0x0080, "YAdvDevice",   1,        0),
# 	reserved:
	(0x0100, "Reserved1",    0,        0),
	(0x0200, "Reserved2",    0,        0),
	(0x0400, "Reserved3",    0,        0),
	(0x0800, "Reserved4",    0,        0),
	(0x1000, "Reserved5",    0,        0),
	(0x2000, "Reserved6",    0,        0),
	(0x4000, "Reserved7",    0,        0),
	(0x8000, "Reserved8",    0,        0),
]

def _buildDict():
	d = {}
	for mask, name, isDevice, signed in valueRecordFormat:
		d[name] = mask, isDevice, signed
	return d

valueRecordFormatDict = _buildDict()


class ValueRecordFactory:

	"""Given a format code, this object converts ValueRecords."""

	def __init__(self, valueFormat):
		format = []
		for mask, name, isDevice, signed in valueRecordFormat:
			if valueFormat & mask:
				format.append((name, isDevice, signed))
		self.format = format

	def readValueRecord(self, reader, font):
		format = self.format
		if not format:
			return None
		valueRecord = ValueRecord()
		for name, isDevice, signed in format:
			if signed:
				value = reader.readShort()
			else:
				value = reader.readUShort()
			if isDevice:
				if value:
					from . import otTables
					subReader = reader.getSubReader(value)
					value = getattr(otTables, name)()
					value.decompile(subReader, font)
				else:
					value = None
			setattr(valueRecord, name, value)
		return valueRecord

	def writeValueRecord(self, writer, font, valueRecord):
		for name, isDevice, signed in self.format:
			value = getattr(valueRecord, name, 0)
			if isDevice:
				if value:
					subWriter = writer.getSubWriter()
					writer.writeSubTable(subWriter)
					value.compile(subWriter, font)
				else:
					writer.writeUShort(0)
			elif signed:
				writer.writeShort(value)
			else:
				writer.writeUShort(value)


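# Illustrative sketch (not part of the original module): expanding a ValueFormat
# mask. 0x0005 selects XPlacement (0x0001) and XAdvance (0x0004), so the factory
# reads and writes exactly those two signed shorts per ValueRecord.
def _demoValueRecordFactory():
	factory = ValueRecordFactory(0x0005)
	# factory.format == [("XPlacement", 0, 1), ("XAdvance", 0, 1)]
	return [name for name, isDevice, signed in factory.format]
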
class ValueRecord:

	# see ValueRecordFactory

	def getFormat(self):
		format = 0
		for name in self.__dict__.keys():
			format = format | valueRecordFormatDict[name][0]
		return format

	def toXML(self, xmlWriter, font, valueName, attrs=None):
		if attrs is None:
			simpleItems = []
		else:
			simpleItems = list(attrs)
		for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
			if hasattr(self, name):
				simpleItems.append((name, getattr(self, name)))
		deviceItems = []
		for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
			if hasattr(self, name):
				device = getattr(self, name)
				if device is not None:
					deviceItems.append((name, device))
		if deviceItems:
			xmlWriter.begintag(valueName, simpleItems)
			xmlWriter.newline()
			for name, deviceRecord in deviceItems:
				if deviceRecord is not None:
					deviceRecord.toXML(xmlWriter, font)
			xmlWriter.endtag(valueName)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(valueName, simpleItems)
			xmlWriter.newline()

	def fromXML(self, name, attrs, content, font):
		from . import otTables
		for k, v in attrs.items():
			setattr(self, k, int(v))
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			value = getattr(otTables, name)()
			for elem2 in content:
				if not isinstance(elem2, tuple):
					continue
				name2, attrs2, content2 = elem2
				value.fromXML(name2, attrs2, content2, font)
			setattr(self, name, value)

	def __cmp__(self, other):
		if not isinstance(self, type(other)): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.__dict__, other.__dict__)
846