# otBase.py revision 3ec6a258238b6068e4eef3fe579f1f5c0a06bbba
1from .DefaultTable import DefaultTable
2from . import otData
3import struct
4from types import TupleType
5
class OverflowErrorRecord:

	"""Plain record pinpointing where an offset overflow occurred."""

	def __init__(self, overflowTuple):
		# Unpack (tableType, LookupListIndex, SubTableIndex, itemName, itemIndex).
		(self.tableType, self.LookupListIndex, self.SubTableIndex,
			self.itemName, self.itemIndex) = overflowTuple[:5]

	def __repr__(self):
		return str((self.tableType, "LookupIndex:", self.LookupListIndex,
			"SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName,
			"ItemIndex:", self.itemIndex))
16
class OTLOffsetOverflowError(Exception):

	"""Raised when an offset does not fit its field; ``value`` holds the
	OverflowErrorRecord describing where the overflow happened."""

	def __init__(self, overflowErrorRecord):
		self.value = overflowErrorRecord

	def __str__(self):
		return repr(self.value)
23
24
class BaseTTXConverter(DefaultTable):

	"""Generic base class for TTX table converters. It functions as an
	adapter between the TTX (ttLib actually) table model and the model
	we use for OpenType tables, which is necessarily subtly different.
	"""

	def decompile(self, data, font):
		"""Decompile binary *data* into self.table.

		Builds an OTTableReader over the data and hands it to the
		otTables class matching self.tableTag.
		"""
		from . import otTables
		# Debug switch: flip "None if True" to "{} if False" ... i.e.
		# change the condition to enable subtable-sharing statistics.
		cachingStats = None if True else {}
		class GlobalState:
			# Per-decompile state shared by every (sub)reader.
			def __init__(self, tableType, cachingStats):
				self.tableType = tableType
				self.cachingStats = cachingStats
		globalState = GlobalState(tableType=self.tableTag,
					  cachingStats=cachingStats)
		reader = OTTableReader(data, globalState)
		tableClass = getattr(otTables, self.tableTag)
		self.table = tableClass()
		self.table.decompile(reader, font)
		if cachingStats:
			stats = sorted([(v, k) for k, v in cachingStats.items()])
			stats.reverse()
			# BUGFIX: was misspelled "cachingsstats" in the debug output.
			print("cachingStats for ", self.tableTag)
			for v, k in stats:
				if v < 2:
					break
				print(v, k)
			print("---", len(stats))

	def compile(self, font):
		""" Create a top-level OTTableWriter for the GPOS/GSUB table.
			Call the compile method for the table
				for each 'converter' record in the table converter list
					call converter's write method for each item in the value.
						- For simple items, the write method adds a string to the
						writer's self.items list.
						- For Struct/Table/Subtable items, it first adds a new writer
						to the writer's self.items, then calls the item's compile method.
						This creates a tree of writers, rooted at the GSUB/GPOS writer, with
						each writer representing a table, and the writer.items list containing
						the child data strings and writers.
			call the getAllData method
				call _doneWriting, which removes duplicates
				call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
				Traverse the flat list of tables, calling getDataLength on each to update their position
				Traverse the flat list of tables again, calling getData each get the data in the table, now that
				pos's and offset are known.

				If a lookup subtable overflows an offset, we have to start all over.
		"""
		class GlobalState:
			# Per-compile state shared by every (sub)writer.
			def __init__(self, tableType):
				self.tableType = tableType
		globalState = GlobalState(tableType=self.tableTag)
		writer = OTTableWriter(globalState)
		writer.parent = None
		self.table.compile(writer, font)
		return writer.getAllData()

	def toXML(self, writer, font):
		# The ttLib wrapper already emitted our enclosing tag, so the
		# top-level table only writes its children (hence toXML2).
		self.table.toXML2(writer, font)

	def fromXML(self, name, attrs, content, font):
		from . import otTables
		# Lazily create the table object on the first XML element.
		if not hasattr(self, "table"):
			tableClass = getattr(otTables, self.tableTag)
			self.table = tableClass()
		self.table.fromXML(name, attrs, content, font)
94
95
class OTTableReader(object):

	"""Helper class to retrieve data from an OpenType table."""

	__slots__ = ('data', 'offset', 'pos', 'globalState', 'localState')

	def __init__(self, data, globalState={}, localState=None, offset=0):
		# NOTE: the mutable default for globalState is never mutated by
		# this class, so sharing it across instances is harmless here.
		self.data = data
		self.offset = offset
		self.pos = offset  # current read position; advances with each read
		self.globalState = globalState
		self.localState = localState

	def getSubReader(self, offset):
		"""Return a reader positioned at *offset* relative to this table."""
		offset = self.offset + offset
		cachingStats = self.globalState.cachingStats
		if cachingStats is not None:
			cachingStats[offset] = cachingStats.get(offset, 0) + 1
		return self.__class__(self.data, self.globalState, self.localState, offset)

	def readUShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">H", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">h", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readLong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">l", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readUInt24(self):
		pos = self.pos
		newpos = pos + 3
		# Zero-pad to 4 bytes and unpack big-endian.  BUGFIX: the old
		# code computed the 24-bit value and then clobbered it with a
		# 16-bit ">H" unpack of only two bytes, returning a wrong value.
		value, = struct.unpack(">L", b'\0' + self.data[pos:newpos])
		self.pos = newpos
		return value

	def readULong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">L", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readTag(self):
		pos = self.pos
		newpos = pos + 4
		value = self.data[pos:newpos]
		assert len(value) == 4
		self.pos = newpos
		return value

	def __setitem__(self, name, value):
		# Copy-on-write: sibling readers that share the previous
		# localState dict must not observe this change.
		state = self.localState.copy() if self.localState else dict()
		state[name] = value
		self.localState = state

	def __getitem__(self, name):
		return self.localState[name]
167
168
class OTTableWriter(object):

	"""Helper class to gather and assemble data for OpenType tables."""

	def __init__(self, globalState, localState=None):
		self.items = []  # mix of data strings, subtable writers and CountReferences
		self.pos = None  # absolute position; assigned during getAllData()
		self.globalState = globalState
		self.localState = localState

	def __setitem__(self, name, value):
		# Copy-on-write: sibling writers sharing the previous localState
		# dict must not observe this change.
		state = self.localState.copy() if self.localState else dict()
		state[name] = value
		self.localState = state

	def __getitem__(self, name):
		return self.localState[name]

	# assembler interface

	def getAllData(self):
		"""Assemble all data, including all subtables."""
		self._doneWriting()
		tables, extTables = self._gatherTables()
		tables.reverse()
		extTables.reverse()
		# Gather all data in two passes: the absolute positions of all
		# subtable are needed before the actual data can be assembled.
		pos = 0
		for table in tables:
			table.pos = pos
			pos = pos + table.getDataLength()

		for table in extTables:
			table.pos = pos
			pos = pos + table.getDataLength()


		data = []
		for table in tables:
			tableData = table.getData()
			data.append(tableData)

		for table in extTables:
			tableData = table.getData()
			data.append(tableData)

		return "".join(data)

	def getDataLength(self):
		"""Return the length of this table in bytes, without subtables."""
		l = 0
		for item in self.items:
			if hasattr(item, "getData") or hasattr(item, "getCountData"):
				# Subtable writers and count references serialize as offsets.
				if item.longOffset:
					l = l + 4  # sizeof(ULong)
				else:
					l = l + 2  # sizeof(UShort)
			else:
				l = l + len(item)
		return l

	def getData(self):
		"""Assemble the data for this writer/table, without subtables.

		Subtable writers in self.items are replaced by their offsets
		relative to this table; raises OTLOffsetOverflowError (carrying
		an OverflowErrorRecord) when an offset exceeds a UShort.
		"""
		items = list(self.items)  # make a shallow copy
		pos = self.pos
		numItems = len(items)
		for i in range(numItems):
			item = items[i]

			if hasattr(item, "getData"):
				if item.longOffset:
					items[i] = packULong(item.pos - pos)
				else:
					try:
						items[i] = packUShort(item.pos - pos)
					except AssertionError:
						# provide data to fix overflow problem.
						# If the overflow is to a lookup, or from a lookup to a subtable,
						# just report the current item.
						if self.name in [ 'LookupList', 'Lookup']:
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						else:
							# overflow is within a subTable. Life is more complicated.
							# If we split the sub-table just before the current item, we may still suffer overflow.
							# This is because duplicate table merging is done only within an Extension subTable tree;
							# when we split the subtable in two, some items may no longer be duplicates.
							# Get worst case by adding up all the item lengths, depth first traversal.
							# and then report the first item that overflows a short.
							def getDeepItemLength(table):
								# Worst-case length: recurse into subtable
								# writers as if nothing were shared.
								if hasattr(table, "getDataLength"):
									length = 0
									for item in table.items:
										length = length + getDeepItemLength(item)
								else:
									length = len(table)
								return length

							length = self.getDataLength()
							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
								# Coverage is first in the item list, but last in the table list,
								# The original overflow is really in the item list. Skip the Coverage
								# table in the following test.
								items = items[i+1:]

							for j in range(len(items)):
								item = items[j]
								length = length + getDeepItemLength(item)
								if length > 65535:
									break
						overflowErrorRecord = self.getOverflowErrorRecord(item)


						raise OTLOffsetOverflowError(overflowErrorRecord)

		return "".join(items)

	def __hash__(self):
		# only works after self._doneWriting() has been called
		return hash(self.items)

	def __cmp__(self, other):
		# Python 2 comparison, used for duplicate-subtable detection.
		if not isinstance(self, type(other)): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.items, other.items)

	def _doneWriting(self, internedTables=None):
		# Convert CountData references to data string items
		# collapse duplicate table references to a unique entry
		# "tables" are OTTableWriter objects.

		# For Extension Lookup types, we can
		# eliminate duplicates only within the tree under the Extension Lookup,
		# as offsets may exceed 64K even between Extension LookupTable subtables.
		if internedTables is None:
			internedTables = {}
		items = self.items
		iRange = list(range(len(items)))

		if hasattr(self, "Extension"):
			newTree = 1
		else:
			newTree = 0
		for i in iRange:
			item = items[i]
			if hasattr(item, "getCountData"):
				items[i] = item.getCountData()
			elif hasattr(item, "getData"):
				if newTree:
					# Start a fresh interning scope under an Extension.
					item._doneWriting()
				else:
					item._doneWriting(internedTables)
					if item in internedTables:
						items[i] = item = internedTables[item]
					else:
						internedTables[item] = item
		self.items = tuple(items)  # hashable from now on; see __hash__

	def _gatherTables(self, tables=None, extTables=None, done=None):
		# Convert table references in self.items tree to a flat
		# list of tables in depth-first traversal order.
		# "tables" are OTTableWriter objects.
		# We do the traversal in reverse order at each level, in order to
		# resolve duplicate references to be the last reference in the list of tables.
		# For extension lookups, duplicate references can be merged only within the
		# writer tree under the  extension lookup.
		if tables is None: # init call for first time.
			tables = []
			extTables = []
			done = {}

		done[self] = 1

		numItems = len(self.items)
		iRange = list(range(numItems))
		iRange.reverse()

		if hasattr(self, "Extension"):
			appendExtensions = 1
		else:
			appendExtensions = 0

		# add Coverage table if it is sorted last.
		sortCoverageLast = 0
		if hasattr(self, "sortCoverageLast"):
			# Find coverage table
			for i in range(numItems):
				item = self.items[i]
				if hasattr(item, "name") and (item.name == "Coverage"):
					sortCoverageLast = 1
					break
			if item not in done:
				item._gatherTables(tables, extTables, done)
			else:
				# We're a new parent of item
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		saveItem = None
		for i in iRange:
			item = self.items[i]
			if not hasattr(item, "getData"):
				continue

			if sortCoverageLast and (i==1) and item.name == 'Coverage':
				# we've already 'gathered' it above
				continue

			if appendExtensions:
				assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extensions subtables"
				newDone = {}
				item._gatherTables(extTables, None, newDone)

			elif item not in done:
				item._gatherTables(tables, extTables, done)
			else:
				# We're a new parent of item
				index = max(item.parent.keys())
				item.parent[index + 1] = self


		tables.append(self)
		return tables, extTables

	# interface for gathering data, as used by table.compile()

	def getSubWriter(self):
		subwriter = self.__class__(self.globalState, self.localState)
		subwriter.parent = {0:self} # because some subtables have identical values, we discard
									# the duplicates under the getAllData method. Hence some
									# subtable writers can have more than one parent writer.
		return subwriter

	def writeUShort(self, value):
		assert 0 <= value < 0x10000
		self.items.append(struct.pack(">H", value))

	def writeShort(self, value):
		self.items.append(struct.pack(">h", value))

	def writeUInt24(self, value):
		assert 0 <= value < 0x1000000
		self.items.append(''.join(chr(v) for v in (value>>16, (value>>8)&0xFF, value&0xff)))

	def writeLong(self, value):
		self.items.append(struct.pack(">l", value))

	def writeULong(self, value):
		self.items.append(struct.pack(">L", value))

	def writeTag(self, tag):
		assert len(tag) == 4
		self.items.append(tag)

	def writeSubTable(self, subWriter):
		self.items.append(subWriter)

	def writeCountReference(self, table, name):
		# Placeholder for a count that is resolved later; see CountReference.
		ref = CountReference(table, name)
		self.items.append(ref)
		return ref

	def writeStruct(self, format, values):
		data = struct.pack(*(format,) + values)
		self.items.append(data)

	def writeData(self, data):
		self.items.append(data)

	def getOverflowErrorRecord(self, item):
		"""Build an OverflowErrorRecord locating *item* within the
		LookupList/Lookup/SubTable hierarchy, for overflow reporting."""
		LookupListIndex = SubTableIndex = itemName = itemIndex = None
		if self.name == 'LookupList':
			LookupListIndex = item.repeatIndex
		elif self.name == 'Lookup':
			LookupListIndex = self.repeatIndex
			SubTableIndex = item.repeatIndex
		else:
			itemName = item.name
			if hasattr(item, 'repeatIndex'):
				itemIndex = item.repeatIndex
			if self.name == 'SubTable':
				LookupListIndex = self.parent[0].repeatIndex
				SubTableIndex = self.repeatIndex
			elif self.name == 'ExtSubTable':
				LookupListIndex = self.parent[0].parent[0].repeatIndex
				SubTableIndex = self.parent[0].repeatIndex
			else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
				# BUGFIX: str.join takes a single iterable; the old code
				# passed two arguments and raised TypeError here.  We now
				# also accumulate the dotted path while climbing.
				itemName = ".".join([self.name, item.name])
				p1 = self.parent[0]
				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
					itemName = ".".join([p1.name, itemName])
					p1 = p1.parent[0]
				if p1:
					if p1.name == 'ExtSubTable':
						LookupListIndex = self.parent[0].parent[0].repeatIndex
						SubTableIndex = self.parent[0].repeatIndex
					else:
						LookupListIndex = self.parent[0].repeatIndex
						SubTableIndex = self.repeatIndex

		return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) )
469
470
class CountReference:
	"""A reference to a Count value, not a count of references."""
	def __init__(self, table, name):
		self.table = table
		self.name = name
	def setValue(self, value):
		# First assignment wins; any later call must agree with it.
		table, name = self.table, self.name
		if table[name] is None:
			table[name] = value
		else:
			assert table[name] == value, (name, table[name], value)
	def getCountData(self):
		# Serialize the (by now resolved) count as a UShort.
		return packUShort(self.table[self.name])
485
486
def packUShort(value):
	"""Pack *value* (0..0xFFFF) as a big-endian unsigned 16-bit string."""
	assert value >= 0 and value < 0x10000, value
	return struct.pack(">H", value)
490
491
def packULong(value):
	"""Pack *value* (0..0xFFFFFFFF) as a big-endian unsigned 32-bit string."""
	assert value >= 0 and value < 0x100000000, value
	return struct.pack(">L", value)
495
496
class BaseTable(object):

	"""Generic base class for all OpenType (sub)tables.

	Subclasses carry a ``converters`` list (and ``convertersByName``
	dict) describing the binary layout; decompile()/compile() walk
	that list to read/write each field.  Tables may be decompiled
	lazily: see ensureDecompiled() and the compileStatus/recurse
	machinery in __getattr__.
	"""

	def __init__(self):
		self.compileStatus = 0 # 0 means table was created
									# 1 means the table.read() function was called by a table which is subject
									# to delayed compilation
									# 2 means that it was subject to delayed compilation, and
									# has been decompiled

		self.recurse = 0

	def __getattr__(self, attr):
		# we get here only when the table does not have the attribute.
		# This method override exists so that we can try to de-compile
		# a table which is subject to delayed decompilation, and then try
		# to get the value again after decompilation.
		self.recurse +=1
		if self.recurse > 2:
			# shouldn't ever get here - we should only get to two levels of recursion.
			# this guards against self.decompile NOT setting compileStatus to other than 1.
			raise AttributeError(attr)
		if self.compileStatus == 1:
			self.ensureDecompiled()
			val = getattr(self, attr)
			self.recurse -=1
			return val

		# NOTE(review): recurse is not decremented on this failure path,
		# so the counter stays elevated after a genuinely missing
		# attribute -- confirm whether that is intentional.
		raise AttributeError(attr)


	"""Generic base class for all OpenType (sub)tables."""

	def getConverters(self):
		# Full converter list describing this table's binary layout.
		return self.converters

	def getConverterByName(self, name):
		return self.convertersByName[name]

	def decompile(self, reader, font):
		"""Read this table from *reader*.

		Field values are collected in a temporary dict (keyed by
		converter name) and handed to postRead(), which by default
		adopts them as instance attributes.
		"""
		self.compileStatus = 2 # table has been decompiled.
		self.readFormat(reader)
		table = {}
		self.__rawTable = table  # for debugging
		converters = self.getConverters()
		for conv in converters:
			# (Ext)SubTable/FeatureParams converters depend on values
			# read earlier in this same table (or propagated ones).
			if conv.name == "SubTable":
				conv = conv.getConverter(reader.globalState.tableType,
						table["LookupType"])
			if conv.name == "ExtSubTable":
				conv = conv.getConverter(reader.globalState.tableType,
						table["ExtensionLookupType"])
			if conv.name == "FeatureParams":
				conv = conv.getConverter(reader["FeatureTag"])
			if conv.repeat:
				l = []
				if conv.repeat in table:
					countValue = table[conv.repeat]
				else:
					# conv.repeat is a propagated count
					countValue = reader[conv.repeat]
				# conv.aux adjusts the element count (may be negative).
				for i in range(countValue + conv.aux):
					l.append(conv.read(reader, font, table))
				table[conv.name] = l
			else:
				# conv.aux here is a trusted Python expression from
				# otData, evaluated against the fields read so far.
				if conv.aux and not eval(conv.aux, None, table):
					continue
				table[conv.name] = conv.read(reader, font, table)
				if conv.isPropagated:
					reader[conv.name] = table[conv.name]

		self.postRead(table, font)

		del self.__rawTable  # succeeded, get rid of debugging info

	def ensureDecompiled(self):
		"""Run the delayed decompile, if one is pending (compileStatus == 1)."""
		if self.compileStatus != 1:
			return
		self.decompile(self.reader, self.font)
		del self.reader, self.font

	def compile(self, writer, font):
		"""Write this table to *writer*, driven by the converter list.

		Count fields are emitted as CountReference placeholders and
		resolved when the corresponding arrays are written.
		"""
		self.ensureDecompiled()
		table = self.preWrite(font)

		if hasattr(self, 'sortCoverageLast'):
			writer.sortCoverageLast = 1

		self.writeFormat(writer)
		for conv in self.getConverters():
			value = table.get(conv.name)
			if conv.repeat:
				if value is None:
					value = []
				countValue = len(value) - conv.aux
				if conv.repeat in table:
					ref = table[conv.repeat]
					table[conv.repeat] = None
					ref.setValue(countValue)
				else:
					# conv.repeat is a propagated count
					writer[conv.repeat].setValue(countValue)
				for i in range(len(value)):
					conv.write(writer, font, table, value[i], i)
			elif conv.isCount:
				# Special-case Count values.
				# Assumption: a Count field will *always* precede
				# the actual array(s).
				# We need a default value, as it may be set later by a nested
				# table. We will later store it here.
				# We add a reference: by the time the data is assembled
				# the Count value will be filled in.
				ref = writer.writeCountReference(table, conv.name)
				if conv.isPropagated:
					table[conv.name] = None
					writer[conv.name] = ref
				else:
					table[conv.name] = ref
			else:
				# conv.aux: trusted expression from otData (see decompile).
				if conv.aux and not eval(conv.aux, None, table):
					continue
				conv.write(writer, font, table, value)
				if conv.isPropagated:
					writer[conv.name] = value

	def readFormat(self, reader):
		# No-op here; FormatSwitchingBaseTable overrides this.
		pass

	def writeFormat(self, writer):
		# No-op here; FormatSwitchingBaseTable overrides this.
		pass

	def postRead(self, table, font):
		# Default: adopt the raw field dict as instance attributes.
		self.__dict__.update(table)

	def preWrite(self, font):
		# Default: the fields to write are simply our attributes.
		return self.__dict__.copy()

	def toXML(self, xmlWriter, font, attrs=None):
		"""Write this (sub)table as an XML element named after the class."""
		tableName = self.__class__.__name__
		if attrs is None:
			attrs = []
		if hasattr(self, "Format"):
			attrs = attrs + [("Format", self.Format)]
		xmlWriter.begintag(tableName, attrs)
		xmlWriter.newline()
		self.toXML2(xmlWriter, font)
		xmlWriter.endtag(tableName)
		xmlWriter.newline()

	def toXML2(self, xmlWriter, font):
		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
		# This is because in TTX our parent writes our main tag, and in otBase.py we
		# do it ourselves. I think I'm getting schizophrenic...
		for conv in self.getConverters():
			if conv.repeat:
				value = getattr(self, conv.name)
				for i in range(len(value)):
					item = value[i]
					conv.xmlWrite(xmlWriter, font, item, conv.name,
							[("index", i)])
			else:
				# conv.aux: trusted expression from otData, evaluated
				# against this table's attributes.
				if conv.aux and not eval(conv.aux, None, vars(self)):
					continue
				value = getattr(self, conv.name)
				conv.xmlWrite(xmlWriter, font, value, conv.name, [])

	def fromXML(self, name, attrs, content, font):
		"""Set one field from a parsed XML element; repeated fields append."""
		try:
			conv = self.getConverterByName(name)
		except KeyError:
			raise    # XXX on KeyError, raise nice error
		value = conv.xmlRead(attrs, content, font)
		if conv.repeat:
			seq = getattr(self, conv.name, None)
			if seq is None:
				seq = []
				setattr(self, conv.name, seq)
			seq.append(value)
		else:
			setattr(self, conv.name, value)

	def __cmp__(self, other):
		# Python 2 comparison; forces decompilation so attributes compare.
		if not isinstance(self, type(other)): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		self.ensureDecompiled()

		return cmp(self.__dict__, other.__dict__)
683
684
class FormatSwitchingBaseTable(BaseTable):

	"""Minor specialization of BaseTable for tables that exist in several
	binary formats, e.g. CoverageFormat1 vs. CoverageFormat2."""

	def getConverters(self):
		# The converter list is selected by the decoded Format value.
		return self.converters[self.Format]

	def getConverterByName(self, name):
		byFormat = self.convertersByName[self.Format]
		return byFormat[name]

	def readFormat(self, reader):
		self.Format = reader.readUShort()
		# Format 0 is never valid; include position info for debugging.
		assert self.Format != 0, (self, reader.pos, len(reader.data))

	def writeFormat(self, writer):
		writer.writeUShort(self.Format)
702
703
704#
705# Support for ValueRecords
706#
707# This data type is so different from all other OpenType data types that
708# it requires quite a bit of code for itself. It even has special support
709# in OTTableReader and OTTableWriter...
710#
711
712valueRecordFormat = [
713#	Mask	 Name            isDevice  signed
714	(0x0001, "XPlacement",   0,        1),
715	(0x0002, "YPlacement",   0,        1),
716	(0x0004, "XAdvance",     0,        1),
717	(0x0008, "YAdvance",     0,        1),
718	(0x0010, "XPlaDevice",   1,        0),
719	(0x0020, "YPlaDevice",   1,        0),
720	(0x0040, "XAdvDevice",   1,        0),
721	(0x0080, "YAdvDevice",   1,        0),
722# 	reserved:
723	(0x0100, "Reserved1",    0,        0),
724	(0x0200, "Reserved2",    0,        0),
725	(0x0400, "Reserved3",    0,        0),
726	(0x0800, "Reserved4",    0,        0),
727	(0x1000, "Reserved5",    0,        0),
728	(0x2000, "Reserved6",    0,        0),
729	(0x4000, "Reserved7",    0,        0),
730	(0x8000, "Reserved8",    0,        0),
731]
732
733def _buildDict():
734	d = {}
735	for mask, name, isDevice, signed in valueRecordFormat:
736		d[name] = mask, isDevice, signed
737	return d
738
739valueRecordFormatDict = _buildDict()
740
741
class ValueRecordFactory:

	"""Reads and writes ValueRecords for a given ValueFormat code."""

	def __init__(self, valueFormat):
		# Keep only the fields whose mask bit is set, preserving the
		# canonical field order of valueRecordFormat.
		self.format = [(name, isDevice, signed)
				for mask, name, isDevice, signed in valueRecordFormat
				if valueFormat & mask]

	def readValueRecord(self, reader, font):
		fields = self.format
		if not fields:
			return None
		valueRecord = ValueRecord()
		for name, isDevice, signed in fields:
			value = reader.readShort() if signed else reader.readUShort()
			if isDevice:
				if value:
					# A non-zero device value is an offset to a device table.
					from . import otTables
					subReader = reader.getSubReader(value)
					value = getattr(otTables, name)()
					value.decompile(subReader, font)
				else:
					value = None
			setattr(valueRecord, name, value)
		return valueRecord

	def writeValueRecord(self, writer, font, valueRecord):
		for name, isDevice, signed in self.format:
			value = getattr(valueRecord, name, 0)
			if not isDevice:
				if signed:
					writer.writeShort(value)
				else:
					writer.writeUShort(value)
			elif value:
				# Device table: emitted as a subtable with an offset here.
				subWriter = writer.getSubWriter()
				writer.writeSubTable(subWriter)
				value.compile(subWriter, font)
			else:
				# Missing device table: NULL offset.
				writer.writeUShort(0)
788
789
class ValueRecord:

	# see ValueRecordFactory

	def getFormat(self):
		"""Return the ValueFormat bit mask covering the fields this
		record carries (one mask bit per set attribute)."""
		format = 0
		for name in self.__dict__.keys():
			format = format | valueRecordFormatDict[name][0]
		return format

	def toXML(self, xmlWriter, font, valueName, attrs=None):
		"""Write this record as XML: a simple tag when there are no
		device tables, otherwise a container element holding them."""
		if attrs is None:
			simpleItems = []
		else:
			simpleItems = list(attrs)
		for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
			if hasattr(self, name):
				simpleItems.append((name, getattr(self, name)))
		deviceItems = []
		for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
			if hasattr(self, name):
				device = getattr(self, name)
				if device is not None:
					deviceItems.append((name, device))
		if deviceItems:
			xmlWriter.begintag(valueName, simpleItems)
			xmlWriter.newline()
			for name, deviceRecord in deviceItems:
				if deviceRecord is not None:
					deviceRecord.toXML(xmlWriter, font)
			xmlWriter.endtag(valueName)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(valueName, simpleItems)
			xmlWriter.newline()

	def fromXML(self, name, attrs, content, font):
		"""Populate this record from parsed XML: attributes are simple
		int values; nested tuple elements are device tables."""
		from . import otTables
		for k, v in attrs.items():
			setattr(self, k, int(v))
		for element in content:
			# BUGFIX: use the builtin ``tuple`` rather than the
			# Python 2-only types.TupleType (the same object on
			# Python 2, and portable to Python 3).
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			value = getattr(otTables, name)()
			for elem2 in content:
				if not isinstance(elem2, tuple):
					continue
				name2, attrs2, content2 = elem2
				value.fromXML(name2, attrs2, content2, font)
			setattr(self, name, value)

	def __cmp__(self, other):
		# Python 2 comparison via __cmp__ (ignored on Python 3).
		if not isinstance(self, type(other)): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.__dict__, other.__dict__)
847