otBase.py revision e388db566b9ba42669c7e353db4293cf27bc2a5b
from __future__ import print_function, division
from fontTools.misc.py23 import *
from .DefaultTable import DefaultTable
import struct

class OverflowErrorRecord(object):
	def __init__(self, overflowTuple):
		self.tableType = overflowTuple[0]
		self.LookupListIndex = overflowTuple[1]
		self.SubTableIndex = overflowTuple[2]
		self.itemName = overflowTuple[3]
		self.itemIndex = overflowTuple[4]

	def __repr__(self):
		return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))

class OTLOffsetOverflowError(Exception):
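	"""Raised by OTTableWriter.getData() when a 16-bit offset overflows.

	The attached OverflowErrorRecord identifies the lookup or subtable
	involved, so that the caller can restructure it and retry the compile
	(see the BaseTTXConverter.compile docstring).
	"""
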
	def __init__(self, overflowErrorRecord):
		self.value = overflowErrorRecord

	def __str__(self):
		return repr(self.value)


class BaseTTXConverter(DefaultTable):

	"""Generic base class for TTX table converters. It functions as an
	adapter between the TTX (ttLib actually) table model and the model
	we use for OpenType tables, which is necessarily subtly different.
	"""

	def decompile(self, data, font):
		from . import otTables
		cachingStats = None if True else {}
		class GlobalState(object):
			def __init__(self, tableType, cachingStats):
				self.tableType = tableType
				self.cachingStats = cachingStats
		globalState = GlobalState(tableType=self.tableTag,
					  cachingStats=cachingStats)
		reader = OTTableReader(data, globalState)
		tableClass = getattr(otTables, self.tableTag)
		self.table = tableClass()
		self.table.decompile(reader, font)
		if cachingStats:
			stats = sorted([(v, k) for k, v in cachingStats.items()])
			stats.reverse()
			print("cachingStats for ", self.tableTag)
			for v, k in stats:
				if v < 2:
					break
				print(v, k)
			print("---", len(stats))

	def compile(self, font):
		""" Create a top-level OTTableWriter for the GPOS/GSUB table.
			Call the compile method for the table
				for each 'converter' record in the table converter list
					call converter's write method for each item in the value.
						- For simple items, the write method adds a string to the
						writer's self.items list.
						- For Struct/Table/Subtable items, it first adds a new writer to
						the writer's self.items, then calls the item's compile method.
						This creates a tree of writers, rooted at the GSUB/GPOS writer, with
						each writer representing a table, and the writer.items list containing
						the child data strings and writers.
			call the getAllData method
				call _doneWriting, which removes duplicates
				call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
				Traverse the flat list of tables, calling getDataLength on each to update their position
				Traverse the flat list of tables again, calling getData on each to get the data in the table, now that
				positions and offsets are known.

				If a lookup subtable overflows an offset, we have to start all over.
		"""
		class GlobalState(object):
			def __init__(self, tableType):
				self.tableType = tableType
		globalState = GlobalState(tableType=self.tableTag)
		writer = OTTableWriter(globalState)
		writer.parent = None
		self.table.compile(writer, font)
		return writer.getAllData()
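
	# A typical entry point (sketch): ttLib calls this compile() when a font
	# is saved, e.g. font["GSUB"].compile(font) for a font opened with
	# fontTools.ttLib.TTFont; the returned bytes are the binary table data.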

	def toXML(self, writer, font):
		self.table.toXML2(writer, font)

	def fromXML(self, name, attrs, content, font):
		from . import otTables
		if not hasattr(self, "table"):
			tableClass = getattr(otTables, self.tableTag)
			self.table = tableClass()
		self.table.fromXML(name, attrs, content, font)


class OTTableReader(object):

	"""Helper class to retrieve data from an OpenType table."""

	__slots__ = ('data', 'offset', 'pos', 'globalState', 'localState')

	def __init__(self, data, globalState={}, localState=None, offset=0):
		self.data = data
		self.offset = offset
		self.pos = offset
		self.globalState = globalState
		self.localState = localState

	def getSubReader(self, offset):
		offset = self.offset + offset
		cachingStats = self.globalState.cachingStats
		if cachingStats is not None:
			cachingStats[offset] = cachingStats.get(offset, 0) + 1
		return self.__class__(self.data, self.globalState, self.localState, offset)

	def readUShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">H", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">h", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readLong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">l", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readUInt24(self):
		pos = self.pos
		newpos = pos + 3
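		# prepend a zero byte so the 3-byte big-endian value can be unpacked
		# with the 4-byte ">l" format below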
		value, = struct.unpack(">l", b'\0'+self.data[pos:newpos])
		self.pos = newpos
		return value

	def readULong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">L", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readTag(self):
		pos = self.pos
		newpos = pos + 4
		value = Tag(self.data[pos:newpos])
		assert len(value) == 4
		self.pos = newpos
		return value

	def __setitem__(self, name, value):
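		# copy-on-write: give this reader its own localState dict, so values
		# propagated here (e.g. counts or "FeatureTag") do not leak into
		# sibling readers that share the original dict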
		state = self.localState.copy() if self.localState else dict()
		state[name] = value
		self.localState = state

	def __getitem__(self, name):
		return self.localState[name]


class OTTableWriter(object):

	"""Helper class to gather and assemble data for OpenType tables."""

	def __init__(self, globalState, localState=None):
		self.items = []
		self.pos = None
		self.globalState = globalState
		self.localState = localState

	def __setitem__(self, name, value):
		state = self.localState.copy() if self.localState else dict()
		state[name] = value
		self.localState = state

	def __getitem__(self, name):
		return self.localState[name]

	# assembler interface

	def getAllData(self):
		"""Assemble all data, including all subtables."""
		self._doneWriting()
		tables, extTables = self._gatherTables()
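		# _gatherTables() appends each writer after its children, leaving the
		# root writer last; reversing the lists puts the top-level table
		# first and its subtables after it.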
		tables.reverse()
		extTables.reverse()
		# Gather all data in two passes: the absolute positions of all
		# subtables are needed before the actual data can be assembled.
		pos = 0
		for table in tables:
			table.pos = pos
			pos = pos + table.getDataLength()

		for table in extTables:
			table.pos = pos
			pos = pos + table.getDataLength()


		data = []
		for table in tables:
			tableData = table.getData()
			data.append(tableData)

		for table in extTables:
			tableData = table.getData()
			data.append(tableData)

		return bytesjoin(data)

	def getDataLength(self):
		"""Return the length of this table in bytes, without subtables."""
		l = 0
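		# items that are sub-writers or count references occupy a single
		# offset or count field in this table; plain byte strings contribute
		# their own length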
		for item in self.items:
			if hasattr(item, "getData") or hasattr(item, "getCountData"):
				if item.longOffset:
					l = l + 4  # sizeof(ULong)
				else:
					l = l + 2  # sizeof(UShort)
			else:
				l = l + len(item)
		return l

	def getData(self):
		"""Assemble the data for this writer/table, without subtables."""
		items = list(self.items)  # make a shallow copy
		pos = self.pos
		numItems = len(items)
		for i in range(numItems):
			item = items[i]

			if hasattr(item, "getData"):
				if item.longOffset:
					items[i] = packULong(item.pos - pos)
				else:
					try:
						items[i] = packUShort(item.pos - pos)
					except AssertionError:
						# provide data to fix the overflow problem.
						# If the overflow is to a lookup, or from a lookup to a subtable,
						# just report the current item.
						if self.name in ['LookupList', 'Lookup']:
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						else:
							# overflow is within a subTable. Life is more complicated.
							# If we split the sub-table just before the current item, we may still suffer overflow.
							# This is because duplicate table merging is done only within an Extension subTable tree;
							# when we split the subtable in two, some items may no longer be duplicates.
							# Get the worst case by adding up all the item lengths (depth-first traversal),
							# and then report the first item that overflows a short.
							def getDeepItemLength(table):
								if hasattr(table, "getDataLength"):
									length = 0
									for item in table.items:
										length = length + getDeepItemLength(item)
								else:
									length = len(table)
								return length

							length = self.getDataLength()
							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
								# Coverage is first in the item list, but last in the table list.
								# The original overflow is really in the item list. Skip the Coverage
								# table in the following test.
								items = items[i+1:]

							for j in range(len(items)):
								item = items[j]
								length = length + getDeepItemLength(item)
								if length > 65535:
									break
						overflowErrorRecord = self.getOverflowErrorRecord(item)


						raise OTLOffsetOverflowError(overflowErrorRecord)

		return bytesjoin(items)

	def __hash__(self):
		# only works after self._doneWriting() has been called
		return hash(self.items)

	def __eq__(self, other):
		if type(self) != type(other):
			raise TypeError("unordered types %s() < %s()" % (type(self), type(other)))
		return self.items == other.items

	def _doneWriting(self, internedTables=None):
		# Convert CountData references to data string items,
		# collapse duplicate table references to a unique entry.
		# "tables" are OTTableWriter objects.

		# For Extension Lookup types, we can
		# eliminate duplicates only within the tree under the Extension Lookup,
		# as offsets may exceed 64K even between Extension LookupTable subtables.
		if internedTables is None:
			internedTables = {}
		items = self.items
		iRange = list(range(len(items)))

		if hasattr(self, "Extension"):
			newTree = 1
		else:
			newTree = 0
		for i in iRange:
			item = items[i]
			if hasattr(item, "getCountData"):
				items[i] = item.getCountData()
			elif hasattr(item, "getData"):
				if newTree:
					item._doneWriting()
				else:
					item._doneWriting(internedTables)
					if item in internedTables:
						items[i] = item = internedTables[item]
					else:
						internedTables[item] = item
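		# freeze the items into a tuple so this writer becomes hashable and
		# can in turn be interned as a duplicate by its parent (see __hash__)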
		self.items = tuple(items)

	def _gatherTables(self, tables=None, extTables=None, done=None):
		# Convert table references in self.items tree to a flat
		# list of tables in depth-first traversal order.
		# "tables" are OTTableWriter objects.
		# We do the traversal in reverse order at each level, in order to
		# resolve duplicate references to be the last reference in the list of tables.
		# For extension lookups, duplicate references can be merged only within the
		# writer tree under the extension lookup.
		if tables is None: # init call for first time.
			tables = []
			extTables = []
			done = {}

		done[self] = 1

		numItems = len(self.items)
		iRange = list(range(numItems))
		iRange.reverse()

		if hasattr(self, "Extension"):
			appendExtensions = 1
		else:
			appendExtensions = 0

		# add Coverage table if it is sorted last.
		sortCoverageLast = 0
		if hasattr(self, "sortCoverageLast"):
			# Find coverage table
			for i in range(numItems):
				item = self.items[i]
				if hasattr(item, "name") and (item.name == "Coverage"):
					sortCoverageLast = 1
					break
			if item not in done:
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		saveItem = None
		for i in iRange:
			item = self.items[i]
			if not hasattr(item, "getData"):
				continue

			if sortCoverageLast and (i==1) and item.name == 'Coverage':
				# we've already 'gathered' it above
				continue

			if appendExtensions:
				assert extTables is not None, "Program or XML editing error. Extension subtables cannot contain extension subtables"
				newDone = {}
				item._gatherTables(extTables, None, newDone)

			elif item not in done:
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self


		tables.append(self)
		return tables, extTables

	# interface for gathering data, as used by table.compile()

	def getSubWriter(self):
		subwriter = self.__class__(self.globalState, self.localState)
		# because some subtables have identical values, we discard the
		# duplicates under the getAllData method. Hence some subtable
		# writers can have more than one parent writer.
		subwriter.parent = {0:self}
		return subwriter

	def writeUShort(self, value):
		assert 0 <= value < 0x10000
		self.items.append(struct.pack(">H", value))

	def writeShort(self, value):
		self.items.append(struct.pack(">h", value))

	def writeUInt24(self, value):
		assert 0 <= value < 0x1000000
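		# pack as a 4-byte unsigned long, then drop the high byte to keep
		# the low three bytes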
		b = struct.pack(">L", value)
		self.items.append(b[1:])

	def writeLong(self, value):
		self.items.append(struct.pack(">l", value))

	def writeULong(self, value):
		self.items.append(struct.pack(">L", value))

	def writeTag(self, tag):
		tag = Tag(tag).tobytes()
		assert len(tag) == 4
		self.items.append(tag)

	def writeSubTable(self, subWriter):
		self.items.append(subWriter)

	def writeCountReference(self, table, name):
		ref = CountReference(table, name)
		self.items.append(ref)
		return ref

	def writeStruct(self, format, values):
		data = struct.pack(*(format,) + values)
		self.items.append(data)

	def writeData(self, data):
		self.items.append(data)

	def getOverflowErrorRecord(self, item):
		LookupListIndex = SubTableIndex = itemName = itemIndex = None
		if self.name == 'LookupList':
			LookupListIndex = item.repeatIndex
		elif self.name == 'Lookup':
			LookupListIndex = self.repeatIndex
			SubTableIndex = item.repeatIndex
		else:
			itemName = item.name
			if hasattr(item, 'repeatIndex'):
				itemIndex = item.repeatIndex
			if self.name == 'SubTable':
				LookupListIndex = self.parent[0].repeatIndex
				SubTableIndex = self.repeatIndex
			elif self.name == 'ExtSubTable':
				LookupListIndex = self.parent[0].parent[0].repeatIndex
				SubTableIndex = self.parent[0].repeatIndex
			else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
				itemName = ".".join([self.name, item.name])
				p1 = self.parent[0]
				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
					itemName = ".".join([p1.name, item.name])
					p1 = p1.parent[0]
				if p1:
					if p1.name == 'ExtSubTable':
						LookupListIndex = self.parent[0].parent[0].repeatIndex
						SubTableIndex = self.parent[0].repeatIndex
					else:
						LookupListIndex = self.parent[0].repeatIndex
						SubTableIndex = self.repeatIndex

		return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) )


class CountReference(object):
	"""A reference to a Count value, not a count of references."""
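	# The writer stores a CountReference in its items list in place of the
	# packed count; setValue() fills in the real value later, and
	# _doneWriting() calls getCountData() to turn it into two bytes.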
	def __init__(self, table, name):
		self.table = table
		self.name = name
	def setValue(self, value):
		table = self.table
		name = self.name
		if table[name] is None:
			table[name] = value
		else:
			assert table[name] == value, (name, table[name], value)
	def getCountData(self):
		return packUShort(self.table[self.name])


def packUShort(value):
	assert 0 <= value < 0x10000, value
	return struct.pack(">H", value)


def packULong(value):
	assert 0 <= value < 0x100000000, value
	return struct.pack(">L", value)


class BaseTable(object):

	"""Generic base class for all OpenType (sub)tables."""

	def __init__(self):
		# compileStatus values:
		#   0 means the table was created
		#   1 means the table.read() function was called by a table which is
		#     subject to delayed decompilation
		#   2 means that it was subject to delayed decompilation, and has
		#     been decompiled
		self.compileStatus = 0

		self.recurse = 0

	def __getattr__(self, attr):
		# we get here only when the table does not have the attribute.
		# This method override exists so that we can try to decompile
		# a table which is subject to delayed decompilation, and then try
		# to get the value again after decompilation.
		self.recurse += 1
		if self.recurse > 2:
			# shouldn't ever get here - we should only get to two levels of recursion.
			# this guards against self.decompile failing to set compileStatus to something other than 1.
			raise AttributeError(attr)
		if self.compileStatus == 1:
			self.ensureDecompiled()
			val = getattr(self, attr)
			self.recurse -= 1
			return val

		raise AttributeError(attr)

	def getConverters(self):
		return self.converters

	def getConverterByName(self, name):
		return self.convertersByName[name]

	def decompile(self, reader, font):
		self.compileStatus = 2 # table has been decompiled.
		self.readFormat(reader)
		table = {}
		self.__rawTable = table  # for debugging
		converters = self.getConverters()
		for conv in converters:
			if conv.name == "SubTable":
				conv = conv.getConverter(reader.globalState.tableType,
						table["LookupType"])
			if conv.name == "ExtSubTable":
				conv = conv.getConverter(reader.globalState.tableType,
						table["ExtensionLookupType"])
			if conv.name == "FeatureParams":
				conv = conv.getConverter(reader["FeatureTag"])
			if conv.repeat:
				l = []
				if conv.repeat in table:
					countValue = table[conv.repeat]
				else:
					# conv.repeat is a propagated count
					countValue = reader[conv.repeat]
				for i in range(countValue + conv.aux):
					l.append(conv.read(reader, font, table))
				table[conv.name] = l
			else:
				if conv.aux and not eval(conv.aux, None, table):
					continue
				table[conv.name] = conv.read(reader, font, table)
				if conv.isPropagated:
					reader[conv.name] = table[conv.name]

		self.postRead(table, font)

		del self.__rawTable  # succeeded, get rid of debugging info

	def ensureDecompiled(self):
		if self.compileStatus != 1:
			return
		self.decompile(self.reader, self.font)
		del self.reader, self.font

	def compile(self, writer, font):
		self.ensureDecompiled()
		table = self.preWrite(font)

		if hasattr(self, 'sortCoverageLast'):
			writer.sortCoverageLast = 1

		self.writeFormat(writer)
		for conv in self.getConverters():
			value = table.get(conv.name)
			if conv.repeat:
				if value is None:
					value = []
				countValue = len(value) - conv.aux
				if conv.repeat in table:
					ref = table[conv.repeat]
					table[conv.repeat] = None
					ref.setValue(countValue)
				else:
					# conv.repeat is a propagated count
					writer[conv.repeat].setValue(countValue)
				for i in range(len(value)):
					conv.write(writer, font, table, value[i], i)
			elif conv.isCount:
				# Special-case Count values.
				# Assumption: a Count field will *always* precede
				# the actual array(s).
				# We need a default value, as it may be set later by a nested
				# table. We will later store it here.
				# We add a reference: by the time the data is assembled
				# the Count value will be filled in.
				ref = writer.writeCountReference(table, conv.name)
				if conv.isPropagated:
					table[conv.name] = None
					writer[conv.name] = ref
				else:
					table[conv.name] = ref
			else:
				if conv.aux and not eval(conv.aux, None, table):
					continue
				conv.write(writer, font, table, value)
				if conv.isPropagated:
					writer[conv.name] = value

	def readFormat(self, reader):
		pass

	def writeFormat(self, writer):
		pass

	def postRead(self, table, font):
		self.__dict__.update(table)

	def preWrite(self, font):
		return self.__dict__.copy()

	def toXML(self, xmlWriter, font, attrs=None):
		tableName = self.__class__.__name__
		if attrs is None:
			attrs = []
		if hasattr(self, "Format"):
			attrs = attrs + [("Format", self.Format)]
		xmlWriter.begintag(tableName, attrs)
		xmlWriter.newline()
		self.toXML2(xmlWriter, font)
		xmlWriter.endtag(tableName)
		xmlWriter.newline()

	def toXML2(self, xmlWriter, font):
		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
		# This is because in TTX our parent writes our main tag, and in otBase.py we
		# do it ourselves. I think I'm getting schizophrenic...
		for conv in self.getConverters():
			if conv.repeat:
				value = getattr(self, conv.name)
				for i in range(len(value)):
					item = value[i]
					conv.xmlWrite(xmlWriter, font, item, conv.name,
							[("index", i)])
			else:
				if conv.aux and not eval(conv.aux, None, vars(self)):
					continue
				value = getattr(self, conv.name)
				conv.xmlWrite(xmlWriter, font, value, conv.name, [])

	def fromXML(self, name, attrs, content, font):
		try:
			conv = self.getConverterByName(name)
		except KeyError:
			raise    # XXX on KeyError, raise nice error
		value = conv.xmlRead(attrs, content, font)
		if conv.repeat:
			seq = getattr(self, conv.name, None)
			if seq is None:
				seq = []
				setattr(self, conv.name, seq)
			seq.append(value)
		else:
			setattr(self, conv.name, value)

	def __eq__(self, other):
		if type(self) != type(other):
			raise TypeError("unordered types %s() < %s()" % (type(self), type(other)))

		self.ensureDecompiled()
		other.ensureDecompiled()

		return self.__dict__ == other.__dict__


class FormatSwitchingBaseTable(BaseTable):

	"""Minor specialization of BaseTable, for tables that have multiple
	formats, e.g. CoverageFormat1 vs. CoverageFormat2."""

	def getConverters(self):
		return self.converters[self.Format]

	def getConverterByName(self, name):
		return self.convertersByName[self.Format][name]

	def readFormat(self, reader):
		self.Format = reader.readUShort()
		assert self.Format != 0, (self, reader.pos, len(reader.data))

	def writeFormat(self, writer):
		writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#

valueRecordFormat = [
#	Mask	 Name            isDevice  signed
	(0x0001, "XPlacement",   0,        1),
	(0x0002, "YPlacement",   0,        1),
	(0x0004, "XAdvance",     0,        1),
	(0x0008, "YAdvance",     0,        1),
	(0x0010, "XPlaDevice",   1,        0),
	(0x0020, "YPlaDevice",   1,        0),
	(0x0040, "XAdvDevice",   1,        0),
	(0x0080, "YAdvDevice",   1,        0),
#	reserved:
	(0x0100, "Reserved1",    0,        0),
	(0x0200, "Reserved2",    0,        0),
	(0x0400, "Reserved3",    0,        0),
	(0x0800, "Reserved4",    0,        0),
	(0x1000, "Reserved5",    0,        0),
	(0x2000, "Reserved6",    0,        0),
	(0x4000, "Reserved7",    0,        0),
	(0x8000, "Reserved8",    0,        0),
]

def _buildDict():
	d = {}
	for mask, name, isDevice, signed in valueRecordFormat:
		d[name] = mask, isDevice, signed
	return d

valueRecordFormatDict = _buildDict()


class ValueRecordFactory(object):

	"""Given a format code, this object converts ValueRecords."""

	def __init__(self, valueFormat):
		format = []
		for mask, name, isDevice, signed in valueRecordFormat:
			if valueFormat & mask:
				format.append((name, isDevice, signed))
		self.format = format
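
	# Example (sketch): valueFormat 0x0005 selects XPlacement and XAdvance,
	# so readValueRecord() reads two signed shorts and writeValueRecord()
	# writes them back in the same order.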

	def readValueRecord(self, reader, font):
		format = self.format
		if not format:
			return None
		valueRecord = ValueRecord()
		for name, isDevice, signed in format:
			if signed:
				value = reader.readShort()
			else:
				value = reader.readUShort()
			if isDevice:
				if value:
					from . import otTables
					subReader = reader.getSubReader(value)
					value = getattr(otTables, name)()
					value.decompile(subReader, font)
				else:
					value = None
			setattr(valueRecord, name, value)
		return valueRecord

	def writeValueRecord(self, writer, font, valueRecord):
		for name, isDevice, signed in self.format:
			value = getattr(valueRecord, name, 0)
			if isDevice:
				if value:
					subWriter = writer.getSubWriter()
					writer.writeSubTable(subWriter)
					value.compile(subWriter, font)
				else:
					writer.writeUShort(0)
			elif signed:
				writer.writeShort(value)
			else:
				writer.writeUShort(value)


class ValueRecord(object):

	# see ValueRecordFactory

	def getFormat(self):
		format = 0
		for name in self.__dict__.keys():
			format = format | valueRecordFormatDict[name][0]
		return format
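
	# e.g. a ValueRecord with only XAdvance set reports format 0x0004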

	def toXML(self, xmlWriter, font, valueName, attrs=None):
		if attrs is None:
			simpleItems = []
		else:
			simpleItems = list(attrs)
		for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
			if hasattr(self, name):
				simpleItems.append((name, getattr(self, name)))
		deviceItems = []
		for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
			if hasattr(self, name):
				device = getattr(self, name)
				if device is not None:
					deviceItems.append((name, device))
		if deviceItems:
			xmlWriter.begintag(valueName, simpleItems)
			xmlWriter.newline()
			for name, deviceRecord in deviceItems:
				if deviceRecord is not None:
					deviceRecord.toXML(xmlWriter, font)
			xmlWriter.endtag(valueName)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(valueName, simpleItems)
			xmlWriter.newline()

	def fromXML(self, name, attrs, content, font):
		from . import otTables
		for k, v in attrs.items():
			setattr(self, k, int(v))
		for element in content:
			if not isinstance(element, tuple):
				continue
			name, attrs, content = element
			value = getattr(otTables, name)()
			for elem2 in content:
				if not isinstance(elem2, tuple):
					continue
				name2, attrs2, content2 = elem2
				value.fromXML(name2, attrs2, content2, font)
			setattr(self, name, value)

	def __eq__(self, other):
		if type(self) != type(other):
			raise TypeError("unordered types %s() < %s()" % (type(self), type(other)))
		return self.__dict__ == other.__dict__