otBase.py revision 41caf2dea1e0c58c5ac763ad2575592df734b62c
from DefaultTable import DefaultTable
import otData
import struct
from types import TupleType

class OverflowErrorRecord:
	def __init__(self, overflowTuple):
		self.tableType = overflowTuple[0]
		self.LookupListIndex = overflowTuple[1]
		self.SubTableIndex = overflowTuple[2]
		self.itemName = overflowTuple[3]
		self.itemIndex = overflowTuple[4]

	def __repr__(self):
		return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))

class OTLOffsetOverflowError(Exception):
	def __init__(self, overflowErrorRecord):
		self.value = overflowErrorRecord

	def __str__(self):
		return repr(self.value)


class BaseTTXConverter(DefaultTable):

	"""Generic base class for TTX table converters. It functions as an
	adapter between the TTX (ttLib actually) table model and the model
	we use for OpenType tables, which is necessarily subtly different.
	"""

	def decompile(self, data, font):
		import otTables
		cachingStats = None if True else {}  # flip the condition to collect offset-caching statistics
		reader = OTTableReader(data, self.tableTag, cachingStats=cachingStats)
		tableClass = getattr(otTables, self.tableTag)
		self.table = tableClass()
		self.table.decompile(reader, font)
		if cachingStats:
			stats = [(v, k) for k, v in cachingStats.items()]
			stats.sort()
			stats.reverse()
			print "cachingStats for ", self.tableTag
			for v, k in stats:
				if v < 2:
					break
				print v, k
			print "---", len(stats)

	def compile(self, font):
		""" Create a top-level OTTableWriter for the GPOS/GSUB table.
			Call the compile method of the table
				for each 'converter' record in the table converter list
					call the converter's write method for each item in the value.
						- For simple items, the write method adds a string to the
						writer's self.items list.
						- For Struct/Table/Subtable items, it first adds a new writer
						to the writer's self.items, then calls the item's compile method.
						This creates a tree of writers, rooted at the GSUB/GPOS writer, with
						each writer representing a table, and the writer.items list containing
						the child data strings and writers.
			call the getAllData method
				call _doneWriting, which removes duplicates
				call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
				Traverse the flat list of tables, calling getDataLength on each to update their position
				Traverse the flat list of tables again, calling getData to get the data in each table, now that
				positions and offsets are known.

				If a lookup subtable overflows an offset, we have to start all over
				(see the sketch below).
		"""
		writer = OTTableWriter(self.tableTag)
		writer.parent = None
		self.table.compile(writer, font)
		return writer.getAllData()
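
	# A minimal sketch (an assumption, not code from this module) of how a
	# caller might respond to the overflow condition mentioned above: catch
	# OTLOffsetOverflowError, repair the offending lookup (the repair step
	# "fixTheOverflow" is hypothetical), and compile again.
	#
	#   while 1:
	#       try:
	#           data = font["GSUB"].compile(font)
	#           break
	#       except OTLOffsetOverflowError, e:
	#           fixTheOverflow(e.value)  # e.value is an OverflowErrorRecord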

	def toXML(self, writer, font):
		self.table.toXML2(writer, font)

	def fromXML(self, (name, attrs, content), font):
		import otTables
		if not hasattr(self, "table"):
			tableClass = getattr(otTables, self.tableTag)
			self.table = tableClass()
		self.table.fromXML((name, attrs, content), font)

86
87class OTTableReader:
88
89	"""Helper class to retrieve data from an OpenType table."""
90
91	def __init__(self, data, tableType, offset=0, valueFormat=None, cachingStats=None):
92		self.data = data
93		self.offset = offset
94		self.pos = offset
95		self.tableType = tableType
96		if valueFormat is None:
97			valueFormat = (ValueRecordFactory(), ValueRecordFactory())
98		self.valueFormat = valueFormat
99		self.cachingStats = cachingStats
100
101	def getSubReader(self, offset, persistent=False):
102		offset = self.offset + offset
103		if self.cachingStats is not None:
104			try:
105				self.cachingStats[offset] = self.cachingStats[offset] + 1
106			except KeyError:
107				self.cachingStats[offset] = 1
108		valueFormat = self.valueFormat
109		if persistent:
110			valueFormat = tuple(ValueRecordFactory(v) for v in valueFormat)
111
112		subReader = self.__class__(self.data, self.tableType, offset,
113			valueFormat, self.cachingStats)
114		return subReader
115
116	def readUShort(self):
117		pos = self.pos
118		newpos = pos + 2
119		value, = struct.unpack(">H", self.data[pos:newpos])
120		self.pos = newpos
121		return value
122
123	def readShort(self):
124		pos = self.pos
125		newpos = pos + 2
126		value, = struct.unpack(">h", self.data[pos:newpos])
127		self.pos = newpos
128		return value
129
130	def readLong(self):
131		pos = self.pos
132		newpos = pos + 4
133		value, = struct.unpack(">l", self.data[pos:newpos])
134		self.pos = newpos
135		return value
136
137	def readULong(self):
138		pos = self.pos
139		newpos = pos + 4
140		value, = struct.unpack(">L", self.data[pos:newpos])
141		self.pos = newpos
142		return value
143
144	def readTag(self):
145		pos = self.pos
146		newpos = pos + 4
147		value = self.data[pos:newpos]
148		assert len(value) == 4
149		self.pos = newpos
150		return value
151
152	def readStruct(self, format, size=None):
153		if size is None:
154			size = struct.calcsize(format)
155		else:
156			assert size == struct.calcsize(format)
157		pos = self.pos
158		newpos = pos + size
159		values = struct.unpack(format, self.data[pos:newpos])
160		self.pos = newpos
161		return values
162
163	def setValueFormat(self, format, which):
164		self.valueFormat[which].setFormat(format)
165
166	def readValueRecord(self, font, which):
167		return self.valueFormat[which].readValueRecord(self, font)
168
169
class OTTableWriter:

	"""Helper class to gather and assemble data for OpenType tables."""

	def __init__(self, tableType, valueFormat=None):
		self.items = []
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = ValueRecordFactory(), ValueRecordFactory()
		self.valueFormat = valueFormat
		self.pos = None

	# assembler interface

	def getAllData(self):
		"""Assemble all data, including all subtables."""
		self._doneWriting()
		tables, extTables = self._gatherTables()
		tables.reverse()
		extTables.reverse()
		# Gather all data in two passes: the absolute positions of all
		# subtables are needed before the actual data can be assembled.
		pos = 0
		for table in tables:
			table.pos = pos
			pos = pos + table.getDataLength()

		for table in extTables:
			table.pos = pos
			pos = pos + table.getDataLength()

		data = []
		for table in tables:
			tableData = table.getData()
			data.append(tableData)

		for table in extTables:
			tableData = table.getData()
			data.append(tableData)

		return "".join(data)

	def getDataLength(self):
		"""Return the length of this table in bytes, without subtables."""
		l = 0
		if hasattr(self, "Extension"):
			longOffset = 1
		else:
			longOffset = 0
		for item in self.items:
			if hasattr(item, "getData") or hasattr(item, "getCountData"):
				if longOffset:
					l = l + 4  # sizeof(ULong)
				else:
					l = l + 2  # sizeof(UShort)
			else:
				l = l + len(item)
		return l

	def getData(self):
		"""Assemble the data for this writer/table, without subtables."""
		items = list(self.items)  # make a shallow copy
		if hasattr(self, "Extension"):
			longOffset = 1
		else:
			longOffset = 0
		pos = self.pos
		numItems = len(items)
		for i in range(numItems):
			item = items[i]

			if hasattr(item, "getData"):
				if longOffset:
					items[i] = packULong(item.pos - pos)
				else:
					try:
						items[i] = packUShort(item.pos - pos)
					except AssertionError:
						# Provide data to fix the overflow problem.
						# If the overflow is to a lookup, or from a lookup to a subtable,
						# just report the current item.
						if self.name in ['LookupList', 'Lookup']:
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						else:
							# The overflow is within a subtable. Life is more complicated.
							# If we split the subtable just before the current item, we may still suffer overflow.
							# This is because duplicate table merging is done only within an Extension subtable tree;
							# when we split the subtable in two, some items may no longer be duplicates.
							# Get the worst case by adding up all the item lengths (depth-first traversal),
							# and then report the first item that overflows a short.
							def getDeepItemLength(table):
								if hasattr(table, "getDataLength"):
									length = 0
									for item in table.items:
										length = length + getDeepItemLength(item)
								else:
									length = len(table)
								return length

							length = self.getDataLength()
							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
								# Coverage is first in the item list, but last in the table list;
								# the original overflow is really in the item list. Skip the Coverage
								# table in the following test.
								items = items[i+1:]

							for j in range(len(items)):
								item = items[j]
								length = length + getDeepItemLength(item)
								if length > 65535:
									break
						overflowErrorRecord = self.getOverflowErrorRecord(item)

						raise OTLOffsetOverflowError, overflowErrorRecord

		return "".join(items)

	def __hash__(self):
		# only works after self._doneWriting() has been called
		return hash(self.items)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.items, other.items)

	def _doneWriting(self, internedTables=None):
		# Convert CountData references to data string items,
		# and collapse duplicate table references to a unique entry.
		# "tables" are OTTableWriter objects.

		# For Extension Lookup types, we can
		# eliminate duplicates only within the tree under the Extension Lookup,
		# as offsets may exceed 64K even between Extension LookupTable subtables.
		if internedTables is None:
			internedTables = {}
		items = self.items
		iRange = range(len(items))

		if hasattr(self, "Extension"):
			newTree = 1
		else:
			newTree = 0
		for i in iRange:
			item = items[i]
			if hasattr(item, "getCountData"):
				items[i] = item.getCountData()
			elif hasattr(item, "getData"):
				if newTree:
					item._doneWriting()
				else:
					item._doneWriting(internedTables)
					if internedTables.has_key(item):
						items[i] = item = internedTables[item]
					else:
						internedTables[item] = item
		self.items = tuple(items)

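	# Illustrative note (an assumption, not code from this module): after
	# _doneWriting(), two subtable writers that were built with identical
	# items compare equal and hash alike, so the interning above lets both
	# parents share a single writer and the data is emitted only once.
	#
	#   a = writer.getSubWriter(); a.writeUShort(1)
	#   b = writer.getSubWriter(); b.writeUShort(1)
	#   # once both have been through _doneWriting(internedTables), a == b,
	#   # and one of them replaces the other in the parent's item list.
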
	def _gatherTables(self, tables=None, extTables=None, done=None):
		# Convert table references in the self.items tree to a flat
		# list of tables in depth-first traversal order.
		# "tables" are OTTableWriter objects.
		# We do the traversal in reverse order at each level, in order to
		# resolve duplicate references to be the last reference in the list of tables.
		# For extension lookups, duplicate references can be merged only within the
		# writer tree under the extension lookup.
		if tables is None: # init call for first time.
			tables = []
			extTables = []
			done = {}

		done[self] = 1

		numItems = len(self.items)
		iRange = range(numItems)
		iRange.reverse()

		if hasattr(self, "Extension"):
			appendExtensions = 1
		else:
			appendExtensions = 0

		# add Coverage table if it is sorted last.
		sortCoverageLast = 0
		if hasattr(self, "sortCoverageLast"):
			# Find the Coverage table
			for i in range(numItems):
				item = self.items[i]
				if hasattr(item, "name") and (item.name == "Coverage"):
					sortCoverageLast = 1
					break
			if not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		saveItem = None
		for i in iRange:
			item = self.items[i]
			if not hasattr(item, "getData"):
				continue

			if sortCoverageLast and (i==1) and item.name == 'Coverage':
				# we've already 'gathered' it above
				continue

			if appendExtensions:
				assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extension subtables"
				newDone = {}
				item._gatherTables(extTables, None, newDone)

			elif not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		tables.append(self)
		return tables, extTables

	# interface for gathering data, as used by table.compile()

	def getSubWriter(self):
		subwriter = self.__class__(self.tableType, self.valueFormat)
		subwriter.parent = {0:self}	# Because some subtables have identical values, we discard
									# the duplicates under the getAllData method. Hence some
									# subtable writers can have more than one parent writer.
		return subwriter

	def writeUShort(self, value):
		assert 0 <= value < 0x10000
		self.items.append(struct.pack(">H", value))

	def writeShort(self, value):
		self.items.append(struct.pack(">h", value))

	def writeLong(self, value):
		self.items.append(struct.pack(">l", value))

	def writeULong(self, value):
		self.items.append(struct.pack(">L", value))

	def writeTag(self, tag):
		assert len(tag) == 4
		self.items.append(tag)

	def writeSubTable(self, subWriter):
		self.items.append(subWriter)

	def writeCountReference(self, table, name):
		self.items.append(CountReference(table, name))

	def writeStruct(self, format, values):
		data = struct.pack(format, *values)
		self.items.append(data)

	def writeData(self, data):
		self.items.append(data)

	def setValueFormat(self, format, which):
		self.valueFormat[which].setFormat(format)

	def writeValueRecord(self, value, font, which):
		return self.valueFormat[which].writeValueRecord(self, font, value)

	def getOverflowErrorRecord(self, item):
		LookupListIndex = SubTableIndex = itemName = itemIndex = None
		if self.name == 'LookupList':
			LookupListIndex = item.repeatIndex
		elif self.name == 'Lookup':
			LookupListIndex = self.repeatIndex
			SubTableIndex = item.repeatIndex
		else:
			itemName = item.name
			if hasattr(item, 'repeatIndex'):
				itemIndex = item.repeatIndex
			if self.name == 'SubTable':
				LookupListIndex = self.parent[0].repeatIndex
				SubTableIndex = self.repeatIndex
			elif self.name == 'ExtSubTable':
				LookupListIndex = self.parent[0].parent[0].repeatIndex
				SubTableIndex = self.parent[0].repeatIndex
			else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
				itemName = ".".join([self.name, item.name])
				p1 = self.parent[0]
				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
					itemName = ".".join([p1.name, item.name])
					p1 = p1.parent[0]
				if p1:
					if p1.name == 'ExtSubTable':
						LookupListIndex = self.parent[0].parent[0].repeatIndex
						SubTableIndex = self.parent[0].repeatIndex
					else:
						LookupListIndex = self.parent[0].repeatIndex
						SubTableIndex = self.repeatIndex

		return OverflowErrorRecord( (self.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) )


class CountReference:
	"""A reference to a Count value, not a count of references."""
	def __init__(self, table, name):
		self.table = table
		self.name = name
	def getCountData(self):
		return packUShort(self.table[self.name])


def packUShort(value):
	assert 0 <= value < 0x10000, value
	return struct.pack(">H", value)


def packULong(value):
	assert 0 <= value < 0x100000000, value
	return struct.pack(">L", value)


class BaseTable(object):

	"""Generic base class for all OpenType (sub)tables."""

	def __init__(self):
		# compileStatus:
		#   0 means the table was just created
		#   1 means the table.read() function was called by a table which is
		#     subject to delayed compilation
		#   2 means the table was subject to delayed compilation and has been
		#     decompiled
		#   3 means the start and end fields have been filled out, and we can
		#     use the data string rather than compiling from the table data.
		self.compileStatus = 0

		self.recurse = 0

	def __getattr__(self, attr):
		# We get here only when the table does not have the attribute.
		# This method override exists so that we can try to decompile
		# a table which is subject to delayed decompilation, and then try
		# to get the value again after decompilation.
		self.recurse += 1
		if self.recurse > 2:
			# Shouldn't ever get here - we should only get to two levels of recursion.
			# This guards against self.decompile NOT setting compileStatus to other than 1.
			raise AttributeError, attr
		if self.compileStatus == 1:
			self.ensureDecompiled()
			val = getattr(self, attr)
			self.recurse -= 1
			return val

		raise AttributeError, attr

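	# A rough sketch (an assumption, not code from this module) of the delayed
	# decompilation flow that compileStatus/__getattr__ support: a table that
	# was read lazily stashes its reader state and decompiles itself on first
	# attribute access.
	#
	#   table.compileStatus = 1
	#   table.reader, table.font, table.countVars = reader, font, countVars
	#   ...
	#   table.SomeAttribute   # __getattr__ -> ensureDecompiled() -> decompile()
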
	def getConverters(self):
		return self.converters

	def getConverterByName(self, name):
		return self.convertersByName[name]

	def decompile(self, reader, font, countVars=None):
		self.compileStatus = 2 # table has been decompiled.
		if countVars is None:
			countVars = {}
		self.readFormat(reader)
		counts = []
		table = {}
		self.__rawTable = table  # for debugging
		converters = self.getConverters()
		for conv in converters:
			if conv.name == "SubTable":
				conv = conv.getConverter(reader.tableType,
						table["LookupType"])
			if conv.name == "ExtSubTable":
				conv = conv.getConverter(reader.tableType,
						table["ExtensionLookupType"])
			if conv.repeat:
				l = []
				for i in range(countVars[conv.repeat] + conv.repeatOffset):
					l.append(conv.read(reader, font, countVars))
				table[conv.name] = l
				if conv.repeat in counts:
					del countVars[conv.repeat]
					counts.remove(conv.repeat)

			else:
				table[conv.name] = conv.read(reader, font, countVars)
				if conv.isCount:
					counts.append(conv.name)
					countVars[conv.name] = table[conv.name]

		for count in counts:
			del countVars[count]

		self.postRead(table, font)

		del self.__rawTable  # succeeded, get rid of debugging info

	def ensureDecompiled(self):
		if self.compileStatus != 1:
			return
		self.decompile(self.reader, self.font, self.countVars)
		del self.reader, self.font, self.countVars

	def preCompile(self):
		pass # used only by the LookupList class

	def compile(self, writer, font, countVars=None):
		if countVars is None:
			countVars = {}
		counts = []
		table = self.preWrite(font)

		if hasattr(self, 'sortCoverageLast'):
			writer.sortCoverageLast = 1

		self.writeFormat(writer)
		for conv in self.getConverters():
			value = table.get(conv.name)
			if conv.repeat:
				if value is None:
					value = []
				countVars[conv.repeat](len(value) - conv.repeatOffset)
				for i in range(len(value)):
					conv.write(writer, font, countVars, value[i], i)
				if conv.repeat in counts:
					del countVars[conv.repeat]
					counts.remove(conv.repeat)
			elif conv.isCount:
				# Special-case Count values.
				# Assumption: a Count field will *always* precede
				# the actual array.
				# We need a placeholder value, as the real count may only be
				# known later, possibly set by a nested table; it will be
				# stored here.
				table[conv.name] = None
				# We add a reference: by the time the data is assembled
				# the Count value will be filled in.
				name = conv.name
				writer.writeCountReference(table, name)
				counts.append(name)
				def storeValue(value):
					if table[name] is None:
						table[name] = value
					else:
						assert table[name] == value, (table[name], value)
				countVars[name] = storeValue
			else:
				conv.write(writer, font, countVars, value)

		for count in counts:
			del countVars[count]

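	# A small sketch (an assumption, not code from this module) of the Count
	# mechanism above, for a hypothetical "GlyphCount"/"GlyphArray" pair:
	#
	#   writer.writeCountReference(table, "GlyphCount")  # placeholder in the stream
	#   countVars["GlyphCount"] = storeValue             # callback for the repeat converter
	#   ...
	#   countVars["GlyphCount"](len(glyphs))             # repeat converter reports the length
	#   # Later, OTTableWriter._doneWriting() calls CountReference.getCountData(),
	#   # which packs table["GlyphCount"] into the final data.
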
	def readFormat(self, reader):
		pass

	def writeFormat(self, writer):
		pass

	def postRead(self, table, font):
		self.__dict__.update(table)

	def preWrite(self, font):
		self.ensureDecompiled()
		return self.__dict__.copy()

	def toXML(self, xmlWriter, font, attrs=None):
		tableName = self.__class__.__name__
		if attrs is None:
			attrs = []
		if hasattr(self, "Format"):
			attrs = attrs + [("Format", self.Format)]
		xmlWriter.begintag(tableName, attrs)
		xmlWriter.newline()
		self.toXML2(xmlWriter, font)
		xmlWriter.endtag(tableName)
		xmlWriter.newline()

	def toXML2(self, xmlWriter, font):
		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
		# This is because in TTX our parent writes our main tag, and in otBase.py we
		# do it ourselves. I think I'm getting schizophrenic...
		for conv in self.getConverters():
			value = getattr(self, conv.name)
			if conv.repeat:
				for i in range(len(value)):
					item = value[i]
					conv.xmlWrite(xmlWriter, font, item, conv.name,
							[("index", i)])
			else:
				conv.xmlWrite(xmlWriter, font, value, conv.name, [])

	def fromXML(self, (name, attrs, content), font):
		try:
			conv = self.getConverterByName(name)
		except KeyError:
			raise    # XXX on KeyError, raise nice error
		value = conv.xmlRead(attrs, content, font)
		if conv.repeat:
			seq = getattr(self, conv.name, None)
			if seq is None:
				seq = []
				setattr(self, conv.name, seq)
			seq.append(value)
		else:
			setattr(self, conv.name, value)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		self.ensureDecompiled()

		return cmp(self.__dict__, other.__dict__)


class FormatSwitchingBaseTable(BaseTable):

	"""Minor specialization of BaseTable, for tables that have multiple
	formats, eg. CoverageFormat1 vs. CoverageFormat2."""

	def getConverters(self):
		return self.converters[self.Format]

	def getConverterByName(self, name):
		return self.convertersByName[self.Format][name]

	def readFormat(self, reader):
		self.Format = reader.readUShort()
		assert self.Format <> 0, (self, reader.pos, len(reader.data))

	def writeFormat(self, writer):
		writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#

valueRecordFormat = [
#	Mask	 Name            isDevice  signed
	(0x0001, "XPlacement",   0,        1),
	(0x0002, "YPlacement",   0,        1),
	(0x0004, "XAdvance",     0,        1),
	(0x0008, "YAdvance",     0,        1),
	(0x0010, "XPlaDevice",   1,        0),
	(0x0020, "YPlaDevice",   1,        0),
	(0x0040, "XAdvDevice",   1,        0),
	(0x0080, "YAdvDevice",   1,        0),
#	reserved:
	(0x0100, "Reserved1",    0,        0),
	(0x0200, "Reserved2",    0,        0),
	(0x0400, "Reserved3",    0,        0),
	(0x0800, "Reserved4",    0,        0),
	(0x1000, "Reserved5",    0,        0),
	(0x2000, "Reserved6",    0,        0),
	(0x4000, "Reserved7",    0,        0),
	(0x8000, "Reserved8",    0,        0),
]

def _buildDict():
	d = {}
	for mask, name, isDevice, signed in valueRecordFormat:
		d[name] = mask, isDevice, signed
	return d

valueRecordFormatDict = _buildDict()
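
# Worked example (illustrative only): a ValueFormat of 0x0005 selects
# XPlacement (0x0001) and XAdvance (0x0004), so such a ValueRecord is stored
# as two signed shorts; ValueRecordFactory.setFormat(0x0005) below yields
# [("XPlacement", 0, 1), ("XAdvance", 0, 1)].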


class ValueRecordFactory:

	"""Given a format code, this object reads and writes ValueRecords."""

	def __init__(self, other=None):
		self.format = other.format if other else None

	def setFormat(self, valueFormat):
		format = []
		for mask, name, isDevice, signed in valueRecordFormat:
			if valueFormat & mask:
				format.append((name, isDevice, signed))
		self.format = format

	def readValueRecord(self, reader, font):
		format = self.format
		if not format:
			return None
		valueRecord = ValueRecord()
		for name, isDevice, signed in format:
			if signed:
				value = reader.readShort()
			else:
				value = reader.readUShort()
			if isDevice:
				if value:
					import otTables
					subReader = reader.getSubReader(value)
					value = getattr(otTables, name)()
					value.decompile(subReader, font)
				else:
					value = None
			setattr(valueRecord, name, value)
		return valueRecord

	def writeValueRecord(self, writer, font, valueRecord):
		for name, isDevice, signed in self.format:
			value = getattr(valueRecord, name, 0)
			if isDevice:
				if value:
					subWriter = writer.getSubWriter()
					writer.writeSubTable(subWriter)
					value.compile(subWriter, font)
				else:
					writer.writeUShort(0)
			elif signed:
				writer.writeShort(value)
			else:
				writer.writeUShort(value)


class ValueRecord:

	# see ValueRecordFactory

	def getFormat(self):
		format = 0
		for name in self.__dict__.keys():
			format = format | valueRecordFormatDict[name][0]
		return format

	def toXML(self, xmlWriter, font, valueName, attrs=None):
		if attrs is None:
			simpleItems = []
		else:
			simpleItems = list(attrs)
		for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
			if hasattr(self, name):
				simpleItems.append((name, getattr(self, name)))
		deviceItems = []
		for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
			if hasattr(self, name):
				device = getattr(self, name)
				if device is not None:
					deviceItems.append((name, device))
		if deviceItems:
			xmlWriter.begintag(valueName, simpleItems)
			xmlWriter.newline()
			for name, deviceRecord in deviceItems:
				if deviceRecord is not None:
					deviceRecord.toXML(xmlWriter, font)
			xmlWriter.endtag(valueName)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(valueName, simpleItems)
			xmlWriter.newline()

	def fromXML(self, (name, attrs, content), font):
		import otTables
		for k, v in attrs.items():
			setattr(self, k, int(v))
		for element in content:
			if type(element) <> TupleType:
				continue
			name, attrs, content = element
			value = getattr(otTables, name)()
			for elem2 in content:
				if type(elem2) <> TupleType:
					continue
				value.fromXML(elem2, font)
			setattr(self, name, value)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.__dict__, other.__dict__)
851