otBase.py revision 078b36325d7f64ae989e7db22e4910bf6550562d
from DefaultTable import DefaultTable
import otData
import struct
from types import TupleType

class OverflowErrorRecord:
	def __init__(self, overflowTuple):
		self.tableType = overflowTuple[0]
		self.LookupListIndex = overflowTuple[1]
		self.SubTableIndex = overflowTuple[2]
		self.itemName = overflowTuple[3]
		self.itemIndex = overflowTuple[4]

	def __repr__(self):
		return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))

class OTLOffsetOverflowError(Exception):
	def __init__(self, overflowErrorRecord):
		self.value = overflowErrorRecord

	def __str__(self):
		return repr(self.value)


class BaseTTXConverter(DefaultTable):

	"""Generic base class for TTX table converters. It functions as an
	adapter between the TTX (ttLib actually) table model and the model
	we use for OpenType tables, which is necessarily subtly different.
	"""

	def decompile(self, data, font):
		import otTables
		cachingStats = None if True else {}
		reader = OTTableReader(data, self.tableTag, cachingStats=cachingStats)
		tableClass = getattr(otTables, self.tableTag)
		self.table = tableClass()
		self.table.decompile(reader, font)
		if cachingStats:
			stats = [(v, k) for k, v in cachingStats.items()]
			stats.sort()
			stats.reverse()
			print "cachingStats for ", self.tableTag
			for v, k in stats:
				if v < 2:
					break
				print v, k
			print "---", len(stats)

	def compile(self, font):
		"""Create a top-level OTTableWriter for the GPOS/GSUB table.
			Call the compile method for the table
				for each 'converter' record in the table converter list
					call the converter's write method for each item in the value.
						- For simple items, the write method adds a string to the
						writer's self.items list.
						- For Struct/Table/Subtable items, it first adds a new writer
						to the writer's self.items, then calls the item's compile method.
						This creates a tree of writers, rooted at the GSUB/GPOS writer, with
						each writer representing a table, and the writer.items list containing
						the child data strings and writers.
			call the getAllData method
				call _doneWriting, which removes duplicates
				call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
				Traverse the flat list of tables, calling getDataLength on each to update their position
				Traverse the flat list of tables again, calling getData on each to get the data for the table, now that
				positions and offsets are known.

				If a lookup subtable overflows an offset, we have to start all over.
		"""
		writer = OTTableWriter(self.tableTag)
		writer.parent = None
		self.table.compile(writer, font)
		return writer.getAllData()
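	# Illustrative usage sketch (not itself part of the code path): in ttLib,
	# compile() is normally invoked indirectly when a font is saved, roughly:
	#
	#     from fontTools.ttLib import TTFont
	#     font = TTFont("Example.ttf")   # hypothetical font path
	#     gsub = font["GSUB"]            # a BaseTTXConverter subclass instance
	#     data = gsub.compile(font)      # binary table data; may raise
	#                                    # OTLOffsetOverflowError if a 16-bit
	#                                    # offset overflows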

	def toXML(self, writer, font):
		self.table.toXML2(writer, font)

	def fromXML(self, (name, attrs, content), font):
		import otTables
		if not hasattr(self, "table"):
			tableClass = getattr(otTables, self.tableTag)
			self.table = tableClass()
		self.table.fromXML((name, attrs, content), font)


class OTTableReader(object):

	"""Helper class to retrieve data from an OpenType table."""

	__slots__ = ('data', 'offset', 'pos', 'tableType', 'valueFormat', 'counts', 'cachingStats')

	def __init__(self, data, tableType, offset=0, valueFormat=None, counts=None, cachingStats=None):
		self.data = data
		self.offset = offset
		self.pos = offset
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = [None, None]
		self.counts = counts
		self.valueFormat = valueFormat
		self.cachingStats = cachingStats

	def getSubReader(self, offset, persistent=False):
		offset = self.offset + offset
		if self.cachingStats is not None:
			try:
				self.cachingStats[offset] = self.cachingStats[offset] + 1
			except KeyError:
				self.cachingStats[offset] = 1

		subReader = self.__class__(self.data, self.tableType, offset,
			self.valueFormat, self.counts, self.cachingStats)
		return subReader
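	# OpenType offsets are relative to the start of the table that contains
	# them, so a sub-reader starts at self.offset + offset and shares the same
	# underlying data string.  For illustration, a subtable offset of 0x20
	# read from a table that itself starts at absolute offset 0x100 yields a
	# sub-reader positioned at absolute offset 0x120.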

	def readUShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">H", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">h", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readLong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">l", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readULong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">L", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readTag(self):
		pos = self.pos
		newpos = pos + 4
		value = self.data[pos:newpos]
		assert len(value) == 4
		self.pos = newpos
		return value

	def readStruct(self, format, size=None):
		if size is None:
			size = struct.calcsize(format)
		else:
			assert size == struct.calcsize(format)
		pos = self.pos
		newpos = pos + size
		values = struct.unpack(format, self.data[pos:newpos])
		self.pos = newpos
		return values

	def setValueFormat(self, format, which):
		self.valueFormat[which] = ValueRecordFactory(format)

	def readValueRecord(self, font, which):
		return self.valueFormat[which].readValueRecord(self, font)

	def setCount(self, name, value):
		self.counts = self.counts.copy() if self.counts else dict()
		self.counts[name] = value

	def getCount(self, name):
		return self.counts[name]
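	# Note that setCount() copies self.counts before adding to it, so a count
	# set on one reader never leaks back into the parent reader or into
	# sibling sub-readers that were handed the same dict by getSubReader().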


class OTTableWriter(object):

	"""Helper class to gather and assemble data for OpenType tables."""

	def __init__(self, tableType, valueFormat=None, counts=None):
		self.items = []
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = [None, None]
		self.valueFormat = valueFormat
		self.counts = counts
		self.pos = None

	def setValueFormat(self, format, which):
		self.valueFormat[which] = ValueRecordFactory(format)

	def setCount(self, name, value):
		self.counts = self.counts.copy() if self.counts else dict()
		self.counts[name] = value

	def getCount(self, name):
		return self.counts[name]

	# assembler interface

	def getAllData(self):
		"""Assemble all data, including all subtables."""
		self._doneWriting()
		tables, extTables = self._gatherTables()
		tables.reverse()
		extTables.reverse()
		# Gather all data in two passes: the absolute positions of all
		# subtables are needed before the actual data can be assembled.
		pos = 0
		for table in tables:
			table.pos = pos
			pos = pos + table.getDataLength()

		for table in extTables:
			table.pos = pos
			pos = pos + table.getDataLength()

		data = []
		for table in tables:
			tableData = table.getData()
			data.append(tableData)

		for table in extTables:
			tableData = table.getData()
			data.append(tableData)

		return "".join(data)
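	# For illustration: if _gatherTables() produced three writers whose
	# getDataLength() values are 10, 6 and 8, the first pass assigns them
	# pos 0, 10 and 16, and getData() can then emit each offset as
	# item.pos - self.pos.  Writers flagged as "Extension" collect their
	# subtables in extTables, which are laid out after all regular tables
	# and reached through 32-bit offsets (see getDataLength/getData).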

	def getDataLength(self):
		"""Return the length of this table in bytes, without subtables."""
		l = 0
		if hasattr(self, "Extension"):
			longOffset = 1
		else:
			longOffset = 0
		for item in self.items:
			if hasattr(item, "getData") or hasattr(item, "getCountData"):
				if longOffset:
					l = l + 4  # sizeof(ULong)
				else:
					l = l + 2  # sizeof(UShort)
			else:
				l = l + len(item)
		return l
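	# Subtable writers and CountReference items contribute only the size of
	# the offset/count field they will occupy (2 bytes, or 4 bytes when this
	# writer is flagged as "Extension"); plain data strings contribute their
	# length.  The 2-byte case is the source of the 64K offset limit that
	# getData() has to guard against.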

	def getData(self):
		"""Assemble the data for this writer/table, without subtables."""
		items = list(self.items)  # make a shallow copy
		if hasattr(self, "Extension"):
			longOffset = 1
		else:
			longOffset = 0
		pos = self.pos
		numItems = len(items)
		for i in range(numItems):
			item = items[i]

			if hasattr(item, "getData"):
				if longOffset:
					items[i] = packULong(item.pos - pos)
				else:
					try:
						items[i] = packUShort(item.pos - pos)
					except AssertionError:
						# provide data to fix the overflow problem.
						# If the overflow is to a lookup, or from a lookup to a subtable,
						# just report the current item.
						if self.name in ['LookupList', 'Lookup']:
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						else:
							# The overflow is within a subtable. Life is more complicated.
							# If we split the subtable just before the current item, we may still suffer overflow.
							# This is because duplicate table merging is done only within an Extension subtable tree;
							# when we split the subtable in two, some items may no longer be duplicates.
							# Get the worst case by adding up all the item lengths, depth first,
							# and then report the first item that overflows a short.
							def getDeepItemLength(table):
								if hasattr(table, "getDataLength"):
									length = 0
									for item in table.items:
										length = length + getDeepItemLength(item)
								else:
									length = len(table)
								return length

							length = self.getDataLength()
							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
								# Coverage is first in the item list, but last in the table list;
								# the original overflow is really in the item list. Skip the Coverage
								# table in the following test.
								items = items[i+1:]

							for j in range(len(items)):
								item = items[j]
								length = length + getDeepItemLength(item)
								if length > 65535:
									break
							overflowErrorRecord = self.getOverflowErrorRecord(item)

						raise OTLOffsetOverflowError, overflowErrorRecord

		return "".join(items)

	def __hash__(self):
		# only works after self._doneWriting() has been called
		return hash(self.items)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.items, other.items)

	def _doneWriting(self, internedTables=None):
		# Convert CountData references to data string items
		# collapse duplicate table references to a unique entry
		# "tables" are OTTableWriter objects.

		# For Extension Lookup types, we can
		# eliminate duplicates only within the tree under the Extension Lookup,
		# as offsets may exceed 64K even between Extension LookupTable subtables.
		if internedTables is None:
			internedTables = {}
		items = self.items
		iRange = range(len(items))

		if hasattr(self, "Extension"):
			newTree = 1
		else:
			newTree = 0
		for i in iRange:
			item = items[i]
			if hasattr(item, "getCountData"):
				items[i] = item.getCountData()
			elif hasattr(item, "getData"):
				if newTree:
					item._doneWriting()
				else:
					item._doneWriting(internedTables)
					if internedTables.has_key(item):
						items[i] = item = internedTables[item]
					else:
						internedTables[item] = item
		self.items = tuple(items)
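	# Interning works because __hash__ and __cmp__ above compare writers by
	# their (already flattened) items tuples: two subtable writers that
	# produced identical data compare equal, so the later one is replaced by
	# the entry already in internedTables and only one copy of the data ends
	# up in the font.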

	def _gatherTables(self, tables=None, extTables=None, done=None):
		# Convert table references in self.items tree to a flat
		# list of tables in depth-first traversal order.
		# "tables" are OTTableWriter objects.
		# We do the traversal in reverse order at each level, in order to
		# resolve duplicate references to be the last reference in the list of tables.
		# For extension lookups, duplicate references can be merged only within the
		# writer tree under the extension lookup.
		if tables is None: # init call for first time.
			tables = []
			extTables = []
			done = {}

		done[self] = 1

		numItems = len(self.items)
		iRange = range(numItems)
		iRange.reverse()

		if hasattr(self, "Extension"):
			appendExtensions = 1
		else:
			appendExtensions = 0

		# add Coverage table if it is sorted last.
		sortCoverageLast = 0
		if hasattr(self, "sortCoverageLast"):
			# Find the Coverage table
			for i in range(numItems):
				item = self.items[i]
				if hasattr(item, "name") and (item.name == "Coverage"):
					sortCoverageLast = 1
					break
			if not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		saveItem = None
		for i in iRange:
			item = self.items[i]
			if not hasattr(item, "getData"):
				continue

			if sortCoverageLast and (i==1) and item.name == 'Coverage':
				# we've already 'gathered' it above
				continue

			if appendExtensions:
				assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extension subtables"
				newDone = {}
				item._gatherTables(extTables, None, newDone)

			elif not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		tables.append(self)
		return tables, extTables

	# interface for gathering data, as used by table.compile()

	def getSubWriter(self):
		subwriter = self.__class__(self.tableType, self.valueFormat, self.counts)
		# Because some subtables have identical values, we discard the
		# duplicates under the getAllData method. Hence some subtable writers
		# can have more than one parent writer.
		subwriter.parent = {0:self}
		return subwriter

	def writeUShort(self, value):
		assert 0 <= value < 0x10000
		self.items.append(struct.pack(">H", value))

	def writeShort(self, value):
		self.items.append(struct.pack(">h", value))

	def writeLong(self, value):
		self.items.append(struct.pack(">l", value))

	def writeULong(self, value):
		self.items.append(struct.pack(">L", value))

	def writeTag(self, tag):
		assert len(tag) == 4
		self.items.append(tag)

	def writeSubTable(self, subWriter):
		self.items.append(subWriter)

	def writeCountReference(self, table, name):
		ref = CountReference(table, name)
		self.items.append(ref)
		self.setCount(name, ref)
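	# How counts get filled in: writeCountReference() emits a CountReference
	# placeholder (see the CountReference class below) instead of a packed
	# UShort.  When the corresponding array is compiled, BaseTable.compile()
	# calls setValue() on the placeholder with the real length, and
	# _doneWriting() later turns it into packed data via getCountData().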

	def writeStruct(self, format, values):
		data = struct.pack(format, *values)
		self.items.append(data)

	def writeData(self, data):
		self.items.append(data)

	def writeValueRecord(self, value, font, which):
		return self.valueFormat[which].writeValueRecord(self, font, value)

	def getOverflowErrorRecord(self, item):
		LookupListIndex = SubTableIndex = itemName = itemIndex = None
		if self.name == 'LookupList':
			LookupListIndex = item.repeatIndex
		elif self.name == 'Lookup':
			LookupListIndex = self.repeatIndex
			SubTableIndex = item.repeatIndex
		else:
			itemName = item.name
			if hasattr(item, 'repeatIndex'):
				itemIndex = item.repeatIndex
			if self.name == 'SubTable':
				LookupListIndex = self.parent[0].repeatIndex
				SubTableIndex = self.repeatIndex
			elif self.name == 'ExtSubTable':
				LookupListIndex = self.parent[0].parent[0].repeatIndex
				SubTableIndex = self.parent[0].repeatIndex
			else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
				itemName = ".".join([self.name, item.name])
				p1 = self.parent[0]
				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
					itemName = ".".join([p1.name, item.name])
					p1 = p1.parent[0]
				if p1:
					if p1.name == 'ExtSubTable':
						LookupListIndex = self.parent[0].parent[0].repeatIndex
						SubTableIndex = self.parent[0].repeatIndex
					else:
						LookupListIndex = self.parent[0].repeatIndex
						SubTableIndex = self.repeatIndex

		return OverflowErrorRecord( (self.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) )


class CountReference:
	"""A reference to a Count value, not a count of references."""
	def __init__(self, table, name):
		self.table = table
		self.name = name
	def setValue(self, value):
		table = self.table
		name = self.name
		if table[name] is None:
			table[name] = value
		else:
			assert table[name] == value, (table[name], value)
	def getCountData(self):
		return packUShort(self.table[self.name])


def packUShort(value):
	assert 0 <= value < 0x10000, value
	return struct.pack(">H", value)


def packULong(value):
	assert 0 <= value < 0x100000000, value
	return struct.pack(">L", value)


class BaseTable(object):

	"""Generic base class for all OpenType (sub)tables."""

	def __init__(self):
		# compileStatus:
		#   0 means the table was created
		#   1 means the table.read() function was called by a table which is
		#     subject to delayed decompilation
		#   2 means that it was subject to delayed decompilation, and has been
		#     decompiled
		#   3 means that the start and end fields have been filled out, and that
		#     we can use the data string rather than compiling from the table data.
		self.compileStatus = 0

		self.recurse = 0

	def __getattr__(self, attr):
		# we get here only when the table does not have the attribute.
		# This method override exists so that we can try to de-compile
		# a table which is subject to delayed decompilation, and then try
		# to get the value again after decompilation.
		self.recurse += 1
		if self.recurse > 2:
			# shouldn't ever get here - we should only get to two levels of recursion.
			# this guards against self.decompile NOT setting compileStatus to other than 1.
			raise AttributeError, attr
		if self.compileStatus == 1:
			self.ensureDecompiled()
			val = getattr(self, attr)
			self.recurse -= 1
			return val

		raise AttributeError, attr
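	# Lazy decompilation in practice: a table whose compileStatus is 1 has had
	# a reader and font stashed on it (self.reader, self.font) by whatever
	# deferred the decompilation.  The first attribute access lands in
	# __getattr__ above; ensureDecompiled() then runs decompile() with the
	# stored reader and font (which sets compileStatus to 2) and the attribute
	# lookup is retried.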

	def getConverters(self):
		return self.converters

	def getConverterByName(self, name):
		return self.convertersByName[name]

	def decompile(self, reader, font):
		self.compileStatus = 2 # table has been decompiled.
		self.readFormat(reader)
		table = {}
		self.__rawTable = table  # for debugging
		converters = self.getConverters()
		for conv in converters:
			if conv.name == "SubTable":
				conv = conv.getConverter(reader.tableType,
						table["LookupType"])
			if conv.name == "ExtSubTable":
				conv = conv.getConverter(reader.tableType,
						table["ExtensionLookupType"])
			if conv.repeat:
				l = []
				for i in range(reader.getCount(conv.repeat) + conv.repeatOffset):
					l.append(conv.read(reader, font, table))
				table[conv.name] = l
			else:
				table[conv.name] = conv.read(reader, font, table)
				if conv.isCount or conv.isSize:
					reader.setCount(conv.name, table[conv.name])

		self.postRead(table, font)

		del self.__rawTable  # succeeded, get rid of debugging info
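	# The converter list drives decompilation: each converter reads one value,
	# except that a converter with conv.repeat set reads
	# getCount(conv.repeat) + conv.repeatOffset values, and Count/size fields
	# are registered on the reader via setCount() so that the repeat
	# converters that follow can find them.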

	def ensureDecompiled(self):
		if self.compileStatus != 1:
			return
		self.decompile(self.reader, self.font)
		del self.reader, self.font

	def preCompile(self):
		pass # used only by the LookupList class

	def compile(self, writer, font):
		table = self.preWrite(font)

		if hasattr(self, 'sortCoverageLast'):
			writer.sortCoverageLast = 1

		self.writeFormat(writer)
		for conv in self.getConverters():
			value = table.get(conv.name)
			if conv.repeat:
				if value is None:
					value = []
				writer.getCount(conv.repeat).setValue(len(value) - conv.repeatOffset)
				for i in range(len(value)):
					conv.write(writer, font, table, value[i], i)
			elif conv.isCount:
				# Special-case Count values.
				# Assumption: a Count field will *always* precede
				# the actual array.
				# We need a default value, as it may be set later by a nested
				# table. We will later store it here.
				table[conv.name] = None
				# We add a reference: by the time the data is assembled
				# the Count value will be filled in.
				writer.writeCountReference(table, conv.name)
			else:
				conv.write(writer, font, table, value)
				if conv.isSize:
					writer.setCount(conv.name, value)

	def readFormat(self, reader):
		pass

	def writeFormat(self, writer):
		pass

	def postRead(self, table, font):
		self.__dict__.update(table)

	def preWrite(self, font):
		self.ensureDecompiled()
		return self.__dict__.copy()

	def toXML(self, xmlWriter, font, attrs=None):
		tableName = self.__class__.__name__
		if attrs is None:
			attrs = []
		if hasattr(self, "Format"):
			attrs = attrs + [("Format", self.Format)]
		xmlWriter.begintag(tableName, attrs)
		xmlWriter.newline()
		self.toXML2(xmlWriter, font)
		xmlWriter.endtag(tableName)
		xmlWriter.newline()

	def toXML2(self, xmlWriter, font):
		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
		# This is because in TTX our parent writes our main tag, and in otBase.py we
		# do it ourselves. I think I'm getting schizophrenic...
		for conv in self.getConverters():
			value = getattr(self, conv.name)
			if conv.repeat:
				for i in range(len(value)):
					item = value[i]
					conv.xmlWrite(xmlWriter, font, item, conv.name,
							[("index", i)])
			else:
				conv.xmlWrite(xmlWriter, font, value, conv.name, [])

	def fromXML(self, (name, attrs, content), font):
		try:
			conv = self.getConverterByName(name)
		except KeyError:
			raise    # XXX on KeyError, raise nice error
		value = conv.xmlRead(attrs, content, font)
		if conv.repeat:
			seq = getattr(self, conv.name, None)
			if seq is None:
				seq = []
				setattr(self, conv.name, seq)
			seq.append(value)
		else:
			setattr(self, conv.name, value)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		self.ensureDecompiled()

		return cmp(self.__dict__, other.__dict__)


class FormatSwitchingBaseTable(BaseTable):

	"""Minor specialization of BaseTable, for tables that have multiple
	formats, e.g. CoverageFormat1 vs. CoverageFormat2."""

	def getConverters(self):
		return self.converters[self.Format]

	def getConverterByName(self, name):
		return self.convertersByName[self.Format][name]

	def readFormat(self, reader):
		self.Format = reader.readUShort()
		assert self.Format != 0, (self, reader.pos, len(reader.data))

	def writeFormat(self, writer):
		writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#

valueRecordFormat = [
#	Mask	 Name            isDevice  signed
	(0x0001, "XPlacement",   0,        1),
	(0x0002, "YPlacement",   0,        1),
	(0x0004, "XAdvance",     0,        1),
	(0x0008, "YAdvance",     0,        1),
	(0x0010, "XPlaDevice",   1,        0),
	(0x0020, "YPlaDevice",   1,        0),
	(0x0040, "XAdvDevice",   1,        0),
	(0x0080, "YAdvDevice",   1,        0),
#	reserved:
	(0x0100, "Reserved1",    0,        0),
	(0x0200, "Reserved2",    0,        0),
	(0x0400, "Reserved3",    0,        0),
	(0x0800, "Reserved4",    0,        0),
	(0x1000, "Reserved5",    0,        0),
	(0x2000, "Reserved6",    0,        0),
	(0x4000, "Reserved7",    0,        0),
	(0x8000, "Reserved8",    0,        0),
]
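# For example, a ValueFormat of 0x0005 (0x0001 | 0x0004) describes a
# ValueRecord that carries only XPlacement and XAdvance: two signed 16-bit
# fields and no device tables.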

def _buildDict():
	d = {}
	for mask, name, isDevice, signed in valueRecordFormat:
		d[name] = mask, isDevice, signed
	return d

valueRecordFormatDict = _buildDict()


class ValueRecordFactory:

	"""Given a format code, this object converts ValueRecords."""

	def __init__(self, valueFormat):
		format = []
		for mask, name, isDevice, signed in valueRecordFormat:
			if valueFormat & mask:
				format.append((name, isDevice, signed))
		self.format = format

	def readValueRecord(self, reader, font):
		format = self.format
		if not format:
			return None
		valueRecord = ValueRecord()
		for name, isDevice, signed in format:
			if signed:
				value = reader.readShort()
			else:
				value = reader.readUShort()
			if isDevice:
				if value:
					import otTables
					subReader = reader.getSubReader(value)
					value = getattr(otTables, name)()
					value.decompile(subReader, font)
				else:
					value = None
			setattr(valueRecord, name, value)
		return valueRecord

	def writeValueRecord(self, writer, font, valueRecord):
		for name, isDevice, signed in self.format:
			value = getattr(valueRecord, name, 0)
			if isDevice:
				if value:
					subWriter = writer.getSubWriter()
					writer.writeSubTable(subWriter)
					value.compile(subWriter, font)
				else:
					writer.writeUShort(0)
			elif signed:
				writer.writeShort(value)
			else:
				writer.writeUShort(value)


class ValueRecord:

	# see ValueRecordFactory

	def getFormat(self):
		format = 0
		for name in self.__dict__.keys():
			format = format | valueRecordFormatDict[name][0]
		return format

	def toXML(self, xmlWriter, font, valueName, attrs=None):
		if attrs is None:
			simpleItems = []
		else:
			simpleItems = list(attrs)
		for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
			if hasattr(self, name):
				simpleItems.append((name, getattr(self, name)))
		deviceItems = []
		for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
			if hasattr(self, name):
				device = getattr(self, name)
				if device is not None:
					deviceItems.append((name, device))
		if deviceItems:
			xmlWriter.begintag(valueName, simpleItems)
			xmlWriter.newline()
			for name, deviceRecord in deviceItems:
				if deviceRecord is not None:
					deviceRecord.toXML(xmlWriter, font)
			xmlWriter.endtag(valueName)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(valueName, simpleItems)
			xmlWriter.newline()

	def fromXML(self, (name, attrs, content), font):
		import otTables
		for k, v in attrs.items():
			setattr(self, k, int(v))
		for element in content:
			if type(element) != TupleType:
				continue
			name, attrs, content = element
			value = getattr(otTables, name)()
			for elem2 in content:
				if type(elem2) != TupleType:
					continue
				value.fromXML(elem2, font)
			setattr(self, name, value)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.__dict__, other.__dict__)
847