otBase.py revision d58c38dc3660f764b659ec13fcbc6e54c1ec2078
from DefaultTable import DefaultTable
import otData
import struct
from types import TupleType

class OverflowErrorRecord:
	def __init__(self, overflowTuple):
		self.tableType = overflowTuple[0]
		self.LookupListIndex = overflowTuple[1]
		self.SubTableIndex = overflowTuple[2]
		self.itemName = overflowTuple[3]
		self.itemIndex = overflowTuple[4]

	def __repr__(self):
		return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))

class OTLOffsetOverflowError(Exception):
	def __init__(self, overflowErrorRecord):
		self.value = overflowErrorRecord

	def __str__(self):
		return repr(self.value)

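# Illustrative sketch (not part of this module's API; 'font' is assumed to be
# a ttLib TTFont): a caller driving compilation can catch the overflow,
# inspect the OverflowErrorRecord and restructure the offending lookup or
# subtable before compiling again.
#
#	try:
#		data = font["GPOS"].compile(font)
#	except OTLOffsetOverflowError, e:
#		record = e.value  # an OverflowErrorRecord instance
#		print record.tableType, record.LookupListIndex, record.SubTableIndex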

class BaseTTXConverter(DefaultTable):

	"""Generic base class for TTX table converters. It functions as an
	adapter between the TTX (ttLib actually) table model and the model
	we use for OpenType tables, which is necessarily subtly different.
	"""

	def decompile(self, data, font):
		import otTables
		cachingStats = None if True else {}
		reader = OTTableReader(data, self.tableTag, cachingStats=cachingStats)
		tableClass = getattr(otTables, self.tableTag)
		self.table = tableClass()
		self.table.decompile(reader, font)
		if cachingStats:
			stats = [(v, k) for k, v in cachingStats.items()]
			stats.sort()
			stats.reverse()
			print "caching stats for ", self.tableTag
			for v, k in stats:
				if v < 2:
					break
				print v, k
			print "---", len(stats)

	def compile(self, font):
		""" Create a top-level OTTableWriter for the GPOS/GSUB table.
			Call the compile method for the table
				for each 'converter' record in the table converter list
					call converter's write method for each item in the value.
						- For simple items, the write method adds a string to the
						writer's self.items list.
						- For Struct/Table/Subtable items, it first adds a new writer
						to the writer's self.items, then calls the item's compile method.
						This creates a tree of writers, rooted at the GSUB/GPOS writer, with
						each writer representing a table, and the writer.items list containing
						the child data strings and writers.
			call the getAllData method
				call _doneWriting, which removes duplicates
				call _gatherTables. This traverses the tables, adding unique occurrences to a flat list of tables
				Traverse the flat list of tables, calling getDataLength on each to update their position
				Traverse the flat list of tables again, calling getData on each to get the data in the table, now that
				positions and offsets are known.

				If a lookup subtable overflows an offset, we have to start all over.
		"""
		writer = OTTableWriter(self.tableTag)
		writer.parent = None
		self.table.compile(writer, font)
		return writer.getAllData()

	def toXML(self, writer, font):
		self.table.toXML2(writer, font)

	def fromXML(self, (name, attrs, content), font):
		import otTables
		if not hasattr(self, "table"):
			tableClass = getattr(otTables, self.tableTag)
			self.table = tableClass()
		self.table.fromXML((name, attrs, content), font)


class OTTableReader(object):

	"""Helper class to retrieve data from an OpenType table."""

	__slots__ = ('data', 'offset', 'pos', 'tableType', 'valueFormat', 'counts', 'cachingStats')

	def __init__(self, data, tableType, offset=0, valueFormat=None, counts=None, cachingStats=None):
		self.data = data
		self.offset = offset
		self.pos = offset
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = [None, None]
		self.counts = counts
		self.valueFormat = valueFormat
		self.cachingStats = cachingStats

	def getSubReader(self, offset):
		offset = self.offset + offset
		if self.cachingStats is not None:
			self.cachingStats[offset] = self.cachingStats.get(offset, 0) + 1

		subReader = self.__class__(self.data, self.tableType, offset,
			self.valueFormat, self.counts, self.cachingStats)
		return subReader

	def readUShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">H", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">h", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readLong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">l", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readULong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">L", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readTag(self):
		pos = self.pos
		newpos = pos + 4
		value = self.data[pos:newpos]
		assert len(value) == 4
		self.pos = newpos
		return value

	def setValueFormat(self, format, which):
		self.valueFormat[which] = ValueRecordFactory(format)

	def readValueRecord(self, font, which):
		return self.valueFormat[which].readValueRecord(self, font)

	def setCount(self, name, value):
		self.counts = self.counts.copy() if self.counts else dict()
		self.counts[name] = value

	def getCount(self, name):
		return self.counts[name]

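# Illustrative sketch (not how the otData/otConverters machinery is driven,
# just a manual walk over raw table data): the reader advances sequentially,
# and getSubReader() is used to follow an offset read from the data.
#
#	reader = OTTableReader(data, "GSUB")
#	version = reader.readULong()                        # e.g. 0x00010000
#	scriptListReader = reader.getSubReader(reader.readUShort())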

class OTTableWriter(object):

	"""Helper class to gather and assemble data for OpenType tables."""
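	# Descriptive note: table.compile() fills a writer by calling the write*()
	# methods and writeSubTable() below, producing a tree of writers whose
	# self.items lists hold data strings, sub-writers and CountReference
	# objects. getAllData() then deduplicates that tree (_doneWriting),
	# flattens it in depth-first order (_gatherTables), assigns byte positions
	# and finally joins everything into the binary table data.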

	def __init__(self, tableType, valueFormat=None, counts=None):
		self.items = []
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = [None, None]
		self.valueFormat = valueFormat
		self.counts = counts
		self.pos = None

	def setValueFormat(self, format, which):
		self.valueFormat[which] = ValueRecordFactory(format)

	def setCount(self, name, value):
		self.counts = self.counts.copy() if self.counts else dict()
		self.counts[name] = value

	def getCount(self, name):
		return self.counts[name]

	# assembler interface

	def getAllData(self):
		"""Assemble all data, including all subtables."""
		self._doneWriting()
		tables, extTables = self._gatherTables()
		tables.reverse()
		extTables.reverse()
		# Gather all data in two passes: the absolute positions of all
		# subtables are needed before the actual data can be assembled.
		pos = 0
		for table in tables:
			table.pos = pos
			pos = pos + table.getDataLength()

		for table in extTables:
			table.pos = pos
			pos = pos + table.getDataLength()


		data = []
		for table in tables:
			tableData = table.getData()
			data.append(tableData)

		for table in extTables:
			tableData = table.getData()
			data.append(tableData)

		return "".join(data)

	def getDataLength(self):
		"""Return the length of this table in bytes, without subtables."""
		l = 0
		for item in self.items:
			if hasattr(item, "getData") or hasattr(item, "getCountData"):
				if self.longOffset:
					l = l + 4  # sizeof(ULong)
				else:
					l = l + 2  # sizeof(UShort)
			else:
				l = l + len(item)
		return l

	def getData(self):
		"""Assemble the data for this writer/table, without subtables."""
		items = list(self.items)  # make a shallow copy
		pos = self.pos
		numItems = len(items)
		for i in range(numItems):
			item = items[i]

			if hasattr(item, "getData"):
				if self.longOffset:
					items[i] = packULong(item.pos - pos)
				else:
					try:
						items[i] = packUShort(item.pos - pos)
					except AssertionError:
						# provide data to fix overflow problem.
						# If the overflow is to a lookup, or from a lookup to a subtable,
						# just report the current item.
						if self.name in ['LookupList', 'Lookup']:
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						else:
							# overflow is within a subTable. Life is more complicated.
							# If we split the sub-table just before the current item, we may still suffer overflow.
							# This is because duplicate table merging is done only within an Extension subTable tree;
							# when we split the subtable in two, some items may no longer be duplicates.
							# Get the worst case by adding up all the item lengths (depth-first traversal),
							# and then report the first item that overflows a short.
							def getDeepItemLength(table):
								if hasattr(table, "getDataLength"):
									length = 0
									for item in table.items:
										length = length + getDeepItemLength(item)
								else:
									length = len(table)
								return length

							length = self.getDataLength()
							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
								# Coverage is first in the item list, but last in the table list.
								# The original overflow is really in the item list. Skip the Coverage
								# table in the following test.
								items = items[i+1:]

							for j in range(len(items)):
								item = items[j]
								length = length + getDeepItemLength(item)
								if length > 65535:
									break
						overflowErrorRecord = self.getOverflowErrorRecord(item)


						raise OTLOffsetOverflowError, overflowErrorRecord

		return "".join(items)

	def __hash__(self):
		# only works after self._doneWriting() has been called
		return hash(self.items)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.items, other.items)

	def _doneWriting(self, internedTables=None):
		# Convert CountData references to data string items
		# collapse duplicate table references to a unique entry
		# "tables" are OTTableWriter objects.

		# For Extension Lookup types, we can
		# eliminate duplicates only within the tree under the Extension Lookup,
		# as offsets may exceed 64K even between Extension LookupTable subtables.
		if internedTables is None:
			internedTables = {}
		items = self.items
		iRange = range(len(items))

		if hasattr(self, "Extension"):
			newTree = 1
		else:
			newTree = 0
		for i in iRange:
			item = items[i]
			if hasattr(item, "getCountData"):
				items[i] = item.getCountData()
			elif hasattr(item, "getData"):
				if newTree:
					item._doneWriting()
				else:
					item._doneWriting(internedTables)
					if internedTables.has_key(item):
						items[i] = item = internedTables[item]
					else:
						internedTables[item] = item
		self.items = tuple(items)

	def _gatherTables(self, tables=None, extTables=None, done=None):
		# Convert table references in self.items tree to a flat
		# list of tables in depth-first traversal order.
		# "tables" are OTTableWriter objects.
		# We do the traversal in reverse order at each level, in order to
		# resolve duplicate references to be the last reference in the list of tables.
		# For extension lookups, duplicate references can be merged only within the
		# writer tree under the extension lookup.
		if tables is None: # init call for first time.
			tables = []
			extTables = []
			done = {}

		done[self] = 1

		numItems = len(self.items)
		iRange = range(numItems)
		iRange.reverse()

		if hasattr(self, "Extension"):
			appendExtensions = 1
		else:
			appendExtensions = 0

		# add Coverage table if it is sorted last.
		sortCoverageLast = 0
		if hasattr(self, "sortCoverageLast"):
			# Find coverage table
			for i in range(numItems):
				item = self.items[i]
				if hasattr(item, "name") and (item.name == "Coverage"):
					sortCoverageLast = 1
					break
			if not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		saveItem = None
		for i in iRange:
			item = self.items[i]
			if not hasattr(item, "getData"):
				continue

			if sortCoverageLast and (i==1) and item.name == 'Coverage':
				# we've already 'gathered' it above
				continue

			if appendExtensions:
				assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extension subtables"
				newDone = {}
				item._gatherTables(extTables, None, newDone)

			elif not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self


		tables.append(self)
		return tables, extTables

	# interface for gathering data, as used by table.compile()

	def getSubWriter(self):
		subwriter = self.__class__(self.tableType, self.valueFormat, self.counts)
		subwriter.parent = {0:self} # because some subtables have identical values, we discard
									# the duplicates under the getAllData method. Hence some
									# subtable writers can have more than one parent writer.
		return subwriter

	def writeUShort(self, value):
		assert 0 <= value < 0x10000
		self.items.append(struct.pack(">H", value))

	def writeShort(self, value):
		self.items.append(struct.pack(">h", value))

	def writeLong(self, value):
		self.items.append(struct.pack(">l", value))

	def writeULong(self, value):
		self.items.append(struct.pack(">L", value))

	def writeTag(self, tag):
		assert len(tag) == 4
		self.items.append(tag)

	def writeSubTable(self, subWriter):
		self.items.append(subWriter)

	def writeCountReference(self, table, name):
		ref = CountReference(table, name)
		self.items.append(ref)
		return ref

	def writeStruct(self, format, values):
		data = apply(struct.pack, (format,) + values)
		self.items.append(data)

	def writeData(self, data):
		self.items.append(data)

	def writeValueRecord(self, value, font, which):
		return self.valueFormat[which].writeValueRecord(self, font, value)

	def getOverflowErrorRecord(self, item):
		LookupListIndex = SubTableIndex = itemName = itemIndex = None
		if self.name == 'LookupList':
			LookupListIndex = item.repeatIndex
		elif self.name == 'Lookup':
			LookupListIndex = self.repeatIndex
			SubTableIndex = item.repeatIndex
		else:
			itemName = item.name
			if hasattr(item, 'repeatIndex'):
				itemIndex = item.repeatIndex
			if self.name == 'SubTable':
				LookupListIndex = self.parent[0].repeatIndex
				SubTableIndex = self.repeatIndex
			elif self.name == 'ExtSubTable':
				LookupListIndex = self.parent[0].parent[0].repeatIndex
				SubTableIndex = self.parent[0].repeatIndex
			else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
				itemName = ".".join([self.name, item.name])
				p1 = self.parent[0]
				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
					itemName = ".".join([p1.name, item.name])
					p1 = p1.parent[0]
				if p1:
					if p1.name == 'ExtSubTable':
						LookupListIndex = self.parent[0].parent[0].repeatIndex
						SubTableIndex = self.parent[0].repeatIndex
					else:
						LookupListIndex = self.parent[0].repeatIndex
						SubTableIndex = self.repeatIndex

		return OverflowErrorRecord( (self.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) )


class CountReference:
	"""A reference to a Count value, not a count of references."""
	def __init__(self, table, name):
		self.table = table
		self.name = name
	def setValue(self, value):
		table = self.table
		name = self.name
		if table[name] is None:
			table[name] = value
		else:
			assert table[name] == value, (table[name], value)
	def getCountData(self):
		return packUShort(self.table[self.name])


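# Note: packUShort's assert doubles as the offset-overflow signal;
# OTTableWriter.getData() catches the AssertionError it raises and converts
# it into an OTLOffsetOverflowError.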
def packUShort(value):
	assert 0 <= value < 0x10000, value
	return struct.pack(">H", value)


def packULong(value):
	assert 0 <= value < 0x100000000, value
	return struct.pack(">L", value)


class BaseTable(object):

	"""Generic base class for all OpenType (sub)tables."""

	def __init__(self):
		self.compileStatus = 0 # 0 means table was created
									# 1 means the table.read() function was called by a table which is subject
									# to delayed decompilation
									# 2 means that it was subject to delayed decompilation, and
									# has been decompiled
									# 3 means that the start and end fields have been filled out, and that we
									# can use the data string rather than compiling from the table data.

		self.recurse = 0

	def __getattr__(self, attr):
		# we get here only when the table does not have the attribute.
		# This method override exists so that we can try to de-compile
		# a table which is subject to delayed decompilation, and then try
		# to get the value again after decompilation.
		self.recurse += 1
		if self.recurse > 2:
			# shouldn't ever get here - we should only get to two levels of recursion.
			# this guards against self.decompile failing to set compileStatus to something other than 1.
			raise AttributeError, attr
		if self.compileStatus == 1:
			self.ensureDecompiled()
			val = getattr(self, attr)
			self.recurse -= 1
			return val

		raise AttributeError, attr
	def getConverters(self):
		return self.converters

	def getConverterByName(self, name):
		return self.convertersByName[name]

	def decompile(self, reader, font):
		self.compileStatus = 2 # table has been decompiled.
		self.readFormat(reader)
		table = {}
		self.__rawTable = table  # for debugging
		converters = self.getConverters()
		for conv in converters:
			if conv.name == "SubTable":
				conv = conv.getConverter(reader.tableType,
						table["LookupType"])
			if conv.name == "ExtSubTable":
				conv = conv.getConverter(reader.tableType,
						table["ExtensionLookupType"])
			if conv.repeat:
				l = []
				if conv.repeat in table:
					countValue = table[conv.repeat]
				else:
					# conv.repeat is a propagated count
					countValue = reader.getCount(conv.repeat)
				for i in range(countValue + conv.repeatOffset):
					l.append(conv.read(reader, font, table))
				table[conv.name] = l
			else:
				table[conv.name] = conv.read(reader, font, table)
				if conv.isPropagatedCount:
					reader.setCount(conv.name, table[conv.name])

		self.postRead(table, font)

		del self.__rawTable  # succeeded, get rid of debugging info

	def ensureDecompiled(self):
		if self.compileStatus != 1:
			return
		subReader = self.reader.getSubReader(self.offset)
		self.decompile(subReader, self.font)
		del self.reader, self.font, self.offset

	def compile(self, writer, font):
		table = self.preWrite(font)

		if hasattr(self, 'sortCoverageLast'):
			writer.sortCoverageLast = 1

		self.writeFormat(writer)
		for conv in self.getConverters():
			value = table.get(conv.name)
			if conv.repeat:
				if value is None:
					value = []
				countValue = len(value) - conv.repeatOffset
				if conv.repeat in table:
					ref = table[conv.repeat]
					table[conv.repeat] = None
					ref.setValue(countValue)
				else:
					# conv.repeat is a propagated count
					writer.getCount(conv.repeat).setValue(countValue)
				for i in range(len(value)):
					conv.write(writer, font, table, value[i], i)
			elif conv.isCount:
				# Special-case Count values.
				# Assumption: a Count field will *always* precede
				# the actual array(s).
				# We need a default value, as it may be set later by a nested
				# table. We will later store it here.
				# We add a reference: by the time the data is assembled
				# the Count value will be filled in.
				ref = writer.writeCountReference(table, conv.name)
				if conv.isPropagatedCount:
					table[conv.name] = None
					writer.setCount(conv.name, ref)
				else:
					table[conv.name] = ref
			else:
				conv.write(writer, font, table, value)

	def readFormat(self, reader):
		pass

	def writeFormat(self, writer):
		pass

	def postRead(self, table, font):
		self.__dict__.update(table)

	def preWrite(self, font):
		self.ensureDecompiled()
		return self.__dict__.copy()

	def toXML(self, xmlWriter, font, attrs=None):
		tableName = self.__class__.__name__
		if attrs is None:
			attrs = []
		if hasattr(self, "Format"):
			attrs = attrs + [("Format", self.Format)]
		xmlWriter.begintag(tableName, attrs)
		xmlWriter.newline()
		self.toXML2(xmlWriter, font)
		xmlWriter.endtag(tableName)
		xmlWriter.newline()

	def toXML2(self, xmlWriter, font):
		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
		# This is because in TTX our parent writes our main tag, and in otBase.py we
		# do it ourselves. I think I'm getting schizophrenic...
		for conv in self.getConverters():
			value = getattr(self, conv.name)
			if conv.repeat:
				for i in range(len(value)):
					item = value[i]
					conv.xmlWrite(xmlWriter, font, item, conv.name,
							[("index", i)])
			else:
				conv.xmlWrite(xmlWriter, font, value, conv.name, [])

	def fromXML(self, (name, attrs, content), font):
		try:
			conv = self.getConverterByName(name)
		except KeyError:
			raise    # XXX on KeyError, raise nice error
		value = conv.xmlRead(attrs, content, font)
		if conv.repeat:
			seq = getattr(self, conv.name, None)
			if seq is None:
				seq = []
				setattr(self, conv.name, seq)
			seq.append(value)
		else:
			setattr(self, conv.name, value)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		self.ensureDecompiled()

		return cmp(self.__dict__, other.__dict__)


class FormatSwitchingBaseTable(BaseTable):

	"""Minor specialization of BaseTable, for tables that have multiple
	formats, eg. CoverageFormat1 vs. CoverageFormat2."""

	def getConverters(self):
		return self.converters[self.Format]

	def getConverterByName(self, name):
		return self.convertersByName[self.Format][name]

	def readFormat(self, reader):
		self.Format = reader.readUShort()
		assert self.Format <> 0, (self, reader.pos, len(reader.data))

	def writeFormat(self, writer):
		writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#

valueRecordFormat = [
#	Mask	 Name            isDevice  signed
	(0x0001, "XPlacement",   0,        1),
	(0x0002, "YPlacement",   0,        1),
	(0x0004, "XAdvance",     0,        1),
	(0x0008, "YAdvance",     0,        1),
	(0x0010, "XPlaDevice",   1,        0),
	(0x0020, "YPlaDevice",   1,        0),
	(0x0040, "XAdvDevice",   1,        0),
	(0x0080, "YAdvDevice",   1,        0),
# 	reserved:
	(0x0100, "Reserved1",    0,        0),
	(0x0200, "Reserved2",    0,        0),
	(0x0400, "Reserved3",    0,        0),
	(0x0800, "Reserved4",    0,        0),
	(0x1000, "Reserved5",    0,        0),
	(0x2000, "Reserved6",    0,        0),
	(0x4000, "Reserved7",    0,        0),
	(0x8000, "Reserved8",    0,        0),
]

def _buildDict():
	d = {}
	for mask, name, isDevice, signed in valueRecordFormat:
		d[name] = mask, isDevice, signed
	return d

valueRecordFormatDict = _buildDict()


class ValueRecordFactory:

	"""Given a format code, this object converts ValueRecords."""
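	# For example, ValueRecordFactory(0x0003) selects XPlacement and YPlacement;
	# readValueRecord() will then read two signed shorts and return a ValueRecord
	# with just those two attributes set.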

	def __init__(self, valueFormat):
		format = []
		for mask, name, isDevice, signed in valueRecordFormat:
			if valueFormat & mask:
				format.append((name, isDevice, signed))
		self.format = format

	def readValueRecord(self, reader, font):
		format = self.format
		if not format:
			return None
		valueRecord = ValueRecord()
		for name, isDevice, signed in format:
			if signed:
				value = reader.readShort()
			else:
				value = reader.readUShort()
			if isDevice:
				if value:
					import otTables
					subReader = reader.getSubReader(value)
					value = getattr(otTables, name)()
					value.decompile(subReader, font)
				else:
					value = None
			setattr(valueRecord, name, value)
		return valueRecord

	def writeValueRecord(self, writer, font, valueRecord):
		for name, isDevice, signed in self.format:
			value = getattr(valueRecord, name, 0)
			if isDevice:
				if value:
					subWriter = writer.getSubWriter()
					writer.writeSubTable(subWriter)
					value.compile(subWriter, font)
				else:
					writer.writeUShort(0)
			elif signed:
				writer.writeShort(value)
			else:
				writer.writeUShort(value)


class ValueRecord:

	# see ValueRecordFactory

	def getFormat(self):
		format = 0
		for name in self.__dict__.keys():
			format = format | valueRecordFormatDict[name][0]
		return format

	def toXML(self, xmlWriter, font, valueName, attrs=None):
		if attrs is None:
			simpleItems = []
		else:
			simpleItems = list(attrs)
		for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
			if hasattr(self, name):
				simpleItems.append((name, getattr(self, name)))
		deviceItems = []
		for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
			if hasattr(self, name):
				device = getattr(self, name)
				if device is not None:
					deviceItems.append((name, device))
		if deviceItems:
			xmlWriter.begintag(valueName, simpleItems)
			xmlWriter.newline()
			for name, deviceRecord in deviceItems:
				if deviceRecord is not None:
					deviceRecord.toXML(xmlWriter, font)
			xmlWriter.endtag(valueName)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(valueName, simpleItems)
			xmlWriter.newline()

	def fromXML(self, (name, attrs, content), font):
		import otTables
		for k, v in attrs.items():
			setattr(self, k, int(v))
		for element in content:
			if type(element) <> TupleType:
				continue
			name, attrs, content = element
			value = getattr(otTables, name)()
			for elem2 in content:
				if type(elem2) <> TupleType:
					continue
				value.fromXML(elem2, font)
			setattr(self, name, value)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.__dict__, other.__dict__)
836