otBase.py revision 3879cf94396869f645ae4c34dcdafed40ccdcdd4
from DefaultTable import DefaultTable
import otData
import struct
from types import TupleType


class OverflowErrorRecord:
    def __init__(self, overflowTuple):
        self.tableType = overflowTuple[0]
        self.LookupListIndex = overflowTuple[1]
        self.SubTableIndex = overflowTuple[2]
        self.itemName = overflowTuple[3]
        self.itemIndex = overflowTuple[4]

    def __repr__(self):
        return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))


class OTLOffsetOverflowError(Exception):
    def __init__(self, overflowErrorRecord):
        self.value = overflowErrorRecord

    def __str__(self):
        return repr(self.value)

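# Illustrative sketch only -- a hypothetical, never-called helper, not part of
# the upstream source.  It shows what a caller sees when a 16-bit offset
# overflows and one of these records is raised; the field values are made up.
def _exampleOverflow():
    record = OverflowErrorRecord(("GSUB", 12, 3, "Coverage", 0))
    try:
        raise OTLOffsetOverflowError(record)
    except OTLOffsetOverflowError, e:
        return e.value.LookupListIndex, e.value.SubTableIndex  # -> (12, 3)
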
class BaseTTXConverter(DefaultTable):

    """Generic base class for TTX table converters. It functions as an
    adapter between the TTX (ttLib actually) table model and the model
    we use for OpenType tables, which is necessarily subtly different.
    """

    def decompile(self, data, font):
        import otTables
        cachingStats = None if True else {}  # flip to {} to collect/print subtable offset reuse stats
        reader = OTTableReader(data, self.tableTag, cachingStats=cachingStats)
        tableClass = getattr(otTables, self.tableTag)
        self.table = tableClass()
        self.table.decompile(reader, font)
        if cachingStats:
            stats = [(v, k) for k, v in cachingStats.items()]
            stats.sort()
            stats.reverse()
            print "cachingStats for ", self.tableTag
            for v, k in stats:
                if v < 2:
                    break
                print v, k
            print "---", len(stats)

    def compile(self, font):
        """Create a top-level OTTableWriter for the GPOS/GSUB table.
        Call the compile method for the table
            for each 'converter' record in the table converter list
                call the converter's write method for each item in the value.
                    - For simple items, the write method adds a string to the
                      writer's self.items list.
                    - For Struct/Table/Subtable items, it first adds a new writer
                      to the writer's self.items, then calls the item's compile
                      method. This creates a tree of writers, rooted at the
                      GSUB/GPOS writer, with each writer representing a table,
                      and the writer.items list containing the child data strings
                      and writers.
        call the getAllData method
            call _doneWriting, which removes duplicates
            call _gatherTables. This traverses the tables, adding unique
            occurrences to a flat list of tables.
            Traverse the flat list of tables, calling getDataLength on each to
            update their position.
            Traverse the flat list of tables again, calling getData to get the
            data in each table, now that positions and offsets are known.

            If a lookup subtable overflows an offset, we have to start all over.
        """
        writer = OTTableWriter(self.tableTag)
        writer.parent = None
        self.table.compile(writer, font)
        return writer.getAllData()

    def toXML(self, writer, font):
        self.table.toXML2(writer, font)

    def fromXML(self, (name, attrs, content), font):
        import otTables
        if not hasattr(self, "table"):
            tableClass = getattr(otTables, self.tableTag)
            self.table = tableClass()
        self.table.fromXML((name, attrs, content), font)

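# Illustrative sketch only -- a hypothetical, never-called helper, not part of
# the upstream source.  It shows the round trip the converter above implements:
# accessing a table triggers decompile(), saving triggers compile().  The font
# path is made up; the font is assumed to contain a GSUB table.
def _exampleRoundTrip(path="MyFont.ttf"):
    from fontTools.ttLib import TTFont
    font = TTFont(path)
    gsub = font["GSUB"]           # BaseTTXConverter.decompile() builds gsub.table
    font.save(path + ".rebuilt")  # BaseTTXConverter.compile() reassembles loaded tables
    return gsub.table
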
class OTTableReader(object):

    """Helper class to retrieve data from an OpenType table."""

    __slots__ = ('data', 'offset', 'pos', 'tableType', 'valueFormat', 'cachingStats')

    def __init__(self, data, tableType, offset=0, valueFormat=None, cachingStats=None):
        self.data = data
        self.offset = offset
        self.pos = offset
        self.tableType = tableType
        if valueFormat is None:
            valueFormat = [None, None]
        self.valueFormat = valueFormat
        self.cachingStats = cachingStats

    def getSubReader(self, offset, persistent=False):
        offset = self.offset + offset
        if self.cachingStats is not None:
            try:
                self.cachingStats[offset] = self.cachingStats[offset] + 1
            except KeyError:
                self.cachingStats[offset] = 1

        subReader = self.__class__(self.data, self.tableType, offset,
                self.valueFormat, self.cachingStats)
        return subReader

    def readUShort(self):
        pos = self.pos
        newpos = pos + 2
        value, = struct.unpack(">H", self.data[pos:newpos])
        self.pos = newpos
        return value

    def readShort(self):
        pos = self.pos
        newpos = pos + 2
        value, = struct.unpack(">h", self.data[pos:newpos])
        self.pos = newpos
        return value

    def readLong(self):
        pos = self.pos
        newpos = pos + 4
        value, = struct.unpack(">l", self.data[pos:newpos])
        self.pos = newpos
        return value

    def readULong(self):
        pos = self.pos
        newpos = pos + 4
        value, = struct.unpack(">L", self.data[pos:newpos])
        self.pos = newpos
        return value

    def readTag(self):
        pos = self.pos
        newpos = pos + 4
        value = self.data[pos:newpos]
        assert len(value) == 4
        self.pos = newpos
        return value

    def readStruct(self, format, size=None):
        if size is None:
            size = struct.calcsize(format)
        else:
            assert size == struct.calcsize(format)
        pos = self.pos
        newpos = pos + size
        values = struct.unpack(format, self.data[pos:newpos])
        self.pos = newpos
        return values

    def setValueFormat(self, format, which):
        self.valueFormat[which] = ValueRecordFactory(format)

    def readValueRecord(self, font, which):
        return self.valueFormat[which].readValueRecord(self, font)

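# Illustrative sketch only -- a hypothetical, never-called helper, not part of
# the upstream source: reading big-endian values from a raw data string with the
# reader above.
def _exampleReader():
    reader = OTTableReader("\x00\x01\xff\xfe", "GSUB")
    return reader.readUShort(), reader.readShort()  # -> (1, -2)
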
class OTTableWriter(object):

    """Helper class to gather and assemble data for OpenType tables."""

    def __init__(self, tableType, valueFormat=None):
        self.items = []
        self.tableType = tableType
        if valueFormat is None:
            valueFormat = [None, None]
        self.valueFormat = valueFormat
        self.pos = None

    # assembler interface

    def getAllData(self):
        """Assemble all data, including all subtables."""
        self._doneWriting()
        tables, extTables = self._gatherTables()
        tables.reverse()
        extTables.reverse()
        # Gather all data in two passes: the absolute positions of all
        # subtables are needed before the actual data can be assembled.
        pos = 0
        for table in tables:
            table.pos = pos
            pos = pos + table.getDataLength()

        for table in extTables:
            table.pos = pos
            pos = pos + table.getDataLength()

        data = []
        for table in tables:
            tableData = table.getData()
            data.append(tableData)

        for table in extTables:
            tableData = table.getData()
            data.append(tableData)

        return "".join(data)

    def getDataLength(self):
        """Return the length of this table in bytes, without subtables."""
        l = 0
        if hasattr(self, "Extension"):
            longOffset = 1
        else:
            longOffset = 0
        for item in self.items:
            if hasattr(item, "getData") or hasattr(item, "getCountData"):
                if longOffset:
                    l = l + 4  # sizeof(ULong)
                else:
                    l = l + 2  # sizeof(UShort)
            else:
                l = l + len(item)
        return l

    def getData(self):
        """Assemble the data for this writer/table, without subtables."""
        items = list(self.items)  # make a shallow copy
        if hasattr(self, "Extension"):
            longOffset = 1
        else:
            longOffset = 0
        pos = self.pos
        numItems = len(items)
        for i in range(numItems):
            item = items[i]

            if hasattr(item, "getData"):
                if longOffset:
                    items[i] = packULong(item.pos - pos)
                else:
                    try:
                        items[i] = packUShort(item.pos - pos)
                    except AssertionError:
                        # provide data to fix the overflow problem.
                        # If the overflow is to a lookup, or from a lookup to a subtable,
                        # just report the current item.
                        if self.name in ['LookupList', 'Lookup']:
                            overflowErrorRecord = self.getOverflowErrorRecord(item)
                        else:
                            # overflow is within a subTable. Life is more complicated.
                            # If we split the sub-table just before the current item, we may
                            # still suffer overflow. This is because duplicate table merging
                            # is done only within an Extension subTable tree; when we split
                            # the subtable in two, some items may no longer be duplicates.
                            # Get the worst case by adding up all the item lengths (depth
                            # first traversal), and then report the first item that
                            # overflows a short.
                            def getDeepItemLength(table):
                                if hasattr(table, "getDataLength"):
                                    length = 0
                                    for item in table.items:
                                        length = length + getDeepItemLength(item)
                                else:
                                    length = len(table)
                                return length

                            length = self.getDataLength()
                            if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
                                # Coverage is first in the item list, but last in the table
                                # list. The original overflow is really in the item list.
                                # Skip the Coverage table in the following test.
                                items = items[i+1:]

                            for j in range(len(items)):
                                item = items[j]
                                length = length + getDeepItemLength(item)
                                if length > 65535:
                                    break
                            overflowErrorRecord = self.getOverflowErrorRecord(item)

                        raise OTLOffsetOverflowError, overflowErrorRecord

        return "".join(items)

    def __hash__(self):
        # only works after self._doneWriting() has been called
        return hash(self.items)

    def __cmp__(self, other):
        if type(self) != type(other): return cmp(type(self), type(other))
        if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

        return cmp(self.items, other.items)

    def _doneWriting(self, internedTables=None):
        # Convert CountData references to data string items,
        # collapse duplicate table references to a unique entry.
        # "tables" are OTTableWriter objects.

        # For Extension Lookup types, we can
        # eliminate duplicates only within the tree under the Extension Lookup,
        # as offsets may exceed 64K even between Extension LookupTable subtables.
        if internedTables is None:
            internedTables = {}
        items = self.items
        iRange = range(len(items))

        if hasattr(self, "Extension"):
            newTree = 1
        else:
            newTree = 0
        for i in iRange:
            item = items[i]
            if hasattr(item, "getCountData"):
                items[i] = item.getCountData()
            elif hasattr(item, "getData"):
                if newTree:
                    item._doneWriting()
                else:
                    item._doneWriting(internedTables)
                    if internedTables.has_key(item):
                        items[i] = item = internedTables[item]
                    else:
                        internedTables[item] = item
        self.items = tuple(items)

    def _gatherTables(self, tables=None, extTables=None, done=None):
        # Convert table references in the self.items tree to a flat
        # list of tables in depth-first traversal order.
        # "tables" are OTTableWriter objects.
        # We do the traversal in reverse order at each level, in order to
        # resolve duplicate references to be the last reference in the list of tables.
        # For extension lookups, duplicate references can be merged only within the
        # writer tree under the extension lookup.
        if tables is None:  # init call for first time.
            tables = []
            extTables = []
            done = {}

        done[self] = 1

        numItems = len(self.items)
        iRange = range(numItems)
        iRange.reverse()

        if hasattr(self, "Extension"):
            appendExtensions = 1
        else:
            appendExtensions = 0

        # add Coverage table if it is sorted last.
        sortCoverageLast = 0
        if hasattr(self, "sortCoverageLast"):
            # Find the coverage table
            for i in range(numItems):
                item = self.items[i]
                if hasattr(item, "name") and (item.name == "Coverage"):
                    sortCoverageLast = 1
                    break
            if not done.has_key(item):
                item._gatherTables(tables, extTables, done)
            else:
                index = max(item.parent.keys())
                item.parent[index + 1] = self

        saveItem = None
        for i in iRange:
            item = self.items[i]
            if not hasattr(item, "getData"):
                continue

            if sortCoverageLast and (i == 1) and item.name == 'Coverage':
                # we've already 'gathered' it above
                continue

            if appendExtensions:
                assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extension subtables"
                newDone = {}
                item._gatherTables(extTables, None, newDone)

            elif not done.has_key(item):
                item._gatherTables(tables, extTables, done)
            else:
                index = max(item.parent.keys())
                item.parent[index + 1] = self

        tables.append(self)
        return tables, extTables

    # interface for gathering data, as used by table.compile()

    def getSubWriter(self):
        subwriter = self.__class__(self.tableType, self.valueFormat)
        subwriter.parent = {0: self}  # because some subtables have identical values, we discard
                                      # the duplicates under the getAllData method. Hence some
                                      # subtable writers can have more than one parent writer.
        return subwriter

    def writeUShort(self, value):
        assert 0 <= value < 0x10000
        self.items.append(struct.pack(">H", value))

    def writeShort(self, value):
        self.items.append(struct.pack(">h", value))

    def writeLong(self, value):
        self.items.append(struct.pack(">l", value))

    def writeULong(self, value):
        self.items.append(struct.pack(">L", value))

    def writeTag(self, tag):
        assert len(tag) == 4
        self.items.append(tag)

    def writeSubTable(self, subWriter):
        self.items.append(subWriter)

    def writeCountReference(self, table, name):
        self.items.append(CountReference(table, name))

    def writeStruct(self, format, values):
        data = apply(struct.pack, (format,) + values)
        self.items.append(data)

    def writeData(self, data):
        self.items.append(data)

    def setValueFormat(self, format, which):
        self.valueFormat[which] = ValueRecordFactory(format)

    def writeValueRecord(self, value, font, which):
        return self.valueFormat[which].writeValueRecord(self, font, value)

    def getOverflowErrorRecord(self, item):
        LookupListIndex = SubTableIndex = itemName = itemIndex = None
        if self.name == 'LookupList':
            LookupListIndex = item.repeatIndex
        elif self.name == 'Lookup':
            LookupListIndex = self.repeatIndex
            SubTableIndex = item.repeatIndex
        else:
            itemName = item.name
            if hasattr(item, 'repeatIndex'):
                itemIndex = item.repeatIndex
            if self.name == 'SubTable':
                LookupListIndex = self.parent[0].repeatIndex
                SubTableIndex = self.repeatIndex
            elif self.name == 'ExtSubTable':
                LookupListIndex = self.parent[0].parent[0].repeatIndex
                SubTableIndex = self.parent[0].repeatIndex
            else:  # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
                itemName = ".".join([self.name, item.name])
                p1 = self.parent[0]
                while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
                    itemName = ".".join([p1.name, item.name])
                    p1 = p1.parent[0]
                if p1:
                    if p1.name == 'ExtSubTable':
                        LookupListIndex = self.parent[0].parent[0].repeatIndex
                        SubTableIndex = self.parent[0].repeatIndex
                    else:
                        LookupListIndex = self.parent[0].repeatIndex
                        SubTableIndex = self.repeatIndex

        return OverflowErrorRecord((self.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex))


class CountReference:
    """A reference to a Count value, not a count of references."""
    def __init__(self, table, name):
        self.table = table
        self.name = name
    def getCountData(self):
        return packUShort(self.table[self.name])


def packUShort(value):
    assert 0 <= value < 0x10000, value
    return struct.pack(">H", value)


def packULong(value):
    assert 0 <= value < 0x100000000, value
    return struct.pack(">L", value)

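# Illustrative sketch only -- a hypothetical, never-called helper, not part of
# the upstream source.  It builds a tiny writer tree by hand, the way the
# compile() methods do, and assembles it with getAllData() in two passes:
# subtable positions first, then the actual data.
def _exampleWriterTree():
    w = OTTableWriter("GSUB")  # top-level writer, as in BaseTTXConverter.compile()
    w.parent = None
    w.writeUShort(1)           # a simple value becomes a packed string item
    sub = w.getSubWriter()     # a subtable becomes a nested writer ...
    w.writeSubTable(sub)       # ... referenced from its parent by a 16-bit offset
    sub.writeUShort(2)
    return w.getAllData()      # -> "\x00\x01\x00\x04\x00\x02"
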
class BaseTable(object):

    """Generic base class for all OpenType (sub)tables."""

    def __init__(self):
        self.compileStatus = 0  # 0 means table was created
        # 1 means the table.read() function was called by a table which is subject
        # to delayed compilation
        # 2 means that it was subject to delayed compilation, and
        # has been decompiled
        # 3 means that the start and end fields have been filled out, and that we
        # can use the data string rather than compiling from the table data.

        self.recurse = 0

    def __getattr__(self, attr):
        # we get here only when the table does not have the attribute.
        # This method override exists so that we can try to decompile
        # a table which is subject to delayed decompilation, and then try
        # to get the value again after decompilation.
        self.recurse += 1
        if self.recurse > 2:
            # shouldn't ever get here - we should only get to two levels of recursion.
            # this guards against self.decompile NOT setting compileStatus to other than 1.
            raise AttributeError, attr
        if self.compileStatus == 1:
            self.ensureDecompiled()
            val = getattr(self, attr)
            self.recurse -= 1
            return val

        raise AttributeError, attr

    def getConverters(self):
        return self.converters

    def getConverterByName(self, name):
        return self.convertersByName[name]

    def decompile(self, reader, font, countVars=None):
        self.compileStatus = 2  # table has been decompiled.
        if countVars is None:
            countVars = {}
        self.readFormat(reader)
        counts = []
        table = {}
        self.__rawTable = table  # for debugging
        converters = self.getConverters()
        for conv in converters:
            if conv.name == "SubTable":
                conv = conv.getConverter(reader.tableType,
                        table["LookupType"])
            if conv.name == "ExtSubTable":
                conv = conv.getConverter(reader.tableType,
                        table["ExtensionLookupType"])
            if conv.repeat:
                l = []
                for i in range(countVars[conv.repeat] + conv.repeatOffset):
                    l.append(conv.read(reader, font, countVars))
                table[conv.name] = l
                if conv.repeat in counts:
                    del countVars[conv.repeat]
                    counts.remove(conv.repeat)

            else:
                table[conv.name] = conv.read(reader, font, countVars)
                if conv.isCount:
                    counts.append(conv.name)
                    countVars[conv.name] = table[conv.name]

        for count in counts:
            del countVars[count]

        self.postRead(table, font)

        del self.__rawTable  # succeeded, get rid of debugging info

    def ensureDecompiled(self):
        if self.compileStatus != 1:
            return
        self.decompile(self.reader, self.font, self.countVars)
        del self.reader, self.font, self.countVars

    def preCompile(self):
        pass  # used only by the LookupList class

    def compile(self, writer, font, countVars=None):
        if countVars is None:
            countVars = {}
        counts = []
        table = self.preWrite(font)

        if hasattr(self, 'sortCoverageLast'):
            writer.sortCoverageLast = 1

        self.writeFormat(writer)
        for conv in self.getConverters():
            value = table.get(conv.name)
            if conv.repeat:
                if value is None:
                    value = []
                countVars[conv.repeat](len(value) - conv.repeatOffset)
                for i in range(len(value)):
                    conv.write(writer, font, countVars, value[i], i)
                if conv.repeat in counts:
                    del countVars[conv.repeat]
                    counts.remove(conv.repeat)
            elif conv.isCount:
                # Special-case Count values.
                # Assumption: a Count field will *always* precede
                # the actual array.
                # We need a default value, as it may be set later by a nested
                # table. We will later store it here.
                table[conv.name] = None
                # We add a reference: by the time the data is assembled
                # the Count value will be filled in.
                name = conv.name
                writer.writeCountReference(table, name)
                counts.append(name)
                def storeValue(value):
                    if table[name] is None:
                        table[name] = value
                    else:
                        assert table[name] == value, (table[name], value)
                countVars[name] = storeValue
            else:
                conv.write(writer, font, countVars, value)

        for count in counts:
            del countVars[count]

    def readFormat(self, reader):
        pass

    def writeFormat(self, writer):
        pass

    def postRead(self, table, font):
        self.__dict__.update(table)

    def preWrite(self, font):
        self.ensureDecompiled()
        return self.__dict__.copy()

    def toXML(self, xmlWriter, font, attrs=None):
        tableName = self.__class__.__name__
        if attrs is None:
            attrs = []
        if hasattr(self, "Format"):
            attrs = attrs + [("Format", self.Format)]
        xmlWriter.begintag(tableName, attrs)
        xmlWriter.newline()
        self.toXML2(xmlWriter, font)
        xmlWriter.endtag(tableName)
        xmlWriter.newline()

    def toXML2(self, xmlWriter, font):
        # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
        # This is because in TTX our parent writes our main tag, and in otBase.py we
        # do it ourselves. I think I'm getting schizophrenic...
        for conv in self.getConverters():
            value = getattr(self, conv.name)
            if conv.repeat:
                for i in range(len(value)):
                    item = value[i]
                    conv.xmlWrite(xmlWriter, font, item, conv.name,
                            [("index", i)])
            else:
                conv.xmlWrite(xmlWriter, font, value, conv.name, [])

    def fromXML(self, (name, attrs, content), font):
        try:
            conv = self.getConverterByName(name)
        except KeyError:
            raise  # XXX on KeyError, raise nice error
        value = conv.xmlRead(attrs, content, font)
        if conv.repeat:
            seq = getattr(self, conv.name, None)
            if seq is None:
                seq = []
                setattr(self, conv.name, seq)
            seq.append(value)
        else:
            setattr(self, conv.name, value)

    def __cmp__(self, other):
        if type(self) != type(other): return cmp(type(self), type(other))
        if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

        self.ensureDecompiled()

        return cmp(self.__dict__, other.__dict__)

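# Illustrative sketch only -- a hypothetical, never-called helper, not part of
# the upstream source.  It mirrors the Count handling in BaseTable.compile()
# above: the count is written as a CountReference first and filled in once the
# array length is known.
def _exampleCountReference():
    table = {"LookupCount": None}
    ref = CountReference(table, "LookupCount")  # what writeCountReference() stores
    table["LookupCount"] = 2                    # set later, via storeValue()
    return ref.getCountData()                   # -> packUShort(2) == "\x00\x02"
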
class FormatSwitchingBaseTable(BaseTable):

    """Minor specialization of BaseTable, for tables that have multiple
    formats, eg. CoverageFormat1 vs. CoverageFormat2."""

    def getConverters(self):
        return self.converters[self.Format]

    def getConverterByName(self, name):
        return self.convertersByName[self.Format][name]

    def readFormat(self, reader):
        self.Format = reader.readUShort()
        assert self.Format != 0, (self, reader.pos, len(reader.data))

    def writeFormat(self, writer):
        writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#

valueRecordFormat = [
#   Mask     Name            isDevice  signed
    (0x0001, "XPlacement",   0,        1),
    (0x0002, "YPlacement",   0,        1),
    (0x0004, "XAdvance",     0,        1),
    (0x0008, "YAdvance",     0,        1),
    (0x0010, "XPlaDevice",   1,        0),
    (0x0020, "YPlaDevice",   1,        0),
    (0x0040, "XAdvDevice",   1,        0),
    (0x0080, "YAdvDevice",   1,        0),
#   reserved:
    (0x0100, "Reserved1",    0,        0),
    (0x0200, "Reserved2",    0,        0),
    (0x0400, "Reserved3",    0,        0),
    (0x0800, "Reserved4",    0,        0),
    (0x1000, "Reserved5",    0,        0),
    (0x2000, "Reserved6",    0,        0),
    (0x4000, "Reserved7",    0,        0),
    (0x8000, "Reserved8",    0,        0),
]

def _buildDict():
    d = {}
    for mask, name, isDevice, signed in valueRecordFormat:
        d[name] = mask, isDevice, signed
    return d

valueRecordFormatDict = _buildDict()

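# Illustrative sketch only -- a hypothetical, never-called helper, not part of
# the upstream source: looking up a single entry in the table above.
def _exampleFormatDict():
    mask, isDevice, signed = valueRecordFormatDict["XAdvance"]
    return mask  # -> 0x0004; XAdvance is a signed, non-device value
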
class ValueRecordFactory:

    """Given a format code, this object converts ValueRecords."""

    def __init__(self, valueFormat=0):
        format = []
        for mask, name, isDevice, signed in valueRecordFormat:
            if valueFormat & mask:
                format.append((name, isDevice, signed))
        self.format = format

    def readValueRecord(self, reader, font):
        format = self.format
        if not format:
            return None
        valueRecord = ValueRecord()
        for name, isDevice, signed in format:
            if signed:
                value = reader.readShort()
            else:
                value = reader.readUShort()
            if isDevice:
                if value:
                    import otTables
                    subReader = reader.getSubReader(value)
                    value = getattr(otTables, name)()
                    value.decompile(subReader, font)
                else:
                    value = None
            setattr(valueRecord, name, value)
        return valueRecord

    def writeValueRecord(self, writer, font, valueRecord):
        for name, isDevice, signed in self.format:
            value = getattr(valueRecord, name, 0)
            if isDevice:
                if value:
                    subWriter = writer.getSubWriter()
                    writer.writeSubTable(subWriter)
                    value.compile(subWriter, font)
                else:
                    writer.writeUShort(0)
            elif signed:
                writer.writeShort(value)
            else:
                writer.writeUShort(value)

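# Illustrative sketch only -- a hypothetical, never-called helper, not part of
# the upstream source: which fields a given ValueFormat mask selects.
def _exampleFactoryFields():
    factory = ValueRecordFactory(0x0005)  # XPlacement | XAdvance
    return [name for name, isDevice, signed in factory.format]  # -> ['XPlacement', 'XAdvance']
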
class ValueRecord:

    # see ValueRecordFactory

    def getFormat(self):
        format = 0
        for name in self.__dict__.keys():
            format = format | valueRecordFormatDict[name][0]
        return format

    def toXML(self, xmlWriter, font, valueName, attrs=None):
        if attrs is None:
            simpleItems = []
        else:
            simpleItems = list(attrs)
        for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
            if hasattr(self, name):
                simpleItems.append((name, getattr(self, name)))
        deviceItems = []
        for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
            if hasattr(self, name):
                device = getattr(self, name)
                if device is not None:
                    deviceItems.append((name, device))
        if deviceItems:
            xmlWriter.begintag(valueName, simpleItems)
            xmlWriter.newline()
            for name, deviceRecord in deviceItems:
                if deviceRecord is not None:
                    deviceRecord.toXML(xmlWriter, font)
            xmlWriter.endtag(valueName)
            xmlWriter.newline()
        else:
            xmlWriter.simpletag(valueName, simpleItems)
            xmlWriter.newline()

    def fromXML(self, (name, attrs, content), font):
        import otTables
        for k, v in attrs.items():
            setattr(self, k, int(v))
        for element in content:
            if type(element) != TupleType:
                continue
            name, attrs, content = element
            value = getattr(otTables, name)()
            for elem2 in content:
                if type(elem2) != TupleType:
                    continue
                value.fromXML(elem2, font)
            setattr(self, name, value)

    def __cmp__(self, other):
        if type(self) != type(other): return cmp(type(self), type(other))
        if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

        return cmp(self.__dict__, other.__dict__)
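
# Illustrative sketch only -- a hypothetical, never-called helper, not part of
# the upstream source.  It decodes a ValueRecord with XPlacement=-10 and
# XAdvance=5 from four bytes of big-endian data; the font argument is not needed
# for non-device values.
def _exampleReadValueRecord():
    factory = ValueRecordFactory(0x0005)            # XPlacement | XAdvance
    reader = OTTableReader("\xff\xf6\x00\x05", "GPOS")
    record = factory.readValueRecord(reader, None)
    return record.XPlacement, record.XAdvance, record.getFormat()  # -> (-10, 5, 0x0005)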