otBase.py revision 5988cc32cbe04ed8ff6330bd6ede634b9c68faf0
from DefaultTable import DefaultTable
import otData
import struct
from types import TupleType

class OverflowErrorRecord:
	def __init__(self, overflowTuple):
		self.tableType = overflowTuple[0]
		self.LookupListIndex = overflowTuple[1]
		self.SubTableIndex = overflowTuple[2]
		self.itemName = overflowTuple[3]
		self.itemIndex = overflowTuple[4]

	def __repr__(self):
		return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))

class OTLOffsetOverflowError(Exception):
	def __init__(self, overflowErrorRecord):
		self.value = overflowErrorRecord

	def __str__(self):
		return repr(self.value)


class BaseTTXConverter(DefaultTable):

	"""Generic base class for TTX table converters. It functions as an
	adapter between the TTX (ttLib actually) table model and the model
	we use for OpenType tables, which is necessarily subtly different.
	"""

	def decompile(self, data, font):
		import otTables
		cachingStats = None
		reader = OTTableReader(data, self.tableTag, cachingStats=cachingStats)
		tableClass = getattr(otTables, self.tableTag)
		self.table = tableClass()
		self.table.decompile(reader, font)
		if 0:
			stats = [(v, k) for k, v in cachingStats.items()]
			stats.sort()
			stats.reverse()
			print "cachingStats for ", self.tableTag
			for v, k in stats:
				if v < 2:
					break
				print v, k
			print "---", len(stats)

	def compile(self, font):
		"""Create a top-level OTTableWriter for the GPOS/GSUB table.

		Call the compile method for the table:
			for each 'converter' record in the table's converter list,
			call the converter's write method for each item in the value.
				- For simple items, the write method adds a string to the
				  writer's self.items list.
				- For Struct/Table/Subtable items, it first adds a new writer
				  to the writer's self.items, then calls the item's compile
				  method. This creates a tree of writers, rooted at the
				  GSUB/GPOS writer, with each writer representing a table and
				  the writer.items list containing the child data strings and
				  writers.

		Call the getAllData method:
			call _doneWriting, which removes duplicates;
			call _gatherTables, which traverses the tables, adding unique
			occurrences to a flat list of tables;
			traverse the flat list of tables, calling getDataLength on each
			to update their position;
			traverse the flat list of tables again, calling getData to get
			the data for each table, now that positions and offsets are known.

			If a lookup subtable overflows an offset, we have to start all over.
		"""
		writer = OTTableWriter(self.tableTag)
		writer.parent = None
		self.table.compile(writer, font)
		return writer.getAllData()
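
	# Illustrative sketch (comment only, not part of the original module):
	# a caller that drives compilation could use the OverflowErrorRecord
	# carried by OTLOffsetOverflowError to restructure the lookup data and
	# retry. The fixLookupOverflow/fixSubTableOverflow helpers named here
	# are hypothetical placeholders for whatever strategy the caller uses.
	#
	#	try:
	#		data = table.compile(font)
	#	except OTLOffsetOverflowError, e:
	#		record = e.value  # an OverflowErrorRecord instance
	#		if record.itemName is None:
	#			fixLookupOverflow(font, record)    # hypothetical helper
	#		else:
	#			fixSubTableOverflow(font, record)  # hypothetical helper
	#		data = table.compile(font)  # start all over
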
	def toXML(self, writer, font):
		self.table.toXML2(writer, font)

	def fromXML(self, (name, attrs, content), font):
		import otTables
		if not hasattr(self, "table"):
			tableClass = getattr(otTables, self.tableTag)
			self.table = tableClass()
		self.table.fromXML((name, attrs, content), font)


class OTTableReader:

	"""Helper class to retrieve data from an OpenType table."""

	def __init__(self, data, tableType, offset=0, valueFormat=None, cachingStats=None):
		self.data = data
		self.offset = offset
		self.pos = offset
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = (ValueRecordFactory(), ValueRecordFactory())
		self.valueFormat = valueFormat
		self.cachingStats = cachingStats

	def getSubReader(self, offset):
		offset = self.offset + offset
		if self.cachingStats is not None:
			try:
				self.cachingStats[offset] = self.cachingStats[offset] + 1
			except KeyError:
				self.cachingStats[offset] = 1

		subReader = self.__class__(self.data, self.tableType, offset,
				self.valueFormat, self.cachingStats)
		return subReader

	def readUShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">H", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readShort(self):
		pos = self.pos
		newpos = pos + 2
		value, = struct.unpack(">h", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readLong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">l", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readULong(self):
		pos = self.pos
		newpos = pos + 4
		value, = struct.unpack(">L", self.data[pos:newpos])
		self.pos = newpos
		return value

	def readTag(self):
		pos = self.pos
		newpos = pos + 4
		value = self.data[pos:newpos]
		assert len(value) == 4
		self.pos = newpos
		return value

	def readStruct(self, format, size=None):
		if size is None:
			size = struct.calcsize(format)
		else:
			assert size == struct.calcsize(format)
		pos = self.pos
		newpos = pos + size
		values = struct.unpack(format, self.data[pos:newpos])
		self.pos = newpos
		return values

	def setValueFormat(self, format, which):
		self.valueFormat[which].setFormat(format)

	def readValueRecord(self, font, which):
		return self.valueFormat[which].readValueRecord(self, font)


class OTTableWriter:

	"""Helper class to gather and assemble data for OpenType tables."""

	def __init__(self, tableType, valueFormat=None):
		self.items = []
		self.tableType = tableType
		if valueFormat is None:
			valueFormat = ValueRecordFactory(), ValueRecordFactory()
		self.valueFormat = valueFormat
		self.pos = None

	# assembler interface

	def getAllData(self):
		"""Assemble all data, including all subtables."""
		self._doneWriting()
		tables, extTables = self._gatherTables()
		tables.reverse()
		extTables.reverse()
		# Gather all data in two passes: the absolute positions of all
		# subtables are needed before the actual data can be assembled.
		pos = 0
		for table in tables:
			table.pos = pos
			pos = pos + table.getDataLength()

		for table in extTables:
			table.pos = pos
			pos = pos + table.getDataLength()

		data = []
		for table in tables:
			tableData = table.getData()
			data.append(tableData)

		for table in extTables:
			tableData = table.getData()
			data.append(tableData)

		return "".join(data)
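
	# Worked example (comment only) of the two-pass assembly above: if, after
	# the reverse() calls, the flat list is [A, B] with
	# A.getDataLength() == 10 and B.getDataLength() == 6, the first pass
	# assigns A.pos = 0 and B.pos = 10; in the second pass A.getData() can
	# then encode its reference to B as packUShort(B.pos - A.pos), i.e.
	# packUShort(10).
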
	def getDataLength(self):
		"""Return the length of this table in bytes, without subtables."""
		l = 0
		if hasattr(self, "Extension"):
			longOffset = 1
		else:
			longOffset = 0
		for item in self.items:
			if hasattr(item, "getData") or hasattr(item, "getCountData"):
				if longOffset:
					l = l + 4  # sizeof(ULong)
				else:
					l = l + 2  # sizeof(UShort)
			else:
				l = l + len(item)
		return l

	def getData(self):
		"""Assemble the data for this writer/table, without subtables."""
		items = list(self.items)  # make a shallow copy
		if hasattr(self, "Extension"):
			longOffset = 1
		else:
			longOffset = 0
		pos = self.pos
		numItems = len(items)
		for i in range(numItems):
			item = items[i]

			if hasattr(item, "getData"):
				if longOffset:
					items[i] = packULong(item.pos - pos)
				else:
					try:
						items[i] = packUShort(item.pos - pos)
					except AssertionError:
						# Provide data to fix the overflow problem.
						# If the overflow is to a lookup, or from a lookup to a subtable,
						# just report the current item.
						if self.name in ['LookupList', 'Lookup']:
							overflowErrorRecord = self.getOverflowErrorRecord(item)
						else:
							# The overflow is within a subtable. Life is more complicated.
							# If we split the subtable just before the current item, we may
							# still suffer overflow. This is because duplicate table merging
							# is done only within an Extension subtable tree; when we split
							# the subtable in two, some items may no longer be duplicates.
							# Get the worst case by adding up all the item lengths in a
							# depth-first traversal, and then report the first item that
							# overflows a short.
							def getDeepItemLength(table):
								if hasattr(table, "getDataLength"):
									length = 0
									for item in table.items:
										length = length + getDeepItemLength(item)
								else:
									length = len(table)
								return length

							length = self.getDataLength()
							if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
								# Coverage is first in the item list, but last in the table
								# list. The original overflow is really in the item list.
								# Skip the Coverage table in the following test.
								items = items[i+1:]

							for j in range(len(items)):
								item = items[j]
								length = length + getDeepItemLength(item)
								if length > 65535:
									break
							overflowErrorRecord = self.getOverflowErrorRecord(item)

						raise OTLOffsetOverflowError, overflowErrorRecord

		return "".join(items)
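
	# Comment-only example of the overflow path in getData(): when a
	# referenced subtable ends up at item.pos - pos >= 0x10000, packUShort()
	# fails its range assertion, and getData() turns that AssertionError into
	# an OTLOffsetOverflowError carrying an OverflowErrorRecord, e.g. one
	# built from ('GSUB', 3, 0, None, None) for an overflow reported at the
	# Lookup level (lookup index 3, subtable index 0; indices illustrative).
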
	def __hash__(self):
		# only works after self._doneWriting() has been called
		return hash(self.items)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.items, other.items)

	def _doneWriting(self, internedTables=None):
		# Convert CountData references to data string items;
		# collapse duplicate table references to a unique entry.
		# "tables" are OTTableWriter objects.

		# For Extension Lookup types, we can
		# eliminate duplicates only within the tree under the Extension Lookup,
		# as offsets may exceed 64K even between Extension LookupTable subtables.
		if internedTables is None:
			internedTables = {}
		items = self.items
		iRange = range(len(items))

		if hasattr(self, "Extension"):
			newTree = 1
		else:
			newTree = 0
		for i in iRange:
			item = items[i]
			if hasattr(item, "getCountData"):
				items[i] = item.getCountData()
			elif hasattr(item, "getData"):
				if newTree:
					item._doneWriting()
				else:
					item._doneWriting(internedTables)
					if internedTables.has_key(item):
						items[i] = item = internedTables[item]
					else:
						internedTables[item] = item
		self.items = tuple(items)

	def _gatherTables(self, tables=None, extTables=None, done=None):
		# Convert table references in the self.items tree to a flat
		# list of tables in depth-first traversal order.
		# "tables" are OTTableWriter objects.
		# We do the traversal in reverse order at each level, in order to
		# resolve duplicate references to be the last reference in the list of tables.
		# For extension lookups, duplicate references can be merged only within the
		# writer tree under the extension lookup.
		if tables is None:  # init call for the first time.
			tables = []
			extTables = []
			done = {}

		done[self] = 1

		numItems = len(self.items)
		iRange = range(numItems)
		iRange.reverse()

		if hasattr(self, "Extension"):
			appendExtensions = 1
		else:
			appendExtensions = 0

		# add the Coverage table if it is sorted last.
		sortCoverageLast = 0
		if hasattr(self, "sortCoverageLast"):
			# Find the Coverage table
			for i in range(numItems):
				item = self.items[i]
				if hasattr(item, "name") and (item.name == "Coverage"):
					sortCoverageLast = 1
					break
			if not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		saveItem = None
		for i in iRange:
			item = self.items[i]
			if not hasattr(item, "getData"):
				continue

			if sortCoverageLast and (i == 1) and item.name == 'Coverage':
				# we've already 'gathered' it above
				continue

			if appendExtensions:
				assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extension subtables"
				newDone = {}
				item._gatherTables(extTables, None, newDone)

			elif not done.has_key(item):
				item._gatherTables(tables, extTables, done)
			else:
				index = max(item.parent.keys())
				item.parent[index + 1] = self

		tables.append(self)
		return tables, extTables
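
	# Comment-only sketch of the interning done in _doneWriting(): once a
	# writer's items are frozen to a tuple, writers that produced identical
	# content hash and compare equal, so internedTables maps them to a single
	# object and only one copy of the subtable is emitted. Given any existing
	# OTTableWriter instance `writer`:
	#
	#	w1 = writer.getSubWriter(); w1.writeUShort(1); w1._doneWriting()
	#	w2 = writer.getSubWriter(); w2.writeUShort(1); w2._doneWriting()
	#	assert hash(w1) == hash(w2) and cmp(w1, w2) == 0
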
	# interface for gathering data, as used by table.compile()

	def getSubWriter(self):
		subwriter = self.__class__(self.tableType, self.valueFormat)
		# Because some subtables have identical values, we discard the
		# duplicates under the getAllData method. Hence some subtable
		# writers can have more than one parent writer.
		subwriter.parent = {0: self}
		return subwriter

	def writeUShort(self, value):
		assert 0 <= value < 0x10000
		self.items.append(struct.pack(">H", value))

	def writeShort(self, value):
		self.items.append(struct.pack(">h", value))

	def writeLong(self, value):
		self.items.append(struct.pack(">l", value))

	def writeULong(self, value):
		self.items.append(struct.pack(">L", value))

	def writeTag(self, tag):
		assert len(tag) == 4
		self.items.append(tag)

	def writeSubTable(self, subWriter):
		self.items.append(subWriter)

	def writeCountReference(self, table, name):
		self.items.append(CountReference(table, name))

	def writeStruct(self, format, values):
		data = apply(struct.pack, (format,) + values)
		self.items.append(data)

	def writeData(self, data):
		self.items.append(data)

	def setValueFormat(self, format, which):
		self.valueFormat[which].setFormat(format)

	def writeValueRecord(self, value, font, which):
		return self.valueFormat[which].writeValueRecord(self, font, value)

	def getOverflowErrorRecord(self, item):
		LookupListIndex = SubTableIndex = itemName = itemIndex = None
		if self.name == 'LookupList':
			LookupListIndex = item.repeatIndex
		elif self.name == 'Lookup':
			LookupListIndex = self.repeatIndex
			SubTableIndex = item.repeatIndex
		else:
			itemName = item.name
			if hasattr(item, 'repeatIndex'):
				itemIndex = item.repeatIndex
			if self.name == 'SubTable':
				LookupListIndex = self.parent[0].repeatIndex
				SubTableIndex = self.repeatIndex
			elif self.name == 'ExtSubTable':
				LookupListIndex = self.parent[0].parent[0].repeatIndex
				SubTableIndex = self.parent[0].repeatIndex
			else:
				# Who knows how far below the SubTable level we are!
				# Climb back up to the nearest subtable.
				itemName = ".".join([self.name, item.name])
				p1 = self.parent[0]
				while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
					itemName = ".".join([p1.name, item.name])
					p1 = p1.parent[0]
				if p1:
					if p1.name == 'ExtSubTable':
						LookupListIndex = self.parent[0].parent[0].repeatIndex
						SubTableIndex = self.parent[0].repeatIndex
					else:
						LookupListIndex = self.parent[0].repeatIndex
						SubTableIndex = self.repeatIndex

		return OverflowErrorRecord((self.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex))
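
# Comment-only example: an OverflowErrorRecord built from the tuple
# ('GPOS', 2, 1, 'PairSet', 5) (illustrative values) means that while
# compiling the GPOS table, in lookup index 2, subtable index 1, the offset
# to the item named 'PairSet' at index 5 no longer fits in a UShort.
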
class CountReference:
	"""A reference to a Count value, not a count of references."""
	def __init__(self, table, name):
		self.table = table
		self.name = name
	def getCountData(self):
		return packUShort(self.table[self.name])


def packUShort(value):
	assert 0 <= value < 0x10000, value
	return struct.pack(">H", value)


def packULong(value):
	assert 0 <= value < 0x100000000, value
	return struct.pack(">L", value)


class TableStack:
	"""A stack of table dicts, working as a stack of namespaces so we can
	retrieve values from (and store values to) tables higher up the stack."""
	def __init__(self):
		self.stack = []
	def push(self, table):
		self.stack.append(table)
	def pop(self):
		self.stack.pop()
	def getTop(self):
		return self.stack[-1]
	def getValue(self, name):
		return self.__findTable(name)[name]
	def storeValue(self, name, value):
		table = self.__findTable(name)
		if table[name] is None:
			table[name] = value
		else:
			assert table[name] == value, (table[name], value)
	def __findTable(self, name):
		for table in reversed(self.stack):
			if table.has_key(name):
				return table
		raise KeyError, name
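
# Comment-only example of how TableStack acts as a namespace stack: a Count
# stored at an outer level can be read or filled in by a nested table.
#
#	ts = TableStack()
#	ts.push({"LookupCount": None})   # outer table dict
#	ts.push({"GlyphCount": 2})       # nested table dict
#	ts.getValue("GlyphCount")        # -> 2, found on top of the stack
#	ts.storeValue("LookupCount", 5)  # fills in the outer, still-None slot
#	ts.pop()
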
class BaseTable(object):

	"""Generic base class for all OpenType (sub)tables."""

	def __init__(self):
		self.compileStatus = 0
		# 0 means the table was created;
		# 1 means the table.read() function was called by a table which is
		#   subject to delayed compilation;
		# 2 means that it was subject to delayed compilation, and has been
		#   decompiled;
		# 3 means that the start and end fields have been filled out, and
		#   that we can use the data string rather than compiling from the
		#   table data.

		self.recurse = 0

	def __getattr__(self, attr):
		# We get here only when the table does not have the attribute.
		# This method override exists so that we can try to de-compile
		# a table which is subject to delayed decompilation, and then try
		# to get the value again after decompilation.
		self.recurse += 1
		if self.recurse > 2:
			# Shouldn't ever get here - we should only get to two levels of recursion.
			# This guards against self.decompile NOT setting compileStatus to other than 1.
			raise AttributeError, attr
		if self.compileStatus == 1:
			# table.read() has been called, but the table has not yet been decompiled.
			# This happens only for extension tables.
			self.decompile(self.reader, self.font)
			val = getattr(self, attr)
			self.recurse -= 1
			return val

		raise AttributeError, attr

	def getConverters(self):
		return self.converters

	def getConverterByName(self, name):
		return self.convertersByName[name]

	def decompile(self, reader, font, tableStack=None):
		self.compileStatus = 2  # table has been decompiled.
		if tableStack is None:
			tableStack = TableStack()
		self.readFormat(reader)
		table = {}
		self.__rawTable = table  # for debugging
		tableStack.push(table)
		for conv in self.getConverters():
			if conv.name == "SubTable":
				conv = conv.getConverter(reader.tableType,
						table["LookupType"])
			if conv.name == "ExtSubTable":
				conv = conv.getConverter(reader.tableType,
						table["ExtensionLookupType"])
			if conv.repeat:
				l = []
				for i in range(tableStack.getValue(conv.repeat) + conv.repeatOffset):
					l.append(conv.read(reader, font, tableStack))
				table[conv.name] = l
			else:
				table[conv.name] = conv.read(reader, font, tableStack)
		tableStack.pop()
		self.postRead(table, font)
		del self.__rawTable  # succeeded, get rid of debugging info

	def preCompile(self):
		pass  # used only by the LookupList class

	def compile(self, writer, font, tableStack=None):
		if tableStack is None:
			tableStack = TableStack()
		table = self.preWrite(font)

		if hasattr(self, 'sortCoverageLast'):
			writer.sortCoverageLast = 1

		self.writeFormat(writer)
		tableStack.push(table)
		for conv in self.getConverters():
			value = table.get(conv.name)
			if conv.repeat:
				if value is None:
					value = []
				tableStack.storeValue(conv.repeat, len(value) - conv.repeatOffset)
				for i in range(len(value)):
					conv.write(writer, font, tableStack, value[i], i)
			elif conv.isCount:
				# Special-case Count values.
				# Assumption: a Count field will *always* precede
				# the actual array.
				# We need a default value, as it may be set later by a nested
				# table. TableStack.storeValue() will then find it here.
				table[conv.name] = None
				# We add a reference: by the time the data is assembled
				# the Count value will be filled in.
				writer.writeCountReference(table, conv.name)
			else:
				conv.write(writer, font, tableStack, value)
		tableStack.pop()
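
	# Comment-only walkthrough of the Count handling above: for a converter
	# list like [GlyphCount (isCount), GlyphArray (repeat="GlyphCount")],
	# compile() first writes a CountReference placeholder for GlyphCount and
	# stores None in the table dict; when the repeated GlyphArray converter
	# runs, tableStack.storeValue("GlyphCount", len(value) - conv.repeatOffset)
	# fills that slot in, and CountReference.getCountData() later packs the
	# final number during assembly. (The converter names here are illustrative.)
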
	def readFormat(self, reader):
		pass

	def writeFormat(self, writer):
		pass

	def postRead(self, table, font):
		self.__dict__.update(table)

	def preWrite(self, font):
		return self.__dict__.copy()

	def toXML(self, xmlWriter, font, attrs=None):
		tableName = self.__class__.__name__
		if attrs is None:
			attrs = []
		if hasattr(self, "Format"):
			attrs = attrs + [("Format", self.Format)]
		xmlWriter.begintag(tableName, attrs)
		xmlWriter.newline()
		self.toXML2(xmlWriter, font)
		xmlWriter.endtag(tableName)
		xmlWriter.newline()

	def toXML2(self, xmlWriter, font):
		# Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB).
		# This is because in TTX our parent writes our main tag, and in otBase.py we
		# do it ourselves. I think I'm getting schizophrenic...
		for conv in self.getConverters():
			value = getattr(self, conv.name)
			if conv.repeat:
				for i in range(len(value)):
					item = value[i]
					conv.xmlWrite(xmlWriter, font, item, conv.name,
							[("index", i)])
			else:
				conv.xmlWrite(xmlWriter, font, value, conv.name, [])

	def fromXML(self, (name, attrs, content), font):
		try:
			conv = self.getConverterByName(name)
		except KeyError:
			raise  # XXX on KeyError, raise nice error
		value = conv.xmlRead(attrs, content, font)
		if conv.repeat:
			seq = getattr(self, conv.name, None)
			if seq is None:
				seq = []
				setattr(self, conv.name, seq)
			seq.append(value)
		else:
			setattr(self, conv.name, value)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.__dict__, other.__dict__)


class FormatSwitchingBaseTable(BaseTable):

	"""Minor specialization of BaseTable, for tables that have multiple
	formats, e.g. CoverageFormat1 vs. CoverageFormat2."""

	def getConverters(self):
		return self.converters[self.Format]

	def getConverterByName(self, name):
		return self.convertersByName[self.Format][name]

	def readFormat(self, reader):
		self.Format = reader.readUShort()
		assert self.Format != 0, (self, reader.pos, len(reader.data))

	def writeFormat(self, writer):
		writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
#

valueRecordFormat = [
#	Mask	 Name		isDevice  signed
	(0x0001, "XPlacement",	0,	1),
	(0x0002, "YPlacement",	0,	1),
	(0x0004, "XAdvance",	0,	1),
	(0x0008, "YAdvance",	0,	1),
	(0x0010, "XPlaDevice",	1,	0),
	(0x0020, "YPlaDevice",	1,	0),
	(0x0040, "XAdvDevice",	1,	0),
	(0x0080, "YAdvDevice",	1,	0),
#	reserved:
	(0x0100, "Reserved1",	0,	0),
	(0x0200, "Reserved2",	0,	0),
	(0x0400, "Reserved3",	0,	0),
	(0x0800, "Reserved4",	0,	0),
	(0x1000, "Reserved5",	0,	0),
	(0x2000, "Reserved6",	0,	0),
	(0x4000, "Reserved7",	0,	0),
	(0x8000, "Reserved8",	0,	0),
]

def _buildDict():
	d = {}
	for mask, name, isDevice, signed in valueRecordFormat:
		d[name] = mask, isDevice, signed
	return d

valueRecordFormatDict = _buildDict()


class ValueRecordFactory:

	"""Given a format code, this object converts ValueRecords."""

	def setFormat(self, valueFormat):
		format = []
		for mask, name, isDevice, signed in valueRecordFormat:
			if valueFormat & mask:
				format.append((name, isDevice, signed))
		self.format = format
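
	# Comment-only example: a ValueFormat of 0x0005 selects XPlacement
	# (0x0001) and XAdvance (0x0004), so after
	#
	#	factory = ValueRecordFactory()
	#	factory.setFormat(0x0005)
	#
	# factory.format is [("XPlacement", 0, 1), ("XAdvance", 0, 1)], and
	# readValueRecord()/writeValueRecord() will handle exactly those two
	# signed 16-bit fields.
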
	def readValueRecord(self, reader, font):
		format = self.format
		if not format:
			return None
		valueRecord = ValueRecord()
		for name, isDevice, signed in format:
			if signed:
				value = reader.readShort()
			else:
				value = reader.readUShort()
			if isDevice:
				if value:
					import otTables
					subReader = reader.getSubReader(value)
					value = getattr(otTables, name)()
					value.decompile(subReader, font)
				else:
					value = None
			setattr(valueRecord, name, value)
		return valueRecord

	def writeValueRecord(self, writer, font, valueRecord):
		for name, isDevice, signed in self.format:
			value = getattr(valueRecord, name, 0)
			if isDevice:
				if value:
					subWriter = writer.getSubWriter()
					writer.writeSubTable(subWriter)
					value.compile(subWriter, font)
				else:
					writer.writeUShort(0)
			elif signed:
				writer.writeShort(value)
			else:
				writer.writeUShort(value)


class ValueRecord:

	# see ValueRecordFactory

	def getFormat(self):
		format = 0
		for name in self.__dict__.keys():
			format = format | valueRecordFormatDict[name][0]
		return format

	def toXML(self, xmlWriter, font, valueName, attrs=None):
		if attrs is None:
			simpleItems = []
		else:
			simpleItems = list(attrs)
		for mask, name, isDevice, signed in valueRecordFormat[:4]:  # "simple" values
			if hasattr(self, name):
				simpleItems.append((name, getattr(self, name)))
		deviceItems = []
		for mask, name, isDevice, signed in valueRecordFormat[4:8]:  # device records
			if hasattr(self, name):
				device = getattr(self, name)
				if device is not None:
					deviceItems.append((name, device))
		if deviceItems:
			xmlWriter.begintag(valueName, simpleItems)
			xmlWriter.newline()
			for name, deviceRecord in deviceItems:
				if deviceRecord is not None:
					deviceRecord.toXML(xmlWriter, font)
			xmlWriter.endtag(valueName)
			xmlWriter.newline()
		else:
			xmlWriter.simpletag(valueName, simpleItems)
			xmlWriter.newline()

	def fromXML(self, (name, attrs, content), font):
		import otTables
		for k, v in attrs.items():
			setattr(self, k, int(v))
		for element in content:
			if type(element) != TupleType:
				continue
			name, attrs, content = element
			value = getattr(otTables, name)()
			for elem2 in content:
				if type(elem2) != TupleType:
					continue
				value.fromXML(elem2, font)
			setattr(self, name, value)

	def __cmp__(self, other):
		if type(self) != type(other): return cmp(type(self), type(other))
		if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

		return cmp(self.__dict__, other.__dict__)
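
# Comment-only example: a ValueRecord whose instance dict holds XPlacement
# and XAdvance reports getFormat() == 0x0001 | 0x0004 == 0x0005, which is
# the ValueFormat a ValueRecordFactory would need to round-trip it.
#
#	vr = ValueRecord()
#	vr.XPlacement = -10
#	vr.XAdvance = 120
#	assert vr.getFormat() == 0x0005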