otBase.py revision 79f734414c2a8d6851fb1b3ec69287ff0e0077b9
from DefaultTable import DefaultTable
import otData
import struct
from types import TupleType

class OverflowErrorRecord:
    def __init__(self, overflowTuple):
        self.tableType = overflowTuple[0]
        self.LookupListIndex = overflowTuple[1]
        self.SubTableIndex = overflowTuple[2]
        self.itemName = overflowTuple[3]
        self.itemIndex = overflowTuple[4]

    def __repr__(self):
        return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex))

class OTLOffsetOverflowError(Exception):
    def __init__(self, overflowErrorRecord):
        self.value = overflowErrorRecord

    def __str__(self):
        return repr(self.value)


class BaseTTXConverter(DefaultTable):

    """Generic base class for TTX table converters. It functions as an
    adapter between the TTX (ttLib actually) table model and the model
    we use for OpenType tables, which is necessarily subtly different.
    """

    def decompile(self, data, font):
        import otTables
        cachingStats = None if True else {}
        class GlobalState:
            def __init__(self, tableType, cachingStats):
                self.tableType = tableType
                self.cachingStats = cachingStats
        globalState = GlobalState(tableType=self.tableTag,
                cachingStats=cachingStats)
        reader = OTTableReader(data, globalState)
        tableClass = getattr(otTables, self.tableTag)
        self.table = tableClass()
        self.table.decompile(reader, font)
        if cachingStats:
            stats = [(v, k) for k, v in cachingStats.items()]
            stats.sort()
            stats.reverse()
            print "caching stats for", self.tableTag
            for v, k in stats:
                if v < 2:
                    break
                print v, k
            print "---", len(stats)

    def compile(self, font):
        """Create a top-level OTTableWriter for the GPOS/GSUB table.
        Call the compile method for the table
            for each 'converter' record in the table converter list
                call the converter's write method for each item in the value.
                    - For simple items, the write method adds a string to the
                      writer's self.items list.
                    - For Struct/Table/Subtable items, it first adds a new writer
                      to the writer's self.items, then calls the item's compile method.
                      This creates a tree of writers, rooted at the GSUB/GPOS writer, with
                      each writer representing a table, and the writer.items list containing
                      the child data strings and writers.
        Call the getAllData method
            call _doneWriting, which removes duplicates
            call _gatherTables, which traverses the tables, adding unique occurrences to a flat list of tables
            traverse the flat list of tables, calling getDataLength on each to update its position
            traverse the flat list of tables again, calling getData on each to get its data, now that
            positions and offsets are known.

            If a lookup subtable overflows an offset, we have to start all over.
76 """ 77 class GlobalState: 78 def __init__(self, tableType): 79 self.tableType = tableType 80 globalState = GlobalState(tableType=self.tableTag) 81 writer = OTTableWriter(globalState) 82 writer.parent = None 83 self.table.compile(writer, font) 84 return writer.getAllData() 85 86 def toXML(self, writer, font): 87 self.table.toXML2(writer, font) 88 89 def fromXML(self, (name, attrs, content), font): 90 import otTables 91 if not hasattr(self, "table"): 92 tableClass = getattr(otTables, self.tableTag) 93 self.table = tableClass() 94 self.table.fromXML((name, attrs, content), font) 95 96 97class OTTableReader(object): 98 99 """Helper class to retrieve data from an OpenType table.""" 100 101 __slots__ = ('data', 'offset', 'pos', 'globalState', 'localState') 102 103 def __init__(self, data, globalState={}, localState=None, offset=0): 104 self.data = data 105 self.offset = offset 106 self.pos = offset 107 self.globalState = globalState 108 self.localState = localState 109 110 def getSubReader(self, offset): 111 offset = self.offset + offset 112 cachingStats = self.globalState.cachingStats 113 if cachingStats is not None: 114 cachingStats[offset] = cachingStats.get(offset, 0) + 1 115 return self.__class__(self.data, self.globalState, self.localState, offset) 116 117 def readUShort(self): 118 pos = self.pos 119 newpos = pos + 2 120 value, = struct.unpack(">H", self.data[pos:newpos]) 121 self.pos = newpos 122 return value 123 124 def readShort(self): 125 pos = self.pos 126 newpos = pos + 2 127 value, = struct.unpack(">h", self.data[pos:newpos]) 128 self.pos = newpos 129 return value 130 131 def readLong(self): 132 pos = self.pos 133 newpos = pos + 4 134 value, = struct.unpack(">l", self.data[pos:newpos]) 135 self.pos = newpos 136 return value 137 138 def readULong(self): 139 pos = self.pos 140 newpos = pos + 4 141 value, = struct.unpack(">L", self.data[pos:newpos]) 142 self.pos = newpos 143 return value 144 145 def readTag(self): 146 pos = self.pos 147 newpos = pos + 4 148 value = self.data[pos:newpos] 149 assert len(value) == 4 150 self.pos = newpos 151 return value 152 153 def __setitem__(self, name, value): 154 state = self.localState.copy() if self.localState else dict() 155 state[name] = value 156 self.localState = state 157 158 def __getitem__(self, name): 159 return self.localState[name] 160 161 162class OTTableWriter(object): 163 164 """Helper class to gather and assemble data for OpenType tables.""" 165 166 def __init__(self, globalState, localState=None): 167 self.items = [] 168 self.pos = None 169 self.globalState = globalState 170 self.localState = localState 171 172 def __setitem__(self, name, value): 173 state = self.localState.copy() if self.localState else dict() 174 state[name] = value 175 self.localState = state 176 177 def __getitem__(self, name): 178 return self.localState[name] 179 180 # assembler interface 181 182 def getAllData(self): 183 """Assemble all data, including all subtables.""" 184 self._doneWriting() 185 tables, extTables = self._gatherTables() 186 tables.reverse() 187 extTables.reverse() 188 # Gather all data in two passes: the absolute positions of all 189 # subtable are needed before the actual data can be assembled. 
        pos = 0
        for table in tables:
            table.pos = pos
            pos = pos + table.getDataLength()

        for table in extTables:
            table.pos = pos
            pos = pos + table.getDataLength()

        data = []
        for table in tables:
            tableData = table.getData()
            data.append(tableData)

        for table in extTables:
            tableData = table.getData()
            data.append(tableData)

        return "".join(data)

    def getDataLength(self):
        """Return the length of this table in bytes, without subtables."""
        l = 0
        for item in self.items:
            if hasattr(item, "getData") or hasattr(item, "getCountData"):
                if item.longOffset:
                    l = l + 4  # sizeof(ULong)
                else:
                    l = l + 2  # sizeof(UShort)
            else:
                l = l + len(item)
        return l

    def getData(self):
        """Assemble the data for this writer/table, without subtables."""
        items = list(self.items)  # make a shallow copy
        pos = self.pos
        numItems = len(items)
        for i in range(numItems):
            item = items[i]

            if hasattr(item, "getData"):
                if item.longOffset:
                    items[i] = packULong(item.pos - pos)
                else:
                    try:
                        items[i] = packUShort(item.pos - pos)
                    except AssertionError:
                        # Provide data to fix the overflow problem.
                        # If the overflow is to a lookup, or from a lookup to a subtable,
                        # just report the current item.
                        if self.name in ['LookupList', 'Lookup']:
                            overflowErrorRecord = self.getOverflowErrorRecord(item)
                        else:
                            # The overflow is within a subtable. Life is more complicated.
                            # If we split the subtable just before the current item, we may still suffer overflow.
                            # This is because duplicate table merging is done only within an Extension subtable tree;
                            # when we split the subtable in two, some items may no longer be duplicates.
                            # Get the worst case by adding up all the item lengths in a depth-first traversal,
                            # and then report the first item that overflows a short.
                            def getDeepItemLength(table):
                                if hasattr(table, "getDataLength"):
                                    length = 0
                                    for item in table.items:
                                        length = length + getDeepItemLength(item)
                                else:
                                    length = len(table)
                                return length

                            length = self.getDataLength()
                            if hasattr(self, "sortCoverageLast") and item.name == "Coverage":
                                # Coverage is first in the item list, but last in the table list.
                                # The original overflow is really in the item list. Skip the Coverage
                                # table in the following test.
                                items = items[i+1:]

                            for j in range(len(items)):
                                item = items[j]
                                length = length + getDeepItemLength(item)
                                if length > 65535:
                                    break
                            overflowErrorRecord = self.getOverflowErrorRecord(item)

                        raise OTLOffsetOverflowError, overflowErrorRecord

        return "".join(items)

    def __hash__(self):
        # only works after self._doneWriting() has been called
        return hash(self.items)

    def __cmp__(self, other):
        if type(self) != type(other): return cmp(type(self), type(other))
        if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

        return cmp(self.items, other.items)

    def _doneWriting(self, internedTables=None):
        # Convert CountData references to data string items;
        # collapse duplicate table references to a unique entry.
        # "tables" are OTTableWriter objects.

        # For Extension Lookup types, we can
        # eliminate duplicates only within the tree under the Extension Lookup,
        # as offsets may exceed 64K even between Extension LookupTable subtables.
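        # Descriptive note (not in the original source): interning works
        # because, once a writer's items are frozen into a tuple below,
        # __hash__ and __cmp__ compare by content. Two writers holding
        # byte-identical data (say, two identical Coverage tables) look up as
        # the same key in internedTables, and the later reference is
        # redirected to the writer that was seen first.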
        if internedTables is None:
            internedTables = {}
        items = self.items
        iRange = range(len(items))

        if hasattr(self, "Extension"):
            newTree = 1
        else:
            newTree = 0
        for i in iRange:
            item = items[i]
            if hasattr(item, "getCountData"):
                items[i] = item.getCountData()
            elif hasattr(item, "getData"):
                if newTree:
                    item._doneWriting()
                else:
                    item._doneWriting(internedTables)
                    if internedTables.has_key(item):
                        items[i] = item = internedTables[item]
                    else:
                        internedTables[item] = item
        self.items = tuple(items)

    def _gatherTables(self, tables=None, extTables=None, done=None):
        # Convert table references in the self.items tree to a flat
        # list of tables in depth-first traversal order.
        # "tables" are OTTableWriter objects.
        # We do the traversal in reverse order at each level, in order to
        # resolve duplicate references to be the last reference in the list of tables.
        # For extension lookups, duplicate references can be merged only within the
        # writer tree under the extension lookup.
        if tables is None:  # init call for the first time.
            tables = []
            extTables = []
            done = {}

        done[self] = 1

        numItems = len(self.items)
        iRange = range(numItems)
        iRange.reverse()

        if hasattr(self, "Extension"):
            appendExtensions = 1
        else:
            appendExtensions = 0

        # add the Coverage table if it is sorted last.
        sortCoverageLast = 0
        if hasattr(self, "sortCoverageLast"):
            # Find the Coverage table
            for i in range(numItems):
                item = self.items[i]
                if hasattr(item, "name") and (item.name == "Coverage"):
                    sortCoverageLast = 1
                    break
            if not done.has_key(item):
                item._gatherTables(tables, extTables, done)
            else:
                index = max(item.parent.keys())
                item.parent[index + 1] = self

        saveItem = None
        for i in iRange:
            item = self.items[i]
            if not hasattr(item, "getData"):
                continue

            if sortCoverageLast and (i==1) and item.name == 'Coverage':
                # we've already 'gathered' it above
                continue

            if appendExtensions:
                assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extension subtables."
                newDone = {}
                item._gatherTables(extTables, None, newDone)

            elif not done.has_key(item):
                item._gatherTables(tables, extTables, done)
            else:
                index = max(item.parent.keys())
                item.parent[index + 1] = self

        tables.append(self)
        return tables, extTables

    # interface for gathering data, as used by table.compile()

    def getSubWriter(self):
        subwriter = self.__class__(self.globalState, self.localState)
        subwriter.parent = {0:self}  # because some subtables have identical values, we discard
                                     # the duplicates under the getAllData method. Hence some
                                     # subtable writers can have more than one parent writer.
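        # (When _gatherTables() later meets a duplicate reference to this
        # subwriter, it records the extra referencing parent under keys
        # 1, 2, ... via item.parent[index + 1] = self.)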
        return subwriter

    def writeUShort(self, value):
        assert 0 <= value < 0x10000
        self.items.append(struct.pack(">H", value))

    def writeShort(self, value):
        self.items.append(struct.pack(">h", value))

    def writeLong(self, value):
        self.items.append(struct.pack(">l", value))

    def writeULong(self, value):
        self.items.append(struct.pack(">L", value))

    def writeTag(self, tag):
        assert len(tag) == 4
        self.items.append(tag)

    def writeSubTable(self, subWriter):
        self.items.append(subWriter)

    def writeCountReference(self, table, name):
        ref = CountReference(table, name)
        self.items.append(ref)
        return ref

    def writeStruct(self, format, values):
        data = apply(struct.pack, (format,) + values)
        self.items.append(data)

    def writeData(self, data):
        self.items.append(data)

    def getOverflowErrorRecord(self, item):
        LookupListIndex = SubTableIndex = itemName = itemIndex = None
        if self.name == 'LookupList':
            LookupListIndex = item.repeatIndex
        elif self.name == 'Lookup':
            LookupListIndex = self.repeatIndex
            SubTableIndex = item.repeatIndex
        else:
            itemName = item.name
            if hasattr(item, 'repeatIndex'):
                itemIndex = item.repeatIndex
            if self.name == 'SubTable':
                LookupListIndex = self.parent[0].repeatIndex
                SubTableIndex = self.repeatIndex
            elif self.name == 'ExtSubTable':
                LookupListIndex = self.parent[0].parent[0].repeatIndex
                SubTableIndex = self.parent[0].repeatIndex
            else:  # who knows how far below the SubTable level we are! Climb back up to the nearest subtable.
                itemName = ".".join([self.name, item.name])
                p1 = self.parent[0]
                while p1 and p1.name not in ['ExtSubTable', 'SubTable']:
                    itemName = ".".join([p1.name, itemName])
                    p1 = p1.parent[0]
                if p1:
                    if p1.name == 'ExtSubTable':
                        LookupListIndex = p1.parent[0].parent[0].repeatIndex
                        SubTableIndex = p1.parent[0].repeatIndex
                    else:
                        LookupListIndex = p1.parent[0].repeatIndex
                        SubTableIndex = p1.repeatIndex

        return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) )


class CountReference:
    """A reference to a Count value, not a count of references."""
    def __init__(self, table, name):
        self.table = table
        self.name = name
    def setValue(self, value):
        table = self.table
        name = self.name
        if table[name] is None:
            table[name] = value
        else:
            assert table[name] == value, (table[name], value)
    def getCountData(self):
        return packUShort(self.table[self.name])


def packUShort(value):
    assert 0 <= value < 0x10000, value
    return struct.pack(">H", value)


def packULong(value):
    assert 0 <= value < 0x100000000, value
    return struct.pack(">L", value)


class BaseTable(object):
    def __init__(self):
        self.compileStatus = 0  # 0 means the table was created
                                # 1 means the table.read() function was called by a table which is subject
                                #   to delayed compilation
                                # 2 means that it was subject to delayed compilation, and
                                #   has been decompiled

        self.recurse = 0

    def __getattr__(self, attr):
        # We get here only when the table does not have the attribute.
        # This method override exists so that we can try to decompile
        # a table which is subject to delayed decompilation, and then try
        # to get the value again after decompilation.
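        # compileStatus == 1 means decompilation was deferred: the originating
        # converter is expected to have stashed self.reader, self.font and
        # self.offset, which ensureDecompiled() uses to run the real
        # decompile() on demand.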
        self.recurse += 1
        if self.recurse > 2:
            # Shouldn't ever get here - we should only get to two levels of recursion.
            # This guards against self.decompile NOT setting compileStatus to other than 1.
            raise AttributeError, attr
        if self.compileStatus == 1:
            self.ensureDecompiled()
            val = getattr(self, attr)
            self.recurse -= 1
            return val

        raise AttributeError, attr


    """Generic base class for all OpenType (sub)tables."""

    def getConverters(self):
        return self.converters

    def getConverterByName(self, name):
        return self.convertersByName[name]

    def decompile(self, reader, font):
        self.compileStatus = 2  # table has been decompiled.
        self.readFormat(reader)
        table = {}
        self.__rawTable = table  # for debugging
        converters = self.getConverters()
        for conv in converters:
            if conv.name == "SubTable":
                conv = conv.getConverter(reader.globalState.tableType,
                        table["LookupType"])
            if conv.name == "ExtSubTable":
                conv = conv.getConverter(reader.globalState.tableType,
                        table["ExtensionLookupType"])
            if conv.repeat:
                l = []
                if conv.repeat in table:
                    countValue = table[conv.repeat]
                else:
                    # conv.repeat is a propagated count
                    countValue = reader[conv.repeat]
                for i in range(countValue + conv.aux):
                    l.append(conv.read(reader, font, table))
                table[conv.name] = l
            else:
                if conv.aux and not eval(conv.aux, None, table):
                    continue
                table[conv.name] = conv.read(reader, font, table)
                if conv.isPropagatedCount:
                    reader[conv.name] = table[conv.name]

        self.postRead(table, font)

        del self.__rawTable  # succeeded, get rid of debugging info

    def ensureDecompiled(self):
        if self.compileStatus != 1:
            return
        subReader = self.reader.getSubReader(self.offset)
        self.decompile(subReader, self.font)
        del self.reader, self.font, self.offset

    def compile(self, writer, font):
        table = self.preWrite(font)

        if hasattr(self, 'sortCoverageLast'):
            writer.sortCoverageLast = 1

        self.writeFormat(writer)
        for conv in self.getConverters():
            value = table.get(conv.name)
            if conv.repeat:
                if value is None:
                    value = []
                countValue = len(value) - conv.aux
                if conv.repeat in table:
                    ref = table[conv.repeat]
                    table[conv.repeat] = None
                    ref.setValue(countValue)
                else:
                    # conv.repeat is a propagated count
                    writer[conv.repeat].setValue(countValue)
                for i in range(len(value)):
                    conv.write(writer, font, table, value[i], i)
            elif conv.isCount:
                # Special-case Count values.
                # Assumption: a Count field will *always* precede
                # the actual array(s).
                # We need a default value, as it may be set later by a nested
                # table. We will later store it here.
                # We add a reference: by the time the data is assembled
                # the Count value will be filled in.
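                # Illustration (names are examples, not taken from this file):
                # a "GlyphCount" field is written here as a CountReference
                # placeholder; when the converter for the array that repeats
                # over it runs through the conv.repeat branch above, it calls
                # ref.setValue() with the array length, and getAllData()
                # (via _doneWriting) later packs the number via getCountData().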
                ref = writer.writeCountReference(table, conv.name)
                if conv.isPropagatedCount:
                    table[conv.name] = None
                    writer[conv.name] = ref
                else:
                    table[conv.name] = ref
            else:
                if conv.aux and not eval(conv.aux, None, table):
                    continue
                conv.write(writer, font, table, value)

    def readFormat(self, reader):
        pass

    def writeFormat(self, writer):
        pass

    def postRead(self, table, font):
        self.__dict__.update(table)

    def preWrite(self, font):
        self.ensureDecompiled()
        return self.__dict__.copy()

    def toXML(self, xmlWriter, font, attrs=None):
        tableName = self.__class__.__name__
        if attrs is None:
            attrs = []
        if hasattr(self, "Format"):
            attrs = attrs + [("Format", self.Format)]
        xmlWriter.begintag(tableName, attrs)
        xmlWriter.newline()
        self.toXML2(xmlWriter, font)
        xmlWriter.endtag(tableName)
        xmlWriter.newline()

    def toXML2(self, xmlWriter, font):
        # Simpler variant of toXML, *only* for the top-level tables (like GPOS, GSUB).
        # This is because in TTX our parent writes our main tag, and in otBase.py we
        # do it ourselves. I think I'm getting schizophrenic...
        for conv in self.getConverters():
            if conv.repeat:
                value = getattr(self, conv.name)
                for i in range(len(value)):
                    item = value[i]
                    conv.xmlWrite(xmlWriter, font, item, conv.name,
                            [("index", i)])
            else:
                if conv.aux and not eval(conv.aux, None, vars(self)):
                    continue
                value = getattr(self, conv.name)
                conv.xmlWrite(xmlWriter, font, value, conv.name, [])

    def fromXML(self, (name, attrs, content), font):
        try:
            conv = self.getConverterByName(name)
        except KeyError:
            raise  # XXX on KeyError, raise nice error
        value = conv.xmlRead(attrs, content, font)
        if conv.repeat:
            seq = getattr(self, conv.name, None)
            if seq is None:
                seq = []
                setattr(self, conv.name, seq)
            seq.append(value)
        else:
            setattr(self, conv.name, value)

    def __cmp__(self, other):
        if type(self) != type(other): return cmp(type(self), type(other))
        if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

        self.ensureDecompiled()

        return cmp(self.__dict__, other.__dict__)


class FormatSwitchingBaseTable(BaseTable):

    """Minor specialization of BaseTable, for tables that have multiple
    formats, e.g. CoverageFormat1 vs. CoverageFormat2."""

    def getConverters(self):
        return self.converters[self.Format]

    def getConverterByName(self, name):
        return self.convertersByName[self.Format][name]

    def readFormat(self, reader):
        self.Format = reader.readUShort()
        assert self.Format != 0, (self, reader.pos, len(reader.data))

    def writeFormat(self, writer):
        writer.writeUShort(self.Format)


#
# Support for ValueRecords
#
# This data type is so different from all other OpenType data types that
# it requires quite a bit of code for itself. It even has special support
# in OTTableReader and OTTableWriter...
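#
# A ValueRecord's layout is driven by a ValueFormat bitmask (see the
# valueRecordFormat table below). For example, a ValueFormat of 0x0005
# (XPlacement | XAdvance) selects exactly two signed 16-bit fields, so each
# ValueRecord in that context occupies four bytes.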
#

valueRecordFormat = [
#    Mask     Name           isDevice  signed
    (0x0001, "XPlacement",   0,        1),
    (0x0002, "YPlacement",   0,        1),
    (0x0004, "XAdvance",     0,        1),
    (0x0008, "YAdvance",     0,        1),
    (0x0010, "XPlaDevice",   1,        0),
    (0x0020, "YPlaDevice",   1,        0),
    (0x0040, "XAdvDevice",   1,        0),
    (0x0080, "YAdvDevice",   1,        0),
#   reserved:
    (0x0100, "Reserved1",    0,        0),
    (0x0200, "Reserved2",    0,        0),
    (0x0400, "Reserved3",    0,        0),
    (0x0800, "Reserved4",    0,        0),
    (0x1000, "Reserved5",    0,        0),
    (0x2000, "Reserved6",    0,        0),
    (0x4000, "Reserved7",    0,        0),
    (0x8000, "Reserved8",    0,        0),
]

def _buildDict():
    d = {}
    for mask, name, isDevice, signed in valueRecordFormat:
        d[name] = mask, isDevice, signed
    return d

valueRecordFormatDict = _buildDict()


class ValueRecordFactory:

    """Given a format code, this object converts ValueRecords."""

    def __init__(self, valueFormat):
        format = []
        for mask, name, isDevice, signed in valueRecordFormat:
            if valueFormat & mask:
                format.append((name, isDevice, signed))
        self.format = format

    def readValueRecord(self, reader, font):
        format = self.format
        if not format:
            return None
        valueRecord = ValueRecord()
        for name, isDevice, signed in format:
            if signed:
                value = reader.readShort()
            else:
                value = reader.readUShort()
            if isDevice:
                if value:
                    import otTables
                    subReader = reader.getSubReader(value)
                    value = getattr(otTables, name)()
                    value.decompile(subReader, font)
                else:
                    value = None
            setattr(valueRecord, name, value)
        return valueRecord

    def writeValueRecord(self, writer, font, valueRecord):
        for name, isDevice, signed in self.format:
            value = getattr(valueRecord, name, 0)
            if isDevice:
                if value:
                    subWriter = writer.getSubWriter()
                    writer.writeSubTable(subWriter)
                    value.compile(subWriter, font)
                else:
                    writer.writeUShort(0)
            elif signed:
                writer.writeShort(value)
            else:
                writer.writeUShort(value)


class ValueRecord:

    # see ValueRecordFactory

    def getFormat(self):
        format = 0
        for name in self.__dict__.keys():
            format = format | valueRecordFormatDict[name][0]
        return format

    def toXML(self, xmlWriter, font, valueName, attrs=None):
        if attrs is None:
            simpleItems = []
        else:
            simpleItems = list(attrs)
        for mask, name, isDevice, format in valueRecordFormat[:4]:  # "simple" values
            if hasattr(self, name):
                simpleItems.append((name, getattr(self, name)))
        deviceItems = []
        for mask, name, isDevice, format in valueRecordFormat[4:8]:  # device records
            if hasattr(self, name):
                device = getattr(self, name)
                if device is not None:
                    deviceItems.append((name, device))
        if deviceItems:
            xmlWriter.begintag(valueName, simpleItems)
            xmlWriter.newline()
            for name, deviceRecord in deviceItems:
                if deviceRecord is not None:
                    deviceRecord.toXML(xmlWriter, font)
            xmlWriter.endtag(valueName)
            xmlWriter.newline()
        else:
            xmlWriter.simpletag(valueName, simpleItems)
            xmlWriter.newline()

    def fromXML(self, (name, attrs, content), font):
        import otTables
        for k, v in attrs.items():
            setattr(self, k, int(v))
        for element in content:
            if type(element) != TupleType:
                continue
            name, attrs, content = element
            value = getattr(otTables, name)()
            for elem2 in content:
                if type(elem2) != TupleType:
                    continue
                value.fromXML(elem2, font)
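            # attach the decompiled Device table (e.g. an XPlaDevice element)
            # to this ValueRecord under the element's own name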
            setattr(self, name, value)

    def __cmp__(self, other):
        if type(self) != type(other): return cmp(type(self), type(other))
        if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__)

        return cmp(self.__dict__, other.__dict__)
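

# Usage sketch (comment only, not part of the module's API): how the pieces
# above fit together when writing a plain ValueRecord by hand. The _State
# class below is a stand-in for the GlobalState objects that BaseTTXConverter
# builds internally; everything else uses the classes defined in this file.
#
#     factory = ValueRecordFactory(0x0005)  # XPlacement | XAdvance
#     vr = ValueRecord()
#     vr.XPlacement = -20
#     vr.XAdvance = 15
#
#     class _State(object):
#         tableType = "GPOS"
#     writer = OTTableWriter(_State())
#     writer.parent = None
#     factory.writeValueRecord(writer, None, vr)  # font is unused for non-device values
#     data = writer.getAllData()  # -> '\xff\xec\x00\x0f'
#
# readValueRecord() is the inverse: given an OTTableReader over those bytes
# and a factory built from the same ValueFormat, it rebuilds the ValueRecord.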