otBase.py revision 3ac9e63fce920ca3cf12f53e067641849c9cdbb4
1from DefaultTable import DefaultTable 2import otData 3import struct 4from types import TupleType 5 6class OverflowErrorRecord: 7 def __init__(self, overflowTuple): 8 self.tableType = overflowTuple[0] 9 self.LookupListIndex = overflowTuple[1] 10 self.SubTableIndex = overflowTuple[2] 11 self.itemName = overflowTuple[3] 12 self.itemIndex = overflowTuple[4] 13 14 def __repr__(self): 15 return str((self.tableType, "LookupIndex:", self.LookupListIndex, "SubTableIndex:", self.SubTableIndex, "ItemName:", self.itemName, "ItemIndex:", self.itemIndex)) 16 17class OTLOffsetOverflowError(Exception): 18 def __init__(self, overflowErrorRecord): 19 self.value = overflowErrorRecord 20 21 def __str__(self): 22 return repr(self.value) 23 24 25class BaseTTXConverter(DefaultTable): 26 27 """Generic base class for TTX table converters. It functions as an 28 adapter between the TTX (ttLib actually) table model and the model 29 we use for OpenType tables, which is necessarily subtly different. 30 """ 31 32 def decompile(self, data, font): 33 import otTables 34 cachingStats = None if True else {} 35 class GlobalState: 36 def __init__(self, tableType, cachingStats): 37 self.tableType = tableType 38 self.cachingStats = cachingStats 39 globalState = GlobalState(tableType=self.tableTag, 40 cachingStats=cachingStats) 41 reader = OTTableReader(data, globalState) 42 tableClass = getattr(otTables, self.tableTag) 43 self.table = tableClass() 44 self.table.decompile(reader, font) 45 if cachingStats: 46 stats = [(v, k) for k, v in cachingStats.items()] 47 stats.sort() 48 stats.reverse() 49 print "cachingsstats for ", self.tableTag 50 for v, k in stats: 51 if v < 2: 52 break 53 print v, k 54 print "---", len(stats) 55 56 def compile(self, font): 57 """ Create a top-level OTFWriter for the GPOS/GSUB table. 58 Call the compile method for the the table 59 for each 'converter' record in the table converter list 60 call converter's write method for each item in the value. 61 - For simple items, the write method adds a string to the 62 writer's self.items list. 63 - For Struct/Table/Subtable items, it add first adds new writer to the 64 to the writer's self.items, then calls the item's compile method. 65 This creates a tree of writers, rooted at the GUSB/GPOS writer, with 66 each writer representing a table, and the writer.items list containing 67 the child data strings and writers. 68 call the getAllData method 69 call _doneWriting, which removes duplicates 70 call _gatherTables. This traverses the tables, adding unique occurences to a flat list of tables 71 Traverse the flat list of tables, calling getDataLength on each to update their position 72 Traverse the flat list of tables again, calling getData each get the data in the table, now that 73 pos's and offset are known. 74 75 If a lookup subtable overflows an offset, we have to start all over. 76 """ 77 class GlobalState: 78 def __init__(self, tableType): 79 self.tableType = tableType 80 globalState = GlobalState(tableType=self.tableTag) 81 writer = OTTableWriter(globalState) 82 writer.parent = None 83 self.table.compile(writer, font) 84 return writer.getAllData() 85 86 def toXML(self, writer, font): 87 self.table.toXML2(writer, font) 88 89 def fromXML(self, (name, attrs, content), font): 90 import otTables 91 if not hasattr(self, "table"): 92 tableClass = getattr(otTables, self.tableTag) 93 self.table = tableClass() 94 self.table.fromXML((name, attrs, content), font) 95 96 97class OTTableReader(object): 98 99 """Helper class to retrieve data from an OpenType table.""" 100 101 __slots__ = ('data', 'offset', 'pos', 'globalState', 'localState') 102 103 def __init__(self, data, globalState={}, localState=None, offset=0): 104 self.data = data 105 self.offset = offset 106 self.pos = offset 107 self.globalState = globalState 108 self.localState = localState 109 110 def getSubReader(self, offset): 111 offset = self.offset + offset 112 cachingStats = self.globalState.cachingStats 113 if cachingStats is not None: 114 cachingStats[offset] = cachingStats.get(offset, 0) + 1 115 return self.__class__(self.data, self.globalState, self.localState, offset) 116 117 def readUShort(self): 118 pos = self.pos 119 newpos = pos + 2 120 value, = struct.unpack(">H", self.data[pos:newpos]) 121 self.pos = newpos 122 return value 123 124 def readShort(self): 125 pos = self.pos 126 newpos = pos + 2 127 value, = struct.unpack(">h", self.data[pos:newpos]) 128 self.pos = newpos 129 return value 130 131 def readLong(self): 132 pos = self.pos 133 newpos = pos + 4 134 value, = struct.unpack(">l", self.data[pos:newpos]) 135 self.pos = newpos 136 return value 137 138 def readUInt24(self): 139 pos = self.pos 140 newpos = pos + 3 141 value = (ord(self.data[pos]) << 16) | (ord(self.data[pos+1]) << 8) | ord(self.data[pos+2]) 142 value, = struct.unpack(">H", self.data[pos:newpos]) 143 self.pos = newpos 144 return value 145 146 def readULong(self): 147 pos = self.pos 148 newpos = pos + 4 149 value, = struct.unpack(">L", self.data[pos:newpos]) 150 self.pos = newpos 151 return value 152 153 def readTag(self): 154 pos = self.pos 155 newpos = pos + 4 156 value = self.data[pos:newpos] 157 assert len(value) == 4 158 self.pos = newpos 159 return value 160 161 def __setitem__(self, name, value): 162 state = self.localState.copy() if self.localState else dict() 163 state[name] = value 164 self.localState = state 165 166 def __getitem__(self, name): 167 return self.localState[name] 168 169 170class OTTableWriter(object): 171 172 """Helper class to gather and assemble data for OpenType tables.""" 173 174 def __init__(self, globalState, localState=None): 175 self.items = [] 176 self.pos = None 177 self.globalState = globalState 178 self.localState = localState 179 180 def __setitem__(self, name, value): 181 state = self.localState.copy() if self.localState else dict() 182 state[name] = value 183 self.localState = state 184 185 def __getitem__(self, name): 186 return self.localState[name] 187 188 # assembler interface 189 190 def getAllData(self): 191 """Assemble all data, including all subtables.""" 192 self._doneWriting() 193 tables, extTables = self._gatherTables() 194 tables.reverse() 195 extTables.reverse() 196 # Gather all data in two passes: the absolute positions of all 197 # subtable are needed before the actual data can be assembled. 198 pos = 0 199 for table in tables: 200 table.pos = pos 201 pos = pos + table.getDataLength() 202 203 for table in extTables: 204 table.pos = pos 205 pos = pos + table.getDataLength() 206 207 208 data = [] 209 for table in tables: 210 tableData = table.getData() 211 data.append(tableData) 212 213 for table in extTables: 214 tableData = table.getData() 215 data.append(tableData) 216 217 return "".join(data) 218 219 def getDataLength(self): 220 """Return the length of this table in bytes, without subtables.""" 221 l = 0 222 for item in self.items: 223 if hasattr(item, "getData") or hasattr(item, "getCountData"): 224 if item.longOffset: 225 l = l + 4 # sizeof(ULong) 226 else: 227 l = l + 2 # sizeof(UShort) 228 else: 229 l = l + len(item) 230 return l 231 232 def getData(self): 233 """Assemble the data for this writer/table, without subtables.""" 234 items = list(self.items) # make a shallow copy 235 pos = self.pos 236 numItems = len(items) 237 for i in range(numItems): 238 item = items[i] 239 240 if hasattr(item, "getData"): 241 if item.longOffset: 242 items[i] = packULong(item.pos - pos) 243 else: 244 try: 245 items[i] = packUShort(item.pos - pos) 246 except AssertionError: 247 # provide data to fix overflow problem. 248 # If the overflow is to a lookup, or from a lookup to a subtable, 249 # just report the current item. 250 if self.name in [ 'LookupList', 'Lookup']: 251 overflowErrorRecord = self.getOverflowErrorRecord(item) 252 else: 253 # overflow is within a subTable. Life is more complicated. 254 # If we split the sub-table just before the current item, we may still suffer overflow. 255 # This is because duplicate table merging is done only within an Extension subTable tree; 256 # when we split the subtable in two, some items may no longer be duplicates. 257 # Get worst case by adding up all the item lengths, depth first traversal. 258 # and then report the first item that overflows a short. 259 def getDeepItemLength(table): 260 if hasattr(table, "getDataLength"): 261 length = 0 262 for item in table.items: 263 length = length + getDeepItemLength(item) 264 else: 265 length = len(table) 266 return length 267 268 length = self.getDataLength() 269 if hasattr(self, "sortCoverageLast") and item.name == "Coverage": 270 # Coverage is first in the item list, but last in the table list, 271 # The original overflow is really in the item list. Skip the Coverage 272 # table in the following test. 273 items = items[i+1:] 274 275 for j in range(len(items)): 276 item = items[j] 277 length = length + getDeepItemLength(item) 278 if length > 65535: 279 break 280 overflowErrorRecord = self.getOverflowErrorRecord(item) 281 282 283 raise OTLOffsetOverflowError, overflowErrorRecord 284 285 return "".join(items) 286 287 def __hash__(self): 288 # only works after self._doneWriting() has been called 289 return hash(self.items) 290 291 def __cmp__(self, other): 292 if type(self) != type(other): return cmp(type(self), type(other)) 293 if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__) 294 295 return cmp(self.items, other.items) 296 297 def _doneWriting(self, internedTables=None): 298 # Convert CountData references to data string items 299 # collapse duplicate table references to a unique entry 300 # "tables" are OTTableWriter objects. 301 302 # For Extension Lookup types, we can 303 # eliminate duplicates only within the tree under the Extension Lookup, 304 # as offsets may exceed 64K even between Extension LookupTable subtables. 305 if internedTables is None: 306 internedTables = {} 307 items = self.items 308 iRange = range(len(items)) 309 310 if hasattr(self, "Extension"): 311 newTree = 1 312 else: 313 newTree = 0 314 for i in iRange: 315 item = items[i] 316 if hasattr(item, "getCountData"): 317 items[i] = item.getCountData() 318 elif hasattr(item, "getData"): 319 if newTree: 320 item._doneWriting() 321 else: 322 item._doneWriting(internedTables) 323 if internedTables.has_key(item): 324 items[i] = item = internedTables[item] 325 else: 326 internedTables[item] = item 327 self.items = tuple(items) 328 329 def _gatherTables(self, tables=None, extTables=None, done=None): 330 # Convert table references in self.items tree to a flat 331 # list of tables in depth-first traversal order. 332 # "tables" are OTTableWriter objects. 333 # We do the traversal in reverse order at each level, in order to 334 # resolve duplicate references to be the last reference in the list of tables. 335 # For extension lookups, duplicate references can be merged only within the 336 # writer tree under the extension lookup. 337 if tables is None: # init call for first time. 338 tables = [] 339 extTables = [] 340 done = {} 341 342 done[self] = 1 343 344 numItems = len(self.items) 345 iRange = range(numItems) 346 iRange.reverse() 347 348 if hasattr(self, "Extension"): 349 appendExtensions = 1 350 else: 351 appendExtensions = 0 352 353 # add Coverage table if it is sorted last. 354 sortCoverageLast = 0 355 if hasattr(self, "sortCoverageLast"): 356 # Find coverage table 357 for i in range(numItems): 358 item = self.items[i] 359 if hasattr(item, "name") and (item.name == "Coverage"): 360 sortCoverageLast = 1 361 break 362 if not done.has_key(item): 363 item._gatherTables(tables, extTables, done) 364 else: 365 index = max(item.parent.keys()) 366 item.parent[index + 1] = self 367 368 saveItem = None 369 for i in iRange: 370 item = self.items[i] 371 if not hasattr(item, "getData"): 372 continue 373 374 if sortCoverageLast and (i==1) and item.name == 'Coverage': 375 # we've already 'gathered' it above 376 continue 377 378 if appendExtensions: 379 assert extTables != None, "Program or XML editing error. Extension subtables cannot contain extensions subtables" 380 newDone = {} 381 item._gatherTables(extTables, None, newDone) 382 383 elif not done.has_key(item): 384 item._gatherTables(tables, extTables, done) 385 else: 386 index = max(item.parent.keys()) 387 item.parent[index + 1] = self 388 389 390 tables.append(self) 391 return tables, extTables 392 393 # interface for gathering data, as used by table.compile() 394 395 def getSubWriter(self): 396 subwriter = self.__class__(self.globalState, self.localState) 397 subwriter.parent = {0:self} # because some subtables have idential values, we discard 398 # the duplicates under the getAllData method. Hence some 399 # subtable writers can have more than one parent writer. 400 return subwriter 401 402 def writeUShort(self, value): 403 assert 0 <= value < 0x10000 404 self.items.append(struct.pack(">H", value)) 405 406 def writeShort(self, value): 407 self.items.append(struct.pack(">h", value)) 408 409 def writeUInt24(self, value): 410 assert 0 <= value < 0x1000000 411 self.items.append(''.join(chr(v) for v in (value>>16, (value>>8)&0xFF, value&0xff))) 412 413 def writeLong(self, value): 414 self.items.append(struct.pack(">l", value)) 415 416 def writeULong(self, value): 417 self.items.append(struct.pack(">L", value)) 418 419 def writeTag(self, tag): 420 assert len(tag) == 4 421 self.items.append(tag) 422 423 def writeSubTable(self, subWriter): 424 self.items.append(subWriter) 425 426 def writeCountReference(self, table, name): 427 ref = CountReference(table, name) 428 self.items.append(ref) 429 return ref 430 431 def writeStruct(self, format, values): 432 data = apply(struct.pack, (format,) + values) 433 self.items.append(data) 434 435 def writeData(self, data): 436 self.items.append(data) 437 438 def getOverflowErrorRecord(self, item): 439 LookupListIndex = SubTableIndex = itemName = itemIndex = None 440 if self.name == 'LookupList': 441 LookupListIndex = item.repeatIndex 442 elif self.name == 'Lookup': 443 LookupListIndex = self.repeatIndex 444 SubTableIndex = item.repeatIndex 445 else: 446 itemName = item.name 447 if hasattr(item, 'repeatIndex'): 448 itemIndex = item.repeatIndex 449 if self.name == 'SubTable': 450 LookupListIndex = self.parent[0].repeatIndex 451 SubTableIndex = self.repeatIndex 452 elif self.name == 'ExtSubTable': 453 LookupListIndex = self.parent[0].parent[0].repeatIndex 454 SubTableIndex = self.parent[0].repeatIndex 455 else: # who knows how far below the SubTable level we are! Climb back up to the nearest subtable. 456 itemName = ".".join(self.name, item.name) 457 p1 = self.parent[0] 458 while p1 and p1.name not in ['ExtSubTable', 'SubTable']: 459 itemName = ".".join(p1.name, item.name) 460 p1 = p1.parent[0] 461 if p1: 462 if p1.name == 'ExtSubTable': 463 LookupListIndex = self.parent[0].parent[0].repeatIndex 464 SubTableIndex = self.parent[0].repeatIndex 465 else: 466 LookupListIndex = self.parent[0].repeatIndex 467 SubTableIndex = self.repeatIndex 468 469 return OverflowErrorRecord( (self.globalState.tableType, LookupListIndex, SubTableIndex, itemName, itemIndex) ) 470 471 472class CountReference: 473 """A reference to a Count value, not a count of references.""" 474 def __init__(self, table, name): 475 self.table = table 476 self.name = name 477 def setValue(self, value): 478 table = self.table 479 name = self.name 480 if table[name] is None: 481 table[name] = value 482 else: 483 assert table[name] == value, (name, table[name], value) 484 def getCountData(self): 485 return packUShort(self.table[self.name]) 486 487 488def packUShort(value): 489 assert 0 <= value < 0x10000, value 490 return struct.pack(">H", value) 491 492 493def packULong(value): 494 assert 0 <= value < 0x100000000, value 495 return struct.pack(">L", value) 496 497 498class BaseTable(object): 499 def __init__(self): 500 self.compileStatus = 0 # 0 means table was created 501 # 1 means the table.read() function was called by a table which is subject 502 # to delayed compilation 503 # 2 means that it was subject to delayed compilation, and 504 # has been decompiled 505 506 self.recurse = 0 507 508 def __getattr__(self, attr): 509 # we get here only when the table does not have the attribute. 510 # This method ovveride exists so that we can try to de-compile 511 # a table which is subject to delayed decompilation, and then try 512 # to get the value again after decompilation. 513 self.recurse +=1 514 if self.recurse > 2: 515 # shouldn't ever get here - we should only get to two levels of recursion. 516 # this guards against self.decompile NOT setting compileStatus to other than 1. 517 raise AttributeError, attr 518 if self.compileStatus == 1: 519 self.ensureDecompiled() 520 val = getattr(self, attr) 521 self.recurse -=1 522 return val 523 524 raise AttributeError, attr 525 526 527 """Generic base class for all OpenType (sub)tables.""" 528 529 def getConverters(self): 530 return self.converters 531 532 def getConverterByName(self, name): 533 return self.convertersByName[name] 534 535 def decompile(self, reader, font): 536 self.compileStatus = 2 # table has been decompiled. 537 self.readFormat(reader) 538 table = {} 539 self.__rawTable = table # for debugging 540 converters = self.getConverters() 541 for conv in converters: 542 if conv.name == "SubTable": 543 conv = conv.getConverter(reader.globalState.tableType, 544 table["LookupType"]) 545 if conv.name == "ExtSubTable": 546 conv = conv.getConverter(reader.globalState.tableType, 547 table["ExtensionLookupType"]) 548 if conv.name == "FeatureParams": 549 conv = conv.getConverter(reader["FeatureTag"]) 550 if conv.repeat: 551 l = [] 552 if conv.repeat in table: 553 countValue = table[conv.repeat] 554 else: 555 # conv.repeat is a propagated count 556 countValue = reader[conv.repeat] 557 for i in range(countValue + conv.aux): 558 l.append(conv.read(reader, font, table)) 559 table[conv.name] = l 560 else: 561 if conv.aux and not eval(conv.aux, None, table): 562 continue 563 table[conv.name] = conv.read(reader, font, table) 564 if conv.isPropagated: 565 reader[conv.name] = table[conv.name] 566 567 self.postRead(table, font) 568 569 del self.__rawTable # succeeded, get rid of debugging info 570 571 def ensureDecompiled(self): 572 if self.compileStatus != 1: 573 return 574 self.decompile(self.reader, self.font) 575 del self.reader, self.font 576 577 def compile(self, writer, font): 578 self.ensureDecompiled() 579 table = self.preWrite(font) 580 581 if hasattr(self, 'sortCoverageLast'): 582 writer.sortCoverageLast = 1 583 584 self.writeFormat(writer) 585 for conv in self.getConverters(): 586 value = table.get(conv.name) 587 if conv.repeat: 588 if value is None: 589 value = [] 590 countValue = len(value) - conv.aux 591 if conv.repeat in table: 592 ref = table[conv.repeat] 593 table[conv.repeat] = None 594 ref.setValue(countValue) 595 else: 596 # conv.repeat is a propagated count 597 writer[conv.repeat].setValue(countValue) 598 for i in range(len(value)): 599 conv.write(writer, font, table, value[i], i) 600 elif conv.isCount: 601 # Special-case Count values. 602 # Assumption: a Count field will *always* precede 603 # the actual array(s). 604 # We need a default value, as it may be set later by a nested 605 # table. We will later store it here. 606 # We add a reference: by the time the data is assembled 607 # the Count value will be filled in. 608 ref = writer.writeCountReference(table, conv.name) 609 if conv.isPropagated: 610 table[conv.name] = None 611 writer[conv.name] = ref 612 else: 613 table[conv.name] = ref 614 else: 615 if conv.aux and not eval(conv.aux, None, table): 616 continue 617 conv.write(writer, font, table, value) 618 if conv.isPropagated: 619 writer[conv.name] = value 620 621 def readFormat(self, reader): 622 pass 623 624 def writeFormat(self, writer): 625 pass 626 627 def postRead(self, table, font): 628 self.__dict__.update(table) 629 630 def preWrite(self, font): 631 return self.__dict__.copy() 632 633 def toXML(self, xmlWriter, font, attrs=None): 634 tableName = self.__class__.__name__ 635 if attrs is None: 636 attrs = [] 637 if hasattr(self, "Format"): 638 attrs = attrs + [("Format", self.Format)] 639 xmlWriter.begintag(tableName, attrs) 640 xmlWriter.newline() 641 self.toXML2(xmlWriter, font) 642 xmlWriter.endtag(tableName) 643 xmlWriter.newline() 644 645 def toXML2(self, xmlWriter, font): 646 # Simpler variant of toXML, *only* for the top level tables (like GPOS, GSUB). 647 # This is because in TTX our parent writes our main tag, and in otBase.py we 648 # do it ourselves. I think I'm getting schizophrenic... 649 for conv in self.getConverters(): 650 if conv.repeat: 651 value = getattr(self, conv.name) 652 for i in range(len(value)): 653 item = value[i] 654 conv.xmlWrite(xmlWriter, font, item, conv.name, 655 [("index", i)]) 656 else: 657 if conv.aux and not eval(conv.aux, None, vars(self)): 658 continue 659 value = getattr(self, conv.name) 660 conv.xmlWrite(xmlWriter, font, value, conv.name, []) 661 662 def fromXML(self, (name, attrs, content), font): 663 try: 664 conv = self.getConverterByName(name) 665 except KeyError: 666 raise # XXX on KeyError, raise nice error 667 value = conv.xmlRead(attrs, content, font) 668 if conv.repeat: 669 seq = getattr(self, conv.name, None) 670 if seq is None: 671 seq = [] 672 setattr(self, conv.name, seq) 673 seq.append(value) 674 else: 675 setattr(self, conv.name, value) 676 677 def __cmp__(self, other): 678 if type(self) != type(other): return cmp(type(self), type(other)) 679 if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__) 680 681 self.ensureDecompiled() 682 683 return cmp(self.__dict__, other.__dict__) 684 685 686class FormatSwitchingBaseTable(BaseTable): 687 688 """Minor specialization of BaseTable, for tables that have multiple 689 formats, eg. CoverageFormat1 vs. CoverageFormat2.""" 690 691 def getConverters(self): 692 return self.converters[self.Format] 693 694 def getConverterByName(self, name): 695 return self.convertersByName[self.Format][name] 696 697 def readFormat(self, reader): 698 self.Format = reader.readUShort() 699 assert self.Format <> 0, (self, reader.pos, len(reader.data)) 700 701 def writeFormat(self, writer): 702 writer.writeUShort(self.Format) 703 704 705# 706# Support for ValueRecords 707# 708# This data type is so different from all other OpenType data types that 709# it requires quite a bit of code for itself. It even has special support 710# in OTTableReader and OTTableWriter... 711# 712 713valueRecordFormat = [ 714# Mask Name isDevice signed 715 (0x0001, "XPlacement", 0, 1), 716 (0x0002, "YPlacement", 0, 1), 717 (0x0004, "XAdvance", 0, 1), 718 (0x0008, "YAdvance", 0, 1), 719 (0x0010, "XPlaDevice", 1, 0), 720 (0x0020, "YPlaDevice", 1, 0), 721 (0x0040, "XAdvDevice", 1, 0), 722 (0x0080, "YAdvDevice", 1, 0), 723# reserved: 724 (0x0100, "Reserved1", 0, 0), 725 (0x0200, "Reserved2", 0, 0), 726 (0x0400, "Reserved3", 0, 0), 727 (0x0800, "Reserved4", 0, 0), 728 (0x1000, "Reserved5", 0, 0), 729 (0x2000, "Reserved6", 0, 0), 730 (0x4000, "Reserved7", 0, 0), 731 (0x8000, "Reserved8", 0, 0), 732] 733 734def _buildDict(): 735 d = {} 736 for mask, name, isDevice, signed in valueRecordFormat: 737 d[name] = mask, isDevice, signed 738 return d 739 740valueRecordFormatDict = _buildDict() 741 742 743class ValueRecordFactory: 744 745 """Given a format code, this object convert ValueRecords.""" 746 747 def __init__(self, valueFormat): 748 format = [] 749 for mask, name, isDevice, signed in valueRecordFormat: 750 if valueFormat & mask: 751 format.append((name, isDevice, signed)) 752 self.format = format 753 754 def readValueRecord(self, reader, font): 755 format = self.format 756 if not format: 757 return None 758 valueRecord = ValueRecord() 759 for name, isDevice, signed in format: 760 if signed: 761 value = reader.readShort() 762 else: 763 value = reader.readUShort() 764 if isDevice: 765 if value: 766 import otTables 767 subReader = reader.getSubReader(value) 768 value = getattr(otTables, name)() 769 value.decompile(subReader, font) 770 else: 771 value = None 772 setattr(valueRecord, name, value) 773 return valueRecord 774 775 def writeValueRecord(self, writer, font, valueRecord): 776 for name, isDevice, signed in self.format: 777 value = getattr(valueRecord, name, 0) 778 if isDevice: 779 if value: 780 subWriter = writer.getSubWriter() 781 writer.writeSubTable(subWriter) 782 value.compile(subWriter, font) 783 else: 784 writer.writeUShort(0) 785 elif signed: 786 writer.writeShort(value) 787 else: 788 writer.writeUShort(value) 789 790 791class ValueRecord: 792 793 # see ValueRecordFactory 794 795 def getFormat(self): 796 format = 0 797 for name in self.__dict__.keys(): 798 format = format | valueRecordFormatDict[name][0] 799 return format 800 801 def toXML(self, xmlWriter, font, valueName, attrs=None): 802 if attrs is None: 803 simpleItems = [] 804 else: 805 simpleItems = list(attrs) 806 for mask, name, isDevice, format in valueRecordFormat[:4]: # "simple" values 807 if hasattr(self, name): 808 simpleItems.append((name, getattr(self, name))) 809 deviceItems = [] 810 for mask, name, isDevice, format in valueRecordFormat[4:8]: # device records 811 if hasattr(self, name): 812 device = getattr(self, name) 813 if device is not None: 814 deviceItems.append((name, device)) 815 if deviceItems: 816 xmlWriter.begintag(valueName, simpleItems) 817 xmlWriter.newline() 818 for name, deviceRecord in deviceItems: 819 if deviceRecord is not None: 820 deviceRecord.toXML(xmlWriter, font) 821 xmlWriter.endtag(valueName) 822 xmlWriter.newline() 823 else: 824 xmlWriter.simpletag(valueName, simpleItems) 825 xmlWriter.newline() 826 827 def fromXML(self, (name, attrs, content), font): 828 import otTables 829 for k, v in attrs.items(): 830 setattr(self, k, int(v)) 831 for element in content: 832 if type(element) <> TupleType: 833 continue 834 name, attrs, content = element 835 value = getattr(otTables, name)() 836 for elem2 in content: 837 if type(elem2) <> TupleType: 838 continue 839 value.fromXML(elem2, font) 840 setattr(self, name, value) 841 842 def __cmp__(self, other): 843 if type(self) != type(other): return cmp(type(self), type(other)) 844 if self.__class__ != other.__class__: return cmp(self.__class__, other.__class__) 845 846 return cmp(self.__dict__, other.__dict__) 847