/*
 * Copyright (C) 2007-2010 Advanced Micro Devices, Inc.
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *         Leo Duran <leo.duran@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#ifndef _ASM_X86_AMD_IOMMU_TYPES_H
#define _ASM_X86_AMD_IOMMU_TYPES_H

#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/irqreturn.h>

/*
 * Maximum number of IOMMUs supported
 */
#define MAX_IOMMUS		32

/*
 * some size calculation constants
 */
#define DEV_TABLE_ENTRY_SIZE		32
#define ALIAS_TABLE_ENTRY_SIZE		2
#define RLOOKUP_TABLE_ENTRY_SIZE	(sizeof(void *))

/* Capability offsets used by the driver */
#define MMIO_CAP_HDR_OFFSET	0x00
#define MMIO_RANGE_OFFSET	0x0c
#define MMIO_MISC_OFFSET	0x10

/* Masks, shifts and macros to parse the device range capability */
#define MMIO_RANGE_LD_MASK	0xff000000
#define MMIO_RANGE_FD_MASK	0x00ff0000
#define MMIO_RANGE_BUS_MASK	0x0000ff00
#define MMIO_RANGE_LD_SHIFT	24
#define MMIO_RANGE_FD_SHIFT	16
#define MMIO_RANGE_BUS_SHIFT	8
#define MMIO_GET_LD(x)  (((x) & MMIO_RANGE_LD_MASK) >> MMIO_RANGE_LD_SHIFT)
#define MMIO_GET_FD(x)  (((x) & MMIO_RANGE_FD_MASK) >> MMIO_RANGE_FD_SHIFT)
#define MMIO_GET_BUS(x) (((x) & MMIO_RANGE_BUS_MASK) >> MMIO_RANGE_BUS_SHIFT)
#define MMIO_MSI_NUM(x)	((x) & 0x1f)

/* Flag masks for the AMD IOMMU exclusion range */
#define MMIO_EXCL_ENABLE_MASK	0x01ULL
#define MMIO_EXCL_ALLOW_MASK	0x02ULL

/* Used offsets into the MMIO space */
#define MMIO_DEV_TABLE_OFFSET	0x0000
#define MMIO_CMD_BUF_OFFSET	0x0008
#define MMIO_EVT_BUF_OFFSET	0x0010
#define MMIO_CONTROL_OFFSET	0x0018
#define MMIO_EXCL_BASE_OFFSET	0x0020
#define MMIO_EXCL_LIMIT_OFFSET	0x0028
#define MMIO_EXT_FEATURES	0x0030
#define MMIO_PPR_LOG_OFFSET	0x0038
#define MMIO_CMD_HEAD_OFFSET	0x2000
#define MMIO_CMD_TAIL_OFFSET	0x2008
#define MMIO_EVT_HEAD_OFFSET	0x2010
#define MMIO_EVT_TAIL_OFFSET	0x2018
#define MMIO_STATUS_OFFSET	0x2020
#define MMIO_PPR_HEAD_OFFSET	0x2030
#define MMIO_PPR_TAIL_OFFSET	0x2038
#define MMIO_CNTR_CONF_OFFSET	0x4000
#define MMIO_CNTR_REG_OFFSET	0x40000
#define MMIO_REG_END_OFFSET	0x80000


/* Extended Feature Bits */
#define FEATURE_PREFETCH	(1ULL<<0)
#define FEATURE_PPR		(1ULL<<1)
#define FEATURE_X2APIC		(1ULL<<2)
#define FEATURE_NX		(1ULL<<3)
#define FEATURE_GT		(1ULL<<4)
#define FEATURE_IA		(1ULL<<6)
#define FEATURE_GA		(1ULL<<7)
#define FEATURE_HE		(1ULL<<8)
#define FEATURE_PC		(1ULL<<9)

#define FEATURE_PASID_SHIFT	32
#define FEATURE_PASID_MASK	(0x1fULL << FEATURE_PASID_SHIFT)

#define FEATURE_GLXVAL_SHIFT	14
#define FEATURE_GLXVAL_MASK	(0x03ULL << FEATURE_GLXVAL_SHIFT)

/* Note:
 * The current driver only supports 16-bit PASIDs.
 * Currently, hardware also only implements up to 16-bit PASIDs,
 * even though the spec allows up to 20 bits.
 */
#define PASID_MASK		0x0000ffff
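/*
 * Illustrative sketch only (this helper is hypothetical and not part of the
 * driver): how the raw 32-bit device range capability register is typically
 * decomposed with the MMIO_GET_* macros above. Device ids are assumed to be
 * packed as (bus << 8) | devfn.
 */
static inline void example_decode_range_cap(u32 range, u16 *first_devid,
					    u16 *last_devid)
{
	u16 bus = MMIO_GET_BUS(range);

	*first_devid = (bus << 8) | MMIO_GET_FD(range);	/* first handled device */
	*last_devid  = (bus << 8) | MMIO_GET_LD(range);	/* last handled device  */
}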
/* MMIO status bits */
#define MMIO_STATUS_EVT_INT_MASK	(1 << 1)
#define MMIO_STATUS_COM_WAIT_INT_MASK	(1 << 2)
#define MMIO_STATUS_PPR_INT_MASK	(1 << 6)

/* event logging constants */
#define EVENT_ENTRY_SIZE	0x10
#define EVENT_TYPE_SHIFT	28
#define EVENT_TYPE_MASK		0xf
#define EVENT_TYPE_ILL_DEV	0x1
#define EVENT_TYPE_IO_FAULT	0x2
#define EVENT_TYPE_DEV_TAB_ERR	0x3
#define EVENT_TYPE_PAGE_TAB_ERR	0x4
#define EVENT_TYPE_ILL_CMD	0x5
#define EVENT_TYPE_CMD_HARD_ERR	0x6
#define EVENT_TYPE_IOTLB_INV_TO	0x7
#define EVENT_TYPE_INV_DEV_REQ	0x8
#define EVENT_DEVID_MASK	0xffff
#define EVENT_DEVID_SHIFT	0
#define EVENT_DOMID_MASK	0xffff
#define EVENT_DOMID_SHIFT	0
#define EVENT_FLAGS_MASK	0xfff
#define EVENT_FLAGS_SHIFT	0x10

/* feature control bits */
#define CONTROL_IOMMU_EN	0x00ULL
#define CONTROL_HT_TUN_EN	0x01ULL
#define CONTROL_EVT_LOG_EN	0x02ULL
#define CONTROL_EVT_INT_EN	0x03ULL
#define CONTROL_COMWAIT_EN	0x04ULL
#define CONTROL_INV_TIMEOUT	0x05ULL
#define CONTROL_PASSPW_EN	0x08ULL
#define CONTROL_RESPASSPW_EN	0x09ULL
#define CONTROL_COHERENT_EN	0x0aULL
#define CONTROL_ISOC_EN		0x0bULL
#define CONTROL_CMDBUF_EN	0x0cULL
#define CONTROL_PPFLOG_EN	0x0dULL
#define CONTROL_PPFINT_EN	0x0eULL
#define CONTROL_PPR_EN		0x0fULL
#define CONTROL_GT_EN		0x10ULL

#define CTRL_INV_TO_MASK	(7 << CONTROL_INV_TIMEOUT)
#define CTRL_INV_TO_NONE	0
#define CTRL_INV_TO_1MS		1
#define CTRL_INV_TO_10MS	2
#define CTRL_INV_TO_100MS	3
#define CTRL_INV_TO_1S		4
#define CTRL_INV_TO_10S		5
#define CTRL_INV_TO_100S	6

/* command specific defines */
#define CMD_COMPL_WAIT		0x01
#define CMD_INV_DEV_ENTRY	0x02
#define CMD_INV_IOMMU_PAGES	0x03
#define CMD_INV_IOTLB_PAGES	0x04
#define CMD_INV_IRT		0x05
#define CMD_COMPLETE_PPR	0x07
#define CMD_INV_ALL		0x08

#define CMD_COMPL_WAIT_STORE_MASK	0x01
#define CMD_COMPL_WAIT_INT_MASK		0x02
#define CMD_INV_IOMMU_PAGES_SIZE_MASK	0x01
#define CMD_INV_IOMMU_PAGES_PDE_MASK	0x02
#define CMD_INV_IOMMU_PAGES_GN_MASK	0x04

#define PPR_STATUS_MASK			0xf
#define PPR_STATUS_SHIFT		12

#define CMD_INV_IOMMU_ALL_PAGES_ADDRESS	0x7fffffffffffffffULL

/* macros and definitions for device table entries */
#define DEV_ENTRY_VALID		0x00
#define DEV_ENTRY_TRANSLATION	0x01
#define DEV_ENTRY_IR		0x3d
#define DEV_ENTRY_IW		0x3e
#define DEV_ENTRY_NO_PAGE_FAULT	0x62
#define DEV_ENTRY_EX		0x67
#define DEV_ENTRY_SYSMGT1	0x68
#define DEV_ENTRY_SYSMGT2	0x69
#define DEV_ENTRY_IRQ_TBL_EN	0x80
#define DEV_ENTRY_INIT_PASS	0xb8
#define DEV_ENTRY_EINT_PASS	0xb9
#define DEV_ENTRY_NMI_PASS	0xba
#define DEV_ENTRY_LINT0_PASS	0xbe
#define DEV_ENTRY_LINT1_PASS	0xbf
#define DEV_ENTRY_MODE_MASK	0x07
#define DEV_ENTRY_MODE_SHIFT	0x09

#define MAX_DEV_TABLE_ENTRIES	0xffff

/* constants to configure the command buffer */
#define CMD_BUFFER_SIZE		8192
#define CMD_BUFFER_UNINITIALIZED 1
#define CMD_BUFFER_ENTRIES	512
#define MMIO_CMD_SIZE_SHIFT	56
#define MMIO_CMD_SIZE_512	(0x9ULL << MMIO_CMD_SIZE_SHIFT)

/* constants for event buffer handling */
#define EVT_BUFFER_SIZE		8192 /* 512 entries */
#define EVT_LEN_MASK		(0x9ULL << 56)
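/*
 * Worked example of the size encoding above (as the driver programs it): the
 * length field shifted up by 56 bits holds log2 of the number of entries, so
 * the 0x9ULL << 56 values select 2^9 = 512 entries. With 16 bytes per command
 * or event entry this is 512 * 16 = 8192 bytes, matching CMD_BUFFER_SIZE and
 * EVT_BUFFER_SIZE.
 */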
/* Constants for PPR Log handling */
#define PPR_LOG_ENTRIES		512
#define PPR_LOG_SIZE_SHIFT	56
#define PPR_LOG_SIZE_512	(0x9ULL << PPR_LOG_SIZE_SHIFT)
#define PPR_ENTRY_SIZE		16
#define PPR_LOG_SIZE		(PPR_ENTRY_SIZE * PPR_LOG_ENTRIES)

#define PPR_REQ_TYPE(x)		(((x) >> 60) & 0xfULL)
#define PPR_FLAGS(x)		(((x) >> 48) & 0xfffULL)
#define PPR_DEVID(x)		((x) & 0xffffULL)
#define PPR_TAG(x)		(((x) >> 32) & 0x3ffULL)
#define PPR_PASID1(x)		(((x) >> 16) & 0xffffULL)
#define PPR_PASID2(x)		(((x) >> 42) & 0xfULL)
#define PPR_PASID(x)		((PPR_PASID2(x) << 16) | PPR_PASID1(x))

#define PPR_REQ_FAULT		0x01

#define PAGE_MODE_NONE		0x00
#define PAGE_MODE_1_LEVEL	0x01
#define PAGE_MODE_2_LEVEL	0x02
#define PAGE_MODE_3_LEVEL	0x03
#define PAGE_MODE_4_LEVEL	0x04
#define PAGE_MODE_5_LEVEL	0x05
#define PAGE_MODE_6_LEVEL	0x06

#define PM_LEVEL_SHIFT(x)	(12 + ((x) * 9))
#define PM_LEVEL_SIZE(x)	(((x) < 6) ? \
				  ((1ULL << PM_LEVEL_SHIFT((x))) - 1) : \
				   (0xffffffffffffffffULL))
#define PM_LEVEL_INDEX(x, a)	(((a) >> PM_LEVEL_SHIFT((x))) & 0x1ffULL)
#define PM_LEVEL_ENC(x)		(((x) << 9) & 0xe00ULL)
#define PM_LEVEL_PDE(x, a)	((a) | PM_LEVEL_ENC((x)) | \
				 IOMMU_PTE_P | IOMMU_PTE_IR | IOMMU_PTE_IW)
#define PM_PTE_LEVEL(pte)	(((pte) >> 9) & 0x7ULL)

#define PM_MAP_4k		0
#define PM_ADDR_MASK		0x000ffffffffff000ULL
#define PM_MAP_MASK(lvl)	(PM_ADDR_MASK & \
				(~((1ULL << (12 + ((lvl) * 9))) - 1)))
#define PM_ALIGNED(lvl, addr)	((PM_MAP_MASK(lvl) & (addr)) == (addr))

/*
 * Returns the page table level to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_LEVEL(pagesize) \
		((__ffs(pagesize) - 12) / 9)
/*
 * Returns the number of ptes to use for a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_PTE_COUNT(pagesize) \
		(1ULL << ((__ffs(pagesize) - 12) % 9))

/*
 * Aligns a given io-virtual address to a given page size
 * Pagesize is expected to be a power-of-two
 */
#define PAGE_SIZE_ALIGN(address, pagesize) \
		((address) & ~((pagesize) - 1))
/*
 * Creates an IOMMU PTE for an address and a given pagesize
 * The PTE has no permission bits set
 * Pagesize is expected to be a power-of-two larger than 4096
 */
#define PAGE_SIZE_PTE(address, pagesize)		\
		(((address) | ((pagesize) - 1)) &	\
		 (~(pagesize >> 1)) & PM_ADDR_MASK)

/*
 * Takes a PTE value with mode=0x07 and returns the page size it maps
 */
#define PTE_PAGE_SIZE(pte) \
	(1ULL << (1 + ffz(((pte) | 0xfffULL))))

#define IOMMU_PTE_P	(1ULL << 0)
#define IOMMU_PTE_TV	(1ULL << 1)
#define IOMMU_PTE_U	(1ULL << 59)
#define IOMMU_PTE_FC	(1ULL << 60)
#define IOMMU_PTE_IR	(1ULL << 61)
#define IOMMU_PTE_IW	(1ULL << 62)

#define DTE_FLAG_IOTLB	(0x01UL << 32)
#define DTE_FLAG_GV	(0x01ULL << 55)
#define DTE_GLX_SHIFT	(56)
#define DTE_GLX_MASK	(3)

#define DTE_GCR3_VAL_A(x)	(((x) >> 12) & 0x00007ULL)
#define DTE_GCR3_VAL_B(x)	(((x) >> 15) & 0x0ffffULL)
#define DTE_GCR3_VAL_C(x)	(((x) >> 31) & 0xfffffULL)

#define DTE_GCR3_INDEX_A	0
#define DTE_GCR3_INDEX_B	1
#define DTE_GCR3_INDEX_C	1

#define DTE_GCR3_SHIFT_A	58
#define DTE_GCR3_SHIFT_B	16
#define DTE_GCR3_SHIFT_C	43

#define GCR3_VALID		0x01ULL
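/*
 * Illustrative sketch only (hypothetical helper, not driver code): the
 * DTE_GCR3_* macros above split a guest CR3 table pointer into three fields
 * that land in different 64-bit words of the device table entry, selected by
 * the DTE_GCR3_INDEX_* values.
 */
static inline void example_encode_gcr3(u64 *dte, u64 gcr3)
{
	dte[DTE_GCR3_INDEX_A] |= DTE_GCR3_VAL_A(gcr3) << DTE_GCR3_SHIFT_A;
	dte[DTE_GCR3_INDEX_B] |= DTE_GCR3_VAL_B(gcr3) << DTE_GCR3_SHIFT_B;
	dte[DTE_GCR3_INDEX_C] |= DTE_GCR3_VAL_C(gcr3) << DTE_GCR3_SHIFT_C;
}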
#define IOMMU_PAGE_MASK		(((1ULL << 52) - 1) & ~0xfffULL)
#define IOMMU_PTE_PRESENT(pte)	((pte) & IOMMU_PTE_P)
#define IOMMU_PTE_PAGE(pte)	(phys_to_virt((pte) & IOMMU_PAGE_MASK))
#define IOMMU_PTE_MODE(pte)	(((pte) >> 9) & 0x07)

#define IOMMU_PROT_MASK		0x03
#define IOMMU_PROT_IR		0x01
#define IOMMU_PROT_IW		0x02

/* IOMMU capabilities */
#define IOMMU_CAP_IOTLB		24
#define IOMMU_CAP_NPCACHE	26
#define IOMMU_CAP_EFR		27

#define MAX_DOMAIN_ID		65536

/* Protection domain flags */
#define PD_DMA_OPS_MASK		(1UL << 0) /* domain used for dma_ops */
#define PD_DEFAULT_MASK		(1UL << 1) /* domain is a default dma_ops
					      domain for an IOMMU */
#define PD_PASSTHROUGH_MASK	(1UL << 2) /* domain has no page
					      translation */
#define PD_IOMMUV2_MASK		(1UL << 3) /* domain has gcr3 table */

extern bool amd_iommu_dump;
#define DUMP_printk(format, arg...)					\
	do {								\
		if (amd_iommu_dump)					\
			printk(KERN_INFO "AMD-Vi: " format, ## arg);	\
	} while (0)

/* global flag if IOMMUs cache non-present entries */
extern bool amd_iommu_np_cache;
/* Only true if all IOMMUs support device IOTLBs */
extern bool amd_iommu_iotlb_sup;

#define MAX_IRQS_PER_TABLE	256
#define IRQ_TABLE_ALIGNMENT	128

struct irq_remap_table {
	spinlock_t lock;
	unsigned min_index;
	u32 *table;
};

extern struct irq_remap_table **irq_lookup_table;

/* Interrupt remapping feature used? */
extern bool amd_iommu_irq_remap;

/* kmem_cache to get tables with 128 byte alignment */
extern struct kmem_cache *amd_iommu_irq_cache;

/*
 * Make iterating over all IOMMUs easier
 */
#define for_each_iommu(iommu) \
	list_for_each_entry((iommu), &amd_iommu_list, list)
#define for_each_iommu_safe(iommu, next) \
	list_for_each_entry_safe((iommu), (next), &amd_iommu_list, list)

#define APERTURE_RANGE_SHIFT	27	/* 128 MB */
#define APERTURE_RANGE_SIZE	(1ULL << APERTURE_RANGE_SHIFT)
#define APERTURE_RANGE_PAGES	(APERTURE_RANGE_SIZE >> PAGE_SHIFT)
#define APERTURE_MAX_RANGES	32	/* allows 4GB of DMA address space */
#define APERTURE_RANGE_INDEX(a)	((a) >> APERTURE_RANGE_SHIFT)
#define APERTURE_PAGE_INDEX(a)	(((a) >> 21) & 0x3fULL)


/*
 * This struct is used to pass information about
 * incoming PPR faults around.
 */
struct amd_iommu_fault {
	u64 address;    /* IO virtual address of the fault */
	u32 pasid;      /* Address space identifier */
	u16 device_id;  /* Originating PCI device id */
	u16 tag;        /* PPR tag */
	u16 flags;      /* Fault flags */

};
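/*
 * Illustrative sketch only (hypothetical helper, not driver code): how the
 * two raw 64-bit words of a PPR log entry map onto struct amd_iommu_fault
 * using the PPR_* macros defined further up.
 */
static inline void example_unpack_ppr_entry(struct amd_iommu_fault *fault,
					    u64 raw0, u64 raw1)
{
	fault->address   = raw1;		/* faulting IO virtual address */
	fault->pasid     = PPR_PASID(raw0);	/* reassembled from both fields */
	fault->device_id = PPR_DEVID(raw0);
	fault->tag       = PPR_TAG(raw0);
	fault->flags     = PPR_FLAGS(raw0);
}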
struct iommu_domain;

/*
 * This structure contains generic data for IOMMU protection domains
 * independent of their use.
 */
struct protection_domain {
	struct list_head list;  /* for list of all protection domains */
	struct list_head dev_list; /* List of all devices in this domain */
	spinlock_t lock;	/* mostly used to lock the page table */
	struct mutex api_lock;	/* protect page tables in the iommu-api path */
	u16 id;			/* the domain id written to the device table */
	int mode;		/* paging mode (0-6 levels) */
	u64 *pt_root;		/* page table root pointer */
	int glx;		/* Number of levels for GCR3 table */
	u64 *gcr3_tbl;		/* Guest CR3 table */
	unsigned long flags;	/* flags to find out type of domain */
	bool updated;		/* complete domain flush required */
	unsigned dev_cnt;	/* devices assigned to this domain */
	unsigned dev_iommu[MAX_IOMMUS]; /* per-IOMMU reference count */
	void *priv;		/* private data */
	struct iommu_domain *iommu_domain; /* Pointer to generic
					      domain structure */

};

/*
 * For dynamic growth the aperture size is split into ranges of 128MB of
 * DMA address space each. This struct represents one such range.
 */
struct aperture_range {

	/* address allocation bitmap */
	unsigned long *bitmap;

	/*
	 * Array of PTE pages for the aperture. In this array we save all the
	 * leaf pages of the domain page table used for the aperture. This way
	 * we don't need to walk the page table to find a specific PTE. We can
	 * just calculate its address in constant time.
	 */
	u64 *pte_pages[64];

	unsigned long offset;
};

/*
 * Data container for a dma_ops specific protection domain
 */
struct dma_ops_domain {
	struct list_head list;

	/* generic protection domain information */
	struct protection_domain domain;

	/* size of the aperture for the mappings */
	unsigned long aperture_size;

	/* address we start to search for free addresses */
	unsigned long next_address;

	/* address space relevant data */
	struct aperture_range *aperture[APERTURE_MAX_RANGES];

	/* This will be set to true when TLB needs to be flushed */
	bool need_flush;

	/*
	 * if this is a preallocated domain, keep the device for which it was
	 * preallocated in this variable
	 */
	u16 target_dev;
};
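/*
 * Worked numbers for the aperture handling above: one aperture_range covers
 * 1 << APERTURE_RANGE_SHIFT = 128 MB of IO virtual address space. A single
 * leaf page table page maps 512 * 4 KB = 2 MB, so the 64 slots in pte_pages[]
 * cover a full range, and APERTURE_PAGE_INDEX() simply extracts bits 26:21 of
 * an address to pick the right leaf page. With APERTURE_MAX_RANGES = 32 a
 * dma_ops domain can therefore grow up to 32 * 128 MB = 4 GB.
 */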
/*
 * Structure where we save information about one hardware AMD IOMMU in the
 * system.
 */
struct amd_iommu {
	struct list_head list;

	/* Index within the IOMMU array */
	int index;

	/* locks the accesses to the hardware */
	spinlock_t lock;

	/* Pointer to PCI device of this IOMMU */
	struct pci_dev *dev;

	/* Cache pdev to root device for resume quirks */
	struct pci_dev *root_pdev;

	/* physical address of MMIO space */
	u64 mmio_phys;

	/* physical end address of MMIO space */
	u64 mmio_phys_end;

	/* virtual address of MMIO space */
	u8 __iomem *mmio_base;

	/* capabilities of that IOMMU read from ACPI */
	u32 cap;

	/* flags read from acpi table */
	u8 acpi_flags;

	/* Extended features */
	u64 features;

	/* IOMMUv2 */
	bool is_iommu_v2;

	/* PCI device id of the IOMMU device */
	u16 devid;

	/*
	 * Capability pointer. There could be more than one IOMMU per PCI
	 * device function if there is more than one AMD IOMMU capability
	 * pointer.
	 */
	u16 cap_ptr;

	/* pci domain of this IOMMU */
	u16 pci_seg;

	/* first device this IOMMU handles. read from PCI */
	u16 first_device;
	/* last device this IOMMU handles. read from PCI */
	u16 last_device;

	/* start of exclusion range of that IOMMU */
	u64 exclusion_start;
	/* length of exclusion range of that IOMMU */
	u64 exclusion_length;

	/* command buffer virtual address */
	u8 *cmd_buf;
	/* size of command buffer */
	u32 cmd_buf_size;

	/* size of event buffer */
	u32 evt_buf_size;
	/* event buffer virtual address */
	u8 *evt_buf;

	/* Base of the PPR log, if present */
	u8 *ppr_log;

	/* true if interrupts for this IOMMU are already enabled */
	bool int_enabled;

	/* if one, we need to send a completion wait command */
	bool need_sync;

	/* default dma_ops domain for that IOMMU */
	struct dma_ops_domain *default_dom;

	/* IOMMU sysfs device */
	struct device *iommu_dev;

	/*
	 * We can't rely on the BIOS to restore all values on reinit, so we
	 * need to stash them
	 */

	/* The iommu BAR */
	u32 stored_addr_lo;
	u32 stored_addr_hi;

	/*
	 * Each iommu has 6 l1s, each of which is documented as having 0x12
	 * registers
	 */
	u32 stored_l1[6][0x12];

	/* The l2 indirect registers */
	u32 stored_l2[0x83];

	/* The maximum PC banks and counters/bank (PCSup=1) */
	u8 max_banks;
	u8 max_counters;
};

struct devid_map {
	struct list_head list;
	u8 id;
	u16 devid;
	bool cmd_line;
};

/* Map HPET and IOAPIC ids to the devid used by the IOMMU */
extern struct list_head ioapic_map;
extern struct list_head hpet_map;

/*
 * List with all IOMMUs in the system. This list is not locked because it is
 * only written and read at driver initialization or suspend time
 */
extern struct list_head amd_iommu_list;

/*
 * Array with pointers to each IOMMU struct
 * The indices are referenced in the protection domains
 */
extern struct amd_iommu *amd_iommus[MAX_IOMMUS];

/* Number of IOMMUs present in the system */
extern int amd_iommus_present;

/*
 * Declarations for the global list of all protection domains
 */
extern spinlock_t amd_iommu_pd_lock;
extern struct list_head amd_iommu_pd_list;

/*
 * Structure defining one entry in the device table
 */
struct dev_table_entry {
	u64 data[4];
};

/*
 * One entry for unity mappings parsed out of the ACPI table.
 */
struct unity_map_entry {
	struct list_head list;

	/* starting device id this entry is used for (inclusive) */
	u16 devid_start;
	/* end device id this entry is used for (inclusive) */
	u16 devid_end;

	/* start address to unity map (inclusive) */
	u64 address_start;
	/* end address to unity map (inclusive) */
	u64 address_end;

	/* required protection */
	int prot;
};

/*
 * List of all unity mappings. It is not locked because it is only read at
 * runtime. It is created at ACPI table parsing time.
 */
extern struct list_head amd_iommu_unity_map;

/*
 * Data structures for device handling
 */

/*
 * Device table used by hardware. Read and write accesses by software are
 * locked with the amd_iommu_pd_table lock.
 */
extern struct dev_table_entry *amd_iommu_dev_table;
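/*
 * Note on the format: a device table entry is 256 bits wide
 * (DEV_TABLE_ENTRY_SIZE is 32 bytes), so the DEV_ENTRY_* constants above are
 * absolute bit positions within data[]. An illustrative helper (hypothetical,
 * not driver code) that sets such a bit:
 */
static inline void example_set_dev_entry_bit(struct dev_table_entry *dte,
					     u8 bit)
{
	int i    = (bit >> 6) & 0x03;	/* which 64-bit word	 */
	int _bit = bit & 0x3f;		/* bit inside that word */

	dte->data[i] |= 1ULL << _bit;
}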
/*
 * Alias table to map requestor ids to device ids. Not locked because it is
 * only read at runtime.
 */
extern u16 *amd_iommu_alias_table;

/*
 * Reverse lookup table to find the IOMMU which translates a specific device.
 */
extern struct amd_iommu **amd_iommu_rlookup_table;

/* size of the dma_ops aperture as power of 2 */
extern unsigned amd_iommu_aperture_order;

/* largest PCI device id we expect translation requests for */
extern u16 amd_iommu_last_bdf;

/* allocation bitmap for domain ids */
extern unsigned long *amd_iommu_pd_alloc_bitmap;

/*
 * If true, the addresses will be flushed at unmap time, not when
 * they are reused
 */
extern u32 amd_iommu_unmap_flush;

/* Smallest max PASID supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasid;

extern bool amd_iommu_v2_present;

extern bool amd_iommu_force_isolation;

/* Max levels of glxval supported */
extern int amd_iommu_max_glx_val;

/*
 * This function flushes all internal caches of
 * the IOMMU used by this driver.
 */
extern void iommu_flush_all_caches(struct amd_iommu *iommu);

static inline int get_ioapic_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &ioapic_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

static inline int get_hpet_devid(int id)
{
	struct devid_map *entry;

	list_for_each_entry(entry, &hpet_map, list) {
		if (entry->id == id)
			return entry->devid;
	}

	return -EINVAL;
}

#ifdef CONFIG_AMD_IOMMU_STATS

struct __iommu_counter {
	char *name;
	struct dentry *dent;
	u64 value;
};

#define DECLARE_STATS_COUNTER(nm) \
	static struct __iommu_counter nm = {	\
		.name = #nm,			\
	}

#define INC_STATS_COUNTER(name)		name.value += 1
#define ADD_STATS_COUNTER(name, x)	name.value += (x)
#define SUB_STATS_COUNTER(name, x)	name.value -= (x)

#else /* CONFIG_AMD_IOMMU_STATS */

#define DECLARE_STATS_COUNTER(name)
#define INC_STATS_COUNTER(name)
#define ADD_STATS_COUNTER(name, x)
#define SUB_STATS_COUNTER(name, x)

#endif /* CONFIG_AMD_IOMMU_STATS */

#endif /* _ASM_X86_AMD_IOMMU_TYPES_H */