pat.c revision 8d4a4300854f3971502e81dacd930704cb88f606
/*
 * Handle caching attributes in page tables (PAT)
 *
 * Authors: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *          Suresh B Siddha <suresh.b.siddha@intel.com>
 *
 * Loosely based on earlier PAT patchset from Eric Biederman and Andi Kleen.
 */

#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/msr.h>
#include <asm/tlbflush.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pat.h>
#include <asm/e820.h>
#include <asm/cacheflush.h>
#include <asm/fcntl.h>
#include <asm/mtrr.h>
#include <asm/io.h>

#ifdef CONFIG_X86_PAT
int __read_mostly pat_wc_enabled = 1;

void __init pat_disable(char *reason)
{
	pat_wc_enabled = 0;
	printk(KERN_INFO "%s\n", reason);
}

static int nopat(char *str)
{
	pat_disable("PAT support disabled.");
	return 0;
}
early_param("nopat", nopat);
#endif

static u64 __read_mostly boot_pat_state;

enum {
	PAT_UC = 0,		/* uncached */
	PAT_WC = 1,		/* Write combining */
	PAT_WT = 4,		/* Write Through */
	PAT_WP = 5,		/* Write Protected */
	PAT_WB = 6,		/* Write Back (default) */
	PAT_UC_MINUS = 7,	/* UC, but can be overridden by MTRR */
};

#define PAT(x,y) ((u64)PAT_ ## y << ((x)*8))

void pat_init(void)
{
	u64 pat;

	if (!pat_wc_enabled)
		return;

	/* Paranoia check. */
	if (!cpu_has_pat) {
		printk(KERN_ERR "PAT enabled, but CPU feature cleared\n");
		/*
		 * Panic if this happens on the secondary CPU, and we
		 * switched to PAT on the boot CPU. We have no way to
		 * undo PAT.
		 */
		BUG_ON(boot_pat_state);
	}

	/* Set PWT to Write-Combining. All other bits stay the same */
	/*
	 * PTE encoding used in Linux:
	 *      PAT
	 *      |PCD
	 *      ||PWT
	 *      |||
	 *      000 WB		_PAGE_CACHE_WB
	 *      001 WC		_PAGE_CACHE_WC
	 *      010 UC-		_PAGE_CACHE_UC_MINUS
	 *      011 UC		_PAGE_CACHE_UC
	 * PAT bit unused
	 */
	pat = PAT(0,WB) | PAT(1,WC) | PAT(2,UC_MINUS) | PAT(3,UC) |
	      PAT(4,WB) | PAT(5,WC) | PAT(6,UC_MINUS) | PAT(7,UC);

	/* Boot CPU check */
	if (!boot_pat_state)
		rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);

	wrmsrl(MSR_IA32_CR_PAT, pat);
	printk(KERN_INFO "x86 PAT enabled: cpu %d, old 0x%Lx, new 0x%Lx\n",
	       smp_processor_id(), boot_pat_state, pat);
}

#undef PAT
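
/*
 * Worked example (annotation, not part of the original file): expanding
 * the PAT() encoding above, pat_init() writes
 *
 *	pat == 0x0007010600070106ULL
 *
 * to MSR_IA32_CR_PAT, i.e. PAT entries 0..3 (and again 4..7) become
 * WB(0x06), WC(0x01), UC-(0x07), UC(0x00).
 */
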
static char *cattr_name(unsigned long flags)
{
	switch (flags & _PAGE_CACHE_MASK) {
	case _PAGE_CACHE_UC:		return "uncached";
	case _PAGE_CACHE_UC_MINUS:	return "uncached-minus";
	case _PAGE_CACHE_WB:		return "write-back";
	case _PAGE_CACHE_WC:		return "write-combining";
	default:			return "broken";
	}
}

/*
 * The global memtype list keeps track of memory type for specific
 * physical memory areas. Conflicting memory types in different
 * mappings can cause CPU cache corruption. To avoid this we keep track.
 *
 * The list is sorted based on starting address and can contain multiple
 * entries for each address (this allows reference counting for overlapping
 * areas). All the aliases have the same cache attributes of course.
 * Zero attributes are represented as holes.
 *
 * Currently the data structure is a list because the number of mappings
 * is expected to be relatively small. If this should become a problem
 * it could be changed to an rbtree or similar.
 *
 * memtype_lock protects the whole list.
 */

struct memtype {
	u64 start;
	u64 end;
	unsigned long type;
	struct list_head nd;
};

static LIST_HEAD(memtype_list);
static DEFINE_SPINLOCK(memtype_lock);	/* protects memtype list */

/*
 * Intersects the PAT memory type with the MTRR memory type and returns
 * the result as a PAT memory type.
 * (The type values used by PAT and MTRR are not the same.)
 * The intersection is based on the "Effective Memory Type" tables in the
 * IA-32 SDM vol 3a.
 */
static int pat_x_mtrr_type(u64 start, u64 end, unsigned long prot,
				unsigned long *ret_prot)
{
	unsigned long pat_type;
	u8 mtrr_type;

	mtrr_type = mtrr_type_lookup(start, end);
	if (mtrr_type == 0xFF) {		/* MTRR not enabled */
		*ret_prot = prot;
		return 0;
	}
	if (mtrr_type == 0xFE) {		/* MTRR match error */
		*ret_prot = _PAGE_CACHE_UC;
		return -1;
	}
	if (mtrr_type != MTRR_TYPE_UNCACHABLE &&
	    mtrr_type != MTRR_TYPE_WRBACK &&
	    mtrr_type != MTRR_TYPE_WRCOMB) {	/* MTRR type unhandled */
		*ret_prot = _PAGE_CACHE_UC;
		return -1;
	}

	pat_type = prot & _PAGE_CACHE_MASK;
	prot &= (~_PAGE_CACHE_MASK);

	/* Currently doing intersection by hand. Optimize it later. */
	if (pat_type == _PAGE_CACHE_WC) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else if (pat_type == _PAGE_CACHE_UC_MINUS) {
		*ret_prot = prot | _PAGE_CACHE_UC_MINUS;
	} else if (pat_type == _PAGE_CACHE_UC ||
		   mtrr_type == MTRR_TYPE_UNCACHABLE) {
		*ret_prot = prot | _PAGE_CACHE_UC;
	} else if (mtrr_type == MTRR_TYPE_WRCOMB) {
		*ret_prot = prot | _PAGE_CACHE_WC;
	} else {
		*ret_prot = prot | _PAGE_CACHE_WB;
	}

	return 0;
}
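
/*
 * Worked example (annotation, not part of the original file), following
 * the if-ladder above:
 *
 *	request WC  x MTRR WB -> WC	(a PAT WC request always wins)
 *	request UC- x MTRR WB -> UC-
 *	request WB  x MTRR UC -> UC
 *	request WB  x MTRR WC -> WC
 *	request WB  x MTRR WB -> WB
 */
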
/*
 * req_type typically has one of:
 * - _PAGE_CACHE_WB
 * - _PAGE_CACHE_WC
 * - _PAGE_CACHE_UC_MINUS
 * - _PAGE_CACHE_UC
 *
 * req_type can also have the special value '-1', when the requester wants
 * to inherit the memory type from the MTRR (if WB) or an existing PAT
 * mapping, defaulting to UC_MINUS.
 *
 * If ret_type is NULL, the function returns an error if it cannot reserve
 * the region with req_type. If ret_type is non-NULL, the function returns
 * the available type in *ret_type on success. On any error it returns a
 * negative value.
 */
int reserve_memtype(u64 start, u64 end, unsigned long req_type,
			unsigned long *ret_type)
{
	struct memtype *new_entry = NULL;
	struct memtype *parse;
	unsigned long actual_type;
	int err = 0;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		/* This is identical to page table setting without PAT */
		if (ret_type) {
			if (req_type == -1) {
				*ret_type = _PAGE_CACHE_WB;
			} else {
				*ret_type = req_type;
			}
		}
		return 0;
	}

	/* Low ISA region is always mapped WB in page table. No need to track */
	if (start >= ISA_START_ADDRESS && (end - 1) <= ISA_END_ADDRESS) {
		if (ret_type)
			*ret_type = _PAGE_CACHE_WB;

		return 0;
	}

	if (req_type == -1) {
		/*
		 * Special case where caller wants to inherit from mtrr or
		 * existing pat mapping, defaulting to UC_MINUS in case of
		 * no match.
		 */
		u8 mtrr_type = mtrr_type_lookup(start, end);
		if (mtrr_type == 0xFE) { /* MTRR match error */
			err = -1;
		}

		if (mtrr_type == MTRR_TYPE_WRBACK) {
			req_type = _PAGE_CACHE_WB;
			actual_type = _PAGE_CACHE_WB;
		} else {
			req_type = _PAGE_CACHE_UC_MINUS;
			actual_type = _PAGE_CACHE_UC_MINUS;
		}
	} else {
		req_type &= _PAGE_CACHE_MASK;
		err = pat_x_mtrr_type(start, end, req_type, &actual_type);
	}

	if (err) {
		if (ret_type)
			*ret_type = actual_type;

		return -EINVAL;
	}

	new_entry = kmalloc(sizeof(struct memtype), GFP_KERNEL);
	if (!new_entry)
		return -ENOMEM;

	new_entry->start = start;
	new_entry->end = end;
	new_entry->type = actual_type;

	if (ret_type)
		*ret_type = actual_type;

	spin_lock(&memtype_lock);

	/* Search for existing mapping that overlaps the current range */
	list_for_each_entry(parse, &memtype_list, nd) {
		struct memtype *saved_ptr;

		if (parse->start >= end) {
			pr_debug("New Entry\n");
			list_add(&new_entry->nd, parse->nd.prev);
			new_entry = NULL;
			break;
		}

		if (start <= parse->start && end >= parse->start) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(KERN_INFO
			"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start) {
					break;
				}

				if (actual_type != parse->type) {
					printk(KERN_INFO
			"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err) {
				break;
			}

			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
				 saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, saved_ptr->nd.prev);
			new_entry = NULL;
			break;
		}

		if (start < parse->end) {
			if (actual_type != parse->type && ret_type) {
				actual_type = parse->type;
				*ret_type = actual_type;
				new_entry->type = actual_type;
			}

			if (actual_type != parse->type) {
				printk(KERN_INFO
			"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
					current->comm, current->pid,
					start, end,
					cattr_name(actual_type),
					cattr_name(parse->type));
				err = -EBUSY;
				break;
			}

			saved_ptr = parse;
			/*
			 * Check to see whether the request overlaps more
			 * than one entry in the list
			 */
			list_for_each_entry_continue(parse, &memtype_list, nd) {
				if (end <= parse->start) {
					break;
				}

				if (actual_type != parse->type) {
					printk(KERN_INFO
			"%s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
						current->comm, current->pid,
						start, end,
						cattr_name(actual_type),
						cattr_name(parse->type));
					err = -EBUSY;
					break;
				}
			}

			if (err) {
				break;
			}

			pr_debug("Overlap at 0x%Lx-0x%Lx\n",
				 saved_ptr->start, saved_ptr->end);
			/* No conflict. Go ahead and add this new entry */
			list_add(&new_entry->nd, &saved_ptr->nd);
			new_entry = NULL;
			break;
		}
	}

	if (err) {
		printk(KERN_INFO
		"reserve_memtype failed 0x%Lx-0x%Lx, track %s, req %s\n",
			start, end, cattr_name(new_entry->type),
			cattr_name(req_type));
		kfree(new_entry);
		spin_unlock(&memtype_lock);
		return err;
	}

	if (new_entry) {
		/* No conflict. Not yet added to the list. Add to the tail */
		list_add_tail(&new_entry->nd, &memtype_list);
		pr_debug("New Entry\n");
	}

	if (ret_type) {
		pr_debug(
	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type), cattr_name(*ret_type));
	} else {
		pr_debug(
	"reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s\n",
			start, end, cattr_name(actual_type),
			cattr_name(req_type));
	}

	spin_unlock(&memtype_lock);
	return err;
}
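
/*
 * Behavior sketch (illustrative, not part of the original file; the
 * addresses are made up and assume a WB MTRR covering them). Overlapping
 * reservations succeed as long as the types agree; with ret_type == NULL
 * a conflicting type is refused with -EBUSY.
 */
#if 0
	reserve_memtype(0xd0000000ULL, 0xd0010000ULL, _PAGE_CACHE_WC, NULL);
						/* 0: new entry */
	reserve_memtype(0xd0008000ULL, 0xd0018000ULL, _PAGE_CACHE_WC, NULL);
						/* 0: overlap, same type */
	reserve_memtype(0xd0008000ULL, 0xd0018000ULL, _PAGE_CACHE_UC, NULL);
						/* -EBUSY: conflicting type */
#endif
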
int free_memtype(u64 start, u64 end)
{
	struct memtype *ml;
	int err = -EINVAL;

	/* Only track when pat_wc_enabled */
	if (!pat_wc_enabled) {
		return 0;
	}

	/* Low ISA region is always mapped WB. No need to track */
	if (start >= ISA_START_ADDRESS && end <= ISA_END_ADDRESS) {
		return 0;
	}

	spin_lock(&memtype_lock);
	list_for_each_entry(ml, &memtype_list, nd) {
		if (ml->start == start && ml->end == end) {
			list_del(&ml->nd);
			kfree(ml);
			err = 0;
			break;
		}
	}
	spin_unlock(&memtype_lock);

	if (err) {
		printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
			current->comm, current->pid, start, end);
	}

	pr_debug("free_memtype request 0x%Lx-0x%Lx\n", start, end);
	return err;
}


/*
 * /dev/mem mmap interface. The memtype used for mapping varies:
 * - Use UC for mappings with O_SYNC flag
 * - Without O_SYNC flag, if there is any conflict in reserve_memtype,
 *   inherit the memtype from existing mapping.
 * - Else use UC_MINUS memtype (for backward compatibility with existing
 *   X drivers).
 */
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t vma_prot)
{
	return vma_prot;
}
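
/*
 * Userspace-side sketch (illustrative, not part of the original file):
 * opening /dev/mem with O_SYNC requests a UC mapping under the policy
 * described above; without O_SYNC the type is inherited or UC_MINUS.
 * phys_addr and len are hypothetical.
 */
#if 0
	int fd = open("/dev/mem", O_RDWR | O_SYNC);	/* -> _PAGE_CACHE_UC */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_SHARED, fd, phys_addr);
#endif
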
#ifdef CONFIG_NONPROMISC_DEVMEM
/* This check is done in drivers/char/mem.c in case of NONPROMISC_DEVMEM */
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#endif /* CONFIG_NONPROMISC_DEVMEM */

int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				unsigned long size, pgprot_t *vma_prot)
{
	u64 offset = ((u64) pfn) << PAGE_SHIFT;
	unsigned long flags = _PAGE_CACHE_UC_MINUS;
	int retval;

	if (!range_is_allowed(pfn, size))
		return 0;

	if (file->f_flags & O_SYNC) {
		flags = _PAGE_CACHE_UC;
	}

#ifdef CONFIG_X86_32
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting UC or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (!pat_wc_enabled &&
	    !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
	      test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability)) &&
	    (pfn << PAGE_SHIFT) >= __pa(high_memory)) {
		flags = _PAGE_CACHE_UC;
	}
#endif

	/*
	 * With O_SYNC, we can only take UC mapping. Fail if we cannot.
	 * Without O_SYNC, we want to get
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	if (flags != _PAGE_CACHE_UC_MINUS) {
		retval = reserve_memtype(offset, offset + size, flags, NULL);
	} else {
		retval = reserve_memtype(offset, offset + size, -1, &flags);
	}

	if (retval < 0)
		return 0;

	if (pfn <= max_pfn_mapped &&
	    ioremap_change_attr((unsigned long)__va(offset), size, flags) < 0) {
		free_memtype(offset, offset + size);
		printk(KERN_INFO
		"%s:%d /dev/mem ioremap_change_attr failed %s for %Lx-%Lx\n",
			current->comm, current->pid,
			cattr_name(flags),
			offset, offset + size);
		return 0;
	}

	*vma_prot = __pgprot((pgprot_val(*vma_prot) & ~_PAGE_CACHE_MASK) |
			     flags);
	return 1;
}

void map_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;
	unsigned long flags;
	unsigned long want_flags = (pgprot_val(vma_prot) & _PAGE_CACHE_MASK);

	reserve_memtype(addr, addr + size, want_flags, &flags);
	if (flags != want_flags) {
		printk(KERN_INFO
		"%s:%d /dev/mem expected mapping type %s for %Lx-%Lx, got %s\n",
			current->comm, current->pid,
			cattr_name(want_flags),
			addr, addr + size,
			cattr_name(flags));
	}
}

void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
{
	u64 addr = (u64)pfn << PAGE_SHIFT;

	free_memtype(addr, addr + size);
}
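
/*
 * Pairing sketch (illustrative, not part of the original file): a
 * hypothetical /dev/mem mmap path would bracket the lifetime of a
 * mapping with these two hooks.
 */
#if 0
	map_devmem(pfn, size, vma->vm_page_prot);	/* at mmap time */
	/* ... mapping in use ... */
	unmap_devmem(pfn, size, vma->vm_page_prot);	/* on teardown */
#endif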