/*
 * Copyright (c) 2004-2008 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2006-2008 Nick Kossifidis <mickflemm@gmail.com>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 */

/*************************************\
* DMA and interrupt masking functions *
\*************************************/

/**
 * DOC: DMA and interrupt masking functions
 *
 * Here we set up the descriptor pointers (rxdp/txdp), start/stop the DMA
 * engine and handle queue setup for the 5210 chipset (the rest are handled
 * in qcu.c). We also set up the interrupt mask register (IMR) and read the
 * various interrupt status registers (ISR).
 */

#include "ath5k.h"
#include "reg.h"
#include "debug.h"


/*********\
* Receive *
\*********/

/**
 * ath5k_hw_start_rx_dma() - Start DMA receive
 * @ah: The &struct ath5k_hw
 */
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
	ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
	ath5k_hw_reg_read(ah, AR5K_CR);
}

/**
 * ath5k_hw_stop_rx_dma() - Stop DMA receive
 * @ah: The &struct ath5k_hw
 */
static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
	unsigned int i;

	ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);

	/*
	 * It may take some time to disable the DMA receive unit
	 */
	for (i = 1000; i > 0 &&
			(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
			i--)
		udelay(100);

	if (!i)
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"failed to stop RX DMA !\n");

	return i ? 0 : -EBUSY;
}

/**
 * ath5k_hw_get_rxdp() - Get RX Descriptor's address
 * @ah: The &struct ath5k_hw
 */
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_RXDP);
}

/**
 * ath5k_hw_set_rxdp() - Set RX Descriptor's address
 * @ah: The &struct ath5k_hw
 * @phys_addr: RX descriptor address
 *
 * Returns -EIO if rx is active
 */
int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
	if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"tried to set RXDP while rx was active !\n");
		return -EIO;
	}

	ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
	return 0;
}
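/*
 * Example (illustrative sketch, not part of the driver): RXDP can only be
 * programmed while the receive unit is stopped (set_rxdp returns -EIO
 * otherwise), so a caller bringing RX up would do it in this order.
 * "first_rxdesc_daddr" is a hypothetical stand-in for the DMA address of
 * the first RX descriptor in the caller's chain.
 *
 *	ath5k_hw_stop_rx_dma(ah);			// or implicitly via a reset
 *	ath5k_hw_set_rxdp(ah, first_rxdesc_daddr);
 *	ath5k_hw_start_rx_dma(ah);			// sets AR5K_CR_RXE
 */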
/**********\
* Transmit *
\**********/

/**
 * ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Start DMA transmit for a specific queue. Since the 5210 doesn't have
 * QCU/DCU, queue parameters are also set up here based on queue type (one
 * queue for normal data and one queue for beacons). For queue setup on
 * newer chips check out qcu.c. Returns -EINVAL if the queue number is out
 * of range or the queue is inactive, and -EIO if the queue has been
 * disabled (5211+).
 *
 * NOTE: Must be called after setting up the tx control descriptor for that
 * queue (see below).
 */
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	u32 tx_queue;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set the queue by type on 5210
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
			break;
		case AR5K_TX_QUEUE_BEACON:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
					AR5K_BSR);
			break;
		case AR5K_TX_QUEUE_CAB:
			tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
			ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
					AR5K_BCR_BDMAE, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}
		/* Start queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {
		/* Return if queue is disabled */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
			return -EIO;

		/* Start queue */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
	}

	return 0;
}
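/*
 * Example (illustrative sketch): per the NOTE above, the tx control
 * descriptor chain must be in place before the queue is started. "q" and
 * "first_txdesc_daddr" are hypothetical stand-ins for a hw queue number
 * and the DMA address of the first descriptor of that queue's chain.
 *
 *	ath5k_hw_set_txdp(ah, q, first_txdesc_daddr);	// see below
 *	ath5k_hw_start_tx_dma(ah, q);
 */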
/**
 * ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Stop DMA transmit on a specific hw queue and drain the queue so that we
 * don't have any pending frames. Returns -EBUSY if we still have pending
 * frames, -EINVAL if the queue number is out of range or inactive.
 */
static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
	unsigned int i;
	u32 tx_queue, pending;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/* Return if queue is declared inactive */
	if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
		return -EINVAL;

	if (ah->ah_version == AR5K_AR5210) {
		tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);

		/*
		 * Set by queue type
		 */
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			/* Set TXD1 and clear TXE1, mirroring the
			 * data queue case above */
			tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1;
			ath5k_hw_reg_write(ah, 0, AR5K_BSR);
			break;
		default:
			return -EINVAL;
		}

		/* Stop queue */
		ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
		ath5k_hw_reg_read(ah, AR5K_CR);
	} else {

		/*
		 * Enable DCU early termination to quickly
		 * flush any pending frames from QCU
		 */
		AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_DCU_EARLY);

		/*
		 * Schedule TX disable and wait until queue is empty
		 */
		AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);

		/* Wait for queue to stop */
		for (i = 1000; i > 0 &&
		     (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
		     i--)
			udelay(100);

		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"queue %i didn't stop !\n", queue);

		/* Check for pending frames */
		i = 1000;
		do {
			pending = ath5k_hw_reg_read(ah,
				AR5K_QUEUE_STATUS(queue)) &
				AR5K_QCU_STS_FRMPENDCNT;
			udelay(100);
		} while (--i && pending);

		/* For 2413+ order PCU to drop packets using
		 * QUIET mechanism */
		if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
		    pending) {
			/* Set periodicity and duration */
			ath5k_hw_reg_write(ah,
				AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER) |
				AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
				AR5K_QUIET_CTL2);

			/* Enable quiet period for current TSF */
			ath5k_hw_reg_write(ah,
				AR5K_QUIET_CTL1_QT_EN |
				AR5K_REG_SM(ath5k_hw_reg_read(ah,
						AR5K_TSF_L32_5211) >> 10,
						AR5K_QUIET_CTL1_NEXT_QT_TSF),
				AR5K_QUIET_CTL1);

			/* Force channel idle high */
			AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			/* Wait a while and disable mechanism */
			udelay(400);
			AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
						AR5K_QUIET_CTL1_QT_EN);

			/* Re-check for pending frames */
			i = 100;
			do {
				pending = ath5k_hw_reg_read(ah,
					AR5K_QUEUE_STATUS(queue)) &
					AR5K_QCU_STS_FRMPENDCNT;
				udelay(100);
			} while (--i && pending);

			AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
					AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);

			if (pending)
				ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
					"quiet mechanism didn't work q:%i !\n",
					queue);
		}

		/*
		 * Disable DCU early termination
		 */
		AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
					AR5K_QCU_MISC_DCU_EARLY);

		/* Clear register */
		ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
		if (pending) {
			ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
				"tx dma didn't stop (q:%i, frm:%i) !\n",
				queue, pending);
			return -EBUSY;
		}
	}

	/* TODO: Check for success on 5210 else return error */
	return 0;
}

/**
 * ath5k_hw_stop_beacon_queue() - Stop beacon queue
 * @ah: The &struct ath5k_hw
 * @queue: The queue number
 *
 * Returns -EIO if queue didn't stop
 */
int
ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
	int ret;
	ret = ath5k_hw_stop_tx_dma(ah, queue);
	if (ret) {
		ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
			"beacon queue didn't stop !\n");
		return -EIO;
	}
	return 0;
}
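/*
 * Example (illustrative sketch): the usual way to quiesce TX before a
 * reset is to walk all hw queues, ignoring queues that were never
 * activated, which is exactly what ath5k_hw_dma_stop() at the end of
 * this file does internally:
 *
 *	for (i = 0; i < qmax; i++) {
 *		err = ath5k_hw_stop_tx_dma(ah, i);
 *		if (err && err != -EINVAL)	// -EINVAL -> queue inactive
 *			return err;
 *	}
 */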
/**
 * ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 *
 * Get TX descriptor's address for a specific queue. On the 5210 we ignore
 * the queue number and use the tx queue type, since we only have 2 queues:
 * TXDP0 for the normal data queue and TXDP1 for the beacon queue.
 * For newer chips with QCU/DCU we just read the corresponding TXDP register.
 *
 * XXX: Is TXDP read and clear ?
 */
u32
ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
	u16 tx_reg;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Get the transmit queue descriptor pointer from the selected queue
	 */
	/* 5210 doesn't have QCU */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return 0xffffffff;
		}
	} else {
		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	return ath5k_hw_reg_read(ah, tx_reg);
}

/**
 * ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
 * @ah: The &struct ath5k_hw
 * @queue: The hw queue number
 * @phys_addr: The physical address
 *
 * Set TX descriptor's address for a specific queue. As above, on the 5210
 * we ignore the queue number and use the tx queue type, since we only have
 * 2 queues: TXDP0 for the normal data queue and TXDP1 for the beacon queue.
 * For newer chips with QCU/DCU we just set the corresponding TXDP register.
 * Returns -EINVAL if the queue type is invalid on the 5210 and -EIO if the
 * queue is still active.
 */
int
ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
	u16 tx_reg;

	AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);

	/*
	 * Set the transmit queue descriptor pointer register by type
	 * on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		switch (ah->ah_txq[queue].tqi_type) {
		case AR5K_TX_QUEUE_DATA:
			tx_reg = AR5K_NOQCU_TXDP0;
			break;
		case AR5K_TX_QUEUE_BEACON:
		case AR5K_TX_QUEUE_CAB:
			tx_reg = AR5K_NOQCU_TXDP1;
			break;
		default:
			return -EINVAL;
		}
	} else {
		/*
		 * Set the transmit queue descriptor pointer for
		 * the selected queue on QCU for 5211+
		 * (this won't work if the queue is still active)
		 */
		if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
			return -EIO;

		tx_reg = AR5K_QUEUE_TXDP(queue);
	}

	/* Set descriptor pointer */
	ath5k_hw_reg_write(ah, phys_addr, tx_reg);

	return 0;
}
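/*
 * Example (illustrative sketch): since TXDP can't be set while a queue is
 * active (-EIO above), refreshing e.g. the beacon descriptor means
 * stopping the queue first. "bhalq" and "bf_daddr" are hypothetical
 * stand-ins for the beacon hw queue number and the beacon descriptor's
 * DMA address.
 *
 *	if (ath5k_hw_stop_beacon_queue(ah, bhalq))
 *		return;
 *	ath5k_hw_set_txdp(ah, bhalq, bf_daddr);
 *	ath5k_hw_start_tx_dma(ah, bhalq);
 */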
/**
 * ath5k_hw_update_tx_triglevel() - Update tx trigger level
 * @ah: The &struct ath5k_hw
 * @increase: Flag to force increase of trigger level
 *
 * This function increases/decreases the tx trigger level for the tx fifo
 * buffer (aka FIFO threshold) that is used to indicate when PCU flushes
 * the buffer and transmits its data. Lowering this results in sending small
 * frames more quickly but can lead to tx underruns; raising it a lot can
 * result in other problems. Right now we start with the lowest possible
 * value (64 bytes) and if we get a tx underrun we increase it using the
 * increase flag. Returns -EIO if we have hit the minimum trigger level.
 *
 * XXX: Link this with tx DMA size ?
 * XXX2: Use it to save interrupts ?
 */
int
ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
	u32 trigger_level, imr;
	int ret = -EIO;

	/*
	 * Disable interrupts by setting the mask
	 */
	imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);

	trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
			AR5K_TXCFG_TXFULL);

	if (!increase) {
		if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
			goto done;
	} else
		trigger_level +=
			((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);

	/*
	 * Update trigger level on success
	 */
	if (ah->ah_version == AR5K_AR5210)
		ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
	else
		AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
				AR5K_TXCFG_TXFULL, trigger_level);

	ret = 0;

done:
	/*
	 * Restore interrupt mask
	 */
	ath5k_hw_set_imr(ah, imr);

	return ret;
}


/*******************\
* Interrupt masking *
\*******************/

/**
 * ath5k_hw_is_intr_pending() - Check if we have pending interrupts
 * @ah: The &struct ath5k_hw
 *
 * Check if we have pending interrupts to process. Returns true if we
 * have pending interrupts and false if we don't.
 */
bool
ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
	return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
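/*
 * Example (illustrative sketch): a minimal interrupt handler top half,
 * along the lines of what the driver does in base.c. "status" holds the
 * abstract &enum ath5k_int mask returned via ath5k_hw_get_isr() below.
 *
 *	enum ath5k_int status;
 *
 *	if (!ath5k_hw_is_intr_pending(ah))
 *		return IRQ_NONE;
 *	do {
 *		ath5k_hw_get_isr(ah, &status);	// also clears active bits
 *		// ...dispatch based on status...
 *	} while (ath5k_hw_is_intr_pending(ah));
 */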
/**
 * ath5k_hw_get_isr() - Get interrupt status
 * @ah: The &struct ath5k_hw
 * @interrupt_mask: Driver's interrupt mask used to filter out
 * interrupts in sw.
 *
 * This function is used inside our interrupt handler to determine the
 * reason for the interrupt by reading the Primary Interrupt Status
 * Register. Returns an abstract interrupt status mask which is mostly
 * ISR with some uncommon bits being mapped to some standard non
 * hw-specific positions (check out &enum ath5k_int).
 *
 * NOTE: We do write-to-clear, so the active PISR/SISR bits at the time
 * this function gets called are cleared on return.
 */
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
	u32 data = 0;

	/*
	 * Read interrupt status from the Primary Interrupt
	 * Status Register.
	 *
	 * Note: PISR/SISR are not available on 5210
	 */
	if (ah->ah_version == AR5K_AR5210) {
		u32 isr = 0;
		isr = ath5k_hw_reg_read(ah, AR5K_ISR);
		if (unlikely(isr == AR5K_INT_NOCARD)) {
			*interrupt_mask = isr;
			return -ENODEV;
		}

		/*
		 * Filter out the non-common bits from the interrupt
		 * status.
		 */
		*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;

		/* Handle INT_FATAL */
		if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
						| AR5K_ISR_DPERR)))
			*interrupt_mask |= AR5K_INT_FATAL;

		/*
		 * XXX: BMISS interrupts may occur after association.
		 * I found this on 5210 code but it needs testing. If this is
		 * true we should disable them before assoc and re-enable them
		 * after a successful assoc + some jiffies.
		interrupt_mask &= ~AR5K_INT_BMISS;
		 */

		data = isr;
	} else {
		u32 pisr = 0;
		u32 pisr_clear = 0;
		u32 sisr0 = 0;
		u32 sisr1 = 0;
		u32 sisr2 = 0;
		u32 sisr3 = 0;
		u32 sisr4 = 0;

		/* Read PISR and SISRs... */
		pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
		if (unlikely(pisr == AR5K_INT_NOCARD)) {
			*interrupt_mask = pisr;
			return -ENODEV;
		}

		sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
		sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
		sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
		sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
		sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);

		/*
		 * PISR holds the logical OR of interrupt bits
		 * from the SISR registers:
		 *
		 * TXOK and TXDESC -> Logical OR of TXOK and TXDESC
		 *			per-queue bits on SISR0
		 *
		 * TXERR and TXEOL -> Logical OR of TXERR and TXEOL
		 *			per-queue bits on SISR1
		 *
		 * TXURN -> Logical OR of TXURN per-queue bits on SISR2
		 *
		 * HIUERR -> Logical OR of MCABT, SSERR and DPERR bits on SISR2
		 *
		 * BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC,
		 *		BCN_TIMEOUT, CAB_TIMEOUT and DTIM
		 *		(and TSFOOR ?) bits on SISR2
		 *
		 * QCBRORN and QCBRURN -> Logical OR of QCBRORN and
		 *			QCBRURN per-queue bits on SISR3
		 *
		 * QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
		 *
		 * If we clear these bits on PISR we'll also clear all
		 * related bits from the SISRs, e.g. if we write the TXOK bit
		 * on PISR we'll clear all TXOK bits from SISR0. So if a new
		 * TXOK interrupt got fired for another queue while we were
		 * reading the interrupt registers and we write back the TXOK
		 * bit on PISR we'll lose it. So make sure that we don't
		 * write back on PISR any bits that come from SISRs; clearing
		 * them from the SISRs will also clear PISR, so there's no
		 * need to worry here.
		 */

		pisr_clear = pisr & ~AR5K_ISR_BITS_FROM_SISRS;

		/*
		 * Write to clear them...
		 * Note: This means that each bit we write back
		 * to the registers will get cleared, leaving the
		 * rest unaffected. So this won't affect new interrupts
		 * we didn't catch while reading/processing; we'll get
		 * them the next time get_isr gets called.
		 */
		ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
		ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
		ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
		ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
		ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
		ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
		/* Flush previous write */
		ath5k_hw_reg_read(ah, AR5K_PISR);
639 */ 640 *interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr; 641 642 643 /* We treat TXOK,TXDESC, TXERR and TXEOL 644 * the same way (schedule the tx tasklet) 645 * so we track them all together per queue */ 646 if (pisr & AR5K_ISR_TXOK) 647 ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0, 648 AR5K_SISR0_QCU_TXOK); 649 650 if (pisr & AR5K_ISR_TXDESC) 651 ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0, 652 AR5K_SISR0_QCU_TXDESC); 653 654 if (pisr & AR5K_ISR_TXERR) 655 ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1, 656 AR5K_SISR1_QCU_TXERR); 657 658 if (pisr & AR5K_ISR_TXEOL) 659 ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1, 660 AR5K_SISR1_QCU_TXEOL); 661 662 /* Currently this is not much usefull since we treat 663 * all queues the same way if we get a TXURN (update 664 * tx trigger level) but we might need it later on*/ 665 if (pisr & AR5K_ISR_TXURN) 666 ah->ah_txq_isr_txurn |= AR5K_REG_MS(sisr2, 667 AR5K_SISR2_QCU_TXURN); 668 669 /* Misc Beacon related interrupts */ 670 671 /* For AR5211 */ 672 if (pisr & AR5K_ISR_TIM) 673 *interrupt_mask |= AR5K_INT_TIM; 674 675 /* For AR5212+ */ 676 if (pisr & AR5K_ISR_BCNMISC) { 677 if (sisr2 & AR5K_SISR2_TIM) 678 *interrupt_mask |= AR5K_INT_TIM; 679 if (sisr2 & AR5K_SISR2_DTIM) 680 *interrupt_mask |= AR5K_INT_DTIM; 681 if (sisr2 & AR5K_SISR2_DTIM_SYNC) 682 *interrupt_mask |= AR5K_INT_DTIM_SYNC; 683 if (sisr2 & AR5K_SISR2_BCN_TIMEOUT) 684 *interrupt_mask |= AR5K_INT_BCN_TIMEOUT; 685 if (sisr2 & AR5K_SISR2_CAB_TIMEOUT) 686 *interrupt_mask |= AR5K_INT_CAB_TIMEOUT; 687 } 688 689 /* Below interrupts are unlikely to happen */ 690 691 /* HIU = Host Interface Unit (PCI etc) 692 * Can be one of MCABT, SSERR, DPERR from SISR2 */ 693 if (unlikely(pisr & (AR5K_ISR_HIUERR))) 694 *interrupt_mask |= AR5K_INT_FATAL; 695 696 /*Beacon Not Ready*/ 697 if (unlikely(pisr & (AR5K_ISR_BNR))) 698 *interrupt_mask |= AR5K_INT_BNR; 699 700 /* A queue got CBR overrun */ 701 if (unlikely(pisr & (AR5K_ISR_QCBRORN))) { 702 *interrupt_mask |= AR5K_INT_QCBRORN; 703 ah->ah_txq_isr_qcborn |= AR5K_REG_MS(sisr3, 704 AR5K_SISR3_QCBRORN); 705 } 706 707 /* A queue got CBR underrun */ 708 if (unlikely(pisr & (AR5K_ISR_QCBRURN))) { 709 *interrupt_mask |= AR5K_INT_QCBRURN; 710 ah->ah_txq_isr_qcburn |= AR5K_REG_MS(sisr3, 711 AR5K_SISR3_QCBRURN); 712 } 713 714 /* A queue got triggered */ 715 if (unlikely(pisr & (AR5K_ISR_QTRIG))) { 716 *interrupt_mask |= AR5K_INT_QTRIG; 717 ah->ah_txq_isr_qtrig |= AR5K_REG_MS(sisr4, 718 AR5K_SISR4_QTRIG); 719 } 720 721 data = pisr; 722 } 723 724 /* 725 * In case we didn't handle anything, 726 * print the register value. 727 */ 728 if (unlikely(*interrupt_mask == 0 && net_ratelimit())) 729 ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr); 730 731 return 0; 732} 733 734/** 735 * ath5k_hw_set_imr() - Set interrupt mask 736 * @ah: The &struct ath5k_hw 737 * @new_mask: The new interrupt mask to be set 738 * 739 * Set the interrupt mask in hw to save interrupts. We do that by mapping 740 * ath5k_int bits to hw-specific bits to remove abstraction and writing 741 * Interrupt Mask Register. 742 */ 743enum ath5k_int 744ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask) 745{ 746 enum ath5k_int old_mask, int_mask; 747 748 old_mask = ah->ah_imr; 749 750 /* 751 * Disable card interrupts to prevent any race conditions 752 * (they will be re-enabled afterwards if AR5K_INT GLOBAL 753 * is set again on the new mask). 
754 */ 755 if (old_mask & AR5K_INT_GLOBAL) { 756 ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER); 757 ath5k_hw_reg_read(ah, AR5K_IER); 758 } 759 760 /* 761 * Add additional, chipset-dependent interrupt mask flags 762 * and write them to the IMR (interrupt mask register). 763 */ 764 int_mask = new_mask & AR5K_INT_COMMON; 765 766 if (ah->ah_version != AR5K_AR5210) { 767 /* Preserve per queue TXURN interrupt mask */ 768 u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2) 769 & AR5K_SIMR2_QCU_TXURN; 770 771 /* Fatal interrupt abstraction for 5211+ */ 772 if (new_mask & AR5K_INT_FATAL) { 773 int_mask |= AR5K_IMR_HIUERR; 774 simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR 775 | AR5K_SIMR2_DPERR); 776 } 777 778 /* Misc beacon related interrupts */ 779 if (new_mask & AR5K_INT_TIM) 780 int_mask |= AR5K_IMR_TIM; 781 782 if (new_mask & AR5K_INT_TIM) 783 simr2 |= AR5K_SISR2_TIM; 784 if (new_mask & AR5K_INT_DTIM) 785 simr2 |= AR5K_SISR2_DTIM; 786 if (new_mask & AR5K_INT_DTIM_SYNC) 787 simr2 |= AR5K_SISR2_DTIM_SYNC; 788 if (new_mask & AR5K_INT_BCN_TIMEOUT) 789 simr2 |= AR5K_SISR2_BCN_TIMEOUT; 790 if (new_mask & AR5K_INT_CAB_TIMEOUT) 791 simr2 |= AR5K_SISR2_CAB_TIMEOUT; 792 793 /*Beacon Not Ready*/ 794 if (new_mask & AR5K_INT_BNR) 795 int_mask |= AR5K_INT_BNR; 796 797 /* Note: Per queue interrupt masks 798 * are set via ath5k_hw_reset_tx_queue() (qcu.c) */ 799 ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR); 800 ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2); 801 802 } else { 803 /* Fatal interrupt abstraction for 5210 */ 804 if (new_mask & AR5K_INT_FATAL) 805 int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT 806 | AR5K_IMR_HIUERR | AR5K_IMR_DPERR); 807 808 /* Only common interrupts left for 5210 (no SIMRs) */ 809 ath5k_hw_reg_write(ah, int_mask, AR5K_IMR); 810 } 811 812 /* If RXNOFRM interrupt is masked disable it 813 * by setting AR5K_RXNOFRM to zero */ 814 if (!(new_mask & AR5K_INT_RXNOFRM)) 815 ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM); 816 817 /* Store new interrupt mask */ 818 ah->ah_imr = new_mask; 819 820 /* ..re-enable interrupts if AR5K_INT_GLOBAL is set */ 821 if (new_mask & AR5K_INT_GLOBAL) { 822 ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER); 823 ath5k_hw_reg_read(ah, AR5K_IER); 824 } 825 826 return old_mask; 827} 828 829 830/********************\ 831 Init/Stop functions 832\********************/ 833 834/** 835 * ath5k_hw_dma_init() - Initialize DMA unit 836 * @ah: The &struct ath5k_hw 837 * 838 * Set DMA size and pre-enable interrupts 839 * (driver handles tx/rx buffer setup and 840 * dma start/stop) 841 * 842 * XXX: Save/restore RXDP/TXDP registers ? 843 */ 844void 845ath5k_hw_dma_init(struct ath5k_hw *ah) 846{ 847 /* 848 * Set Rx/Tx DMA Configuration 849 * 850 * Set standard DMA size (128). Note that 851 * a DMA size of 512 causes rx overruns and tx errors 852 * on pci-e cards (tested on 5424 but since rx overruns 853 * also occur on 5416/5418 with madwifi we set 128 854 * for all PCI-E cards to be safe). 
855 * 856 * XXX: need to check 5210 for this 857 * TODO: Check out tx trigger level, it's always 64 on dumps but I 858 * guess we can tweak it and see how it goes ;-) 859 */ 860 if (ah->ah_version != AR5K_AR5210) { 861 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG, 862 AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B); 863 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG, 864 AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B); 865 } 866 867 /* Pre-enable interrupts on 5211/5212*/ 868 if (ah->ah_version != AR5K_AR5210) 869 ath5k_hw_set_imr(ah, ah->ah_imr); 870 871} 872 873/** 874 * ath5k_hw_dma_stop() - stop DMA unit 875 * @ah: The &struct ath5k_hw 876 * 877 * Stop tx/rx DMA and interrupts. Returns 878 * -EBUSY if tx or rx dma failed to stop. 879 * 880 * XXX: Sometimes DMA unit hangs and we have 881 * stuck frames on tx queues, only a reset 882 * can fix that. 883 */ 884int 885ath5k_hw_dma_stop(struct ath5k_hw *ah) 886{ 887 int i, qmax, err; 888 err = 0; 889 890 /* Disable interrupts */ 891 ath5k_hw_set_imr(ah, 0); 892 893 /* Stop rx dma */ 894 err = ath5k_hw_stop_rx_dma(ah); 895 if (err) 896 return err; 897 898 /* Clear any pending interrupts 899 * and disable tx dma */ 900 if (ah->ah_version != AR5K_AR5210) { 901 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR); 902 qmax = AR5K_NUM_TX_QUEUES; 903 } else { 904 /* PISR/SISR Not available on 5210 */ 905 ath5k_hw_reg_read(ah, AR5K_ISR); 906 qmax = AR5K_NUM_TX_QUEUES_NOQCU; 907 } 908 909 for (i = 0; i < qmax; i++) { 910 err = ath5k_hw_stop_tx_dma(ah, i); 911 /* -EINVAL -> queue inactive */ 912 if (err && err != -EINVAL) 913 return err; 914 } 915 916 return 0; 917} 918