internal.h revision d4578ea810ce468fdb8e1b7014818c31db9be5e2
/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#ifndef __iwl_trans_int_pcie_h__
#define __iwl_trans_int_pcie_h__

#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>

#include "iwl-fh.h"
#include "iwl-csr.h"
#include "iwl-trans.h"
#include "iwl-debug.h"
#include "iwl-io.h"
#include "iwl-op-mode.h"

struct iwl_host_cmd;

/* This file includes the declarations that are internal to the
 * trans_pcie layer */

struct iwl_rx_mem_buffer {
	dma_addr_t page_dma;
	struct page *page;
	struct list_head list;
};

/**
 * struct isr_statistics - interrupt statistics
 *
 */
struct isr_statistics {
	u32 hw;
	u32 sw;
	u32 err_code;
	u32 sch;
	u32 alive;
	u32 rfkill;
	u32 ctkill;
	u32 wakeup;
	u32 rx;
	u32 tx;
	u32 unhandled;
};

/**
 * struct iwl_rxq - Rx queue
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @pool: initial pool of iwl_rx_mem_buffer
 * @queue: pointers into @pool, one per receive buffer descriptor
 * @read: Shared index to newest available Rx buffer
 * @write: Shared index to oldest written Rx packet
 * @free_count: Number of pre-allocated buffers in rx_free
 * @write_actual: last @write value actually handed to the device
 * @rx_free: list of free SKBs for use
 * @rx_used: List of Rx buffers with no SKB
 * @need_update: flag to indicate we need to update read/write index
 * @rb_stts: driver's pointer to receive buffer status
 * @rb_stts_dma: bus address of receive buffer status
 * @lock: protects the queue against concurrent access
 *
 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
 */
struct iwl_rxq {
	__le32 *bd;
	dma_addr_t bd_dma;
	struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
	struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
	u32 read;
	u32 write;
	u32 free_count;
	u32 write_actual;
	struct list_head rx_free;
	struct list_head rx_used;
	bool need_update;
	struct iwl_rb_status *rb_stts;
	dma_addr_t rb_stts_dma;
	spinlock_t lock;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

/**
 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
 * @index: current index
 */
static inline int iwl_queue_inc_wrap(int index)
{
	return ++index & (TFD_QUEUE_SIZE_MAX - 1);
}

/**
 * iwl_queue_dec_wrap - decrement queue index, wrap back to end
 * @index: current index
 */
static inline int iwl_queue_dec_wrap(int index)
{
	return --index & (TFD_QUEUE_SIZE_MAX - 1);
}
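/*
 * Illustrative note (added for clarity, not part of the driver): the wrap
 * helpers above rely on TFD_QUEUE_SIZE_MAX being a power of two (256, per
 * the generic queue comment below), so the modulo reduces to a bitwise AND:
 *
 *	iwl_queue_inc_wrap(255) == 256 & 255 == 0
 *	iwl_queue_dec_wrap(0)   ==  -1 & 255 == 255
 */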
struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
};

/*
 * Generic queue structure
 *
 * Contains common data for Rx and Tx queues.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
 * the software buffers (in the variables @meta, @txb in struct
 * iwl_txq) only have 32 entries, while the HW buffers (@tfds in
 * the same struct) have 256.
 * This means that we end up with the following:
 * HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 * SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_queue {
	int write_ptr;		/* first empty entry (index), host write ptr */
	int read_ptr;		/* last used entry (index), host read ptr */
	/* used for monitoring and recovering the stuck queue */
	dma_addr_t dma_addr;	/* physical addr for BD's */
	int n_window;		/* safe queue window */
	u32 id;
	int low_mark;		/* low watermark, resume queue if free
				 * space more than this */
	int high_mark;		/* high watermark, stop queue if free
				 * space less than this */
};

#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32

/*
 * The FH will write back to the first TB only, so we need
 * to copy some data into the buffer regardless of whether
 * it should be mapped or not. This indicates how big the
 * first TB must be to include the scratch buffer. Since
 * the scratch is 4 bytes at offset 12, it's 16 now. If we
 * make it bigger then allocations will be bigger and copy
 * slower, so that's probably not useful.
 */
#define IWL_HCMD_SCRATCHBUF_SIZE 16
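/*
 * Illustrative note (added for clarity, not part of the driver): for the
 * command queue the SW window is TFD_CMD_SLOTS (32) entries wide while the
 * HW ring still has TFD_QUEUE_SIZE_MAX (256) TFDs, so a HW index is folded
 * into the SW window by get_cmd_index() further below:
 *
 *	get_cmd_index(q, 200) == 200 & (32 - 1) == 8
 *
 * i.e. HW entries 8, 40, 72, ..., 232 all map onto SW entry 8.
 */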
struct iwl_pcie_txq_entry {
	struct iwl_device_cmd *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_txq_scratch_buf {
	struct iwl_cmd_header hdr;
	u8 buf[8];
	__le32 scratch;
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @scratchbufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @scratchbufs_dma: DMA address for the scratchbufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans_pcie: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @active: stores if queue is active
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 *
 * A Tx queue consists of a circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 */
struct iwl_txq {
	struct iwl_queue q;
	struct iwl_tfd *tfds;
	struct iwl_pcie_txq_scratch_buf *scratchbufs;
	dma_addr_t scratchbufs_dma;
	struct iwl_pcie_txq_entry *entries;
	spinlock_t lock;
	struct timer_list stuck_timer;
	struct iwl_trans_pcie *trans_pcie;
	bool need_update;
	u8 active;
	bool ampdu;
};

static inline dma_addr_t
iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
{
	return txq->scratchbufs_dma +
	       sizeof(struct iwl_pcie_txq_scratch_buf) * idx;
}
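/*
 * Illustrative note (added for clarity, not part of the driver): assuming
 * struct iwl_cmd_header is 4 bytes, the scratch buffer layout above matches
 * IWL_HCMD_SCRATCHBUF_SIZE:
 *
 *	sizeof(struct iwl_pcie_txq_scratch_buf)
 *		== 4 (hdr) + 8 (buf) + 4 (scratch) == 16
 *
 * so @scratch sits at offset 12 as described above the define, and
 * iwl_pcie_get_scratchbuf_dma() simply indexes an array of these 16-byte
 * blocks: scratchbufs_dma + 16 * idx.
 */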
/**
 * struct iwl_trans_pcie - PCIe transport specific data
 * @rxq: all the RX queue data
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @drv: pointer to iwl_drv
 * @trans: pointer to the generic transport area
 * @scd_base_addr: scheduler base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
 * @pci_dev: basic pci-network driver stuff
 * @hw_base: pci hardware address support
 * @ucode_write_complete: indicates that the ucode has been copied.
 * @ucode_write_waitq: wait queue for uCode load
 * @cmd_queue: command queue number
 * @rx_buf_size_8k: 8 kB RX buffer size
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @rx_page_order: page order for receive buffer size
 * @wd_timeout: queue watchdog timeout (jiffies)
 * @reg_lock: protect hw register access
 * @cmd_in_flight: true when we have a host command in flight
 * @fw_mon_phys: physical address of the buffer for the firmware monitor
 * @fw_mon_page: points to the first page of the buffer for the firmware monitor
 * @fw_mon_size: size of the buffer for the firmware monitor
 */
struct iwl_trans_pcie {
	struct iwl_rxq rxq;
	struct work_struct rx_replenish;
	struct iwl_trans *trans;
	struct iwl_drv *drv;

	struct net_device napi_dev;
	struct napi_struct napi;

	/* INT ICT Table */
	__le32 *ict_tbl;
	dma_addr_t ict_tbl_dma;
	int ict_index;
	bool use_ict;
	struct isr_statistics isr_stats;

	spinlock_t irq_lock;
	u32 inta_mask;
	u32 scd_base_addr;
	struct iwl_dma_ptr scd_bc_tbls;
	struct iwl_dma_ptr kw;

	struct iwl_txq *txq;
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_HW_QUEUES)];

	/* PCI bus related data */
	struct pci_dev *pci_dev;
	void __iomem *hw_base;

	bool ucode_write_complete;
	wait_queue_head_t ucode_write_waitq;
	wait_queue_head_t wait_command_queue;

	u8 cmd_queue;
	u8 cmd_fifo;
	u8 n_no_reclaim_cmds;
	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];

	bool rx_buf_size_8k;
	bool bc_table_dword;
	u32 rx_page_order;

	const char *const *command_names;

	/* queue watchdog */
	unsigned long wd_timeout;

	/* protect hw register access */
	spinlock_t reg_lock;
	bool cmd_in_flight;

	dma_addr_t fw_mon_phys;
	struct page *fw_mon_page;
	u32 fw_mon_size;
};

#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
	((struct iwl_trans_pcie *) ((_iwl_trans)->trans_specific))

static inline struct iwl_trans *
iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
{
	return container_of((void *)trans_pcie, struct iwl_trans,
			    trans_specific);
}
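/*
 * Illustrative note (added for clarity, not part of the driver): assuming
 * trans_specific is laid out at the end of struct iwl_trans (see
 * iwl-trans.h), the macro above and iwl_trans_pcie_get_trans() are exact
 * inverses of each other:
 *
 *	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 *	iwl_trans_pcie_get_trans(trans_pcie) == trans
 */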
/*
 * Convention: trans API functions: iwl_trans_pcie_XXX
 *	Other functions: iwl_pcie_XXX
 */
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg);
void iwl_trans_pcie_free(struct iwl_trans *trans);

/*****************************************************
* RX
******************************************************/
int iwl_pcie_rx_init(struct iwl_trans *trans);
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id);
int iwl_pcie_rx_stop(struct iwl_trans *trans);
void iwl_pcie_rx_free(struct iwl_trans *trans);

/*****************************************************
* ICT - interrupt handling
******************************************************/
irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
void iwl_pcie_disable_ict(struct iwl_trans *trans);

/*****************************************************
* TX / HCMD
******************************************************/
int iwl_pcie_tx_init(struct iwl_trans *trans);
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr);
int iwl_pcie_tx_stop(struct iwl_trans *trans);
void iwl_pcie_tx_free(struct iwl_trans *trans);
void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg);
void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
				bool configure_scd);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_cmd *dev_cmd, int txq_id);
void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans);
int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb, int handler_status);
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);

static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
{
	struct iwl_tfd_tb *tb = &tfd->tbs[idx];

	return le16_to_cpu(tb->hi_n_len) >> 4;
}

/*****************************************************
* Error handling
******************************************************/
void iwl_pcie_dump_csr(struct iwl_trans *trans);

/*****************************************************
* Helpers
******************************************************/
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
	clear_bit(STATUS_INT_ENABLED, &trans->status);

	/* disable interrupts from uCode/NIC to host */
	iwl_write32(trans, CSR_INT_MASK, 0x00000000);

	/* acknowledge/clear/reset any interrupts still pending
	 * from uCode or flow handler (Rx/Tx DMA) */
	iwl_write32(trans, CSR_INT, 0xffffffff);
	iwl_write32(trans, CSR_FH_INT_STATUS, 0xffffffff);
	IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}

static inline void iwl_enable_interrupts(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
	set_bit(STATUS_INT_ENABLED, &trans->status);
	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
	trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
	iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}

static inline void iwl_wake_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
		IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
		iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
	}
}

static inline void iwl_stop_queue(struct iwl_trans *trans,
				  struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
		iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
		IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
	} else
		IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
				    txq->q.id);
}

static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
{
	return q->write_ptr >= q->read_ptr ?
		(i >= q->read_ptr && i < q->write_ptr) :
		!(i < q->read_ptr && i >= q->write_ptr);
}
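/*
 * Illustrative note (added for clarity, not part of the driver):
 * iwl_queue_used() treats [read_ptr, write_ptr) as the in-flight range and
 * handles the case where it wraps past the end of the ring. For example,
 * with read_ptr == 250 and write_ptr == 5:
 *
 *	iwl_queue_used(q, 252) == true	(between 250 and 255)
 *	iwl_queue_used(q, 2)   == true	(between 0 and 4)
 *	iwl_queue_used(q, 100) == false	(outside the wrapped range)
 */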
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
{
	return index & (q->n_window - 1);
}

static inline const char *get_cmd_string(struct iwl_trans_pcie *trans_pcie,
					 u8 cmd)
{
	if (!trans_pcie->command_names || !trans_pcie->command_names[cmd])
		return "UNKNOWN";
	return trans_pcie->command_names[cmd];
}

static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
{
	return !(iwl_read32(trans, CSR_GP_CNTRL) &
		 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}

static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
						  u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);

#endif /* __iwl_trans_int_pcie_h__ */