tx.c revision 6f266e912c0733e77f63e9ad245db3c966b75942
/*
 * This file is part of wl1271
 *
 * Copyright (C) 2009 Nokia Corporation
 *
 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>

#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"

/*
 * TODO: this is here just for now, it must be removed when the data
 * operations are in place.
 */
#include "../wl12xx/reg.h"

static int wl1271_set_default_wep_key(struct wl1271 *wl,
				      struct wl12xx_vif *wlvif, u8 id)
{
	int ret;
	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap)
		ret = wl12xx_cmd_set_default_wep_key(wl, id,
						     wlvif->ap.bcast_hlid);
	else
		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);

	if (ret < 0)
		return ret;

	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
	return 0;
}

static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
	int id;

	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
	if (id >= wl->num_tx_desc)
		return -EBUSY;

	__set_bit(id, wl->tx_frames_map);
	wl->tx_frames[id] = skb;
	wl->tx_frames_cnt++;
	return id;
}

static void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

		wl->tx_frames[id] = NULL;
		wl->tx_frames_cnt--;
	}
}

static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
						 struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;

	/*
	 * add the station to the known list before transmitting the
	 * authentication response. this way it won't get de-authed by FW
	 * when transmitting too soon.
	 */
	hdr = (struct ieee80211_hdr *)(skb->data +
				       sizeof(struct wl1271_tx_hw_descr));
	if (ieee80211_is_auth(hdr->frame_control))
		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
}

static void wl1271_tx_regulate_link(struct wl1271 *wl,
				    struct wl12xx_vif *wlvif,
				    u8 hlid)
{
	bool fw_ps, single_sta;
	u8 tx_pkts;

	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	tx_pkts = wl->links[hlid].allocated_pkts;
	single_sta = (wl->active_sta_count == 1);

	/*
	 * if in FW PS and there is enough data in FW we can put the link
	 * into high-level PS and clean out its TX queues.
	 * Make an exception if this is the only connected station. In this
	 * case FW-memory congestion is not a problem.
	 */
	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl12xx_ps_link_start(wl, wlvif, hlid, true);
}

bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
	return wl->dummy_packet == skb;
}

u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			 struct sk_buff *skb)
{
	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);

	if (control->control.sta) {
		struct wl1271_station *wl_sta;

		wl_sta = (struct wl1271_station *)
				control->control.sta->drv_priv;
		return wl_sta->hlid;
	} else {
		struct ieee80211_hdr *hdr;

		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
			return wl->system_hlid;

		hdr = (struct ieee80211_hdr *)skb->data;
		if (ieee80211_is_mgmt(hdr->frame_control))
			return wlvif->ap.global_hlid;
		else
			return wlvif->ap.bcast_hlid;
	}
}

u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
		      struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
		return wl->system_hlid;

	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);

	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
	    !ieee80211_is_auth(hdr->frame_control) &&
	    !ieee80211_is_assoc_req(hdr->frame_control))
		return wlvif->sta.hlid;
	else
		return wlvif->dev_hlid;
}

unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
					  unsigned int packet_length)
{
	if (wl->quirks & WLCORE_QUIRK_NO_BLOCKSIZE_ALIGNMENT)
		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
	else
		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);

static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			      struct sk_buff *skb, u32 extra, u32 buf_offset,
			      u8 hlid)
{
	struct wl1271_tx_hw_descr *desc;
	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
	u32 total_blocks;
	int id, ret = -EBUSY, ac;
	u32 spare_blocks = wl->normal_tx_spare;
	bool is_dummy = false;

	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
		return -EAGAIN;

	/* allocate free identifier for the packet */
	id = wl1271_alloc_tx_id(wl, skb);
	if (id < 0)
		return id;

	if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
		is_dummy = true;
	else if (wlvif->is_gem)
		spare_blocks = wl->gem_tx_spare;

	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);

	if (total_blocks <= wl->tx_blocks_available) {
		desc = (struct wl1271_tx_hw_descr *)skb_push(
			skb, total_len - skb->len);

		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
					     spare_blocks);

		desc->id = id;

		wl->tx_blocks_available -= total_blocks;
		wl->tx_allocated_blocks += total_blocks;

		/* If the FW was empty before, arm the Tx watchdog */
		if (wl->tx_allocated_blocks == total_blocks)
			wl12xx_rearm_tx_watchdog_locked(wl);

		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		wl->tx_allocated_pkts[ac]++;

		if (!is_dummy && wlvif &&
		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
		    test_bit(hlid, wlvif->ap.sta_hlid_map))
			wl->links[hlid].allocated_pkts++;

		ret = 0;

		wl1271_debug(DEBUG_TX,
			     "tx_allocate: size: %d, blocks: %d, id: %d",
			     total_len, total_blocks, id);
	} else {
		wl1271_free_tx_id(wl, id);
	}

	return ret;
}

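/*
 * Build the HW TX descriptor that wl1271_tx_allocate() reserved at the head
 * of the skb: packet lifetime, TID, session counter, HLID and rate-policy
 * index. When "extra" is non-zero, the 802.11 header is relocated so that
 * the security (e.g. TKIP) header space sits between header and payload.
 */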
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
			       struct sk_buff *skb, u32 extra,
			       struct ieee80211_tx_info *control, u8 hlid)
{
	struct timespec ts;
	struct wl1271_tx_hw_descr *desc;
	int ac, rate_idx;
	s64 hosttime;
	u16 tx_attr = 0;
	__le16 frame_control;
	struct ieee80211_hdr *hdr;
	u8 *frame_start;
	bool is_dummy;

	desc = (struct wl1271_tx_hw_descr *) skb->data;
	frame_start = (u8 *)(desc + 1);
	hdr = (struct ieee80211_hdr *)(frame_start + extra);
	frame_control = hdr->frame_control;

	/* relocate space for security header */
	if (extra) {
		int hdrlen = ieee80211_hdrlen(frame_control);
		memmove(frame_start, hdr, hdrlen);
	}

	/* configure packet life time */
	getnstimeofday(&ts);
	hosttime = (timespec_to_ns(&ts) >> 10);
	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);

	is_dummy = wl12xx_is_dummy_packet(wl, skb);
	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
	else
		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);

	/* queue */
	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
	desc->tid = skb->priority;

	if (is_dummy) {
		/*
		 * FW expects the dummy packet to have an invalid session id -
		 * any session id that is different from the one set in the join
		 */
		tx_attr = (SESSION_COUNTER_INVALID <<
			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
			   TX_HW_ATTR_SESSION_COUNTER;

		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
	} else if (wlvif) {
		/* configure the tx attributes */
		tx_attr = wlvif->session_counter <<
			  TX_HW_ATTR_OFST_SESSION_COUNTER;
	}

	desc->hlid = hlid;
	if (is_dummy || !wlvif)
		rate_idx = 0;
	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
		/* if the packets are destined for AP (have a STA entry)
		   send them with AP rate policies, otherwise use default
		   basic rates */
		if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
			rate_idx = wlvif->sta.p2p_rate_idx;
		else if (control->control.sta)
			rate_idx = wlvif->sta.ap_rate_idx;
		else
			rate_idx = wlvif->sta.basic_rate_idx;
	} else {
		if (hlid == wlvif->ap.global_hlid)
			rate_idx = wlvif->ap.mgmt_rate_idx;
		else if (hlid == wlvif->ap.bcast_hlid)
			rate_idx = wlvif->ap.bcast_rate_idx;
		else
			rate_idx = wlvif->ap.ucast_rate_idx[ac];
	}

	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;

	/* for WEP shared auth - no fw encryption is needed */
	if (ieee80211_is_auth(frame_control) &&
	    ieee80211_has_protected(frame_control))
		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;

	desc->reserved = 0;
	desc->tx_attr = cpu_to_le16(tx_attr);

	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}

/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				   struct sk_buff *skb, u32 buf_offset)
{
	struct ieee80211_tx_info *info;
	u32 extra = 0;
	int ret = 0;
	u32 total_len;
	u8 hlid;
	bool is_dummy;

	if (!skb)
		return -EINVAL;

	info = IEEE80211_SKB_CB(skb);

	/* TODO: handle dummy packets on multi-vifs */
	is_dummy = wl12xx_is_dummy_packet(wl, skb);

	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
		extra = WL1271_EXTRA_SPACE_TKIP;

	if (info->control.hw_key) {
		bool is_wep;
		u8 idx = info->control.hw_key->hw_key_idx;
		u32 cipher = info->control.hw_key->cipher;

		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
			 (cipher == WLAN_CIPHER_SUITE_WEP104);

		if (unlikely(is_wep && wlvif->default_key != idx)) {
			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
			if (ret < 0)
				return ret;
			wlvif->default_key = idx;
		}
	}
	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
	if (hlid == WL12XX_INVALID_LINK_ID) {
		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
		return -EINVAL;
	}

	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
	if (ret < 0)
		return ret;

	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);

	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
		wl1271_tx_ap_update_inconnection_sta(wl, skb);
		wl1271_tx_regulate_link(wl, wlvif, hlid);
	}

	/*
	 * The length of each packet is stored in terms of
	 * words. Thus, we must pad the skb data to make sure its
	 * length is aligned. The number of padding bytes is computed
	 * and set in wl1271_tx_fill_hdr.
	 * In special cases, we want to align to a specific block size
	 * (e.g. for wl128x with SDIO we align to 256).
	 */
	total_len = wlcore_calc_packet_alignment(wl, skb->len);

	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);

	/* Revert side effects in the dummy packet skb, so it can be reused */
	if (is_dummy)
		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	return total_len;
}

u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
				enum ieee80211_band rate_band)
{
	struct ieee80211_supported_band *band;
	u32 enabled_rates = 0;
	int bit;

	band = wl->hw->wiphy->bands[rate_band];
	for (bit = 0; bit < band->n_bitrates; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= band->bitrates[bit].hw_value;
		rate_set >>= 1;
	}

	/* MCS rate indications are on bits 16 - 23 */
	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;

	for (bit = 0; bit < 8; bit++) {
		if (rate_set & 0x1)
			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
		rate_set >>= 1;
	}

	return enabled_rates;
}

void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
	unsigned long flags;
	int i;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		if (test_bit(i, &wl->stopped_queues_map) &&
		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
			/* firmware buffer has space, restart queues */
			spin_lock_irqsave(&wl->wl_lock, flags);
			ieee80211_wake_queue(wl->hw,
					     wl1271_tx_get_mac80211_queue(i));
			clear_bit(i, &wl->stopped_queues_map);
			spin_unlock_irqrestore(&wl->wl_lock, flags);
		}
	}
}

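/*
 * Pick one of the link's per-AC queues to transmit from: among the
 * non-empty queues, choose the AC with the fewest packets currently
 * allocated in the firmware. Returns NULL if all queues are empty.
 */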
static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
						struct sk_buff_head *queues)
{
	int i, q = -1, ac;
	u32 min_pkts = 0xffffffff;

	/*
	 * Find a non-empty ac where:
	 * 1. There are packets to transmit
	 * 2. The FW has the least allocated blocks
	 *
	 * We prioritize the ACs according to VO>VI>BE>BK
	 */
	for (i = 0; i < NUM_TX_QUEUES; i++) {
		ac = wl1271_tx_get_queue(i);
		if (!skb_queue_empty(&queues[ac]) &&
		    (wl->tx_allocated_pkts[ac] < min_pkts)) {
			q = ac;
			min_pkts = wl->tx_allocated_pkts[q];
		}
	}

	if (q == -1)
		return NULL;

	return &queues[q];
}

static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
					      struct wl1271_link *lnk)
{
	struct sk_buff *skb;
	unsigned long flags;
	struct sk_buff_head *queue;

	queue = wl1271_select_queue(wl, lnk->tx_queue);
	if (!queue)
		return NULL;

	skb = skb_dequeue(queue);
	if (skb) {
		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
					      struct wl12xx_vif *wlvif)
{
	struct sk_buff *skb = NULL;
	int i, h, start_hlid;

	/* start from the link after the last one */
	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;

	/* dequeue according to AC, round robin on each link */
	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
		h = (start_hlid + i) % WL12XX_MAX_LINKS;

		/* only consider connected stations */
		if (!test_bit(h, wlvif->links_map))
			continue;

		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
		if (!skb)
			continue;

		wlvif->last_tx_hlid = h;
		break;
	}

	if (!skb)
		wlvif->last_tx_hlid = 0;

	return skb;
}

static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
{
	unsigned long flags;
	struct wl12xx_vif *wlvif = wl->last_wlvif;
	struct sk_buff *skb = NULL;

	/* continue from last wlvif (round robin) */
	if (wlvif) {
		wl12xx_for_each_wlvif_continue(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}
		}
	}

	/* dequeue from the system HLID before restarting the wlvif list */
	if (!skb)
		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);

	/* do a new pass over the wlvif list */
	if (!skb) {
		wl12xx_for_each_wlvif(wl, wlvif) {
			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
			if (skb) {
				wl->last_wlvif = wlvif;
				break;
			}

			/*
			 * No need to continue after last_wlvif. The previous
			 * pass should have found it.
			 */
			if (wlvif == wl->last_wlvif)
				break;
		}
	}

	if (!skb &&
	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
		int q;

		skb = wl->dummy_packet;
		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
		spin_lock_irqsave(&wl->wl_lock, flags);
		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
		wl->tx_queue_count[q]--;
		spin_unlock_irqrestore(&wl->wl_lock, flags);
	}

	return skb;
}

static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
				  struct sk_buff *skb)
{
	unsigned long flags;
	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));

	if (wl12xx_is_dummy_packet(wl, skb)) {
		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	} else {
		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);

		/* make sure we dequeue the same packet next time */
		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
				      WL12XX_MAX_LINKS;
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}

static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);

	return ieee80211_is_data_present(hdr->frame_control);
}

void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
	struct wl12xx_vif *wlvif;
	u32 timeout;
	u8 hlid;

	if (!wl->conf.rx_streaming.interval)
		return;

	if (!wl->conf.rx_streaming.always &&
	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
		return;

	timeout = wl->conf.rx_streaming.duration;
	wl12xx_for_each_wlvif_sta(wl, wlvif) {
		bool found = false;
		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
			if (test_bit(hlid, wlvif->links_map)) {
				found = true;
				break;
			}
		}

		if (!found)
			continue;

		/* enable rx streaming */
		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
			ieee80211_queue_work(wl->hw,
					     &wlvif->rx_streaming_enable_work);

		mod_timer(&wlvif->rx_streaming_timer,
			  jiffies + msecs_to_jiffies(timeout));
	}
}

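/*
 * Main TX path: dequeue frames (round robin over vifs and links), prepare
 * them into the aggregation buffer and write the buffer out to the device.
 * -EAGAIN means the aggregation buffer is full, so it is flushed and the
 * frame retried; -EBUSY means firmware memory is exhausted, so the frame
 * is re-queued and aggregation stops until blocks are freed.
 */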
void wl1271_tx_work_locked(struct wl1271 *wl)
{
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	struct wl1271_tx_hw_descr *desc;
	u32 buf_offset = 0;
	bool sent_packets = false;
	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
	int ret;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		return;

	while ((skb = wl1271_skb_dequeue(wl))) {
		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
		bool has_data = false;

		wlvif = NULL;
		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
			wlvif = wl12xx_vif_to_data(info->control.vif);

		has_data = wlvif && wl1271_tx_is_data_present(skb);
		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
		if (ret == -EAGAIN) {
			/*
			 * Aggregation buffer is full.
			 * Flush buffer and try again.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb);
			wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
					  buf_offset, true);
			sent_packets = true;
			buf_offset = 0;
			continue;
		} else if (ret == -EBUSY) {
			/*
			 * Firmware buffer is full.
			 * Queue back last skb, and stop aggregating.
			 */
			wl1271_skb_queue_head(wl, wlvif, skb);
			/* No work left, avoid scheduling redundant tx work */
			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
			goto out_ack;
		} else if (ret < 0) {
			if (wl12xx_is_dummy_packet(wl, skb))
				/*
				 * fw still expects dummy packet,
				 * so re-enqueue it
				 */
				wl1271_skb_queue_head(wl, wlvif, skb);
			else
				ieee80211_free_txskb(wl->hw, skb);
			goto out_ack;
		}
		buf_offset += ret;
		wl->tx_packets_count++;
		if (has_data) {
			desc = (struct wl1271_tx_hw_descr *) skb->data;
			__set_bit(desc->hlid, active_hlids);
		}
	}

out_ack:
	if (buf_offset) {
		wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
				  buf_offset, true);
		sent_packets = true;
	}
	if (sent_packets) {
		/*
		 * Interrupt the firmware with the new packets. This is only
		 * required for older hardware revisions
		 */
		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
			wl1271_write32(wl, WL12XX_HOST_WR_ACCESS,
				       wl->tx_packets_count);

		wl1271_handle_tx_low_watermark(wl);
	}
	wl12xx_rearm_rx_streaming(wl, active_hlids);
}

void wl1271_tx_work(struct work_struct *work)
{
	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
	int ret;

	mutex_lock(&wl->mutex);
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_tx_work_locked(wl);

	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}

static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
	u8 flags = 0;

	if (rate_class_index >= CONF_HW_RXTX_RATE_MCS_MIN &&
	    rate_class_index <= CONF_HW_RXTX_RATE_MCS_MAX)
		flags |= IEEE80211_TX_RC_MCS;
	if (rate_class_index == CONF_HW_RXTX_RATE_MCS7_SGI)
		flags |= IEEE80211_TX_RC_SHORT_GI;
	return flags;
}

static void wl1271_tx_complete_packet(struct wl1271 *wl,
				      struct wl1271_tx_hw_res_descr *result)
{
	struct ieee80211_tx_info *info;
	struct ieee80211_vif *vif;
	struct wl12xx_vif *wlvif;
	struct sk_buff *skb;
	int id = result->id;
	int rate = -1;
	u8 rate_flags = 0;
	u8 retries = 0;

	/* check for id legality */
	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
		wl1271_warning("TX result illegal id: %d", id);
		return;
	}

	skb = wl->tx_frames[id];
	info = IEEE80211_SKB_CB(skb);

	if (wl12xx_is_dummy_packet(wl, skb)) {
		wl1271_free_tx_id(wl, id);
		return;
	}

	/* info->control is valid as long as we don't update info->status */
	vif = info->control.vif;
	wlvif = wl12xx_vif_to_data(vif);

	/* update the TX status info */
	if (result->status == TX_SUCCESS) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
			info->flags |= IEEE80211_TX_STAT_ACK;
		rate = wl1271_rate_to_idx(result->rate_class_index,
					  wlvif->band);
		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
		retries = result->ack_failures;
	} else if (result->status == TX_RETRY_EXCEEDED) {
		wl->stats.excessive_retries++;
		retries = result->ack_failures;
	}

	info->status.rates[0].idx = rate;
	info->status.rates[0].count = retries;
	info->status.rates[0].flags = rate_flags;
	info->status.ack_signal = -1;

	wl->stats.retry_count += result->ack_failures;

	/*
	 * update sequence number only when relevant, i.e. only in
	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
	 */
	if (info->control.hw_key &&
	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
		u8 fw_lsb = result->tx_security_sequence_number_lsb;
		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;

		/*
		 * update security sequence number, taking care of potential
		 * wrap-around
		 */
		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
		wlvif->tx_security_last_seq_lsb = fw_lsb;
	}

	/* remove private header from packet */
	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));

	/* remove TKIP header space if present */
	if (info->control.hw_key &&
	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
			hdrlen);
		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
	}

	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
		     " status 0x%x",
		     result->id, skb, result->ack_failures,
		     result->rate_class_index, result->status);

	/* return the packet to the stack */
	skb_queue_tail(&wl->deferred_tx_queue, skb);
	queue_work(wl->freezable_wq, &wl->netstack_work);
	wl1271_free_tx_id(wl, result->id);
}

/* Called upon reception of a TX complete interrupt */
void wl1271_tx_complete(struct wl1271 *wl)
{
	struct wl1271_acx_mem_map *memmap =
		(struct wl1271_acx_mem_map *)wl->target_mem_map;
	u32 count, fw_counter;
	u32 i;

	/* read the tx results from the chipset */
	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);

	/* write host counter to chipset (to ack) */
	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
		       offsetof(struct wl1271_tx_hw_res_if,
				tx_result_host_counter), fw_counter);

	count = fw_counter - wl->tx_results_count;
	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);

	/* verify that the result buffer is not getting overrun */
	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
		wl1271_warning("TX result overflow from chipset: %d", count);

	/* process the results */
	for (i = 0; i < count; i++) {
		struct wl1271_tx_hw_res_descr *result;
		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;

		/* process the packet */
		result = &(wl->tx_res_if->tx_results_queue[offset]);
		wl1271_tx_complete_packet(wl, result);

		wl->tx_results_count++;
	}
}

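/*
 * Flush every frame still queued on the given link: non-dummy skbs are
 * returned to mac80211 with an empty TX status, and the global per-AC
 * queue counters are adjusted accordingly.
 */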
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
	struct sk_buff *skb;
	int i;
	unsigned long flags;
	struct ieee80211_tx_info *info;
	int total[NUM_TX_QUEUES];

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		total[i] = 0;
		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);

			if (!wl12xx_is_dummy_packet(wl, skb)) {
				info = IEEE80211_SKB_CB(skb);
				info->status.rates[0].idx = -1;
				info->status.rates[0].count = 0;
				ieee80211_tx_status_ni(wl->hw, skb);
			}

			total[i]++;
		}
	}

	spin_lock_irqsave(&wl->wl_lock, flags);
	for (i = 0; i < NUM_TX_QUEUES; i++)
		wl->tx_queue_count[i] -= total[i];
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	wl1271_handle_tx_low_watermark(wl);
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int i;

	/* TX failure */
	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
			wl1271_free_sta(wl, wlvif, i);
		else
			wlvif->sta.ba_rx_bitmap = 0;

		wl->links[i].allocated_pkts = 0;
		wl->links[i].prev_freed_pkts = 0;
	}
	wlvif->last_tx_hlid = 0;
}

/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
{
	int i;
	struct sk_buff *skb;
	struct ieee80211_tx_info *info;

	/* only reset the queues if something bad happened */
	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
		for (i = 0; i < WL12XX_MAX_LINKS; i++)
			wl1271_tx_reset_link_queues(wl, i);

		for (i = 0; i < NUM_TX_QUEUES; i++)
			wl->tx_queue_count[i] = 0;
	}

	wl->stopped_queues_map = 0;

	/*
	 * Make sure the driver is at a consistent state, in case this
	 * function is called from a context other than interface removal.
	 * This call will always wake the TX queues.
	 */
	if (reset_tx_queues)
		wl1271_handle_tx_low_watermark(wl);

	for (i = 0; i < wl->num_tx_desc; i++) {
		if (wl->tx_frames[i] == NULL)
			continue;

		skb = wl->tx_frames[i];
		wl1271_free_tx_id(wl, i);
		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);

		if (!wl12xx_is_dummy_packet(wl, skb)) {
			/*
			 * Remove private headers before passing the skb to
			 * mac80211
			 */
			info = IEEE80211_SKB_CB(skb);
			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
			if (info->control.hw_key &&
			    info->control.hw_key->cipher ==
			    WLAN_CIPHER_SUITE_TKIP) {
				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
					skb->data, hdrlen);
				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
			}

			info->status.rates[0].idx = -1;
			info->status.rates[0].count = 0;

			ieee80211_tx_status_ni(wl->hw, skb);
		}
	}
}

#define WL1271_TX_FLUSH_TIMEOUT 500000

/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
	unsigned long timeout;
	int i;
	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);

	while (!time_after(jiffies, timeout)) {
		mutex_lock(&wl->mutex);
		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
			     wl->tx_frames_cnt,
			     wl1271_tx_total_queue_count(wl));
		if ((wl->tx_frames_cnt == 0) &&
		    (wl1271_tx_total_queue_count(wl) == 0)) {
			mutex_unlock(&wl->mutex);
			return;
		}
		mutex_unlock(&wl->mutex);
		msleep(1);
	}

	wl1271_warning("Unable to flush all TX buffers, timed out.");

	/* forcibly flush all Tx buffers on our queues */
	mutex_lock(&wl->mutex);
	for (i = 0; i < WL12XX_MAX_LINKS; i++)
		wl1271_tx_reset_link_queues(wl, i);
	mutex_unlock(&wl->mutex);
}

u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
	if (WARN_ON(!rate_set))
		return 0;

	return BIT(__ffs(rate_set));
}