1/* 2 * Copyright (c) 2007-2011 Atheros Communications Inc. 3 * Copyright (c) 2011-2012 Qualcomm Atheros, Inc. 4 * 5 * Permission to use, copy, modify, and/or distribute this software for any 6 * purpose with or without fee is hereby granted, provided that the above 7 * copyright notice and this permission notice appear in all copies. 8 * 9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 16 */ 17 18#include "core.h" 19#include "hif.h" 20#include "debug.h" 21#include "hif-ops.h" 22#include <asm/unaligned.h> 23 24#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask)) 25 26/* threshold to re-enable Tx bundling for an AC*/ 27#define TX_RESUME_BUNDLE_THRESHOLD 1500 28 29/* Functions for Tx credit handling */ 30static void ath6kl_credit_deposit(struct ath6kl_htc_credit_info *cred_info, 31 struct htc_endpoint_credit_dist *ep_dist, 32 int credits) 33{ 34 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit deposit ep %d credits %d\n", 35 ep_dist->endpoint, credits); 36 37 ep_dist->credits += credits; 38 ep_dist->cred_assngd += credits; 39 cred_info->cur_free_credits -= credits; 40} 41 42static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info, 43 struct list_head *ep_list, 44 int tot_credits) 45{ 46 struct htc_endpoint_credit_dist *cur_ep_dist; 47 int count; 48 49 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit init total %d\n", tot_credits); 50 51 cred_info->cur_free_credits = tot_credits; 52 cred_info->total_avail_credits = tot_credits; 53 54 list_for_each_entry(cur_ep_dist, ep_list, list) { 55 if (cur_ep_dist->endpoint == ENDPOINT_0) 56 continue; 57 58 cur_ep_dist->cred_min = cur_ep_dist->cred_per_msg; 59 60 if (tot_credits > 4) { 61 if ((cur_ep_dist->svc_id == WMI_DATA_BK_SVC) || 62 (cur_ep_dist->svc_id == WMI_DATA_BE_SVC)) { 63 ath6kl_credit_deposit(cred_info, 64 cur_ep_dist, 65 cur_ep_dist->cred_min); 66 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; 67 } 68 } 69 70 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) { 71 ath6kl_credit_deposit(cred_info, cur_ep_dist, 72 cur_ep_dist->cred_min); 73 /* 74 * Control service is always marked active, it 75 * never goes inactive EVER. 76 */ 77 cur_ep_dist->dist_flags |= HTC_EP_ACTIVE; 78 } else if (cur_ep_dist->svc_id == WMI_DATA_BK_SVC) 79 /* this is the lowest priority data endpoint */ 80 /* FIXME: this looks fishy, check */ 81 cred_info->lowestpri_ep_dist = cur_ep_dist->list; 82 83 /* 84 * Streams have to be created (explicit | implicit) for all 85 * kinds of traffic. BE endpoints are also inactive in the 86 * beginning. When BE traffic starts it creates implicit 87 * streams that redistributes credits. 88 * 89 * Note: all other endpoints have minimums set but are 90 * initially given NO credits. 
credits will be distributed 91 * as traffic activity demands 92 */ 93 } 94 95 WARN_ON(cred_info->cur_free_credits <= 0); 96 97 list_for_each_entry(cur_ep_dist, ep_list, list) { 98 if (cur_ep_dist->endpoint == ENDPOINT_0) 99 continue; 100 101 if (cur_ep_dist->svc_id == WMI_CONTROL_SVC) 102 cur_ep_dist->cred_norm = cur_ep_dist->cred_per_msg; 103 else { 104 /* 105 * For the remaining data endpoints, we assume that 106 * each cred_per_msg are the same. We use a simple 107 * calculation here, we take the remaining credits 108 * and determine how many max messages this can 109 * cover and then set each endpoint's normal value 110 * equal to 3/4 this amount. 111 */ 112 count = (cred_info->cur_free_credits / 113 cur_ep_dist->cred_per_msg) 114 * cur_ep_dist->cred_per_msg; 115 count = (count * 3) >> 2; 116 count = max(count, cur_ep_dist->cred_per_msg); 117 cur_ep_dist->cred_norm = count; 118 119 } 120 121 ath6kl_dbg(ATH6KL_DBG_CREDIT, 122 "credit ep %d svc_id %d credits %d per_msg %d norm %d min %d\n", 123 cur_ep_dist->endpoint, 124 cur_ep_dist->svc_id, 125 cur_ep_dist->credits, 126 cur_ep_dist->cred_per_msg, 127 cur_ep_dist->cred_norm, 128 cur_ep_dist->cred_min); 129 } 130} 131 132/* initialize and setup credit distribution */ 133int ath6kl_credit_setup(void *htc_handle, 134 struct ath6kl_htc_credit_info *cred_info) 135{ 136 u16 servicepriority[5]; 137 138 memset(cred_info, 0, sizeof(struct ath6kl_htc_credit_info)); 139 140 servicepriority[0] = WMI_CONTROL_SVC; /* highest */ 141 servicepriority[1] = WMI_DATA_VO_SVC; 142 servicepriority[2] = WMI_DATA_VI_SVC; 143 servicepriority[3] = WMI_DATA_BE_SVC; 144 servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */ 145 146 /* set priority list */ 147 ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5); 148 149 return 0; 150} 151 152/* reduce an ep's credits back to a set limit */ 153static void ath6kl_credit_reduce(struct ath6kl_htc_credit_info *cred_info, 154 struct htc_endpoint_credit_dist *ep_dist, 155 int limit) 156{ 157 int credits; 158 159 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit reduce ep %d limit %d\n", 160 ep_dist->endpoint, limit); 161 162 ep_dist->cred_assngd = limit; 163 164 if (ep_dist->credits <= limit) 165 return; 166 167 credits = ep_dist->credits - limit; 168 ep_dist->credits -= credits; 169 cred_info->cur_free_credits += credits; 170} 171 172static void ath6kl_credit_update(struct ath6kl_htc_credit_info *cred_info, 173 struct list_head *epdist_list) 174{ 175 struct htc_endpoint_credit_dist *cur_list; 176 177 list_for_each_entry(cur_list, epdist_list, list) { 178 if (cur_list->endpoint == ENDPOINT_0) 179 continue; 180 181 if (cur_list->cred_to_dist > 0) { 182 cur_list->credits += cur_list->cred_to_dist; 183 cur_list->cred_to_dist = 0; 184 185 if (cur_list->credits > cur_list->cred_assngd) 186 ath6kl_credit_reduce(cred_info, 187 cur_list, 188 cur_list->cred_assngd); 189 190 if (cur_list->credits > cur_list->cred_norm) 191 ath6kl_credit_reduce(cred_info, cur_list, 192 cur_list->cred_norm); 193 194 if (!(cur_list->dist_flags & HTC_EP_ACTIVE)) { 195 if (cur_list->txq_depth == 0) 196 ath6kl_credit_reduce(cred_info, 197 cur_list, 0); 198 } 199 } 200 } 201} 202 203/* 204 * HTC has an endpoint that needs credits, ep_dist is the endpoint in 205 * question. 
 */
static void ath6kl_credit_seek(struct ath6kl_htc_credit_info *cred_info,
			       struct htc_endpoint_credit_dist *ep_dist)
{
	struct htc_endpoint_credit_dist *curdist_list;
	int credits = 0;
	int need;

	if (ep_dist->svc_id == WMI_CONTROL_SVC)
		goto out;

	if ((ep_dist->svc_id == WMI_DATA_VI_SVC) ||
	    (ep_dist->svc_id == WMI_DATA_VO_SVC))
		if ((ep_dist->cred_assngd >= ep_dist->cred_norm))
			goto out;

	/*
	 * For all other services, we follow a simple algorithm of:
	 *
	 * 1. checking the free pool for credits
	 * 2. checking lower priority endpoints for credits to take
	 */

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

	if (credits >= ep_dist->seek_cred)
		goto out;

	/*
	 * We don't have enough in the free pool, so try taking away from
	 * lower priority services. The rules for taking away credits:
	 *
	 * 1. Only take from lower priority endpoints
	 * 2. Only take what is allocated above the minimum (never
	 *    starve an endpoint completely)
	 * 3. Only take what you need.
	 */

	list_for_each_entry_reverse(curdist_list,
				    &cred_info->lowestpri_ep_dist,
				    list) {
		if (curdist_list == ep_dist)
			break;

		need = ep_dist->seek_cred - cred_info->cur_free_credits;

		if ((curdist_list->cred_assngd - need) >=
		     curdist_list->cred_min) {
			/*
			 * The current one has been allocated more than
			 * its minimum and has enough credits assigned
			 * above its minimum to fulfill our need; try to
			 * take away just enough to fulfill it.
			 */
			ath6kl_credit_reduce(cred_info, curdist_list,
					     curdist_list->cred_assngd - need);

			if (cred_info->cur_free_credits >=
			    ep_dist->seek_cred)
				break;
		}

		if (curdist_list->endpoint == ENDPOINT_0)
			break;
	}

	credits = min(cred_info->cur_free_credits, ep_dist->seek_cred);

out:
	/* did we find some credits? */
	if (credits)
		ath6kl_credit_deposit(cred_info, ep_dist, credits);

	ep_dist->seek_cred = 0;
}

/* redistribute credits based on activity change */
static void ath6kl_credit_redistribute(struct ath6kl_htc_credit_info *info,
				       struct list_head *ep_dist_list)
{
	struct htc_endpoint_credit_dist *curdist_list;

	list_for_each_entry(curdist_list, ep_dist_list, list) {
		if (curdist_list->endpoint == ENDPOINT_0)
			continue;

		if ((curdist_list->svc_id == WMI_DATA_BK_SVC) ||
		    (curdist_list->svc_id == WMI_DATA_BE_SVC))
			curdist_list->dist_flags |= HTC_EP_ACTIVE;

		if ((curdist_list->svc_id != WMI_CONTROL_SVC) &&
		    !(curdist_list->dist_flags & HTC_EP_ACTIVE)) {
			if (curdist_list->txq_depth == 0)
				ath6kl_credit_reduce(info, curdist_list, 0);
			else
				ath6kl_credit_reduce(info,
						     curdist_list,
						     curdist_list->cred_min);
		}
	}
}

/*
 * This function is invoked whenever endpoints require credit
 * distributions. A lock is held while this function is invoked, so this
 * function shall NOT block. The ep_dist_list is a list of distribution
 * structures in prioritized order as defined by the call to the
 * htc_set_credit_dist() API.
 */
static void ath6kl_credit_distribute(struct ath6kl_htc_credit_info *cred_info,
				     struct list_head *ep_dist_list,
				     enum htc_credit_dist_reason reason)
{
	switch (reason) {
	case HTC_CREDIT_DIST_SEND_COMPLETE:
		ath6kl_credit_update(cred_info, ep_dist_list);
		break;
	case HTC_CREDIT_DIST_ACTIVITY_CHANGE:
		ath6kl_credit_redistribute(cred_info, ep_dist_list);
		break;
	default:
		break;
	}

	WARN_ON(cred_info->cur_free_credits > cred_info->total_avail_credits);
	WARN_ON(cred_info->cur_free_credits < 0);
}

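/*
 * Move the frame start back to the nearest 4-byte boundary (at most 3
 * bytes) so the HIF layer sees an aligned buffer; assumes the caller
 * reserved enough headroom in front of the packet for the memmove.
 */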
static void ath6kl_htc_tx_buf_align(u8 **buf, unsigned long len)
{
	u8 *align_addr;

	if (!IS_ALIGNED((unsigned long) *buf, 4)) {
		align_addr = PTR_ALIGN(*buf - 4, 4);
		memmove(align_addr, *buf, len);
		*buf = align_addr;
	}
}

static void ath6kl_htc_tx_prep_pkt(struct htc_packet *packet, u8 flags,
				   int ctrl0, int ctrl1)
{
	struct htc_frame_hdr *hdr;

	packet->buf -= HTC_HDR_LENGTH;
	hdr = (struct htc_frame_hdr *)packet->buf;

	/* Endianness? */
	put_unaligned((u16)packet->act_len, &hdr->payld_len);
	hdr->flags = flags;
	hdr->eid = packet->endpoint;
	hdr->ctrl[0] = ctrl0;
	hdr->ctrl[1] = ctrl1;
}

static void htc_reclaim_txctrl_buf(struct htc_target *target,
				   struct htc_packet *pkt)
{
	spin_lock_bh(&target->htc_lock);
	list_add_tail(&pkt->list, &target->free_ctrl_txbuf);
	spin_unlock_bh(&target->htc_lock);
}

static struct htc_packet *htc_get_control_buf(struct htc_target *target,
					      bool tx)
{
	struct htc_packet *packet = NULL;
	struct list_head *buf_list;

	buf_list = tx ? &target->free_ctrl_txbuf : &target->free_ctrl_rxbuf;

	spin_lock_bh(&target->htc_lock);

	if (list_empty(buf_list)) {
		spin_unlock_bh(&target->htc_lock);
		return NULL;
	}

	packet = list_first_entry(buf_list, struct htc_packet, list);
	list_del(&packet->list);
	spin_unlock_bh(&target->htc_lock);

	if (tx)
		packet->buf = packet->buf_start + HTC_HDR_LENGTH;

	return packet;
}

static void htc_tx_comp_update(struct htc_target *target,
			       struct htc_endpoint *endpoint,
			       struct htc_packet *packet)
{
	packet->completion = NULL;
	packet->buf += HTC_HDR_LENGTH;

	if (!packet->status)
		return;

	ath6kl_err("req failed (status:%d, ep:%d, len:%d creds:%d)\n",
		   packet->status, packet->endpoint, packet->act_len,
		   packet->info.tx.cred_used);

	/* on failure to submit, reclaim credits for this packet */
	spin_lock_bh(&target->tx_lock);
	endpoint->cred_dist.cred_to_dist +=
		packet->info.tx.cred_used;
	endpoint->cred_dist.txq_depth = get_queue_depth(&endpoint->txq);

	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx ctxt 0x%p dist 0x%p\n",
		   target->credit_info, &target->cred_dist_list);

	ath6kl_credit_distribute(target->credit_info,
				 &target->cred_dist_list,
				 HTC_CREDIT_DIST_SEND_COMPLETE);

	spin_unlock_bh(&target->tx_lock);
}

static void htc_tx_complete(struct htc_endpoint *endpoint,
			    struct list_head *txq)
{
	if (list_empty(txq))
		return;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc tx complete ep %d pkts %d\n",
		   endpoint->eid, get_queue_depth(txq));

	ath6kl_tx_complete(endpoint->target->dev->ar, txq);
}

static void htc_tx_comp_handler(struct htc_target *target,
				struct htc_packet *packet)
{
	struct htc_endpoint *endpoint = &target->endpoint[packet->endpoint];
	struct list_head
container; 443 444 ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx complete seqno %d\n", 445 packet->info.tx.seqno); 446 447 htc_tx_comp_update(target, endpoint, packet); 448 INIT_LIST_HEAD(&container); 449 list_add_tail(&packet->list, &container); 450 /* do completion */ 451 htc_tx_complete(endpoint, &container); 452} 453 454static void htc_async_tx_scat_complete(struct htc_target *target, 455 struct hif_scatter_req *scat_req) 456{ 457 struct htc_endpoint *endpoint; 458 struct htc_packet *packet; 459 struct list_head tx_compq; 460 int i; 461 462 INIT_LIST_HEAD(&tx_compq); 463 464 ath6kl_dbg(ATH6KL_DBG_HTC, 465 "htc tx scat complete len %d entries %d\n", 466 scat_req->len, scat_req->scat_entries); 467 468 if (scat_req->status) 469 ath6kl_err("send scatter req failed: %d\n", scat_req->status); 470 471 packet = scat_req->scat_list[0].packet; 472 endpoint = &target->endpoint[packet->endpoint]; 473 474 /* walk through the scatter list and process */ 475 for (i = 0; i < scat_req->scat_entries; i++) { 476 packet = scat_req->scat_list[i].packet; 477 if (!packet) { 478 WARN_ON(1); 479 return; 480 } 481 482 packet->status = scat_req->status; 483 htc_tx_comp_update(target, endpoint, packet); 484 list_add_tail(&packet->list, &tx_compq); 485 } 486 487 /* free scatter request */ 488 hif_scatter_req_add(target->dev->ar, scat_req); 489 490 /* complete all packets */ 491 htc_tx_complete(endpoint, &tx_compq); 492} 493 494static int ath6kl_htc_tx_issue(struct htc_target *target, 495 struct htc_packet *packet) 496{ 497 int status; 498 bool sync = false; 499 u32 padded_len, send_len; 500 501 if (!packet->completion) 502 sync = true; 503 504 send_len = packet->act_len + HTC_HDR_LENGTH; 505 506 padded_len = CALC_TXRX_PADDED_LEN(target, send_len); 507 508 ath6kl_dbg(ATH6KL_DBG_HTC, 509 "htc tx issue len %d seqno %d padded_len %d mbox 0x%X %s\n", 510 send_len, packet->info.tx.seqno, padded_len, 511 target->dev->ar->mbox_info.htc_addr, 512 sync ? "sync" : "async"); 513 514 if (sync) { 515 status = hif_read_write_sync(target->dev->ar, 516 target->dev->ar->mbox_info.htc_addr, 517 packet->buf, padded_len, 518 HIF_WR_SYNC_BLOCK_INC); 519 520 packet->status = status; 521 packet->buf += HTC_HDR_LENGTH; 522 } else 523 status = hif_write_async(target->dev->ar, 524 target->dev->ar->mbox_info.htc_addr, 525 packet->buf, padded_len, 526 HIF_WR_ASYNC_BLOCK_INC, packet); 527 528 return status; 529} 530 531static int htc_check_credits(struct htc_target *target, 532 struct htc_endpoint *ep, u8 *flags, 533 enum htc_endpoint_id eid, unsigned int len, 534 int *req_cred) 535{ 536 537 *req_cred = (len > target->tgt_cred_sz) ? 
538 DIV_ROUND_UP(len, target->tgt_cred_sz) : 1; 539 540 ath6kl_dbg(ATH6KL_DBG_CREDIT, "credit check need %d got %d\n", 541 *req_cred, ep->cred_dist.credits); 542 543 if (ep->cred_dist.credits < *req_cred) { 544 if (eid == ENDPOINT_0) 545 return -EINVAL; 546 547 /* Seek more credits */ 548 ep->cred_dist.seek_cred = *req_cred - ep->cred_dist.credits; 549 550 ath6kl_credit_seek(target->credit_info, &ep->cred_dist); 551 552 ep->cred_dist.seek_cred = 0; 553 554 if (ep->cred_dist.credits < *req_cred) { 555 ath6kl_dbg(ATH6KL_DBG_CREDIT, 556 "credit not found for ep %d\n", 557 eid); 558 return -EINVAL; 559 } 560 } 561 562 ep->cred_dist.credits -= *req_cred; 563 ep->ep_st.cred_cosumd += *req_cred; 564 565 /* When we are getting low on credits, ask for more */ 566 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { 567 ep->cred_dist.seek_cred = 568 ep->cred_dist.cred_per_msg - ep->cred_dist.credits; 569 570 ath6kl_credit_seek(target->credit_info, &ep->cred_dist); 571 572 /* see if we were successful in getting more */ 573 if (ep->cred_dist.credits < ep->cred_dist.cred_per_msg) { 574 /* tell the target we need credits ASAP! */ 575 *flags |= HTC_FLAGS_NEED_CREDIT_UPDATE; 576 ep->ep_st.cred_low_indicate += 1; 577 ath6kl_dbg(ATH6KL_DBG_CREDIT, 578 "credit we need credits asap\n"); 579 } 580 } 581 582 return 0; 583} 584 585static void ath6kl_htc_tx_pkts_get(struct htc_target *target, 586 struct htc_endpoint *endpoint, 587 struct list_head *queue) 588{ 589 int req_cred; 590 u8 flags; 591 struct htc_packet *packet; 592 unsigned int len; 593 594 while (true) { 595 596 flags = 0; 597 598 if (list_empty(&endpoint->txq)) 599 break; 600 packet = list_first_entry(&endpoint->txq, struct htc_packet, 601 list); 602 603 ath6kl_dbg(ATH6KL_DBG_HTC, 604 "htc tx got packet 0x%p queue depth %d\n", 605 packet, get_queue_depth(&endpoint->txq)); 606 607 len = CALC_TXRX_PADDED_LEN(target, 608 packet->act_len + HTC_HDR_LENGTH); 609 610 if (htc_check_credits(target, endpoint, &flags, 611 packet->endpoint, len, &req_cred)) 612 break; 613 614 /* now we can fully move onto caller's queue */ 615 packet = list_first_entry(&endpoint->txq, struct htc_packet, 616 list); 617 list_move_tail(&packet->list, queue); 618 619 /* save the number of credits this packet consumed */ 620 packet->info.tx.cred_used = req_cred; 621 622 /* all TX packets are handled asynchronously */ 623 packet->completion = htc_tx_comp_handler; 624 packet->context = target; 625 endpoint->ep_st.tx_issued += 1; 626 627 /* save send flags */ 628 packet->info.tx.flags = flags; 629 packet->info.tx.seqno = endpoint->seqno; 630 endpoint->seqno++; 631 } 632} 633 634/* See if the padded tx length falls on a credit boundary */ 635static int htc_get_credit_padding(unsigned int cred_sz, int *len, 636 struct htc_endpoint *ep) 637{ 638 int rem_cred, cred_pad; 639 640 rem_cred = *len % cred_sz; 641 642 /* No padding needed */ 643 if (!rem_cred) 644 return 0; 645 646 if (!(ep->conn_flags & HTC_FLGS_TX_BNDL_PAD_EN)) 647 return -1; 648 649 /* 650 * The transfer consumes a "partial" credit, this 651 * packet cannot be bundled unless we add 652 * additional "dummy" padding (max 255 bytes) to 653 * consume the entire credit. 654 */ 655 cred_pad = *len < cred_sz ? 
		   (cred_sz - *len) : rem_cred;

	if ((cred_pad > 0) && (cred_pad <= 255))
		*len += cred_pad;
	else
		/* The amount of padding is too large, send as non-bundled */
		return -1;

	return cred_pad;
}

static int ath6kl_htc_tx_setup_scat_list(struct htc_target *target,
					 struct htc_endpoint *endpoint,
					 struct hif_scatter_req *scat_req,
					 int n_scat,
					 struct list_head *queue)
{
	struct htc_packet *packet;
	int i, len, rem_scat, cred_pad;
	int status = 0;
	u8 flags;

	rem_scat = target->max_tx_bndl_sz;

	for (i = 0; i < n_scat; i++) {
		scat_req->scat_list[i].packet = NULL;

		if (list_empty(queue))
			break;

		packet = list_first_entry(queue, struct htc_packet, list);
		len = CALC_TXRX_PADDED_LEN(target,
					   packet->act_len + HTC_HDR_LENGTH);

		cred_pad = htc_get_credit_padding(target->tgt_cred_sz,
						  &len, endpoint);
		if (cred_pad < 0 || rem_scat < len) {
			status = -ENOSPC;
			break;
		}

		rem_scat -= len;
		/* now remove it from the queue */
		list_del(&packet->list);

		scat_req->scat_list[i].packet = packet;
		/* prepare packet and flag message as part of a send bundle */
		flags = packet->info.tx.flags | HTC_FLAGS_SEND_BUNDLE;
		ath6kl_htc_tx_prep_pkt(packet, flags,
				       cred_pad, packet->info.tx.seqno);
		/* Make sure the buffer is 4-byte aligned */
		ath6kl_htc_tx_buf_align(&packet->buf,
					packet->act_len + HTC_HDR_LENGTH);
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = len;

		scat_req->len += len;
		scat_req->scat_entries++;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx adding (%d) pkt 0x%p seqno %d len %d remaining %d\n",
			   i, packet, packet->info.tx.seqno, len, rem_scat);
	}

	/* Roll back scatter setup in case of any failure */
	if (scat_req->scat_entries < HTC_MIN_HTC_MSGS_TO_BUNDLE) {
		for (i = scat_req->scat_entries - 1; i >= 0; i--) {
			packet = scat_req->scat_list[i].packet;
			if (packet) {
				packet->buf += HTC_HDR_LENGTH;
				list_add(&packet->list, queue);
			}
		}
		return -EAGAIN;
	}

	return status;
}

/*
 * Drain a queue and send as bundles; this function may return without
 * fully draining the queue when
 *
 * 1. scatter resources are exhausted
 * 2. a message that will consume a partial credit will stop the
 *    bundling process early
 * 3. we drop below the minimum number of messages for a bundle
 */
static void ath6kl_htc_tx_bundle(struct htc_endpoint *endpoint,
				 struct list_head *queue,
				 int *sent_bundle, int *n_bundle_pkts)
{
	struct htc_target *target = endpoint->target;
	struct hif_scatter_req *scat_req = NULL;
	int n_scat, n_sent_bundle = 0, tot_pkts_bundle = 0;
	int status;
	u32 txb_mask;
	u8 ac = WMM_NUM_AC;

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {
		status = 0;
		n_scat = get_queue_depth(queue);
		n_scat = min(n_scat, target->msg_per_bndl_max);

		if (n_scat < HTC_MIN_HTC_MSGS_TO_BUNDLE)
			/* not enough to bundle */
			break;

		scat_req = hif_scatter_req_get(target->dev->ar);

		if (!scat_req) {
			/* no scatter resources */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc tx no more scatter resources\n");
			break;
		}

		if ((ac < WMM_NUM_AC) && (ac != WMM_AC_BK)) {
			if (WMM_AC_BE == ac)
				/*
				 * BE, BK have priorities and bit
				 * positions reversed
				 */
				txb_mask = (1 << WMM_AC_BK);
			else
				/*
				 * any AC with priority lower than
				 * itself
				 */
				txb_mask = ((1 << ac) - 1);
			/*
			 * when the scatter request resources drop below a
			 * certain threshold, disable Tx bundling for all
			 * ACs with priority lower than the current requesting
			 * AC. Otherwise re-enable Tx bundling for them.
			 */
			if (scat_req->scat_q_depth < ATH6KL_SCATTER_REQS)
				target->tx_bndl_mask &= ~txb_mask;
			else
				target->tx_bndl_mask |= txb_mask;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx pkts to scatter: %d\n",
			   n_scat);

		scat_req->len = 0;
		scat_req->scat_entries = 0;

		status = ath6kl_htc_tx_setup_scat_list(target, endpoint,
						       scat_req, n_scat,
						       queue);
		if (status == -EAGAIN) {
			hif_scatter_req_add(target->dev->ar, scat_req);
			break;
		}

		/* send path is always asynchronous */
		scat_req->complete = htc_async_tx_scat_complete;
		n_sent_bundle++;
		tot_pkts_bundle += scat_req->scat_entries;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx scatter bytes %d entries %d\n",
			   scat_req->len, scat_req->scat_entries);
		ath6kl_hif_submit_scat_req(target->dev, scat_req, false);

		if (status)
			break;
	}

	*sent_bundle = n_sent_bundle;
	*n_bundle_pkts = tot_pkts_bundle;
	ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx bundle sent %d pkts\n",
		   n_sent_bundle);

	return;
}

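/*
 * Pull packets off the endpoint TX queue and send them, bundling where
 * possible. The per-endpoint tx_proc_cnt counter (taken under tx_lock)
 * ensures only one context drains a given endpoint at a time; concurrent
 * callers simply return and leave the work to the active one.
 */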
static void ath6kl_htc_tx_from_queue(struct htc_target *target,
				     struct htc_endpoint *endpoint)
{
	struct list_head txq;
	struct htc_packet *packet;
	int bundle_sent;
	int n_pkts_bundle;
	u8 ac = WMM_NUM_AC;

	spin_lock_bh(&target->tx_lock);

	endpoint->tx_proc_cnt++;
	if (endpoint->tx_proc_cnt > 1) {
		endpoint->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		ath6kl_dbg(ATH6KL_DBG_HTC, "htc tx busy\n");
		return;
	}

	/*
	 * drain the endpoint TX queue for transmission as long
	 * as we have enough credits.
	 */
	INIT_LIST_HEAD(&txq);

	if ((HTC_CTRL_RSVD_SVC != endpoint->svc_id) &&
	    (WMI_CONTROL_SVC != endpoint->svc_id))
		ac = target->dev->ar->ep2ac_map[endpoint->eid];

	while (true) {

		if (list_empty(&endpoint->txq))
			break;

		ath6kl_htc_tx_pkts_get(target, endpoint, &txq);

		if (list_empty(&txq))
			break;

		spin_unlock_bh(&target->tx_lock);

		bundle_sent = 0;
		n_pkts_bundle = 0;

		while (true) {
			/* try to send a bundle on each pass */
			if ((target->tx_bndl_mask) &&
			    (get_queue_depth(&txq) >=
			     HTC_MIN_HTC_MSGS_TO_BUNDLE)) {
				int temp1 = 0, temp2 = 0;

				/* check if bundling is enabled for an AC */
				if (target->tx_bndl_mask & (1 << ac)) {
					ath6kl_htc_tx_bundle(endpoint, &txq,
							     &temp1, &temp2);
					bundle_sent += temp1;
					n_pkts_bundle += temp2;
				}
			}

			if (list_empty(&txq))
				break;

			packet = list_first_entry(&txq, struct htc_packet,
						  list);
			list_del(&packet->list);

			ath6kl_htc_tx_prep_pkt(packet, packet->info.tx.flags,
					       0, packet->info.tx.seqno);
			ath6kl_htc_tx_issue(target, packet);
		}

		spin_lock_bh(&target->tx_lock);

		endpoint->ep_st.tx_bundles += bundle_sent;
		endpoint->ep_st.tx_pkt_bundled += n_pkts_bundle;

		/*
		 * if an AC has bundling disabled and no tx bundling
		 * has occurred continuously for a certain number of TX
		 * frames, re-enable tx bundling for this AC
		 */
		if (!bundle_sent) {
			if (!(target->tx_bndl_mask & (1 << ac)) &&
			    (ac < WMM_NUM_AC)) {
				if (++target->ac_tx_count[ac] >=
					TX_RESUME_BUNDLE_THRESHOLD) {
					target->ac_tx_count[ac] = 0;
					target->tx_bndl_mask |= (1 << ac);
				}
			}
		} else {
			/* tx bundling will reset the counter */
			if (ac < WMM_NUM_AC)
				target->ac_tx_count[ac] = 0;
		}
	}

	endpoint->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);
}

static bool ath6kl_htc_tx_try(struct htc_target *target,
			      struct htc_endpoint *endpoint,
			      struct htc_packet *tx_pkt)
{
	struct htc_ep_callbacks ep_cb;
	int txq_depth;
	bool overflow = false;

	ep_cb = endpoint->ep_cb;

	spin_lock_bh(&target->tx_lock);
	txq_depth = get_queue_depth(&endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	if (txq_depth >= endpoint->max_txq_depth)
		overflow = true;

	if (overflow)
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "htc tx overflow ep %d depth %d max %d\n",
			   endpoint->eid, txq_depth,
			   endpoint->max_txq_depth);

	if (overflow && ep_cb.tx_full) {
		if (ep_cb.tx_full(endpoint->target, tx_pkt) ==
		    HTC_SEND_FULL_DROP) {
			endpoint->ep_st.tx_dropped += 1;
			return false;
		}
	}

	spin_lock_bh(&target->tx_lock);
	list_add_tail(&tx_pkt->list, &endpoint->txq);
	spin_unlock_bh(&target->tx_lock);

	ath6kl_htc_tx_from_queue(target, endpoint);

	return true;
}

static void htc_chk_ep_txq(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	struct htc_endpoint_credit_dist *cred_dist;

	/*
	 * Run through the credit distribution list to see if there are
	 * packets queued. NOTE: no locks need to be taken since the
	 * distribution list is not dynamic (cannot be re-ordered) and we
	 * are not modifying any state.
988 */ 989 list_for_each_entry(cred_dist, &target->cred_dist_list, list) { 990 endpoint = cred_dist->htc_ep; 991 992 spin_lock_bh(&target->tx_lock); 993 if (!list_empty(&endpoint->txq)) { 994 ath6kl_dbg(ATH6KL_DBG_HTC, 995 "htc creds ep %d credits %d pkts %d\n", 996 cred_dist->endpoint, 997 endpoint->cred_dist.credits, 998 get_queue_depth(&endpoint->txq)); 999 spin_unlock_bh(&target->tx_lock); 1000 /* 1001 * Try to start the stalled queue, this list is 1002 * ordered by priority. If there are credits 1003 * available the highest priority queue will get a 1004 * chance to reclaim credits from lower priority 1005 * ones. 1006 */ 1007 ath6kl_htc_tx_from_queue(target, endpoint); 1008 spin_lock_bh(&target->tx_lock); 1009 } 1010 spin_unlock_bh(&target->tx_lock); 1011 } 1012} 1013 1014static int htc_setup_tx_complete(struct htc_target *target) 1015{ 1016 struct htc_packet *send_pkt = NULL; 1017 int status; 1018 1019 send_pkt = htc_get_control_buf(target, true); 1020 1021 if (!send_pkt) 1022 return -ENOMEM; 1023 1024 if (target->htc_tgt_ver >= HTC_VERSION_2P1) { 1025 struct htc_setup_comp_ext_msg *setup_comp_ext; 1026 u32 flags = 0; 1027 1028 setup_comp_ext = 1029 (struct htc_setup_comp_ext_msg *)send_pkt->buf; 1030 memset(setup_comp_ext, 0, sizeof(*setup_comp_ext)); 1031 setup_comp_ext->msg_id = 1032 cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID); 1033 1034 if (target->msg_per_bndl_max > 0) { 1035 /* Indicate HTC bundling to the target */ 1036 flags |= HTC_SETUP_COMP_FLG_RX_BNDL_EN; 1037 setup_comp_ext->msg_per_rxbndl = 1038 target->msg_per_bndl_max; 1039 } 1040 1041 memcpy(&setup_comp_ext->flags, &flags, 1042 sizeof(setup_comp_ext->flags)); 1043 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp_ext, 1044 sizeof(struct htc_setup_comp_ext_msg), 1045 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); 1046 1047 } else { 1048 struct htc_setup_comp_msg *setup_comp; 1049 setup_comp = (struct htc_setup_comp_msg *)send_pkt->buf; 1050 memset(setup_comp, 0, sizeof(struct htc_setup_comp_msg)); 1051 setup_comp->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_ID); 1052 set_htc_pkt_info(send_pkt, NULL, (u8 *) setup_comp, 1053 sizeof(struct htc_setup_comp_msg), 1054 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); 1055 } 1056 1057 /* we want synchronous operation */ 1058 send_pkt->completion = NULL; 1059 ath6kl_htc_tx_prep_pkt(send_pkt, 0, 0, 0); 1060 status = ath6kl_htc_tx_issue(target, send_pkt); 1061 1062 if (send_pkt != NULL) 1063 htc_reclaim_txctrl_buf(target, send_pkt); 1064 1065 return status; 1066} 1067 1068void ath6kl_htc_set_credit_dist(struct htc_target *target, 1069 struct ath6kl_htc_credit_info *credit_info, 1070 u16 srvc_pri_order[], int list_len) 1071{ 1072 struct htc_endpoint *endpoint; 1073 int i, ep; 1074 1075 target->credit_info = credit_info; 1076 1077 list_add_tail(&target->endpoint[ENDPOINT_0].cred_dist.list, 1078 &target->cred_dist_list); 1079 1080 for (i = 0; i < list_len; i++) { 1081 for (ep = ENDPOINT_1; ep < ENDPOINT_MAX; ep++) { 1082 endpoint = &target->endpoint[ep]; 1083 if (endpoint->svc_id == srvc_pri_order[i]) { 1084 list_add_tail(&endpoint->cred_dist.list, 1085 &target->cred_dist_list); 1086 break; 1087 } 1088 } 1089 if (ep >= ENDPOINT_MAX) { 1090 WARN_ON(1); 1091 return; 1092 } 1093 } 1094} 1095 1096int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet) 1097{ 1098 struct htc_endpoint *endpoint; 1099 struct list_head queue; 1100 1101 ath6kl_dbg(ATH6KL_DBG_HTC, 1102 "htc tx ep id %d buf 0x%p len %d\n", 1103 packet->endpoint, packet->buf, packet->act_len); 1104 1105 if (packet->endpoint >= 
ENDPOINT_MAX) { 1106 WARN_ON(1); 1107 return -EINVAL; 1108 } 1109 1110 endpoint = &target->endpoint[packet->endpoint]; 1111 1112 if (!ath6kl_htc_tx_try(target, endpoint, packet)) { 1113 packet->status = (target->htc_flags & HTC_OP_STATE_STOPPING) ? 1114 -ECANCELED : -ENOSPC; 1115 INIT_LIST_HEAD(&queue); 1116 list_add(&packet->list, &queue); 1117 htc_tx_complete(endpoint, &queue); 1118 } 1119 1120 return 0; 1121} 1122 1123/* flush endpoint TX queue */ 1124void ath6kl_htc_flush_txep(struct htc_target *target, 1125 enum htc_endpoint_id eid, u16 tag) 1126{ 1127 struct htc_packet *packet, *tmp_pkt; 1128 struct list_head discard_q, container; 1129 struct htc_endpoint *endpoint = &target->endpoint[eid]; 1130 1131 if (!endpoint->svc_id) { 1132 WARN_ON(1); 1133 return; 1134 } 1135 1136 /* initialize the discard queue */ 1137 INIT_LIST_HEAD(&discard_q); 1138 1139 spin_lock_bh(&target->tx_lock); 1140 1141 list_for_each_entry_safe(packet, tmp_pkt, &endpoint->txq, list) { 1142 if ((tag == HTC_TX_PACKET_TAG_ALL) || 1143 (tag == packet->info.tx.tag)) 1144 list_move_tail(&packet->list, &discard_q); 1145 } 1146 1147 spin_unlock_bh(&target->tx_lock); 1148 1149 list_for_each_entry_safe(packet, tmp_pkt, &discard_q, list) { 1150 packet->status = -ECANCELED; 1151 list_del(&packet->list); 1152 ath6kl_dbg(ATH6KL_DBG_HTC, 1153 "htc tx flushing pkt 0x%p len %d ep %d tag 0x%x\n", 1154 packet, packet->act_len, 1155 packet->endpoint, packet->info.tx.tag); 1156 1157 INIT_LIST_HEAD(&container); 1158 list_add_tail(&packet->list, &container); 1159 htc_tx_complete(endpoint, &container); 1160 } 1161 1162} 1163 1164static void ath6kl_htc_flush_txep_all(struct htc_target *target) 1165{ 1166 struct htc_endpoint *endpoint; 1167 int i; 1168 1169 dump_cred_dist_stats(target); 1170 1171 for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) { 1172 endpoint = &target->endpoint[i]; 1173 if (endpoint->svc_id == 0) 1174 /* not in use.. 
*/ 1175 continue; 1176 ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL); 1177 } 1178} 1179 1180void ath6kl_htc_indicate_activity_change(struct htc_target *target, 1181 enum htc_endpoint_id eid, bool active) 1182{ 1183 struct htc_endpoint *endpoint = &target->endpoint[eid]; 1184 bool dist = false; 1185 1186 if (endpoint->svc_id == 0) { 1187 WARN_ON(1); 1188 return; 1189 } 1190 1191 spin_lock_bh(&target->tx_lock); 1192 1193 if (active) { 1194 if (!(endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE)) { 1195 endpoint->cred_dist.dist_flags |= HTC_EP_ACTIVE; 1196 dist = true; 1197 } 1198 } else { 1199 if (endpoint->cred_dist.dist_flags & HTC_EP_ACTIVE) { 1200 endpoint->cred_dist.dist_flags &= ~HTC_EP_ACTIVE; 1201 dist = true; 1202 } 1203 } 1204 1205 if (dist) { 1206 endpoint->cred_dist.txq_depth = 1207 get_queue_depth(&endpoint->txq); 1208 1209 ath6kl_dbg(ATH6KL_DBG_HTC, 1210 "htc tx activity ctxt 0x%p dist 0x%p\n", 1211 target->credit_info, &target->cred_dist_list); 1212 1213 ath6kl_credit_distribute(target->credit_info, 1214 &target->cred_dist_list, 1215 HTC_CREDIT_DIST_ACTIVITY_CHANGE); 1216 } 1217 1218 spin_unlock_bh(&target->tx_lock); 1219 1220 if (dist && !active) 1221 htc_chk_ep_txq(target); 1222} 1223 1224/* HTC Rx */ 1225 1226static inline void ath6kl_htc_rx_update_stats(struct htc_endpoint *endpoint, 1227 int n_look_ahds) 1228{ 1229 endpoint->ep_st.rx_pkts++; 1230 if (n_look_ahds == 1) 1231 endpoint->ep_st.rx_lkahds++; 1232 else if (n_look_ahds > 1) 1233 endpoint->ep_st.rx_bundle_lkahd++; 1234} 1235 1236static inline bool htc_valid_rx_frame_len(struct htc_target *target, 1237 enum htc_endpoint_id eid, int len) 1238{ 1239 return (eid == target->dev->ar->ctrl_ep) ? 1240 len <= ATH6KL_BUFFER_SIZE : len <= ATH6KL_AMSDU_BUFFER_SIZE; 1241} 1242 1243static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet) 1244{ 1245 struct list_head queue; 1246 1247 INIT_LIST_HEAD(&queue); 1248 list_add_tail(&packet->list, &queue); 1249 return ath6kl_htc_add_rxbuf_multiple(target, &queue); 1250} 1251 1252static void htc_reclaim_rxbuf(struct htc_target *target, 1253 struct htc_packet *packet, 1254 struct htc_endpoint *ep) 1255{ 1256 if (packet->info.rx.rx_flags & HTC_RX_PKT_NO_RECYCLE) { 1257 htc_rxpkt_reset(packet); 1258 packet->status = -ECANCELED; 1259 ep->ep_cb.rx(ep->target, packet); 1260 } else { 1261 htc_rxpkt_reset(packet); 1262 htc_add_rxbuf((void *)(target), packet); 1263 } 1264} 1265 1266static void reclaim_rx_ctrl_buf(struct htc_target *target, 1267 struct htc_packet *packet) 1268{ 1269 spin_lock_bh(&target->htc_lock); 1270 list_add_tail(&packet->list, &target->free_ctrl_rxbuf); 1271 spin_unlock_bh(&target->htc_lock); 1272} 1273 1274static int ath6kl_htc_rx_packet(struct htc_target *target, 1275 struct htc_packet *packet, 1276 u32 rx_len) 1277{ 1278 struct ath6kl_device *dev = target->dev; 1279 u32 padded_len; 1280 int status; 1281 1282 padded_len = CALC_TXRX_PADDED_LEN(target, rx_len); 1283 1284 if (padded_len > packet->buf_len) { 1285 ath6kl_err("not enough receive space for packet - padlen %d recvlen %d bufferlen %d\n", 1286 padded_len, rx_len, packet->buf_len); 1287 return -ENOMEM; 1288 } 1289 1290 ath6kl_dbg(ATH6KL_DBG_HTC, 1291 "htc rx 0x%p hdr x%x len %d mbox 0x%x\n", 1292 packet, packet->info.rx.exp_hdr, 1293 padded_len, dev->ar->mbox_info.htc_addr); 1294 1295 status = hif_read_write_sync(dev->ar, 1296 dev->ar->mbox_info.htc_addr, 1297 packet->buf, padded_len, 1298 HIF_RD_SYNC_BLOCK_FIX); 1299 1300 packet->status = status; 1301 1302 return status; 1303} 1304 1305/* 
 * optimization for recv packets: we can indicate a
 * "hint" that there are more single-packets to fetch
 * on this endpoint.
 */
static void ath6kl_htc_rx_set_indicate(u32 lk_ahd,
				       struct htc_endpoint *endpoint,
				       struct htc_packet *packet)
{
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)&lk_ahd;

	if (htc_hdr->eid == packet->endpoint) {
		if (!list_empty(&endpoint->rx_bufq))
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;
	}
}

static void ath6kl_htc_rx_chk_water_mark(struct htc_endpoint *endpoint)
{
	struct htc_ep_callbacks ep_cb = endpoint->ep_cb;

	if (ep_cb.rx_refill_thresh > 0) {
		spin_lock_bh(&endpoint->target->rx_lock);
		if (get_queue_depth(&endpoint->rx_bufq)
		    < ep_cb.rx_refill_thresh) {
			spin_unlock_bh(&endpoint->target->rx_lock);
			ep_cb.rx_refill(endpoint->target, endpoint->eid);
			return;
		}
		spin_unlock_bh(&endpoint->target->rx_lock);
	}
}

/* This function is called with rx_lock held */
static int ath6kl_htc_rx_setup(struct htc_target *target,
			       struct htc_endpoint *ep,
			       u32 *lk_ahds, struct list_head *queue, int n_msg)
{
	struct htc_packet *packet;
	/* FIXME: type of lk_ahds can't be right */
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)lk_ahds;
	struct htc_ep_callbacks ep_cb;
	int status = 0, j, full_len;
	bool no_recycle;

	full_len = CALC_TXRX_PADDED_LEN(target,
					le16_to_cpu(htc_hdr->payld_len) +
					sizeof(*htc_hdr));

	if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
		ath6kl_warn("Rx buffer requested with invalid length\n");
		return -EINVAL;
	}

	ep_cb = ep->ep_cb;
	for (j = 0; j < n_msg; j++) {

		/*
		 * Reset the flag; any packets allocated using the
		 * rx_alloc() API cannot be recycled on
		 * cleanup, they must be explicitly returned.
		 */
		no_recycle = false;

		if (ep_cb.rx_allocthresh &&
		    (full_len > ep_cb.rx_alloc_thresh)) {
			ep->ep_st.rx_alloc_thresh_hit += 1;
			ep->ep_st.rxalloc_thresh_byte +=
				le16_to_cpu(htc_hdr->payld_len);

			spin_unlock_bh(&target->rx_lock);
			no_recycle = true;

			packet = ep_cb.rx_allocthresh(ep->target, ep->eid,
						      full_len);
			spin_lock_bh(&target->rx_lock);
		} else {
			/* refill handler is being used */
			if (list_empty(&ep->rx_bufq)) {
				if (ep_cb.rx_refill) {
					spin_unlock_bh(&target->rx_lock);
					ep_cb.rx_refill(ep->target, ep->eid);
					spin_lock_bh(&target->rx_lock);
				}
			}

			if (list_empty(&ep->rx_bufq))
				packet = NULL;
			else {
				packet = list_first_entry(&ep->rx_bufq,
						struct htc_packet, list);
				list_del(&packet->list);
			}
		}

		if (!packet) {
			target->rx_st_flags |= HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ep->eid;
			return -ENOSPC;
		}

		/* clear flags */
		packet->info.rx.rx_flags = 0;
		packet->info.rx.indicat_flags = 0;
		packet->status = 0;

		if (no_recycle)
			/*
			 * flag that these packets cannot be
			 * recycled, they have to be returned to
			 * the user
			 */
			packet->info.rx.rx_flags |= HTC_RX_PKT_NO_RECYCLE;

		/* Caller needs to free this upon any failure */
		list_add_tail(&packet->list, queue);

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			status = -ECANCELED;
			break;
		}

		if (j) {
			packet->info.rx.rx_flags |= HTC_RX_PKT_REFRESH_HDR;
			packet->info.rx.exp_hdr = 0xFFFFFFFF;
		} else
			/* set expected look ahead */
			packet->info.rx.exp_hdr = *lk_ahds;

		packet->act_len = le16_to_cpu(htc_hdr->payld_len) +
				  HTC_HDR_LENGTH;
	}

	return status;
}

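/*
 * Allocate receive buffers for every message described by the list of
 * lookaheads, validating each embedded HTC header before the buffers
 * are queued for the actual fetch.
 */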
static int ath6kl_htc_rx_alloc(struct htc_target *target,
			       u32 lk_ahds[], int msg,
			       struct htc_endpoint *endpoint,
			       struct list_head *queue)
{
	int status = 0;
	struct htc_packet *packet, *tmp_pkt;
	struct htc_frame_hdr *htc_hdr;
	int i, n_msg;

	spin_lock_bh(&target->rx_lock);

	for (i = 0; i < msg; i++) {

		htc_hdr = (struct htc_frame_hdr *)&lk_ahds[i];

		if (htc_hdr->eid >= ENDPOINT_MAX) {
			ath6kl_err("invalid ep in look-ahead: %d\n",
				   htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->eid != endpoint->eid) {
			ath6kl_err("invalid ep in look-ahead: %d should be : %d (index:%d)\n",
				   htc_hdr->eid, endpoint->eid, i);
			status = -ENOMEM;
			break;
		}

		if (le16_to_cpu(htc_hdr->payld_len) > HTC_MAX_PAYLOAD_LENGTH) {
			ath6kl_err("payload len %d exceeds max htc : %d !\n",
				   htc_hdr->payld_len,
				   (u32) HTC_MAX_PAYLOAD_LENGTH);
			status = -ENOMEM;
			break;
		}

		if (endpoint->svc_id == 0) {
			ath6kl_err("ep %d is not connected !\n", htc_hdr->eid);
			status = -ENOMEM;
			break;
		}

		if (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) {
			/*
			 * HTC header indicates that every packet to follow
			 * has the same padded length so that it can be
			 * optimally fetched as a full bundle.
			 */
			n_msg = (htc_hdr->flags & HTC_FLG_RX_BNDL_CNT) >>
				HTC_FLG_RX_BNDL_CNT_S;

			/* the count doesn't include the starter frame */
			n_msg++;
			if (n_msg > target->msg_per_bndl_max) {
				status = -ENOMEM;
				break;
			}

			endpoint->ep_st.rx_bundle_from_hdr += 1;
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx bundle pkts %d\n",
				   n_msg);
		} else
			/* HTC header only indicates 1 message to fetch */
			n_msg = 1;

		/* Setup packet buffers for each message */
		status = ath6kl_htc_rx_setup(target, endpoint, &lk_ahds[i],
					     queue, n_msg);

		/*
		 * This is due to unavailability of buffers to rx entire data.
		 * Return no error so that free buffers from queue can be used
		 * to receive partial data.
		 */
		if (status == -ENOSPC) {
			spin_unlock_bh(&target->rx_lock);
			return 0;
		}

		if (status)
			break;
	}

	spin_unlock_bh(&target->rx_lock);

	if (status) {
		list_for_each_entry_safe(packet, tmp_pkt, queue, list) {
			list_del(&packet->list);
			htc_reclaim_rxbuf(target, packet,
					  &target->endpoint[packet->endpoint]);
		}
	}

	return status;
}

static void htc_ctrl_rx(struct htc_target *context, struct htc_packet *packets)
{
	if (packets->endpoint != ENDPOINT_0) {
		WARN_ON(1);
		return;
	}

	if (packets->status == -ECANCELED) {
		reclaim_rx_ctrl_buf(context, packets);
		return;
	}

	if (packets->act_len > 0) {
		ath6kl_err("htc_ctrl_rx, got message with len:%zu\n",
			   packets->act_len + HTC_HDR_LENGTH);

		ath6kl_dbg_dump(ATH6KL_DBG_HTC,
				"htc rx unexpected endpoint 0 message", "",
				packets->buf - HTC_HDR_LENGTH,
				packets->act_len + HTC_HDR_LENGTH);
	}

	htc_reclaim_rxbuf(context, packets, &context->endpoint[0]);
}

static void htc_proc_cred_rpt(struct htc_target *target,
			      struct htc_credit_report *rpt,
			      int n_entries,
			      enum htc_endpoint_id from_ep)
{
	struct htc_endpoint *endpoint;
	int tot_credits = 0, i;
	bool dist = false;

	spin_lock_bh(&target->tx_lock);

	for (i = 0; i < n_entries; i++, rpt++) {
		if (rpt->eid >= ENDPOINT_MAX) {
			WARN_ON(1);
			spin_unlock_bh(&target->tx_lock);
			return;
		}

		endpoint = &target->endpoint[rpt->eid];

		ath6kl_dbg(ATH6KL_DBG_CREDIT,
			   "credit report ep %d credits %d\n",
			   rpt->eid, rpt->credits);

		endpoint->ep_st.tx_cred_rpt += 1;
		endpoint->ep_st.cred_retnd += rpt->credits;

		if (from_ep == rpt->eid) {
			/*
			 * This credit report arrived on the same endpoint
			 * indicating it arrived in an RX packet.
			 */
			endpoint->ep_st.cred_from_rx += rpt->credits;
			endpoint->ep_st.cred_rpt_from_rx += 1;
		} else if (from_ep == ENDPOINT_0) {
			/* credit arrived on endpoint 0 as a NULL message */
			endpoint->ep_st.cred_from_ep0 += rpt->credits;
			endpoint->ep_st.cred_rpt_ep0 += 1;
		} else {
			endpoint->ep_st.cred_from_other += rpt->credits;
			endpoint->ep_st.cred_rpt_from_other += 1;
		}

		if (rpt->eid == ENDPOINT_0)
			/* always give endpoint 0 credits back */
			endpoint->cred_dist.credits += rpt->credits;
		else {
			endpoint->cred_dist.cred_to_dist += rpt->credits;
			dist = true;
		}

		/*
		 * Refresh the tx depth for the distribution function that
		 * will recover these credits. NOTE: this is only valid when
		 * there are credits to recover!
1621 */ 1622 endpoint->cred_dist.txq_depth = 1623 get_queue_depth(&endpoint->txq); 1624 1625 tot_credits += rpt->credits; 1626 } 1627 1628 if (dist) { 1629 /* 1630 * This was a credit return based on a completed send 1631 * operations note, this is done with the lock held 1632 */ 1633 ath6kl_credit_distribute(target->credit_info, 1634 &target->cred_dist_list, 1635 HTC_CREDIT_DIST_SEND_COMPLETE); 1636 } 1637 1638 spin_unlock_bh(&target->tx_lock); 1639 1640 if (tot_credits) 1641 htc_chk_ep_txq(target); 1642} 1643 1644static int htc_parse_trailer(struct htc_target *target, 1645 struct htc_record_hdr *record, 1646 u8 *record_buf, u32 *next_lk_ahds, 1647 enum htc_endpoint_id endpoint, 1648 int *n_lk_ahds) 1649{ 1650 struct htc_bundle_lkahd_rpt *bundle_lkahd_rpt; 1651 struct htc_lookahead_report *lk_ahd; 1652 int len; 1653 1654 switch (record->rec_id) { 1655 case HTC_RECORD_CREDITS: 1656 len = record->len / sizeof(struct htc_credit_report); 1657 if (!len) { 1658 WARN_ON(1); 1659 return -EINVAL; 1660 } 1661 1662 htc_proc_cred_rpt(target, 1663 (struct htc_credit_report *) record_buf, 1664 len, endpoint); 1665 break; 1666 case HTC_RECORD_LOOKAHEAD: 1667 len = record->len / sizeof(*lk_ahd); 1668 if (!len) { 1669 WARN_ON(1); 1670 return -EINVAL; 1671 } 1672 1673 lk_ahd = (struct htc_lookahead_report *) record_buf; 1674 if ((lk_ahd->pre_valid == ((~lk_ahd->post_valid) & 0xFF)) && 1675 next_lk_ahds) { 1676 1677 ath6kl_dbg(ATH6KL_DBG_HTC, 1678 "htc rx lk_ahd found pre_valid 0x%x post_valid 0x%x\n", 1679 lk_ahd->pre_valid, lk_ahd->post_valid); 1680 1681 /* look ahead bytes are valid, copy them over */ 1682 memcpy((u8 *)&next_lk_ahds[0], lk_ahd->lk_ahd, 4); 1683 1684 ath6kl_dbg_dump(ATH6KL_DBG_HTC, 1685 "htc rx next look ahead", 1686 "", next_lk_ahds, 4); 1687 1688 *n_lk_ahds = 1; 1689 } 1690 break; 1691 case HTC_RECORD_LOOKAHEAD_BUNDLE: 1692 len = record->len / sizeof(*bundle_lkahd_rpt); 1693 if (!len || (len > HTC_HOST_MAX_MSG_PER_BUNDLE)) { 1694 WARN_ON(1); 1695 return -EINVAL; 1696 } 1697 1698 if (next_lk_ahds) { 1699 int i; 1700 1701 bundle_lkahd_rpt = 1702 (struct htc_bundle_lkahd_rpt *) record_buf; 1703 1704 ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bundle lk_ahd", 1705 "", record_buf, record->len); 1706 1707 for (i = 0; i < len; i++) { 1708 memcpy((u8 *)&next_lk_ahds[i], 1709 bundle_lkahd_rpt->lk_ahd, 4); 1710 bundle_lkahd_rpt++; 1711 } 1712 1713 *n_lk_ahds = i; 1714 } 1715 break; 1716 default: 1717 ath6kl_err("unhandled record: id:%d len:%d\n", 1718 record->rec_id, record->len); 1719 break; 1720 } 1721 1722 return 0; 1723 1724} 1725 1726static int htc_proc_trailer(struct htc_target *target, 1727 u8 *buf, int len, u32 *next_lk_ahds, 1728 int *n_lk_ahds, enum htc_endpoint_id endpoint) 1729{ 1730 struct htc_record_hdr *record; 1731 int orig_len; 1732 int status; 1733 u8 *record_buf; 1734 u8 *orig_buf; 1735 1736 ath6kl_dbg(ATH6KL_DBG_HTC, "htc rx trailer len %d\n", len); 1737 ath6kl_dbg_dump(ATH6KL_DBG_HTC, NULL, "", buf, len); 1738 1739 orig_buf = buf; 1740 orig_len = len; 1741 status = 0; 1742 1743 while (len > 0) { 1744 1745 if (len < sizeof(struct htc_record_hdr)) { 1746 status = -ENOMEM; 1747 break; 1748 } 1749 /* these are byte aligned structs */ 1750 record = (struct htc_record_hdr *) buf; 1751 len -= sizeof(struct htc_record_hdr); 1752 buf += sizeof(struct htc_record_hdr); 1753 1754 if (record->len > len) { 1755 ath6kl_err("invalid record len: %d (id:%d) buf has: %d bytes left\n", 1756 record->len, record->rec_id, len); 1757 status = -ENOMEM; 1758 break; 1759 } 1760 record_buf = buf; 1761 1762 
		status = htc_parse_trailer(target, record, record_buf,
					   next_lk_ahds, endpoint, n_lk_ahds);

		if (status)
			break;

		/* advance buffer past this record for next time around */
		buf += record->len;
		len -= record->len;
	}

	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad trailer",
				"", orig_buf, orig_len);

	return status;
}

static int ath6kl_htc_rx_process_hdr(struct htc_target *target,
				     struct htc_packet *packet,
				     u32 *next_lkahds, int *n_lkahds)
{
	int status = 0;
	u16 payload_len;
	u32 lk_ahd;
	struct htc_frame_hdr *htc_hdr = (struct htc_frame_hdr *)packet->buf;

	if (n_lkahds != NULL)
		*n_lkahds = 0;

	/*
	 * NOTE: we cannot assume the alignment of buf, so we use the safe
	 * macros to retrieve 16 bit fields.
	 */
	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	memcpy((u8 *)&lk_ahd, packet->buf, sizeof(lk_ahd));

	if (packet->info.rx.rx_flags & HTC_RX_PKT_REFRESH_HDR) {
		/*
		 * Refresh the expected header and the actual length as it
		 * was unknown when this packet was grabbed as part of the
		 * bundle.
		 */
		packet->info.rx.exp_hdr = lk_ahd;
		packet->act_len = payload_len + HTC_HDR_LENGTH;

		/* validate the actual header that was refreshed */
		if (packet->act_len > packet->buf_len) {
			ath6kl_err("refreshed hdr payload len (%d) in bundled recv is invalid (hdr: 0x%X)\n",
				   payload_len, lk_ahd);
			/*
			 * Limit this to max buffer just to print out some
			 * of the buffer.
			 */
			packet->act_len = min(packet->act_len, packet->buf_len);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->endpoint != htc_hdr->eid) {
			ath6kl_err("refreshed hdr ep (%d) does not match expected ep (%d)\n",
				   htc_hdr->eid, packet->endpoint);
			status = -ENOMEM;
			goto fail_rx;
		}
	}

	if (lk_ahd != packet->info.rx.exp_hdr) {
		ath6kl_err("%s(): lk_ahd mismatch! (pPkt:0x%p flags:0x%X)\n",
			   __func__, packet, packet->info.rx.rx_flags);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx expected lk_ahd",
				"", &packet->info.rx.exp_hdr, 4);
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx current header",
				"", (u8 *)&lk_ahd, sizeof(lk_ahd));
		status = -ENOMEM;
		goto fail_rx;
	}

	if (htc_hdr->flags & HTC_FLG_RX_TRAILER) {
		if (htc_hdr->ctrl[0] < sizeof(struct htc_record_hdr) ||
		    htc_hdr->ctrl[0] > payload_len) {
			ath6kl_err("%s(): invalid hdr (payload len should be :%d, CB[0] is:%d)\n",
				   __func__, payload_len, htc_hdr->ctrl[0]);
			status = -ENOMEM;
			goto fail_rx;
		}

		if (packet->info.rx.rx_flags & HTC_RX_PKT_IGNORE_LOOKAHEAD) {
			next_lkahds = NULL;
			n_lkahds = NULL;
		}

		status = htc_proc_trailer(target, packet->buf + HTC_HDR_LENGTH
					  + payload_len - htc_hdr->ctrl[0],
					  htc_hdr->ctrl[0], next_lkahds,
					  n_lkahds, packet->endpoint);

		if (status)
			goto fail_rx;

		packet->act_len -= htc_hdr->ctrl[0];
	}

	packet->buf += HTC_HDR_LENGTH;
	packet->act_len -= HTC_HDR_LENGTH;

fail_rx:
	if (status)
		ath6kl_dbg_dump(ATH6KL_DBG_HTC, "htc rx bad packet",
				"", packet->buf, packet->act_len);

	return status;
}

static void ath6kl_htc_rx_complete(struct htc_endpoint *endpoint,
				   struct htc_packet *packet)
{
	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx complete ep %d packet 0x%p\n",
		   endpoint->eid, packet);
	endpoint->ep_cb.rx(endpoint->target, packet);
}

static int ath6kl_htc_rx_bundle(struct htc_target *target,
				struct list_head *rxq,
				struct list_head *sync_compq,
				int *n_pkt_fetched, bool part_bundle)
{
	struct hif_scatter_req *scat_req;
	struct htc_packet *packet;
	int rem_space = target->max_rx_bndl_sz;
	int n_scat_pkt, status = 0, i, len;

	n_scat_pkt = get_queue_depth(rxq);
	n_scat_pkt = min(n_scat_pkt, target->msg_per_bndl_max);

	if ((get_queue_depth(rxq) - n_scat_pkt) > 0) {
		/*
		 * We were forced to split this bundle receive operation;
		 * all packets in this partial bundle must have their
		 * lookaheads ignored.
		 */
		part_bundle = true;

		/*
		 * This would only happen if the target ignored our max
		 * bundle limit.
		 */
		ath6kl_warn("%s(): partial bundle detected num:%d , %d\n",
			    __func__, get_queue_depth(rxq), n_scat_pkt);
	}

	len = 0;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx bundle depth %d pkts %d\n",
		   get_queue_depth(rxq), n_scat_pkt);

	scat_req = hif_scatter_req_get(target->dev->ar);

	if (scat_req == NULL)
		goto fail_rx_pkt;

	for (i = 0; i < n_scat_pkt; i++) {
		int pad_len;

		packet = list_first_entry(rxq, struct htc_packet, list);
		list_del(&packet->list);

		pad_len = CALC_TXRX_PADDED_LEN(target,
					       packet->act_len);

		if ((rem_space - pad_len) < 0) {
			list_add(&packet->list, rxq);
			break;
		}

		rem_space -= pad_len;

		if (part_bundle || (i < (n_scat_pkt - 1)))
			/*
			 * Packets 0..n-1 cannot be checked for look-aheads
			 * since we are fetching a bundle; the last packet,
			 * however, can have its lookahead used.
			 */
			packet->info.rx.rx_flags |=
				HTC_RX_PKT_IGNORE_LOOKAHEAD;

		/* NOTE: 1 HTC packet per scatter entry */
		scat_req->scat_list[i].buf = packet->buf;
		scat_req->scat_list[i].len = pad_len;

		packet->info.rx.rx_flags |= HTC_RX_PKT_PART_OF_BUNDLE;

		list_add_tail(&packet->list, sync_compq);

		WARN_ON(!scat_req->scat_list[i].len);
		len += scat_req->scat_list[i].len;
	}

	scat_req->len = len;
	scat_req->scat_entries = i;

	status = ath6kl_hif_submit_scat_req(target->dev, scat_req, true);

	if (!status)
		*n_pkt_fetched = i;

	/* free scatter request */
	hif_scatter_req_add(target->dev->ar, scat_req);

fail_rx_pkt:

	return status;
}

static int ath6kl_htc_rx_process_packets(struct htc_target *target,
					 struct list_head *comp_pktq,
					 u32 lk_ahds[],
					 int *n_lk_ahd)
{
	struct htc_packet *packet, *tmp_pkt;
	struct htc_endpoint *ep;
	int status = 0;

	list_for_each_entry_safe(packet, tmp_pkt, comp_pktq, list) {
		ep = &target->endpoint[packet->endpoint];

		/* process the header of each recv packet */
		status = ath6kl_htc_rx_process_hdr(target, packet, lk_ahds,
						   n_lk_ahd);
		if (status)
			return status;

		list_del(&packet->list);

		if (list_empty(comp_pktq)) {
			/*
			 * Last packet's more packet flag is set
			 * based on the lookahead.
			 */
			if (*n_lk_ahd > 0)
				ath6kl_htc_rx_set_indicate(lk_ahds[0],
							   ep, packet);
		} else
			/*
			 * Packets in a bundle automatically have
			 * this flag set.
			 */
			packet->info.rx.indicat_flags |=
				HTC_RX_FLAGS_INDICATE_MORE_PKTS;

		ath6kl_htc_rx_update_stats(ep, *n_lk_ahd);

		if (packet->info.rx.rx_flags & HTC_RX_PKT_PART_OF_BUNDLE)
			ep->ep_st.rx_bundl += 1;

		ath6kl_htc_rx_complete(ep, packet);
	}

	return status;
}

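/*
 * Fetch every packet in rx_pktq from the target, using a bundled
 * scatter read when more than one packet is pending and bundling is
 * enabled, and falling back to single synchronous reads otherwise.
 */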
2047 */ 2048 status = ath6kl_htc_rx_bundle(target, rx_pktq, 2049 &tmp_rxq, 2050 &fetched_pkts, 2051 part_bundle); 2052 if (status) 2053 goto fail_rx; 2054 2055 if (!list_empty(rx_pktq)) 2056 part_bundle = true; 2057 2058 list_splice_tail_init(&tmp_rxq, comp_pktq); 2059 } 2060 2061 if (!fetched_pkts) { 2062 2063 packet = list_first_entry(rx_pktq, struct htc_packet, 2064 list); 2065 2066 /* fully synchronous */ 2067 packet->completion = NULL; 2068 2069 if (!list_is_singular(rx_pktq)) 2070 /* 2071 * look_aheads in all packet 2072 * except the last one in the 2073 * bundle must be ignored 2074 */ 2075 packet->info.rx.rx_flags |= 2076 HTC_RX_PKT_IGNORE_LOOKAHEAD; 2077 2078 /* go fetch the packet */ 2079 status = ath6kl_htc_rx_packet(target, packet, 2080 packet->act_len); 2081 2082 list_move_tail(&packet->list, &tmp_rxq); 2083 2084 if (status) 2085 goto fail_rx; 2086 2087 list_splice_tail_init(&tmp_rxq, comp_pktq); 2088 } 2089 } 2090 2091 return 0; 2092 2093fail_rx: 2094 2095 /* 2096 * Cleanup any packets we allocated but didn't use to 2097 * actually fetch any packets. 2098 */ 2099 2100 list_for_each_entry_safe(packet, tmp_pkt, rx_pktq, list) { 2101 list_del(&packet->list); 2102 htc_reclaim_rxbuf(target, packet, 2103 &target->endpoint[packet->endpoint]); 2104 } 2105 2106 list_for_each_entry_safe(packet, tmp_pkt, &tmp_rxq, list) { 2107 list_del(&packet->list); 2108 htc_reclaim_rxbuf(target, packet, 2109 &target->endpoint[packet->endpoint]); 2110 } 2111 2112 return status; 2113} 2114 2115int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target, 2116 u32 msg_look_ahead, int *num_pkts) 2117{ 2118 struct htc_packet *packets, *tmp_pkt; 2119 struct htc_endpoint *endpoint; 2120 struct list_head rx_pktq, comp_pktq; 2121 int status = 0; 2122 u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE]; 2123 int num_look_ahead = 1; 2124 enum htc_endpoint_id id; 2125 int n_fetched = 0; 2126 2127 INIT_LIST_HEAD(&comp_pktq); 2128 *num_pkts = 0; 2129 2130 /* 2131 * On first entry copy the look_aheads into our temp array for 2132 * processing 2133 */ 2134 look_aheads[0] = msg_look_ahead; 2135 2136 while (true) { 2137 2138 /* 2139 * First lookahead sets the expected endpoint IDs for all 2140 * packets in a bundle. 2141 */ 2142 id = ((struct htc_frame_hdr *)&look_aheads[0])->eid; 2143 endpoint = &target->endpoint[id]; 2144 2145 if (id >= ENDPOINT_MAX) { 2146 ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n", 2147 id); 2148 status = -ENOMEM; 2149 break; 2150 } 2151 2152 INIT_LIST_HEAD(&rx_pktq); 2153 INIT_LIST_HEAD(&comp_pktq); 2154 2155 /* 2156 * Try to allocate as many HTC RX packets indicated by the 2157 * look_aheads. 2158 */ 2159 status = ath6kl_htc_rx_alloc(target, look_aheads, 2160 num_look_ahead, endpoint, 2161 &rx_pktq); 2162 if (status) 2163 break; 2164 2165 if (get_queue_depth(&rx_pktq) >= 2) 2166 /* 2167 * A recv bundle was detected, force IRQ status 2168 * re-check again 2169 */ 2170 target->chk_irq_status_cnt = 1; 2171 2172 n_fetched += get_queue_depth(&rx_pktq); 2173 2174 num_look_ahead = 0; 2175 2176 status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq); 2177 2178 if (!status) 2179 ath6kl_htc_rx_chk_water_mark(endpoint); 2180 2181 /* Process fetched packets */ 2182 status = ath6kl_htc_rx_process_packets(target, &comp_pktq, 2183 look_aheads, 2184 &num_look_ahead); 2185 2186 if (!num_look_ahead || status) 2187 break; 2188 2189 /* 2190 * For SYNCH processing, if we get here, we are running 2191 * through the loop again due to a detected lookahead. 
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
				     u32 msg_look_ahead, int *num_pkts)
{
	struct htc_packet *packets, *tmp_pkt;
	struct htc_endpoint *endpoint;
	struct list_head rx_pktq, comp_pktq;
	int status = 0;
	u32 look_aheads[HTC_HOST_MAX_MSG_PER_BUNDLE];
	int num_look_ahead = 1;
	enum htc_endpoint_id id;
	int n_fetched = 0;

	INIT_LIST_HEAD(&comp_pktq);
	*num_pkts = 0;

	/*
	 * On first entry copy the look_aheads into our temp array for
	 * processing.
	 */
	look_aheads[0] = msg_look_ahead;

	while (true) {

		/*
		 * First lookahead sets the expected endpoint IDs for all
		 * packets in a bundle.
		 */
		id = ((struct htc_frame_hdr *)&look_aheads[0])->eid;
		endpoint = &target->endpoint[id];

		if (id >= ENDPOINT_MAX) {
			ath6kl_err("MsgPend, invalid endpoint in look-ahead: %d\n",
				   id);
			status = -ENOMEM;
			break;
		}

		INIT_LIST_HEAD(&rx_pktq);
		INIT_LIST_HEAD(&comp_pktq);

		/*
		 * Try to allocate as many HTC RX packets as indicated by
		 * the look_aheads.
		 */
		status = ath6kl_htc_rx_alloc(target, look_aheads,
					     num_look_ahead, endpoint,
					     &rx_pktq);
		if (status)
			break;

		if (get_queue_depth(&rx_pktq) >= 2)
			/*
			 * A recv bundle was detected; force an IRQ status
			 * re-check.
			 */
			target->chk_irq_status_cnt = 1;

		n_fetched += get_queue_depth(&rx_pktq);

		num_look_ahead = 0;

		status = ath6kl_htc_rx_fetch(target, &rx_pktq, &comp_pktq);

		if (!status)
			ath6kl_htc_rx_chk_water_mark(endpoint);

		/* Process fetched packets */
		status = ath6kl_htc_rx_process_packets(target, &comp_pktq,
						       look_aheads,
						       &num_look_ahead);

		if (!num_look_ahead || status)
			break;

		/*
		 * For SYNCH processing, if we get here, we are running
		 * through the loop again due to a detected lookahead. Set
		 * a flag so that we re-check the IRQ status registers
		 * before leaving IRQ processing; this can net better
		 * performance in high-throughput situations.
		 */
		target->chk_irq_status_cnt = 1;
	}

	if (status) {
		ath6kl_err("failed to get pending recv messages: %d\n",
			   status);

		/* cleanup any packets in sync completion queue */
		list_for_each_entry_safe(packets, tmp_pkt, &comp_pktq, list) {
			list_del(&packets->list);
			htc_reclaim_rxbuf(target, packets,
					  &target->endpoint[packets->endpoint]);
		}

		if (target->htc_flags & HTC_OP_STATE_STOPPING) {
			ath6kl_warn("host is going to stop blocking receiver for htc_stop\n");
			ath6kl_hif_rx_control(target->dev, false);
		}
	}

	/*
	 * Before leaving, check to see if host ran out of buffers and
	 * needs to stop the receiver.
	 */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		ath6kl_warn("host has no rx buffers blocking receiver to prevent overrun\n");
		ath6kl_hif_rx_control(target->dev, false);
	}
	*num_pkts = n_fetched;

	return status;
}

/*
 * Synchronously wait for a control message from the target.
 * This function is used at initialization time ONLY; at init,
 * messages on ENDPOINT_0 are expected.
 */
static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_frame_hdr *htc_hdr;
	u32 look_ahead;

	if (ath6kl_hif_poll_mboxmsg_rx(target->dev, &look_ahead,
				       HTC_TARGET_RESPONSE_TIMEOUT))
		return NULL;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx wait ctrl look_ahead 0x%X\n", look_ahead);

	htc_hdr = (struct htc_frame_hdr *)&look_ahead;

	if (htc_hdr->eid != ENDPOINT_0)
		return NULL;

	packet = htc_get_control_buf(target, false);

	if (!packet)
		return NULL;

	packet->info.rx.rx_flags = 0;
	packet->info.rx.exp_hdr = look_ahead;
	packet->act_len = le16_to_cpu(htc_hdr->payld_len) + HTC_HDR_LENGTH;

	if (packet->act_len > packet->buf_len)
		goto fail_ctrl_rx;

	/* we want synchronous operation */
	packet->completion = NULL;

	/* get the message from the device, this will block */
	if (ath6kl_htc_rx_packet(target, packet, packet->act_len))
		goto fail_ctrl_rx;

	/* process receive header */
	packet->status = ath6kl_htc_rx_process_hdr(target, packet, NULL, NULL);

	if (packet->status) {
		ath6kl_err("htc_wait_for_ctrl_msg, ath6kl_htc_rx_process_hdr failed (status = %d)\n",
			   packet->status);
		goto fail_ctrl_rx;
	}

	return packet;

fail_ctrl_rx:
	if (packet != NULL) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return NULL;
}
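
/*
 * Supply HTC with a queue of receive buffers for a single endpoint.
 * If HTC is stopping, the packets are completed as canceled; otherwise
 * they are queued on the endpoint's rx_bufq and, if the receiver was
 * blocked waiting for buffers on this endpoint, reception is re-enabled.
 */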
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
				  struct list_head *pkt_queue)
{
	struct htc_endpoint *endpoint;
	struct htc_packet *first_pkt;
	bool rx_unblock = false;
	int status = 0, depth;

	if (list_empty(pkt_queue))
		return -ENOMEM;

	first_pkt = list_first_entry(pkt_queue, struct htc_packet, list);

	if (first_pkt->endpoint >= ENDPOINT_MAX)
		return status;

	depth = get_queue_depth(pkt_queue);

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "htc rx add multiple ep id %d cnt %d len %d\n",
		   first_pkt->endpoint, depth, first_pkt->buf_len);

	endpoint = &target->endpoint[first_pkt->endpoint];

	if (target->htc_flags & HTC_OP_STATE_STOPPING) {
		struct htc_packet *packet, *tmp_pkt;

		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
			list_del(&packet->list);
			ath6kl_htc_rx_complete(endpoint, packet);
		}

		return status;
	}

	spin_lock_bh(&target->rx_lock);

	list_splice_tail_init(pkt_queue, &endpoint->rx_bufq);

	/* check if we are blocked waiting for a new buffer */
	if (target->rx_st_flags & HTC_RECV_WAIT_BUFFERS) {
		if (target->ep_waiting == first_pkt->endpoint) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "htc rx blocked on ep %d, unblocking\n",
				   target->ep_waiting);
			target->rx_st_flags &= ~HTC_RECV_WAIT_BUFFERS;
			target->ep_waiting = ENDPOINT_MAX;
			rx_unblock = true;
		}
	}

	spin_unlock_bh(&target->rx_lock);

	if (rx_unblock && !(target->htc_flags & HTC_OP_STATE_STOPPING))
		/* TODO : implement a buffer threshold count? */
		ath6kl_hif_rx_control(target->dev, true);

	return status;
}
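
/* Free every receive buffer still queued on any in-use endpoint. */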
2382 */ 2383 if (packet->endpoint == ENDPOINT_0) { 2384 kfree(packet->buf_start); 2385 kfree(packet); 2386 } else { 2387 dev_kfree_skb(packet->pkt_cntxt); 2388 } 2389 spin_lock_bh(&target->rx_lock); 2390 } 2391 spin_unlock_bh(&target->rx_lock); 2392 } 2393} 2394 2395int ath6kl_htc_conn_service(struct htc_target *target, 2396 struct htc_service_connect_req *conn_req, 2397 struct htc_service_connect_resp *conn_resp) 2398{ 2399 struct htc_packet *rx_pkt = NULL; 2400 struct htc_packet *tx_pkt = NULL; 2401 struct htc_conn_service_resp *resp_msg; 2402 struct htc_conn_service_msg *conn_msg; 2403 struct htc_endpoint *endpoint; 2404 enum htc_endpoint_id assigned_ep = ENDPOINT_MAX; 2405 unsigned int max_msg_sz = 0; 2406 int status = 0; 2407 u16 msg_id; 2408 2409 ath6kl_dbg(ATH6KL_DBG_HTC, 2410 "htc connect service target 0x%p service id 0x%x\n", 2411 target, conn_req->svc_id); 2412 2413 if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) { 2414 /* special case for pseudo control service */ 2415 assigned_ep = ENDPOINT_0; 2416 max_msg_sz = HTC_MAX_CTRL_MSG_LEN; 2417 } else { 2418 /* allocate a packet to send to the target */ 2419 tx_pkt = htc_get_control_buf(target, true); 2420 2421 if (!tx_pkt) 2422 return -ENOMEM; 2423 2424 conn_msg = (struct htc_conn_service_msg *)tx_pkt->buf; 2425 memset(conn_msg, 0, sizeof(*conn_msg)); 2426 conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID); 2427 conn_msg->svc_id = cpu_to_le16(conn_req->svc_id); 2428 conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags); 2429 2430 set_htc_pkt_info(tx_pkt, NULL, (u8 *) conn_msg, 2431 sizeof(*conn_msg) + conn_msg->svc_meta_len, 2432 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG); 2433 2434 /* we want synchronous operation */ 2435 tx_pkt->completion = NULL; 2436 ath6kl_htc_tx_prep_pkt(tx_pkt, 0, 0, 0); 2437 status = ath6kl_htc_tx_issue(target, tx_pkt); 2438 2439 if (status) 2440 goto fail_tx; 2441 2442 /* wait for response */ 2443 rx_pkt = htc_wait_for_ctrl_msg(target); 2444 2445 if (!rx_pkt) { 2446 status = -ENOMEM; 2447 goto fail_tx; 2448 } 2449 2450 resp_msg = (struct htc_conn_service_resp *)rx_pkt->buf; 2451 msg_id = le16_to_cpu(resp_msg->msg_id); 2452 2453 if ((msg_id != HTC_MSG_CONN_SVC_RESP_ID) || 2454 (rx_pkt->act_len < sizeof(*resp_msg))) { 2455 status = -ENOMEM; 2456 goto fail_tx; 2457 } 2458 2459 conn_resp->resp_code = resp_msg->status; 2460 /* check response status */ 2461 if (resp_msg->status != HTC_SERVICE_SUCCESS) { 2462 ath6kl_err("target failed service 0x%X connect request (status:%d)\n", 2463 resp_msg->svc_id, resp_msg->status); 2464 status = -ENOMEM; 2465 goto fail_tx; 2466 } 2467 2468 assigned_ep = (enum htc_endpoint_id)resp_msg->eid; 2469 max_msg_sz = le16_to_cpu(resp_msg->max_msg_sz); 2470 } 2471 2472 if (assigned_ep >= ENDPOINT_MAX || !max_msg_sz) { 2473 status = -ENOMEM; 2474 goto fail_tx; 2475 } 2476 2477 endpoint = &target->endpoint[assigned_ep]; 2478 endpoint->eid = assigned_ep; 2479 if (endpoint->svc_id) { 2480 status = -ENOMEM; 2481 goto fail_tx; 2482 } 2483 2484 /* return assigned endpoint to caller */ 2485 conn_resp->endpoint = assigned_ep; 2486 conn_resp->len_max = max_msg_sz; 2487 2488 /* setup the endpoint */ 2489 2490 /* this marks the endpoint in use */ 2491 endpoint->svc_id = conn_req->svc_id; 2492 2493 endpoint->max_txq_depth = conn_req->max_txq_depth; 2494 endpoint->len_max = max_msg_sz; 2495 endpoint->ep_cb = conn_req->ep_cb; 2496 endpoint->cred_dist.svc_id = conn_req->svc_id; 2497 endpoint->cred_dist.htc_ep = endpoint; 2498 endpoint->cred_dist.endpoint = assigned_ep; 2499 endpoint->cred_dist.cred_sz = 
static void reset_ep_state(struct htc_target *target)
{
	struct htc_endpoint *endpoint;
	int i;

	for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
		endpoint = &target->endpoint[i];
		memset(&endpoint->cred_dist, 0, sizeof(endpoint->cred_dist));
		endpoint->svc_id = 0;
		endpoint->len_max = 0;
		endpoint->max_txq_depth = 0;
		memset(&endpoint->ep_st, 0,
		       sizeof(endpoint->ep_st));
		INIT_LIST_HEAD(&endpoint->rx_bufq);
		INIT_LIST_HEAD(&endpoint->txq);
		endpoint->target = target;
	}

	/* reset distribution list */
	/* FIXME: free existing entries */
	INIT_LIST_HEAD(&target->cred_dist_list);
}

int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
			     enum htc_endpoint_id endpoint)
{
	int num;

	spin_lock_bh(&target->rx_lock);
	num = get_queue_depth(&(target->endpoint[endpoint].rx_bufq));
	spin_unlock_bh(&target->rx_lock);
	return num;
}
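
/*
 * Configure message bundling based on the HIF scatter-gather
 * capabilities: cap msg_per_bndl_max, derive the maximum rx/tx bundle
 * sizes, and enable rx bundling and the per-AC tx bundling mask.
 */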
static void htc_setup_msg_bndl(struct htc_target *target)
{
	/* limit what HTC can handle */
	target->msg_per_bndl_max = min(HTC_HOST_MAX_MSG_PER_BUNDLE,
				       target->msg_per_bndl_max);

	if (ath6kl_hif_enable_scatter(target->dev->ar)) {
		target->msg_per_bndl_max = 0;
		return;
	}

	/* limit the bundle to what the device layer can handle */
	target->msg_per_bndl_max = min(target->max_scat_entries,
				       target->msg_per_bndl_max);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc bundling allowed msg_per_bndl_max %d\n",
		   target->msg_per_bndl_max);

	/* Max rx bundle size is limited by the max tx bundle size */
	target->max_rx_bndl_sz = target->max_xfer_szper_scatreq;
	/* Max tx bundle size is limited by the extended mbox address range */
	target->max_tx_bndl_sz = min(HIF_MBOX0_EXT_WIDTH,
				     target->max_xfer_szper_scatreq);

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc max_rx_bndl_sz %d max_tx_bndl_sz %d\n",
		   target->max_rx_bndl_sz, target->max_tx_bndl_sz);

	if (target->max_tx_bndl_sz)
		/* tx_bndl_mask is enabled per AC, each has 1 bit */
		target->tx_bndl_mask = (1 << WMM_NUM_AC) - 1;

	if (target->max_rx_bndl_sz)
		target->rx_bndl_enable = true;

	if ((target->tgt_cred_sz % target->block_sz) != 0) {
		ath6kl_warn("credit size: %d is not block aligned! Disabling send bundling\n",
			    target->tgt_cred_sz);

		/*
		 * Disallow send bundling since the credit size is
		 * not aligned to a block size; the I/O block
		 * padding will spill into the next credit buffer,
		 * which is fatal.
		 */
		target->tx_bndl_mask = 0;
	}
}

int ath6kl_htc_wait_target(struct htc_target *target)
{
	struct htc_packet *packet = NULL;
	struct htc_ready_ext_msg *rdy_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status;

	/* FIXME: remove once USB support is implemented */
	if (target->dev->ar->hif_type == ATH6KL_HIF_TYPE_USB) {
		ath6kl_err("HTC doesn't support USB yet. Patience!\n");
		return -EOPNOTSUPP;
	}

	/* we should be getting 1 control message that the target is ready */
	packet = htc_wait_for_ctrl_msg(target);

	if (!packet)
		return -ENOMEM;

	/* we controlled the buffer creation so it's properly aligned */
	rdy_msg = (struct htc_ready_ext_msg *)packet->buf;

	if ((le16_to_cpu(rdy_msg->ver2_0_info.msg_id) != HTC_MSG_READY_ID) ||
	    (packet->act_len < sizeof(struct htc_ready_msg))) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	if (!rdy_msg->ver2_0_info.cred_cnt || !rdy_msg->ver2_0_info.cred_sz) {
		status = -ENOMEM;
		goto fail_wait_target;
	}

	target->tgt_creds = le16_to_cpu(rdy_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(rdy_msg->ver2_0_info.cred_sz);

	ath6kl_dbg(ATH6KL_DBG_BOOT,
		   "htc target ready credits %d size %d\n",
		   target->tgt_creds, target->tgt_cred_sz);

	/* check if this is an extended ready message */
	if (packet->act_len >= sizeof(struct htc_ready_ext_msg)) {
		/* this is an extended message */
		target->htc_tgt_ver = rdy_msg->htc_ver;
		target->msg_per_bndl_max = rdy_msg->msg_per_htc_bndl;
	} else {
		/* legacy */
		target->htc_tgt_ver = HTC_VERSION_2P0;
		target->msg_per_bndl_max = 0;
	}

	ath6kl_dbg(ATH6KL_DBG_BOOT, "htc using protocol %s (%d)\n",
		   (target->htc_tgt_ver == HTC_VERSION_2P0) ? "2.0" : ">= 2.1",
		   target->htc_tgt_ver);

	if (target->msg_per_bndl_max > 0)
		htc_setup_msg_bndl(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.rx = htc_ctrl_rx;
	connect.ep_cb.rx_refill = NULL;
	connect.ep_cb.tx_full = NULL;
	connect.max_txq_depth = NUM_CONTROL_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_conn_service((void *)target, &connect, &resp);

	if (status)
		/*
		 * FIXME: this call doesn't make sense; the caller should
		 * call ath6kl_htc_cleanup() when it wants to remove HTC.
		 */
		ath6kl_hif_cleanup_scatter(target->dev->ar);

fail_wait_target:
	if (packet) {
		htc_rxpkt_reset(packet);
		reclaim_rx_ctrl_buf(target, packet);
	}

	return status;
}
2717 */ 2718int ath6kl_htc_start(struct htc_target *target) 2719{ 2720 struct htc_packet *packet; 2721 int status; 2722 2723 memset(&target->dev->irq_proc_reg, 0, 2724 sizeof(target->dev->irq_proc_reg)); 2725 2726 /* Disable interrupts at the chip level */ 2727 ath6kl_hif_disable_intrs(target->dev); 2728 2729 target->htc_flags = 0; 2730 target->rx_st_flags = 0; 2731 2732 /* Push control receive buffers into htc control endpoint */ 2733 while ((packet = htc_get_control_buf(target, false)) != NULL) { 2734 status = htc_add_rxbuf(target, packet); 2735 if (status) 2736 return status; 2737 } 2738 2739 /* NOTE: the first entry in the distribution list is ENDPOINT_0 */ 2740 ath6kl_credit_init(target->credit_info, &target->cred_dist_list, 2741 target->tgt_creds); 2742 2743 dump_cred_dist_stats(target); 2744 2745 /* Indicate to the target of the setup completion */ 2746 status = htc_setup_tx_complete(target); 2747 2748 if (status) 2749 return status; 2750 2751 /* unmask interrupts */ 2752 status = ath6kl_hif_unmask_intrs(target->dev); 2753 2754 if (status) 2755 ath6kl_htc_stop(target); 2756 2757 return status; 2758} 2759 2760static int ath6kl_htc_reset(struct htc_target *target) 2761{ 2762 u32 block_size, ctrl_bufsz; 2763 struct htc_packet *packet; 2764 int i; 2765 2766 reset_ep_state(target); 2767 2768 block_size = target->dev->ar->mbox_info.block_size; 2769 2770 ctrl_bufsz = (block_size > HTC_MAX_CTRL_MSG_LEN) ? 2771 (block_size + HTC_HDR_LENGTH) : 2772 (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH); 2773 2774 for (i = 0; i < NUM_CONTROL_BUFFERS; i++) { 2775 packet = kzalloc(sizeof(*packet), GFP_KERNEL); 2776 if (!packet) 2777 return -ENOMEM; 2778 2779 packet->buf_start = kzalloc(ctrl_bufsz, GFP_KERNEL); 2780 if (!packet->buf_start) { 2781 kfree(packet); 2782 return -ENOMEM; 2783 } 2784 2785 packet->buf_len = ctrl_bufsz; 2786 if (i < NUM_CONTROL_RX_BUFFERS) { 2787 packet->act_len = 0; 2788 packet->buf = packet->buf_start; 2789 packet->endpoint = ENDPOINT_0; 2790 list_add_tail(&packet->list, &target->free_ctrl_rxbuf); 2791 } else 2792 list_add_tail(&packet->list, &target->free_ctrl_txbuf); 2793 } 2794 2795 return 0; 2796} 2797 2798/* htc_stop: stop interrupt reception, and flush all queued buffers */ 2799void ath6kl_htc_stop(struct htc_target *target) 2800{ 2801 spin_lock_bh(&target->htc_lock); 2802 target->htc_flags |= HTC_OP_STATE_STOPPING; 2803 spin_unlock_bh(&target->htc_lock); 2804 2805 /* 2806 * Masking interrupts is a synchronous operation, when this 2807 * function returns all pending HIF I/O has completed, we can 2808 * safely flush the queues. 
2809 */ 2810 ath6kl_hif_mask_intrs(target->dev); 2811 2812 ath6kl_htc_flush_txep_all(target); 2813 2814 ath6kl_htc_flush_rx_buf(target); 2815 2816 ath6kl_htc_reset(target); 2817} 2818 2819void *ath6kl_htc_create(struct ath6kl *ar) 2820{ 2821 struct htc_target *target = NULL; 2822 int status = 0; 2823 2824 target = kzalloc(sizeof(*target), GFP_KERNEL); 2825 if (!target) { 2826 ath6kl_err("unable to allocate memory\n"); 2827 return NULL; 2828 } 2829 2830 target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL); 2831 if (!target->dev) { 2832 ath6kl_err("unable to allocate memory\n"); 2833 status = -ENOMEM; 2834 goto err_htc_cleanup; 2835 } 2836 2837 spin_lock_init(&target->htc_lock); 2838 spin_lock_init(&target->rx_lock); 2839 spin_lock_init(&target->tx_lock); 2840 2841 INIT_LIST_HEAD(&target->free_ctrl_txbuf); 2842 INIT_LIST_HEAD(&target->free_ctrl_rxbuf); 2843 INIT_LIST_HEAD(&target->cred_dist_list); 2844 2845 target->dev->ar = ar; 2846 target->dev->htc_cnxt = target; 2847 target->ep_waiting = ENDPOINT_MAX; 2848 2849 status = ath6kl_hif_setup(target->dev); 2850 if (status) 2851 goto err_htc_cleanup; 2852 2853 status = ath6kl_htc_reset(target); 2854 if (status) 2855 goto err_htc_cleanup; 2856 2857 return target; 2858 2859err_htc_cleanup: 2860 ath6kl_htc_cleanup(target); 2861 2862 return NULL; 2863} 2864 2865/* cleanup the HTC instance */ 2866void ath6kl_htc_cleanup(struct htc_target *target) 2867{ 2868 struct htc_packet *packet, *tmp_packet; 2869 2870 /* FIXME: remove check once USB support is implemented */ 2871 if (target->dev->ar->hif_type != ATH6KL_HIF_TYPE_USB) 2872 ath6kl_hif_cleanup_scatter(target->dev->ar); 2873 2874 list_for_each_entry_safe(packet, tmp_packet, 2875 &target->free_ctrl_txbuf, list) { 2876 list_del(&packet->list); 2877 kfree(packet->buf_start); 2878 kfree(packet); 2879 } 2880 2881 list_for_each_entry_safe(packet, tmp_packet, 2882 &target->free_ctrl_rxbuf, list) { 2883 list_del(&packet->list); 2884 kfree(packet->buf_start); 2885 kfree(packet); 2886 } 2887 2888 kfree(target->dev); 2889 kfree(target); 2890} 2891