ce.c revision e9780367b0f43536b460fd83931f7c111ec99470

/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hif.h"
#include "pci.h"
#include "ce.h"
#include "debug.h"

/*
 * Support for Copy Engine hardware, which is mainly used for
 * communication between Host and Target over a PCIe interconnect.
 */

/*
 * A single CopyEngine (CE) comprises two "rings":
 *   a source ring
 *   a destination ring
 *
 * Each ring consists of a number of descriptors which specify
 * an address, length, and meta-data.
 *
 * Typically, one side of the PCIe interconnect (Host or Target)
 * controls one ring and the other side controls the other ring.
 * The source side chooses when to initiate a transfer and it
 * chooses what to send (buffer address, length). The destination
 * side keeps a supply of "anonymous receive buffers" available and
 * it handles incoming data as it arrives (when the destination
 * receives an interrupt).
 *
 * The sender may send a simple buffer (address/length) or it may
 * send a small list of buffers. When a small list is sent, hardware
 * "gathers" these and they end up in a single destination buffer
 * with a single interrupt.
 *
 * There are several "contexts" managed by this layer -- more, it
 * may seem, than should be needed. These are provided mainly for
 * maximum flexibility and especially to facilitate a simpler HIF
 * implementation. There are per-CopyEngine recv, send, and watermark
 * contexts. These are supplied by the caller when a recv, send,
 * or watermark handler is established and they are echoed back to
 * the caller when the respective callbacks are invoked. There is
 * also a per-transfer context supplied by the caller when a buffer
 * (or sendlist) is sent and when a buffer is enqueued for recv.
 * These per-transfer contexts are echoed back to the caller when
 * the buffer is sent/received.
 */
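
/*
 * Illustrative flow (an example sketch, not part of the driver; the
 * names pipe, my_recv_handler, my_ctx and paddr are hypothetical):
 *
 *	pipe = ath10k_ce_init(ar, ce_id, attr);
 *	ath10k_ce_recv_cb_register(pipe, my_recv_handler);
 *	ath10k_ce_recv_buf_enqueue(pipe, my_ctx, paddr);
 *	...
 *	ret = ath10k_ce_send(pipe, my_ctx, paddr, nbytes, transfer_id, 0);
 *
 * Completions are delivered through the registered callbacks, with the
 * per-transfer context echoed back as described above.
 */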

static inline void ath10k_ce_dest_ring_write_index_set(struct ath10k *ar,
						       u32 ce_ctrl_addr,
						       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_dest_ring_write_index_get(struct ath10k *ar,
						      u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + DST_WR_INDEX_ADDRESS);
}

static inline void ath10k_ce_src_ring_write_index_set(struct ath10k *ar,
						      u32 ce_ctrl_addr,
						      unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS, n);
}

static inline u32 ath10k_ce_src_ring_write_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + SR_WR_INDEX_ADDRESS);
}

static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}

static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + SR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_dmax_set(struct ath10k *ar,
					       u32 ce_ctrl_addr,
					       unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DMAX_LENGTH_MASK) |
			   CE_CTRL1_DMAX_LENGTH_SET(n));
}

static inline void ath10k_ce_src_ring_byte_swap_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_SRC_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_SRC_RING_BYTE_SWAP_EN_SET(n));
}

static inline void ath10k_ce_dest_ring_byte_swap_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int n)
{
	u32 ctrl1_addr = ath10k_pci_read32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + CE_CTRL1_ADDRESS,
			   (ctrl1_addr & ~CE_CTRL1_DST_RING_BYTE_SWAP_EN_MASK) |
			   CE_CTRL1_DST_RING_BYTE_SWAP_EN_SET(n));
}

static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
						     u32 ce_ctrl_addr)
{
	return ath10k_pci_read32(ar, ce_ctrl_addr + CURRENT_DRRI_ADDRESS);
}

static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     u32 addr)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_BA_ADDRESS, addr);
}

static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
						u32 ce_ctrl_addr,
						unsigned int n)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + DR_SIZE_ADDRESS, n);
}

static inline void ath10k_ce_src_ring_highmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_HIGH_MASK) |
			   SRC_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_src_ring_lowmark_set(struct ath10k *ar,
						  u32 ce_ctrl_addr,
						  unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + SRC_WATERMARK_ADDRESS,
			   (addr & ~SRC_WATERMARK_LOW_MASK) |
			   SRC_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_dest_ring_highmark_set(struct ath10k *ar,
						    u32 ce_ctrl_addr,
						    unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_HIGH_MASK) |
			   DST_WATERMARK_HIGH_SET(n));
}

static inline void ath10k_ce_dest_ring_lowmark_set(struct ath10k *ar,
						   u32 ce_ctrl_addr,
						   unsigned int n)
{
	u32 addr = ath10k_pci_read32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + DST_WATERMARK_ADDRESS,
			   (addr & ~DST_WATERMARK_LOW_MASK) |
			   DST_WATERMARK_LOW_SET(n));
}

static inline void ath10k_ce_copy_complete_intr_enable(struct ath10k *ar,
							u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr | HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_copy_complete_intr_disable(struct ath10k *ar,
							 u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~HOST_IE_COPY_COMPLETE_MASK);
}

static inline void ath10k_ce_watermark_intr_disable(struct ath10k *ar,
						    u32 ce_ctrl_addr)
{
	u32 host_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + HOST_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IE_ADDRESS,
			   host_ie_addr & ~CE_WATERMARK_MASK);
}

static inline void ath10k_ce_error_intr_enable(struct ath10k *ar,
					       u32 ce_ctrl_addr)
{
	u32 misc_ie_addr = ath10k_pci_read32(ar,
					     ce_ctrl_addr + MISC_IE_ADDRESS);

	ath10k_pci_write32(ar, ce_ctrl_addr + MISC_IE_ADDRESS,
			   misc_ie_addr | CE_ERROR_MASK);
}

static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
						     u32 ce_ctrl_addr,
						     unsigned int mask)
{
	ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}
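
/*
 * A note on the ring index arithmetic used below (a sketch based on the
 * CE_RING_DELTA()/CE_RING_IDX_INCR() helpers from ce.h): nentries is a
 * power of two and nentries_mask = nentries - 1, so indices wrap with a
 * simple mask.  CE_RING_DELTA(mask, write_index, sw_index - 1) is the
 * number of slots still free; using "sw_index - 1" keeps one slot
 * permanently unused so that a full ring is distinguishable from an
 * empty one (full: delta == 0, empty: write_index == sw_index).
 */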

/*
 * Guts of ath10k_ce_send, used by both ath10k_ce_send and
 * ath10k_ce_sendlist_send.
 * The caller takes responsibility for any needed locking.
 */
static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
				 void *per_transfer_context,
				 u32 buffer,
				 unsigned int nbytes,
				 unsigned int transfer_id,
				 unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_desc *desc, *sdesc;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	u32 ctrl_addr = ce_state->ctrl_addr;
	u32 desc_flags = 0;
	int ret = 0;

	if (nbytes > ce_state->src_sz_max)
		ath10k_warn("%s: sending more than max allowed (nbytes: %d, max: %d)\n",
			    __func__, nbytes, ce_state->src_sz_max);

	ret = ath10k_pci_wake(ar);
	if (ret)
		return ret;

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) <= 0)) {
		/* source ring full */
		ret = -EIO;
		goto exit;
	}

	desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
				   write_index);
	sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);

	desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);

	if (flags & CE_SEND_FLAG_GATHER)
		desc_flags |= CE_DESC_FLAGS_GATHER;
	if (flags & CE_SEND_FLAG_BYTE_SWAP)
		desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;

	/* Fill the shadow descriptor, then copy it to the CE ring in one go */
	sdesc->addr = __cpu_to_le32(buffer);
	sdesc->nbytes = __cpu_to_le16(nbytes);
	sdesc->flags = __cpu_to_le16(desc_flags);

	*desc = *sdesc;

	src_ring->per_transfer_context[write_index] = per_transfer_context;

	/* Update Source Ring Write Index */
	write_index = CE_RING_IDX_INCR(nentries_mask, write_index);

	/* WORKAROUND */
	if (!(flags & CE_SEND_FLAG_GATHER))
		ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);

	src_ring->write_index = write_index;
exit:
	ath10k_pci_sleep(ar);
	return ret;
}

int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
		   void *per_transfer_context,
		   u32 buffer,
		   unsigned int nbytes,
		   unsigned int transfer_id,
		   unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
				    buffer, nbytes, transfer_id, flags);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

void ath10k_ce_sendlist_buf_add(struct ce_sendlist *sendlist, u32 buffer,
				unsigned int nbytes, u32 flags)
{
	unsigned int num_items = sendlist->num_items;
	struct ce_sendlist_item *item;

	item = &sendlist->item[num_items];
	item->data = buffer;
	item->u.nbytes = nbytes;
	item->flags = flags;
	sendlist->num_items++;
}
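
/*
 * A sendlist is pushed out as a hardware "gather" sequence (see the
 * description at the top of this file): every item except the last is
 * enqueued with CE_SEND_FLAG_GATHER and the dummy CE_SENDLIST_ITEM_CTXT
 * context, and only the final item carries the caller's per-transfer
 * context.  The source ring write index is only rung for the final,
 * non-gather item (see the WORKAROUND note in ath10k_ce_send_nolock),
 * presumably so the hardware consumes the whole list as one unit.
 */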

int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
			    void *per_transfer_context,
			    struct ce_sendlist *sendlist,
			    unsigned int transfer_id)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	struct ce_sendlist_item *item;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int num_items = sendlist->num_items;
	unsigned int sw_index;
	unsigned int write_index;
	int i, delta, ret = -ENOMEM;

	spin_lock_bh(&ar_pci->ce_lock);

	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);

	if (delta >= num_items) {
		/*
		 * Handle all but the last item uniformly.
		 */
		for (i = 0; i < num_items - 1; i++) {
			item = &sendlist->item[i];
			ret = ath10k_ce_send_nolock(ce_state,
						    CE_SENDLIST_ITEM_CTXT,
						    (u32) item->data,
						    item->u.nbytes, transfer_id,
						    item->flags |
						    CE_SEND_FLAG_GATHER);
			if (ret)
				ath10k_warn("CE send failed for item: %d\n", i);
		}
		/*
		 * Provide valid context pointer for final item.
		 */
		item = &sendlist->item[i];
		ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
					    (u32) item->data, item->u.nbytes,
					    transfer_id, item->flags);
		if (ret)
			ath10k_warn("CE send failed for last item: %d\n", i);
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
			       void *per_recv_context,
			       u32 buffer)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int write_index;
	unsigned int sw_index;
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	write_index = dest_ring->write_index;
	sw_index = dest_ring->sw_index;

	ret = ath10k_pci_wake(ar);
	if (ret)
		goto out;

	if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);

		/* Update destination descriptor */
		desc->addr = __cpu_to_le32(buffer);
		desc->nbytes = 0;

		dest_ring->per_transfer_context[write_index] =
							per_recv_context;

		/* Update Destination Ring Write Index */
		write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
		ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
		dest_ring->write_index = write_index;
		ret = 0;
	} else {
		ret = -EIO;
	}
	ath10k_pci_sleep(ar);

out:
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_recv_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp,
						unsigned int *flagsp)
{
	struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
	unsigned int nentries_mask = dest_ring->nentries_mask;
	unsigned int sw_index = dest_ring->sw_index;

	struct ce_desc *base = dest_ring->base_addr_owner_space;
	struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);
	struct ce_desc sdesc;
	u16 nbytes;

	/* Copy in one go for performance reasons */
	sdesc = *desc;

	nbytes = __le16_to_cpu(sdesc.nbytes);
	if (nbytes == 0) {
		/*
		 * This closes a relatively unusual race where the Host
		 * sees the updated DRRI before the update to the
		 * corresponding descriptor has completed. We treat this
		 * as a descriptor that is not yet done.
		 */
		return -EIO;
	}

	desc->nbytes = 0;

	/* Return data from completed destination descriptor */
	*bufferp = __le32_to_cpu(sdesc.addr);
	*nbytesp = nbytes;
	*transfer_idp = MS(__le16_to_cpu(sdesc.flags), CE_DESC_FLAGS_META_DATA);

	if (__le16_to_cpu(sdesc.flags) & CE_DESC_FLAGS_BYTE_SWAP)
		*flagsp = CE_RECV_FLAG_SWAPPED;
	else
		*flagsp = 0;

	if (per_transfer_contextp)
		*per_transfer_contextp =
			dest_ring->per_transfer_context[sw_index];

	/* sanity */
	dest_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	dest_ring->sw_index = sw_index;

	return 0;
}

int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp,
				  unsigned int *flagsp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_recv_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp, flagsp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp)
{
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	dest_ring = ce_state->dest_ring;

	if (!dest_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = dest_ring->nentries_mask;
	sw_index = dest_ring->sw_index;
	write_index = dest_ring->write_index;
	if (write_index != sw_index) {
		struct ce_desc *base = dest_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, sw_index);

		/* Return data from completed destination descriptor */
		*bufferp = __le32_to_cpu(desc->addr);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				dest_ring->per_transfer_context[sw_index];

		/* sanity */
		dest_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		dest_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of ath10k_ce_completed_send_next.
 * The caller takes responsibility for any necessary locking.
 */
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
						void **per_transfer_contextp,
						u32 *bufferp,
						unsigned int *nbytesp,
						unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring = ce_state->src_ring;
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	struct ce_desc *sdesc, *sbase;
	unsigned int read_index;
	int ret;

	if (src_ring->hw_index == sw_index) {
		/*
		 * The SW completion index has caught up with the cached
		 * version of the HW completion index.
		 * Update the cached HW completion index to see whether
		 * the SW has really caught up to the HW, or if the cached
		 * value of the HW index has become stale.
		 */

		ret = ath10k_pci_wake(ar);
		if (ret)
			return ret;

		src_ring->hw_index =
			ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
		src_ring->hw_index &= nentries_mask;

		ath10k_pci_sleep(ar);
	}

	read_index = src_ring->hw_index;

	if ((read_index == sw_index) || (read_index == 0xffffffff))
		return -EIO;

	sbase = src_ring->shadow_base;
	sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);

	/* Return data from completed source descriptor */
	*bufferp = __le32_to_cpu(sdesc->addr);
	*nbytesp = __le16_to_cpu(sdesc->nbytes);
	*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
			   CE_DESC_FLAGS_META_DATA);

	if (per_transfer_contextp)
		*per_transfer_contextp =
			src_ring->per_transfer_context[sw_index];

	/* sanity */
	src_ring->per_transfer_context[sw_index] = NULL;

	/* Update sw_index */
	sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
	src_ring->sw_index = sw_index;

	return 0;
}

/* NB: Modeled after ath10k_ce_completed_send_next */
int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
			       void **per_transfer_contextp,
			       u32 *bufferp,
			       unsigned int *nbytesp,
			       unsigned int *transfer_idp)
{
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries_mask;
	unsigned int sw_index;
	unsigned int write_index;
	int ret;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;

	src_ring = ce_state->src_ring;

	if (!src_ring)
		return -EIO;

	ar = ce_state->ar;
	ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);

	nentries_mask = src_ring->nentries_mask;
	sw_index = src_ring->sw_index;
	write_index = src_ring->write_index;

	if (write_index != sw_index) {
		struct ce_desc *base = src_ring->base_addr_owner_space;
		struct ce_desc *desc = CE_SRC_RING_TO_DESC(base, sw_index);

		/* Return data from completed source descriptor */
		*bufferp = __le32_to_cpu(desc->addr);
		*nbytesp = __le16_to_cpu(desc->nbytes);
		*transfer_idp = MS(__le16_to_cpu(desc->flags),
				   CE_DESC_FLAGS_META_DATA);

		if (per_transfer_contextp)
			*per_transfer_contextp =
				src_ring->per_transfer_context[sw_index];

		/* sanity */
		src_ring->per_transfer_context[sw_index] = NULL;

		/* Update sw_index */
		sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
		src_ring->sw_index = sw_index;
		ret = 0;
	} else {
		ret = -EIO;
	}

	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
				  void **per_transfer_contextp,
				  u32 *bufferp,
				  unsigned int *nbytesp,
				  unsigned int *transfer_idp)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	spin_lock_bh(&ar_pci->ce_lock);
	ret = ath10k_ce_completed_send_next_nolock(ce_state,
						   per_transfer_contextp,
						   bufferp, nbytesp,
						   transfer_idp);
	spin_unlock_bh(&ar_pci->ce_lock);

	return ret;
}

/*
 * Guts of interrupt handler for per-engine interrupts on a particular CE.
 *
 * Invokes registered callbacks for recv_complete,
 * send_complete, and watermarks.
 */
void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ce_state->ctrl_addr;
	void *transfer_context;
	u32 buf;
	unsigned int nbytes;
	unsigned int id;
	unsigned int flags;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	spin_lock_bh(&ar_pci->ce_lock);

	/* Clear the copy-complete interrupts that will be handled here. */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr,
					  HOST_IS_COPY_COMPLETE_MASK);

	if (ce_state->recv_cb) {
		/*
		 * Pop completed recv buffers and call the registered
		 * recv callback for each.  ce_lock is dropped around
		 * the callback so that it may safely re-enter this
		 * layer, e.g. to post a fresh receive buffer.
		 */
		while (ath10k_ce_completed_recv_next_nolock(ce_state,
							    &transfer_context,
							    &buf, &nbytes,
							    &id, &flags) == 0) {
			spin_unlock_bh(&ar_pci->ce_lock);
			ce_state->recv_cb(ce_state, transfer_context, buf,
					  nbytes, id, flags);
			spin_lock_bh(&ar_pci->ce_lock);
		}
	}

	if (ce_state->send_cb) {
		/*
		 * Pop completed send buffers and call the registered
		 * send callback for each
		 */
		while (ath10k_ce_completed_send_next_nolock(ce_state,
							    &transfer_context,
							    &buf,
							    &nbytes,
							    &id) == 0) {
			spin_unlock_bh(&ar_pci->ce_lock);
			ce_state->send_cb(ce_state, transfer_context,
					  buf, nbytes, id);
			spin_lock_bh(&ar_pci->ce_lock);
		}
	}

	/*
	 * Misc CE interrupts are not being handled, but still need
	 * to be cleared.
	 */
	ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);

	spin_unlock_bh(&ar_pci->ce_lock);
	ath10k_pci_sleep(ar);
}

/*
 * Handler for per-engine interrupts on ALL active CEs.
 * This is used in cases where the system is sharing a
 * single interrupt for all CEs.
 */
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id, ret;
	u32 intr_summary;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	intr_summary = CE_INTERRUPT_SUMMARY(ar);

	for (ce_id = 0; intr_summary && (ce_id < ar_pci->ce_count); ce_id++) {
		if (intr_summary & (1 << ce_id))
			intr_summary &= ~(1 << ce_id);
		else
			/* no intr pending on this CE */
			continue;

		ath10k_ce_per_engine_service(ar, ce_id);
	}

	ath10k_pci_sleep(ar);
}

/*
 * Adjust interrupts for the copy complete handler.
 * If it's needed for either send or recv, then unmask
 * this interrupt; otherwise, mask it.
 *
 * Called with ce_lock held.
 */
static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
						int disable_copy_compl_intr)
{
	u32 ctrl_addr = ce_state->ctrl_addr;
	struct ath10k *ar = ce_state->ar;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	if ((!disable_copy_compl_intr) &&
	    (ce_state->send_cb || ce_state->recv_cb))
		ath10k_ce_copy_complete_intr_enable(ar, ctrl_addr);
	else
		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);

	ath10k_ce_watermark_intr_disable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);
}

void ath10k_ce_disable_interrupts(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id, ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return;

	for (ce_id = 0; ce_id < ar_pci->ce_count; ce_id++) {
		struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
		u32 ctrl_addr = ce_state->ctrl_addr;

		ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
	}
	ath10k_pci_sleep(ar);
}

void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*send_cb)(struct ath10k_ce_pipe *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
						unsigned int transfer_id),
				int disable_interrupts)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->send_cb = send_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
	spin_unlock_bh(&ar_pci->ce_lock);
}

void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
				void (*recv_cb)(struct ath10k_ce_pipe *ce_state,
						void *transfer_context,
						u32 buffer,
						unsigned int nbytes,
						unsigned int transfer_id,
						unsigned int flags))
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	spin_lock_bh(&ar_pci->ce_lock);
	ce_state->recv_cb = recv_cb;
	ath10k_ce_per_engine_handler_adjust(ce_state, 0);
	spin_unlock_bh(&ar_pci->ce_lock);
}

static int ath10k_ce_init_src_ring(struct ath10k *ar,
				   unsigned int ce_id,
				   struct ath10k_ce_pipe *ce_state,
				   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *src_ring;
	unsigned int nentries = attr->src_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->src_ring) {
		WARN_ON(ce_state->src_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->src_ring = (struct ath10k_ce_ring *)ptr;
	src_ring = ce_state->src_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	src_ring->nentries = nentries;
	src_ring->nentries_mask = nentries - 1;

	src_ring->sw_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
	src_ring->sw_index &= src_ring->nentries_mask;
	src_ring->hw_index = src_ring->sw_index;

	src_ring->write_index =
		ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
	src_ring->write_index &= src_ring->nentries_mask;

	src_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	src_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!src_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->base_addr_ce_space_unaligned = base_addr;

	src_ring->base_addr_owner_space = PTR_ALIGN(
			src_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	src_ring->base_addr_ce_space = ALIGN(
			src_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	/*
	 * Also allocate a shadow src ring in regular
	 * mem to use for faster access.
	 */
	src_ring->shadow_base_unaligned =
		kmalloc((nentries * sizeof(struct ce_desc) +
			 CE_DESC_RING_ALIGN), GFP_KERNEL);
	if (!src_ring->shadow_base_unaligned) {
		pci_free_consistent(ar_pci->pdev,
				    (nentries * sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    src_ring->base_addr_owner_space,
				    src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
		ce_state->src_ring = NULL;
		return -ENOMEM;
	}

	src_ring->shadow_base = PTR_ALIGN(
			src_ring->shadow_base_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
					 src_ring->base_addr_ce_space);
	ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
	ath10k_ce_src_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);

	return 0;
}

static int ath10k_ce_init_dest_ring(struct ath10k *ar,
				    unsigned int ce_id,
				    struct ath10k_ce_pipe *ce_state,
				    const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_ring *dest_ring;
	unsigned int nentries = attr->dest_nentries;
	unsigned int ce_nbytes;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	dma_addr_t base_addr;
	char *ptr;

	nentries = roundup_pow_of_two(nentries);

	if (ce_state->dest_ring) {
		WARN_ON(ce_state->dest_ring->nentries != nentries);
		return 0;
	}

	ce_nbytes = sizeof(struct ath10k_ce_ring) + (nentries * sizeof(void *));
	ptr = kzalloc(ce_nbytes, GFP_KERNEL);
	if (ptr == NULL)
		return -ENOMEM;

	ce_state->dest_ring = (struct ath10k_ce_ring *)ptr;
	dest_ring = ce_state->dest_ring;

	ptr += sizeof(struct ath10k_ce_ring);
	dest_ring->nentries = nentries;
	dest_ring->nentries_mask = nentries - 1;

	dest_ring->sw_index = ath10k_ce_dest_ring_read_index_get(ar, ctrl_addr);
	dest_ring->sw_index &= dest_ring->nentries_mask;
	dest_ring->write_index =
		ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
	dest_ring->write_index &= dest_ring->nentries_mask;

	dest_ring->per_transfer_context = (void **)ptr;

	/*
	 * Legacy platforms that do not support cache
	 * coherent DMA are unsupported
	 */
	dest_ring->base_addr_owner_space_unaligned =
		pci_alloc_consistent(ar_pci->pdev,
				     (nentries * sizeof(struct ce_desc) +
				      CE_DESC_RING_ALIGN),
				     &base_addr);
	if (!dest_ring->base_addr_owner_space_unaligned) {
		kfree(ce_state->dest_ring);
		ce_state->dest_ring = NULL;
		return -ENOMEM;
	}

	dest_ring->base_addr_ce_space_unaligned = base_addr;
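
	/*
	 * Both rings are over-allocated by CE_DESC_RING_ALIGN bytes so
	 * that the CPU-visible base (PTR_ALIGN) and the CE-visible bus
	 * address (ALIGN) can each be rounded up to the required
	 * alignment without running past the end of the allocation.
	 */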

	/*
	 * Zero the descriptor memory up front so the CE never sees
	 * garbage descriptors, which could crash the system during
	 * firmware download.
	 */
	memset(dest_ring->base_addr_owner_space_unaligned, 0,
	       nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN);

	dest_ring->base_addr_owner_space = PTR_ALIGN(
			dest_ring->base_addr_owner_space_unaligned,
			CE_DESC_RING_ALIGN);
	dest_ring->base_addr_ce_space = ALIGN(
			dest_ring->base_addr_ce_space_unaligned,
			CE_DESC_RING_ALIGN);

	ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
					  dest_ring->base_addr_ce_space);
	ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
	ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
	ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);

	return 0;
}

static struct ath10k_ce_pipe *ath10k_ce_init_state(struct ath10k *ar,
						   unsigned int ce_id,
						   const struct ce_attr *attr)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);

	spin_lock_bh(&ar_pci->ce_lock);

	ce_state->ar = ar;
	ce_state->id = ce_id;
	ce_state->ctrl_addr = ctrl_addr;
	ce_state->attr_flags = attr->flags;
	ce_state->src_sz_max = attr->src_sz_max;

	spin_unlock_bh(&ar_pci->ce_lock);

	return ce_state;
}

/*
 * Initialize a Copy Engine based on caller-supplied attributes.
 * This may be called once to initialize both source and destination
 * rings or it may be called twice for separate source and destination
 * initialization. It may be that only one side or the other is
 * initialized by software/firmware.
 */
struct ath10k_ce_pipe *ath10k_ce_init(struct ath10k *ar,
				      unsigned int ce_id,
				      const struct ce_attr *attr)
{
	struct ath10k_ce_pipe *ce_state;
	u32 ctrl_addr = ath10k_ce_base_address(ce_id);
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret)
		return NULL;

	ce_state = ath10k_ce_init_state(ar, ce_id, attr);
	if (!ce_state) {
		ath10k_err("Failed to initialize CE state for ID: %d\n", ce_id);
		return NULL;
	}

	if (attr->src_nentries) {
		ret = ath10k_ce_init_src_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
	}

	if (attr->dest_nentries) {
		ret = ath10k_ce_init_dest_ring(ar, ce_id, ce_state, attr);
		if (ret) {
			ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
				   ce_id, ret);
			ath10k_ce_deinit(ce_state);
			return NULL;
		}
	}

	/* Enable CE error interrupts */
	ath10k_ce_error_intr_enable(ar, ctrl_addr);

	ath10k_pci_sleep(ar);

	return ce_state;
}
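
/*
 * Each successful ath10k_ce_init() is expected to be balanced by a
 * matching ath10k_ce_deinit() below.  Re-initializing an engine whose
 * rings were already allocated is tolerated: the ring init helpers
 * above return early, keeping the existing allocation, when the ring
 * is already present.
 */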

void ath10k_ce_deinit(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ce_state->src_ring) {
		kfree(ce_state->src_ring->shadow_base_unaligned);
		pci_free_consistent(ar_pci->pdev,
				    (ce_state->src_ring->nentries *
				     sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    ce_state->src_ring->base_addr_owner_space,
				    ce_state->src_ring->base_addr_ce_space);
		kfree(ce_state->src_ring);
	}

	if (ce_state->dest_ring) {
		pci_free_consistent(ar_pci->pdev,
				    (ce_state->dest_ring->nentries *
				     sizeof(struct ce_desc) +
				     CE_DESC_RING_ALIGN),
				    ce_state->dest_ring->base_addr_owner_space,
				    ce_state->dest_ring->base_addr_ce_space);
		kfree(ce_state->dest_ring);
	}

	ce_state->src_ring = NULL;
	ce_state->dest_ring = NULL;
}