/drivers/staging/iio/meter/
ade7758.h
    112: * @tx: transmit buffer
    114: * @buf_lock: mutex to protect tx and rx
    119: u8 *tx;  [member in struct:ade7758_state]
/drivers/staging/lustre/lnet/klnds/o2iblnd/
o2iblnd.c
    1101: * and connection establishment with a NULL tx */
    1224: kib_tx_t *tx;  [local]
    1233: tx = &tpo->tpo_tx_descs[i];
    1235: KIBLND_UNMAP_ADDR(tx, tx_msgunmap,
    1236: tx->tx_msgaddr),
    1278: kib_tx_t *tx;  [local]
    1297: tx = &tpo->tpo_tx_descs[i];
    1299: tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) +
    1302: tx->tx_msgaddr = kiblnd_dma_map_single(
    1303: tpo->tpo_hdev->ibh_ibdev, tx
    1994: kib_tx_t *tx = &tpo->tpo_tx_descs[i];  [local]
    2070: kib_tx_t *tx = &tpo->tpo_tx_descs[i];  [local]
    2122: kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list);  [local]
    [all...]
o2iblnd_cb.c
    44: kiblnd_tx_done (lnet_ni_t *ni, kib_tx_t *tx)  [argument]
    53: LASSERT (!tx->tx_queued); /* mustn't be queued for sending */
    54: LASSERT (tx->tx_sending == 0); /* mustn't be awaiting sent callback */
    55: LASSERT (!tx->tx_waiting); /* mustn't be awaiting peer response */
    56: LASSERT (tx->tx_pool != NULL);
    58: kiblnd_unmap_tx(ni, tx);
    60: /* tx may have up to 2 lnet msgs to finalise */
    61: lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL;
    62: lntmsg[1] = tx
    89: kib_tx_t *tx;  [local]
    107: kib_tx_t *tx;  [local]
    218: kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list);  [local]
    240: kib_tx_t *tx;  [local]
    280: kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid);  [local]
    302: kib_tx_t *tx;  [local]
    551: kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)  [argument]
    596: kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)  [argument]
    632: kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)  [argument]
    655: kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nfrags)  [argument]
    699: kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, unsigned int niov, struct iovec *iov, int offset, int nob)  [argument]
    752: kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, int nkiov, lnet_kiov_t *kiov, int offset, int nob)  [argument]
    930: kib_tx_t *tx;  [local]
    998: kiblnd_tx_complete(kib_tx_t *tx, int status)  [argument]
    1052: kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob)  [argument]
    1086: kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, int resid, kib_rdma_desc_t *dstrd, __u64 dstcookie)  [argument]
    1171: kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn)  [argument]
    1224: kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn)  [argument]
    1336: kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid)  [argument]
    1470: kib_tx_t *tx;  [local]
    1636: kib_tx_t *tx;  [local]
    1697: kib_tx_t *tx;  [local]
    1963: kib_tx_t *tx;  [local]
    2076: kib_tx_t *tx;  [local]
    2991: kib_tx_t *tx;  [local]
    [all...]
/drivers/staging/lustre/lnet/klnds/socklnd/
socklnd.c
    1025: ksock_tx_t *tx;  [local]
    1273: list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
    1274: if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
    1277: list_del (&tx->tx_list);
    1278: ksocknal_queue_tx_locked (tx, conn);
    1447: ksock_tx_t *tx;  [local]
    1453: list_for_each_entry(tx, &peer->ksnp_tx_queue,
    1455: ksocknal_tx_prep(conn, tx);
    1513: ksock_tx_t *tx;  [local]
    2239: ksock_tx_t *tx;  [local]
    [all...]
socklnd.h
    65: /* zombie noop tx list */
    108: int *ksnd_tx_buffer_size; /* socket tx buffer size */
    185: struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */
    212: atomic_t tx_refcount; /* tx reference count */
    225: unsigned long tx_deadline; /* when (in jiffies) tx times out */
    304: unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */
    374: ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); /* queue tx on the connection */
    463: ksocknal_tx_addref (ksock_tx_t *tx)  [argument]
    465: LASSERT (atomic_read(&tx->tx_refcount) > 0);
    466: atomic_inc(&tx
    473: ksocknal_tx_decref(ksock_tx_t *tx)  [argument]
    [all...]
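The socklnd.h hits at 463 and 473 expose the tx reference-counting helpers built around tx_refcount. Below is a minimal sketch of that addref/decref idiom, assuming the last reference releases the descriptor (the hit list only shows the addref side); the struct and function names are placeholders, and BUG_ON stands in for the driver's LASSERT.

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/slab.h>

struct sample_tx {
    atomic_t tx_refcount;    /* one count per outstanding user of the tx */
};

static inline void sample_tx_addref(struct sample_tx *tx)
{
    /* the caller must already hold a reference */
    BUG_ON(atomic_read(&tx->tx_refcount) <= 0);
    atomic_inc(&tx->tx_refcount);
}

static inline void sample_tx_decref(struct sample_tx *tx)
{
    /* drop our reference; the last one down frees the descriptor
     * (the real socklnd recycles idle noop txs instead of freeing) */
    if (atomic_dec_and_test(&tx->tx_refcount))
        kfree(tx);
}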
socklnd_cb.c
    32: ksock_tx_t *tx = NULL;  [local]
    37: /* searching for a noop tx in free list */
    41: tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \
    43: LASSERT(tx->tx_desc_size == size);
    44: list_del(&tx->tx_list);
    50: if (tx == NULL)
    51: LIBCFS_ALLOC(tx, size);
    53: if (tx == NULL)
    56: atomic_set(&tx->tx_refcount, 1);
    57: tx
    70: ksock_tx_t *tx;  [local]
    94: ksocknal_free_tx(ksock_tx_t *tx)  [argument]
    111: ksocknal_send_iov(ksock_conn_t *conn, ksock_tx_t *tx)  [argument]
    148: ksocknal_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx)  [argument]
    186: ksocknal_transmit(ksock_conn_t *conn, ksock_tx_t *tx)  [argument]
    389: ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx)  [argument]
    410: ksock_tx_t *tx;  [local]
    433: ksocknal_check_zc_req(ksock_tx_t *tx)  [argument]
    477: ksocknal_uncheck_zc_req(ksock_tx_t *tx)  [argument]
    503: ksocknal_process_transmit(ksock_conn_t *conn, ksock_tx_t *tx)  [argument]
    621: ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk)  [argument]
    678: ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx)  [argument]
    688: ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn)  [argument]
    836: ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)  [argument]
    937: ksock_tx_t *tx;  [local]
    1395: ksock_tx_t *tx;  [local]
    2333: ksock_tx_t *tx;  [local]
    2360: ksock_tx_t *tx;  [local]
    2422: ksock_tx_t *tx;  [local]
    2457: ksock_tx_t *tx =  [local]
    [all...]
socklnd_lib-linux.c
    78: ksocknal_lib_send_iov (ksock_conn_t *conn, ksock_tx_t *tx)  [argument]
    86: tx->tx_nob == tx->tx_resid && /* frist sending */
    87: tx->tx_msg.ksm_csum == 0) /* not checksummed */
    88: ksocknal_lib_csum_tx(tx);
    100: unsigned int niov = tx->tx_niov;
    106: scratchiov[i] = tx->tx_iov[i];
    111: nob < tx->tx_resid)
    120: ksocknal_lib_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx)  [argument]
    123: lnet_kiov_t *kiov = tx
    387: ksocknal_lib_csum_tx(ksock_tx_t *tx)  [argument]
    [all...]
socklnd_proto.c
    55: ksock_tx_t *tx = conn->ksnc_tx_carrier;  [local]
    59: LASSERT (tx != NULL);
    62: if (tx->tx_list.next == &conn->ksnc_tx_queue) {
    66: conn->ksnc_tx_carrier = list_entry(tx->tx_list.next,
    68: LASSERT (conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type);
    76: ksock_tx_t *tx = conn->ksnc_tx_carrier;  [local]
    83: * . no tx can piggyback cookie of tx_ack (or cookie), just
    85: * . There is tx can piggyback cookie of tx_ack (or cookie),
    86: * piggyback the cookie and return the tx.
    88: if (tx
    122: ksock_tx_t *tx = conn->ksnc_tx_carrier;  [local]
    159: ksock_tx_t *tx;  [local]
    274: ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)  [argument]
    318: ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk)  [argument]
    366: ksock_tx_t *tx;  [local]
    409: ksock_tx_t *tx;  [local]
    715: ksocknal_pack_msg_v1(ksock_tx_t *tx)  [argument]
    728: ksocknal_pack_msg_v2(ksock_tx_t *tx)  [argument]
    [all...]
/drivers/staging/media/lirc/
lirc_imon.c
    107: int status; /* status of tx completion */
    108: } tx;  [member in struct:imon_context]
    341: init_completion(&context->tx.finished);
    342: atomic_set(&(context->tx.busy), 1);
    346: atomic_set(&(context->tx.busy), 0);
    353: &context->tx.finished);
    359: retval = context->tx.status;
    362: "%s: packet tx failed (%d)\n",
    421: memcpy(context->tx.data_buf, data_buf, n_bytes);
    425: context->tx
    [all...]
lirc_sasem.c
    110: int status; /* status of tx completion */
    111: } tx;  [member in struct:sasem_context]
    334: init_completion(&context->tx.finished);
    335: atomic_set(&(context->tx.busy), 1);
    339: atomic_set(&(context->tx.busy), 0);
    345: wait_for_completion(&context->tx.finished);
    348: retval = context->tx.status;
    351: "%s: packet tx failed (%d)\n",
    399: memcpy(context->tx.data_buf, data_buf, n_bytes);
    403: context->tx
    [all...]
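lirc_imon.c and lirc_sasem.c (hits above) share the same transmit handshake: the submitter arms a completion and a busy flag, the URB callback records the status and signals completion, and the submitter then reads tx.status. A minimal sketch of that handshake follows; the names are placeholders and the callback side is an assumption, since the hit list only shows the submit path.

#include <linux/atomic.h>
#include <linux/completion.h>
#include <linux/printk.h>

struct sample_tx_ctx {
    struct completion finished;  /* signalled when the packet is done */
    atomic_t busy;               /* non-zero while a packet is in flight */
    int status;                  /* result recorded by the callback */
};

/* assumed completion side (e.g. a URB completion handler) */
static void sample_tx_callback(struct sample_tx_ctx *ctx, int urb_status)
{
    ctx->status = urb_status;
    atomic_set(&ctx->busy, 0);
    complete(&ctx->finished);
}

/* submit side, mirroring the sequence in the hits above */
static int sample_send_packet(struct sample_tx_ctx *ctx)
{
    int retval;

    init_completion(&ctx->finished);
    atomic_set(&ctx->busy, 1);

    /* ... build and submit the URB here; on submit failure clear busy ... */

    wait_for_completion(&ctx->finished);
    retval = ctx->status;
    if (retval)
        pr_err("%s: packet tx failed (%d)\n", __func__, retval);
    return retval;
}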
lirc_zilog.c
    115: struct IR_tx *tx;  [member in struct:IR]
    194: * ir->tx set to NULL and deallocated - happens before ir->tx->ir put()
    287: struct IR_tx *tx;  [local]
    290: tx = ir->tx;
    291: if (tx != NULL)
    292: kref_get(&tx->ref);
    294: return tx;
    299: struct IR_tx *tx  [local]
    308: put_ir_tx(struct IR_tx *tx, bool ir_devices_lock_held)  [argument]
    333: struct IR_tx *tx;  [local]
    652: send_data_block(struct IR_tx *tx, unsigned char *data_block)  [argument]
    677: send_boot_data(struct IR_tx *tx)  [argument]
    754: fw_load(struct IR_tx *tx)  [argument]
    983: send_code(struct IR_tx *tx, unsigned int code, unsigned int key)  [argument]
    1104: struct IR_tx *tx;  [local]
    1407: struct IR_tx *tx = i2c_get_clientdata(client);  [local]
    1449: struct IR_tx *tx;  [local]
    [all...]
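The lirc_zilog.c hits point at a kref-managed tx object: get_ir_tx() takes a reference with kref_get() and put_ir_tx() drops it. A minimal sketch of that get/put pairing; the struct layout and the release function are assumptions for illustration only.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct sample_ir_tx {
    struct kref ref;    /* lifetime of the tx side of the device */
};

static void sample_ir_tx_release(struct kref *ref)
{
    struct sample_ir_tx *tx = container_of(ref, struct sample_ir_tx, ref);

    kfree(tx);
}

static struct sample_ir_tx *sample_get_ir_tx(struct sample_ir_tx *tx)
{
    if (tx != NULL)
        kref_get(&tx->ref);    /* as at lirc_zilog.c:292 */
    return tx;
}

static void sample_put_ir_tx(struct sample_ir_tx *tx)
{
    if (tx != NULL)
        kref_put(&tx->ref, sample_ir_tx_release);
}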
/drivers/staging/nvec/
nvec.h
    125: * @tx: The message currently being transferred
    153: struct nvec_msg *tx;  [member in struct:nvec_chip]
/drivers/staging/rtl8188eu/core/
rtw_ap.c
    1357: void update_beacon(struct adapter *padapter, u8 ie_id, u8 *oui, u8 tx)  [argument]
    1403: if (tx)
/drivers/staging/rtl8723au/core/
rtw_ap.c
    1244: void update_beacon23a(struct rtw_adapter *padapter, u8 ie_id, u8 *oui, u8 tx)  [argument]
    1303: if (tx)
/drivers/staging/vt6656/
rxtx.h
    211: union vnt_tx tx;  [member in struct:vnt_tx_head::__anon6916]
    215: union vnt_tx tx;  [member in struct:vnt_tx_head::__anon6917]
    219: union vnt_tx tx;  [member in struct:vnt_tx_head::__anon6918]
/drivers/misc/mic/host/
mic_virtio.c
    42: struct dma_async_tx_descriptor *tx;  [local]
    50: tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
    52: if (!tx) {
    56: dma_cookie_t cookie = tx->tx_submit(tx);
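The mic_virtio.c hits show the dmaengine descriptor flow for an asynchronous memcpy: prepare a descriptor, submit it to obtain a cookie, issue the pending work, then wait on the cookie. A minimal sketch of that flow, assuming the channel has been requested and both buffers DMA-mapped by the caller; the function name is a placeholder and error handling is kept to the essentials.

#include <linux/dmaengine.h>
#include <linux/errno.h>

static int sample_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
                             dma_addr_t src, size_t len)
{
    struct dma_async_tx_descriptor *tx;
    dma_cookie_t cookie;

    tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
                                               DMA_PREP_INTERRUPT);
    if (!tx)
        return -ENOMEM;

    cookie = tx->tx_submit(tx);        /* queue the descriptor, as in the hit at line 56 */
    if (dma_submit_error(cookie))
        return -EIO;

    dma_async_issue_pending(chan);     /* kick the channel */
    if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
        return -EIO;

    return 0;
}

dmaengine_submit() is the usual wrapper around the descriptor's tx_submit hook; the hit calls the hook directly, so the sketch does the same.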
/drivers/mmc/card/
sdio_uart.c
    60: __u32 tx;  [member in struct:uart_icount]
    440: port->icount.tx++;
    457: port->icount.tx++;
    979: seq_printf(m, " tx:%d rx:%d",
    980: port->icount.tx, port->icount.rx);
/drivers/mmc/host/
omap.c
    985: struct dma_async_tx_descriptor *tx;  [local]
    1039: tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len,
    1042: if (!tx)
    1047: tx->callback = mmc_omap_dma_callback;
    1048: tx->callback_param = host;
    1049: dmaengine_submit(tx);
    1390: res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
    1394: omap_dma_filter_fn, &sig, &pdev->dev, "tx");
omap_hsmmc.c
    1386: struct dma_async_tx_descriptor *tx;  [local]
    1424: tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len,
    1427: if (!tx) {
    1433: tx->callback = omap_hsmmc_dma_callback;
    1434: tx->callback_param = host;
    1437: dmaengine_submit(tx);
    2159: res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx");
    2191: &tx_req, &pdev->dev, "tx");
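omap.c and omap_hsmmc.c (hits above) both use the standard slave-DMA sequence for MMC data: prepare a slave_sg descriptor over the request's scatterlist, attach a completion callback, submit, and issue pending. A minimal sketch of that sequence with placeholder names; requesting the channel and dma_map_sg() are assumed to have been done already.

#include <linux/dmaengine.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>

static int sample_start_mmc_dma(struct dma_chan *chan, struct scatterlist *sg,
                                unsigned int sg_len, bool is_write,
                                dma_async_tx_callback done, void *done_arg)
{
    struct dma_async_tx_descriptor *tx;

    tx = dmaengine_prep_slave_sg(chan, sg, sg_len,
                                 is_write ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                                 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!tx)
        return -EIO;    /* error path; the real drivers clean up or fall back to PIO */

    tx->callback = done;               /* e.g. omap_hsmmc_dma_callback */
    tx->callback_param = done_arg;

    dmaengine_submit(tx);              /* place the descriptor on the channel queue */
    dma_async_issue_pending(chan);     /* and start it */
    return 0;
}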
rtsx_usb_sdmmc.c
    590: static int sd_change_phase(struct rtsx_usb_sdmmc *host, u8 sample_point, int tx)  [argument]
    596: __func__, tx ? "TX" : "RX", sample_point);
    602: if (tx)
/drivers/mtd/nand/
atmel_nand.c
    367: struct dma_async_tx_descriptor *tx = NULL;  [local]
    406: tx = dma_dev->device_prep_dma_memcpy(host->dma_chan, dma_dst_addr,
    408: if (!tx) {
    414: tx->callback = dma_complete_func;
    415: tx->callback_param = &host->comp;
    417: cookie = tx->tx_submit(tx);
fsmc_nand.c
    560: struct dma_async_tx_descriptor *tx;  [local]
    584: tx = dma_dev->device_prep_dma_memcpy(chan, dma_dst, dma_src,
    586: if (!tx) {
    592: tx->callback = dma_complete;
    593: tx->callback_param = host;
    594: cookie = tx->tx_submit(tx);
nandsim.c
    1392: ssize_t tx;  [local]
    1399: tx = kernel_read(file, pos, buf, count);
    1402: return tx;
    1407: ssize_t tx;  [local]
    1414: tx = kernel_write(file, buf, count, pos);
    1417: return tx;
    1478: ssize_t tx;  [local]
    1485: tx = read_file(ns, ns->cfile, ns->buf.byte, num, pos);
    1486: if (tx != num) {
    1487: NS_ERR("read_page: read error for page %d ret %ld\n", ns->regs.row, (long)tx);
    1548: ssize_t tx;  [local]
    [all...]
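nandsim.c (hits above) backs simulated flash pages with a cache file accessed through kernel_read()/kernel_write(), treating a short transfer as an error. Below is a minimal sketch of the read side, following the four-argument call form visible in the hits (file offset passed by value, as in the kernel these results were indexed from); the wrapper name and message text are placeholders.

#include <linux/fs.h>
#include <linux/printk.h>

static ssize_t sample_read_page(struct file *file, void *buf, size_t num,
                                loff_t pos)
{
    ssize_t tx = kernel_read(file, pos, buf, num);    /* as at nandsim.c:1399 */

    if (tx != (ssize_t)num)
        pr_err("read error at offset %lld: ret %ld\n",
               (long long)pos, (long)tx);
    return tx;
}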
omap2.c
    459: struct dma_async_tx_descriptor *tx;  [local]
    488: tx = dmaengine_prep_slave_sg(info->dma, &sg, n,
    491: if (!tx)
    494: tx->callback = omap_nand_dma_callback;
    495: tx->callback_param = &info->comp;
    496: dmaengine_submit(tx);
/drivers/net/can/
grcan.c
    227: struct grcan_dma_buffer tx;  [member in struct:grcan_dma]
    265: * frames can be echoed back. See the "Notes on the tx cyclic buffer
    270: /* Lock for controlling changes to the netif tx queue state, accesses to
    277: * whether to wake up the tx queue.
    279: * The tx queue must never be woken up if there is a running reset or
    531: dma->tx.size);
    562: txrd = grcan_ring_add(txrd, GRCAN_MSG_SIZE, dma->tx.size);
    602: netdev_dbg(dev, "tx message lost\n");
    699: if (grcan_txspace(dma->tx.size, txwr,
    727: txrx = "on tx ";
    [all...]
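grcan.c (hits above) drives its tx side from a cyclic DMA buffer, advancing read/write offsets with grcan_ring_add() and checking room with grcan_txspace(). The helpers below are illustrative stand-ins for that wrap-around arithmetic, not the driver's actual implementations.

/* advance a ring offset by one slot, wrapping at the end of the buffer */
static unsigned int sample_ring_add(unsigned int pos, unsigned int slot_size,
                                    unsigned int ring_size)
{
    pos += slot_size;
    return pos < ring_size ? pos : pos - ring_size;
}

/* bytes free for new frames; one slot stays unused so that a full ring
 * can be told apart from an empty one */
static unsigned int sample_ring_space(unsigned int ring_size, unsigned int wr,
                                      unsigned int rd, unsigned int slot_size)
{
    unsigned int used = wr >= rd ? wr - rd : ring_size - rd + wr;

    return used + slot_size >= ring_size ? 0 : ring_size - used - slot_size;
}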