dma-helpers.c revision 5d8f37ad78fc66901af50c762029a501561f3b23
/*
 * DMA helper functions
 *
 * Copyright (c) 2009 Red Hat
 *
 * This work is licensed under the terms of the GNU General Public License
 * (GNU GPL), version 2 or later.
 */

#include "dma.h"
#include "block_int.h"

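/* Initialize qsg with space preallocated for alloc_hint entries. */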
void qemu_sglist_init(QEMUSGList *qsg, int alloc_hint)
{
    qsg->sg = qemu_malloc(alloc_hint * sizeof(ScatterGatherEntry));
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
}

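/*
 * Append the region [base, base + len) to the list, growing the entry
 * array geometrically when it is full.
 */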
void qemu_sglist_add(QEMUSGList *qsg, target_phys_addr_t base,
                     target_phys_addr_t len)
{
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = qemu_realloc(qsg->sg, qsg->nalloc * sizeof(ScatterGatherEntry));
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;
}

void qemu_sglist_destroy(QEMUSGList *qsg)
{
    qemu_free(qsg->sg);
}

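/*
 * State for one in-flight scatter-gather request.  The transfer is
 * driven by dma_bdrv_cb(), which keeps a cursor (sg_cur_index,
 * sg_cur_byte) into the scatter-gather list.
 */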
typedef struct {
    BlockDriverAIOCB common;
    BlockDriverState *bs;
    BlockDriverAIOCB *acb;
    QEMUSGList *sg;
    uint64_t sector_num;
    int is_write;
    int sg_cur_index;
    target_phys_addr_t sg_cur_byte;
    QEMUIOVector iov;
    QEMUBH *bh;
} DMAAIOCB;

static void dma_bdrv_cb(void *opaque, int ret);

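/* Bottom half that restarts the transfer after a failed mapping attempt. */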
static void reschedule_dma(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_bdrv_cb(opaque, 0);
}

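/*
 * Map-client callback: invoked once mapping resources become available
 * again after cpu_physical_memory_map() failed.  Restart the transfer
 * from a bottom half.
 */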
static void continue_after_map_failure(void *opaque)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;

    dbs->bh = qemu_bh_new(reschedule_dma, dbs);
    qemu_bh_schedule(dbs->bh);
}

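/* Unmap every buffer mapped for the most recent pass over the sg list. */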
static void dma_bdrv_unmap(DMAAIOCB *dbs)
{
    int i;

    for (i = 0; i < dbs->iov.niov; ++i) {
        cpu_physical_memory_unmap(dbs->iov.iov[i].iov_base,
                                  dbs->iov.iov[i].iov_len, !dbs->is_write,
                                  dbs->iov.iov[i].iov_len);
    }
}

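/*
 * Completion callback and main driver of the transfer.  Each pass unmaps
 * the buffers of the previous pass, maps as much of the remaining
 * scatter-gather list as possible into dbs->iov, and submits a single
 * readv/writev request with this function as its completion callback.
 * The transfer finishes when the cursor reaches the end of the list or
 * an error is reported.
 */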
static void dma_bdrv_cb(void *opaque, int ret)
{
    DMAAIOCB *dbs = (DMAAIOCB *)opaque;
    target_phys_addr_t cur_addr, cur_len;
    void *mem;

    dbs->acb = NULL;
    dbs->sector_num += dbs->iov.size / 512;
    dma_bdrv_unmap(dbs);
    qemu_iovec_reset(&dbs->iov);

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dbs->common.cb(dbs->common.opaque, ret);
        qemu_iovec_destroy(&dbs->iov);
        qemu_aio_release(dbs);
        return;
    }

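    /*
     * Map consecutive sg entries until we run out of entries or until
     * cpu_physical_memory_map() refuses (e.g. no bounce buffer is
     * available).  A mapping may come back shorter than requested, in
     * which case the cursor stays within the current entry.
     */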
    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = cpu_physical_memory_map(cur_addr, &cur_len, !dbs->is_write);
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

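    /*
     * Nothing could be mapped: register a map client so the transfer is
     * retried once mapping resources are released.
     */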
    if (dbs->iov.size == 0) {
        cpu_register_map_client(dbs, continue_after_map_failure);
        return;
    }

    if (dbs->is_write) {
        dbs->acb = bdrv_aio_writev(dbs->bs, dbs->sector_num, &dbs->iov,
                                   dbs->iov.size / 512, dma_bdrv_cb, dbs);
    } else {
        dbs->acb = bdrv_aio_readv(dbs->bs, dbs->sector_num, &dbs->iov,
                                  dbs->iov.size / 512, dma_bdrv_cb, dbs);
    }
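    /*
     * Submission failed: clean up and leave dbs->acb NULL.  dma_bdrv_io()
     * detects this after the first, synchronous pass and fails the
     * request by returning NULL.
     */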
    if (!dbs->acb) {
        dma_bdrv_unmap(dbs);
        qemu_iovec_destroy(&dbs->iov);
        return;
    }
}

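/* Cancel the underlying block-layer request, if one is in flight. */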
static void dma_aio_cancel(BlockDriverAIOCB *acb)
{
    DMAAIOCB *dbs = container_of(acb, DMAAIOCB, common);

    if (dbs->acb) {
        bdrv_aio_cancel(dbs->acb);
    }
}

static AIOPool dma_aio_pool = {
    .aiocb_size         = sizeof(DMAAIOCB),
    .cancel             = dma_aio_cancel,
};

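/*
 * Common implementation of dma_bdrv_read() and dma_bdrv_write():
 * allocate the request, run the first pass of dma_bdrv_cb()
 * synchronously, and return NULL if no request could be submitted.
 */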
static BlockDriverAIOCB *dma_bdrv_io(
    BlockDriverState *bs, QEMUSGList *sg, uint64_t sector_num,
    BlockDriverCompletionFunc *cb, void *opaque,
    int is_write)
{
    DMAAIOCB *dbs = qemu_aio_get(&dma_aio_pool, bs, cb, opaque);

    dbs->acb = NULL;
    dbs->bs = bs;
    dbs->sg = sg;
    dbs->sector_num = sector_num;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->is_write = is_write;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_bdrv_cb(dbs, 0);
    if (!dbs->acb) {
        qemu_aio_release(dbs);
        return NULL;
    }
    return &dbs->common;
}

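/*
 * Public entry points: start an asynchronous scatter-gather read or
 * write at the given sector and invoke cb(opaque, ret) once the whole
 * transfer has completed, with ret < 0 on error.
 */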
BlockDriverAIOCB *dma_bdrv_read(BlockDriverState *bs,
                                QEMUSGList *sg, uint64_t sector,
                                void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 0);
}

BlockDriverAIOCB *dma_bdrv_write(BlockDriverState *bs,
                                 QEMUSGList *sg, uint64_t sector,
                                 void (*cb)(void *opaque, int ret), void *opaque)
{
    return dma_bdrv_io(bs, sg, sector, cb, opaque, 1);
}

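/*
 * Example usage (an illustrative sketch only, not part of this file;
 * MyDeviceState and its fields are hypothetical).  A device model
 * builds a QEMUSGList from guest descriptors, starts the transfer, and
 * tears the list down in its completion callback:
 */
#if 0
static void my_dma_complete(void *opaque, int ret)
{
    MyDeviceState *s = opaque;

    qemu_sglist_destroy(&s->sg);
    /* signal completion to the guest; ret < 0 indicates an I/O error */
}

static void my_dma_start_read(MyDeviceState *s, uint64_t sector)
{
    qemu_sglist_init(&s->sg, 4);
    qemu_sglist_add(&s->sg, s->buf_addr, s->buf_len);

    if (!dma_bdrv_read(s->bs, &s->sg, sector, my_dma_complete, s)) {
        /* submission failed and my_dma_complete will not run */
        qemu_sglist_destroy(&s->sg);
    }
}
#endif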