/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block.h"
#include "qemu-queue.h"
#include "qemu_socket.h"

typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

struct AioHandler
{
    int fd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    AioProcessQueue *io_process_queue;
    int deleted;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

static AioHandler *find_aio_handler(int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &aio_handlers, node) {
        if (node->fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

int qemu_aio_set_fd_handler(int fd,
                            IOHandler *io_read,
                            IOHandler *io_write,
                            AioFlushHandler *io_flush,
                            AioProcessQueue *io_process_queue,
                            void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If the lock is held, just mark the node as deleted */
            if (walking_handlers) {
                node->deleted = 1;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                qemu_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = qemu_mallocz(sizeof(AioHandler));
            node->fd = fd;
            QLIST_INSERT_HEAD(&aio_handlers, node, node);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->io_process_queue = io_process_queue;
        node->opaque = opaque;
    }

    qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque);

    return 0;
}

void qemu_aio_flush(void)
{
    AioHandler *node;
    int ret;

    do {
        ret = 0;

        /*
         * If there are pending emulated aio requests, start them now so that
         * io_flush will be able to return 1.
         */
        qemu_aio_wait();

        QLIST_FOREACH(node, &aio_handlers, node) {
            if (node->io_flush) {
                ret |= node->io_flush(node->opaque);
            }
        }
    } while (qemu_bh_poll() || ret > 0);
}

int qemu_aio_process_queue(void)
{
    AioHandler *node;
    int ret = 0;

    walking_handlers = 1;

    QLIST_FOREACH(node, &aio_handlers, node) {
        if (node->io_process_queue) {
            if (node->io_process_queue(node->opaque)) {
                ret = 1;
            }
        }
    }

    walking_handlers = 0;

    return ret;
}

void qemu_aio_wait(void)
{
    int ret;

    if (qemu_bh_poll())
        return;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Return afterwards to avoid waiting needlessly in select().
     */
    if (qemu_aio_process_queue())
        return;

    do {
        AioHandler *node;
        fd_set rdfds, wrfds;
        int max_fd = -1;

        walking_handlers = 1;

        FD_ZERO(&rdfds);
        FD_ZERO(&wrfds);

        /* fill fd sets */
        QLIST_FOREACH(node, &aio_handlers, node) {
            /* Skip fds with no pending AIO operations (io_flush() returns 0).
             * Including them could leave select() blocked indefinitely, since
             * no completion will ever arrive on an idle fd.
             */
            if (node->io_flush && node->io_flush(node->opaque) == 0)
                continue;

            if (!node->deleted && node->io_read) {
                FD_SET(node->fd, &rdfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
            if (!node->deleted && node->io_write) {
                FD_SET(node->fd, &wrfds);
                max_fd = MAX(max_fd, node->fd + 1);
            }
        }

        walking_handlers = 0;

        /* No AIO operations?  Get us out of here */
        if (max_fd == -1)
            break;

        /* wait until next event */
        ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
        if (ret == -1 && errno == EINTR)
            continue;

        /* if we have any readable or writable fds, dispatch events */
        if (ret > 0) {
            walking_handlers = 1;

            /* we have to walk very carefully in case
             * qemu_aio_set_fd_handler is called while we're walking */
            node = QLIST_FIRST(&aio_handlers);
            while (node) {
                AioHandler *tmp;

                if (!node->deleted &&
                    FD_ISSET(node->fd, &rdfds) &&
                    node->io_read) {
                    node->io_read(node->opaque);
                }
                if (!node->deleted &&
                    FD_ISSET(node->fd, &wrfds) &&
                    node->io_write) {
                    node->io_write(node->opaque);
                }

                tmp = node;
                node = QLIST_NEXT(node, node);

                if (tmp->deleted) {
                    QLIST_REMOVE(tmp, node);
                    qemu_free(tmp);
                }
            }

            walking_handlers = 0;
        }
    } while (ret == 0);
}
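
/*
 * Illustrative usage (a hedged sketch added for clarity, not part of the
 * original file): an emulated AIO backend would register its completion fd
 * with qemu_aio_set_fd_handler() and drain outstanding requests with
 * qemu_aio_flush().  The callback names (my_completion_cb, my_flush_cb,
 * my_process_queue_cb) and the state pointer `s` are hypothetical:
 *
 *     qemu_aio_set_fd_handler(s->fd, my_completion_cb, NULL,
 *                             my_flush_cb, my_process_queue_cb, s);
 *
 *     ... submit requests that complete by making s->fd readable ...
 *
 *     qemu_aio_flush();   // loops until my_flush_cb() reports nothing in flight
 *
 *     // Passing NULL for both io_read and io_write unregisters the handler.
 *     qemu_aio_set_fd_handler(s->fd, NULL, NULL, NULL, NULL, NULL);
 */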