aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 1 | /* |
| 2 | * QEMU aio implementation |
| 3 | * |
| 4 | * Copyright IBM, Corp. 2008 |
| 5 | * |
| 6 | * Authors: |
| 7 | * Anthony Liguori <aliguori@us.ibm.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 10 | * the COPYING file in the top-level directory. |
| 11 | * |
| 12 | */ |
| 13 | |
| 14 | #include "qemu-common.h" |
| 15 | #include "block.h" |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 16 | #include "qemu-queue.h" |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 17 | #include "qemu_socket.h" |
| 18 | |
| 19 | typedef struct AioHandler AioHandler; |
| 20 | |
| 21 | /* The list of registered AIO handlers */ |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 22 | static QLIST_HEAD(, AioHandler) aio_handlers; |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 23 | |
/* This is a simple busy flag (not a real lock) used to protect the
 * aio_handlers list.  Specifically, it is set to ensure that no callbacks
 * are removed while we're walking and dispatching callbacks.
 */
| 28 | static int walking_handlers; |
| 29 | |
/* One registered fd handler; linked into the global aio_handlers list. */
struct AioHandler
{
    int fd;                            /* file descriptor being watched */
    IOHandler *io_read;                /* called when fd is readable */
    IOHandler *io_write;               /* called when fd is writable */
    AioFlushHandler *io_flush;         /* nonzero while AIO is pending on this fd (see qemu_aio_wait) */
    AioProcessQueue *io_process_queue; /* drains queued completions; nonzero if any ran */
    int deleted;                       /* deferred-removal flag, set while a walk is in progress */
    void *opaque;                      /* caller state passed to every callback */
    QLIST_ENTRY(AioHandler) node;      /* linkage in aio_handlers */
};
| 41 | |
| 42 | static AioHandler *find_aio_handler(int fd) |
| 43 | { |
| 44 | AioHandler *node; |
| 45 | |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 46 | QLIST_FOREACH(node, &aio_handlers, node) { |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 47 | if (node->fd == fd) |
Alexander Graf | 79d5ca5 | 2009-05-06 02:58:48 +0200 | [diff] [blame] | 48 | if (!node->deleted) |
| 49 | return node; |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 50 | } |
| 51 | |
| 52 | return NULL; |
| 53 | } |
| 54 | |
| 55 | int qemu_aio_set_fd_handler(int fd, |
| 56 | IOHandler *io_read, |
| 57 | IOHandler *io_write, |
| 58 | AioFlushHandler *io_flush, |
Kevin Wolf | 8febfa2 | 2009-10-22 17:54:36 +0200 | [diff] [blame] | 59 | AioProcessQueue *io_process_queue, |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 60 | void *opaque) |
| 61 | { |
| 62 | AioHandler *node; |
| 63 | |
| 64 | node = find_aio_handler(fd); |
| 65 | |
| 66 | /* Are we deleting the fd handler? */ |
| 67 | if (!io_read && !io_write) { |
| 68 | if (node) { |
| 69 | /* If the lock is held, just mark the node as deleted */ |
| 70 | if (walking_handlers) |
| 71 | node->deleted = 1; |
| 72 | else { |
| 73 | /* Otherwise, delete it for real. We can't just mark it as |
| 74 | * deleted because deleted nodes are only cleaned up after |
| 75 | * releasing the walking_handlers lock. |
| 76 | */ |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 77 | QLIST_REMOVE(node, node); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 78 | qemu_free(node); |
| 79 | } |
| 80 | } |
| 81 | } else { |
| 82 | if (node == NULL) { |
| 83 | /* Alloc and insert if it's not already there */ |
| 84 | node = qemu_mallocz(sizeof(AioHandler)); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 85 | node->fd = fd; |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 86 | QLIST_INSERT_HEAD(&aio_handlers, node, node); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 87 | } |
| 88 | /* Update handler with latest information */ |
| 89 | node->io_read = io_read; |
| 90 | node->io_write = io_write; |
| 91 | node->io_flush = io_flush; |
Kevin Wolf | 8febfa2 | 2009-10-22 17:54:36 +0200 | [diff] [blame] | 92 | node->io_process_queue = io_process_queue; |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 93 | node->opaque = opaque; |
| 94 | } |
| 95 | |
| 96 | qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque); |
| 97 | |
| 98 | return 0; |
| 99 | } |
| 100 | |
| 101 | void qemu_aio_flush(void) |
| 102 | { |
| 103 | AioHandler *node; |
| 104 | int ret; |
| 105 | |
| 106 | do { |
| 107 | ret = 0; |
| 108 | |
Andrea Arcangeli | 986c28d | 2009-06-15 13:52:27 +0200 | [diff] [blame] | 109 | /* |
| 110 | * If there are pending emulated aio start them now so flush |
| 111 | * will be able to return 1. |
| 112 | */ |
| 113 | qemu_aio_wait(); |
| 114 | |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 115 | QLIST_FOREACH(node, &aio_handlers, node) { |
Avi Kivity | c53a728 | 2010-05-16 14:59:57 +0300 | [diff] [blame] | 116 | if (node->io_flush) { |
| 117 | ret |= node->io_flush(node->opaque); |
| 118 | } |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 119 | } |
Nolan | 6e5d97d | 2009-07-20 14:01:25 -0700 | [diff] [blame] | 120 | } while (qemu_bh_poll() || ret > 0); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 121 | } |
| 122 | |
Kevin Wolf | 8febfa2 | 2009-10-22 17:54:36 +0200 | [diff] [blame] | 123 | int qemu_aio_process_queue(void) |
| 124 | { |
| 125 | AioHandler *node; |
| 126 | int ret = 0; |
| 127 | |
| 128 | walking_handlers = 1; |
| 129 | |
| 130 | QLIST_FOREACH(node, &aio_handlers, node) { |
| 131 | if (node->io_process_queue) { |
| 132 | if (node->io_process_queue(node->opaque)) { |
| 133 | ret = 1; |
| 134 | } |
| 135 | } |
| 136 | } |
| 137 | |
| 138 | walking_handlers = 0; |
| 139 | |
| 140 | return ret; |
| 141 | } |
| 142 | |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 143 | void qemu_aio_wait(void) |
| 144 | { |
| 145 | int ret; |
| 146 | |
| 147 | if (qemu_bh_poll()) |
| 148 | return; |
| 149 | |
Kevin Wolf | 8febfa2 | 2009-10-22 17:54:36 +0200 | [diff] [blame] | 150 | /* |
| 151 | * If there are callbacks left that have been queued, we need to call then. |
| 152 | * Return afterwards to avoid waiting needlessly in select(). |
| 153 | */ |
| 154 | if (qemu_aio_process_queue()) |
| 155 | return; |
| 156 | |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 157 | do { |
| 158 | AioHandler *node; |
| 159 | fd_set rdfds, wrfds; |
| 160 | int max_fd = -1; |
| 161 | |
| 162 | walking_handlers = 1; |
| 163 | |
aliguori | f71903d | 2008-10-12 21:19:57 +0000 | [diff] [blame] | 164 | FD_ZERO(&rdfds); |
| 165 | FD_ZERO(&wrfds); |
| 166 | |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 167 | /* fill fd sets */ |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 168 | QLIST_FOREACH(node, &aio_handlers, node) { |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 169 | /* If there aren't pending AIO operations, don't invoke callbacks. |
| 170 | * Otherwise, if there are no AIO requests, qemu_aio_wait() would |
| 171 | * wait indefinitely. |
| 172 | */ |
| 173 | if (node->io_flush && node->io_flush(node->opaque) == 0) |
| 174 | continue; |
| 175 | |
| 176 | if (!node->deleted && node->io_read) { |
| 177 | FD_SET(node->fd, &rdfds); |
| 178 | max_fd = MAX(max_fd, node->fd + 1); |
| 179 | } |
| 180 | if (!node->deleted && node->io_write) { |
| 181 | FD_SET(node->fd, &wrfds); |
| 182 | max_fd = MAX(max_fd, node->fd + 1); |
| 183 | } |
| 184 | } |
| 185 | |
| 186 | walking_handlers = 0; |
| 187 | |
| 188 | /* No AIO operations? Get us out of here */ |
| 189 | if (max_fd == -1) |
| 190 | break; |
| 191 | |
| 192 | /* wait until next event */ |
| 193 | ret = select(max_fd, &rdfds, &wrfds, NULL, NULL); |
| 194 | if (ret == -1 && errno == EINTR) |
| 195 | continue; |
| 196 | |
| 197 | /* if we have any readable fds, dispatch event */ |
| 198 | if (ret > 0) { |
| 199 | walking_handlers = 1; |
| 200 | |
| 201 | /* we have to walk very carefully in case |
| 202 | * qemu_aio_set_fd_handler is called while we're walking */ |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 203 | node = QLIST_FIRST(&aio_handlers); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 204 | while (node) { |
| 205 | AioHandler *tmp; |
| 206 | |
| 207 | if (!node->deleted && |
| 208 | FD_ISSET(node->fd, &rdfds) && |
| 209 | node->io_read) { |
| 210 | node->io_read(node->opaque); |
| 211 | } |
| 212 | if (!node->deleted && |
| 213 | FD_ISSET(node->fd, &wrfds) && |
| 214 | node->io_write) { |
| 215 | node->io_write(node->opaque); |
| 216 | } |
| 217 | |
| 218 | tmp = node; |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 219 | node = QLIST_NEXT(node, node); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 220 | |
| 221 | if (tmp->deleted) { |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 222 | QLIST_REMOVE(tmp, node); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 223 | qemu_free(tmp); |
| 224 | } |
| 225 | } |
| 226 | |
| 227 | walking_handlers = 0; |
| 228 | } |
| 229 | } while (ret == 0); |
| 230 | } |