/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 * tunable by the guest. If we get more outstanding requests at a time than
 * this, we will get EAGAIN from io_submit, which is communicated to the
 * guest as an I/O error.
 */
#define MAX_EVENTS 128

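/*
 * Per-request state. The libaio iocb is embedded so that a completion
 * event's iocb pointer can be mapped back to the request with
 * container_of(). A request completes either by re-entering the coroutine
 * in 'co' or, on the callback-based path, through 'common'.
 */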
31struct qemu_laiocb {
Markus Armbruster7c84b1b2014-10-07 13:59:14 +020032 BlockAIOCB common;
Kevin Wolf2174f122014-08-06 17:18:07 +020033 Coroutine *co;
Paolo Bonzinidd7f7ed2016-04-07 18:33:35 +020034 LinuxAioState *ctx;
Christoph Hellwig5c6c3a62009-08-20 16:58:35 +020035 struct iocb iocb;
36 ssize_t ret;
37 size_t nbytes;
Kevin Wolfb161e2e2011-10-13 15:42:52 +020038 QEMUIOVector *qiov;
39 bool is_read;
Paolo Bonzini28b24082014-12-11 14:52:26 +010040 QSIMPLEQ_ENTRY(qemu_laiocb) next;
Christoph Hellwig5c6c3a62009-08-20 16:58:35 +020041};
42
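/*
 * Submission queue state: requests wait on 'pending' until they are pushed
 * to the kernel. 'in_queue' counts queued requests, 'in_flight' counts
 * requests the kernel has accepted, and 'blocked' is set when io_submit()
 * could not take the whole queue, so submission must be retried once
 * completions free up slots.
 */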
typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batched submission */
    LaioQueue io_q;

    /* I/O completion processing */
    QEMUBH *completion_bh;
    struct io_event events[MAX_EVENTS];
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

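/*
 * Reassemble the 64-bit request status from the res2 (high) and res (low)
 * halves of the completion event.
 */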
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request: re-enters the waiting coroutine or, on the
 * callback-based path, invokes the callback and frees the ACB.
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                    laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;
    if (laiocb->co) {
        qemu_coroutine_enter(laiocb->co);
    } else {
        laiocb->common.cb(laiocb->common.opaque, ret);
        qemu_aio_unref(laiocb);
    }
}

/* The completion BH fetches completed I/O requests and invokes their
 * callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll(). In order to do this,
 * the completion events array and index are kept in LinuxAioState. The BH
 * reschedules itself as long as there are completions pending so it will
 * either be called again in a nested event loop or will be called after all
 * events have been completed. When there are no events left to complete, the
 * BH returns without rescheduling.
 */
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    /* Fetch more completion events when empty */
    if (s->event_idx == s->event_max) {
        do {
            struct timespec ts = { 0 };
            s->event_max = io_getevents(s->ctx, MAX_EVENTS, MAX_EVENTS,
                                        s->events, &ts);
        } while (s->event_max == -EINTR);

        s->event_idx = 0;
        if (s->event_max <= 0) {
            s->event_max = 0;
            return; /* no more events */
        }
        s->io_q.in_flight -= s->event_max;
    }

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    /* Process completion events */
    while (s->event_idx < s->event_max) {
        struct iocb *iocb = s->events[s->event_idx].obj;
        struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

        laiocb->ret = io_event_ret(&s->events[s->event_idx]);
        s->event_idx++;

        qemu_laio_process_completion(laiocb);
    }

    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }

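    /*
     * All fetched events were processed without entering a nested event
     * loop, so the reschedule above is no longer needed; cancel it.
     */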
    qemu_bh_cancel(s->completion_bh);
}

static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_completion_bh(s);
    }
}

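/*
 * Attempt to cancel a request. Kernel-side cancellation via io_cancel()
 * rarely succeeds for regular file I/O; if it fails, the request simply
 * completes through the event loop later.
 */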
static void laio_cancel(BlockAIOCB *blockacb)
{
    struct qemu_laiocb *laiocb = (struct qemu_laiocb *)blockacb;
    struct io_event event;
    int ret;

    if (laiocb->ret != -EINPROGRESS) {
        return;
    }
    ret = io_cancel(laiocb->ctx->ctx, &laiocb->iocb, &event);
    laiocb->ret = -ECANCELED;
    if (ret != 0) {
        /* iocb is not cancelled; the callback will be invoked by the event
         * loop later */
        return;
    }

    laiocb->common.cb(laiocb->common.opaque, laiocb->ret);
}

static const AIOCBInfo laio_aiocb_info = {
    .aiocb_size         = sizeof(struct qemu_laiocb),
    .cancel_async       = laio_cancel,
};

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

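/*
 * Push pending requests to the kernel in batches. io_submit() may accept
 * only a prefix of the iocb array, so accepted requests are split off the
 * pending queue and the loop retries with the remainder. On EAGAIN the
 * queue is left intact and marked blocked until completions free up slots;
 * any other error fails the first request and retries the rest.
 */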
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);
}

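/*
 * laio_io_plug()/laio_io_unplug() bracket a batch of submissions: while the
 * plug count is non-zero, new requests are only queued, and the queue is
 * flushed when the last plug is removed (or earlier, once enough requests
 * accumulate to fill the MAX_EVENTS window; see laio_do_submit()).
 */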
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s)
{
    assert(s->io_q.plugged);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
}

static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other AIO operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

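    /*
     * Queue the request, then submit right away unless a plug is in effect
     * and there is still room to keep batching, or a previous io_submit()
     * returned EAGAIN and we must wait for completions first.
     */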
    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_EVENTS)) {
        ioq_submit(s);
    }

    return 0;
}

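/*
 * Coroutine entry point: the request lives on this function's stack. The
 * coroutine yields after submission and is re-entered by
 * qemu_laio_process_completion() once the result is in laiocb.ret.
 * Sketch of a caller (names hypothetical), assuming coroutine context:
 *
 *     ret = laio_co_submit(bs, aio, fd, 0, &qiov, QEMU_AIO_READ);
 */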
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type);
    if (ret < 0) {
        return ret;
    }

    qemu_coroutine_yield();
    return laiocb.ret;
}

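/*
 * Callback-based entry point: allocates an ACB whose ret stays
 * -EINPROGRESS until the request completes; returns NULL if submission
 * fails outright. Sketch of a caller (names hypothetical):
 *
 *     acb = laio_submit(bs, aio, fd, sector_num, qiov, nb_sectors,
 *                       my_done_cb, my_opaque, QEMU_AIO_WRITE);
 */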
BlockAIOCB *laio_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
        int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
        BlockCompletionFunc *cb, void *opaque, int type)
{
    struct qemu_laiocb *laiocb;
    off_t offset = sector_num * BDRV_SECTOR_SIZE;
    int ret;

    laiocb = qemu_aio_get(&laio_aiocb_info, bs, cb, opaque);
    laiocb->nbytes = nb_sectors * BDRV_SECTOR_SIZE;
    laiocb->ctx = s;
    laiocb->ret = -EINPROGRESS;
    laiocb->is_read = (type == QEMU_AIO_READ);
    laiocb->qiov = qiov;

    ret = laio_do_submit(fd, laiocb, offset, type);
    if (ret < 0) {
        qemu_aio_unref(laiocb);
        return NULL;
    }

    return &laiocb->common;
}

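/*
 * Re-homing between AioContexts: the completion BH and the eventfd handler
 * are tied to a specific context, so they are torn down on detach and
 * recreated on attach.
 */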
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL);
    qemu_bh_delete(s->completion_bh);
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb);
}

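/*
 * Set up the state: an eventfd for completion notification and a kernel
 * AIO context sized for up to MAX_EVENTS in-flight requests.
 */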
LinuxAioState *laio_init(void)
{
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    if (event_notifier_init(&s->e, false) < 0) {
        goto out_free_state;
    }

    if (io_setup(MAX_EVENTS, &s->ctx) != 0) {
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                __func__, &s->ctx);
    }
    g_free(s);
}