dma-helpers: Fix race condition between continue_after_map_failure and dma_aio_cancel

If the DMA's owning thread cancels the I/O while the bounce buffer's owning thread
is notifying the "cpu client list", a use-after-free occurs:

     continue_after_map_failure               dma_aio_cancel
     ------------------------------------------------------------------
     aio_bh_new
                                              qemu_bh_delete
     qemu_bh_schedule (use after free)

Also, the old code does not run the BH in the DMA request's own AioContext.

Fix both problems by passing a QEMUBH to cpu_register_map_client.
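
For reference, here is a minimal sketch of how a caller on the DMA side is
expected to use the reworked API. The DMARequest structure, its fields and the
reschedule_dma/wait_for_map/cancel_dma names are illustrative only; just the
cpu_register_map_client()/cpu_unregister_map_client() and BH calls reflect the
real interface:

    /* Sketch only: everything except the map-client and BH API is assumed. */
    #include "block/aio.h"        /* QEMUBH, aio_bh_new(), qemu_bh_delete() */
    #include "exec/cpu-common.h"  /* cpu_register_map_client(), cpu_unregister_map_client() */

    typedef struct DMARequest {
        AioContext *ctx;   /* AioContext this request belongs to */
        QEMUBH *bh;        /* retry BH, owned by the request, not by exec.c */
    } DMARequest;

    static void reschedule_dma(void *opaque)
    {
        /* retry the mapping now that bounce buffer space is available */
    }

    /* Map-failure path: create the BH in the request's own AioContext and
     * hand it to exec.c.  The notifier merely schedules it, so it always
     * runs in the right AioContext. */
    static void wait_for_map(DMARequest *req)
    {
        req->bh = aio_bh_new(req->ctx, reschedule_dma, req);
        cpu_register_map_client(req->bh);
    }

    /* Cancel path: unregister first (this takes map_client_list_lock inside
     * exec.c, so the notifier can no longer schedule the BH), then delete
     * the BH.  This closes the use-after-free window shown above. */
    static void cancel_dma(DMARequest *req)
    {
        if (req->bh) {
            cpu_unregister_map_client(req->bh);
            qemu_bh_delete(req->bh);
            req->bh = NULL;
        }
    }

The key design point is that the caller keeps ownership of the BH: exec.c only
schedules it, so creation, registration, unregistration and deletion all stay
on the caller's side.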

Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <1426496617-10702-6-git-send-email-famz@redhat.com>
[Remove unnecessary forward declaration. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/exec.c b/exec.c
index 2c87f1d..065f5e8 100644
--- a/exec.c
+++ b/exec.c
@@ -2479,8 +2479,7 @@
 static BounceBuffer bounce;
 
 typedef struct MapClient {
-    void *opaque;
-    void (*callback)(void *opaque);
+    QEMUBH *bh;
     QLIST_ENTRY(MapClient) link;
 } MapClient;
 
@@ -2488,31 +2487,34 @@
 static QLIST_HEAD(map_client_list, MapClient) map_client_list
     = QLIST_HEAD_INITIALIZER(map_client_list);
 
-static void cpu_unregister_map_client(void *_client);
+static void cpu_unregister_map_client_do(MapClient *client)
+{
+    QLIST_REMOVE(client, link);
+    g_free(client);
+}
+
 static void cpu_notify_map_clients_locked(void)
 {
     MapClient *client;
 
     while (!QLIST_EMPTY(&map_client_list)) {
         client = QLIST_FIRST(&map_client_list);
-        client->callback(client->opaque);
-        cpu_unregister_map_client(client);
+        qemu_bh_schedule(client->bh);
+        cpu_unregister_map_client_do(client);
     }
 }
 
-void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
+void cpu_register_map_client(QEMUBH *bh)
 {
     MapClient *client = g_malloc(sizeof(*client));
 
     qemu_mutex_lock(&map_client_list_lock);
-    client->opaque = opaque;
-    client->callback = callback;
+    client->bh = bh;
     QLIST_INSERT_HEAD(&map_client_list, client, link);
     if (!atomic_read(&bounce.in_use)) {
         cpu_notify_map_clients_locked();
     }
     qemu_mutex_unlock(&map_client_list_lock);
-    return client;
 }
 
 void cpu_exec_init_all(void)
@@ -2523,12 +2525,18 @@
     qemu_mutex_init(&map_client_list_lock);
 }
 
-static void cpu_unregister_map_client(void *_client)
+void cpu_unregister_map_client(QEMUBH *bh)
 {
-    MapClient *client = (MapClient *)_client;
+    MapClient *client;
 
-    QLIST_REMOVE(client, link);
-    g_free(client);
+    qemu_mutex_lock(&map_client_list_lock);
+    QLIST_FOREACH(client, &map_client_list, link) {
+        if (client->bh == bh) {
+            cpu_unregister_map_client_do(client);
+            break;
+        }
+    }
+    qemu_mutex_unlock(&map_client_list_lock);
 }
 
 static void cpu_notify_map_clients(void)