/*
 * Win32 implementation for mutex/cond/thread functions
 *
 * Copyright Red Hat, Inc. 2010
 *
 * Author:
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/notify.h"
#include <process.h>
#include <assert.h>
#include <limits.h>

static bool name_threads;

void qemu_thread_naming(bool enable)
{
    /* But note we don't actually name them on Windows yet */
    name_threads = enable;

    fprintf(stderr, "qemu: thread naming not supported on this host\n");
}

static void error_exit(int err, const char *msg)
{
    char *pstr;

    FormatMessage(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_ALLOCATE_BUFFER,
                  NULL, err, 0, (LPTSTR)&pstr, 2, NULL);
    fprintf(stderr, "qemu: %s: %s\n", msg, pstr);
    LocalFree(pstr);
    abort();
}

void qemu_mutex_init(QemuMutex *mutex)
{
    mutex->owner = 0;
    InitializeCriticalSection(&mutex->lock);
}

void qemu_mutex_destroy(QemuMutex *mutex)
{
    assert(mutex->owner == 0);
    DeleteCriticalSection(&mutex->lock);
}

void qemu_mutex_lock(QemuMutex *mutex)
{
    EnterCriticalSection(&mutex->lock);

    /* Win32 CRITICAL_SECTIONs are recursive. Assert that we're not
     * using them as such.
     */
    assert(mutex->owner == 0);
    mutex->owner = GetCurrentThreadId();
}

int qemu_mutex_trylock(QemuMutex *mutex)
{
    int owned;

    owned = TryEnterCriticalSection(&mutex->lock);
    if (owned) {
        assert(mutex->owner == 0);
        mutex->owner = GetCurrentThreadId();
    }
    return !owned;
}

void qemu_mutex_unlock(QemuMutex *mutex)
{
    assert(mutex->owner == GetCurrentThreadId());
    mutex->owner = 0;
    LeaveCriticalSection(&mutex->lock);
}
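
/*
 * Usage sketch (illustrative only; "dirty_lock" and "flush_dirty" are
 * hypothetical caller-side names): like pthreads, qemu_mutex_trylock
 * returns 0 when the lock was acquired and nonzero when it is busy,
 * the inverse of TryEnterCriticalSection's convention.
 *
 *     if (qemu_mutex_trylock(&dirty_lock) == 0) {
 *         flush_dirty();
 *         qemu_mutex_unlock(&dirty_lock);
 *     }
 */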

void qemu_cond_init(QemuCond *cond)
{
    memset(cond, 0, sizeof(*cond));

    cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
    if (!cond->sema) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = CreateEvent(NULL,    /* security */
                                       FALSE,   /* auto-reset */
                                       FALSE,   /* not signaled */
                                       NULL);   /* name */
    if (!cond->continue_event) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_cond_destroy(QemuCond *cond)
{
    BOOL result;
    result = CloseHandle(cond->continue_event);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }
    cond->continue_event = 0;
    result = CloseHandle(cond->sema);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }
    cond->sema = 0;
}

void qemu_cond_signal(QemuCond *cond)
{
    DWORD result;

    /*
     * Signal only when there are waiters. cond->waiters is
     * incremented by qemu_cond_wait under the external lock,
     * so we are safe about that.
     */
    if (cond->waiters == 0) {
        return;
    }

    /*
     * Waiting threads decrement it outside the external lock, but
     * only if another thread is executing qemu_cond_broadcast and
     * has the mutex. So, it also cannot be decremented concurrently
     * with this particular access.
     */
    cond->target = cond->waiters - 1;
    result = SignalObjectAndWait(cond->sema, cond->continue_event,
                                 INFINITE, FALSE);
    if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
        error_exit(GetLastError(), __func__);
    }
}

void qemu_cond_broadcast(QemuCond *cond)
{
    BOOLEAN result;
    /*
     * As in qemu_cond_signal, access to cond->waiters and
     * cond->target is locked via the external mutex.
     */
    if (cond->waiters == 0) {
        return;
    }

    cond->target = 0;
    result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
    if (!result) {
        error_exit(GetLastError(), __func__);
    }

    /*
     * At this point all waiters continue. Each one takes its
     * slice of the semaphore. Now it's our turn to wait: Since
     * the external mutex is held, no thread can leave cond_wait,
     * yet. For this reason, we can be sure that no thread gets
     * a chance to eat *more* than one slice. OTOH, it means
     * that the last waiter must send us a wake-up.
     */
    WaitForSingleObject(cond->continue_event, INFINITE);
}

void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex)
{
    /*
     * This access is protected under the mutex.
     */
    cond->waiters++;

    /*
     * Unlock external mutex and wait for signal.
     * NOTE: we've held mutex locked long enough to increment
     * waiters count above, so there's no problem with
     * leaving mutex unlocked before we wait on semaphore.
     */
    qemu_mutex_unlock(mutex);
    WaitForSingleObject(cond->sema, INFINITE);

    /* Now waiters must rendez-vous with the signaling thread and
     * let it continue. For cond_broadcast this has heavy contention
     * and triggers thundering herd. So goes life.
     *
     * Decrease waiters count. The mutex is not taken, so we have
     * to do this atomically.
     *
     * All waiters contend for the mutex at the end of this function
     * until the signaling thread relinquishes it. To ensure
     * each waiter consumes exactly one slice of the semaphore,
     * the signaling thread stops until it is told by the last
     * waiter that it can go on.
     */
    if (InterlockedDecrement(&cond->waiters) == cond->target) {
        SetEvent(cond->continue_event);
    }

    qemu_mutex_lock(mutex);
}
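
/*
 * Usage sketch (illustrative only; "state" and its fields are
 * hypothetical): as with pthreads condition variables, the predicate
 * must be re-checked in a loop under the mutex, since a wakeup only
 * means the state *may* have changed.
 *
 *     qemu_mutex_lock(&state->lock);
 *     while (!state->done) {
 *         qemu_cond_wait(&state->cond, &state->lock);
 *     }
 *     qemu_mutex_unlock(&state->lock);
 *
 * The signaling side sets state->done while holding state->lock and
 * then calls qemu_cond_signal(&state->cond).
 */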

void qemu_sem_init(QemuSemaphore *sem, int init)
{
    sem->sema = CreateSemaphore(NULL, init, LONG_MAX, NULL);
}

void qemu_sem_destroy(QemuSemaphore *sem)
{
    CloseHandle(sem->sema);
}

void qemu_sem_post(QemuSemaphore *sem)
{
    ReleaseSemaphore(sem->sema, 1, NULL);
}

int qemu_sem_timedwait(QemuSemaphore *sem, int ms)
{
    int rc = WaitForSingleObject(sem->sema, ms);
    if (rc == WAIT_OBJECT_0) {
        return 0;
    }
    if (rc != WAIT_TIMEOUT) {
        error_exit(GetLastError(), __func__);
    }
    return -1;
}

void qemu_sem_wait(QemuSemaphore *sem)
{
    if (WaitForSingleObject(sem->sema, INFINITE) != WAIT_OBJECT_0) {
        error_exit(GetLastError(), __func__);
    }
}
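
/*
 * Usage sketch (illustrative only; "done_sem" is a hypothetical
 * caller-side semaphore): qemu_sem_timedwait returns 0 if the
 * semaphore was decremented and -1 if the timeout in milliseconds
 * expired first.
 *
 *     if (qemu_sem_timedwait(&done_sem, 100) < 0) {
 *         ... timed out; poll other state, then retry ...
 *     }
 */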

void qemu_event_init(QemuEvent *ev, bool init)
{
    /* Manual reset. */
    ev->event = CreateEvent(NULL, TRUE, init, NULL);
}

void qemu_event_destroy(QemuEvent *ev)
{
    CloseHandle(ev->event);
}

void qemu_event_set(QemuEvent *ev)
{
    SetEvent(ev->event);
}

void qemu_event_reset(QemuEvent *ev)
{
    ResetEvent(ev->event);
}

void qemu_event_wait(QemuEvent *ev)
{
    WaitForSingleObject(ev->event, INFINITE);
}
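
/*
 * Usage sketch (illustrative only; "init_done" is a hypothetical
 * event): QemuEvent is a latch that stays set until explicitly
 * reset, so even a waiter that arrives late still sees it.
 *
 *     qemu_event_init(&init_done, false);
 *     ... worker thread: qemu_event_set(&init_done);
 *     ... main thread:   qemu_event_wait(&init_done);
 */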

struct QemuThreadData {
    /* Passed to win32_start_routine. */
    void *(*start_routine)(void *);
    void *arg;
    short mode;
    NotifierList exit;

    /* Only used for joinable threads. */
    bool exited;
    void *ret;
    CRITICAL_SECTION cs;
};

static bool atexit_registered;
static NotifierList main_thread_exit;

static __thread QemuThreadData *qemu_thread_data;

static void run_main_thread_exit(void)
{
    notifier_list_notify(&main_thread_exit, NULL);
}

void qemu_thread_atexit_add(Notifier *notifier)
{
    if (!qemu_thread_data) {
        if (!atexit_registered) {
            atexit_registered = true;
            atexit(run_main_thread_exit);
        }
        notifier_list_add(&main_thread_exit, notifier);
    } else {
        notifier_list_add(&qemu_thread_data->exit, notifier);
    }
}

void qemu_thread_atexit_remove(Notifier *notifier)
{
    notifier_remove(notifier);
}
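
/*
 * Usage sketch (illustrative only; "my_thread_exit" is a hypothetical
 * callback): a registered notifier fires when the registering thread
 * exits, via atexit() for the main thread and via qemu_thread_exit
 * for threads created by qemu_thread_create.
 *
 *     static Notifier exit_notifier = { .notify = my_thread_exit };
 *     qemu_thread_atexit_add(&exit_notifier);
 */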

static unsigned __stdcall win32_start_routine(void *arg)
{
    QemuThreadData *data = (QemuThreadData *) arg;
    void *(*start_routine)(void *) = data->start_routine;
    void *thread_arg = data->arg;

    qemu_thread_data = data;
    qemu_thread_exit(start_routine(thread_arg));
    abort();
}

void qemu_thread_exit(void *arg)
{
    QemuThreadData *data = qemu_thread_data;

    notifier_list_notify(&data->exit, NULL);
    if (data->mode == QEMU_THREAD_JOINABLE) {
        data->ret = arg;
        EnterCriticalSection(&data->cs);
        data->exited = true;
        LeaveCriticalSection(&data->cs);
    } else {
        g_free(data);
    }
    _endthreadex(0);
}

void *qemu_thread_join(QemuThread *thread)
{
    QemuThreadData *data;
    void *ret;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    /*
     * Because multiple copies of the QemuThread can exist via
     * qemu_thread_get_self, we need to store a value that cannot
     * leak there. The simplest, non racy way is to store the TID,
     * discard the handle that _beginthreadex gives back, and
     * get another copy of the handle here.
     */
    handle = qemu_thread_get_handle(thread);
    if (handle) {
        WaitForSingleObject(handle, INFINITE);
        CloseHandle(handle);
    }
    ret = data->ret;
    DeleteCriticalSection(&data->cs);
    g_free(data);
    return ret;
}

void qemu_thread_create(QemuThread *thread, const char *name,
                        void *(*start_routine)(void *),
                        void *arg, int mode)
{
    HANDLE hThread;
    struct QemuThreadData *data;

    data = g_malloc(sizeof *data);
    data->start_routine = start_routine;
    data->arg = arg;
    data->mode = mode;
    data->exited = false;
    notifier_list_init(&data->exit);

    if (data->mode != QEMU_THREAD_DETACHED) {
        InitializeCriticalSection(&data->cs);
    }

    hThread = (HANDLE) _beginthreadex(NULL, 0, win32_start_routine,
                                      data, 0, &thread->tid);
    if (!hThread) {
        error_exit(GetLastError(), __func__);
    }
    CloseHandle(hThread);
    thread->data = data;
}

void qemu_thread_get_self(QemuThread *thread)
{
    thread->data = qemu_thread_data;
    thread->tid = GetCurrentThreadId();
}

HANDLE qemu_thread_get_handle(QemuThread *thread)
{
    QemuThreadData *data;
    HANDLE handle;

    data = thread->data;
    if (data->mode == QEMU_THREAD_DETACHED) {
        return NULL;
    }

    EnterCriticalSection(&data->cs);
    if (!data->exited) {
        handle = OpenThread(SYNCHRONIZE | THREAD_SUSPEND_RESUME, FALSE,
                            thread->tid);
    } else {
        handle = NULL;
    }
    LeaveCriticalSection(&data->cs);
    return handle;
}

bool qemu_thread_is_self(QemuThread *thread)
{
    return GetCurrentThreadId() == thread->tid;
}
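
/*
 * Usage sketch (illustrative only; "worker_fn" and "opaque" are
 * hypothetical): a QEMU_THREAD_JOINABLE thread hands the value it
 * returns (or passes to qemu_thread_exit) back through
 * qemu_thread_join.
 *
 *     QemuThread worker;
 *     qemu_thread_create(&worker, "worker", worker_fn, opaque,
 *                        QEMU_THREAD_JOINABLE);
 *     ...
 *     void *ret = qemu_thread_join(&worker);
 */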