/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *           Red Hat, Inc. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Glauber Costa     <gcosta@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <stdarg.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "sysemu.h"
#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

//#define DEBUG_KVM

#ifdef DEBUG_KVM
#define dprintf(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define dprintf(fmt, ...) \
    do { } while (0)
#endif

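/* A KVMSlot mirrors one kernel memory slot: the guest-physical range it
 * covers, the ram_addr_t offset backing it, the slot index handed to the
 * kernel and the current KVM_MEM_* flags. */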
typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    ram_addr_t phys_offset;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;

struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
#ifdef KVM_CAP_COALESCED_MMIO
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int irqchip_in_kernel;
    int pit_in_kernel;
};

static KVMState *kvm_state;

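/* Return the first unused slot, skipping the range KVM reserves for its own
 * use; aborts if the table is exhausted. */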
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        /* KVM private memory slots */
        if (i >= 8 && i < 12)
            continue;
        if (s->slots[i].memory_size == 0)
            return &s->slots[i];
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}

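/* Push a slot's current parameters to the kernel. A memory_size of zero
 * effectively deletes the region on the kernel side. */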
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}

static void kvm_reset_vcpu(void *opaque)
{
    CPUState *env = opaque;

    kvm_arch_reset_vcpu(env);
}

int kvm_irqchip_in_kernel(void)
{
    return kvm_state->irqchip_in_kernel;
}

int kvm_pit_in_kernel(void)
{
    return kvm_state->pit_in_kernel;
}

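/* Create the kernel vcpu for env, mmap its shared kvm_run area, locate the
 * coalesced-MMIO ring if the kernel provides one, and let the architecture
 * code finish the setup. */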
int kvm_init_vcpu(CPUState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    dprintf("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        dprintf("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        dprintf("mmap'ing vcpu state failed\n");
        goto err;
    }

#ifdef KVM_CAP_COALESCED_MMIO
    if (s->coalesced_mmio && !s->coalesced_mmio_ring)
        s->coalesced_mmio_ring = (void *) env->kvm_run +
                s->coalesced_mmio * PAGE_SIZE;
#endif

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}

/*
 * dirty pages logging control
 */
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, int flags, int mask)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
    int old_flags;

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | flags;
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}

int kvm_log_start(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      KVM_MEM_LOG_DIRTY_PAGES,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

int kvm_log_stop(target_phys_addr_t phys_addr, ram_addr_t size)
{
    return kvm_dirty_pages_log_change(phys_addr, size,
                                      0,
                                      KVM_MEM_LOG_DIRTY_PAGES);
}

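/* Switch dirty logging on or off for every slot that does not already match
 * the requested state. */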
static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}

static int test_le_bit(unsigned long nr, unsigned char *addr)
{
    return (addr[nr >> 3] >> (nr & 7)) & 1;
}

/**
 * kvm_physical_sync_dirty_bitmap - Grab the dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * cpu_physical_memory_set_dirty(), i.e. every page the kernel reports as
 * dirty is marked dirty in qemu's bitmap as well.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                          target_phys_addr_t end_addr)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    target_phys_addr_t phys_addr;
    ram_addr_t addr;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = qemu_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = qemu_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            dprintf("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        for (phys_addr = mem->start_addr, addr = mem->phys_offset;
             phys_addr < mem->start_addr + mem->memory_size;
             phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
            unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
            unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;

            if (test_le_bit(nr, bitmap)) {
                cpu_physical_memory_set_dirty(addr);
            }
        }
        start_addr = phys_addr;
    }
    qemu_free(d.dirty_bitmap);

    return ret;
}

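/* Ask the kernel to coalesce MMIO writes to [start, start + size): they are
 * queued in the ring buffer and replayed by kvm_flush_coalesced_mmio_buffer()
 * instead of causing an exit per access. */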
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
#endif

    return ret;
}

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

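/* Register, resize or split kernel memory slots so that they match the new
 * QEMU physical memory layout. Overlapping slots are unregistered first and
 * any remaining prefix/suffix is re-registered separately. */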
static void kvm_set_phys_mem(target_phys_addr_t start_addr,
                             ram_addr_t size,
                             ram_addr_t phys_offset)
{
    KVMState *s = kvm_state;
    ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
    KVMSlot *mem, old;
    int err;

    if (start_addr & ~TARGET_PAGE_MASK) {
        if (flags >= IO_MEM_UNASSIGNED) {
            if (!kvm_lookup_overlapping_slot(s, start_addr,
                                             start_addr + size)) {
                return;
            }
            fprintf(stderr, "Unaligned split of a KVM memory slot\n");
        } else {
            fprintf(stderr, "Only page-aligned memory slots supported\n");
        }
        abort();
    }

    /* KVM does not support read-only slots */
    phys_offset &= ~IO_MEM_ROM;

    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (flags < IO_MEM_UNASSIGNED && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (phys_offset - start_addr == mem->phys_offset - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - nothing to be done. */
            return;
        }

        old = *mem;

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }

        /* Workaround for older KVM versions: we can't join slots, even not by
         * unregistering the previous ones and then registering the larger
         * slot. We have to maintain the existing fragmentation. Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one. If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size &&
            flags < IO_MEM_UNASSIGNED) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            phys_offset += old.memory_size;
            size -= old.memory_size;
            continue;
        }

        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->phys_offset = old.phys_offset;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }

        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->phys_offset = old.phys_offset + size_delta;
            mem->flags = 0;

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size)
        return;

    /* KVM does not need to know about this memory */
    if (flags >= IO_MEM_UNASSIGNED)
        return;

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->phys_offset = phys_offset;
    mem->flags = 0;

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}

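/* Thin adapters that plug the slot management above into the generic
 * CPUPhysMemoryClient interface. */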
static void kvm_client_set_memory(struct CPUPhysMemoryClient *client,
                                  target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset)
{
    kvm_set_phys_mem(start_addr, size, phys_offset);
}

static int kvm_client_sync_dirty_bitmap(struct CPUPhysMemoryClient *client,
                                        target_phys_addr_t start_addr,
                                        target_phys_addr_t end_addr)
{
    return kvm_physical_sync_dirty_bitmap(start_addr, end_addr);
}

static int kvm_client_migration_log(struct CPUPhysMemoryClient *client,
                                    int enable)
{
    return kvm_set_migration_log(enable);
}

static CPUPhysMemoryClient kvm_cpu_phys_memory_client = {
    .set_memory = kvm_client_set_memory,
    .sync_dirty_bitmap = kvm_client_sync_dirty_bitmap,
    .migration_log = kvm_client_migration_log,
};

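/* Open /dev/kvm, check the API version, create the VM, probe the optional
 * capabilities we care about and register the physical memory client. */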
int kvm_init(int smp_cpus)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    int ret;
    int i;

    if (smp_cpus > 1) {
        fprintf(stderr, "No SMP KVM support, use '-smp 1'\n");
        return -EINVAL;
    }

    s = qemu_mallocz(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++)
        s->slots[i].slot = i;

    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0)
            ret = -EINVAL;
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0)
        goto err;

    /* initially, KVM allocated its own memory and we had to jump through
     * hoops to make phys_ram_base point to this. Modern versions of KVM
     * just use a user allocated buffer so we can use regular pages
     * unmodified. Make sure we have a sufficiently modern version of KVM.
     */
    if (!kvm_check_extension(s, KVM_CAP_USER_MEMORY)) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n%s",
                upgrade_note);
        goto err;
    }

    /* There was a nasty bug in < kvm-80 that prevented memory slots from
     * being destroyed properly. Since we rely on this capability, refuse
     * to work with any kernel without this capability. */
    if (!kvm_check_extension(s, KVM_CAP_DESTROY_MEMORY_REGION_WORKS)) {
        ret = -EINVAL;

        fprintf(stderr,
                "KVM kernel module broken (DESTROY_MEMORY_REGION).\n%s",
                upgrade_note);
        goto err;
    }

    s->coalesced_mmio = 0;
#ifdef KVM_CAP_COALESCED_MMIO
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_mmio_ring = NULL;
#endif

    s->broken_set_mem_region = 1;
#ifdef KVM_CAP_JOIN_MEMORY_REGIONS_WORKS
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }
#endif

    s->vcpu_events = 0;
#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep = 0;
#ifdef KVM_CAP_X86_ROBUST_SINGLESTEP
    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
#endif

    ret = kvm_arch_init(s, smp_cpus);
    if (ret < 0)
        goto err;

    kvm_state = s;
    cpu_register_phys_memory_client(&kvm_cpu_phys_memory_client);

    return 0;

err:
    if (s) {
        if (s->vmfd != -1)
            close(s->vmfd);
        if (s->fd != -1)
            close(s->fd);
    }
    qemu_free(s);

    return ret;
}

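/* Complete a KVM_EXIT_IO by running each element of the (possibly string)
 * PIO request through QEMU's port I/O handlers. */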
static int kvm_handle_io(uint16_t port, void *data, int direction, int size,
                         uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }

    return 1;
}

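/* Replay all MMIO writes the kernel has queued in the coalesced-MMIO ring
 * since the last flush. */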
void kvm_flush_coalesced_mmio_buffer(void)
{
#ifdef KVM_CAP_COALESCED_MMIO
    KVMState *s = kvm_state;
    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }
#endif
}

void kvm_cpu_synchronize_state(CPUState *env)
{
    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_post_reset(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}

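/* Main vcpu loop: enter the guest via KVM_RUN and dispatch each exit reason
 * until one of the handlers asks to return to the caller. */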
int kvm_cpu_exec(CPUState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret;

    dprintf("kvm_cpu_exec()\n");

    do {
#ifndef CONFIG_IOTHREAD
        if (env->exit_request) {
            dprintf("interrupt exit requested\n");
            ret = 0;
            break;
        }
#endif

        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        qemu_mutex_unlock_iothread();
        ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);
        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        if (ret == -EINTR || ret == -EAGAIN) {
            cpu_exit(env);
            dprintf("io window exit\n");
            ret = 0;
            break;
        }

        if (ret < 0) {
            dprintf("kvm run failed %s\n", strerror(-ret));
            abort();
        }

        kvm_flush_coalesced_mmio_buffer();

        ret = 0; /* exit loop */
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            dprintf("handle_io\n");
            ret = kvm_handle_io(run->io.port,
                                (uint8_t *)run + run->io.data_offset,
                                run->io.direction,
                                run->io.size,
                                run->io.count);
            break;
        case KVM_EXIT_MMIO:
            dprintf("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 1;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            dprintf("irq_window_open\n");
            break;
        case KVM_EXIT_SHUTDOWN:
            dprintf("shutdown\n");
            qemu_system_reset_request();
            ret = 1;
            break;
        case KVM_EXIT_UNKNOWN:
            dprintf("kvm_exit_unknown\n");
            break;
        case KVM_EXIT_FAIL_ENTRY:
            dprintf("kvm_exit_fail_entry\n");
            break;
        case KVM_EXIT_EXCEPTION:
            dprintf("kvm_exit_exception\n");
            break;
        case KVM_EXIT_DEBUG:
            dprintf("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
            if (kvm_arch_debug(&run->debug.arch)) {
                gdb_set_stop_cpu(env);
                vm_stop(EXCP_DEBUG);
                env->exception_index = EXCP_DEBUG;
                return 0;
            }
            /* re-enter, this exception was guest-internal */
            ret = 1;
#endif /* KVM_CAP_SET_GUEST_DEBUG */
            break;
        default:
            dprintf("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret > 0);

    if (env->exit_request) {
        env->exit_request = 0;
        env->exception_index = EXCP_INTERRUPT;
    }

    return ret;
}

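/* Thin ioctl wrappers for the /dev/kvm, VM and vcpu file descriptors. They
 * take a single optional pointer argument and convert failure into -errno,
 * e.g. kvm_ioctl(s, KVM_GET_API_VERSION, 0). */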
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_vcpu_ioctl(CPUState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1)
        ret = -errno;

    return ret;
}

int kvm_has_sync_mmu(void)
{
#ifdef KVM_CAP_SYNC_MMU
    KVMState *s = kvm_state;

    return kvm_check_extension(s, KVM_CAP_SYNC_MMU);
#else
    return 0;
#endif
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
#ifdef MADV_DONTFORK
        int ret = madvise(start, size, MADV_DONTFORK);

        if (ret) {
            perror("madvise");
            exit(1);
        }
#else
        fprintf(stderr,
                "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
        exit(1);
#endif
    }
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
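/* Run func(data) on the vcpu thread owning env. Without the I/O thread
 * everything already runs in one thread; with it we insist on being called
 * from the right context and abort otherwise. */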
static void on_vcpu(CPUState *env, void (*func)(void *data), void *data)
{
#ifdef CONFIG_IOTHREAD
    if (env != cpu_single_env) {
        abort();
    }
#endif
    func(data);
}

struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc)
            return bp;
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    on_vcpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}

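/* Insert a software or hardware breakpoint and refresh the guest debug state
 * on every CPU. Software breakpoints are refcounted per address. */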
int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = qemu_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp)
            return -ENOMEM;

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp)
            return -ENOENT;

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err)
            return err;

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        qemu_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err)
            return err;
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err)
            return err;
    }
    return 0;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0)
                    break;
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu)
        kvm_update_guest_debug(env, 0);
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

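/* Install (or clear, when sigset is NULL) the signal mask the kernel applies
 * while this vcpu executes KVM_RUN. */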
int kvm_set_signal_mask(CPUState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset)
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);

    sigmask = qemu_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    free(sigmask);

    return r;
}

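/* Bind (or with assign == false unbind) an eventfd to a two-byte PIO address
 * with a data match, so a guest write of val to addr signals fd directly in
 * the kernel instead of exiting to userspace. */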
#ifdef KVM_IOEVENTFD
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;
    if (!kvm_enabled())
        return -ENOSYS;
    if (!assign)
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0)
        return r;
    return 0;
}
#endif