/*
 *  virtual page mapping and translated block handling
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

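/* Worked example (illustrative figures only): on a 64-bit host with a
   36-bit physical address space and TARGET_PAGE_BITS == 12, 24 bits of
   page index remain to be mapped; 24 % 10 == 4, so V_L1_BITS == 4 and
   the map is a 16-entry L1 table over two 1024-entry lower levels.  */
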
uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

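/* Determine the host page size and, in BSD user mode, mark address
   ranges already used by the host as PAGE_RESERVED.  */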
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

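/* Walk the multi-level l1_map for a page index, optionally allocating
   intermediate tables and the final PageDesc array on the way down.
   Returns NULL if alloc is 0 and the page has no entry yet.  */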
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

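/* Grow the phys_map_nodes pool so that at least 'nodes' more nodes
   can be handed out without reallocation.  */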
static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

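/* Recursively descend the physical page map, allocating nodes on
   demand, and point every entry covering [*index, *index + *nb) at
   the given leaf section.  Ranges that are large enough and suitably
   aligned are terminated at the current level instead of descending
   further.  */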
static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

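/* Look up the MemoryRegionSection covering physical page 'index',
   falling back to the unassigned section when nothing is mapped.  */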
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

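/* True unless the region is one of the fixed internal regions (RAM,
   ROM, notdirty, watchpoint) or a ROM device.  */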
bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change once a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

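/* Allocate the buffer holding translated code: either the static
   buffer, or a mapping placed to satisfy the branch-range constraints
   of the host architecture.  */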
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc__) && HOST_LONG_BITS == 64
        // Map the buffer below 2G, so we can use direct calls and branches
        start = (void *) 0x40000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc__) && HOST_LONG_BITS == 64
        // Map the buffer below 2G, so we can use direct calls and branches
        addr = (void *) 0x40000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

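/* Return the CPU with index 'cpu', or NULL if there is none.  */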
CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

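/* Append a new CPU to the global list, give it the next free index
   and, in system mode, register its state for save/restore.  */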
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

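/* Discard the self-modifying-code bitmap of a page and reset its
   write counter.  */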
static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

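/* Unlink a TB from the list of TBs on one of its pages; the low two
   bits of each list pointer select which page_next slot to follow.  */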
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

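/* Unlink jump 'n' of 'tb' from the circular list of TBs that jump
   into the same target TB.  */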
static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

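/* Remove a TB from everything that references it: the physical hash
   table, the per-page lists, the per-CPU jump caches and the jump
   chains of any TB that branches into it.  */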
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

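/* Set 'len' bits starting at bit 'start' in the bitmap 'tab'.  */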
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

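/* Build the bitmap of which bytes of the page are covered by a TB,
   so that writes which do not touch translated code can be filtered
   out quickly.  */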
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

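/* Translate the code starting at pc/cs_base/flags into a fresh TB and
   link it into the physical page tables, flushing everything first if
   the TB pool or the code buffer is full.  */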
TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
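/* User-mode only: invalidate every TB on the page containing 'addr';
   if the current TB modified itself, regenerate it and restart via
   cpu_resume_from_signal().  */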
Paul Brook41c1b1c2010-03-12 16:54:58 +00001224static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001225 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001226{
aliguori6b917542008-11-18 19:46:41 +00001227 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001228 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001229 int n;
bellardd720b932004-04-25 17:57:43 +00001230#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001231 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001232 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001233 int current_tb_modified = 0;
1234 target_ulong current_pc = 0;
1235 target_ulong current_cs_base = 0;
1236 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001237#endif
bellard9fa3e852004-01-04 18:06:42 +00001238
1239 addr &= TARGET_PAGE_MASK;
1240 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001241 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001242 return;
1243 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001244#ifdef TARGET_HAS_PRECISE_SMC
1245 if (tb && pc != 0) {
1246 current_tb = tb_find_pc(pc);
1247 }
1248#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001249 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001250 n = (uintptr_t)tb & 3;
1251 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001252#ifdef TARGET_HAS_PRECISE_SMC
1253 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001254 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001255 /* If we are modifying the current TB, we must stop
1256 its execution. We could be more precise by checking
1257 that the modification is after the current PC, but it
1258 would require a specialized function to partially
1259 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001260
bellardd720b932004-04-25 17:57:43 +00001261 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001262 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001263 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1264 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001265 }
1266#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001267 tb_phys_invalidate(tb, addr);
1268 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001269 }
1270 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001271#ifdef TARGET_HAS_PRECISE_SMC
1272 if (current_tb_modified) {
1273 /* we generate a block containing just the instruction that
1274 modified the memory; this ensures the new block cannot
1275 modify itself */
bellardea1c1802004-06-14 18:56:36 +00001276 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001277 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001278 cpu_resume_from_signal(env, puc);
1279 }
1280#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001281}
bellard9fa3e852004-01-04 18:06:42 +00001282#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001283
1284/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001285static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001286 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001287{
1288 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001289#ifndef CONFIG_USER_ONLY
1290 bool page_already_protected;
1291#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001292
bellard9fa3e852004-01-04 18:06:42 +00001293 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001294 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001295 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001296#ifndef CONFIG_USER_ONLY
1297 page_already_protected = p->first_tb != NULL;
1298#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001299 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001300 invalidate_page_bitmap(p);
1301
bellard107db442004-06-22 18:48:46 +00001302#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001303
bellard9fa3e852004-01-04 18:06:42 +00001304#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001305 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001306 target_ulong addr;
1307 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001308 int prot;
1309
bellardfd6ce8f2003-05-14 19:00:11 +00001310 /* force the host page to be non-writable (writes will incur a
1311 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001312 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001313 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001314 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1315 addr += TARGET_PAGE_SIZE) {
1316
1317 p2 = page_find (addr >> TARGET_PAGE_BITS);
1318 if (!p2)
1319 continue;
1320 prot |= p2->flags;
1321 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001322 }
ths5fafdf22007-09-16 21:08:06 +00001323 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001324 (prot & PAGE_BITS) & ~PAGE_WRITE);
1325#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001326 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001327 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001328#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001329 }
bellard9fa3e852004-01-04 18:06:42 +00001330#else
1331 /* if some code is already present, then the pages are already
1332 protected. So we only need to protect the page when its
1333 first TB is allocated */
Juan Quintela4429ab42011-06-02 01:53:44 +00001334 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001335 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001336 }
1337#endif
bellardd720b932004-04-25 17:57:43 +00001338
1339#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001340}
1341
bellard9fa3e852004-01-04 18:06:42 +00001342/* add a new TB and link it to the physical page tables. phys_page2 is
1343 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001344void tb_link_page(TranslationBlock *tb,
1345 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001346{
bellard9fa3e852004-01-04 18:06:42 +00001347 unsigned int h;
1348 TranslationBlock **ptb;
1349
pbrookc8a706f2008-06-02 16:16:42 +00001350 /* Grab the mmap lock to stop another thread invalidating this TB
1351 before we are done. */
1352 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001353 /* add in the physical hash table */
1354 h = tb_phys_hash_func(phys_pc);
1355 ptb = &tb_phys_hash[h];
1356 tb->phys_hash_next = *ptb;
1357 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001358
1359 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001360 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1361 if (phys_page2 != -1)
1362 tb_alloc_page(tb, 1, phys_page2);
1363 else
1364 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001365
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001366 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001367 tb->jmp_next[0] = NULL;
1368 tb->jmp_next[1] = NULL;
1369
1370 /* init original jump addresses */
1371 if (tb->tb_next_offset[0] != 0xffff)
1372 tb_reset_jump(tb, 0);
1373 if (tb->tb_next_offset[1] != 0xffff)
1374 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001375
1376#ifdef DEBUG_TB_CHECK
1377 tb_page_check();
1378#endif
pbrookc8a706f2008-06-02 16:16:42 +00001379 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001380}
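
/* Illustrative sketch (not part of the original file): page_next[] and
   jmp_first carry a tag in the two low bits of the pointer -- 0 or 1
   names which of the TB's two pages the link belongs to, and 2 marks
   the list head stored in jmp_first. A hypothetical decoder for such a
   tagged link: */
#if 0
static inline TranslationBlock *example_untag_tb(TranslationBlock *tagged,
                                                 unsigned int *n)
{
    *n = (uintptr_t)tagged & 3;                          /* tag: 0, 1 or 2 */
    return (TranslationBlock *)((uintptr_t)tagged & ~3); /* real pointer */
}
#endif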
1381
bellarda513fe12003-05-27 23:29:48 +00001382/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1383 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001384TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001385{
1386 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001387 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001388 TranslationBlock *tb;
1389
1390 if (nb_tbs <= 0)
1391 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001392 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1393 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001394 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001395 }
bellarda513fe12003-05-27 23:29:48 +00001396 /* binary search (cf Knuth) */
1397 m_min = 0;
1398 m_max = nb_tbs - 1;
1399 while (m_min <= m_max) {
1400 m = (m_min + m_max) >> 1;
1401 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001402 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001403 if (v == tc_ptr)
1404 return tb;
1405 else if (tc_ptr < v) {
1406 m_max = m - 1;
1407 } else {
1408 m_min = m + 1;
1409 }
ths5fafdf22007-09-16 21:08:06 +00001410 }
bellarda513fe12003-05-27 23:29:48 +00001411 return &tbs[m_max];
1412}
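
/* Illustrative sketch (not part of the original file): a typical caller
   maps a host PC, e.g. taken from a SIGSEGV handler, back to the TB whose
   generated code contains it, then rebuilds guest CPU state from it. */
#if 0
static void example_resolve_host_pc(CPUArchState *env, uintptr_t host_pc)
{
    TranslationBlock *tb = tb_find_pc(host_pc);

    if (tb) {
        /* host_pc lies within tb->tc_ptr's generated code */
        cpu_restore_state(tb, env, host_pc);
    }
}
#endif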
bellard75012672003-06-21 13:11:07 +00001413
bellardea041c02003-06-25 16:16:50 +00001414static void tb_reset_jump_recursive(TranslationBlock *tb);
1415
1416static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1417{
1418 TranslationBlock *tb1, *tb_next, **ptb;
1419 unsigned int n1;
1420
1421 tb1 = tb->jmp_next[n];
1422 if (tb1 != NULL) {
1423 /* find head of list */
1424 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001425 n1 = (uintptr_t)tb1 & 3;
1426 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001427 if (n1 == 2)
1428 break;
1429 tb1 = tb1->jmp_next[n1];
1430 }
1431 /* we are now sure that tb jumps to tb1 */
1432 tb_next = tb1;
1433
1434 /* remove tb from the jmp_first list */
1435 ptb = &tb_next->jmp_first;
1436 for(;;) {
1437 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001438 n1 = (uintptr_t)tb1 & 3;
1439 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001440 if (n1 == n && tb1 == tb)
1441 break;
1442 ptb = &tb1->jmp_next[n1];
1443 }
1444 *ptb = tb->jmp_next[n];
1445 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001446
bellardea041c02003-06-25 16:16:50 +00001447 /* suppress the jump to next tb in generated code */
1448 tb_reset_jump(tb, n);
1449
bellard01243112004-01-04 15:48:17 +00001450 /* suppress jumps in the tb we could have jumped to */
bellardea041c02003-06-25 16:16:50 +00001451 tb_reset_jump_recursive(tb_next);
1452 }
1453}
1454
1455static void tb_reset_jump_recursive(TranslationBlock *tb)
1456{
1457 tb_reset_jump_recursive2(tb, 0);
1458 tb_reset_jump_recursive2(tb, 1);
1459}
1460
bellard1fddef42005-04-17 19:16:13 +00001461#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001462#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001463static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001464{
1465 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1466}
1467#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001468void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001469{
Anthony Liguoric227f092009-10-01 16:12:16 -05001470 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001471 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001472
Avi Kivity06ef3522012-02-13 16:11:22 +02001473 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001474 if (!(memory_region_is_ram(section->mr)
1475 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001476 return;
1477 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001478 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001479 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001480 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001481}
Max Filippov1e7855a2012-04-10 02:48:17 +04001482
1483static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1484{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001485 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1486 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001487}
bellardc27004e2005-01-03 23:35:10 +00001488#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001489#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001490
Paul Brookc527ee82010-03-01 03:31:14 +00001491#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001492void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001493{
1495}
1496
Andreas Färber9349b4f2012-03-14 01:38:32 +01001497int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001498 int flags, CPUWatchpoint **watchpoint)
1499{
1500 return -ENOSYS;
1501}
1502#else
pbrook6658ffb2007-03-16 23:58:11 +00001503/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001504int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001505 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001506{
aliguorib4051332008-11-18 20:14:20 +00001507 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001508 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001509
aliguorib4051332008-11-18 20:14:20 +00001510 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001511 if ((len & (len - 1)) || (addr & ~len_mask) ||
1512 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001513 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1514 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1515 return -EINVAL;
1516 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001517 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001518
aliguoria1d1bb32008-11-18 20:07:32 +00001519 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001520 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001521 wp->flags = flags;
1522
aliguori2dc9f412008-11-18 20:56:59 +00001523 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001524 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001525 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001526 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001527 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001528
pbrook6658ffb2007-03-16 23:58:11 +00001529 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001530
1531 if (watchpoint)
1532 *watchpoint = wp;
1533 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001534}
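
/* Illustrative sketch (not part of the original file): per the sanity
   checks above, len must be a power of two and addr aligned to it, so
   watching an 8-byte variable looks like this. BP_MEM_WRITE is assumed
   to be the write-watch flag from cpu-all.h. */
#if 0
static void example_watch_u64(CPUArchState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* fails with -EINVAL if addr is not 8-byte aligned */
    if (cpu_watchpoint_insert(env, addr, 8, BP_GDB | BP_MEM_WRITE, &wp) == 0) {
        /* ... later ... */
        cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif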
1535
aliguoria1d1bb32008-11-18 20:07:32 +00001536/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001537int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001538 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001539{
aliguorib4051332008-11-18 20:14:20 +00001540 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001541 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001542
Blue Swirl72cf2d42009-09-12 07:36:22 +00001543 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001544 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001545 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001546 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001547 return 0;
1548 }
1549 }
aliguoria1d1bb32008-11-18 20:07:32 +00001550 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001551}
1552
aliguoria1d1bb32008-11-18 20:07:32 +00001553/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001554void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001555{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001556 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001557
aliguoria1d1bb32008-11-18 20:07:32 +00001558 tlb_flush_page(env, watchpoint->vaddr);
1559
Anthony Liguori7267c092011-08-20 22:09:37 -05001560 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001561}
1562
aliguoria1d1bb32008-11-18 20:07:32 +00001563/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001564void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001565{
aliguoric0ce9982008-11-25 22:13:57 +00001566 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001567
Blue Swirl72cf2d42009-09-12 07:36:22 +00001568 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001569 if (wp->flags & mask)
1570 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001571 }
aliguoria1d1bb32008-11-18 20:07:32 +00001572}
Paul Brookc527ee82010-03-01 03:31:14 +00001573#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001574
1575/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001576int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001577 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001578{
bellard1fddef42005-04-17 19:16:13 +00001579#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001580 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001581
Anthony Liguori7267c092011-08-20 22:09:37 -05001582 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001583
1584 bp->pc = pc;
1585 bp->flags = flags;
1586
aliguori2dc9f412008-11-18 20:56:59 +00001587 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001588 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001589 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001590 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001591 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001592
1593 breakpoint_invalidate(env, pc);
1594
1595 if (breakpoint)
1596 *breakpoint = bp;
1597 return 0;
1598#else
1599 return -ENOSYS;
1600#endif
1601}
1602
1603/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001604int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001605{
1606#if defined(TARGET_HAS_ICE)
1607 CPUBreakpoint *bp;
1608
Blue Swirl72cf2d42009-09-12 07:36:22 +00001609 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001610 if (bp->pc == pc && bp->flags == flags) {
1611 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001612 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001613 }
bellard4c3a88a2003-07-26 12:06:08 +00001614 }
aliguoria1d1bb32008-11-18 20:07:32 +00001615 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001616#else
aliguoria1d1bb32008-11-18 20:07:32 +00001617 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001618#endif
1619}
1620
aliguoria1d1bb32008-11-18 20:07:32 +00001621/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001622void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001623{
bellard1fddef42005-04-17 19:16:13 +00001624#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001625 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001626
aliguoria1d1bb32008-11-18 20:07:32 +00001627 breakpoint_invalidate(env, breakpoint->pc);
1628
Anthony Liguori7267c092011-08-20 22:09:37 -05001629 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001630#endif
1631}
1632
1633/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001634void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001635{
1636#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001637 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001638
Blue Swirl72cf2d42009-09-12 07:36:22 +00001639 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001640 if (bp->flags & mask)
1641 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001642 }
bellard4c3a88a2003-07-26 12:06:08 +00001643#endif
1644}
1645
bellardc33a3462003-07-29 20:50:33 +00001646/* enable or disable single step mode. EXCP_DEBUG is returned by the
1647 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001648void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001649{
bellard1fddef42005-04-17 19:16:13 +00001650#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001651 if (env->singlestep_enabled != enabled) {
1652 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001653 if (kvm_enabled())
1654 kvm_update_guest_debug(env, 0);
1655 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001656 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001657 /* XXX: only flush what is necessary */
1658 tb_flush(env);
1659 }
bellardc33a3462003-07-29 20:50:33 +00001660 }
1661#endif
1662}
1663
Andreas Färber9349b4f2012-03-14 01:38:32 +01001664static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001665{
pbrookd5975362008-06-07 20:50:51 +00001666 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1667 problem and hope the cpu will stop of its own accord. For userspace
1668 emulation this isn't actually as bad as it sounds, since signals
1669 are mostly used to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001670 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001671 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001672
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001673 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001674 tb = env->current_tb;
1675 /* if the cpu is currently executing code, we must unlink it and
1676 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001677 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001678 env->current_tb = NULL;
1679 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001680 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001681 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001682}
1683
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001684#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001685/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001686static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001687{
1688 int old_mask;
1689
1690 old_mask = env->interrupt_request;
1691 env->interrupt_request |= mask;
1692
aliguori8edac962009-04-24 18:03:45 +00001693 /*
1694 * If called from iothread context, wake the target cpu in
1695 * case it's halted.
1696 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001697 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001698 qemu_cpu_kick(env);
1699 return;
1700 }
aliguori8edac962009-04-24 18:03:45 +00001701
pbrook2e70f6e2008-06-29 01:03:05 +00001702 if (use_icount) {
pbrook266910c2008-07-09 15:31:50 +00001703 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001704 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001705 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001706 cpu_abort(env, "Raised interrupt while not in I/O function");
1707 }
pbrook2e70f6e2008-06-29 01:03:05 +00001708 } else {
aurel323098dba2009-03-07 21:28:24 +00001709 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001710 }
1711}
1712
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001713CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1714
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001715#else /* CONFIG_USER_ONLY */
1716
Andreas Färber9349b4f2012-03-14 01:38:32 +01001717void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001718{
1719 env->interrupt_request |= mask;
1720 cpu_unlink_tb(env);
1721}
1722#endif /* CONFIG_USER_ONLY */
1723
Andreas Färber9349b4f2012-03-14 01:38:32 +01001724void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001725{
1726 env->interrupt_request &= ~mask;
1727}
1728
Andreas Färber9349b4f2012-03-14 01:38:32 +01001729void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001730{
1731 env->exit_request = 1;
1732 cpu_unlink_tb(env);
1733}
1734
Andreas Färber9349b4f2012-03-14 01:38:32 +01001735void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001736{
1737 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001738 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001739
1740 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001741 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001742 fprintf(stderr, "qemu: fatal: ");
1743 vfprintf(stderr, fmt, ap);
1744 fprintf(stderr, "\n");
1745#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001746 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1747#else
1748 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001749#endif
aliguori93fcfe32009-01-15 22:34:14 +00001750 if (qemu_log_enabled()) {
1751 qemu_log("qemu: fatal: ");
1752 qemu_log_vprintf(fmt, ap2);
1753 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001754#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001755 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001756#else
aliguori93fcfe32009-01-15 22:34:14 +00001757 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001758#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001759 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001760 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001761 }
pbrook493ae1f2007-11-23 16:53:59 +00001762 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001763 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001764#if defined(CONFIG_USER_ONLY)
1765 {
1766 struct sigaction act;
1767 sigfillset(&act.sa_mask);
1768 act.sa_handler = SIG_DFL;
1769 sigaction(SIGABRT, &act, NULL);
1770 }
1771#endif
bellard75012672003-06-21 13:11:07 +00001772 abort();
1773}
1774
Andreas Färber9349b4f2012-03-14 01:38:32 +01001775CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001776{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001777 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1778 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001779 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001780#if defined(TARGET_HAS_ICE)
1781 CPUBreakpoint *bp;
1782 CPUWatchpoint *wp;
1783#endif
1784
Andreas Färber9349b4f2012-03-14 01:38:32 +01001785 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001786
1787 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001788 new_env->next_cpu = next_cpu;
1789 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001790
1791 /* Clone all break/watchpoints.
1792 Note: Once we support ptrace with hw-debug register access, make sure
1793 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001794 QTAILQ_INIT(&env->breakpoints);
1795 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001796#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001797 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001798 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1799 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001800 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001801 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1802 wp->flags, NULL);
1803 }
1804#endif
1805
thsc5be9f02007-02-28 20:20:53 +00001806 return new_env;
1807}
1808
bellard01243112004-01-04 15:48:17 +00001809#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001810void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001811{
1812 unsigned int i;
1813
1814 /* Discard jump cache entries for any tb which might overlap the
1815 flushed page. */
1816 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1817 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001818 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001819
1820 i = tb_jmp_cache_hash_page(addr);
1821 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001822 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001823}
1824
Juan Quintelad24981d2012-05-22 00:42:40 +02001825static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
1826 uintptr_t length)
bellard1ccde1c2004-02-06 19:46:14 +00001827{
Juan Quintelad24981d2012-05-22 00:42:40 +02001828 uintptr_t start1;
bellardf23db162005-08-21 19:12:28 +00001829
bellard1ccde1c2004-02-06 19:46:14 +00001830 /* we modify the TLB cache so that the dirty bit will be set again
1831 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001832 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001833 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001834 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001835 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001836 != (end - 1) - start) {
1837 abort();
1838 }
Blue Swirle5548612012-04-21 13:08:33 +00001839 cpu_tlb_reset_dirty_all(start1, length);
1841}
1842
1843/* Note: start and end must be within the same ram block. */
1844void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
1845 int dirty_flags)
1846{
1847 uintptr_t length;
1848
1849 start &= TARGET_PAGE_MASK;
1850 end = TARGET_PAGE_ALIGN(end);
1851
1852 length = end - start;
1853 if (length == 0)
1854 return;
1855 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
1856
1857 if (tcg_enabled()) {
1858 tlb_reset_dirty_range_all(start, end, length);
1859 }
bellard1ccde1c2004-02-06 19:46:14 +00001860}
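
/* Illustrative sketch (not part of the original file): the range is
   widened to whole target pages before the dirty bits are touched. */
#if 0
static void example_reset_dirty_rounding(int dirty_flags)
{
    /* with TARGET_PAGE_SIZE 0x1000 this clears pages 0x1000..0x3fff:
       start is masked down to 0x1000, end aligned up to 0x4000 */
    cpu_physical_memory_reset_dirty(0x1234, 0x3456, dirty_flags);
}
#endif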
1861
aliguori74576192008-10-06 14:02:03 +00001862int cpu_physical_memory_set_dirty_tracking(int enable)
1863{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001864 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001865 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001866 return ret;
aliguori74576192008-10-06 14:02:03 +00001867}
1868
Blue Swirle5548612012-04-21 13:08:33 +00001869target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1870 MemoryRegionSection *section,
1871 target_ulong vaddr,
1872 target_phys_addr_t paddr,
1873 int prot,
1874 target_ulong *address)
1875{
1876 target_phys_addr_t iotlb;
1877 CPUWatchpoint *wp;
1878
Blue Swirlcc5bea62012-04-14 14:56:48 +00001879 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001880 /* Normal RAM. */
1881 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001882 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001883 if (!section->readonly) {
1884 iotlb |= phys_section_notdirty;
1885 } else {
1886 iotlb |= phys_section_rom;
1887 }
1888 } else {
1889 /* IO handlers are currently passed a physical address.
1890 It would be nice to pass an offset from the base address
1891 of that region. This would avoid having to special case RAM,
1892 and avoid full address decoding in every device.
1893 We can't use the high bits of pd for this because
1894 IO_MEM_ROMD uses these as a ram address. */
1895 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001896 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001897 }
1898
1899 /* Make accesses to pages with watchpoints go via the
1900 watchpoint trap routines. */
1901 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1902 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1903 /* Avoid trapping reads of pages with a write breakpoint. */
1904 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1905 iotlb = phys_section_watch + paddr;
1906 *address |= TLB_MMIO;
1907 break;
1908 }
1909 }
1910 }
1911
1912 return iotlb;
1913}
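
/* Summary (not part of the original file) of the iotlb encodings
 * produced above:
 *   RAM, writable  -> (ram_addr & TARGET_PAGE_MASK) | phys_section_notdirty
 *   RAM, read-only -> (ram_addr & TARGET_PAGE_MASK) | phys_section_rom
 *   MMIO           -> index of the section + offset of paddr within it
 *   watched page   -> phys_section_watch + paddr, with TLB_MMIO set so
 *                     accesses go through the watchpoint handlers
 */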
1914
bellard01243112004-01-04 15:48:17 +00001915#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001916/*
1917 * Walks guest process memory "regions" one by one
1918 * and calls callback function 'fn' for each region.
1919 */
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001920
1921struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001922{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001923 walk_memory_regions_fn fn;
1924 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001925 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001926 int prot;
1927};
bellard9fa3e852004-01-04 18:06:42 +00001928
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001929static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001930 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001931{
1932 if (data->start != -1ul) {
1933 int rc = data->fn(data->priv, data->start, end, data->prot);
1934 if (rc != 0) {
1935 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001936 }
bellard33417e72003-08-10 21:47:01 +00001937 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001938
1939 data->start = (new_prot ? end : -1ul);
1940 data->prot = new_prot;
1941
1942 return 0;
1943}
1944
1945static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001946 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001947{
Paul Brookb480d9b2010-03-12 23:23:29 +00001948 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001949 int i, rc;
1950
1951 if (*lp == NULL) {
1952 return walk_memory_regions_end(data, base, 0);
1953 }
1954
1955 if (level == 0) {
1956 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001957 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001958 int prot = pd[i].flags;
1959
1960 pa = base | (i << TARGET_PAGE_BITS);
1961 if (prot != data->prot) {
1962 rc = walk_memory_regions_end(data, pa, prot);
1963 if (rc != 0) {
1964 return rc;
1965 }
1966 }
1967 }
1968 } else {
1969 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001970 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001971 pa = base | ((abi_ulong)i <<
1972 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001973 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1974 if (rc != 0) {
1975 return rc;
1976 }
1977 }
1978 }
1979
1980 return 0;
1981}
1982
1983int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1984{
1985 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001986 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001987
1988 data.fn = fn;
1989 data.priv = priv;
1990 data.start = -1ul;
1991 data.prot = 0;
1992
1993 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001994 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001995 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1996 if (rc != 0) {
1997 return rc;
1998 }
1999 }
2000
2001 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002002}
2003
Paul Brookb480d9b2010-03-12 23:23:29 +00002004static int dump_region(void *priv, abi_ulong start,
2005 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002006{
2007 FILE *f = (FILE *)priv;
2008
Paul Brookb480d9b2010-03-12 23:23:29 +00002009 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2010 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002011 start, end, end - start,
2012 ((prot & PAGE_READ) ? 'r' : '-'),
2013 ((prot & PAGE_WRITE) ? 'w' : '-'),
2014 ((prot & PAGE_EXEC) ? 'x' : '-'));
2015
2016 return (0);
2017}
2018
2019/* dump memory mappings */
2020void page_dump(FILE *f)
2021{
2022 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2023 "start", "end", "size", "prot");
2024 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002025}
2026
pbrook53a59602006-03-25 19:31:22 +00002027int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002028{
bellard9fa3e852004-01-04 18:06:42 +00002029 PageDesc *p;
2030
2031 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002032 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002033 return 0;
2034 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002035}
2036
Richard Henderson376a7902010-03-10 15:57:04 -08002037/* Modify the flags of a page and invalidate the code if necessary.
2038 The flag PAGE_WRITE_ORG is positioned automatically depending
2039 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002040void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002041{
Richard Henderson376a7902010-03-10 15:57:04 -08002042 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002043
Richard Henderson376a7902010-03-10 15:57:04 -08002044 /* This function should never be called with addresses outside the
2045 guest address space. If this assert fires, it probably indicates
2046 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002047#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2048 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002049#endif
2050 assert(start < end);
2051
bellard9fa3e852004-01-04 18:06:42 +00002052 start = start & TARGET_PAGE_MASK;
2053 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002054
2055 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002056 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002057 }
2058
2059 for (addr = start, len = end - start;
2060 len != 0;
2061 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2062 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2063
2064 /* If the write protection bit is set, then we invalidate
2065 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002066 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002067 (flags & PAGE_WRITE) &&
2068 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002069 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002070 }
2071 p->flags = flags;
2072 }
bellard9fa3e852004-01-04 18:06:42 +00002073}
2074
ths3d97b402007-11-02 19:02:07 +00002075int page_check_range(target_ulong start, target_ulong len, int flags)
2076{
2077 PageDesc *p;
2078 target_ulong end;
2079 target_ulong addr;
2080
Richard Henderson376a7902010-03-10 15:57:04 -08002081 /* This function should never be called with addresses outside the
2082 guest address space. If this assert fires, it probably indicates
2083 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002084#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2085 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002086#endif
2087
Richard Henderson3e0650a2010-03-29 10:54:42 -07002088 if (len == 0) {
2089 return 0;
2090 }
Richard Henderson376a7902010-03-10 15:57:04 -08002091 if (start + len - 1 < start) {
2092 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002093 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002094 }
balrog55f280c2008-10-28 10:24:11 +00002095
ths3d97b402007-11-02 19:02:07 +00002096 end = TARGET_PAGE_ALIGN(start+len); /* must do before we lose bits in the next step */
2097 start = start & TARGET_PAGE_MASK;
2098
Richard Henderson376a7902010-03-10 15:57:04 -08002099 for (addr = start, len = end - start;
2100 len != 0;
2101 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002102 p = page_find(addr >> TARGET_PAGE_BITS);
2103 if( !p )
2104 return -1;
2105 if( !(p->flags & PAGE_VALID) )
2106 return -1;
2107
bellarddae32702007-11-14 10:51:00 +00002108 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002109 return -1;
bellarddae32702007-11-14 10:51:00 +00002110 if (flags & PAGE_WRITE) {
2111 if (!(p->flags & PAGE_WRITE_ORG))
2112 return -1;
2113 /* unprotect the page if it was made read-only because it
2114 contains translated code */
2115 if (!(p->flags & PAGE_WRITE)) {
2116 if (!page_unprotect(addr, 0, NULL))
2117 return -1;
2118 }
2119 return 0;
2120 }
ths3d97b402007-11-02 19:02:07 +00002121 }
2122 return 0;
2123}
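
/* Illustrative sketch (not part of the original file): a hypothetical
   access_ok()-style helper validating a guest buffer before use. */
#if 0
static bool example_guest_buffer_readable(target_ulong guest_addr,
                                          target_ulong size)
{
    return page_check_range(guest_addr, size, PAGE_READ) == 0;
}
#endif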
2124
bellard9fa3e852004-01-04 18:06:42 +00002125/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002126 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002127int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002128{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002129 unsigned int prot;
2130 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002131 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002132
pbrookc8a706f2008-06-02 16:16:42 +00002133 /* Technically this isn't safe inside a signal handler. However we
2134 know this only ever happens in a synchronous SEGV handler, so in
2135 practice it seems to be ok. */
2136 mmap_lock();
2137
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002138 p = page_find(address >> TARGET_PAGE_BITS);
2139 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002140 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002141 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002142 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002143
bellard9fa3e852004-01-04 18:06:42 +00002144 /* if the page was really writable, then we change its
2145 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002146 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2147 host_start = address & qemu_host_page_mask;
2148 host_end = host_start + qemu_host_page_size;
2149
2150 prot = 0;
2151 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2152 p = page_find(addr >> TARGET_PAGE_BITS);
2153 p->flags |= PAGE_WRITE;
2154 prot |= p->flags;
2155
bellard9fa3e852004-01-04 18:06:42 +00002156 /* and since the content will be modified, we must invalidate
2157 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002158 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002159#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002160 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002161#endif
bellard9fa3e852004-01-04 18:06:42 +00002162 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002163 mprotect((void *)g2h(host_start), qemu_host_page_size,
2164 prot & PAGE_BITS);
2165
2166 mmap_unlock();
2167 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002168 }
pbrookc8a706f2008-06-02 16:16:42 +00002169 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002170 return 0;
2171}
bellard9fa3e852004-01-04 18:06:42 +00002172#endif /* defined(CONFIG_USER_ONLY) */
2173
pbrooke2eef172008-06-08 01:09:01 +00002174#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002175
Paul Brookc04b2b72010-03-01 03:31:14 +00002176#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
2177typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002178 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002179 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002180 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002181} subpage_t;
2182
Anthony Liguoric227f092009-10-01 16:12:16 -05002183static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002184 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002185static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002186static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002187{
Avi Kivity5312bd82012-02-12 18:32:55 +02002188 MemoryRegionSection *section = &phys_sections[section_index];
2189 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002190
2191 if (mr->subpage) {
2192 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2193 memory_region_destroy(&subpage->iomem);
2194 g_free(subpage);
2195 }
2196}
2197
Avi Kivity4346ae32012-02-10 17:00:01 +02002198static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002199{
2200 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002201 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002202
Avi Kivityc19e8802012-02-13 20:25:31 +02002203 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002204 return;
2205 }
2206
Avi Kivityc19e8802012-02-13 20:25:31 +02002207 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002208 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002209 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002210 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002211 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002212 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002213 }
Avi Kivity54688b12012-02-09 17:34:32 +02002214 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002215 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002216 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002217}
2218
2219static void destroy_all_mappings(void)
2220{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002221 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002222 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002223}
2224
Avi Kivity5312bd82012-02-12 18:32:55 +02002225static uint16_t phys_section_add(MemoryRegionSection *section)
2226{
2227 if (phys_sections_nb == phys_sections_nb_alloc) {
2228 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2229 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2230 phys_sections_nb_alloc);
2231 }
2232 phys_sections[phys_sections_nb] = *section;
2233 return phys_sections_nb++;
2234}
2235
2236static void phys_sections_clear(void)
2237{
2238 phys_sections_nb = 0;
2239}
2240
Avi Kivity0f0cb162012-02-13 17:14:32 +02002241static void register_subpage(MemoryRegionSection *section)
2242{
2243 subpage_t *subpage;
2244 target_phys_addr_t base = section->offset_within_address_space
2245 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002246 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002247 MemoryRegionSection subsection = {
2248 .offset_within_address_space = base,
2249 .size = TARGET_PAGE_SIZE,
2250 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002251 target_phys_addr_t start, end;
2252
Avi Kivityf3705d52012-03-08 16:16:34 +02002253 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002254
Avi Kivityf3705d52012-03-08 16:16:34 +02002255 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002256 subpage = subpage_init(base);
2257 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002258 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2259 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002260 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002261 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002262 }
2263 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
Tyler Halladb2a9b2012-07-25 18:45:03 -04002264 end = start + section->size - 1;
Avi Kivity0f0cb162012-02-13 17:14:32 +02002265 subpage_register(subpage, start, end, phys_section_add(section));
2266}
2267
2268
2269static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002270{
Avi Kivitydd811242012-01-02 12:17:03 +02002271 target_phys_addr_t start_addr = section->offset_within_address_space;
2272 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002273 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002274 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002275
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002276 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002277
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002278 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002279 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2280 section_index);
bellard33417e72003-08-10 21:47:01 +00002281}
2282
Avi Kivity0f0cb162012-02-13 17:14:32 +02002283void cpu_register_physical_memory_log(MemoryRegionSection *section,
2284 bool readonly)
2285{
2286 MemoryRegionSection now = *section, remain = *section;
2287
2288 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2289 || (now.size < TARGET_PAGE_SIZE)) {
2290 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2291 - now.offset_within_address_space,
2292 now.size);
2293 register_subpage(&now);
2294 remain.size -= now.size;
2295 remain.offset_within_address_space += now.size;
2296 remain.offset_within_region += now.size;
2297 }
Tyler Hall69b67642012-07-25 18:45:04 -04002298 while (remain.size >= TARGET_PAGE_SIZE) {
2299 now = remain;
2300 if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
2301 now.size = TARGET_PAGE_SIZE;
2302 register_subpage(&now);
2303 } else {
2304 now.size &= TARGET_PAGE_MASK;
2305 register_multipage(&now);
2306 }
Avi Kivity0f0cb162012-02-13 17:14:32 +02002307 remain.size -= now.size;
2308 remain.offset_within_address_space += now.size;
2309 remain.offset_within_region += now.size;
2310 }
2311 now = remain;
2312 if (now.size) {
2313 register_subpage(&now);
2314 }
2315}
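
/* Worked example (not part of the original file), assuming
 * TARGET_PAGE_SIZE is 0x1000 and offset_within_region is congruent to
 * offset_within_address_space modulo the page size: a section at
 * offset_within_address_space = 0x1800 with size = 0x3000 is split as
 *   head [0x1800, 0x2000) -> register_subpage()   (partial first page)
 *   body [0x2000, 0x4000) -> register_multipage() (whole pages)
 *   tail [0x4000, 0x4800) -> register_subpage()   (partial last page)
 */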
2316
2317
Anthony Liguoric227f092009-10-01 16:12:16 -05002318void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002319{
2320 if (kvm_enabled())
2321 kvm_coalesce_mmio_region(addr, size);
2322}
2323
Anthony Liguoric227f092009-10-01 16:12:16 -05002324void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002325{
2326 if (kvm_enabled())
2327 kvm_uncoalesce_mmio_region(addr, size);
2328}
2329
Sheng Yang62a27442010-01-26 19:21:16 +08002330void qemu_flush_coalesced_mmio_buffer(void)
2331{
2332 if (kvm_enabled())
2333 kvm_flush_coalesced_mmio_buffer();
2334}
2335
Marcelo Tosattic9027602010-03-01 20:25:08 -03002336#if defined(__linux__) && !defined(TARGET_S390X)
2337
2338#include <sys/vfs.h>
2339
2340#define HUGETLBFS_MAGIC 0x958458f6
2341
2342static long gethugepagesize(const char *path)
2343{
2344 struct statfs fs;
2345 int ret;
2346
2347 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002348 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002349 } while (ret != 0 && errno == EINTR);
2350
2351 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002352 perror(path);
2353 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002354 }
2355
2356 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002357 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002358
2359 return fs.f_bsize;
2360}
2361
Alex Williamson04b16652010-07-02 11:13:17 -06002362static void *file_ram_alloc(RAMBlock *block,
2363 ram_addr_t memory,
2364 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002365{
2366 char *filename;
2367 void *area;
2368 int fd;
2369#ifdef MAP_POPULATE
2370 int flags;
2371#endif
2372 unsigned long hpagesize;
2373
2374 hpagesize = gethugepagesize(path);
2375 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002376 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002377 }
2378
2379 if (memory < hpagesize) {
2380 return NULL;
2381 }
2382
2383 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2384 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2385 return NULL;
2386 }
2387
2388 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002389 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002390 }
2391
2392 fd = mkstemp(filename);
2393 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002394 perror("unable to create backing store for hugepages");
2395 free(filename);
2396 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002397 }
2398 unlink(filename);
2399 free(filename);
2400
2401 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2402
2403 /*
2404 * ftruncate is not supported by hugetlbfs in older
2405 * hosts, so don't bother bailing out on errors.
2406 * If anything goes wrong with it under other filesystems,
2407 * mmap will fail.
2408 */
2409 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002410 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002411
2412#ifdef MAP_POPULATE
2413 /* NB: MAP_POPULATE won't exhaustively allocate all physical pages when
2414 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2415 * to sidestep this quirk.
2416 */
2417 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2418 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2419#else
2420 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2421#endif
2422 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002423 perror("file_ram_alloc: can't mmap RAM pages");
2424 close(fd);
2425 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002426 }
Alex Williamson04b16652010-07-02 11:13:17 -06002427 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002428 return area;
2429}
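
/* Worked example (not part of the original file): the rounding above
 * aligns the request up to a whole number of huge pages. With 2 MiB
 * pages (hpagesize = 0x200000), memory = 0x123000 becomes
 * (0x123000 + 0x1fffff) & ~0x1fffff = 0x200000. */
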
2430#endif
2431
Alex Williamsond17b5282010-06-25 11:08:38 -06002432static ram_addr_t find_ram_offset(ram_addr_t size)
2433{
Alex Williamson04b16652010-07-02 11:13:17 -06002434 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002435 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002436
2437 if (QLIST_EMPTY(&ram_list.blocks))
2438 return 0;
2439
2440 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002441 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002442
2443 end = block->offset + block->length;
2444
2445 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2446 if (next_block->offset >= end) {
2447 next = MIN(next, next_block->offset);
2448 }
2449 }
2450 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002451 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002452 mingap = next - end;
2453 }
2454 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002455
2456 if (offset == RAM_ADDR_MAX) {
2457 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2458 (uint64_t)size);
2459 abort();
2460 }
2461
Alex Williamson04b16652010-07-02 11:13:17 -06002462 return offset;
2463}
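
/* Worked example (not part of the original file): with existing blocks
 * [0x0, 0x1000) and [0x3000, 0x4000), find_ram_offset(0x1000) sees a
 * 0x2000-byte gap after the first block and an unbounded gap after the
 * second; both fit, but the smaller gap wins (best fit), so the new
 * block is placed at offset 0x1000. */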
2464
2465static ram_addr_t last_ram_offset(void)
2466{
Alex Williamsond17b5282010-06-25 11:08:38 -06002467 RAMBlock *block;
2468 ram_addr_t last = 0;
2469
2470 QLIST_FOREACH(block, &ram_list.blocks, next)
2471 last = MAX(last, block->offset + block->length);
2472
2473 return last;
2474}
2475
Jason Baronddb97f12012-08-02 15:44:16 -04002476static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
2477{
2478 int ret;
2479 QemuOpts *machine_opts;
2480
2481 /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
2482 machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
2483 if (machine_opts &&
2484 !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
2485 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
2486 if (ret) {
2487 perror("qemu_madvise");
2488 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
2489 "but dump_guest_core=off specified\n");
2490 }
2491 }
2492}
2493
Avi Kivityc5705a72011-12-20 15:59:12 +02002494void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002495{
2496 RAMBlock *new_block, *block;
2497
Avi Kivityc5705a72011-12-20 15:59:12 +02002498 new_block = NULL;
2499 QLIST_FOREACH(block, &ram_list.blocks, next) {
2500 if (block->offset == addr) {
2501 new_block = block;
2502 break;
2503 }
2504 }
2505 assert(new_block);
2506 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002507
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002508 if (dev) {
2509 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002510 if (id) {
2511 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002512 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002513 }
2514 }
2515 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2516
2517 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002518 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002519 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2520 new_block->idstr);
2521 abort();
2522 }
2523 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002524}

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
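
/* Illustrative sketch (not part of the original file): callers normally
 * reach qemu_ram_alloc() through the MemoryRegion API rather than directly.
 * The region name, size and guest base address below are hypothetical, and
 * memory_region_init_ram() is assumed to have its (mr, name, size)
 * signature of this era. */
#if 0
static void example_alloc_vram(MemoryRegion *sysmem)
{
    static MemoryRegion vram;

    /* memory_region_init_ram() ends up calling qemu_ram_alloc(size, mr). */
    memory_region_init_ram(&vram, "example.vram", 8 * 1024 * 1024);
    memory_region_add_subregion(sysmem, 0xa0000000, &vram);
}
#endif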

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
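
/* Illustrative sketch (not part of the original file): a device model that
 * owns a RAM block may take the host pointer for its own local accesses, as
 * the comment above allows.  The offset is hypothetical and would come from
 * qemu_ram_alloc(). */
#if 0
static void example_fill_vram(ram_addr_t vram_offset)
{
    uint8_t *p = qemu_get_ram_ptr(vram_offset);

    memset(p, 0, TARGET_PAGE_SIZE);   /* device-local access only */
    qemu_put_ram_ptr(p);
}
#endif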

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
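
/* Illustrative sketch (not part of the original file): for any address
 * inside a mapped, non-Xen block, the two translations above are inverses
 * of each other.  The offset is hypothetical. */
#if 0
static void example_round_trip(ram_addr_t some_offset)
{
    void *host = qemu_get_ram_ptr(some_offset);
    ram_addr_t back;

    assert(qemu_ram_addr_from_host(host, &back) == 0);
    assert(back == some_offset);
}
#endif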

static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
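
/* Illustrative sketch (not part of the original file): the watchpoints that
 * check_watchpoint() scans are installed with cpu_watchpoint_insert().  The
 * address and length below are hypothetical. */
#if 0
static void example_watch_write(CPUArchState *env)
{
    CPUWatchpoint *wp;

    /* Trap any write to the 4 bytes at 0x1000; the TLB tricks route such
     * accesses through watch_mem_write(), which calls check_watchpoint(). */
    cpu_watchpoint_insert(env, 0x1000, 4, BP_MEM_WRITE | BP_GDB, &wp);
}
#endif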

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, target_phys_addr_t addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx
           " idx %d value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return stb_p(ptr, value);
    case 2: return stw_p(ptr, value);
    case 4: return stl_p(ptr, value);
    default: abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(target_phys_addr_t base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
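
/* Illustrative sketch (not part of the original file): subpages come into
 * play when a MemoryRegionSection covers only part of a target page.  The
 * base address and the reuse of phys_section_unassigned as a stand-in
 * section index are hypothetical. */
#if 0
static void example_subpage(void)
{
    /* Route bytes 0x100..0x2ff of the page at 0x10000 to a hypothetical
     * section; every other byte keeps the unassigned default installed
     * by subpage_init(). */
    subpage_t *mmio = subpage_init(0x10000);
    uint16_t some_section = phys_section_unassigned;

    subpage_register(mmio, 0x100, 0x2ff, some_section);
}
#endif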

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(target_phys_addr_t index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void core_begin(MemoryListener *listener)
{
    destroy_all_mappings();
    phys_sections_clear();
    phys_map.ptr = PHYS_MAP_NODE_NIL;
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void core_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
{
}

static void core_region_nop(MemoryListener *listener,
                            MemoryRegionSection *section)
{
    cpu_register_physical_memory_log(section, section->readonly);
}

static void core_log_start(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void core_log_stop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void core_eventfd_add(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void core_eventfd_del(MemoryListener *listener,
                             MemoryRegionSection *section,
                             bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_begin(MemoryListener *listener)
{
}

static void io_commit(MemoryListener *listener)
{
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static void io_region_nop(MemoryListener *listener,
                          MemoryRegionSection *section)
{
}

static void io_log_start(MemoryListener *listener,
                         MemoryRegionSection *section)
{
}

static void io_log_stop(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_sync(MemoryListener *listener,
                        MemoryRegionSection *section)
{
}

static void io_log_global_start(MemoryListener *listener)
{
}

static void io_log_global_stop(MemoryListener *listener)
{
}

static void io_eventfd_add(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static void io_eventfd_del(MemoryListener *listener,
                           MemoryRegionSection *section,
                           bool match_data, uint64_t data, EventNotifier *e)
{
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .commit = core_commit,
    .region_add = core_region_add,
    .region_del = core_region_del,
    .region_nop = core_region_nop,
    .log_start = core_log_start,
    .log_stop = core_log_stop,
    .log_sync = core_log_sync,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .eventfd_add = core_eventfd_add,
    .eventfd_del = core_eventfd_del,
    .priority = 0,
};

static MemoryListener io_memory_listener = {
    .begin = io_begin,
    .commit = io_commit,
    .region_add = io_region_add,
    .region_del = io_region_del,
    .region_nop = io_region_nop,
    .log_start = io_log_start,
    .log_stop = io_log_stop,
    .log_sync = io_log_sync,
    .log_global_start = io_log_global_start,
    .log_global_stop = io_log_global_stop,
    .eventfd_add = io_eventfd_add,
    .eventfd_del = io_eventfd_del,
    .priority = 0,
};

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    set_system_memory_map(system_memory);

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    set_system_io_map(system_io);

    memory_listener_register(&core_memory_listener, system_memory);
    memory_listener_register(&io_memory_listener, system_io);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}
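
/* Illustrative sketch (not part of the original file): board code retrieves
 * the root regions created in memory_map_init() and hangs guest-visible
 * memory off them.  The RAM size is hypothetical, and memory_region_init_ram()
 * is assumed to have its (mr, name, size) signature of this era. */
#if 0
static void example_board_init(void)
{
    MemoryRegion *sysmem = get_system_memory();
    static MemoryRegion ram;

    memory_region_init_ram(&ram, "example.ram", 128 * 1024 * 1024);
    memory_region_add_subregion(sysmem, 0, &ram);
}
#endif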

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                target_phys_addr_t addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                target_phys_addr_t addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
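
/* Illustrative sketch (not part of the original file): the common wrappers
 * cpu_physical_memory_read()/write() funnel into cpu_physical_memory_rw();
 * the guest addresses below are hypothetical. */
#if 0
static void example_dma_copy(void)
{
    uint8_t buf[64];

    cpu_physical_memory_read(0x1000, buf, sizeof(buf));   /* guest -> host */
    cpu_physical_memory_write(0x2000, buf, sizeof(buf));  /* host -> guest */
}
#endif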

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
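
/* Illustrative sketch (not part of the original file): when
 * cpu_physical_memory_map() returns NULL because the single bounce buffer
 * is busy, a caller can register a callback and retry once a mapping is
 * released.  ExampleDev and example_dev_kick() are hypothetical. */
#if 0
static void example_map_retry(void *opaque)
{
    /* Invoked from cpu_notify_map_clients() when the bounce buffer frees
     * up; the client is unregistered right after this callback returns. */
    ExampleDev *dev = opaque;       /* hypothetical device state */
    example_dev_kick(dev);          /* hypothetical: retry the DMA */
}

static void example_try_map(ExampleDev *dev, target_phys_addr_t addr,
                            target_phys_addr_t len)
{
    void *p = cpu_physical_memory_map(addr, &len, 1);
    if (!p) {
        cpu_register_map_client(dev, example_map_retry);
        return;
    }
    /* ... use p for up to len bytes, then: */
    cpu_physical_memory_unmap(p, len, 1, len);
}
#endif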

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        section = phys_page_find(page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
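
/* Illustrative sketch (not part of the original file): because the map call
 * may shorten *plen (one bounce-buffered page at a time for MMIO), robust
 * callers loop until the whole transfer is done.  Addresses are
 * hypothetical. */
#if 0
static void example_write_all(target_phys_addr_t addr,
                              const uint8_t *buf, target_phys_addr_t len)
{
    while (len) {
        target_phys_addr_t l = len;
        void *p = cpu_physical_memory_map(addr, &l, 1);
        if (!p) {
            break;               /* bounce buffer busy; see map clients */
        }
        memcpy(p, buf, l);
        cpu_physical_memory_unmap(p, l, 1, l);
        addr += l;
        buf += l;
        len -= l;
    }
}
#endif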

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
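
/* Illustrative sketch (not part of the original file): a device model
 * reading a little-endian descriptor field from guest memory picks the
 * _le_ variant so the result is host-order regardless of the target's
 * endianness.  The descriptor layout is hypothetical. */
#if 0
static uint32_t example_read_desc_len(target_phys_addr_t desc)
{
    return ldl_le_phys(desc + 4);   /* hypothetical 32-bit length field */
}
#endif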
3751
/* warning: addr must be 64-bit aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be 16-bit aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be 32-bit aligned.  The RAM page is not marked as
   dirty and the code inside is not invalidated.  This is useful when
   the dirty bits are used to track guest modifications of PTEs. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

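/* Illustrative only: the kind of caller the _notdirty stores exist
 * for.  When a softmmu target sets the accessed/dirty flags in a guest
 * PTE during a page-table walk, the store must not dirty the page
 * itself, so that the dirty bits keep reflecting modifications made by
 * the guest.  The PTE layout and flag bit here are hypothetical. */
static void __attribute__((unused))
pte_set_flag_example(target_phys_addr_t pte_addr, uint32_t flag_bit)
{
    uint32_t pte = ldl_phys(pte_addr);

    if (!(pte & flag_bit)) {
        stl_phys_notdirty(pte_addr, pte | flag_bit);
    }
}
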
/* warning: addr must be 32-bit aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

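/* Illustrative only: a device model completing a request writes its
 * status word back to guest memory in the byte order its hardware spec
 * defines, independent of host and target endianness.  The function
 * name, address, and status value are hypothetical. */
static void __attribute__((unused))
complete_example_request(target_phys_addr_t status_addr)
{
    stl_le_phys(status_addr, 1);    /* hypothetical "done" status */
}
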
/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be 16-bit aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
#endif

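/* Illustrative only: how a debugger front end (e.g. a gdb stub) might
 * fetch a 32-bit word at a guest *virtual* address.  The MMU walk and
 * any page-boundary split happen inside cpu_memory_rw_debug(); the raw
 * bytes come back uninterpreted.  The helper name is hypothetical. */
static int __attribute__((unused))
debug_read_u32(CPUArchState *env, target_ulong vaddr, uint32_t *valp)
{
    return cpu_memory_rw_debug(env, vaddr, (uint8_t *)valp,
                               sizeof(*valp), 0);
}
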
/* In deterministic execution mode (-icount), an instruction that does
   device I/O must be the last one in its TB, so that the instruction
   count is exact at the point the I/O side effect becomes visible.
   When an I/O access is detected mid-TB, this regenerates the TB to
   end on that instruction and restarts execution there. */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                    (double) (code_gen_ptr - code_gen_buffer) / target_code_size
                    : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

/*
 * A helper function for the _utterly broken_ virtio device model to find out if
 * it's running on a big endian machine. Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
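
/* Illustrative only: a guest-RAM dumper can use the predicate above to
 * skip device windows, since reading I/O regions may trigger side
 * effects.  The helper name is hypothetical. */
static bool __attribute__((unused))
dump_should_skip_page(target_phys_addr_t addr)
{
    return cpu_physical_memory_is_io(addr & TARGET_PAGE_MASK);
}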
#endif