/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "dma.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#include "memory-internal.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

uint8_t *code_gen_prologue;
static uint8_t *code_gen_buffer;
static size_t code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static size_t code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;
DMAContext dma_context_memory;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)
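
/* Example: for a target configured with TARGET_PHYS_ADDR_SPACE_BITS == 64
   and TARGET_PAGE_BITS == 12 (4KB pages), the page index has 52
   significant bits, so P_L2_LEVELS == ((64 - 12 - 1) / 10) + 1 == 6:
   six levels of 10-bit tables cover all 52 bits.  */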

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
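
/* Worked example, assuming L1_MAP_ADDR_SPACE_BITS == 47 and 4KB pages:
   there are 35 page-index bits; 35 % 10 == 5, which is >= 4, so
   V_L1_BITS == 5, V_L1_SIZE == 32 and V_L1_SHIFT == 30.  A page index is
   then consumed as one 5-bit l1_map index followed by three full 10-bit
   levels (5 + 10 + 10 + 10 == 35).  */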

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)
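
/* PHYS_MAP_NODE_NIL is 0x7fff, the largest value that fits in the 15-bit
   'ptr' bitfield of PhysPageEntry; it marks an entry that does not yet
   point at an allocated node or section.  */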

static void io_mem_init(void);
static void memory_map_init(void);
static void *qemu_safe_ram_ptr(ram_addr_t addr);

static MemoryRegion io_mem_watch;
#endif
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2);

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static inline void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static inline void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
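
/* Example: with 4KB host pages, map_exec((void *)0x12345, 100) widens the
   range to [0x12000, 0x13000) before calling mprotect(), since protection
   can only be changed at page granularity.  */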
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
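
/* Worked example, assuming V_L1_SHIFT == 30 and L2_BITS == 10: in
   page_find_alloc(), bits [34:30] of the page index select the l1_map
   slot, the Level 2..N-1 loop consumes bits [29:20] and [19:10] to reach
   the bottom table, and bits [9:0] select the PageDesc inside it.  */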

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, hwaddr *index,
                                hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}
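
/* Illustrative call (hypothetical values): registering a 1MB region
   starting at guest physical address 0x100000 under section index 'sec',
   with 4KB pages, would be:

       phys_page_set(d, 0x100000 >> TARGET_PAGE_BITS, 256, sec);

   phys_page_set_level() writes one leaf entry per run of pages that is
   aligned to (and at least as large as) a level's step, recursing to
   lower levels only for the unaligned remainder.  */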

MemoryRegionSection *phys_page_find(AddressSpaceDispatch *d, hwaddr index)
{
    PhysPageEntry lp = d->phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}
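
/* Lookups that fall off the tree (a NIL pointer at any level) land on
   phys_section_unassigned, so callers always get a valid
   MemoryRegionSection back and never need a NULL check.  */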

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
#endif

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode; this will change when a dedicated libc is used.  */
/* ??? 64-bit hosts ought to have no problem mmaping data outside the
   region in which the guest needs to run.  Revisit this.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

/* ??? Should configure for this, not list operating systems here.  */
#if (defined(__linux__) \
    || defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__))
# define USE_MMAP
#endif

/* Minimum size of the code gen buffer.  This number is randomly chosen,
   but not so small that we can't have a fair number of TB's live.  */
#define MIN_CODE_GEN_BUFFER_SIZE     (1024u * 1024)

/* Maximum size of the code gen buffer we'd like to use.  Unless otherwise
   indicated, this is constrained by the range of direct branches on the
   host cpu, as used by the TCG implementation of goto_tb.  */
#if defined(__x86_64__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__sparc__)
# define MAX_CODE_GEN_BUFFER_SIZE  (2ul * 1024 * 1024 * 1024)
#elif defined(__arm__)
# define MAX_CODE_GEN_BUFFER_SIZE  (16u * 1024 * 1024)
#elif defined(__s390x__)
  /* We have a +- 4GB range on the branches; leave some slop.  */
# define MAX_CODE_GEN_BUFFER_SIZE  (3ul * 1024 * 1024 * 1024)
#else
# define MAX_CODE_GEN_BUFFER_SIZE  ((size_t)-1)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE_1 (32u * 1024 * 1024)

#define DEFAULT_CODE_GEN_BUFFER_SIZE \
  (DEFAULT_CODE_GEN_BUFFER_SIZE_1 < MAX_CODE_GEN_BUFFER_SIZE \
   ? DEFAULT_CODE_GEN_BUFFER_SIZE_1 : MAX_CODE_GEN_BUFFER_SIZE)

static inline size_t size_code_gen_buffer(size_t tb_size)
{
    /* Size the buffer.  */
    if (tb_size == 0) {
#ifdef USE_STATIC_CODE_GEN_BUFFER
        tb_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* ??? Needs adjustments.  */
        /* ??? If we relax the requirement that CONFIG_USER_ONLY use the
           static buffer, we could size this on RESERVED_VA, on the text
           segment size of the executable, or continue to use the default.  */
        tb_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (tb_size < MIN_CODE_GEN_BUFFER_SIZE) {
        tb_size = MIN_CODE_GEN_BUFFER_SIZE;
    }
    if (tb_size > MAX_CODE_GEN_BUFFER_SIZE) {
        tb_size = MAX_CODE_GEN_BUFFER_SIZE;
    }
    code_gen_buffer_size = tb_size;
    return tb_size;
}
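
/* Example: on a 32-bit ARM host, MAX_CODE_GEN_BUFFER_SIZE is 16MB, so a
   requested 512MB translation buffer is silently clamped to 16MB, while a
   request of 0 selects DEFAULT_CODE_GEN_BUFFER_SIZE (itself capped by the
   same maximum).  */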

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
    __attribute__((aligned(CODE_GEN_ALIGN)));

static inline void *alloc_code_gen_buffer(void)
{
    map_exec(static_code_gen_buffer, code_gen_buffer_size);
    return static_code_gen_buffer;
}
#elif defined(USE_MMAP)
static inline void *alloc_code_gen_buffer(void)
{
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    uintptr_t start = 0;
    void *buf;

    /* Constrain the position of the buffer based on the host cpu.
       Note that these addresses are chosen in concert with the
       addresses assigned in the relevant linker script file.  */
# if defined(__PIE__) || defined(__PIC__)
    /* Don't bother setting a preferred location if we're building
       a position-independent executable.  We're more likely to get
       an address near the main executable if we let the kernel
       choose the address.  */
# elif defined(__x86_64__) && defined(MAP_32BIT)
    /* Force the memory down into low memory with the executable.
       Leave the choice of exact location with the kernel.  */
    flags |= MAP_32BIT;
    /* Cannot expect to map more than 800MB in low memory.  */
    if (code_gen_buffer_size > 800u * 1024 * 1024) {
        code_gen_buffer_size = 800u * 1024 * 1024;
    }
# elif defined(__sparc__)
    start = 0x40000000ul;
# elif defined(__s390x__)
    start = 0x90000000ul;
# endif

    buf = mmap((void *)start, code_gen_buffer_size,
               PROT_WRITE | PROT_READ | PROT_EXEC, flags, -1, 0);
    return buf == MAP_FAILED ? NULL : buf;
}
#else
static inline void *alloc_code_gen_buffer(void)
{
    void *buf = g_malloc(code_gen_buffer_size);
    if (buf) {
        map_exec(buf, code_gen_buffer_size);
    }
    return buf;
}
#endif /* USE_STATIC_CODE_GEN_BUFFER, USE_MMAP */

static inline void code_gen_alloc(size_t tb_size)
{
    code_gen_buffer_size = size_code_gen_buffer(tb_size);
    code_gen_buffer = alloc_code_gen_buffer();
    if (code_gen_buffer == NULL) {
        fprintf(stderr, "Could not allocate dynamic translator buffer\n");
        exit(1);
    }

    /* Steal room for the prologue at the end of the buffer.  This ensures
       (via the MAX_CODE_GEN_BUFFER_SIZE limits above) that direct branches
       from TB's to the prologue are going to be in range.  It also means
       that we don't need to mark (additional) portions of the data segment
       as executable.  */
    code_gen_prologue = code_gen_buffer + code_gen_buffer_size - 1024;
    code_gen_buffer_size -= 1024;

    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
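
/* Resulting layout (not to scale):

       code_gen_buffer                                          prologue
       |<-------- code_gen_buffer_max_size -------->|<- slack ->|<-1024->|
       ^-- code_gen_ptr advances this way

   The slack of TCG_MAX_OP_SIZE * OPC_BUF_SIZE bytes ensures that a TB
   whose translation starts below the max-size threshold cannot overrun
   the buffer even in the worst case.  */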

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUArchState *env)
{
#ifndef CONFIG_USER_ONLY
    CPUState *cpu = ENV_GET_CPU(env);
#endif
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof(void *));
    }

    memset(tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof(void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for (i = 0; i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for (tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for (;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for (;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}
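
/* The jmp_first/jmp_next lists store tagged pointers: the low two bits of
   each TranslationBlock pointer record which jump slot (0 or 1) of the
   pointing TB is involved, and the value 2 tags the list head (the TB
   itself).  This is why the traversals in tb_jmp_remove() and
   tb_phys_invalidate() mask with ~3 and stop when n1 == 2.  */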

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the per-CPU jump caches */
    h = tb_jmp_cache_hash_func(tb->pc);
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for (;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}
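
/* Example: set_bits(tab, 3, 7) marks bits 3..9: 0xf8 is OR'ed into tab[0]
   (bits 3..7) and 0x03 into tab[1] (bits 8..9).  */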

static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

TranslationBlock *tb_gen_code(CPUArchState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info.  */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end may refer to *different* physical pages.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access)
{
    while (start < end) {
        tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
        start &= TARGET_PAGE_MASK;
        start += TARGET_PAGE_SIZE;
    }
}

/*
 * Invalidate all TBs which intersect with the target physical address range
 * [start;end[. NOTE: start and end must refer to the *same* physical page.
 * 'is_cpu_write_access' should be true if called from a real cpu write
 * access: the virtual CPU will exit the current TB if code is modified inside
 * this TB.
 */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUArchState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end[ */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip +
                 (intptr_t)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
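
/* Example: a 4-byte write at page offset 0x44 checks bits 4..7 of
   code_bitmap[8] (the bitmap holds one bit per byte of the page); the
   slow invalidate path runs only if one of bytes 0x44..0x47 belongs to a
   translated block.  */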

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    uintptr_t pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUArchState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory; this ensures that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non-writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for (addr = page_addr; addr < page_addr + qemu_host_page_size;
             addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
static void tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
                         tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done. */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}
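
/* Sketch of the pointer-tagging convention used above (illustrative):
   since TranslationBlock pointers are at least 4-byte aligned, the low
   two bits are free to carry metadata.  In the page and jump lists the
   tag n (0 or 1) records which of the TB's two pages the link came from,
   and the value 2 marks the end of a circular jump list:

       p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);  // store tag
       n  = (uintptr_t)tb & 3;                                 // recover tag
       tb = (TranslationBlock *)((uintptr_t)tb & ~3);          // recover ptr
*/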

#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
/* check whether the given addr is in TCG generated code buffer or not */
bool is_tcg_gen_code(uintptr_t tc_ptr)
{
    /* This can be called during code generation, so
       code_gen_buffer_max_size is used instead of code_gen_ptr
       for upper boundary checking */
    return (tc_ptr >= (uintptr_t)code_gen_buffer &&
            tc_ptr < (uintptr_t)(code_gen_buffer + code_gen_buffer_max_size));
}
#endif

/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
   tb[1].tc_ptr. Return NULL if not found */
TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
{
    int m_min, m_max, m;
    uintptr_t v;
    TranslationBlock *tb;

    if (nb_tbs <= 0)
        return NULL;
    if (tc_ptr < (uintptr_t)code_gen_buffer ||
        tc_ptr >= (uintptr_t)code_gen_ptr) {
        return NULL;
    }
    /* binary search (cf Knuth) */
    m_min = 0;
    m_max = nb_tbs - 1;
    while (m_min <= m_max) {
        m = (m_min + m_max) >> 1;
        tb = &tbs[m];
        v = (uintptr_t)tb->tc_ptr;
        if (v == tc_ptr)
            return tb;
        else if (tc_ptr < v) {
            m_max = m - 1;
        } else {
            m_min = m + 1;
        }
    }
    return &tbs[m_max];
}
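
/* Illustrative example of the invariant behind tb_find_pc(): tbs[] is
   ordered by tc_ptr because generated code is only ever appended at
   code_gen_ptr, so the binary search converges with tbs[m_max] being the
   last TB whose tc_ptr is <= tc_ptr.  E.g. (hypothetical addresses) with
   tc_ptrs {0x1000, 0x1400, 0x2000}, a host PC of 0x1420 returns the TB
   starting at 0x1400, the one containing it. */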

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for (;;) {
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for (;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
void tb_invalidate_phys_addr(hwaddr addr)
{
    ram_addr_t ram_addr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);
    if (!(memory_region_is_ram(section->mr)
          || (section->mr->rom_device && section->mr->readable))) {
        return;
    }
    ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
        + memory_region_section_addr(section, addr);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}

static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
{
    tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
                            (pc & ~TARGET_PAGE_MASK));
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len & (len - 1)) || (addr & ~len_mask) ||
            len == 0 || len > TARGET_PAGE_SIZE) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
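
/* Hedged usage sketch (hypothetical values): a GDB stub watching 4 bytes
   at guest address 0x1000 for writes would do roughly

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, 0x1000, 4,
                                 BP_GDB | BP_MEM_WRITE, &wp) < 0) {
           // rejected: len not a power of two, addr not len-aligned,
           // or len larger than TARGET_PAGE_SIZE
       }

   len_mask = ~(len - 1) is what makes the alignment test cheap: for
   len == 4 it is ~3, so (addr & ~len_mask) != 0 exactly when addr is not
   4-byte aligned. */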

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUArchState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

static void cpu_unlink_tb(CPUArchState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUArchState *env, int mask)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUArchState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

void cpu_abort(CPUArchState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(env, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(env, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUArchState *cpu_copy(CPUArchState *env)
{
    CPUArchState *new_env = cpu_init(env->cpu_model_str);
    CPUArchState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUArchState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       The copy must start with empty lists: the memcpy above duplicated
       the parent's list heads, and re-initializing env's own lists here
       would both corrupt the parent and make the loops below no-ops.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&new_env->breakpoints);
    QTAILQ_INIT(&new_env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)
void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t end,
                                      uintptr_t length)
{
    uintptr_t start1;

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (uintptr_t)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
            != (end - 1) - start) {
        abort();
    }
    cpu_tlb_reset_dirty_all(start1, length);
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    uintptr_t length;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, end, length);
    }
}
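
/* Illustrative example: a RAM migration pass that has just sent the pages
   in [start, end) would clear their dirty bit with, roughly,

       cpu_physical_memory_reset_dirty(start, end, MIGRATION_DIRTY_FLAG);

   (MIGRATION_DIRTY_FLAG being one of the dirty-flag bits defined
   elsewhere in this tree).  Under TCG this also pushes the affected TLB
   entries back onto the slow write path so the next store sets the bit
   again; both addresses must lie in the same RAM block, as noted above. */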

static int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    return ret;
}

hwaddr memory_region_section_get_iotlb(CPUArchState *env,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, paddr);
        if (!section->readonly) {
            iotlb |= phys_section_notdirty;
        } else {
            iotlb |= phys_section_rom;
        }
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = section - phys_sections;
        iotlb += memory_region_section_addr(section, paddr);
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = phys_section_watch + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
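
/* Informal summary of the encoding above: for RAM pages the returned
   iotlb is essentially a ram_addr_t with the phys_section_notdirty or
   phys_section_rom section index folded into the low bits, while for MMIO
   it is an index into phys_sections[] plus the offset inside the region.
   Pages with a matching watchpoint are instead pointed at
   phys_section_watch and tagged TLB_MMIO so that every access traps into
   the watchpoint handlers. */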

#else
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    uintptr_t start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    uintptr_t i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}
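
/* Hedged usage sketch: a caller counts executable regions roughly like

       static int count_exec(void *priv, abi_ulong start,
                             abi_ulong end, unsigned long prot)
       {
           if (prot & PAGE_EXEC) {
               ++*(int *)priv;
           }
           return 0;     // returning non-zero stops the walk early
       }

       int n = 0;
       walk_memory_regions(&n, count_exec);

   dump_region() below is the in-tree user of the same pattern. */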

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return 0;
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
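
/* Illustrative example: the user-mode mmap()/mprotect() emulation marks a
   freshly mapped read-write region with, roughly,

       page_set_flags(start, start + len,
                      PAGE_VALID | PAGE_READ | PAGE_WRITE);

   If the SMC machinery later clears PAGE_WRITE to protect translated
   code, the original permission survives in PAGE_WRITE_ORG so that
   page_unprotect() can restore it. */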

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around.  */
        return -1;
    }

    /* must do this before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            /* do not return early here: every page in the range must
               pass the write checks, not just the first one */
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page.  Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
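
/* Informal sketch of the self-modifying-code round trip this completes:
   tb_alloc_page() mprotect()s the host page read-only when a TB is placed
   in it; a later guest store faults; the SEGV handler calls
   page_unprotect(), which invalidates the page's TBs and restores
   PAGE_WRITE; the faulting store is then restarted and succeeds. */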
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section);
static subpage_t *subpage_init(hwaddr base);

static void destroy_page_desc(uint16_t section_index)
{
    MemoryRegionSection *section = &phys_sections[section_index];
    MemoryRegion *mr = section->mr;

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        memory_region_destroy(&subpage->iomem);
        g_free(subpage);
    }
}

static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
{
    unsigned i;
    PhysPageEntry *p;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = phys_map_nodes[lp->ptr];
    for (i = 0; i < L2_SIZE; ++i) {
        if (!p[i].is_leaf) {
            destroy_l2_mapping(&p[i], level - 1);
        } else {
            destroy_page_desc(p[i].ptr);
        }
    }
    lp->is_leaf = 0;
    lp->ptr = PHYS_MAP_NODE_NIL;
}

static void destroy_all_mappings(AddressSpaceDispatch *d)
{
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    phys_map_nodes_reset();
}

static uint16_t phys_section_add(MemoryRegionSection *section)
{
    if (phys_sections_nb == phys_sections_nb_alloc) {
        phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
        phys_sections = g_renew(MemoryRegionSection, phys_sections,
                                phys_sections_nb_alloc);
    }
    phys_sections[phys_sections_nb] = *section;
    return phys_sections_nb++;
}

static void phys_sections_clear(void)
{
    phys_sections_nb = 0;
}

static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d, base >> TARGET_PAGE_BITS);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = TARGET_PAGE_SIZE,
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(base);
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + section->size - 1;
    subpage_register(subpage, start, end, phys_section_add(section));
}

static void register_multipage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    hwaddr addr;
    uint16_t section_index = phys_section_add(section);

    assert(size);

    addr = start_addr;
    phys_page_set(d, addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
                  section_index);
}

static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);
    MemoryRegionSection now = *section, remain = *section;

    if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
        || (now.size < TARGET_PAGE_SIZE)) {
        now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
                       - now.offset_within_address_space,
                       now.size);
        register_subpage(d, &now);
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    while (remain.size >= TARGET_PAGE_SIZE) {
        now = remain;
        if (remain.offset_within_region & ~TARGET_PAGE_MASK) {
            now.size = TARGET_PAGE_SIZE;
            register_subpage(d, &now);
        } else {
            now.size &= TARGET_PAGE_MASK;
            register_multipage(d, &now);
        }
        remain.size -= now.size;
        remain.offset_within_address_space += now.size;
        remain.offset_within_region += now.size;
    }
    now = remain;
    if (now.size) {
        register_subpage(d, &now);
    }
}
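
/* Worked example for mem_add() (hypothetical numbers, 4K target pages):
   a section covering [0x1800, 0x4400) is split into a subpage for
   [0x1800, 0x2000), whole pages for [0x2000, 0x4000) registered via
   register_multipage(), and a trailing subpage for [0x4000, 0x4400).
   The middle loop also falls back to per-page subpages when
   offset_within_region is not page-aligned. */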

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}
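
/* Illustrative example (hypothetical offsets): with existing blocks at
   [0, 0x20000000) and [0x40000000, 0x50000000), a request for 0x10000000
   bytes returns 0x20000000 -- the end of the block whose following gap is
   the smallest one still big enough (a best-fit search over ram_addr_t
   space, not over host memory). */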

ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        last = MAX(last, block->offset + block->length);
    }

    return last;
}

static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
{
    int ret;
    QemuOpts *machine_opts;

    /* Use MADV_DONTDUMP, if user doesn't want the guest memory in the core */
    machine_opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (machine_opts &&
        !qemu_opt_get_bool(machine_opts, "dump-guest-core", true)) {
        ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
                    "but dump_guest_core=off specified\n");
        }
    }
}
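
/*
 * Illustrative usage (assumed command line, not from this file): the
 * madvise in qemu_ram_setup_dump() above is driven by the -machine
 * option, e.g.
 *
 *   qemu-system-x86_64 -machine pc,dump-guest-core=off -m 4096 ...
 *
 * which keeps the guest's RAM out of any core dump QEMU itself produces.
 */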

void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
{
    RAMBlock *new_block, *block;

    new_block = NULL;
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block->offset == addr) {
            new_block = block;
            break;
        }
    }
    assert(new_block);
    assert(!new_block->idstr[0]);

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }
}
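
/*
 * Illustrative example (hypothetical values): a block named "pc.ram" with
 * no owning device keeps the idstr "pc.ram"; for a ROM owned by a PCI
 * device, qdev_get_dev_path() contributes a prefix, giving something like
 * "0000:00:03.0/e1000.rom".  The idstr is what migration uses to match
 * RAM blocks between source and destination.
 */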

static int memory_try_enable_merging(void *addr, size_t len)
{
    QemuOpts *opts;

    opts = qemu_opts_find(qemu_find_opts("machine"), 0);
    if (opts && !qemu_opt_get_bool(opts, "mem-merge", true)) {
        /* disabled by the user */
        return 0;
    }

    return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
}
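
/*
 * Illustrative usage (assumed command line): KSM page merging is on by
 * default and can be disabled per guest with
 *
 *   qemu-system-x86_64 -machine pc,mem-merge=off ...
 *
 * in which case this helper returns 0 without calling qemu_madvise().
 */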

ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
                                   MemoryRegion *mr)
{
    RAMBlock *new_block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    new_block->mr = mr;
    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                memory_try_enable_merging(new_block->host, size);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size, mr);
            } else if (kvm_enabled()) {
                /* some s390/kvm configurations have special constraints */
                new_block->host = kvm_vmalloc(size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
            memory_try_enable_merging(new_block->host, size);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0, size >> TARGET_PAGE_BITS);
    cpu_physical_memory_set_dirty_range(new_block->offset, size, 0xff);

    qemu_ram_setup_dump(new_block->host, size);
    qemu_madvise(new_block->host, size, QEMU_MADV_HUGEPAGE);

    if (kvm_enabled()) {
        kvm_setup_guest_memory(new_block->host, size);
    }

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
{
    return qemu_ram_alloc_from_ptr(size, NULL, mr);
}
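
/*
 * Minimal usage sketch (illustrative; in the real tree the caller is
 * memory_region_init_ram() in memory.c, and the "vga.vram" name and size
 * are hypothetical):
 *
 *   MemoryRegion *mr = g_malloc0(sizeof(*mr));
 *   memory_region_init(mr, "vga.vram", 16 * 1024 * 1024);
 *   mr->ram_addr = qemu_ram_alloc(16 * 1024 * 1024, mr);
 *
 * The returned offset is a ram_addr_t handle, not a host pointer; use
 * qemu_get_ram_ptr() to obtain the host mapping.
 */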

void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                memory_try_enable_merging(vaddr, length);
                qemu_ram_setup_dump(vaddr, length);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
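
/*
 * The QLIST_REMOVE/QLIST_INSERT_HEAD shuffle above is a small MRU
 * optimization: lookups are linear, and most accesses hit the same large
 * block (e.g. main RAM), so keeping the last match first makes the common
 * case cheap.  Illustrative use (a sketch; `block_offset` and the boot
 * sector are hypothetical):
 *
 *   void *host = qemu_get_ram_ptr(block_offset);
 *   memcpy(host, boot_sector, 512);    // device-owned RAM only
 */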

/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
static void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
static void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length) {
                    *size = block->length - addr + block->offset;
                }
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case happens when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}
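
/*
 * Illustrative round trip (a sketch; assumes `addr` is a valid offset
 * previously returned by qemu_ram_alloc()):
 *
 *   void *host = qemu_get_ram_ptr(addr);
 *   assert(qemu_ram_addr_from_host_nofail(host) == addr);
 *
 * The _nofail variant aborts on pointers outside guest RAM, e.g. a
 * pointer into QEMU's own heap.
 */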

static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
#endif
    return 0;
}

static void unassigned_mem_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
#endif
}

static const MemoryRegionOps unassigned_mem_ops = {
    .read = unassigned_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t error_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    abort();
}

static void error_mem_write(void *opaque, hwaddr addr,
                            uint64_t value, unsigned size)
{
    abort();
}

static const MemoryRegionOps error_mem_ops = {
    .read = error_mem_read,
    .write = error_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static const MemoryRegionOps rom_mem_ops = {
    .read = error_mem_read,
    .write = unassigned_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
                               uint64_t val, unsigned size)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, size);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    switch (size) {
    case 1:
        stb_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 2:
        stw_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    case 4:
        stl_p(qemu_get_ram_ptr(ram_addr), val);
        break;
    default:
        abort();
    }
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff) {
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
    }
}

static const MemoryRegionOps notdirty_mem_ops = {
    .read = error_mem_read,
    .write = notdirty_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Generate a debug exception if a watchpoint has been hit.  */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUArchState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                    cpu_loop_exit(env);
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                    cpu_resume_from_signal(env, NULL);
                }
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines.  */
static uint64_t watch_mem_read(void *opaque, hwaddr addr,
                               unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
    switch (size) {
    case 1: return ldub_phys(addr);
    case 2: return lduw_phys(addr);
    case 4: return ldl_phys(addr);
    default: abort();
    }
}

static void watch_mem_write(void *opaque, hwaddr addr,
                            uint64_t val, unsigned size)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
    switch (size) {
    case 1:
        stb_phys(addr, val);
        break;
    case 2:
        stw_phys(addr, val);
        break;
    case 4:
        stl_phys(addr, val);
        break;
    default: abort();
    }
}

static const MemoryRegionOps watch_mem_ops = {
    .read = watch_mem_read,
    .write = watch_mem_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_read(void *opaque, hwaddr addr,
                             unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx " idx %u\n", __func__,
           mmio, len, addr, idx);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    return io_mem_read(section->mr, addr, len);
}

static void subpage_write(void *opaque, hwaddr addr,
                          uint64_t value, unsigned len)
{
    subpage_t *mmio = opaque;
    unsigned int idx = SUBPAGE_IDX(addr);
    MemoryRegionSection *section;
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %u addr " TARGET_FMT_plx
           " idx %u value %"PRIx64"\n",
           __func__, mmio, len, addr, idx, value);
#endif

    section = &phys_sections[mmio->sub_section[idx]];
    addr += mmio->base;
    addr -= section->offset_within_address_space;
    addr += section->offset_within_region;
    io_mem_write(section->mr, addr, value, len);
}

static const MemoryRegionOps subpage_ops = {
    .read = subpage_read,
    .write = subpage_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static uint64_t subpage_ram_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1: return ldub_p(ptr);
    case 2: return lduw_p(ptr);
    case 4: return ldl_p(ptr);
    default: abort();
    }
}

static void subpage_ram_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    ram_addr_t raddr = addr;
    void *ptr = qemu_get_ram_ptr(raddr);
    switch (size) {
    case 1:
        stb_p(ptr, value);
        break;
    case 2:
        stw_p(ptr, value);
        break;
    case 4:
        stl_p(ptr, value);
        break;
    default:
        abort();
    }
}

static const MemoryRegionOps subpage_ram_ops = {
    .read = subpage_ram_read,
    .write = subpage_ram_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            uint16_t section)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE) {
        return -1;
    }
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
           __func__, mmio, start, end, idx, eidx, section);
#endif
    if (memory_region_is_ram(phys_sections[section].mr)) {
        MemoryRegionSection new_section = phys_sections[section];
        new_section.mr = &io_mem_subpage_ram;
        section = phys_section_add(&new_section);
    }
    for (; idx <= eidx; idx++) {
        mmio->sub_section[idx] = section;
    }

    return 0;
}

static subpage_t *subpage_init(hwaddr base)
{
    subpage_t *mmio;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
                          "subpage", TARGET_PAGE_SIZE);
    mmio->iomem.subpage = true;
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
           mmio, base, TARGET_PAGE_SIZE);
#endif
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);

    return mmio;
}
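
/*
 * Illustrative example (hypothetical device layout): with 4 KB target
 * pages, a 32-byte MMIO region at offset 0x800 inside a page shares that
 * page with other content.  A call like
 *
 *   subpage_register(mmio, 0x800, 0x81f, mmio_section);   // indices assumed
 *
 * routes accesses to [0x800, 0x820) within the page to the device's
 * section, while the rest of the page keeps whatever section it already
 * had (initially phys_section_unassigned, per subpage_init() above).
 */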

static uint16_t dummy_section(MemoryRegion *mr)
{
    MemoryRegionSection section = {
        .mr = mr,
        .offset_within_address_space = 0,
        .offset_within_region = 0,
        .size = UINT64_MAX,
    };

    return phys_section_add(&section);
}

MemoryRegion *iotlb_to_region(hwaddr index)
{
    return phys_sections[index & ~TARGET_PAGE_MASK].mr;
}

static void io_mem_init(void)
{
    memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
    memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
    memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
                          "unassigned", UINT64_MAX);
    memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
                          "notdirty", UINT64_MAX);
    memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
                          "subpage-ram", UINT64_MAX);
    memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
                          "watch", UINT64_MAX);
}

static void mem_begin(MemoryListener *listener)
{
    AddressSpaceDispatch *d = container_of(listener, AddressSpaceDispatch, listener);

    destroy_all_mappings(d);
    d->phys_map.ptr = PHYS_MAP_NODE_NIL;
}

static void core_begin(MemoryListener *listener)
{
    phys_sections_clear();
    phys_section_unassigned = dummy_section(&io_mem_unassigned);
    phys_section_notdirty = dummy_section(&io_mem_notdirty);
    phys_section_rom = dummy_section(&io_mem_rom);
    phys_section_watch = dummy_section(&io_mem_watch);
}

static void tcg_commit(MemoryListener *listener)
{
    CPUArchState *env;

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

static void core_log_global_start(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(1);
}

static void core_log_global_stop(MemoryListener *listener)
{
    cpu_physical_memory_set_dirty_tracking(0);
}

static void io_region_add(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);

    mrio->mr = section->mr;
    mrio->offset = section->offset_within_region;
    iorange_init(&mrio->iorange, &memory_region_iorange_ops,
                 section->offset_within_address_space, section->size);
    ioport_register(&mrio->iorange);
}

static void io_region_del(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    isa_unassign_ioport(section->offset_within_address_space, section->size);
}

static MemoryListener core_memory_listener = {
    .begin = core_begin,
    .log_global_start = core_log_global_start,
    .log_global_stop = core_log_global_stop,
    .priority = 1,
};

static MemoryListener io_memory_listener = {
    .region_add = io_region_add,
    .region_del = io_region_del,
    .priority = 0,
};

static MemoryListener tcg_memory_listener = {
    .commit = tcg_commit,
};

void address_space_init_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = g_new(AddressSpaceDispatch, 1);

    d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };
    d->listener = (MemoryListener) {
        .begin = mem_begin,
        .region_add = mem_add,
        .region_nop = mem_add,
        .priority = 0,
    };
    as->dispatch = d;
    memory_listener_register(&d->listener, as);
}

void address_space_destroy_dispatch(AddressSpace *as)
{
    AddressSpaceDispatch *d = as->dispatch;

    memory_listener_unregister(&d->listener);
    destroy_l2_mapping(&d->phys_map, P_L2_LEVELS - 1);
    g_free(d);
    as->dispatch = NULL;
}

static void memory_map_init(void)
{
    system_memory = g_malloc(sizeof(*system_memory));
    memory_region_init(system_memory, "system", INT64_MAX);
    address_space_init(&address_space_memory, system_memory);
    address_space_memory.name = "memory";

    system_io = g_malloc(sizeof(*system_io));
    memory_region_init(system_io, "io", 65536);
    address_space_init(&address_space_io, system_io);
    address_space_io.name = "I/O";

    memory_listener_register(&core_memory_listener, &address_space_memory);
    memory_listener_register(&io_memory_listener, &address_space_io);
    memory_listener_register(&tcg_memory_listener, &address_space_memory);

    dma_context_init(&dma_context_memory, &address_space_memory,
                     NULL, NULL, NULL);
}

MemoryRegion *get_system_memory(void)
{
    return system_memory;
}

MemoryRegion *get_system_io(void)
{
    return system_io;
}

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID)) {
            return -1;
        }
        if (is_write) {
            if (!(flags & PAGE_WRITE)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0))) {
                return -1;
            }
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ)) {
                return -1;
            }
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1))) {
                return -1;
            }
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else

static void invalidate_and_set_dirty(hwaddr addr,
                                     hwaddr length)
{
    if (!cpu_physical_memory_is_dirty(addr)) {
        /* invalidate code */
        tb_invalidate_phys_page_range(addr, addr + length, 0);
        /* set dirty bit */
        cpu_physical_memory_set_dirty_flags(addr, (0xff & ~CODE_DIRTY_FLAG));
    }
    xen_modified_memory(addr, length);
}

void address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
                      int len, bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    int l;
    uint8_t *ptr;
    uint32_t val;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (is_write) {
            if (!memory_region_is_ram(section->mr)) {
                hwaddr addr1;
                addr1 = memory_region_section_addr(section, addr);
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write(section->mr, addr1, val, 4);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write(section->mr, addr1, val, 2);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write(section->mr, addr1, val, 1);
                    l = 1;
                }
            } else if (!section->readonly) {
                ram_addr_t addr1;
                addr1 = memory_region_get_ram_addr(section->mr)
                    + memory_region_section_addr(section, addr);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                invalidate_and_set_dirty(addr1, l);
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if (!(memory_region_is_ram(section->mr) ||
                  memory_region_is_romd(section->mr))) {
                hwaddr addr1;
                /* I/O case */
                addr1 = memory_region_section_addr(section, addr);
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read(section->mr, addr1, 4);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read(section->mr, addr1, 2);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read(section->mr, addr1, 1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(section->mr->ram_addr
                                       + memory_region_section_addr(section,
                                                                    addr));
                memcpy(buf, ptr, l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

void address_space_write(AddressSpace *as, hwaddr addr,
                         const uint8_t *buf, int len)
{
    address_space_rw(as, addr, (uint8_t *)buf, len, true);
}

/**
 * address_space_read: read from an address space.
 *
 * @as: #AddressSpace to be accessed
 * @addr: address within that address space
 * @buf: buffer with the data transferred
 * @len: length of the data to read
 */
void address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len)
{
    address_space_rw(as, addr, buf, len, false);
}

void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
                            int len, int is_write)
{
    address_space_rw(&address_space_memory, addr, buf, len, is_write);
}
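
/*
 * Minimal usage sketch (illustrative; the guest physical address is
 * hypothetical): writing and then reading back four bytes of guest memory
 * through the slow path:
 *
 *   uint32_t v = 0xdeadbeef;
 *   cpu_physical_memory_rw(0x100000, (uint8_t *)&v, sizeof(v), 1);
 *   cpu_physical_memory_rw(0x100000, (uint8_t *)&v, sizeof(v), 0);
 *
 * Real callers pass a guest physical address they own, e.g. one taken
 * from a DMA descriptor.
 */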

/* used for ROM loading : can write in RAM and ROM */
void cpu_physical_memory_write_rom(hwaddr addr,
                                   const uint8_t *buf, int len)
{
    AddressSpaceDispatch *d = address_space_memory.dispatch;
    int l;
    uint8_t *ptr;
    hwaddr page;
    MemoryRegionSection *section;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) ||
              memory_region_is_romd(section->mr))) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            invalidate_and_set_dirty(addr1, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}

typedef struct {
    void *buffer;
    hwaddr addr;
    hwaddr len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

static void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}
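
/*
 * Illustrative retry pattern (a sketch; MyDMAState and my_dma_run are
 * hypothetical): only one bounce buffer exists, so a device whose
 * address_space_map() returned NULL can ask to be called back once the
 * buffer is released by address_space_unmap():
 *
 *   static void dma_retry_cb(void *opaque)
 *   {
 *       MyDMAState *s = opaque;
 *       my_dma_run(s);               // calls address_space_map() again
 *   }
 *   ...
 *   if (!ptr) {
 *       cpu_register_map_client(s, dma_retry_cb);
 *   }
 */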

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *address_space_map(AddressSpace *as,
                        hwaddr addr,
                        hwaddr *plen,
                        bool is_write)
{
    AddressSpaceDispatch *d = as->dispatch;
    hwaddr len = *plen;
    hwaddr todo = 0;
    int l;
    hwaddr page;
    MemoryRegionSection *section;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len) {
            l = len;
        }
        section = phys_page_find(d, page >> TARGET_PAGE_BITS);

        if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                address_space_read(as, addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = memory_region_get_ram_addr(section->mr)
                + memory_region_section_addr(section, addr);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by address_space_map().
 * Will also mark the memory as dirty if is_write == 1.  access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
                         int is_write, hwaddr access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len) {
                    l = access_len;
                }
                invalidate_and_set_dirty(addr1, l);
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        address_space_write(as, bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}

void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              int is_write)
{
    return address_space_map(&address_space_memory, addr, plen, is_write);
}

void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               int is_write, hwaddr access_len)
{
    address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
}
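
/*
 * Typical zero-copy pattern (illustrative; the descriptor address, length
 * and fd are hypothetical):
 *
 *   hwaddr len = 0x2000;
 *   void *p = cpu_physical_memory_map(desc_addr, &len, 1);
 *   if (p) {
 *       ssize_t n = read(fd, p, len);   // fill guest RAM directly
 *       cpu_physical_memory_unmap(p, len, 1, n > 0 ? n : 0);
 *   }
 *
 * Note that len may come back smaller than requested; callers must loop
 * or fall back to cpu_physical_memory_rw() for the remainder.
 */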

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint32_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 4);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(hwaddr addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
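
/* Illustrative sketch (editor's addition): the _le/_be variants pin the
 * byte order of the access regardless of the target's native order, which
 * is what device models with a fixed register layout want.  DESC_ADDR is
 * a hypothetical guest physical address of a little-endian descriptor.
 *
 *     uint32_t flags = ldl_le_phys(DESC_ADDR);      // always little-endian
 *     uint32_t raw   = ldl_phys(DESC_ADDR + 4);     // target-native order
 */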

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(hwaddr addr,
                                         enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);

        /* Split into two 4-byte reads composed in target-native order,
           then swap if the caller asked for the other device endianness
           (same pattern as the 4-byte accessors). */
#ifdef TARGET_WORDS_BIGENDIAN
        val = io_mem_read(section->mr, addr, 4) << 32;
        val |= io_mem_read(section->mr, addr + 4, 4);
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap64(val);
        }
#else
        val = io_mem_read(section->mr, addr, 4);
        val |= io_mem_read(section->mr, addr + 4, 4) << 32;
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap64(val);
        }
#endif
3751 } else {
3752 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003753 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003754 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003755 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003756 switch (endian) {
3757 case DEVICE_LITTLE_ENDIAN:
3758 val = ldq_le_p(ptr);
3759 break;
3760 case DEVICE_BIG_ENDIAN:
3761 val = ldq_be_p(ptr);
3762 break;
3763 default:
3764 val = ldq_p(ptr);
3765 break;
3766 }
bellard84b7b8e2005-11-28 21:19:04 +00003767 }
3768 return val;
3769}
3770
Avi Kivitya8170e52012-10-23 12:30:10 +02003771uint64_t ldq_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003772{
3773 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3774}
3775
Avi Kivitya8170e52012-10-23 12:30:10 +02003776uint64_t ldq_le_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003777{
3778 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3779}
3780
Avi Kivitya8170e52012-10-23 12:30:10 +02003781uint64_t ldq_be_phys(hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003782{
3783 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3784}
3785
/* XXX: optimize */
uint32_t ldub_phys(hwaddr addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(hwaddr addr,
                                          enum device_endian endian)
{
    uint8_t *ptr;
    uint64_t val;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!(memory_region_is_ram(section->mr) ||
          memory_region_is_romd(section->mr))) {
        /* I/O case */
        addr = memory_region_section_addr(section, addr);
        val = io_mem_read(section->mr, addr, 2);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(hwaddr addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned.  The ram page is not masked as dirty
   and the code inside is not invalidated.  It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(hwaddr addr, uint32_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
                               & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
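
/* Illustrative sketch (editor's addition): a page table walker could use
 * stl_phys_notdirty() to set an accessed bit in a guest PTE without
 * flagging the RAM page dirty, so the dirty bits stay usable for tracking
 * modified PTEs.  PTE_ADDR and PTE_A are made-up names.
 *
 *     uint32_t pte = ldl_phys(PTE_ADDR);
 *     stl_phys_notdirty(PTE_ADDR, pte | PTE_A);  // no dirty bit, no code
 *                                                // invalidation
 */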

void stq_phys_notdirty(hwaddr addr, uint64_t val)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write(section->mr, addr, val >> 32, 4);
        io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
#else
        io_mem_write(section->mr, addr, (uint32_t)val, 4);
        io_mem_write(section->mr, addr + 4, val >> 32, 4);
#endif
    } else {
        ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
                                & TARGET_PAGE_MASK)
                               + memory_region_section_addr(section, addr));
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 4);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 4);
    }
}

void stl_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(hwaddr addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stb_phys(hwaddr addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(hwaddr addr, uint32_t val,
                                     enum device_endian endian)
{
    uint8_t *ptr;
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             addr >> TARGET_PAGE_BITS);

    if (!memory_region_is_ram(section->mr) || section->readonly) {
        addr = memory_region_section_addr(section, addr);
        if (memory_region_is_ram(section->mr)) {
            section = &phys_sections[phys_section_rom];
        }
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write(section->mr, addr, val, 2);
    } else {
        unsigned long addr1;
        addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + memory_region_section_addr(section, addr);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        invalidate_and_set_dirty(addr1, 2);
    }
}

void stw_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(hwaddr addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(hwaddr addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(hwaddr addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}
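
/* Illustrative note (editor's addition): stq_phys() stores in the
 * target's native byte order via tswap64(), while the _le/_be variants
 * pin the order with cpu_to_le64()/cpu_to_be64().  For a hypothetical
 * little-endian ring descriptor at RING_ADDR:
 *
 *     stq_le_phys(RING_ADDR, desc);   // guest always sees LE bytes
 */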

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    hwaddr phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
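
/* Illustrative sketch (editor's addition): this is the routine a debugger
 * front end such as the gdbstub goes through; it translates one guest
 * virtual page at a time and writes via the ROM path, so a breakpoint can
 * be patched even into write-protected code.  The opcode is hypothetical.
 *
 *     uint32_t bkpt = 0xe7f001f0;
 *     cpu_memory_rw_debug(env, vaddr, (uint8_t *)&bkpt, 4, 1);
 */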
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc(retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  (void *)retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB.  If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception.  In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
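
/* Illustrative note (editor's addition): this path only matters when the
 * guest runs with a deterministic instruction counter, e.g.
 *
 *     qemu-system-arm -icount 7 ...
 *
 * where a TB that performs device I/O anywhere but at its end must be
 * regenerated so the instruction count stays exact across the access.
 */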

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size       %td/%zd\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count            %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size  %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size    %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ?
                (double) (code_gen_ptr - code_gen_buffer) / target_code_size
                : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count   %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count      %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count     %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}
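
/* Illustrative note (editor's addition): this dump is normally reached
 * from the human monitor, e.g.
 *
 *     (qemu) info jit
 *
 * which reports TB counts, code buffer usage and flush statistics.
 */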

/*
 * A helper function for the _utterly broken_ virtio device model to find
 * out if it's running on a big endian machine.  Don't do this at home kids!
 */
bool virtio_is_big_endian(void);
bool virtio_is_big_endian(void)
{
#if defined(TARGET_WORDS_BIGENDIAN)
    return true;
#else
    return false;
#endif
}

#endif

#ifndef CONFIG_USER_ONLY
bool cpu_physical_memory_is_io(hwaddr phys_addr)
{
    MemoryRegionSection *section;

    section = phys_page_find(address_space_memory.dispatch,
                             phys_addr >> TARGET_PAGE_BITS);

    return !(memory_region_is_ram(section->mr) ||
             memory_region_is_romd(section->mr));
}
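
/* Illustrative sketch (editor's addition): callers such as a guest memory
 * dumper can use this predicate to avoid reading device (MMIO) pages,
 * where a read may have side effects.  paddr and buf are hypothetical.
 *
 *     if (!cpu_physical_memory_is_io(paddr)) {
 *         cpu_physical_memory_read(paddr, buf, TARGET_PAGE_SIZE);
 *     }
 */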
#endif