/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

#include "cputlb.h"

#define WANT_EXEC_OBSOLETE
#include "exec-obsolete.h"

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC) so place it in a
   section close to code segment. */
#define code_gen_section                                \
    __attribute__((__section__(".gen_code")))           \
    __attribute__((aligned (32)))
#elif defined(_WIN32) && !defined(_WIN64)
#define code_gen_section                                \
    __attribute__((aligned (16)))
#else
#define code_gen_section                                \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

MemoryRegion io_mem_ram, io_mem_rom, io_mem_unassigned, io_mem_notdirty;
static MemoryRegion io_mem_subpage_ram;

#endif

CPUArchState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUArchState *,cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* in order to optimize self modifying code, we count the number
       of lookups we do to a given page to use a bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

#define P_L2_LEVELS \
    (((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / L2_BITS) + 1)

/* The bits remaining after N lower levels of page tables.  */
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)

uintptr_t qemu_real_host_page_size;
uintptr_t qemu_host_page_size;
uintptr_t qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageEntry PhysPageEntry;

static MemoryRegionSection *phys_sections;
static unsigned phys_sections_nb, phys_sections_nb_alloc;
static uint16_t phys_section_unassigned;
static uint16_t phys_section_notdirty;
static uint16_t phys_section_rom;
static uint16_t phys_section_watch;

struct PhysPageEntry {
    uint16_t is_leaf : 1;
    /* index into phys_sections (is_leaf) or phys_map_nodes (!is_leaf) */
    uint16_t ptr : 15;
};

/* Simple allocator for PhysPageEntry nodes */
static PhysPageEntry (*phys_map_nodes)[L2_SIZE];
static unsigned phys_map_nodes_nb, phys_map_nodes_nb_alloc;

#define PHYS_MAP_NODE_NIL (((uint16_t)~0) >> 1)

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to MemoryRegionSections.  */
static PhysPageEntry phys_map = { .ptr = PHYS_MAP_NODE_NIL, .is_leaf = 0 };

static void io_mem_init(void);
static void memory_map_init(void);

static MemoryRegion io_mem_watch;
#endif

/* statistics */
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

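/* Return the PageDesc for the given page index, walking the multi-level
   l1_map.  Intermediate levels and the final PageDesc array are allocated
   on demand when 'alloc' is non-zero; otherwise NULL is returned for pages
   that have never been touched. */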
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)

static void phys_map_node_reserve(unsigned nodes)
{
    if (phys_map_nodes_nb + nodes > phys_map_nodes_nb_alloc) {
        typedef PhysPageEntry Node[L2_SIZE];
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc * 2, 16);
        phys_map_nodes_nb_alloc = MAX(phys_map_nodes_nb_alloc,
                                      phys_map_nodes_nb + nodes);
        phys_map_nodes = g_renew(Node, phys_map_nodes,
                                 phys_map_nodes_nb_alloc);
    }
}

static uint16_t phys_map_node_alloc(void)
{
    unsigned i;
    uint16_t ret;

    ret = phys_map_nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != phys_map_nodes_nb_alloc);
    for (i = 0; i < L2_SIZE; ++i) {
        phys_map_nodes[ret][i].is_leaf = 0;
        phys_map_nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

static void phys_map_nodes_reset(void)
{
    phys_map_nodes_nb = 0;
}

static void phys_page_set_level(PhysPageEntry *lp, target_phys_addr_t *index,
                                target_phys_addr_t *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    target_phys_addr_t step = (target_phys_addr_t)1 << (level * L2_BITS);

    if (!lp->is_leaf && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc();
        p = phys_map_nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < L2_SIZE; i++) {
                p[i].is_leaf = 1;
                p[i].ptr = phys_section_unassigned;
            }
        }
    } else {
        p = phys_map_nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * L2_BITS)) & (L2_SIZE - 1)];

    while (*nb && lp < &p[L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->is_leaf = true;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(target_phys_addr_t index, target_phys_addr_t nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(3 * P_L2_LEVELS);

    phys_page_set_level(&phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

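/* Look up the MemoryRegionSection covering the given physical page index.
   Returns the 'unassigned' section if no mapping exists for the page. */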
MemoryRegionSection *phys_page_find(target_phys_addr_t index)
{
    PhysPageEntry lp = phys_map;
    PhysPageEntry *p;
    int i;
    uint16_t s_index = phys_section_unassigned;

    for (i = P_L2_LEVELS - 1; i >= 0 && !lp.is_leaf; i--) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            goto not_found;
        }
        p = phys_map_nodes[lp.ptr];
        lp = p[(index >> (i * L2_BITS)) & (L2_SIZE - 1)];
    }

    s_index = lp.ptr;
not_found:
    return &phys_sections[s_index];
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_ram && mr != &io_mem_rom
        && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

#define mmap_lock() do { } while(0)
#define mmap_unlock() do { } while(0)
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode. It will change when a dedicated libc will be used */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

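/* Allocate the buffer that will hold the generated host code.  Unless the
   static buffer is used, it is mmap()ed with PROT_EXEC and, on hosts with a
   limited branch range, constrained to an address region reachable with
   direct calls and branches. */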
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Keep the buffer no bigger than 16MB to branch between blocks */
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}

/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void tcg_exec_init(unsigned long tb_size)
{
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    tcg_register_jit(code_gen_buffer, code_gen_buffer_size);
    page_init();
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now. */
    tcg_prologue_init(&tcg_ctx);
#endif
}

bool tcg_enabled(void)
{
    return code_gen_buffer != NULL;
}

void cpu_exec_init_all(void)
{
#if !defined(CONFIG_USER_ONLY)
    memory_map_init();
    io_mem_init();
#endif
}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUArchState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUArchState),
        VMSTATE_UINT32(interrupt_request, CPUArchState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

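/* Return the CPU state with the given index, or NULL if it does not exist. */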
CPUArchState *qemu_get_cpu(int cpu)
{
    CPUArchState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

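/* Register a new CPU: append it to the global CPU list, assign it the next
   cpu_index and, for system emulation, hook up vmstate/savevm handling. */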
void cpu_exec_init(CPUArchState *env)
{
    CPUArchState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single use temporary TB
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}

/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUArchState *env1)
{
    CPUArchState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}

#ifdef DEBUG_TB_CHECK

static void tb_invalidate_check(target_ulong address)
{
    TranslationBlock *tb;
    int i;
    address &= TARGET_PAGE_MASK;
    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
                  address >= tb->pc + tb->size)) {
                printf("ERROR invalidate: address=" TARGET_FMT_lx
                       " PC=%08lx size=%04x\n",
                       address, (long)tb->pc, tb->size);
            }
        }
    }
}

/* verify that all the pages have correct rights for code */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}

#endif

/* invalidate one TB */
static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
                             int next_offset)
{
    TranslationBlock *tb1;
    for(;;) {
        tb1 = *ptb;
        if (tb1 == tb) {
            *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
            break;
        }
        ptb = (TranslationBlock **)((char *)tb1 + next_offset);
    }
}

static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (uintptr_t)tb1 & 3;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (uintptr_t)tb1 & 3;
            tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (uintptr_t)(tb->tc_ptr + tb->tb_next_offset[n]));
}

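/* Remove a TB completely: unlink it from the physical hash table and from
   the per-page TB lists, drop it from every CPU's tb_jmp_cache, and unchain
   any TB that still jumps to it. */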
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUArchState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (uintptr_t)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

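/* Set bits [start, start + len) in the bitmap 'tab'. */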
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end, mask, end1;

    end = start + len;
    tab += start >> 3;
    mask = 0xff << (start & 7);
    if ((start & ~7) == (end & ~7)) {
        if (start < end) {
            mask &= ~(0xff << (end & 7));
            *tab |= mask;
        }
    } else {
        *tab++ |= mask;
        start = (start + 8) & ~7;
        end1 = end & ~7;
        while (start < end1) {
            *tab++ = 0xff;
            start += 8;
        }
        if (start < end) {
            mask = ~(0xff << (end & 7));
            *tab |= mask;
        }
    }
}

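/* Build a bitmap of the code present in a page, so that later writes which
   do not touch any translated code can be serviced without invalidating TBs
   (see tb_invalidate_phys_page_fast). */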
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        n = (uintptr_t)tb & 3;
        tb = (TranslationBlock *)((uintptr_t)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}

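/* Generate a new TB for the given (pc, cs_base, flags) triple, translating
   guest code into the code generation buffer.  If the buffer or the TB array
   is full, all TBs are flushed first. */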
Andreas Färber9349b4f2012-03-14 01:38:32 +01001029TranslationBlock *tb_gen_code(CPUArchState *env,
pbrook2e70f6e2008-06-29 01:03:05 +00001030 target_ulong pc, target_ulong cs_base,
1031 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +00001032{
1033 TranslationBlock *tb;
1034 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001035 tb_page_addr_t phys_pc, phys_page2;
1036 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +00001037 int code_gen_size;
1038
Paul Brook41c1b1c2010-03-12 16:54:58 +00001039 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +00001040 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +00001041 if (!tb) {
1042 /* flush must be done */
1043 tb_flush(env);
1044 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +00001045 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +00001046 /* Don't forget to invalidate previous TB info. */
1047 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +00001048 }
1049 tc_ptr = code_gen_ptr;
1050 tb->tc_ptr = tc_ptr;
1051 tb->cs_base = cs_base;
1052 tb->flags = flags;
1053 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +00001054 cpu_gen_code(env, tb, &code_gen_size);
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001055 code_gen_ptr = (void *)(((uintptr_t)code_gen_ptr + code_gen_size +
1056 CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +00001057
bellardd720b932004-04-25 17:57:43 +00001058 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +00001059 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +00001060 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001061 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001062 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001063 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001064 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001065 return tb;
bellardd720b932004-04-25 17:57:43 +00001066}
ths3b46e622007-09-17 08:09:54 +00001067
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001068/*
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001069 * Invalidate all TBs which intersect with the target physical address range
1070 * [start;end[. NOTE: start and end may refer to *different* physical pages.
1071 * 'is_cpu_write_access' should be true if called from a real cpu write
1072 * access: the virtual CPU will exit the current TB if code is modified inside
1073 * this TB.
Alexander Graf77a8f1a2012-05-10 22:40:10 +00001074 */
1075void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
1076 int is_cpu_write_access)
1077{
1078 while (start < end) {
1079 tb_invalidate_phys_page_range(start, end, is_cpu_write_access);
1080 start &= TARGET_PAGE_MASK;
1081 start += TARGET_PAGE_SIZE;
1082 }
1083}
1084
Jan Kiszka8e0fdce2012-05-23 23:41:53 -03001085/*
1086 * Invalidate all TBs which intersect with the target physical address range
1087 * [start;end[. NOTE: start and end must refer to the *same* physical page.
1088 * 'is_cpu_write_access' should be true if called from a real cpu write
1089 * access: the virtual CPU will exit the current TB if code is modified inside
1090 * this TB.
1091 */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001092void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001093 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001094{
aliguori6b917542008-11-18 19:46:41 +00001095 TranslationBlock *tb, *tb_next, *saved_tb;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001096 CPUArchState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001097 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001098 PageDesc *p;
1099 int n;
1100#ifdef TARGET_HAS_PRECISE_SMC
1101 int current_tb_not_found = is_cpu_write_access;
1102 TranslationBlock *current_tb = NULL;
1103 int current_tb_modified = 0;
1104 target_ulong current_pc = 0;
1105 target_ulong current_cs_base = 0;
1106 int current_flags = 0;
1107#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001108
1109 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001110 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001111 return;
ths5fafdf22007-09-16 21:08:06 +00001112 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001113 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1114 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001115 /* build code bitmap */
1116 build_page_bitmap(p);
1117 }
1118
1119 /* we remove all the TBs in the range [start, end[ */
1120 /* XXX: see if in some cases it could be faster to invalidate all the code */
1121 tb = p->first_tb;
1122 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001123 n = (uintptr_t)tb & 3;
1124 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellard9fa3e852004-01-04 18:06:42 +00001125 tb_next = tb->page_next[n];
1126 /* NOTE: this is subtle as a TB may span two physical pages */
1127 if (n == 0) {
1128 /* NOTE: tb_end may be after the end of the page, but
1129 it is not a problem */
1130 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1131 tb_end = tb_start + tb->size;
1132 } else {
1133 tb_start = tb->page_addr[1];
1134 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1135 }
1136 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001137#ifdef TARGET_HAS_PRECISE_SMC
1138 if (current_tb_not_found) {
1139 current_tb_not_found = 0;
1140 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001141 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001142 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001143 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001144 }
1145 }
1146 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001147 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001148 /* If we are modifying the current TB, we must stop
1149 its execution. We could be more precise by checking
1150 that the modification is after the current PC, but it
1151 would require a specialized function to partially
1152 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001153
bellardd720b932004-04-25 17:57:43 +00001154 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001155 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001156 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1157 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001158 }
1159#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001160 /* we need to do that to handle the case where a signal
1161 occurs while doing tb_phys_invalidate() */
1162 saved_tb = NULL;
1163 if (env) {
1164 saved_tb = env->current_tb;
1165 env->current_tb = NULL;
1166 }
bellard9fa3e852004-01-04 18:06:42 +00001167 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001168 if (env) {
1169 env->current_tb = saved_tb;
1170 if (env->interrupt_request && env->current_tb)
1171 cpu_interrupt(env, env->interrupt_request);
1172 }
bellard9fa3e852004-01-04 18:06:42 +00001173 }
1174 tb = tb_next;
1175 }
1176#if !defined(CONFIG_USER_ONLY)
1177 /* if no code remaining, no need to continue to use slow writes */
1178 if (!p->first_tb) {
1179 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001180 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001181 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001182 }
1183 }
1184#endif
1185#ifdef TARGET_HAS_PRECISE_SMC
1186 if (current_tb_modified) {
1187 /* we generate a block containing just the instruction
1188 modifying the memory. It will ensure that it cannot modify
1189 itself */
bellardea1c1802004-06-14 18:56:36 +00001190 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001191 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001192 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001193 }
1194#endif
1195}
1196
1197/* len must be <= 8 and start must be a multiple of len */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001198static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
bellard9fa3e852004-01-04 18:06:42 +00001199{
1200 PageDesc *p;
1201 int offset, b;
bellard59817cc2004-02-16 22:01:13 +00001202#if 0
bellarda4193c82004-06-03 14:01:43 +00001203 if (1) {
aliguori93fcfe32009-01-15 22:34:14 +00001204 qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
1205 cpu_single_env->mem_io_vaddr, len,
1206 cpu_single_env->eip,
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001207 cpu_single_env->eip +
1208 (intptr_t)cpu_single_env->segs[R_CS].base);
bellard59817cc2004-02-16 22:01:13 +00001209 }
1210#endif
bellard9fa3e852004-01-04 18:06:42 +00001211 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001212 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001213 return;
1214 if (p->code_bitmap) {
1215 offset = start & ~TARGET_PAGE_MASK;
1216 b = p->code_bitmap[offset >> 3] >> (offset & 7);
1217 if (b & ((1 << len) - 1))
1218 goto do_invalidate;
1219 } else {
1220 do_invalidate:
bellardd720b932004-04-25 17:57:43 +00001221 tb_invalidate_phys_page_range(start, start + len, 1);
bellard9fa3e852004-01-04 18:06:42 +00001222 }
1223}
1224
bellard9fa3e852004-01-04 18:06:42 +00001225#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001226static void tb_invalidate_phys_page(tb_page_addr_t addr,
Blue Swirl20503962012-04-09 14:20:20 +00001227 uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001228{
aliguori6b917542008-11-18 19:46:41 +00001229 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001230 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001231 int n;
bellardd720b932004-04-25 17:57:43 +00001232#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001233 TranslationBlock *current_tb = NULL;
Andreas Färber9349b4f2012-03-14 01:38:32 +01001234 CPUArchState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001235 int current_tb_modified = 0;
1236 target_ulong current_pc = 0;
1237 target_ulong current_cs_base = 0;
1238 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001239#endif
bellard9fa3e852004-01-04 18:06:42 +00001240
1241 addr &= TARGET_PAGE_MASK;
1242 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001243 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001244 return;
1245 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001246#ifdef TARGET_HAS_PRECISE_SMC
1247 if (tb && pc != 0) {
1248 current_tb = tb_find_pc(pc);
1249 }
1250#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001251 while (tb != NULL) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001252 n = (uintptr_t)tb & 3;
1253 tb = (TranslationBlock *)((uintptr_t)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001254#ifdef TARGET_HAS_PRECISE_SMC
1255 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001256 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001257 /* If we are modifying the current TB, we must stop
1258 its execution. We could be more precise by checking
1259 that the modification is after the current PC, but it
1260 would require a specialized function to partially
1261 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001262
bellardd720b932004-04-25 17:57:43 +00001263 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001264 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001265 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1266 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001267 }
1268#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001269 tb_phys_invalidate(tb, addr);
1270 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001271 }
1272 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001273#ifdef TARGET_HAS_PRECISE_SMC
1274 if (current_tb_modified) {
1275 /* we generate a block containing just the instruction
1276 modifying the memory. It will ensure that it cannot modify
1277 itself */
bellardea1c1802004-06-14 18:56:36 +00001278 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001279 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001280 cpu_resume_from_signal(env, puc);
1281 }
1282#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001283}
bellard9fa3e852004-01-04 18:06:42 +00001284#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001285
1286/* add the tb in the target page and protect it if necessary */
ths5fafdf22007-09-16 21:08:06 +00001287static inline void tb_alloc_page(TranslationBlock *tb,
Paul Brook41c1b1c2010-03-12 16:54:58 +00001288 unsigned int n, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +00001289{
1290 PageDesc *p;
Juan Quintela4429ab42011-06-02 01:53:44 +00001291#ifndef CONFIG_USER_ONLY
1292 bool page_already_protected;
1293#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001294
bellard9fa3e852004-01-04 18:06:42 +00001295 tb->page_addr[n] = page_addr;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001296 p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
bellard9fa3e852004-01-04 18:06:42 +00001297 tb->page_next[n] = p->first_tb;
Juan Quintela4429ab42011-06-02 01:53:44 +00001298#ifndef CONFIG_USER_ONLY
1299 page_already_protected = p->first_tb != NULL;
1300#endif
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001301 p->first_tb = (TranslationBlock *)((uintptr_t)tb | n);
bellard9fa3e852004-01-04 18:06:42 +00001302 invalidate_page_bitmap(p);
1303
bellard107db442004-06-22 18:48:46 +00001304#if defined(TARGET_HAS_SMC) || 1
bellardd720b932004-04-25 17:57:43 +00001305
bellard9fa3e852004-01-04 18:06:42 +00001306#if defined(CONFIG_USER_ONLY)
bellardfd6ce8f2003-05-14 19:00:11 +00001307 if (p->flags & PAGE_WRITE) {
pbrook53a59602006-03-25 19:31:22 +00001308 target_ulong addr;
1309 PageDesc *p2;
bellard9fa3e852004-01-04 18:06:42 +00001310 int prot;
1311
bellardfd6ce8f2003-05-14 19:00:11 +00001312 /* force the host page to be non-writable (writes will incur a
 1313 page fault + mprotect overhead) */
pbrook53a59602006-03-25 19:31:22 +00001314 page_addr &= qemu_host_page_mask;
bellardfd6ce8f2003-05-14 19:00:11 +00001315 prot = 0;
pbrook53a59602006-03-25 19:31:22 +00001316 for(addr = page_addr; addr < page_addr + qemu_host_page_size;
1317 addr += TARGET_PAGE_SIZE) {
1318
1319 p2 = page_find (addr >> TARGET_PAGE_BITS);
1320 if (!p2)
1321 continue;
1322 prot |= p2->flags;
1323 p2->flags &= ~PAGE_WRITE;
pbrook53a59602006-03-25 19:31:22 +00001324 }
ths5fafdf22007-09-16 21:08:06 +00001325 mprotect(g2h(page_addr), qemu_host_page_size,
bellardfd6ce8f2003-05-14 19:00:11 +00001326 (prot & PAGE_BITS) & ~PAGE_WRITE);
1327#ifdef DEBUG_TB_INVALIDATE
blueswir1ab3d1722007-11-04 07:31:40 +00001328 printf("protecting code page: 0x" TARGET_FMT_lx "\n",
pbrook53a59602006-03-25 19:31:22 +00001329 page_addr);
bellardfd6ce8f2003-05-14 19:00:11 +00001330#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001331 }
bellard9fa3e852004-01-04 18:06:42 +00001332#else
1333 /* if some code is already present, then the pages are already
1334 protected. So we handle the case where only the first TB is
1335 allocated in a physical page */
Juan Quintela4429ab42011-06-02 01:53:44 +00001336 if (!page_already_protected) {
bellard6a00d602005-11-21 23:25:50 +00001337 tlb_protect_code(page_addr);
bellard9fa3e852004-01-04 18:06:42 +00001338 }
1339#endif
bellardd720b932004-04-25 17:57:43 +00001340
1341#endif /* TARGET_HAS_SMC */
bellardfd6ce8f2003-05-14 19:00:11 +00001342}
1343
bellard9fa3e852004-01-04 18:06:42 +00001344/* add a new TB and link it to the physical page tables. phys_page2 is
1345 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001346void tb_link_page(TranslationBlock *tb,
1347 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001348{
bellard9fa3e852004-01-04 18:06:42 +00001349 unsigned int h;
1350 TranslationBlock **ptb;
1351
pbrookc8a706f2008-06-02 16:16:42 +00001352 /* Grab the mmap lock to stop another thread invalidating this TB
1353 before we are done. */
1354 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001355 /* add in the physical hash table */
1356 h = tb_phys_hash_func(phys_pc);
1357 ptb = &tb_phys_hash[h];
1358 tb->phys_hash_next = *ptb;
1359 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001360
1361 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001362 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1363 if (phys_page2 != -1)
1364 tb_alloc_page(tb, 1, phys_page2);
1365 else
1366 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001367
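    /* jmp_first points back to the TB itself, tagged with 2, so list
       walkers can detect the end of the circular jump list (see the
       n1 == 2 check in tb_reset_jump_recursive2()). */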
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001368 tb->jmp_first = (TranslationBlock *)((uintptr_t)tb | 2);
bellardd4e81642003-05-25 16:46:15 +00001369 tb->jmp_next[0] = NULL;
1370 tb->jmp_next[1] = NULL;
1371
1372 /* init original jump addresses */
1373 if (tb->tb_next_offset[0] != 0xffff)
1374 tb_reset_jump(tb, 0);
1375 if (tb->tb_next_offset[1] != 0xffff)
1376 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001377
1378#ifdef DEBUG_TB_CHECK
1379 tb_page_check();
1380#endif
pbrookc8a706f2008-06-02 16:16:42 +00001381 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001382}
1383
bellarda513fe12003-05-27 23:29:48 +00001384/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1385 tb[1].tc_ptr. Return NULL if not found */
Stefan Weil6375e092012-04-06 22:26:15 +02001386TranslationBlock *tb_find_pc(uintptr_t tc_ptr)
bellarda513fe12003-05-27 23:29:48 +00001387{
1388 int m_min, m_max, m;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001389 uintptr_t v;
bellarda513fe12003-05-27 23:29:48 +00001390 TranslationBlock *tb;
1391
1392 if (nb_tbs <= 0)
1393 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001394 if (tc_ptr < (uintptr_t)code_gen_buffer ||
1395 tc_ptr >= (uintptr_t)code_gen_ptr) {
bellarda513fe12003-05-27 23:29:48 +00001396 return NULL;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001397 }
bellarda513fe12003-05-27 23:29:48 +00001398 /* binary search (cf Knuth) */
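    /* tbs[] entries are created in the order their code is emitted into
       code_gen_buffer, so tc_ptr values are increasing and the TB
       containing tc_ptr can be located by binary search. */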
1399 m_min = 0;
1400 m_max = nb_tbs - 1;
1401 while (m_min <= m_max) {
1402 m = (m_min + m_max) >> 1;
1403 tb = &tbs[m];
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001404 v = (uintptr_t)tb->tc_ptr;
bellarda513fe12003-05-27 23:29:48 +00001405 if (v == tc_ptr)
1406 return tb;
1407 else if (tc_ptr < v) {
1408 m_max = m - 1;
1409 } else {
1410 m_min = m + 1;
1411 }
ths5fafdf22007-09-16 21:08:06 +00001412 }
bellarda513fe12003-05-27 23:29:48 +00001413 return &tbs[m_max];
1414}
bellard75012672003-06-21 13:11:07 +00001415
bellardea041c02003-06-25 16:16:50 +00001416static void tb_reset_jump_recursive(TranslationBlock *tb);
1417
1418static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
1419{
1420 TranslationBlock *tb1, *tb_next, **ptb;
1421 unsigned int n1;
1422
1423 tb1 = tb->jmp_next[n];
1424 if (tb1 != NULL) {
1425 /* find head of list */
1426 for(;;) {
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001427 n1 = (uintptr_t)tb1 & 3;
1428 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001429 if (n1 == 2)
1430 break;
1431 tb1 = tb1->jmp_next[n1];
1432 }
 1433 /* we are now sure that tb jumps to tb1 */
1434 tb_next = tb1;
1435
1436 /* remove tb from the jmp_first list */
1437 ptb = &tb_next->jmp_first;
1438 for(;;) {
1439 tb1 = *ptb;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001440 n1 = (uintptr_t)tb1 & 3;
1441 tb1 = (TranslationBlock *)((uintptr_t)tb1 & ~3);
bellardea041c02003-06-25 16:16:50 +00001442 if (n1 == n && tb1 == tb)
1443 break;
1444 ptb = &tb1->jmp_next[n1];
1445 }
1446 *ptb = tb->jmp_next[n];
1447 tb->jmp_next[n] = NULL;
ths3b46e622007-09-17 08:09:54 +00001448
bellardea041c02003-06-25 16:16:50 +00001449 /* suppress the jump to next tb in generated code */
1450 tb_reset_jump(tb, n);
1451
bellard01243112004-01-04 15:48:17 +00001452 /* suppress jumps in the tb on which we could have jumped */
bellardea041c02003-06-25 16:16:50 +00001453 tb_reset_jump_recursive(tb_next);
1454 }
1455}
1456
1457static void tb_reset_jump_recursive(TranslationBlock *tb)
1458{
1459 tb_reset_jump_recursive2(tb, 0);
1460 tb_reset_jump_recursive2(tb, 1);
1461}
1462
bellard1fddef42005-04-17 19:16:13 +00001463#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001464#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001465static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
Paul Brook94df27f2010-02-28 23:47:45 +00001466{
1467 tb_invalidate_phys_page_range(pc, pc + 1, 0);
1468}
1469#else
Max Filippov1e7855a2012-04-10 02:48:17 +04001470void tb_invalidate_phys_addr(target_phys_addr_t addr)
bellardd720b932004-04-25 17:57:43 +00001471{
Anthony Liguoric227f092009-10-01 16:12:16 -05001472 ram_addr_t ram_addr;
Avi Kivityf3705d52012-03-08 16:16:34 +02001473 MemoryRegionSection *section;
bellardd720b932004-04-25 17:57:43 +00001474
Avi Kivity06ef3522012-02-13 16:11:22 +02001475 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Avi Kivityf3705d52012-03-08 16:16:34 +02001476 if (!(memory_region_is_ram(section->mr)
1477 || (section->mr->rom_device && section->mr->readable))) {
Avi Kivity06ef3522012-02-13 16:11:22 +02001478 return;
1479 }
Avi Kivityf3705d52012-03-08 16:16:34 +02001480 ram_addr = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001481 + memory_region_section_addr(section, addr);
pbrook706cd4b2006-04-08 17:36:21 +00001482 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001483}
Max Filippov1e7855a2012-04-10 02:48:17 +04001484
1485static void breakpoint_invalidate(CPUArchState *env, target_ulong pc)
1486{
Max Filippov9d70c4b2012-05-27 20:21:08 +04001487 tb_invalidate_phys_addr(cpu_get_phys_page_debug(env, pc) |
1488 (pc & ~TARGET_PAGE_MASK));
Max Filippov1e7855a2012-04-10 02:48:17 +04001489}
bellardc27004e2005-01-03 23:35:10 +00001490#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001491#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001492
Paul Brookc527ee82010-03-01 03:31:14 +00001493#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01001494void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
Paul Brookc527ee82010-03-01 03:31:14 +00001495
1496{
1497}
1498
Andreas Färber9349b4f2012-03-14 01:38:32 +01001499int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
Paul Brookc527ee82010-03-01 03:31:14 +00001500 int flags, CPUWatchpoint **watchpoint)
1501{
1502 return -ENOSYS;
1503}
1504#else
pbrook6658ffb2007-03-16 23:58:11 +00001505/* Add a watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001506int cpu_watchpoint_insert(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001507 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001508{
aliguorib4051332008-11-18 20:14:20 +00001509 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001510 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001511
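    /* len is restricted below to a power of two no larger than a page and
       addr must be aligned to it, so ~(len - 1) masks any address in the
       watched range down to wp->vaddr; cpu_watchpoint_remove() matches
       entries using the same mask. */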
aliguorib4051332008-11-18 20:14:20 +00001512 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
Max Filippov0dc23822012-01-29 03:15:23 +04001513 if ((len & (len - 1)) || (addr & ~len_mask) ||
1514 len == 0 || len > TARGET_PAGE_SIZE) {
aliguorib4051332008-11-18 20:14:20 +00001515 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1516 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1517 return -EINVAL;
1518 }
Anthony Liguori7267c092011-08-20 22:09:37 -05001519 wp = g_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001520
aliguoria1d1bb32008-11-18 20:07:32 +00001521 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001522 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001523 wp->flags = flags;
1524
aliguori2dc9f412008-11-18 20:56:59 +00001525 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001526 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001527 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001528 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001529 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001530
pbrook6658ffb2007-03-16 23:58:11 +00001531 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001532
1533 if (watchpoint)
1534 *watchpoint = wp;
1535 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001536}
1537
aliguoria1d1bb32008-11-18 20:07:32 +00001538/* Remove a specific watchpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001539int cpu_watchpoint_remove(CPUArchState *env, target_ulong addr, target_ulong len,
aliguoria1d1bb32008-11-18 20:07:32 +00001540 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001541{
aliguorib4051332008-11-18 20:14:20 +00001542 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001543 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001544
Blue Swirl72cf2d42009-09-12 07:36:22 +00001545 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001546 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001547 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001548 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001549 return 0;
1550 }
1551 }
aliguoria1d1bb32008-11-18 20:07:32 +00001552 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001553}
1554
aliguoria1d1bb32008-11-18 20:07:32 +00001555/* Remove a specific watchpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001556void cpu_watchpoint_remove_by_ref(CPUArchState *env, CPUWatchpoint *watchpoint)
aliguoria1d1bb32008-11-18 20:07:32 +00001557{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001558 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001559
aliguoria1d1bb32008-11-18 20:07:32 +00001560 tlb_flush_page(env, watchpoint->vaddr);
1561
Anthony Liguori7267c092011-08-20 22:09:37 -05001562 g_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001563}
1564
aliguoria1d1bb32008-11-18 20:07:32 +00001565/* Remove all matching watchpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001566void cpu_watchpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001567{
aliguoric0ce9982008-11-25 22:13:57 +00001568 CPUWatchpoint *wp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001569
Blue Swirl72cf2d42009-09-12 07:36:22 +00001570 QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001571 if (wp->flags & mask)
1572 cpu_watchpoint_remove_by_ref(env, wp);
aliguoric0ce9982008-11-25 22:13:57 +00001573 }
aliguoria1d1bb32008-11-18 20:07:32 +00001574}
Paul Brookc527ee82010-03-01 03:31:14 +00001575#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001576
1577/* Add a breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001578int cpu_breakpoint_insert(CPUArchState *env, target_ulong pc, int flags,
aliguoria1d1bb32008-11-18 20:07:32 +00001579 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001580{
bellard1fddef42005-04-17 19:16:13 +00001581#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001582 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001583
Anthony Liguori7267c092011-08-20 22:09:37 -05001584 bp = g_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001585
1586 bp->pc = pc;
1587 bp->flags = flags;
1588
aliguori2dc9f412008-11-18 20:56:59 +00001589 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001590 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001591 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001592 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001593 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001594
1595 breakpoint_invalidate(env, pc);
1596
1597 if (breakpoint)
1598 *breakpoint = bp;
1599 return 0;
1600#else
1601 return -ENOSYS;
1602#endif
1603}
1604
1605/* Remove a specific breakpoint. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001606int cpu_breakpoint_remove(CPUArchState *env, target_ulong pc, int flags)
aliguoria1d1bb32008-11-18 20:07:32 +00001607{
1608#if defined(TARGET_HAS_ICE)
1609 CPUBreakpoint *bp;
1610
Blue Swirl72cf2d42009-09-12 07:36:22 +00001611 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguoria1d1bb32008-11-18 20:07:32 +00001612 if (bp->pc == pc && bp->flags == flags) {
1613 cpu_breakpoint_remove_by_ref(env, bp);
bellard4c3a88a2003-07-26 12:06:08 +00001614 return 0;
aliguoria1d1bb32008-11-18 20:07:32 +00001615 }
bellard4c3a88a2003-07-26 12:06:08 +00001616 }
aliguoria1d1bb32008-11-18 20:07:32 +00001617 return -ENOENT;
bellard4c3a88a2003-07-26 12:06:08 +00001618#else
aliguoria1d1bb32008-11-18 20:07:32 +00001619 return -ENOSYS;
bellard4c3a88a2003-07-26 12:06:08 +00001620#endif
1621}
1622
aliguoria1d1bb32008-11-18 20:07:32 +00001623/* Remove a specific breakpoint by reference. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001624void cpu_breakpoint_remove_by_ref(CPUArchState *env, CPUBreakpoint *breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001625{
bellard1fddef42005-04-17 19:16:13 +00001626#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001627 QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);
bellardd720b932004-04-25 17:57:43 +00001628
aliguoria1d1bb32008-11-18 20:07:32 +00001629 breakpoint_invalidate(env, breakpoint->pc);
1630
Anthony Liguori7267c092011-08-20 22:09:37 -05001631 g_free(breakpoint);
aliguoria1d1bb32008-11-18 20:07:32 +00001632#endif
1633}
1634
1635/* Remove all matching breakpoints. */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001636void cpu_breakpoint_remove_all(CPUArchState *env, int mask)
aliguoria1d1bb32008-11-18 20:07:32 +00001637{
1638#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001639 CPUBreakpoint *bp, *next;
aliguoria1d1bb32008-11-18 20:07:32 +00001640
Blue Swirl72cf2d42009-09-12 07:36:22 +00001641 QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
aliguoria1d1bb32008-11-18 20:07:32 +00001642 if (bp->flags & mask)
1643 cpu_breakpoint_remove_by_ref(env, bp);
aliguoric0ce9982008-11-25 22:13:57 +00001644 }
bellard4c3a88a2003-07-26 12:06:08 +00001645#endif
1646}
1647
bellardc33a3462003-07-29 20:50:33 +00001648/* enable or disable single step mode. EXCP_DEBUG is returned by the
1649 CPU loop after each instruction */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001650void cpu_single_step(CPUArchState *env, int enabled)
bellardc33a3462003-07-29 20:50:33 +00001651{
bellard1fddef42005-04-17 19:16:13 +00001652#if defined(TARGET_HAS_ICE)
bellardc33a3462003-07-29 20:50:33 +00001653 if (env->singlestep_enabled != enabled) {
1654 env->singlestep_enabled = enabled;
aliguorie22a25c2009-03-12 20:12:48 +00001655 if (kvm_enabled())
1656 kvm_update_guest_debug(env, 0);
1657 else {
Stuart Bradyccbb4d42009-05-03 12:15:06 +01001658 /* must flush all the translated code to avoid inconsistencies */
aliguorie22a25c2009-03-12 20:12:48 +00001659 /* XXX: only flush what is necessary */
1660 tb_flush(env);
1661 }
bellardc33a3462003-07-29 20:50:33 +00001662 }
1663#endif
1664}
1665
Andreas Färber9349b4f2012-03-14 01:38:32 +01001666static void cpu_unlink_tb(CPUArchState *env)
bellardea041c02003-06-25 16:16:50 +00001667{
pbrookd5975362008-06-07 20:50:51 +00001668 /* FIXME: TB unchaining isn't SMP safe. For now just ignore the
1669 problem and hope the cpu will stop of its own accord. For userspace
1670 emulation this often isn't actually as bad as it sounds. Often
1671 signals are used primarily to interrupt blocking syscalls. */
aurel323098dba2009-03-07 21:28:24 +00001672 TranslationBlock *tb;
Anthony Liguoric227f092009-10-01 16:12:16 -05001673 static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;
aurel323098dba2009-03-07 21:28:24 +00001674
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001675 spin_lock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001676 tb = env->current_tb;
1677 /* if the cpu is currently executing code, we must unlink it and
1678 all the potentially executing TB */
Riku Voipiof76cfe52009-12-04 15:16:30 +02001679 if (tb) {
aurel323098dba2009-03-07 21:28:24 +00001680 env->current_tb = NULL;
1681 tb_reset_jump_recursive(tb);
aurel323098dba2009-03-07 21:28:24 +00001682 }
Riku Voipiocab1b4b2010-01-20 12:56:27 +02001683 spin_unlock(&interrupt_lock);
aurel323098dba2009-03-07 21:28:24 +00001684}
1685
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001686#ifndef CONFIG_USER_ONLY
aurel323098dba2009-03-07 21:28:24 +00001687/* mask must never be zero, except for A20 change call */
Andreas Färber9349b4f2012-03-14 01:38:32 +01001688static void tcg_handle_interrupt(CPUArchState *env, int mask)
aurel323098dba2009-03-07 21:28:24 +00001689{
1690 int old_mask;
1691
1692 old_mask = env->interrupt_request;
1693 env->interrupt_request |= mask;
1694
aliguori8edac962009-04-24 18:03:45 +00001695 /*
1696 * If called from iothread context, wake the target cpu in
 1697 * case it's halted.
1698 */
Jan Kiszkab7680cb2011-03-12 17:43:51 +01001699 if (!qemu_cpu_is_self(env)) {
aliguori8edac962009-04-24 18:03:45 +00001700 qemu_cpu_kick(env);
1701 return;
1702 }
aliguori8edac962009-04-24 18:03:45 +00001703
pbrook2e70f6e2008-06-29 01:03:05 +00001704 if (use_icount) {
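        /* Force the 32-bit icount counter negative so the CPU leaves the
           current TB at its next icount check and the interrupt can be
           serviced. */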
pbrook266910c2008-07-09 15:31:50 +00001705 env->icount_decr.u16.high = 0xffff;
pbrook2e70f6e2008-06-29 01:03:05 +00001706 if (!can_do_io(env)
aurel32be214e62009-03-06 21:48:00 +00001707 && (mask & ~old_mask) != 0) {
pbrook2e70f6e2008-06-29 01:03:05 +00001708 cpu_abort(env, "Raised interrupt while not in I/O function");
1709 }
pbrook2e70f6e2008-06-29 01:03:05 +00001710 } else {
aurel323098dba2009-03-07 21:28:24 +00001711 cpu_unlink_tb(env);
bellardea041c02003-06-25 16:16:50 +00001712 }
1713}
1714
Jan Kiszkaec6959d2011-04-13 01:32:56 +02001715CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1716
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001717#else /* CONFIG_USER_ONLY */
1718
Andreas Färber9349b4f2012-03-14 01:38:32 +01001719void cpu_interrupt(CPUArchState *env, int mask)
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001720{
1721 env->interrupt_request |= mask;
1722 cpu_unlink_tb(env);
1723}
1724#endif /* CONFIG_USER_ONLY */
1725
Andreas Färber9349b4f2012-03-14 01:38:32 +01001726void cpu_reset_interrupt(CPUArchState *env, int mask)
bellardb54ad042004-05-20 13:42:52 +00001727{
1728 env->interrupt_request &= ~mask;
1729}
1730
Andreas Färber9349b4f2012-03-14 01:38:32 +01001731void cpu_exit(CPUArchState *env)
aurel323098dba2009-03-07 21:28:24 +00001732{
1733 env->exit_request = 1;
1734 cpu_unlink_tb(env);
1735}
1736
Andreas Färber9349b4f2012-03-14 01:38:32 +01001737void cpu_abort(CPUArchState *env, const char *fmt, ...)
bellard75012672003-06-21 13:11:07 +00001738{
1739 va_list ap;
pbrook493ae1f2007-11-23 16:53:59 +00001740 va_list ap2;
bellard75012672003-06-21 13:11:07 +00001741
1742 va_start(ap, fmt);
pbrook493ae1f2007-11-23 16:53:59 +00001743 va_copy(ap2, ap);
bellard75012672003-06-21 13:11:07 +00001744 fprintf(stderr, "qemu: fatal: ");
1745 vfprintf(stderr, fmt, ap);
1746 fprintf(stderr, "\n");
1747#ifdef TARGET_I386
bellard7fe48482004-10-09 18:08:01 +00001748 cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
1749#else
1750 cpu_dump_state(env, stderr, fprintf, 0);
bellard75012672003-06-21 13:11:07 +00001751#endif
aliguori93fcfe32009-01-15 22:34:14 +00001752 if (qemu_log_enabled()) {
1753 qemu_log("qemu: fatal: ");
1754 qemu_log_vprintf(fmt, ap2);
1755 qemu_log("\n");
j_mayerf9373292007-09-29 12:18:20 +00001756#ifdef TARGET_I386
aliguori93fcfe32009-01-15 22:34:14 +00001757 log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
j_mayerf9373292007-09-29 12:18:20 +00001758#else
aliguori93fcfe32009-01-15 22:34:14 +00001759 log_cpu_state(env, 0);
j_mayerf9373292007-09-29 12:18:20 +00001760#endif
aliguori31b1a7b2009-01-15 22:35:09 +00001761 qemu_log_flush();
aliguori93fcfe32009-01-15 22:34:14 +00001762 qemu_log_close();
balrog924edca2007-06-10 14:07:13 +00001763 }
pbrook493ae1f2007-11-23 16:53:59 +00001764 va_end(ap2);
j_mayerf9373292007-09-29 12:18:20 +00001765 va_end(ap);
Riku Voipiofd052bf2010-01-25 14:30:49 +02001766#if defined(CONFIG_USER_ONLY)
1767 {
1768 struct sigaction act;
1769 sigfillset(&act.sa_mask);
1770 act.sa_handler = SIG_DFL;
1771 sigaction(SIGABRT, &act, NULL);
1772 }
1773#endif
bellard75012672003-06-21 13:11:07 +00001774 abort();
1775}
1776
Andreas Färber9349b4f2012-03-14 01:38:32 +01001777CPUArchState *cpu_copy(CPUArchState *env)
thsc5be9f02007-02-28 20:20:53 +00001778{
Andreas Färber9349b4f2012-03-14 01:38:32 +01001779 CPUArchState *new_env = cpu_init(env->cpu_model_str);
1780 CPUArchState *next_cpu = new_env->next_cpu;
thsc5be9f02007-02-28 20:20:53 +00001781 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001782#if defined(TARGET_HAS_ICE)
1783 CPUBreakpoint *bp;
1784 CPUWatchpoint *wp;
1785#endif
1786
Andreas Färber9349b4f2012-03-14 01:38:32 +01001787 memcpy(new_env, env, sizeof(CPUArchState));
aliguori5a38f082009-01-15 20:16:51 +00001788
1789 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001790 new_env->next_cpu = next_cpu;
1791 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001792
1793 /* Clone all break/watchpoints.
1794 Note: Once we support ptrace with hw-debug register access, make sure
1795 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001796 QTAILQ_INIT(&env->breakpoints);
1797 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001798#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001799 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001800 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1801 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001802 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001803 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1804 wp->flags, NULL);
1805 }
1806#endif
1807
thsc5be9f02007-02-28 20:20:53 +00001808 return new_env;
1809}
1810
bellard01243112004-01-04 15:48:17 +00001811#if !defined(CONFIG_USER_ONLY)
Blue Swirl0cac1b62012-04-09 16:50:52 +00001812void tb_flush_jmp_cache(CPUArchState *env, target_ulong addr)
edgar_igl5c751e92008-05-06 08:44:21 +00001813{
1814 unsigned int i;
1815
1816 /* Discard jump cache entries for any tb which might potentially
1817 overlap the flushed page. */
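    /* A TB may span two guest pages, so entries hashed from the preceding
       page can also point at code on the flushed page. */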
1818 i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
1819 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001820 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001821
1822 i = tb_jmp_cache_hash_page(addr);
1823 memset (&env->tb_jmp_cache[i], 0,
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001824 TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
edgar_igl5c751e92008-05-06 08:44:21 +00001825}
1826
pbrook5579c7f2009-04-11 14:47:08 +00001827/* Note: start and end must be within the same ram block. */
Anthony Liguoric227f092009-10-01 16:12:16 -05001828void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
bellard0a962c02005-02-10 22:00:27 +00001829 int dirty_flags)
bellard1ccde1c2004-02-06 19:46:14 +00001830{
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001831 uintptr_t length, start1;
bellard1ccde1c2004-02-06 19:46:14 +00001832
1833 start &= TARGET_PAGE_MASK;
1834 end = TARGET_PAGE_ALIGN(end);
1835
1836 length = end - start;
1837 if (length == 0)
1838 return;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09001839 cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00001840
bellard1ccde1c2004-02-06 19:46:14 +00001841 /* we modify the TLB cache so that the dirty bit will be set again
1842 when accessing the range */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001843 start1 = (uintptr_t)qemu_safe_ram_ptr(start);
Stefan Weila57d23e2011-04-30 22:49:26 +02001844 /* Check that we don't span multiple blocks - this breaks the
pbrook5579c7f2009-04-11 14:47:08 +00001845 address comparisons below. */
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001846 if ((uintptr_t)qemu_safe_ram_ptr(end - 1) - start1
pbrook5579c7f2009-04-11 14:47:08 +00001847 != (end - 1) - start) {
1848 abort();
1849 }
Blue Swirle5548612012-04-21 13:08:33 +00001850 cpu_tlb_reset_dirty_all(start1, length);
bellard1ccde1c2004-02-06 19:46:14 +00001851}
1852
aliguori74576192008-10-06 14:02:03 +00001853int cpu_physical_memory_set_dirty_tracking(int enable)
1854{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001855 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00001856 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001857 return ret;
aliguori74576192008-10-06 14:02:03 +00001858}
1859
Blue Swirle5548612012-04-21 13:08:33 +00001860target_phys_addr_t memory_region_section_get_iotlb(CPUArchState *env,
1861 MemoryRegionSection *section,
1862 target_ulong vaddr,
1863 target_phys_addr_t paddr,
1864 int prot,
1865 target_ulong *address)
1866{
1867 target_phys_addr_t iotlb;
1868 CPUWatchpoint *wp;
1869
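    /* The returned iotlb is either a page-aligned ram_addr with a special
       section index (notdirty or rom) folded into the low bits, or, for
       MMIO, the section's index in phys_sections plus the offset within
       that section. */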
Blue Swirlcc5bea62012-04-14 14:56:48 +00001870 if (memory_region_is_ram(section->mr)) {
Blue Swirle5548612012-04-21 13:08:33 +00001871 /* Normal RAM. */
1872 iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00001873 + memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001874 if (!section->readonly) {
1875 iotlb |= phys_section_notdirty;
1876 } else {
1877 iotlb |= phys_section_rom;
1878 }
1879 } else {
1880 /* IO handlers are currently passed a physical address.
1881 It would be nice to pass an offset from the base address
1882 of that region. This would avoid having to special case RAM,
1883 and avoid full address decoding in every device.
1884 We can't use the high bits of pd for this because
1885 IO_MEM_ROMD uses these as a ram address. */
1886 iotlb = section - phys_sections;
Blue Swirlcc5bea62012-04-14 14:56:48 +00001887 iotlb += memory_region_section_addr(section, paddr);
Blue Swirle5548612012-04-21 13:08:33 +00001888 }
1889
1890 /* Make accesses to pages with watchpoints go via the
1891 watchpoint trap routines. */
1892 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
1893 if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
1894 /* Avoid trapping reads of pages with a write breakpoint. */
1895 if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
1896 iotlb = phys_section_watch + paddr;
1897 *address |= TLB_MMIO;
1898 break;
1899 }
1900 }
1901 }
1902
1903 return iotlb;
1904}
1905
bellard01243112004-01-04 15:48:17 +00001906#else
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001907/*
1908 * Walks guest process memory "regions" one by one
1909 * and calls callback function 'fn' for each region.
1910 */
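/*
 * Adjacent pages with identical protection flags are coalesced:
 * walk_memory_regions_end() only reports the accumulated [start, end)
 * run to 'fn' when the protection value changes.
 */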
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001911
1912struct walk_memory_regions_data
bellard9fa3e852004-01-04 18:06:42 +00001913{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001914 walk_memory_regions_fn fn;
1915 void *priv;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001916 uintptr_t start;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001917 int prot;
1918};
bellard9fa3e852004-01-04 18:06:42 +00001919
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001920static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001921 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001922{
1923 if (data->start != -1ul) {
1924 int rc = data->fn(data->priv, data->start, end, data->prot);
1925 if (rc != 0) {
1926 return rc;
bellard9fa3e852004-01-04 18:06:42 +00001927 }
bellard33417e72003-08-10 21:47:01 +00001928 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001929
1930 data->start = (new_prot ? end : -1ul);
1931 data->prot = new_prot;
1932
1933 return 0;
1934}
1935
1936static int walk_memory_regions_1(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00001937 abi_ulong base, int level, void **lp)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001938{
Paul Brookb480d9b2010-03-12 23:23:29 +00001939 abi_ulong pa;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001940 int i, rc;
1941
1942 if (*lp == NULL) {
1943 return walk_memory_regions_end(data, base, 0);
1944 }
1945
1946 if (level == 0) {
1947 PageDesc *pd = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001948 for (i = 0; i < L2_SIZE; ++i) {
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001949 int prot = pd[i].flags;
1950
1951 pa = base | (i << TARGET_PAGE_BITS);
1952 if (prot != data->prot) {
1953 rc = walk_memory_regions_end(data, pa, prot);
1954 if (rc != 0) {
1955 return rc;
1956 }
1957 }
1958 }
1959 } else {
1960 void **pp = *lp;
Paul Brook7296aba2010-03-14 14:58:46 +00001961 for (i = 0; i < L2_SIZE; ++i) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001962 pa = base | ((abi_ulong)i <<
1963 (TARGET_PAGE_BITS + L2_BITS * level));
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001964 rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
1965 if (rc != 0) {
1966 return rc;
1967 }
1968 }
1969 }
1970
1971 return 0;
1972}
1973
1974int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
1975{
1976 struct walk_memory_regions_data data;
Stefan Weil8efe0ca2012-04-12 15:42:19 +02001977 uintptr_t i;
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001978
1979 data.fn = fn;
1980 data.priv = priv;
1981 data.start = -1ul;
1982 data.prot = 0;
1983
1984 for (i = 0; i < V_L1_SIZE; i++) {
Paul Brookb480d9b2010-03-12 23:23:29 +00001985 int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08001986 V_L1_SHIFT / L2_BITS - 1, l1_map + i);
1987 if (rc != 0) {
1988 return rc;
1989 }
1990 }
1991
1992 return walk_memory_regions_end(&data, 0, 0);
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001993}
1994
Paul Brookb480d9b2010-03-12 23:23:29 +00001995static int dump_region(void *priv, abi_ulong start,
1996 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03001997{
1998 FILE *f = (FILE *)priv;
1999
Paul Brookb480d9b2010-03-12 23:23:29 +00002000 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2001 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002002 start, end, end - start,
2003 ((prot & PAGE_READ) ? 'r' : '-'),
2004 ((prot & PAGE_WRITE) ? 'w' : '-'),
2005 ((prot & PAGE_EXEC) ? 'x' : '-'));
2006
2007 return (0);
2008}
2009
2010/* dump memory mappings */
2011void page_dump(FILE *f)
2012{
2013 (void) fprintf(f, "%-8s %-8s %-8s %s\n",
2014 "start", "end", "size", "prot");
2015 walk_memory_regions(f, dump_region);
bellard33417e72003-08-10 21:47:01 +00002016}
2017
pbrook53a59602006-03-25 19:31:22 +00002018int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002019{
bellard9fa3e852004-01-04 18:06:42 +00002020 PageDesc *p;
2021
2022 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002023 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002024 return 0;
2025 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002026}
2027
Richard Henderson376a7902010-03-10 15:57:04 -08002028/* Modify the flags of a page and invalidate the code if necessary.
2029 The flag PAGE_WRITE_ORG is positioned automatically depending
2030 on PAGE_WRITE. The mmap_lock should already be held. */
pbrook53a59602006-03-25 19:31:22 +00002031void page_set_flags(target_ulong start, target_ulong end, int flags)
bellard9fa3e852004-01-04 18:06:42 +00002032{
Richard Henderson376a7902010-03-10 15:57:04 -08002033 target_ulong addr, len;
bellard9fa3e852004-01-04 18:06:42 +00002034
Richard Henderson376a7902010-03-10 15:57:04 -08002035 /* This function should never be called with addresses outside the
2036 guest address space. If this assert fires, it probably indicates
2037 a missing call to h2g_valid. */
Paul Brookb480d9b2010-03-12 23:23:29 +00002038#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2039 assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002040#endif
2041 assert(start < end);
2042
bellard9fa3e852004-01-04 18:06:42 +00002043 start = start & TARGET_PAGE_MASK;
2044 end = TARGET_PAGE_ALIGN(end);
Richard Henderson376a7902010-03-10 15:57:04 -08002045
2046 if (flags & PAGE_WRITE) {
bellard9fa3e852004-01-04 18:06:42 +00002047 flags |= PAGE_WRITE_ORG;
Richard Henderson376a7902010-03-10 15:57:04 -08002048 }
2049
2050 for (addr = start, len = end - start;
2051 len != 0;
2052 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
2053 PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
2054
2055 /* If the write protection bit is set, then we invalidate
2056 the code inside. */
ths5fafdf22007-09-16 21:08:06 +00002057 if (!(p->flags & PAGE_WRITE) &&
bellard9fa3e852004-01-04 18:06:42 +00002058 (flags & PAGE_WRITE) &&
2059 p->first_tb) {
bellardd720b932004-04-25 17:57:43 +00002060 tb_invalidate_phys_page(addr, 0, NULL);
bellard9fa3e852004-01-04 18:06:42 +00002061 }
2062 p->flags = flags;
2063 }
bellard9fa3e852004-01-04 18:06:42 +00002064}
2065
ths3d97b402007-11-02 19:02:07 +00002066int page_check_range(target_ulong start, target_ulong len, int flags)
2067{
2068 PageDesc *p;
2069 target_ulong end;
2070 target_ulong addr;
2071
Richard Henderson376a7902010-03-10 15:57:04 -08002072 /* This function should never be called with addresses outside the
2073 guest address space. If this assert fires, it probably indicates
2074 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002075#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2076 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002077#endif
2078
Richard Henderson3e0650a2010-03-29 10:54:42 -07002079 if (len == 0) {
2080 return 0;
2081 }
Richard Henderson376a7902010-03-10 15:57:04 -08002082 if (start + len - 1 < start) {
2083 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002084 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002085 }
balrog55f280c2008-10-28 10:24:11 +00002086
ths3d97b402007-11-02 19:02:07 +00002087 end = TARGET_PAGE_ALIGN(start+len); /* must do this before we lose bits in the next step */
2088 start = start & TARGET_PAGE_MASK;
2089
Richard Henderson376a7902010-03-10 15:57:04 -08002090 for (addr = start, len = end - start;
2091 len != 0;
2092 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002093 p = page_find(addr >> TARGET_PAGE_BITS);
2094 if( !p )
2095 return -1;
2096 if( !(p->flags & PAGE_VALID) )
2097 return -1;
2098
bellarddae32702007-11-14 10:51:00 +00002099 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002100 return -1;
bellarddae32702007-11-14 10:51:00 +00002101 if (flags & PAGE_WRITE) {
2102 if (!(p->flags & PAGE_WRITE_ORG))
2103 return -1;
2104 /* unprotect the page if it was put read-only because it
2105 contains translated code */
2106 if (!(p->flags & PAGE_WRITE)) {
2107 if (!page_unprotect(addr, 0, NULL))
2108 return -1;
2109 }
2110 return 0;
2111 }
ths3d97b402007-11-02 19:02:07 +00002112 }
2113 return 0;
2114}
2115
bellard9fa3e852004-01-04 18:06:42 +00002116/* called from signal handler: invalidate the code and unprotect the
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002117 page. Return TRUE if the fault was successfully handled. */
Stefan Weil6375e092012-04-06 22:26:15 +02002118int page_unprotect(target_ulong address, uintptr_t pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00002119{
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002120 unsigned int prot;
2121 PageDesc *p;
pbrook53a59602006-03-25 19:31:22 +00002122 target_ulong host_start, host_end, addr;
bellard9fa3e852004-01-04 18:06:42 +00002123
pbrookc8a706f2008-06-02 16:16:42 +00002124 /* Technically this isn't safe inside a signal handler. However we
2125 know this only ever happens in a synchronous SEGV handler, so in
2126 practice it seems to be ok. */
2127 mmap_lock();
2128
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002129 p = page_find(address >> TARGET_PAGE_BITS);
2130 if (!p) {
pbrookc8a706f2008-06-02 16:16:42 +00002131 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002132 return 0;
pbrookc8a706f2008-06-02 16:16:42 +00002133 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002134
bellard9fa3e852004-01-04 18:06:42 +00002135 /* if the page was really writable, then we change its
2136 protection back to writable */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002137 if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
2138 host_start = address & qemu_host_page_mask;
2139 host_end = host_start + qemu_host_page_size;
2140
2141 prot = 0;
2142 for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
2143 p = page_find(addr >> TARGET_PAGE_BITS);
2144 p->flags |= PAGE_WRITE;
2145 prot |= p->flags;
2146
bellard9fa3e852004-01-04 18:06:42 +00002147 /* and since the content will be modified, we must invalidate
2148 the corresponding translated code. */
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002149 tb_invalidate_phys_page(addr, pc, puc);
bellard9fa3e852004-01-04 18:06:42 +00002150#ifdef DEBUG_TB_CHECK
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002151 tb_invalidate_check(addr);
bellard9fa3e852004-01-04 18:06:42 +00002152#endif
bellard9fa3e852004-01-04 18:06:42 +00002153 }
Aurelien Jarno45d679d2010-03-29 02:12:51 +02002154 mprotect((void *)g2h(host_start), qemu_host_page_size,
2155 prot & PAGE_BITS);
2156
2157 mmap_unlock();
2158 return 1;
bellard9fa3e852004-01-04 18:06:42 +00002159 }
pbrookc8a706f2008-06-02 16:16:42 +00002160 mmap_unlock();
bellard9fa3e852004-01-04 18:06:42 +00002161 return 0;
2162}
bellard9fa3e852004-01-04 18:06:42 +00002163#endif /* defined(CONFIG_USER_ONLY) */
2164
pbrooke2eef172008-06-08 01:09:01 +00002165#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002166
Paul Brookc04b2b72010-03-01 03:31:14 +00002167#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
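/* A subpage splits a single target page into byte-granular regions:
   sub_section[] holds one phys_sections index per byte offset within the
   page (see SUBPAGE_IDX above). */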
2168typedef struct subpage_t {
Avi Kivity70c68e42012-01-02 12:32:48 +02002169 MemoryRegion iomem;
Paul Brookc04b2b72010-03-01 03:31:14 +00002170 target_phys_addr_t base;
Avi Kivity5312bd82012-02-12 18:32:55 +02002171 uint16_t sub_section[TARGET_PAGE_SIZE];
Paul Brookc04b2b72010-03-01 03:31:14 +00002172} subpage_t;
2173
Anthony Liguoric227f092009-10-01 16:12:16 -05002174static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002175 uint16_t section);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002176static subpage_t *subpage_init(target_phys_addr_t base);
Avi Kivity5312bd82012-02-12 18:32:55 +02002177static void destroy_page_desc(uint16_t section_index)
Avi Kivity54688b12012-02-09 17:34:32 +02002178{
Avi Kivity5312bd82012-02-12 18:32:55 +02002179 MemoryRegionSection *section = &phys_sections[section_index];
2180 MemoryRegion *mr = section->mr;
Avi Kivity54688b12012-02-09 17:34:32 +02002181
2182 if (mr->subpage) {
2183 subpage_t *subpage = container_of(mr, subpage_t, iomem);
2184 memory_region_destroy(&subpage->iomem);
2185 g_free(subpage);
2186 }
2187}
2188
Avi Kivity4346ae32012-02-10 17:00:01 +02002189static void destroy_l2_mapping(PhysPageEntry *lp, unsigned level)
Avi Kivity54688b12012-02-09 17:34:32 +02002190{
2191 unsigned i;
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002192 PhysPageEntry *p;
Avi Kivity54688b12012-02-09 17:34:32 +02002193
Avi Kivityc19e8802012-02-13 20:25:31 +02002194 if (lp->ptr == PHYS_MAP_NODE_NIL) {
Avi Kivity54688b12012-02-09 17:34:32 +02002195 return;
2196 }
2197
Avi Kivityc19e8802012-02-13 20:25:31 +02002198 p = phys_map_nodes[lp->ptr];
Avi Kivity4346ae32012-02-10 17:00:01 +02002199 for (i = 0; i < L2_SIZE; ++i) {
Avi Kivity07f07b32012-02-13 20:45:32 +02002200 if (!p[i].is_leaf) {
Avi Kivity54688b12012-02-09 17:34:32 +02002201 destroy_l2_mapping(&p[i], level - 1);
Avi Kivity4346ae32012-02-10 17:00:01 +02002202 } else {
Avi Kivityc19e8802012-02-13 20:25:31 +02002203 destroy_page_desc(p[i].ptr);
Avi Kivity54688b12012-02-09 17:34:32 +02002204 }
Avi Kivity54688b12012-02-09 17:34:32 +02002205 }
Avi Kivity07f07b32012-02-13 20:45:32 +02002206 lp->is_leaf = 0;
Avi Kivityc19e8802012-02-13 20:25:31 +02002207 lp->ptr = PHYS_MAP_NODE_NIL;
Avi Kivity54688b12012-02-09 17:34:32 +02002208}
2209
2210static void destroy_all_mappings(void)
2211{
Avi Kivity3eef53d2012-02-10 14:57:31 +02002212 destroy_l2_mapping(&phys_map, P_L2_LEVELS - 1);
Avi Kivityd6f2ea22012-02-12 20:12:49 +02002213 phys_map_nodes_reset();
Avi Kivity54688b12012-02-09 17:34:32 +02002214}
2215
Avi Kivity5312bd82012-02-12 18:32:55 +02002216static uint16_t phys_section_add(MemoryRegionSection *section)
2217{
2218 if (phys_sections_nb == phys_sections_nb_alloc) {
2219 phys_sections_nb_alloc = MAX(phys_sections_nb_alloc * 2, 16);
2220 phys_sections = g_renew(MemoryRegionSection, phys_sections,
2221 phys_sections_nb_alloc);
2222 }
2223 phys_sections[phys_sections_nb] = *section;
2224 return phys_sections_nb++;
2225}
2226
2227static void phys_sections_clear(void)
2228{
2229 phys_sections_nb = 0;
2230}
2231
Michael S. Tsirkin8f2498f2009-09-29 18:53:16 +02002232/* register physical memory.
2233 For RAM, 'size' must be a multiple of the target page size.
2234 If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
pbrook8da3ff12008-12-01 18:59:50 +00002235 io memory page. The address used when calling the IO function is
2236 the offset from the start of the region, plus region_offset. Both
Stuart Bradyccbb4d42009-05-03 12:15:06 +01002237 start_addr and region_offset are rounded down to a page boundary
pbrook8da3ff12008-12-01 18:59:50 +00002238 before calculating this offset. This should not be a problem unless
2239 the low bits of start_addr and region_offset differ. */
Avi Kivity0f0cb162012-02-13 17:14:32 +02002240static void register_subpage(MemoryRegionSection *section)
2241{
2242 subpage_t *subpage;
2243 target_phys_addr_t base = section->offset_within_address_space
2244 & TARGET_PAGE_MASK;
Avi Kivityf3705d52012-03-08 16:16:34 +02002245 MemoryRegionSection *existing = phys_page_find(base >> TARGET_PAGE_BITS);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002246 MemoryRegionSection subsection = {
2247 .offset_within_address_space = base,
2248 .size = TARGET_PAGE_SIZE,
2249 };
Avi Kivity0f0cb162012-02-13 17:14:32 +02002250 target_phys_addr_t start, end;
2251
Avi Kivityf3705d52012-03-08 16:16:34 +02002252 assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002253
Avi Kivityf3705d52012-03-08 16:16:34 +02002254 if (!(existing->mr->subpage)) {
Avi Kivity0f0cb162012-02-13 17:14:32 +02002255 subpage = subpage_init(base);
2256 subsection.mr = &subpage->iomem;
Avi Kivity29990972012-02-13 20:21:20 +02002257 phys_page_set(base >> TARGET_PAGE_BITS, 1,
2258 phys_section_add(&subsection));
Avi Kivity0f0cb162012-02-13 17:14:32 +02002259 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02002260 subpage = container_of(existing->mr, subpage_t, iomem);
Avi Kivity0f0cb162012-02-13 17:14:32 +02002261 }
2262 start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
2263 end = start + section->size;
2264 subpage_register(subpage, start, end, phys_section_add(section));
2265}
2266
2267
2268static void register_multipage(MemoryRegionSection *section)
bellard33417e72003-08-10 21:47:01 +00002269{
Avi Kivitydd811242012-01-02 12:17:03 +02002270 target_phys_addr_t start_addr = section->offset_within_address_space;
2271 ram_addr_t size = section->size;
Avi Kivity29990972012-02-13 20:21:20 +02002272 target_phys_addr_t addr;
Avi Kivity5312bd82012-02-12 18:32:55 +02002273 uint16_t section_index = phys_section_add(section);
Avi Kivitydd811242012-01-02 12:17:03 +02002274
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002275 assert(size);
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002276
Edgar E. Iglesias3b8e6a22011-04-05 13:00:36 +02002277 addr = start_addr;
Avi Kivity29990972012-02-13 20:21:20 +02002278 phys_page_set(addr >> TARGET_PAGE_BITS, size >> TARGET_PAGE_BITS,
2279 section_index);
bellard33417e72003-08-10 21:47:01 +00002280}
2281
Avi Kivity0f0cb162012-02-13 17:14:32 +02002282void cpu_register_physical_memory_log(MemoryRegionSection *section,
2283 bool readonly)
2284{
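    /* Split the section into an unaligned head (registered via a subpage),
       a page-aligned middle (registered as whole pages), and an unaligned
       tail (again a subpage). */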
2285 MemoryRegionSection now = *section, remain = *section;
2286
2287 if ((now.offset_within_address_space & ~TARGET_PAGE_MASK)
2288 || (now.size < TARGET_PAGE_SIZE)) {
2289 now.size = MIN(TARGET_PAGE_ALIGN(now.offset_within_address_space)
2290 - now.offset_within_address_space,
2291 now.size);
2292 register_subpage(&now);
2293 remain.size -= now.size;
2294 remain.offset_within_address_space += now.size;
2295 remain.offset_within_region += now.size;
2296 }
2297 now = remain;
2298 now.size &= TARGET_PAGE_MASK;
2299 if (now.size) {
2300 register_multipage(&now);
2301 remain.size -= now.size;
2302 remain.offset_within_address_space += now.size;
2303 remain.offset_within_region += now.size;
2304 }
2305 now = remain;
2306 if (now.size) {
2307 register_subpage(&now);
2308 }
2309}
2310
2311
Anthony Liguoric227f092009-10-01 16:12:16 -05002312void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002313{
2314 if (kvm_enabled())
2315 kvm_coalesce_mmio_region(addr, size);
2316}
2317
Anthony Liguoric227f092009-10-01 16:12:16 -05002318void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002319{
2320 if (kvm_enabled())
2321 kvm_uncoalesce_mmio_region(addr, size);
2322}
2323
Sheng Yang62a27442010-01-26 19:21:16 +08002324void qemu_flush_coalesced_mmio_buffer(void)
2325{
2326 if (kvm_enabled())
2327 kvm_flush_coalesced_mmio_buffer();
2328}
2329
Marcelo Tosattic9027602010-03-01 20:25:08 -03002330#if defined(__linux__) && !defined(TARGET_S390X)
2331
2332#include <sys/vfs.h>
2333
2334#define HUGETLBFS_MAGIC 0x958458f6
2335
2336static long gethugepagesize(const char *path)
2337{
2338 struct statfs fs;
2339 int ret;
2340
2341 do {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002342 ret = statfs(path, &fs);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002343 } while (ret != 0 && errno == EINTR);
2344
2345 if (ret != 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002346 perror(path);
2347 return 0;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002348 }
2349
2350 if (fs.f_type != HUGETLBFS_MAGIC)
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002351 fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002352
2353 return fs.f_bsize;
2354}
2355
Alex Williamson04b16652010-07-02 11:13:17 -06002356static void *file_ram_alloc(RAMBlock *block,
2357 ram_addr_t memory,
2358 const char *path)
Marcelo Tosattic9027602010-03-01 20:25:08 -03002359{
2360 char *filename;
2361 void *area;
2362 int fd;
2363#ifdef MAP_POPULATE
2364 int flags;
2365#endif
2366 unsigned long hpagesize;
2367
2368 hpagesize = gethugepagesize(path);
2369 if (!hpagesize) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002370 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002371 }
2372
2373 if (memory < hpagesize) {
2374 return NULL;
2375 }
2376
2377 if (kvm_enabled() && !kvm_has_sync_mmu()) {
2378 fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
2379 return NULL;
2380 }
2381
2382 if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002383 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002384 }
2385
2386 fd = mkstemp(filename);
2387 if (fd < 0) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002388 perror("unable to create backing store for hugepages");
2389 free(filename);
2390 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002391 }
2392 unlink(filename);
2393 free(filename);
2394
2395 memory = (memory+hpagesize-1) & ~(hpagesize-1);
2396
2397 /*
2398 * ftruncate is not supported by hugetlbfs in older
2399 * hosts, so don't bother bailing out on errors.
2400 * If anything goes wrong with it under other filesystems,
2401 * mmap will fail.
2402 */
2403 if (ftruncate(fd, memory))
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002404 perror("ftruncate");
Marcelo Tosattic9027602010-03-01 20:25:08 -03002405
2406#ifdef MAP_POPULATE
 2407 /* NB: MAP_POPULATE won't exhaustively allocate all phys pages when
 2408 * MAP_PRIVATE is requested. For mem_prealloc we mmap as MAP_SHARED
2409 * to sidestep this quirk.
2410 */
2411 flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
2412 area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
2413#else
2414 area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
2415#endif
2416 if (area == MAP_FAILED) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09002417 perror("file_ram_alloc: can't mmap RAM pages");
2418 close(fd);
2419 return (NULL);
Marcelo Tosattic9027602010-03-01 20:25:08 -03002420 }
Alex Williamson04b16652010-07-02 11:13:17 -06002421 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03002422 return area;
2423}
2424#endif
2425
Alex Williamsond17b5282010-06-25 11:08:38 -06002426static ram_addr_t find_ram_offset(ram_addr_t size)
2427{
Alex Williamson04b16652010-07-02 11:13:17 -06002428 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06002429 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002430
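    /* Best-fit search: for every block, find the closest following block
       and remember the smallest gap that can still hold 'size'. */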
2431 if (QLIST_EMPTY(&ram_list.blocks))
2432 return 0;
2433
2434 QLIST_FOREACH(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002435 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002436
2437 end = block->offset + block->length;
2438
2439 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2440 if (next_block->offset >= end) {
2441 next = MIN(next, next_block->offset);
2442 }
2443 }
2444 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06002445 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06002446 mingap = next - end;
2447 }
2448 }
Alex Williamson3e837b22011-10-31 08:54:09 -06002449
2450 if (offset == RAM_ADDR_MAX) {
2451 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
2452 (uint64_t)size);
2453 abort();
2454 }
2455
Alex Williamson04b16652010-07-02 11:13:17 -06002456 return offset;
2457}
2458
2459static ram_addr_t last_ram_offset(void)
2460{
Alex Williamsond17b5282010-06-25 11:08:38 -06002461 RAMBlock *block;
2462 ram_addr_t last = 0;
2463
2464 QLIST_FOREACH(block, &ram_list.blocks, next)
2465 last = MAX(last, block->offset + block->length);
2466
2467 return last;
2468}
2469
Avi Kivityc5705a72011-12-20 15:59:12 +02002470void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
Cam Macdonell84b89d72010-07-26 18:10:57 -06002471{
2472 RAMBlock *new_block, *block;
2473
Avi Kivityc5705a72011-12-20 15:59:12 +02002474 new_block = NULL;
2475 QLIST_FOREACH(block, &ram_list.blocks, next) {
2476 if (block->offset == addr) {
2477 new_block = block;
2478 break;
2479 }
2480 }
2481 assert(new_block);
2482 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002483
Anthony Liguori09e5ab62012-02-03 12:28:43 -06002484 if (dev) {
2485 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002486 if (id) {
2487 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05002488 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06002489 }
2490 }
2491 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
2492
2493 QLIST_FOREACH(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02002494 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06002495 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
2496 new_block->idstr);
2497 abort();
2498 }
2499 }
Avi Kivityc5705a72011-12-20 15:59:12 +02002500}
2501
2502ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
2503 MemoryRegion *mr)
2504{
2505 RAMBlock *new_block;
2506
2507 size = TARGET_PAGE_ALIGN(size);
2508 new_block = g_malloc0(sizeof(*new_block));
Cam Macdonell84b89d72010-07-26 18:10:57 -06002509
Avi Kivity7c637362011-12-21 13:09:49 +02002510 new_block->mr = mr;
Jun Nakajima432d2682010-08-31 16:41:25 +01002511 new_block->offset = find_ram_offset(size);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002512 if (host) {
2513 new_block->host = host;
Huang Yingcd19cfa2011-03-02 08:56:19 +01002514 new_block->flags |= RAM_PREALLOC_MASK;
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002515 } else {
2516 if (mem_path) {
2517#if defined (__linux__) && !defined(TARGET_S390X)
2518 new_block->host = file_ram_alloc(new_block, size, mem_path);
2519 if (!new_block->host) {
2520 new_block->host = qemu_vmalloc(size);
Andreas Färbere78815a2010-09-25 11:26:05 +00002521 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002522 }
2523#else
2524 fprintf(stderr, "-mem-path option unsupported\n");
2525 exit(1);
2526#endif
2527 } else {
2528#if defined(TARGET_S390X) && defined(CONFIG_KVM)
Christian Borntraegerff836782011-05-10 14:49:10 +02002529 /* S390 KVM requires the topmost vma of the RAM to be smaller than
2530 a system-defined value, which is at least 256GB. Larger systems
2531 have larger values. We put the guest between the end of the data
2532 segment (system break) and this value. We use 32GB as a base to
2533 have enough room for the system break to grow. */
2534 new_block->host = mmap((void*)0x800000000, size,
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002535 PROT_EXEC|PROT_READ|PROT_WRITE,
Christian Borntraegerff836782011-05-10 14:49:10 +02002536 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
Alexander Graffb8b2732011-05-20 17:33:28 +02002537 if (new_block->host == MAP_FAILED) {
2538 fprintf(stderr, "Allocating RAM failed\n");
2539 abort();
2540 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002541#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002542 if (xen_enabled()) {
Avi Kivityfce537d2011-12-18 15:48:55 +02002543 xen_ram_alloc(new_block->offset, size, mr);
Jun Nakajima432d2682010-08-31 16:41:25 +01002544 } else {
2545 new_block->host = qemu_vmalloc(size);
2546 }
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002547#endif
Andreas Färbere78815a2010-09-25 11:26:05 +00002548 qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002549 }
2550 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06002551 new_block->length = size;
2552
2553 QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
2554
Anthony Liguori7267c092011-08-20 22:09:37 -05002555 ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
Cam Macdonell84b89d72010-07-26 18:10:57 -06002556 last_ram_offset() >> TARGET_PAGE_BITS);
2557 memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
2558 0xff, size >> TARGET_PAGE_BITS);
2559
2560 if (kvm_enabled())
2561 kvm_setup_guest_memory(new_block->host, size);
2562
2563 return new_block->offset;
2564}
2565
Avi Kivityc5705a72011-12-20 15:59:12 +02002566ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr)
pbrook94a6b542009-04-11 17:15:54 +00002567{
Avi Kivityc5705a72011-12-20 15:59:12 +02002568 return qemu_ram_alloc_from_ptr(size, NULL, mr);
pbrook94a6b542009-04-11 17:15:54 +00002569}
bellarde9a1ab12007-02-08 23:08:38 +00002570
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002571void qemu_ram_free_from_ptr(ram_addr_t addr)
2572{
2573 RAMBlock *block;
2574
2575 QLIST_FOREACH(block, &ram_list.blocks, next) {
2576 if (addr == block->offset) {
2577 QLIST_REMOVE(block, next);
Anthony Liguori7267c092011-08-20 22:09:37 -05002578 g_free(block);
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002579 return;
2580 }
2581 }
2582}
2583
Anthony Liguoric227f092009-10-01 16:12:16 -05002584void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00002585{
Alex Williamson04b16652010-07-02 11:13:17 -06002586 RAMBlock *block;
2587
2588 QLIST_FOREACH(block, &ram_list.blocks, next) {
2589 if (addr == block->offset) {
2590 QLIST_REMOVE(block, next);
Huang Yingcd19cfa2011-03-02 08:56:19 +01002591 if (block->flags & RAM_PREALLOC_MASK) {
2592 ;
2593 } else if (mem_path) {
Alex Williamson04b16652010-07-02 11:13:17 -06002594#if defined (__linux__) && !defined(TARGET_S390X)
2595 if (block->fd) {
2596 munmap(block->host, block->length);
2597 close(block->fd);
2598 } else {
2599 qemu_vfree(block->host);
2600 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002601#else
2602 abort();
Alex Williamson04b16652010-07-02 11:13:17 -06002603#endif
2604 } else {
2605#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2606 munmap(block->host, block->length);
2607#else
Jan Kiszka868bb332011-06-21 22:59:09 +02002608 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002609 xen_invalidate_map_cache_entry(block->host);
Jun Nakajima432d2682010-08-31 16:41:25 +01002610 } else {
2611 qemu_vfree(block->host);
2612 }
Alex Williamson04b16652010-07-02 11:13:17 -06002613#endif
2614 }
Anthony Liguori7267c092011-08-20 22:09:37 -05002615 g_free(block);
Alex Williamson04b16652010-07-02 11:13:17 -06002616 return;
2617 }
2618 }
2619
bellarde9a1ab12007-02-08 23:08:38 +00002620}
2621
Huang Yingcd19cfa2011-03-02 08:56:19 +01002622#ifndef _WIN32
2623void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
2624{
2625 RAMBlock *block;
2626 ram_addr_t offset;
2627 int flags;
2628 void *area, *vaddr;
2629
2630 QLIST_FOREACH(block, &ram_list.blocks, next) {
2631 offset = addr - block->offset;
2632 if (offset < block->length) {
2633 vaddr = block->host + offset;
2634 if (block->flags & RAM_PREALLOC_MASK) {
2635 ;
2636 } else {
2637 flags = MAP_FIXED;
2638 munmap(vaddr, length);
2639 if (mem_path) {
2640#if defined(__linux__) && !defined(TARGET_S390X)
2641 if (block->fd) {
2642#ifdef MAP_POPULATE
2643 flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
2644 MAP_PRIVATE;
2645#else
2646 flags |= MAP_PRIVATE;
2647#endif
2648 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2649 flags, block->fd, offset);
2650 } else {
2651 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2652 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2653 flags, -1, 0);
2654 }
Jan Kiszkafd28aa12011-03-15 12:26:14 +01002655#else
2656 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01002657#endif
2658 } else {
2659#if defined(TARGET_S390X) && defined(CONFIG_KVM)
2660 flags |= MAP_SHARED | MAP_ANONYMOUS;
2661 area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
2662 flags, -1, 0);
2663#else
2664 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
2665 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
2666 flags, -1, 0);
2667#endif
2668 }
2669 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00002670 fprintf(stderr, "Could not remap addr: "
2671 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01002672 length, addr);
2673 exit(1);
2674 }
2675 qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
2676 }
2677 return;
2678 }
2679 }
2680}
2681#endif /* !_WIN32 */
2682
pbrookdc828ca2009-04-09 22:21:07 +00002683/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00002684 With the exception of the softmmu code in this file, this should
2685 only be used for local memory (e.g. video ram) that the device owns,
2686 and knows it isn't going to access beyond the end of the block.
2687
2688 It should not be used for general purpose DMA.
2689 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
2690 */
Anthony Liguoric227f092009-10-01 16:12:16 -05002691void *qemu_get_ram_ptr(ram_addr_t addr)
pbrookdc828ca2009-04-09 22:21:07 +00002692{
pbrook94a6b542009-04-11 17:15:54 +00002693 RAMBlock *block;
2694
Alex Williamsonf471a172010-06-11 11:11:42 -06002695 QLIST_FOREACH(block, &ram_list.blocks, next) {
2696 if (addr - block->offset < block->length) {
Vincent Palatin7d82af32011-03-10 15:47:46 -05002697 /* Move this entry to the start of the list. */
2698 if (block != QLIST_FIRST(&ram_list.blocks)) {
2699 QLIST_REMOVE(block, next);
2700 QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
2701 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002702 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002703 /* We need to check whether the requested address is in RAM
2704 * because we don't want to map the whole of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002705 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002706 */
2707 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002708 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002709 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002710 block->host =
2711 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002712 }
2713 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002714 return block->host + (addr - block->offset);
2715 }
pbrook94a6b542009-04-11 17:15:54 +00002716 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002717
2718 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2719 abort();
2720
2721 return NULL;
pbrookdc828ca2009-04-09 22:21:07 +00002722}
2723
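/* Illustrative sketch, not part of the original file: the intended use of
 * qemu_get_ram_ptr() described above, i.e. a device touching a RAM block it
 * owns.  The function and the names "example_vram_clear", "mr" and
 * "vram_size" are hypothetical and exist only for this example. */
static void example_vram_clear(MemoryRegion *mr, ram_addr_t vram_size)
{
    /* Allocate a block for the device and clear it through the host
     * mapping; the pointer must not be used past the end of the block. */
    ram_addr_t offset = qemu_ram_alloc(vram_size, mr);
    uint8_t *vram = qemu_get_ram_ptr(offset);

    memset(vram, 0, vram_size);
    qemu_put_ram_ptr(vram);
}
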
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002724/* Return a host pointer to ram allocated with qemu_ram_alloc.
2725 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
2726 */
2727void *qemu_safe_ram_ptr(ram_addr_t addr)
2728{
2729 RAMBlock *block;
2730
2731 QLIST_FOREACH(block, &ram_list.blocks, next) {
2732 if (addr - block->offset < block->length) {
Jan Kiszka868bb332011-06-21 22:59:09 +02002733 if (xen_enabled()) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002734 /* We need to check whether the requested address is in RAM
2735 * because we don't want to map the whole of guest memory in QEMU.
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002736 * In that case just map until the end of the page.
Jun Nakajima432d2682010-08-31 16:41:25 +01002737 */
2738 if (block->offset == 0) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002739 return xen_map_cache(addr, 0, 0);
Jun Nakajima432d2682010-08-31 16:41:25 +01002740 } else if (block->host == NULL) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002741 block->host =
2742 xen_map_cache(block->offset, block->length, 1);
Jun Nakajima432d2682010-08-31 16:41:25 +01002743 }
2744 }
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02002745 return block->host + (addr - block->offset);
2746 }
2747 }
2748
2749 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2750 abort();
2751
2752 return NULL;
2753}
2754
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002755/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
2756 * but takes a size argument */
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002757void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002758{
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002759 if (*size == 0) {
2760 return NULL;
2761 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002762 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002763 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02002764 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002765 RAMBlock *block;
2766
2767 QLIST_FOREACH(block, &ram_list.blocks, next) {
2768 if (addr - block->offset < block->length) {
2769 if (addr - block->offset + *size > block->length)
2770 *size = block->length - addr + block->offset;
2771 return block->host + (addr - block->offset);
2772 }
2773 }
2774
2775 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
2776 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01002777 }
2778}
2779
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002780void qemu_put_ram_ptr(void *addr)
2781{
2782 trace_qemu_put_ram_ptr(addr);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002783}
2784
Marcelo Tosattie8902612010-10-11 15:31:19 -03002785int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00002786{
pbrook94a6b542009-04-11 17:15:54 +00002787 RAMBlock *block;
2788 uint8_t *host = ptr;
2789
Jan Kiszka868bb332011-06-21 22:59:09 +02002790 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002791 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Stefano Stabellini712c2b42011-05-19 18:35:46 +01002792 return 0;
2793 }
2794
Alex Williamsonf471a172010-06-11 11:11:42 -06002795 QLIST_FOREACH(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01002796 /* This case appears when the block is not mapped. */
2797 if (block->host == NULL) {
2798 continue;
2799 }
Alex Williamsonf471a172010-06-11 11:11:42 -06002800 if (host - block->host < block->length) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03002801 *ram_addr = block->offset + (host - block->host);
2802 return 0;
Alex Williamsonf471a172010-06-11 11:11:42 -06002803 }
pbrook94a6b542009-04-11 17:15:54 +00002804 }
Jun Nakajima432d2682010-08-31 16:41:25 +01002805
Marcelo Tosattie8902612010-10-11 15:31:19 -03002806 return -1;
2807}
Alex Williamsonf471a172010-06-11 11:11:42 -06002808
Marcelo Tosattie8902612010-10-11 15:31:19 -03002809/* Some of the softmmu routines need to translate from a host pointer
2810 (typically a TLB entry) back to a ram offset. */
2811ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
2812{
2813 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06002814
Marcelo Tosattie8902612010-10-11 15:31:19 -03002815 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
2816 fprintf(stderr, "Bad ram pointer %p\n", ptr);
2817 abort();
2818 }
2819 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00002820}
2821
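/* Illustrative sketch, not part of the original file: the host-pointer to
 * ram_addr_t translation provided by the two helpers above.  The function
 * name and "offset" are hypothetical; "offset" stands for any value
 * previously returned by qemu_ram_alloc(). */
static void example_ram_addr_round_trip(ram_addr_t offset)
{
    void *host = qemu_get_ram_ptr(offset);
    ram_addr_t back;

    /* The checked variant reports failure through its return value... */
    if (qemu_ram_addr_from_host(host, &back) == 0) {
        assert(back == offset);
    }
    /* ...while the _nofail variant aborts on a pointer it does not know. */
    assert(qemu_ram_addr_from_host_nofail(host) == offset);
}
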
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002822static uint64_t unassigned_mem_read(void *opaque, target_phys_addr_t addr,
2823 unsigned size)
bellard33417e72003-08-10 21:47:01 +00002824{
pbrook67d3b952006-12-18 05:03:52 +00002825#ifdef DEBUG_UNASSIGNED
blueswir1ab3d1722007-11-04 07:31:40 +00002826 printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
pbrook67d3b952006-12-18 05:03:52 +00002827#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002828#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002829 cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002830#endif
2831 return 0;
2832}
2833
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002834static void unassigned_mem_write(void *opaque, target_phys_addr_t addr,
2835 uint64_t val, unsigned size)
blueswir1e18231a2008-10-06 18:46:28 +00002836{
2837#ifdef DEBUG_UNASSIGNED
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002838 printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
blueswir1e18231a2008-10-06 18:46:28 +00002839#endif
Richard Henderson5b450402011-04-18 16:13:12 -07002840#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002841 cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, size);
blueswir1e18231a2008-10-06 18:46:28 +00002842#endif
2843}
2844
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002845static const MemoryRegionOps unassigned_mem_ops = {
2846 .read = unassigned_mem_read,
2847 .write = unassigned_mem_write,
2848 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002849};
2850
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002851static uint64_t error_mem_read(void *opaque, target_phys_addr_t addr,
2852 unsigned size)
2853{
2854 abort();
2855}
2856
2857static void error_mem_write(void *opaque, target_phys_addr_t addr,
2858 uint64_t value, unsigned size)
2859{
2860 abort();
2861}
2862
2863static const MemoryRegionOps error_mem_ops = {
2864 .read = error_mem_read,
2865 .write = error_mem_write,
2866 .endianness = DEVICE_NATIVE_ENDIAN,
bellard33417e72003-08-10 21:47:01 +00002867};
2868
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002869static const MemoryRegionOps rom_mem_ops = {
2870 .read = error_mem_read,
2871 .write = unassigned_mem_write,
2872 .endianness = DEVICE_NATIVE_ENDIAN,
2873};
2874
2875static void notdirty_mem_write(void *opaque, target_phys_addr_t ram_addr,
2876 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00002877{
bellard3a7d9292005-08-21 09:26:42 +00002878 int dirty_flags;
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002879 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002880 if (!(dirty_flags & CODE_DIRTY_FLAG)) {
2881#if !defined(CONFIG_USER_ONLY)
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002882 tb_invalidate_phys_page_fast(ram_addr, size);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002883 dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
bellard3a7d9292005-08-21 09:26:42 +00002884#endif
2885 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002886 switch (size) {
2887 case 1:
2888 stb_p(qemu_get_ram_ptr(ram_addr), val);
2889 break;
2890 case 2:
2891 stw_p(qemu_get_ram_ptr(ram_addr), val);
2892 break;
2893 case 4:
2894 stl_p(qemu_get_ram_ptr(ram_addr), val);
2895 break;
2896 default:
2897 abort();
2898 }
bellardf23db162005-08-21 19:12:28 +00002899 dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09002900 cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
bellardf23db162005-08-21 19:12:28 +00002901 /* we remove the notdirty callback only if the code has been
2902 flushed */
2903 if (dirty_flags == 0xff)
pbrook2e70f6e2008-06-29 01:03:05 +00002904 tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
bellard1ccde1c2004-02-06 19:46:14 +00002905}
2906
Avi Kivity0e0df1e2012-01-02 00:32:15 +02002907static const MemoryRegionOps notdirty_mem_ops = {
2908 .read = error_mem_read,
2909 .write = notdirty_mem_write,
2910 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00002911};
2912
pbrook0f459d12008-06-09 00:20:13 +00002913/* Generate a debug exception if a watchpoint has been hit. */
aliguorib4051332008-11-18 20:14:20 +00002914static void check_watchpoint(int offset, int len_mask, int flags)
pbrook0f459d12008-06-09 00:20:13 +00002915{
Andreas Färber9349b4f2012-03-14 01:38:32 +01002916 CPUArchState *env = cpu_single_env;
aliguori06d55cc2008-11-18 20:24:06 +00002917 target_ulong pc, cs_base;
2918 TranslationBlock *tb;
pbrook0f459d12008-06-09 00:20:13 +00002919 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00002920 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00002921 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00002922
aliguori06d55cc2008-11-18 20:24:06 +00002923 if (env->watchpoint_hit) {
2924 /* We re-entered the check after replacing the TB. Now raise
2925 * the debug interrupt so that it will trigger after the
2926 * current instruction. */
2927 cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
2928 return;
2929 }
pbrook2e70f6e2008-06-29 01:03:05 +00002930 vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002931 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00002932 if ((vaddr == (wp->vaddr & len_mask) ||
2933 (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
aliguori6e140f22008-11-18 20:37:55 +00002934 wp->flags |= BP_WATCHPOINT_HIT;
2935 if (!env->watchpoint_hit) {
2936 env->watchpoint_hit = wp;
2937 tb = tb_find_pc(env->mem_io_pc);
2938 if (!tb) {
2939 cpu_abort(env, "check_watchpoint: could not find TB for "
2940 "pc=%p", (void *)env->mem_io_pc);
2941 }
Stefan Weil618ba8e2011-04-18 06:39:53 +00002942 cpu_restore_state(tb, env, env->mem_io_pc);
aliguori6e140f22008-11-18 20:37:55 +00002943 tb_phys_invalidate(tb, -1);
2944 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
2945 env->exception_index = EXCP_DEBUG;
Max Filippov488d6572012-01-29 02:24:39 +04002946 cpu_loop_exit(env);
aliguori6e140f22008-11-18 20:37:55 +00002947 } else {
2948 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
2949 tb_gen_code(env, pc, cs_base, cpu_flags, 1);
Max Filippov488d6572012-01-29 02:24:39 +04002950 cpu_resume_from_signal(env, NULL);
aliguori6e140f22008-11-18 20:37:55 +00002951 }
aliguori06d55cc2008-11-18 20:24:06 +00002952 }
aliguori6e140f22008-11-18 20:37:55 +00002953 } else {
2954 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00002955 }
2956 }
2957}
2958
pbrook6658ffb2007-03-16 23:58:11 +00002959/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
2960 so these check for a hit then pass through to the normal out-of-line
2961 phys routines. */
Avi Kivity1ec9b902012-01-02 12:47:48 +02002962static uint64_t watch_mem_read(void *opaque, target_phys_addr_t addr,
2963 unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002964{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002965 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_READ);
2966 switch (size) {
2967 case 1: return ldub_phys(addr);
2968 case 2: return lduw_phys(addr);
2969 case 4: return ldl_phys(addr);
2970 default: abort();
2971 }
pbrook6658ffb2007-03-16 23:58:11 +00002972}
2973
Avi Kivity1ec9b902012-01-02 12:47:48 +02002974static void watch_mem_write(void *opaque, target_phys_addr_t addr,
2975 uint64_t val, unsigned size)
pbrook6658ffb2007-03-16 23:58:11 +00002976{
Avi Kivity1ec9b902012-01-02 12:47:48 +02002977 check_watchpoint(addr & ~TARGET_PAGE_MASK, ~(size - 1), BP_MEM_WRITE);
2978 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04002979 case 1:
2980 stb_phys(addr, val);
2981 break;
2982 case 2:
2983 stw_phys(addr, val);
2984 break;
2985 case 4:
2986 stl_phys(addr, val);
2987 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02002988 default: abort();
2989 }
pbrook6658ffb2007-03-16 23:58:11 +00002990}
2991
Avi Kivity1ec9b902012-01-02 12:47:48 +02002992static const MemoryRegionOps watch_mem_ops = {
2993 .read = watch_mem_read,
2994 .write = watch_mem_write,
2995 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00002996};
pbrook6658ffb2007-03-16 23:58:11 +00002997
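/* Illustrative sketch, not part of the original file: how the mechanism
 * described above is armed.  It assumes the cpu_watchpoint_insert()
 * prototype and the BP_MEM_WRITE flag declared elsewhere in this tree; the
 * function name, "env" and "guest_va" are hypothetical. */
static void example_watch_writes(CPUArchState *env, target_ulong guest_va)
{
    CPUWatchpoint *wp;

    /* Watch a 4-byte, 4-byte-aligned location for writes.  The TLB entry
     * for the page is redirected so that stores pass through
     * watch_mem_write() above before reaching memory. */
    if (cpu_watchpoint_insert(env, guest_va, 4, BP_MEM_WRITE, &wp) < 0) {
        fprintf(stderr, "could not insert watchpoint\n");
    }
}
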
Avi Kivity70c68e42012-01-02 12:32:48 +02002998static uint64_t subpage_read(void *opaque, target_phys_addr_t addr,
2999 unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003000{
Avi Kivity70c68e42012-01-02 12:32:48 +02003001 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003002 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003003 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003004#if defined(DEBUG_SUBPAGE)
3005 printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
3006 mmio, len, addr, idx);
3007#endif
blueswir1db7b5422007-05-26 17:36:03 +00003008
Avi Kivity5312bd82012-02-12 18:32:55 +02003009 section = &phys_sections[mmio->sub_section[idx]];
3010 addr += mmio->base;
3011 addr -= section->offset_within_address_space;
3012 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003013 return io_mem_read(section->mr, addr, len);
blueswir1db7b5422007-05-26 17:36:03 +00003014}
3015
Avi Kivity70c68e42012-01-02 12:32:48 +02003016static void subpage_write(void *opaque, target_phys_addr_t addr,
3017 uint64_t value, unsigned len)
blueswir1db7b5422007-05-26 17:36:03 +00003018{
Avi Kivity70c68e42012-01-02 12:32:48 +02003019 subpage_t *mmio = opaque;
Richard Hendersonf6405242010-04-22 16:47:31 -07003020 unsigned int idx = SUBPAGE_IDX(addr);
Avi Kivity5312bd82012-02-12 18:32:55 +02003021 MemoryRegionSection *section;
blueswir1db7b5422007-05-26 17:36:03 +00003022#if defined(DEBUG_SUBPAGE)
Avi Kivity70c68e42012-01-02 12:32:48 +02003023 printf("%s: subpage %p len %d addr " TARGET_FMT_plx
3024 " idx %d value %"PRIx64"\n",
Richard Hendersonf6405242010-04-22 16:47:31 -07003025 __func__, mmio, len, addr, idx, value);
blueswir1db7b5422007-05-26 17:36:03 +00003026#endif
Richard Hendersonf6405242010-04-22 16:47:31 -07003027
Avi Kivity5312bd82012-02-12 18:32:55 +02003028 section = &phys_sections[mmio->sub_section[idx]];
3029 addr += mmio->base;
3030 addr -= section->offset_within_address_space;
3031 addr += section->offset_within_region;
Avi Kivity37ec01d2012-03-08 18:08:35 +02003032 io_mem_write(section->mr, addr, value, len);
blueswir1db7b5422007-05-26 17:36:03 +00003033}
3034
Avi Kivity70c68e42012-01-02 12:32:48 +02003035static const MemoryRegionOps subpage_ops = {
3036 .read = subpage_read,
3037 .write = subpage_write,
3038 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00003039};
3040
Avi Kivityde712f92012-01-02 12:41:07 +02003041static uint64_t subpage_ram_read(void *opaque, target_phys_addr_t addr,
3042 unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003043{
3044 ram_addr_t raddr = addr;
3045 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003046 switch (size) {
3047 case 1: return ldub_p(ptr);
3048 case 2: return lduw_p(ptr);
3049 case 4: return ldl_p(ptr);
3050 default: abort();
3051 }
Andreas Färber56384e82011-11-30 16:26:21 +01003052}
3053
Avi Kivityde712f92012-01-02 12:41:07 +02003054static void subpage_ram_write(void *opaque, target_phys_addr_t addr,
3055 uint64_t value, unsigned size)
Andreas Färber56384e82011-11-30 16:26:21 +01003056{
3057 ram_addr_t raddr = addr;
3058 void *ptr = qemu_get_ram_ptr(raddr);
Avi Kivityde712f92012-01-02 12:41:07 +02003059 switch (size) {
3060 case 1: return stb_p(ptr, value);
3061 case 2: return stw_p(ptr, value);
3062 case 4: return stl_p(ptr, value);
3063 default: abort();
3064 }
Andreas Färber56384e82011-11-30 16:26:21 +01003065}
3066
Avi Kivityde712f92012-01-02 12:41:07 +02003067static const MemoryRegionOps subpage_ram_ops = {
3068 .read = subpage_ram_read,
3069 .write = subpage_ram_write,
3070 .endianness = DEVICE_NATIVE_ENDIAN,
Andreas Färber56384e82011-11-30 16:26:21 +01003071};
3072
Anthony Liguoric227f092009-10-01 16:12:16 -05003073static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02003074 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00003075{
3076 int idx, eidx;
3077
3078 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
3079 return -1;
3080 idx = SUBPAGE_IDX(start);
3081 eidx = SUBPAGE_IDX(end);
3082#if defined(DEBUG_SUBPAGE)
Blue Swirl0bf9e312009-07-20 17:19:25 +00003083 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n", __func__,
blueswir1db7b5422007-05-26 17:36:03 +00003084 mmio, start, end, idx, eidx, section);
3085#endif
Avi Kivity5312bd82012-02-12 18:32:55 +02003086 if (memory_region_is_ram(phys_sections[section].mr)) {
3087 MemoryRegionSection new_section = phys_sections[section];
3088 new_section.mr = &io_mem_subpage_ram;
3089 section = phys_section_add(&new_section);
Andreas Färber56384e82011-11-30 16:26:21 +01003090 }
blueswir1db7b5422007-05-26 17:36:03 +00003091 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02003092 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00003093 }
3094
3095 return 0;
3096}
3097
Avi Kivity0f0cb162012-02-13 17:14:32 +02003098static subpage_t *subpage_init(target_phys_addr_t base)
blueswir1db7b5422007-05-26 17:36:03 +00003099{
Anthony Liguoric227f092009-10-01 16:12:16 -05003100 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00003101
Anthony Liguori7267c092011-08-20 22:09:37 -05003102 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00003103
3104 mmio->base = base;
Avi Kivity70c68e42012-01-02 12:32:48 +02003105 memory_region_init_io(&mmio->iomem, &subpage_ops, mmio,
3106 "subpage", TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02003107 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00003108#if defined(DEBUG_SUBPAGE)
aliguori1eec6142009-02-05 22:06:18 +00003109 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
3110 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00003111#endif
Avi Kivity0f0cb162012-02-13 17:14:32 +02003112 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, phys_section_unassigned);
blueswir1db7b5422007-05-26 17:36:03 +00003113
3114 return mmio;
3115}
3116
Avi Kivity5312bd82012-02-12 18:32:55 +02003117static uint16_t dummy_section(MemoryRegion *mr)
3118{
3119 MemoryRegionSection section = {
3120 .mr = mr,
3121 .offset_within_address_space = 0,
3122 .offset_within_region = 0,
3123 .size = UINT64_MAX,
3124 };
3125
3126 return phys_section_add(&section);
3127}
3128
Avi Kivity37ec01d2012-03-08 18:08:35 +02003129MemoryRegion *iotlb_to_region(target_phys_addr_t index)
Avi Kivityaa102232012-03-08 17:06:55 +02003130{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003131 return phys_sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02003132}
3133
Avi Kivitye9179ce2009-06-14 11:38:52 +03003134static void io_mem_init(void)
3135{
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003136 memory_region_init_io(&io_mem_ram, &error_mem_ops, NULL, "ram", UINT64_MAX);
Avi Kivity0e0df1e2012-01-02 00:32:15 +02003137 memory_region_init_io(&io_mem_rom, &rom_mem_ops, NULL, "rom", UINT64_MAX);
3138 memory_region_init_io(&io_mem_unassigned, &unassigned_mem_ops, NULL,
3139 "unassigned", UINT64_MAX);
3140 memory_region_init_io(&io_mem_notdirty, &notdirty_mem_ops, NULL,
3141 "notdirty", UINT64_MAX);
Avi Kivityde712f92012-01-02 12:41:07 +02003142 memory_region_init_io(&io_mem_subpage_ram, &subpage_ram_ops, NULL,
3143 "subpage-ram", UINT64_MAX);
Avi Kivity1ec9b902012-01-02 12:47:48 +02003144 memory_region_init_io(&io_mem_watch, &watch_mem_ops, NULL,
3145 "watch", UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003146}
3147
Avi Kivity50c1e142012-02-08 21:36:02 +02003148static void core_begin(MemoryListener *listener)
3149{
Avi Kivity54688b12012-02-09 17:34:32 +02003150 destroy_all_mappings();
Avi Kivity5312bd82012-02-12 18:32:55 +02003151 phys_sections_clear();
Avi Kivityc19e8802012-02-13 20:25:31 +02003152 phys_map.ptr = PHYS_MAP_NODE_NIL;
Avi Kivity5312bd82012-02-12 18:32:55 +02003153 phys_section_unassigned = dummy_section(&io_mem_unassigned);
Avi Kivityaa102232012-03-08 17:06:55 +02003154 phys_section_notdirty = dummy_section(&io_mem_notdirty);
3155 phys_section_rom = dummy_section(&io_mem_rom);
3156 phys_section_watch = dummy_section(&io_mem_watch);
Avi Kivity50c1e142012-02-08 21:36:02 +02003157}
3158
3159static void core_commit(MemoryListener *listener)
3160{
Andreas Färber9349b4f2012-03-14 01:38:32 +01003161 CPUArchState *env;
Avi Kivity117712c2012-02-12 21:23:17 +02003162
3163 /* since each CPU stores ram addresses in its TLB cache, we must
3164 reset the modified entries */
3165 /* XXX: slow ! */
3166 for(env = first_cpu; env != NULL; env = env->next_cpu) {
3167 tlb_flush(env, 1);
3168 }
Avi Kivity50c1e142012-02-08 21:36:02 +02003169}
3170
Avi Kivity93632742012-02-08 16:54:16 +02003171static void core_region_add(MemoryListener *listener,
3172 MemoryRegionSection *section)
3173{
Avi Kivity4855d412012-02-08 21:16:05 +02003174 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity93632742012-02-08 16:54:16 +02003175}
3176
3177static void core_region_del(MemoryListener *listener,
3178 MemoryRegionSection *section)
3179{
Avi Kivity93632742012-02-08 16:54:16 +02003180}
3181
Avi Kivity50c1e142012-02-08 21:36:02 +02003182static void core_region_nop(MemoryListener *listener,
3183 MemoryRegionSection *section)
3184{
Avi Kivity54688b12012-02-09 17:34:32 +02003185 cpu_register_physical_memory_log(section, section->readonly);
Avi Kivity50c1e142012-02-08 21:36:02 +02003186}
3187
Avi Kivity93632742012-02-08 16:54:16 +02003188static void core_log_start(MemoryListener *listener,
3189 MemoryRegionSection *section)
3190{
3191}
3192
3193static void core_log_stop(MemoryListener *listener,
3194 MemoryRegionSection *section)
3195{
3196}
3197
3198static void core_log_sync(MemoryListener *listener,
3199 MemoryRegionSection *section)
3200{
3201}
3202
3203static void core_log_global_start(MemoryListener *listener)
3204{
3205 cpu_physical_memory_set_dirty_tracking(1);
3206}
3207
3208static void core_log_global_stop(MemoryListener *listener)
3209{
3210 cpu_physical_memory_set_dirty_tracking(0);
3211}
3212
3213static void core_eventfd_add(MemoryListener *listener,
3214 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003215 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity93632742012-02-08 16:54:16 +02003216{
3217}
3218
3219static void core_eventfd_del(MemoryListener *listener,
3220 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003221 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity93632742012-02-08 16:54:16 +02003222{
3223}
3224
Avi Kivity50c1e142012-02-08 21:36:02 +02003225static void io_begin(MemoryListener *listener)
3226{
3227}
3228
3229static void io_commit(MemoryListener *listener)
3230{
3231}
3232
Avi Kivity4855d412012-02-08 21:16:05 +02003233static void io_region_add(MemoryListener *listener,
3234 MemoryRegionSection *section)
3235{
Avi Kivitya2d33522012-03-05 17:40:12 +02003236 MemoryRegionIORange *mrio = g_new(MemoryRegionIORange, 1);
3237
3238 mrio->mr = section->mr;
3239 mrio->offset = section->offset_within_region;
3240 iorange_init(&mrio->iorange, &memory_region_iorange_ops,
Avi Kivity4855d412012-02-08 21:16:05 +02003241 section->offset_within_address_space, section->size);
Avi Kivitya2d33522012-03-05 17:40:12 +02003242 ioport_register(&mrio->iorange);
Avi Kivity4855d412012-02-08 21:16:05 +02003243}
3244
3245static void io_region_del(MemoryListener *listener,
3246 MemoryRegionSection *section)
3247{
3248 isa_unassign_ioport(section->offset_within_address_space, section->size);
3249}
3250
Avi Kivity50c1e142012-02-08 21:36:02 +02003251static void io_region_nop(MemoryListener *listener,
3252 MemoryRegionSection *section)
3253{
3254}
3255
Avi Kivity4855d412012-02-08 21:16:05 +02003256static void io_log_start(MemoryListener *listener,
3257 MemoryRegionSection *section)
3258{
3259}
3260
3261static void io_log_stop(MemoryListener *listener,
3262 MemoryRegionSection *section)
3263{
3264}
3265
3266static void io_log_sync(MemoryListener *listener,
3267 MemoryRegionSection *section)
3268{
3269}
3270
3271static void io_log_global_start(MemoryListener *listener)
3272{
3273}
3274
3275static void io_log_global_stop(MemoryListener *listener)
3276{
3277}
3278
3279static void io_eventfd_add(MemoryListener *listener,
3280 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003281 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity4855d412012-02-08 21:16:05 +02003282{
3283}
3284
3285static void io_eventfd_del(MemoryListener *listener,
3286 MemoryRegionSection *section,
Paolo Bonzini753d5e12012-07-05 17:16:27 +02003287 bool match_data, uint64_t data, EventNotifier *e)
Avi Kivity4855d412012-02-08 21:16:05 +02003288{
3289}
3290
Avi Kivity93632742012-02-08 16:54:16 +02003291static MemoryListener core_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003292 .begin = core_begin,
3293 .commit = core_commit,
Avi Kivity93632742012-02-08 16:54:16 +02003294 .region_add = core_region_add,
3295 .region_del = core_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003296 .region_nop = core_region_nop,
Avi Kivity93632742012-02-08 16:54:16 +02003297 .log_start = core_log_start,
3298 .log_stop = core_log_stop,
3299 .log_sync = core_log_sync,
3300 .log_global_start = core_log_global_start,
3301 .log_global_stop = core_log_global_stop,
3302 .eventfd_add = core_eventfd_add,
3303 .eventfd_del = core_eventfd_del,
3304 .priority = 0,
3305};
3306
Avi Kivity4855d412012-02-08 21:16:05 +02003307static MemoryListener io_memory_listener = {
Avi Kivity50c1e142012-02-08 21:36:02 +02003308 .begin = io_begin,
3309 .commit = io_commit,
Avi Kivity4855d412012-02-08 21:16:05 +02003310 .region_add = io_region_add,
3311 .region_del = io_region_del,
Avi Kivity50c1e142012-02-08 21:36:02 +02003312 .region_nop = io_region_nop,
Avi Kivity4855d412012-02-08 21:16:05 +02003313 .log_start = io_log_start,
3314 .log_stop = io_log_stop,
3315 .log_sync = io_log_sync,
3316 .log_global_start = io_log_global_start,
3317 .log_global_stop = io_log_global_stop,
3318 .eventfd_add = io_eventfd_add,
3319 .eventfd_del = io_eventfd_del,
3320 .priority = 0,
3321};
3322
Avi Kivity62152b82011-07-26 14:26:14 +03003323static void memory_map_init(void)
3324{
Anthony Liguori7267c092011-08-20 22:09:37 -05003325 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003326 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003327 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003328
Anthony Liguori7267c092011-08-20 22:09:37 -05003329 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003330 memory_region_init(system_io, "io", 65536);
3331 set_system_io_map(system_io);
Avi Kivity93632742012-02-08 16:54:16 +02003332
Avi Kivity4855d412012-02-08 21:16:05 +02003333 memory_listener_register(&core_memory_listener, system_memory);
3334 memory_listener_register(&io_memory_listener, system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003335}
3336
3337MemoryRegion *get_system_memory(void)
3338{
3339 return system_memory;
3340}
3341
Avi Kivity309cb472011-08-08 16:09:03 +03003342MemoryRegion *get_system_io(void)
3343{
3344 return system_io;
3345}
3346
pbrooke2eef172008-06-08 01:09:01 +00003347#endif /* !defined(CONFIG_USER_ONLY) */
3348
bellard13eb76e2004-01-24 15:23:36 +00003349/* physical memory access (slow version, mainly for debug) */
3350#if defined(CONFIG_USER_ONLY)
Andreas Färber9349b4f2012-03-14 01:38:32 +01003351int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00003352 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003353{
3354 int l, flags;
3355 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00003356 void * p;
bellard13eb76e2004-01-24 15:23:36 +00003357
3358 while (len > 0) {
3359 page = addr & TARGET_PAGE_MASK;
3360 l = (page + TARGET_PAGE_SIZE) - addr;
3361 if (l > len)
3362 l = len;
3363 flags = page_get_flags(page);
3364 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00003365 return -1;
bellard13eb76e2004-01-24 15:23:36 +00003366 if (is_write) {
3367 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00003368 return -1;
bellard579a97f2007-11-11 14:26:47 +00003369 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003370 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00003371 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003372 memcpy(p, buf, l);
3373 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00003374 } else {
3375 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00003376 return -1;
bellard579a97f2007-11-11 14:26:47 +00003377 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00003378 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00003379 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00003380 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00003381 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00003382 }
3383 len -= l;
3384 buf += l;
3385 addr += l;
3386 }
Paul Brooka68fe892010-03-01 00:08:59 +00003387 return 0;
bellard13eb76e2004-01-24 15:23:36 +00003388}
bellard8df1cd02005-01-28 22:37:22 +00003389
bellard13eb76e2004-01-24 15:23:36 +00003390#else
Anthony Liguoric227f092009-10-01 16:12:16 -05003391void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
bellard13eb76e2004-01-24 15:23:36 +00003392 int len, int is_write)
3393{
Avi Kivity37ec01d2012-03-08 18:08:35 +02003394 int l;
bellard13eb76e2004-01-24 15:23:36 +00003395 uint8_t *ptr;
3396 uint32_t val;
Anthony Liguoric227f092009-10-01 16:12:16 -05003397 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003398 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003399
bellard13eb76e2004-01-24 15:23:36 +00003400 while (len > 0) {
3401 page = addr & TARGET_PAGE_MASK;
3402 l = (page + TARGET_PAGE_SIZE) - addr;
3403 if (l > len)
3404 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003405 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003406
bellard13eb76e2004-01-24 15:23:36 +00003407 if (is_write) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003408 if (!memory_region_is_ram(section->mr)) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003409 target_phys_addr_t addr1;
Blue Swirlcc5bea62012-04-14 14:56:48 +00003410 addr1 = memory_region_section_addr(section, addr);
bellard6a00d602005-11-21 23:25:50 +00003411 /* XXX: could force cpu_single_env to NULL to avoid
3412 potential bugs */
aurel326c2934d2009-02-18 21:37:17 +00003413 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003414 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003415 val = ldl_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003416 io_mem_write(section->mr, addr1, val, 4);
bellard13eb76e2004-01-24 15:23:36 +00003417 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003418 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard1c213d12005-09-03 10:49:04 +00003419 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003420 val = lduw_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003421 io_mem_write(section->mr, addr1, val, 2);
bellard13eb76e2004-01-24 15:23:36 +00003422 l = 2;
3423 } else {
bellard1c213d12005-09-03 10:49:04 +00003424 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00003425 val = ldub_p(buf);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003426 io_mem_write(section->mr, addr1, val, 1);
bellard13eb76e2004-01-24 15:23:36 +00003427 l = 1;
3428 }
Avi Kivityf3705d52012-03-08 16:16:34 +02003429 } else if (!section->readonly) {
Anthony PERARD8ca56922011-07-15 04:32:53 +00003430 ram_addr_t addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003431 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003432 + memory_region_section_addr(section, addr);
bellard13eb76e2004-01-24 15:23:36 +00003433 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003434 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00003435 memcpy(ptr, buf, l);
bellard3a7d9292005-08-21 09:26:42 +00003436 if (!cpu_physical_memory_is_dirty(addr1)) {
3437 /* invalidate code */
3438 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3439 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003440 cpu_physical_memory_set_dirty_flags(
3441 addr1, (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003442 }
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003443 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003444 }
3445 } else {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003446 if (!(memory_region_is_ram(section->mr) ||
3447 memory_region_is_romd(section->mr))) {
Avi Kivityf1f6e3b2011-11-20 17:52:22 +02003448 target_phys_addr_t addr1;
bellard13eb76e2004-01-24 15:23:36 +00003449 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003450 addr1 = memory_region_section_addr(section, addr);
aurel326c2934d2009-02-18 21:37:17 +00003451 if (l >= 4 && ((addr1 & 3) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003452 /* 32 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003453 val = io_mem_read(section->mr, addr1, 4);
bellardc27004e2005-01-03 23:35:10 +00003454 stl_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003455 l = 4;
aurel326c2934d2009-02-18 21:37:17 +00003456 } else if (l >= 2 && ((addr1 & 1) == 0)) {
bellard13eb76e2004-01-24 15:23:36 +00003457 /* 16 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003458 val = io_mem_read(section->mr, addr1, 2);
bellardc27004e2005-01-03 23:35:10 +00003459 stw_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003460 l = 2;
3461 } else {
bellard1c213d12005-09-03 10:49:04 +00003462 /* 8 bit read access */
Avi Kivity37ec01d2012-03-08 18:08:35 +02003463 val = io_mem_read(section->mr, addr1, 1);
bellardc27004e2005-01-03 23:35:10 +00003464 stb_p(buf, val);
bellard13eb76e2004-01-24 15:23:36 +00003465 l = 1;
3466 }
3467 } else {
3468 /* RAM case */
Anthony PERARD0a1b3572012-03-19 15:54:34 +00003469 ptr = qemu_get_ram_ptr(section->mr->ram_addr
Blue Swirlcc5bea62012-04-14 14:56:48 +00003470 + memory_region_section_addr(section,
3471 addr));
Avi Kivityf3705d52012-03-08 16:16:34 +02003472 memcpy(buf, ptr, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003473 qemu_put_ram_ptr(ptr);
bellard13eb76e2004-01-24 15:23:36 +00003474 }
3475 }
3476 len -= l;
3477 buf += l;
3478 addr += l;
3479 }
3480}
bellard8df1cd02005-01-28 22:37:22 +00003481
bellardd0ecd2a2006-04-23 17:14:48 +00003482/* used for ROM loading: can write in RAM and ROM */
Anthony Liguoric227f092009-10-01 16:12:16 -05003483void cpu_physical_memory_write_rom(target_phys_addr_t addr,
bellardd0ecd2a2006-04-23 17:14:48 +00003484 const uint8_t *buf, int len)
3485{
3486 int l;
3487 uint8_t *ptr;
Anthony Liguoric227f092009-10-01 16:12:16 -05003488 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003489 MemoryRegionSection *section;
ths3b46e622007-09-17 08:09:54 +00003490
bellardd0ecd2a2006-04-23 17:14:48 +00003491 while (len > 0) {
3492 page = addr & TARGET_PAGE_MASK;
3493 l = (page + TARGET_PAGE_SIZE) - addr;
3494 if (l > len)
3495 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003496 section = phys_page_find(page >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003497
Blue Swirlcc5bea62012-04-14 14:56:48 +00003498 if (!(memory_region_is_ram(section->mr) ||
3499 memory_region_is_romd(section->mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00003500 /* do nothing */
3501 } else {
3502 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003503 addr1 = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003504 + memory_region_section_addr(section, addr);
bellardd0ecd2a2006-04-23 17:14:48 +00003505 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003506 ptr = qemu_get_ram_ptr(addr1);
bellardd0ecd2a2006-04-23 17:14:48 +00003507 memcpy(ptr, buf, l);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003508 qemu_put_ram_ptr(ptr);
bellardd0ecd2a2006-04-23 17:14:48 +00003509 }
3510 len -= l;
3511 buf += l;
3512 addr += l;
3513 }
3514}
3515
aliguori6d16c2f2009-01-22 16:59:11 +00003516typedef struct {
3517 void *buffer;
Anthony Liguoric227f092009-10-01 16:12:16 -05003518 target_phys_addr_t addr;
3519 target_phys_addr_t len;
aliguori6d16c2f2009-01-22 16:59:11 +00003520} BounceBuffer;
3521
3522static BounceBuffer bounce;
3523
aliguoriba223c22009-01-22 16:59:16 +00003524typedef struct MapClient {
3525 void *opaque;
3526 void (*callback)(void *opaque);
Blue Swirl72cf2d42009-09-12 07:36:22 +00003527 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00003528} MapClient;
3529
Blue Swirl72cf2d42009-09-12 07:36:22 +00003530static QLIST_HEAD(map_client_list, MapClient) map_client_list
3531 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003532
3533void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
3534{
Anthony Liguori7267c092011-08-20 22:09:37 -05003535 MapClient *client = g_malloc(sizeof(*client));
aliguoriba223c22009-01-22 16:59:16 +00003536
3537 client->opaque = opaque;
3538 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00003539 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00003540 return client;
3541}
3542
3543void cpu_unregister_map_client(void *_client)
3544{
3545 MapClient *client = (MapClient *)_client;
3546
Blue Swirl72cf2d42009-09-12 07:36:22 +00003547 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05003548 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00003549}
3550
3551static void cpu_notify_map_clients(void)
3552{
3553 MapClient *client;
3554
Blue Swirl72cf2d42009-09-12 07:36:22 +00003555 while (!QLIST_EMPTY(&map_client_list)) {
3556 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00003557 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09003558 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00003559 }
3560}
3561
aliguori6d16c2f2009-01-22 16:59:11 +00003562/* Map a physical memory region into a host virtual address.
3563 * May map a subset of the requested range, given by and returned in *plen.
3564 * May return NULL if resources needed to perform the mapping are exhausted.
3565 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00003566 * Use cpu_register_map_client() to know when retrying the map operation is
3567 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00003568 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003569void *cpu_physical_memory_map(target_phys_addr_t addr,
3570 target_phys_addr_t *plen,
aliguori6d16c2f2009-01-22 16:59:11 +00003571 int is_write)
3572{
Anthony Liguoric227f092009-10-01 16:12:16 -05003573 target_phys_addr_t len = *plen;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003574 target_phys_addr_t todo = 0;
aliguori6d16c2f2009-01-22 16:59:11 +00003575 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05003576 target_phys_addr_t page;
Avi Kivityf3705d52012-03-08 16:16:34 +02003577 MemoryRegionSection *section;
Anthony PERARDf15fbc42011-07-20 08:17:42 +00003578 ram_addr_t raddr = RAM_ADDR_MAX;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003579 ram_addr_t rlen;
3580 void *ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003581
3582 while (len > 0) {
3583 page = addr & TARGET_PAGE_MASK;
3584 l = (page + TARGET_PAGE_SIZE) - addr;
3585 if (l > len)
3586 l = len;
Avi Kivity06ef3522012-02-13 16:11:22 +02003587 section = phys_page_find(page >> TARGET_PAGE_BITS);
aliguori6d16c2f2009-01-22 16:59:11 +00003588
Avi Kivityf3705d52012-03-08 16:16:34 +02003589 if (!(memory_region_is_ram(section->mr) && !section->readonly)) {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003590 if (todo || bounce.buffer) {
aliguori6d16c2f2009-01-22 16:59:11 +00003591 break;
3592 }
3593 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
3594 bounce.addr = addr;
3595 bounce.len = l;
3596 if (!is_write) {
Stefan Weil54f7b4a2011-04-10 18:23:39 +02003597 cpu_physical_memory_read(addr, bounce.buffer, l);
aliguori6d16c2f2009-01-22 16:59:11 +00003598 }
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003599
3600 *plen = l;
3601 return bounce.buffer;
aliguori6d16c2f2009-01-22 16:59:11 +00003602 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003603 if (!todo) {
Avi Kivityf3705d52012-03-08 16:16:34 +02003604 raddr = memory_region_get_ram_addr(section->mr)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003605 + memory_region_section_addr(section, addr);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003606 }
aliguori6d16c2f2009-01-22 16:59:11 +00003607
3608 len -= l;
3609 addr += l;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003610 todo += l;
aliguori6d16c2f2009-01-22 16:59:11 +00003611 }
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01003612 rlen = todo;
3613 ret = qemu_ram_ptr_length(raddr, &rlen);
3614 *plen = rlen;
3615 return ret;
aliguori6d16c2f2009-01-22 16:59:11 +00003616}
3617
3618/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
3619 * Will also mark the memory as dirty if is_write == 1. access_len gives
3620 * the amount of memory that was actually read or written by the caller.
3621 */
Anthony Liguoric227f092009-10-01 16:12:16 -05003622void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
3623 int is_write, target_phys_addr_t access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00003624{
3625 if (buffer != bounce.buffer) {
3626 if (is_write) {
Marcelo Tosattie8902612010-10-11 15:31:19 -03003627 ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003628 while (access_len) {
3629 unsigned l;
3630 l = TARGET_PAGE_SIZE;
3631 if (l > access_len)
3632 l = access_len;
3633 if (!cpu_physical_memory_is_dirty(addr1)) {
3634 /* invalidate code */
3635 tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
3636 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003637 cpu_physical_memory_set_dirty_flags(
3638 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori6d16c2f2009-01-22 16:59:11 +00003639 }
3640 addr1 += l;
3641 access_len -= l;
3642 }
3643 }
Jan Kiszka868bb332011-06-21 22:59:09 +02003644 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02003645 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01003646 }
aliguori6d16c2f2009-01-22 16:59:11 +00003647 return;
3648 }
3649 if (is_write) {
3650 cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
3651 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00003652 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00003653 bounce.buffer = NULL;
aliguoriba223c22009-01-22 16:59:16 +00003654 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00003655}
bellardd0ecd2a2006-04-23 17:14:48 +00003656
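/* Illustrative sketch, not part of the original file: a bounce-buffer aware
 * DMA write built on the map/unmap pair above.  The function name and the
 * parameters "dma_addr", "data", "dma_len", "retry_cb" and "opaque" are
 * hypothetical and exist only for this example. */
static void example_dma_write(target_phys_addr_t dma_addr, const uint8_t *data,
                              target_phys_addr_t dma_len,
                              void (*retry_cb)(void *opaque), void *opaque)
{
    target_phys_addr_t plen = dma_len;
    void *buf = cpu_physical_memory_map(dma_addr, &plen, 1);

    if (!buf) {
        /* Mapping resources (e.g. the single bounce buffer) are exhausted;
         * ask to be called back when a retry is likely to succeed. */
        cpu_register_map_client(opaque, retry_cb);
        return;
    }
    /* plen may be smaller than dma_len; a real caller would loop. */
    memcpy(buf, data, plen);
    cpu_physical_memory_unmap(buf, plen, 1, plen);
}
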
bellard8df1cd02005-01-28 22:37:22 +00003657/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003658static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
3659 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003660{
bellard8df1cd02005-01-28 22:37:22 +00003661 uint8_t *ptr;
3662 uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003663 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003664
Avi Kivity06ef3522012-02-13 16:11:22 +02003665 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003666
Blue Swirlcc5bea62012-04-14 14:56:48 +00003667 if (!(memory_region_is_ram(section->mr) ||
3668 memory_region_is_romd(section->mr))) {
bellard8df1cd02005-01-28 22:37:22 +00003669 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003670 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003671 val = io_mem_read(section->mr, addr, 4);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003672#if defined(TARGET_WORDS_BIGENDIAN)
3673 if (endian == DEVICE_LITTLE_ENDIAN) {
3674 val = bswap32(val);
3675 }
3676#else
3677 if (endian == DEVICE_BIG_ENDIAN) {
3678 val = bswap32(val);
3679 }
3680#endif
bellard8df1cd02005-01-28 22:37:22 +00003681 } else {
3682 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003683 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003684 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003685 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003686 switch (endian) {
3687 case DEVICE_LITTLE_ENDIAN:
3688 val = ldl_le_p(ptr);
3689 break;
3690 case DEVICE_BIG_ENDIAN:
3691 val = ldl_be_p(ptr);
3692 break;
3693 default:
3694 val = ldl_p(ptr);
3695 break;
3696 }
bellard8df1cd02005-01-28 22:37:22 +00003697 }
3698 return val;
3699}
3700
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003701uint32_t ldl_phys(target_phys_addr_t addr)
3702{
3703 return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3704}
3705
3706uint32_t ldl_le_phys(target_phys_addr_t addr)
3707{
3708 return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3709}
3710
3711uint32_t ldl_be_phys(target_phys_addr_t addr)
3712{
3713 return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
3714}
3715
bellard84b7b8e2005-11-28 21:19:04 +00003716/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003717static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
3718 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00003719{
bellard84b7b8e2005-11-28 21:19:04 +00003720 uint8_t *ptr;
3721 uint64_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003722 MemoryRegionSection *section;
bellard84b7b8e2005-11-28 21:19:04 +00003723
Avi Kivity06ef3522012-02-13 16:11:22 +02003724 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003725
Blue Swirlcc5bea62012-04-14 14:56:48 +00003726 if (!(memory_region_is_ram(section->mr) ||
3727 memory_region_is_romd(section->mr))) {
bellard84b7b8e2005-11-28 21:19:04 +00003728 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003729 addr = memory_region_section_addr(section, addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003730
3731 /* XXX This is broken when device endian != cpu endian.
3732 Fix and add "endian" variable check */
bellard84b7b8e2005-11-28 21:19:04 +00003733#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003734 val = io_mem_read(section->mr, addr, 4) << 32;
3735 val |= io_mem_read(section->mr, addr + 4, 4);
bellard84b7b8e2005-11-28 21:19:04 +00003736#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003737 val = io_mem_read(section->mr, addr, 4);
3738 val |= io_mem_read(section->mr, addr + 4, 4) << 32;
bellard84b7b8e2005-11-28 21:19:04 +00003739#endif
3740 } else {
3741 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003742 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003743 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003744 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003745 switch (endian) {
3746 case DEVICE_LITTLE_ENDIAN:
3747 val = ldq_le_p(ptr);
3748 break;
3749 case DEVICE_BIG_ENDIAN:
3750 val = ldq_be_p(ptr);
3751 break;
3752 default:
3753 val = ldq_p(ptr);
3754 break;
3755 }
bellard84b7b8e2005-11-28 21:19:04 +00003756 }
3757 return val;
3758}
3759
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003760uint64_t ldq_phys(target_phys_addr_t addr)
3761{
3762 return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3763}
3764
3765uint64_t ldq_le_phys(target_phys_addr_t addr)
3766{
3767 return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3768}
3769
3770uint64_t ldq_be_phys(target_phys_addr_t addr)
3771{
3772 return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
3773}
3774
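/*
 * Illustrative sketch, not part of the original file: a 64-bit guest
 * physical read.  For RAM the helper above honors the requested byte
 * order; for MMIO it issues two 4-byte accesses and, as the XXX comment
 * notes, does not yet honor the "endian" argument.  The counter register
 * below is hypothetical.
 */
static uint64_t example_read_le_counter(target_phys_addr_t counter_addr)
{
    return ldq_le_phys(counter_addr);
}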
bellardaab33092005-10-30 20:48:42 +00003775/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003776uint32_t ldub_phys(target_phys_addr_t addr)
bellardaab33092005-10-30 20:48:42 +00003777{
3778 uint8_t val;
3779 cpu_physical_memory_read(addr, &val, 1);
3780 return val;
3781}
3782
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003783/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003784static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
3785 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003786{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003787 uint8_t *ptr;
3788    uint32_t val;
Avi Kivityf3705d52012-03-08 16:16:34 +02003789 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003790
Avi Kivity06ef3522012-02-13 16:11:22 +02003791 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003792
Blue Swirlcc5bea62012-04-14 14:56:48 +00003793 if (!(memory_region_is_ram(section->mr) ||
3794 memory_region_is_romd(section->mr))) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003795 /* I/O case */
Blue Swirlcc5bea62012-04-14 14:56:48 +00003796 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003797 val = io_mem_read(section->mr, addr, 2);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003798#if defined(TARGET_WORDS_BIGENDIAN)
3799 if (endian == DEVICE_LITTLE_ENDIAN) {
3800 val = bswap16(val);
3801 }
3802#else
3803 if (endian == DEVICE_BIG_ENDIAN) {
3804 val = bswap16(val);
3805 }
3806#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003807 } else {
3808 /* RAM case */
Avi Kivityf3705d52012-03-08 16:16:34 +02003809 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003810 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003811 + memory_region_section_addr(section, addr));
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003812 switch (endian) {
3813 case DEVICE_LITTLE_ENDIAN:
3814 val = lduw_le_p(ptr);
3815 break;
3816 case DEVICE_BIG_ENDIAN:
3817 val = lduw_be_p(ptr);
3818 break;
3819 default:
3820 val = lduw_p(ptr);
3821 break;
3822 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003823 }
3824 return val;
bellardaab33092005-10-30 20:48:42 +00003825}
3826
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003827uint32_t lduw_phys(target_phys_addr_t addr)
3828{
3829 return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
3830}
3831
3832uint32_t lduw_le_phys(target_phys_addr_t addr)
3833{
3834 return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
3835}
3836
3837uint32_t lduw_be_phys(target_phys_addr_t addr)
3838{
3839 return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
3840}
3841
bellard8df1cd02005-01-28 22:37:22 +00003842/* warning: addr must be aligned. The RAM page is not marked as dirty
3843   and the code inside is not invalidated. This is useful if the dirty
3844   bits are used to track modified PTEs. */
Anthony Liguoric227f092009-10-01 16:12:16 -05003845void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
bellard8df1cd02005-01-28 22:37:22 +00003846{
bellard8df1cd02005-01-28 22:37:22 +00003847 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003848 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003849
Avi Kivity06ef3522012-02-13 16:11:22 +02003850 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003851
Avi Kivityf3705d52012-03-08 16:16:34 +02003852 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003853 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003854 if (memory_region_is_ram(section->mr)) {
3855 section = &phys_sections[phys_section_rom];
3856 }
3857 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003858 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003859 unsigned long addr1 = (memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003860 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003861 + memory_region_section_addr(section, addr);
pbrook5579c7f2009-04-11 14:47:08 +00003862 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003863 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003864
3865 if (unlikely(in_migration)) {
3866 if (!cpu_physical_memory_is_dirty(addr1)) {
3867 /* invalidate code */
3868 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3869 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003870 cpu_physical_memory_set_dirty_flags(
3871 addr1, (0xff & ~CODE_DIRTY_FLAG));
aliguori74576192008-10-06 14:02:03 +00003872 }
3873 }
bellard8df1cd02005-01-28 22:37:22 +00003874 }
3875}
3876
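/*
 * Illustrative sketch, not part of the original file: the intended user of
 * stl_phys_notdirty() is target MMU emulation updating a guest page table
 * entry (e.g. setting an accessed bit).  The write does not mark the RAM
 * page dirty, so the dirty bits stay usable for tracking guest PTE changes
 * and translated code on the page is not invalidated.  The PTE bit below
 * is hypothetical.
 */
static void example_set_pte_accessed(target_phys_addr_t pte_addr)
{
    uint32_t pte = ldl_phys(pte_addr);  /* fetch the 32-bit PTE */
    pte |= 0x20;                        /* hypothetical "accessed" bit */
    stl_phys_notdirty(pte_addr, pte);   /* store without dirtying the page */
}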
Anthony Liguoric227f092009-10-01 16:12:16 -05003877void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
j_mayerbc98a7e2007-04-04 07:55:12 +00003878{
j_mayerbc98a7e2007-04-04 07:55:12 +00003879 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003880 MemoryRegionSection *section;
j_mayerbc98a7e2007-04-04 07:55:12 +00003881
Avi Kivity06ef3522012-02-13 16:11:22 +02003882 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003883
Avi Kivityf3705d52012-03-08 16:16:34 +02003884 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003885 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003886 if (memory_region_is_ram(section->mr)) {
3887 section = &phys_sections[phys_section_rom];
3888 }
j_mayerbc98a7e2007-04-04 07:55:12 +00003889#ifdef TARGET_WORDS_BIGENDIAN
Avi Kivity37ec01d2012-03-08 18:08:35 +02003890 io_mem_write(section->mr, addr, val >> 32, 4);
3891 io_mem_write(section->mr, addr + 4, (uint32_t)val, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003892#else
Avi Kivity37ec01d2012-03-08 18:08:35 +02003893 io_mem_write(section->mr, addr, (uint32_t)val, 4);
3894 io_mem_write(section->mr, addr + 4, val >> 32, 4);
j_mayerbc98a7e2007-04-04 07:55:12 +00003895#endif
3896 } else {
Avi Kivityf3705d52012-03-08 16:16:34 +02003897 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(section->mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02003898 & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003899 + memory_region_section_addr(section, addr));
j_mayerbc98a7e2007-04-04 07:55:12 +00003900 stq_p(ptr, val);
3901 }
3902}
3903
bellard8df1cd02005-01-28 22:37:22 +00003904/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003905static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
3906 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003907{
bellard8df1cd02005-01-28 22:37:22 +00003908 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003909 MemoryRegionSection *section;
bellard8df1cd02005-01-28 22:37:22 +00003910
Avi Kivity06ef3522012-02-13 16:11:22 +02003911 section = phys_page_find(addr >> TARGET_PAGE_BITS);
ths3b46e622007-09-17 08:09:54 +00003912
Avi Kivityf3705d52012-03-08 16:16:34 +02003913 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003914 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003915 if (memory_region_is_ram(section->mr)) {
3916 section = &phys_sections[phys_section_rom];
3917 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003918#if defined(TARGET_WORDS_BIGENDIAN)
3919 if (endian == DEVICE_LITTLE_ENDIAN) {
3920 val = bswap32(val);
3921 }
3922#else
3923 if (endian == DEVICE_BIG_ENDIAN) {
3924 val = bswap32(val);
3925 }
3926#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02003927 io_mem_write(section->mr, addr, val, 4);
bellard8df1cd02005-01-28 22:37:22 +00003928 } else {
3929 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02003930 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00003931 + memory_region_section_addr(section, addr);
bellard8df1cd02005-01-28 22:37:22 +00003932 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00003933 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003934 switch (endian) {
3935 case DEVICE_LITTLE_ENDIAN:
3936 stl_le_p(ptr, val);
3937 break;
3938 case DEVICE_BIG_ENDIAN:
3939 stl_be_p(ptr, val);
3940 break;
3941 default:
3942 stl_p(ptr, val);
3943 break;
3944 }
bellard3a7d9292005-08-21 09:26:42 +00003945 if (!cpu_physical_memory_is_dirty(addr1)) {
3946 /* invalidate code */
3947 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3948 /* set dirty bit */
Yoshiaki Tamuraf7c11b52010-03-23 16:39:53 +09003949 cpu_physical_memory_set_dirty_flags(addr1,
3950 (0xff & ~CODE_DIRTY_FLAG));
bellard3a7d9292005-08-21 09:26:42 +00003951 }
bellard8df1cd02005-01-28 22:37:22 +00003952 }
3953}
3954
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003955void stl_phys(target_phys_addr_t addr, uint32_t val)
3956{
3957 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
3958}
3959
3960void stl_le_phys(target_phys_addr_t addr, uint32_t val)
3961{
3962 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
3963}
3964
3965void stl_be_phys(target_phys_addr_t addr, uint32_t val)
3966{
3967 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
3968}
3969
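/*
 * Illustrative sketch, not part of the original file: a plain stl_phys()
 * store to guest RAM goes through the dirty-tracking path above, so any
 * translated block covering the written page is invalidated before stale
 * code can run.  Use the _le/_be variants when the stored format is fixed
 * by a device or firmware ABI.  The address and value are hypothetical.
 */
static void example_patch_guest_word(target_phys_addr_t addr, uint32_t insn)
{
    stl_phys(addr, insn);   /* native target byte order */
}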
bellardaab33092005-10-30 20:48:42 +00003970/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05003971void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00003972{
3973 uint8_t v = val;
3974 cpu_physical_memory_write(addr, &v, 1);
3975}
3976
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003977/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003978static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
3979 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003980{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003981 uint8_t *ptr;
Avi Kivityf3705d52012-03-08 16:16:34 +02003982 MemoryRegionSection *section;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003983
Avi Kivity06ef3522012-02-13 16:11:22 +02003984 section = phys_page_find(addr >> TARGET_PAGE_BITS);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003985
Avi Kivityf3705d52012-03-08 16:16:34 +02003986 if (!memory_region_is_ram(section->mr) || section->readonly) {
Blue Swirlcc5bea62012-04-14 14:56:48 +00003987 addr = memory_region_section_addr(section, addr);
Avi Kivity37ec01d2012-03-08 18:08:35 +02003988 if (memory_region_is_ram(section->mr)) {
3989 section = &phys_sections[phys_section_rom];
3990 }
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003991#if defined(TARGET_WORDS_BIGENDIAN)
3992 if (endian == DEVICE_LITTLE_ENDIAN) {
3993 val = bswap16(val);
3994 }
3995#else
3996 if (endian == DEVICE_BIG_ENDIAN) {
3997 val = bswap16(val);
3998 }
3999#endif
Avi Kivity37ec01d2012-03-08 18:08:35 +02004000 io_mem_write(section->mr, addr, val, 2);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004001 } else {
4002 unsigned long addr1;
Avi Kivityf3705d52012-03-08 16:16:34 +02004003 addr1 = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
Blue Swirlcc5bea62012-04-14 14:56:48 +00004004 + memory_region_section_addr(section, addr);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004005 /* RAM case */
4006 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004007 switch (endian) {
4008 case DEVICE_LITTLE_ENDIAN:
4009 stw_le_p(ptr, val);
4010 break;
4011 case DEVICE_BIG_ENDIAN:
4012 stw_be_p(ptr, val);
4013 break;
4014 default:
4015 stw_p(ptr, val);
4016 break;
4017 }
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03004018 if (!cpu_physical_memory_is_dirty(addr1)) {
4019 /* invalidate code */
4020 tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
4021 /* set dirty bit */
4022 cpu_physical_memory_set_dirty_flags(addr1,
4023 (0xff & ~CODE_DIRTY_FLAG));
4024 }
4025 }
bellardaab33092005-10-30 20:48:42 +00004026}
4027
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004028void stw_phys(target_phys_addr_t addr, uint32_t val)
4029{
4030 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4031}
4032
4033void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4034{
4035 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4036}
4037
4038void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4039{
4040 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4041}
4042
bellardaab33092005-10-30 20:48:42 +00004043/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004044void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004045{
4046 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004047 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004048}
4049
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004050void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4051{
4052 val = cpu_to_le64(val);
4053 cpu_physical_memory_write(addr, &val, 8);
4054}
4055
4056void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4057{
4058 val = cpu_to_be64(val);
4059 cpu_physical_memory_write(addr, &val, 8);
4060}
4061
aliguori5e2972f2009-03-28 17:51:36 +00004062/* virtual memory access for debug (includes writing to ROM) */
Andreas Färber9349b4f2012-03-14 01:38:32 +01004063int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00004064 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00004065{
4066 int l;
Anthony Liguoric227f092009-10-01 16:12:16 -05004067 target_phys_addr_t phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00004068 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00004069
4070 while (len > 0) {
4071 page = addr & TARGET_PAGE_MASK;
4072 phys_addr = cpu_get_phys_page_debug(env, page);
4073 /* if no physical page mapped, return an error */
4074 if (phys_addr == -1)
4075 return -1;
4076 l = (page + TARGET_PAGE_SIZE) - addr;
4077 if (l > len)
4078 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00004079 phys_addr += (addr & ~TARGET_PAGE_MASK);
aliguori5e2972f2009-03-28 17:51:36 +00004080 if (is_write)
4081 cpu_physical_memory_write_rom(phys_addr, buf, l);
4082 else
aliguori5e2972f2009-03-28 17:51:36 +00004083 cpu_physical_memory_rw(phys_addr, buf, l, is_write);
bellard13eb76e2004-01-24 15:23:36 +00004084 len -= l;
4085 buf += l;
4086 addr += l;
4087 }
4088 return 0;
4089}
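/*
 * Illustrative sketch, not part of the original file: a debugger-style
 * caller (for instance a gdbstub command handler) reads guest virtual
 * memory with cpu_memory_rw_debug(), which resolves one page at a time via
 * cpu_get_phys_page_debug() and, for writes, also patches ROM so that
 * breakpoints can be planted there.  The wrapper itself is hypothetical.
 */
static int example_read_guest_virtual(CPUArchState *env, target_ulong vaddr,
                                      uint8_t *buf, int len)
{
    return cpu_memory_rw_debug(env, vaddr, buf, len, 0 /* is_write */);
}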
Paul Brooka68fe892010-03-01 00:08:59 +00004090#endif
bellard13eb76e2004-01-24 15:23:36 +00004091
pbrook2e70f6e2008-06-29 01:03:05 +00004092/* in deterministic execution mode, instructions doing device I/Os
4093 must be at the end of the TB */
Blue Swirl20503962012-04-09 14:20:20 +00004094void cpu_io_recompile(CPUArchState *env, uintptr_t retaddr)
pbrook2e70f6e2008-06-29 01:03:05 +00004095{
4096 TranslationBlock *tb;
4097 uint32_t n, cflags;
4098 target_ulong pc, cs_base;
4099 uint64_t flags;
4100
Blue Swirl20503962012-04-09 14:20:20 +00004101 tb = tb_find_pc(retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004102 if (!tb) {
4103 cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
Blue Swirl20503962012-04-09 14:20:20 +00004104 (void *)retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004105 }
4106 n = env->icount_decr.u16.low + tb->icount;
Blue Swirl20503962012-04-09 14:20:20 +00004107 cpu_restore_state(tb, env, retaddr);
pbrook2e70f6e2008-06-29 01:03:05 +00004108 /* Calculate how many instructions had been executed before the fault
thsbf20dc02008-06-30 17:22:19 +00004109 occurred. */
pbrook2e70f6e2008-06-29 01:03:05 +00004110 n = n - env->icount_decr.u16.low;
4111 /* Generate a new TB ending on the I/O insn. */
4112 n++;
4113 /* On MIPS and SH, delay slot instructions can only be restarted if
4114 they were already the first instruction in the TB. If this is not
thsbf20dc02008-06-30 17:22:19 +00004115 the first instruction in a TB then re-execute the preceding
pbrook2e70f6e2008-06-29 01:03:05 +00004116 branch. */
4117#if defined(TARGET_MIPS)
4118 if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
4119 env->active_tc.PC -= 4;
4120 env->icount_decr.u16.low++;
4121 env->hflags &= ~MIPS_HFLAG_BMASK;
4122 }
4123#elif defined(TARGET_SH4)
4124 if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
4125 && n > 1) {
4126 env->pc -= 2;
4127 env->icount_decr.u16.low++;
4128 env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
4129 }
4130#endif
4131 /* This should never happen. */
4132 if (n > CF_COUNT_MASK)
4133 cpu_abort(env, "TB too big during recompile");
4134
4135 cflags = n | CF_LAST_IO;
4136 pc = tb->pc;
4137 cs_base = tb->cs_base;
4138 flags = tb->flags;
4139 tb_phys_invalidate(tb, -1);
4140 /* FIXME: In theory this could raise an exception. In practice
4141 we have already translated the block once so it's probably ok. */
4142 tb_gen_code(env, pc, cs_base, flags, cflags);
thsbf20dc02008-06-30 17:22:19 +00004143 /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
pbrook2e70f6e2008-06-29 01:03:05 +00004144 the first in the TB) then we end up generating a whole new TB and
4145 repeating the fault, which is horribly inefficient.
4146 Better would be to execute just this insn uncached, or generate a
4147 second new TB. */
4148 cpu_resume_from_signal(env, NULL);
4149}
4150
Paul Brookb3755a92010-03-12 16:54:58 +00004151#if !defined(CONFIG_USER_ONLY)
4152
Stefan Weil055403b2010-10-22 23:03:32 +02004153void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
bellarde3db7222005-01-26 22:00:47 +00004154{
4155 int i, target_code_size, max_target_code_size;
4156 int direct_jmp_count, direct_jmp2_count, cross_page;
4157 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +00004158
bellarde3db7222005-01-26 22:00:47 +00004159 target_code_size = 0;
4160 max_target_code_size = 0;
4161 cross_page = 0;
4162 direct_jmp_count = 0;
4163 direct_jmp2_count = 0;
4164 for(i = 0; i < nb_tbs; i++) {
4165 tb = &tbs[i];
4166 target_code_size += tb->size;
4167 if (tb->size > max_target_code_size)
4168 max_target_code_size = tb->size;
4169 if (tb->page_addr[1] != -1)
4170 cross_page++;
4171 if (tb->tb_next_offset[0] != 0xffff) {
4172 direct_jmp_count++;
4173 if (tb->tb_next_offset[1] != 0xffff) {
4174 direct_jmp2_count++;
4175 }
4176 }
4177 }
4178 /* XXX: avoid using doubles ? */
bellard57fec1f2008-02-01 10:50:11 +00004179 cpu_fprintf(f, "Translation buffer state:\n");
Stefan Weil055403b2010-10-22 23:03:32 +02004180 cpu_fprintf(f, "gen code size %td/%ld\n",
bellard26a5f132008-05-28 12:30:31 +00004181 code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
4182 cpu_fprintf(f, "TB count %d/%d\n",
4183 nb_tbs, code_gen_max_blocks);
ths5fafdf22007-09-16 21:08:06 +00004184 cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
bellarde3db7222005-01-26 22:00:47 +00004185 nb_tbs ? target_code_size / nb_tbs : 0,
4186 max_target_code_size);
Stefan Weil055403b2010-10-22 23:03:32 +02004187 cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
bellarde3db7222005-01-26 22:00:47 +00004188 nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
4189 target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
ths5fafdf22007-09-16 21:08:06 +00004190 cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
4191 cross_page,
bellarde3db7222005-01-26 22:00:47 +00004192 nb_tbs ? (cross_page * 100) / nb_tbs : 0);
4193 cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
ths5fafdf22007-09-16 21:08:06 +00004194 direct_jmp_count,
bellarde3db7222005-01-26 22:00:47 +00004195 nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
4196 direct_jmp2_count,
4197 nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
bellard57fec1f2008-02-01 10:50:11 +00004198 cpu_fprintf(f, "\nStatistics:\n");
bellarde3db7222005-01-26 22:00:47 +00004199 cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
4200 cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
4201 cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
bellardb67d9a52008-05-23 09:57:34 +00004202 tcg_dump_info(f, cpu_fprintf);
bellarde3db7222005-01-26 22:00:47 +00004203}
4204
Benjamin Herrenschmidt82afa582012-01-10 01:35:11 +00004205/*
4206 * A helper function for the _utterly broken_ virtio device model to find out if
4207 * it's running on a big endian machine. Don't do this at home kids!
4208 */
4209bool virtio_is_big_endian(void);
4210bool virtio_is_big_endian(void)
4211{
4212#if defined(TARGET_WORDS_BIGENDIAN)
4213 return true;
4214#else
4215 return false;
4216#endif
4217}
4218
bellard61382a52003-10-27 21:22:23 +00004219#endif
Wen Congyang76f35532012-05-07 12:04:18 +08004220
4221#ifndef CONFIG_USER_ONLY
4222bool cpu_physical_memory_is_io(target_phys_addr_t phys_addr)
4223{
4224 MemoryRegionSection *section;
4225
4226 section = phys_page_find(phys_addr >> TARGET_PAGE_BITS);
4227
4228 return !(memory_region_is_ram(section->mr) ||
4229 memory_region_is_romd(section->mr));
4230}
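/*
 * Illustrative sketch, not part of the original file: code that walks guest
 * physical memory (for example a crash-dump or memory-save path) can use
 * cpu_physical_memory_is_io() to skip MMIO pages, where a blind read could
 * trigger device side effects.  The helper below is hypothetical.
 */
static bool example_page_is_dumpable(target_phys_addr_t paddr)
{
    return !cpu_physical_memory_is_io(paddr & TARGET_PAGE_MASK);
}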
4231#endif