blob: 827790088b605a259d53b1b8fe2a625958c5046f [file] [log] [blame]
bellard54936002003-05-13 00:25:15 +00001/*
bellardfd6ce8f2003-05-14 19:00:11 +00002 * virtual page mapping and translated block handling
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard54936002003-05-13 00:25:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard54936002003-05-13 00:25:15 +000018 */
bellard67b915a2004-03-31 23:37:16 +000019#include "config.h"
bellardd5a8f072004-09-29 21:15:28 +000020#ifdef _WIN32
21#include <windows.h>
22#else
bellarda98d49b2004-11-14 16:22:05 +000023#include <sys/types.h>
bellardd5a8f072004-09-29 21:15:28 +000024#include <sys/mman.h>
25#endif
bellard54936002003-05-13 00:25:15 +000026
Stefan Weil055403b2010-10-22 23:03:32 +020027#include "qemu-common.h"
bellard6180a182003-09-30 21:04:53 +000028#include "cpu.h"
bellardb67d9a52008-05-23 09:57:34 +000029#include "tcg.h"
pbrookb3c77242008-06-30 16:31:04 +000030#include "hw/hw.h"
Alex Williamsoncc9e98c2010-06-25 11:09:43 -060031#include "hw/qdev.h"
aliguori74576192008-10-06 14:02:03 +000032#include "osdep.h"
aliguori7ba1e612008-11-05 16:04:33 +000033#include "kvm.h"
Jun Nakajima432d2682010-08-31 16:41:25 +010034#include "hw/xen.h"
Blue Swirl29e922b2010-03-29 19:24:00 +000035#include "qemu-timer.h"
pbrook53a59602006-03-25 19:31:22 +000036#if defined(CONFIG_USER_ONLY)
37#include <qemu.h>
Juergen Lockf01576f2010-03-25 22:32:16 +010038#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
39#include <sys/param.h>
40#if __FreeBSD_version >= 700104
41#define HAVE_KINFO_GETVMMAP
42#define sigqueue sigqueue_freebsd /* avoid redefinition */
43#include <sys/time.h>
44#include <sys/proc.h>
45#include <machine/profile.h>
46#define _KERNEL
47#include <sys/user.h>
48#undef _KERNEL
49#undef sigqueue
50#include <libutil.h>
51#endif
52#endif
Jun Nakajima432d2682010-08-31 16:41:25 +010053#else /* !CONFIG_USER_ONLY */
54#include "xen-mapcache.h"
Stefano Stabellini6506e4f2011-05-19 18:35:44 +010055#include "trace.h"
pbrook53a59602006-03-25 19:31:22 +000056#endif
bellard54936002003-05-13 00:25:15 +000057
bellardfd6ce8f2003-05-14 19:00:11 +000058//#define DEBUG_TB_INVALIDATE
bellard66e85a22003-06-24 13:28:12 +000059//#define DEBUG_FLUSH
bellard9fa3e852004-01-04 18:06:42 +000060//#define DEBUG_TLB
pbrook67d3b952006-12-18 05:03:52 +000061//#define DEBUG_UNASSIGNED
bellardfd6ce8f2003-05-14 19:00:11 +000062
63/* make various TB consistency checks */
ths5fafdf22007-09-16 21:08:06 +000064//#define DEBUG_TB_CHECK
65//#define DEBUG_TLB_CHECK
bellardfd6ce8f2003-05-14 19:00:11 +000066
ths1196be32007-03-17 15:17:58 +000067//#define DEBUG_IOPORT
blueswir1db7b5422007-05-26 17:36:03 +000068//#define DEBUG_SUBPAGE
ths1196be32007-03-17 15:17:58 +000069
pbrook99773bd2006-04-16 15:14:59 +000070#if !defined(CONFIG_USER_ONLY)
71/* TB consistency checks only implemented for usermode emulation. */
72#undef DEBUG_TB_CHECK
73#endif
74
bellard9fa3e852004-01-04 18:06:42 +000075#define SMC_BITMAP_USE_THRESHOLD 10
76
blueswir1bdaf78e2008-10-04 07:24:27 +000077static TranslationBlock *tbs;
Stefan Weil24ab68a2010-07-19 18:23:17 +020078static int code_gen_max_blocks;
bellard9fa3e852004-01-04 18:06:42 +000079TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
blueswir1bdaf78e2008-10-04 07:24:27 +000080static int nb_tbs;
bellardeb51d102003-05-14 21:51:13 +000081/* any access to the tbs or the page table must use this lock */
Anthony Liguoric227f092009-10-01 16:12:16 -050082spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;
bellardfd6ce8f2003-05-14 19:00:11 +000083
blueswir1141ac462008-07-26 15:05:57 +000084#if defined(__arm__) || defined(__sparc_v9__)
85/* The prologue must be reachable with a direct jump. ARM and Sparc64
86 have limited branch ranges (possibly also PPC) so place it in a
blueswir1d03d8602008-07-10 17:21:31 +000087 section close to code segment. */
88#define code_gen_section \
89 __attribute__((__section__(".gen_code"))) \
90 __attribute__((aligned (32)))
Stefan Weilf8e2af12009-06-18 23:04:48 +020091#elif defined(_WIN32)
92/* Maximum alignment for Win32 is 16. */
93#define code_gen_section \
94 __attribute__((aligned (16)))
blueswir1d03d8602008-07-10 17:21:31 +000095#else
96#define code_gen_section \
97 __attribute__((aligned (32)))
98#endif
99
100uint8_t code_gen_prologue[1024] code_gen_section;
blueswir1bdaf78e2008-10-04 07:24:27 +0000101static uint8_t *code_gen_buffer;
102static unsigned long code_gen_buffer_size;
bellard26a5f132008-05-28 12:30:31 +0000103/* threshold to flush the translated code buffer */
blueswir1bdaf78e2008-10-04 07:24:27 +0000104static unsigned long code_gen_buffer_max_size;
Stefan Weil24ab68a2010-07-19 18:23:17 +0200105static uint8_t *code_gen_ptr;
bellardfd6ce8f2003-05-14 19:00:11 +0000106
pbrooke2eef172008-06-08 01:09:01 +0000107#if !defined(CONFIG_USER_ONLY)
bellard9fa3e852004-01-04 18:06:42 +0000108int phys_ram_fd;
aliguori74576192008-10-06 14:02:03 +0000109static int in_migration;
pbrook94a6b542009-04-11 17:15:54 +0000110
Alex Williamsonf471a172010-06-11 11:11:42 -0600111RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
pbrooke2eef172008-06-08 01:09:01 +0000112#endif
bellard9fa3e852004-01-04 18:06:42 +0000113
bellard6a00d602005-11-21 23:25:50 +0000114CPUState *first_cpu;
115/* current CPU in the current thread. It is only valid inside
116 cpu_exec() */
ths5fafdf22007-09-16 21:08:06 +0000117CPUState *cpu_single_env;
pbrook2e70f6e2008-06-29 01:03:05 +0000118/* 0 = Do not count executed instructions.
thsbf20dc02008-06-30 17:22:19 +0000119 1 = Precise instruction counting.
pbrook2e70f6e2008-06-29 01:03:05 +0000120 2 = Adaptive rate instruction counting. */
121int use_icount = 0;
122/* Current instruction counter. While executing translated code this may
123 include some instructions that have not yet been executed. */
124int64_t qemu_icount;
bellard6a00d602005-11-21 23:25:50 +0000125
bellard54936002003-05-13 00:25:15 +0000126typedef struct PageDesc {
bellard92e873b2004-05-21 14:52:29 +0000127 /* list of TBs intersecting this ram page */
bellardfd6ce8f2003-05-14 19:00:11 +0000128 TranslationBlock *first_tb;
bellard9fa3e852004-01-04 18:06:42 +0000129 /* in order to optimize self modifying code, we count the number
130 of lookups we do to a given page to use a bitmap */
131 unsigned int code_write_count;
132 uint8_t *code_bitmap;
133#if defined(CONFIG_USER_ONLY)
134 unsigned long flags;
135#endif
bellard54936002003-05-13 00:25:15 +0000136} PageDesc;
137
Paul Brook41c1b1c2010-03-12 16:54:58 +0000138/* In system mode we want L1_MAP to be based on ram offsets,
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800139 while in user mode we want it to be based on virtual addresses. */
140#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000141#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
142# define L1_MAP_ADDR_SPACE_BITS HOST_LONG_BITS
143#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800144# define L1_MAP_ADDR_SPACE_BITS TARGET_PHYS_ADDR_SPACE_BITS
Paul Brook41c1b1c2010-03-12 16:54:58 +0000145#endif
j_mayerbedb69e2007-04-05 20:08:21 +0000146#else
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800147# define L1_MAP_ADDR_SPACE_BITS TARGET_VIRT_ADDR_SPACE_BITS
j_mayerbedb69e2007-04-05 20:08:21 +0000148#endif
bellard54936002003-05-13 00:25:15 +0000149
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800150/* Size of the L2 (and L3, etc) page tables. */
151#define L2_BITS 10
bellard54936002003-05-13 00:25:15 +0000152#define L2_SIZE (1 << L2_BITS)
153
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800154/* The bits remaining after N lower levels of page tables. */
155#define P_L1_BITS_REM \
156 ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
157#define V_L1_BITS_REM \
158 ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
159
160/* Size of the L1 page table. Avoid silly small sizes. */
161#if P_L1_BITS_REM < 4
162#define P_L1_BITS (P_L1_BITS_REM + L2_BITS)
163#else
164#define P_L1_BITS P_L1_BITS_REM
165#endif
166
167#if V_L1_BITS_REM < 4
168#define V_L1_BITS (V_L1_BITS_REM + L2_BITS)
169#else
170#define V_L1_BITS V_L1_BITS_REM
171#endif
172
173#define P_L1_SIZE ((target_phys_addr_t)1 << P_L1_BITS)
174#define V_L1_SIZE ((target_ulong)1 << V_L1_BITS)
175
176#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
177#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
178
bellard83fb7ad2004-07-05 21:25:26 +0000179unsigned long qemu_real_host_page_size;
180unsigned long qemu_host_page_bits;
181unsigned long qemu_host_page_size;
182unsigned long qemu_host_page_mask;
bellard54936002003-05-13 00:25:15 +0000183
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800184/* This is a multi-level map on the virtual address space.
185 The bottom level has pointers to PageDesc. */
186static void *l1_map[V_L1_SIZE];
bellard54936002003-05-13 00:25:15 +0000187
pbrooke2eef172008-06-08 01:09:01 +0000188#if !defined(CONFIG_USER_ONLY)
Paul Brook41c1b1c2010-03-12 16:54:58 +0000189typedef struct PhysPageDesc {
190 /* offset in host memory of the page + io_index in the low bits */
191 ram_addr_t phys_offset;
192 ram_addr_t region_offset;
193} PhysPageDesc;
194
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800195/* This is a multi-level map on the physical address space.
196 The bottom level has pointers to PhysPageDesc. */
197static void *l1_phys_map[P_L1_SIZE];
Paul Brook6d9a1302010-02-28 23:55:53 +0000198
pbrooke2eef172008-06-08 01:09:01 +0000199static void io_mem_init(void);
200
bellard33417e72003-08-10 21:47:01 +0000201/* io memory support */
bellard33417e72003-08-10 21:47:01 +0000202CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
203CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
bellarda4193c82004-06-03 14:01:43 +0000204void *io_mem_opaque[IO_MEM_NB_ENTRIES];
blueswir1511d2b12009-03-07 15:32:56 +0000205static char io_mem_used[IO_MEM_NB_ENTRIES];
pbrook6658ffb2007-03-16 23:58:11 +0000206static int io_mem_watch;
207#endif
bellard33417e72003-08-10 21:47:01 +0000208
bellard34865132003-10-05 14:28:56 +0000209/* log support */
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200210#ifdef WIN32
211static const char *logfilename = "qemu.log";
212#else
blueswir1d9b630f2008-10-05 09:57:08 +0000213static const char *logfilename = "/tmp/qemu.log";
Juha Riihimäki1e8b27c2009-12-03 15:56:02 +0200214#endif
bellard34865132003-10-05 14:28:56 +0000215FILE *logfile;
216int loglevel;
pbrooke735b912007-06-30 13:53:24 +0000217static int log_append = 0;
bellard34865132003-10-05 14:28:56 +0000218
bellarde3db7222005-01-26 22:00:47 +0000219/* statistics */
Paul Brookb3755a92010-03-12 16:54:58 +0000220#if !defined(CONFIG_USER_ONLY)
bellarde3db7222005-01-26 22:00:47 +0000221static int tlb_flush_count;
Paul Brookb3755a92010-03-12 16:54:58 +0000222#endif
bellarde3db7222005-01-26 22:00:47 +0000223static int tb_flush_count;
224static int tb_phys_invalidate_count;
225
#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);

}
#else
/* Make [addr, addr + size) executable by widening protection on every
   host page the range touches (mprotect requires page-aligned bounds). */
static void map_exec(void *addr, long size)
{
    unsigned long page_size = getpagesize();
    /* Round the start down and the end up to host page boundaries. */
    unsigned long first = (unsigned long)addr & ~(page_size - 1);
    unsigned long last = ((unsigned long)addr + size + page_size - 1)
                         & ~(page_size - 1);

    mprotect((void *)first, last - first,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif
251
/* Determine the host page geometry and, for BSD user-mode emulation,
   mark every page already mapped by the host as PAGE_RESERVED so the
   guest cannot be placed on top of it. */
static void page_init(void)
{
    /* NOTE: we can always suppose that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    /* qemu_host_page_size may have been preset elsewhere; only default it.
       It is clamped so a host page is never smaller than a target page. */
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    /* Derive log2 and mask from the (power-of-two) host page size. */
    qemu_host_page_bits = 0;
    while ((1 << qemu_host_page_bits) < qemu_host_page_size)
        qemu_host_page_bits++;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        /* FreeBSD >= 700104: enumerate host mappings via libutil. */
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        /* Mapping runs past the guest address space:
                           reserve everything up to the top of it. */
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        /* Fallback: parse the Linux-compat maps file line by line. */
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf (f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}
340
/* Walk the multi-level l1_map radix tree and return the PageDesc for
   the given page index.  With alloc != 0, missing intermediate tables
   and the final PageDesc array are created on demand; with alloc == 0
   a missing level makes the lookup return NULL. */
static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use qemu_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = qemu_mallocz(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1: each level consumes L2_BITS of the index. */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    /* Bottom level: an array of L2_SIZE PageDesc entries. */
    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}
390
/* Lookup-only variant of page_find_alloc(): returns NULL if the page
   has never been entered into the map (never allocates). */
static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}
395
Paul Brook6d9a1302010-02-28 23:55:53 +0000396#if !defined(CONFIG_USER_ONLY)
/* Walk the multi-level l1_phys_map radix tree and return the
   PhysPageDesc for the given physical page index.  With alloc != 0,
   missing levels are created; freshly created leaf entries are marked
   IO_MEM_UNASSIGNED.  With alloc == 0 a missing level returns NULL. */
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1: each level consumes L2_BITS of the index. */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = qemu_mallocz(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;  /* NOTE: intentionally shadows the outer loop index */

        if (!alloc) {
            return NULL;
        }

        /* qemu_malloc (not mallocz): every field is initialized below. */
        *lp = pd = qemu_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}
436
/* Lookup-only variant of phys_page_find_alloc(): returns NULL when the
   physical page has no descriptor yet (never allocates). */
static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}
441
Anthony Liguoric227f092009-10-01 16:12:16 -0500442static void tlb_protect_code(ram_addr_t ram_addr);
443static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
bellard3a7d9292005-08-21 09:26:42 +0000444 target_ulong vaddr);
pbrookc8a706f2008-06-02 16:16:42 +0000445#define mmap_lock() do { } while(0)
446#define mmap_unlock() do { } while(0)
bellard9fa3e852004-01-04 18:06:42 +0000447#endif
bellardfd6ce8f2003-05-14 19:00:11 +0000448
bellard43694152008-05-29 09:35:57 +0000449#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)
450
451#if defined(CONFIG_USER_ONLY)
Stuart Bradyccbb4d42009-05-03 12:15:06 +0100452/* Currently it is not recommended to allocate big chunks of data in
bellard43694152008-05-29 09:35:57 +0000453 user mode. It will change when a dedicated libc will be used */
454#define USE_STATIC_CODE_GEN_BUFFER
455#endif
456
457#ifdef USE_STATIC_CODE_GEN_BUFFER
Aurelien Jarnoebf50fb2010-03-29 02:12:51 +0200458static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
459 __attribute__((aligned (CODE_GEN_ALIGN)));
bellard43694152008-05-29 09:35:57 +0000460#endif
461
/* Allocate (or adopt) the buffer that will hold generated host code,
   make it executable, and size the TranslationBlock array accordingly.
   tb_size is the requested buffer size in bytes; 0 selects a default.
   Placement constraints below exist because several hosts can only use
   direct call/branch encodings within a limited address range. */
static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    /* User mode: use the statically allocated buffer; tb_size ignored. */
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        /* in user mode, phys_ram_size is not meaningful */
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* Low 2GB so 32-bit relative branches can reach the buffer. */
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        // Map the buffer below 2G, so we can use direct calls and branches
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    /* Generic fallback: heap-allocate and flip protections afterwards. */
    code_gen_buffer = qemu_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    /* Keep slack so one in-flight TB can never overrun the buffer. */
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = qemu_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
563
/* Must be called before using the QEMU cpus. 'tb_size' is the size
   (in bytes) allocated to the translation buffer. Zero means default
   size. */
void cpu_exec_init_all(unsigned long tb_size)
{
    /* Order matters: the code buffer must exist before page/IO setup
       and before the TCG prologue is emitted into it. */
    cpu_gen_init();
    code_gen_alloc(tb_size);
    code_gen_ptr = code_gen_buffer;
    page_init();
#if !defined(CONFIG_USER_ONLY)
    io_mem_init();
#endif
#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
    /* There's no guest base to take into account, so go ahead and
       initialize the prologue now.  */
    tcg_prologue_init(&tcg_ctx);
#endif
}
582
pbrook9656f322008-07-01 20:01:19 +0000583#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
584
/* Post-load hook for the "cpu_common" vmstate section: sanitize the
   restored interrupt_request and drop all cached TLB entries, since
   host mappings are not preserved across save/load. */
static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

/* Migration description of the state shared by every CPU model.
   NOTE(review): field list and version_id define the wire format —
   do not reorder or extend without bumping version_id. */
static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
pbrook9656f322008-07-01 20:01:19 +0000609#endif
610
Glauber Costa950f1472009-06-09 12:15:18 -0400611CPUState *qemu_get_cpu(int cpu)
612{
613 CPUState *env = first_cpu;
614
615 while (env) {
616 if (env->cpu_index == cpu)
617 break;
618 env = env->next_cpu;
619 }
620
621 return env;
622}
623
/* Register a freshly created CPU: append it to the global first_cpu
   list, assign it the next free cpu_index, and hook it into savevm /
   migration in system mode. */
void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    /* User mode may create CPUs from multiple threads (clone). */
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    /* Walk to the list tail; cpu_index ends up as the list length. */
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    /* Publish the CPU on the list only after it is initialized. */
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}
656
Tristan Gingoldd1a1eb72011-02-10 10:04:57 +0100657/* Allocate a new translation block. Flush the translation buffer if
658 too many translation blocks or too much generated code. */
659static TranslationBlock *tb_alloc(target_ulong pc)
660{
661 TranslationBlock *tb;
662
663 if (nb_tbs >= code_gen_max_blocks ||
664 (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
665 return NULL;
666 tb = &tbs[nb_tbs++];
667 tb->pc = pc;
668 tb->cflags = 0;
669 return tb;
670}
671
672void tb_free(TranslationBlock *tb)
673{
674 /* In practice this is mostly used for single use temporary TB
675 Ignore the hard cases and just back up if this TB happens to
676 be the last one generated. */
677 if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
678 code_gen_ptr = tb->tc_ptr;
679 nb_tbs--;
680 }
681}
682
bellard9fa3e852004-01-04 18:06:42 +0000683static inline void invalidate_page_bitmap(PageDesc *p)
684{
685 if (p->code_bitmap) {
bellard59817cc2004-02-16 22:01:13 +0000686 qemu_free(p->code_bitmap);
bellard9fa3e852004-01-04 18:06:42 +0000687 p->code_bitmap = NULL;
688 }
689 p->code_write_count = 0;
690}
691
/* Set to NULL all the 'first_tb' fields in all PageDescs. */

/* Recursive helper: at level 0 'lp' points at a PageDesc array whose
   entries are cleared; at higher levels it points at a table of child
   pointers to recurse into.  NULL subtrees are skipped. */
static void page_flush_tb_1 (int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1 (level - 1, pp + i);
        }
    }
}
714
bellardfd6ce8f2003-05-14 19:00:11 +0000715static void page_flush_tb(void)
716{
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800717 int i;
718 for (i = 0; i < V_L1_SIZE; i++) {
719 page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
bellardfd6ce8f2003-05-14 19:00:11 +0000720 }
721}
722
/* flush all the translation blocks */
/* XXX: tb_flush is currently not thread safe */
void tb_flush(CPUState *env1)
{
    CPUState *env;
#if defined(DEBUG_FLUSH)
    printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
           (unsigned long)(code_gen_ptr - code_gen_buffer),
           nb_tbs, nb_tbs > 0 ?
           ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
#endif
    /* If generated code ran past the buffer, translation state is
       corrupt beyond recovery: abort rather than continue. */
    if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
        cpu_abort(env1, "Internal error: code buffer overflow\n");

    nb_tbs = 0;

    /* Invalidate every CPU's direct TB lookup cache. */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
    }

    /* Empty the physical hash and per-page TB lists, then rewind the
       code buffer so generation restarts from the beginning. */
    memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
    page_flush_tb();

    code_gen_ptr = code_gen_buffer;
    /* XXX: flush processor icache at this point if cache flush is
       expensive */
    tb_flush_count++;
}
751
752#ifdef DEBUG_TB_CHECK
753
j_mayerbc98a7e2007-04-04 07:55:12 +0000754static void tb_invalidate_check(target_ulong address)
bellardfd6ce8f2003-05-14 19:00:11 +0000755{
756 TranslationBlock *tb;
757 int i;
758 address &= TARGET_PAGE_MASK;
pbrook99773bd2006-04-16 15:14:59 +0000759 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
760 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000761 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
762 address >= tb->pc + tb->size)) {
Blue Swirl0bf9e312009-07-20 17:19:25 +0000763 printf("ERROR invalidate: address=" TARGET_FMT_lx
764 " PC=%08lx size=%04x\n",
pbrook99773bd2006-04-16 15:14:59 +0000765 address, (long)tb->pc, tb->size);
bellardfd6ce8f2003-05-14 19:00:11 +0000766 }
767 }
768 }
769}
770
/* verify that all the pages have correct rights for code */
/* Debug check (DEBUG_TB_CHECK): every page that holds translated code
   must have been write-protected; report any TB whose first or last
   byte lies on a still-writable page. */
static void tb_page_check(void)
{
    TranslationBlock *tb;
    int i, flags1, flags2;

    for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
        for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
            /* a TB may span two pages: check both ends */
            flags1 = page_get_flags(tb->pc);
            flags2 = page_get_flags(tb->pc + tb->size - 1);
            if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
                printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
                       (long)tb->pc, tb->size, flags1, flags2);
            }
        }
    }
}
788
789#endif
790
791/* invalidate one TB */
792static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
793 int next_offset)
794{
795 TranslationBlock *tb1;
796 for(;;) {
797 tb1 = *ptb;
798 if (tb1 == tb) {
799 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
800 break;
801 }
802 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
803 }
804}
805
bellard9fa3e852004-01-04 18:06:42 +0000806static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
807{
808 TranslationBlock *tb1;
809 unsigned int n1;
810
811 for(;;) {
812 tb1 = *ptb;
813 n1 = (long)tb1 & 3;
814 tb1 = (TranslationBlock *)((long)tb1 & ~3);
815 if (tb1 == tb) {
816 *ptb = tb1->page_next[n1];
817 break;
818 }
819 ptb = &tb1->page_next[n1];
820 }
821}
822
bellardd4e81642003-05-25 16:46:15 +0000823static inline void tb_jmp_remove(TranslationBlock *tb, int n)
824{
825 TranslationBlock *tb1, **ptb;
826 unsigned int n1;
827
828 ptb = &tb->jmp_next[n];
829 tb1 = *ptb;
830 if (tb1) {
831 /* find tb(n) in circular list */
832 for(;;) {
833 tb1 = *ptb;
834 n1 = (long)tb1 & 3;
835 tb1 = (TranslationBlock *)((long)tb1 & ~3);
836 if (n1 == n && tb1 == tb)
837 break;
838 if (n1 == 2) {
839 ptb = &tb1->jmp_first;
840 } else {
841 ptb = &tb1->jmp_next[n1];
842 }
843 }
844 /* now we can suppress tb(n) from the list */
845 *ptb = tb->jmp_next[n];
846
847 tb->jmp_next[n] = NULL;
848 }
849}
850
/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    /* repatch the generated jump to land back on this TB's own code at
       tc_ptr + tb_next_offset[n] instead of branching into another TB */
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}
857
Paul Brook41c1b1c2010-03-12 16:54:58 +0000858void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
bellardfd6ce8f2003-05-14 19:00:11 +0000859{
bellard6a00d602005-11-21 23:25:50 +0000860 CPUState *env;
bellardfd6ce8f2003-05-14 19:00:11 +0000861 PageDesc *p;
bellard8a40a182005-11-20 10:35:40 +0000862 unsigned int h, n1;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000863 tb_page_addr_t phys_pc;
bellard8a40a182005-11-20 10:35:40 +0000864 TranslationBlock *tb1, *tb2;
ths3b46e622007-09-17 08:09:54 +0000865
bellard9fa3e852004-01-04 18:06:42 +0000866 /* remove the TB from the hash list */
867 phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
868 h = tb_phys_hash_func(phys_pc);
ths5fafdf22007-09-16 21:08:06 +0000869 tb_remove(&tb_phys_hash[h], tb,
bellard9fa3e852004-01-04 18:06:42 +0000870 offsetof(TranslationBlock, phys_hash_next));
bellardfd6ce8f2003-05-14 19:00:11 +0000871
bellard9fa3e852004-01-04 18:06:42 +0000872 /* remove the TB from the page list */
873 if (tb->page_addr[0] != page_addr) {
874 p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
875 tb_page_remove(&p->first_tb, tb);
876 invalidate_page_bitmap(p);
877 }
878 if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
879 p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
880 tb_page_remove(&p->first_tb, tb);
881 invalidate_page_bitmap(p);
882 }
883
bellard8a40a182005-11-20 10:35:40 +0000884 tb_invalidated_flag = 1;
885
886 /* remove the TB from the hash list */
887 h = tb_jmp_cache_hash_func(tb->pc);
bellard6a00d602005-11-21 23:25:50 +0000888 for(env = first_cpu; env != NULL; env = env->next_cpu) {
889 if (env->tb_jmp_cache[h] == tb)
890 env->tb_jmp_cache[h] = NULL;
891 }
bellard8a40a182005-11-20 10:35:40 +0000892
893 /* suppress this TB from the two jump lists */
894 tb_jmp_remove(tb, 0);
895 tb_jmp_remove(tb, 1);
896
897 /* suppress any remaining jumps to this TB */
898 tb1 = tb->jmp_first;
899 for(;;) {
900 n1 = (long)tb1 & 3;
901 if (n1 == 2)
902 break;
903 tb1 = (TranslationBlock *)((long)tb1 & ~3);
904 tb2 = tb1->jmp_next[n1];
905 tb_reset_jump(tb1, n1);
906 tb1->jmp_next[n1] = NULL;
907 tb1 = tb2;
908 }
909 tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */
910
bellarde3db7222005-01-26 22:00:47 +0000911 tb_phys_invalidate_count++;
bellard9fa3e852004-01-04 18:06:42 +0000912}
913
/* Set bits [start, start+len) in the bitmap 'tab'.  Bit i lives in
   byte i/8 at bit position i%8. */
static inline void set_bits(uint8_t *tab, int start, int len)
{
    int end = start + len;
    uint8_t *byte = tab + (start >> 3);
    int head_mask = 0xff << (start & 7);

    if ((start & ~7) == (end & ~7)) {
        /* range confined to a single byte */
        if (start < end) {
            *byte |= head_mask & ~(0xff << (end & 7));
        }
    } else {
        /* leading partial byte */
        *byte++ |= head_mask;
        start = (start + 8) & ~7;
        /* whole bytes in the middle */
        while (start < (end & ~7)) {
            *byte++ = 0xff;
            start += 8;
        }
        /* trailing partial byte, if any */
        if (start < end) {
            *byte |= ~(0xff << (end & 7));
        }
    }
}
940
/* Build the page's SMC code bitmap: one bit per byte of the page,
   set where the byte is covered by translated code.  Used to make
   writes to non-code bytes of a code page cheap. */
static void build_page_bitmap(PageDesc *p)
{
    int n, tb_start, tb_end;
    TranslationBlock *tb;

    /* TARGET_PAGE_SIZE bits, zero-initialized */
    p->code_bitmap = qemu_mallocz(TARGET_PAGE_SIZE / 8);

    tb = p->first_tb;
    while (tb != NULL) {
        /* low 2 bits of the list pointer select which of the TB's two
           pages this list entry belongs to */
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->pc & ~TARGET_PAGE_MASK;
            tb_end = tb_start + tb->size;
            if (tb_end > TARGET_PAGE_SIZE)
                tb_end = TARGET_PAGE_SIZE;
        } else {
            /* second page of a spanning TB: starts at page offset 0 */
            tb_start = 0;
            tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
        tb = tb->page_next[n];
    }
}
968
pbrook2e70f6e2008-06-29 01:03:05 +0000969TranslationBlock *tb_gen_code(CPUState *env,
970 target_ulong pc, target_ulong cs_base,
971 int flags, int cflags)
bellardd720b932004-04-25 17:57:43 +0000972{
973 TranslationBlock *tb;
974 uint8_t *tc_ptr;
Paul Brook41c1b1c2010-03-12 16:54:58 +0000975 tb_page_addr_t phys_pc, phys_page2;
976 target_ulong virt_page2;
bellardd720b932004-04-25 17:57:43 +0000977 int code_gen_size;
978
Paul Brook41c1b1c2010-03-12 16:54:58 +0000979 phys_pc = get_page_addr_code(env, pc);
bellardc27004e2005-01-03 23:35:10 +0000980 tb = tb_alloc(pc);
bellardd720b932004-04-25 17:57:43 +0000981 if (!tb) {
982 /* flush must be done */
983 tb_flush(env);
984 /* cannot fail at this point */
bellardc27004e2005-01-03 23:35:10 +0000985 tb = tb_alloc(pc);
pbrook2e70f6e2008-06-29 01:03:05 +0000986 /* Don't forget to invalidate previous TB info. */
987 tb_invalidated_flag = 1;
bellardd720b932004-04-25 17:57:43 +0000988 }
989 tc_ptr = code_gen_ptr;
990 tb->tc_ptr = tc_ptr;
991 tb->cs_base = cs_base;
992 tb->flags = flags;
993 tb->cflags = cflags;
blueswir1d07bde82007-12-11 19:35:45 +0000994 cpu_gen_code(env, tb, &code_gen_size);
bellardd720b932004-04-25 17:57:43 +0000995 code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size + CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
ths3b46e622007-09-17 08:09:54 +0000996
bellardd720b932004-04-25 17:57:43 +0000997 /* check next page if needed */
bellardc27004e2005-01-03 23:35:10 +0000998 virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
bellardd720b932004-04-25 17:57:43 +0000999 phys_page2 = -1;
bellardc27004e2005-01-03 23:35:10 +00001000 if ((pc & TARGET_PAGE_MASK) != virt_page2) {
Paul Brook41c1b1c2010-03-12 16:54:58 +00001001 phys_page2 = get_page_addr_code(env, virt_page2);
bellardd720b932004-04-25 17:57:43 +00001002 }
Paul Brook41c1b1c2010-03-12 16:54:58 +00001003 tb_link_page(tb, phys_pc, phys_page2);
pbrook2e70f6e2008-06-29 01:03:05 +00001004 return tb;
bellardd720b932004-04-25 17:57:43 +00001005}
ths3b46e622007-09-17 08:09:54 +00001006
bellard9fa3e852004-01-04 18:06:42 +00001007/* invalidate all TBs which intersect with the target physical page
1008 starting in range [start;end[. NOTE: start and end must refer to
bellardd720b932004-04-25 17:57:43 +00001009 the same physical page. 'is_cpu_write_access' should be true if called
1010 from a real cpu write access: the virtual CPU will exit the current
1011 TB if code is modified inside this TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001012void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellardd720b932004-04-25 17:57:43 +00001013 int is_cpu_write_access)
bellard9fa3e852004-01-04 18:06:42 +00001014{
aliguori6b917542008-11-18 19:46:41 +00001015 TranslationBlock *tb, *tb_next, *saved_tb;
bellardd720b932004-04-25 17:57:43 +00001016 CPUState *env = cpu_single_env;
Paul Brook41c1b1c2010-03-12 16:54:58 +00001017 tb_page_addr_t tb_start, tb_end;
aliguori6b917542008-11-18 19:46:41 +00001018 PageDesc *p;
1019 int n;
1020#ifdef TARGET_HAS_PRECISE_SMC
1021 int current_tb_not_found = is_cpu_write_access;
1022 TranslationBlock *current_tb = NULL;
1023 int current_tb_modified = 0;
1024 target_ulong current_pc = 0;
1025 target_ulong current_cs_base = 0;
1026 int current_flags = 0;
1027#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001028
1029 p = page_find(start >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001030 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00001031 return;
ths5fafdf22007-09-16 21:08:06 +00001032 if (!p->code_bitmap &&
bellardd720b932004-04-25 17:57:43 +00001033 ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
1034 is_cpu_write_access) {
bellard9fa3e852004-01-04 18:06:42 +00001035 /* build code bitmap */
1036 build_page_bitmap(p);
1037 }
1038
1039 /* we remove all the TBs in the range [start, end[ */
1040 /* XXX: see if in some cases it could be faster to invalidate all the code */
1041 tb = p->first_tb;
1042 while (tb != NULL) {
1043 n = (long)tb & 3;
1044 tb = (TranslationBlock *)((long)tb & ~3);
1045 tb_next = tb->page_next[n];
1046 /* NOTE: this is subtle as a TB may span two physical pages */
1047 if (n == 0) {
1048 /* NOTE: tb_end may be after the end of the page, but
1049 it is not a problem */
1050 tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
1051 tb_end = tb_start + tb->size;
1052 } else {
1053 tb_start = tb->page_addr[1];
1054 tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
1055 }
1056 if (!(tb_end <= start || tb_start >= end)) {
bellardd720b932004-04-25 17:57:43 +00001057#ifdef TARGET_HAS_PRECISE_SMC
1058 if (current_tb_not_found) {
1059 current_tb_not_found = 0;
1060 current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001061 if (env->mem_io_pc) {
bellardd720b932004-04-25 17:57:43 +00001062 /* now we have a real cpu fault */
pbrook2e70f6e2008-06-29 01:03:05 +00001063 current_tb = tb_find_pc(env->mem_io_pc);
bellardd720b932004-04-25 17:57:43 +00001064 }
1065 }
1066 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001067 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001068 /* If we are modifying the current TB, we must stop
1069 its execution. We could be more precise by checking
1070 that the modification is after the current PC, but it
1071 would require a specialized function to partially
1072 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001073
bellardd720b932004-04-25 17:57:43 +00001074 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001075 cpu_restore_state(current_tb, env, env->mem_io_pc);
aliguori6b917542008-11-18 19:46:41 +00001076 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1077 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001078 }
1079#endif /* TARGET_HAS_PRECISE_SMC */
bellard6f5a9f72005-11-26 20:12:28 +00001080 /* we need to do that to handle the case where a signal
1081 occurs while doing tb_phys_invalidate() */
1082 saved_tb = NULL;
1083 if (env) {
1084 saved_tb = env->current_tb;
1085 env->current_tb = NULL;
1086 }
bellard9fa3e852004-01-04 18:06:42 +00001087 tb_phys_invalidate(tb, -1);
bellard6f5a9f72005-11-26 20:12:28 +00001088 if (env) {
1089 env->current_tb = saved_tb;
1090 if (env->interrupt_request && env->current_tb)
1091 cpu_interrupt(env, env->interrupt_request);
1092 }
bellard9fa3e852004-01-04 18:06:42 +00001093 }
1094 tb = tb_next;
1095 }
1096#if !defined(CONFIG_USER_ONLY)
1097 /* if no code remaining, no need to continue to use slow writes */
1098 if (!p->first_tb) {
1099 invalidate_page_bitmap(p);
bellardd720b932004-04-25 17:57:43 +00001100 if (is_cpu_write_access) {
pbrook2e70f6e2008-06-29 01:03:05 +00001101 tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
bellardd720b932004-04-25 17:57:43 +00001102 }
1103 }
1104#endif
1105#ifdef TARGET_HAS_PRECISE_SMC
1106 if (current_tb_modified) {
1107 /* we generate a block containing just the instruction
1108 modifying the memory. It will ensure that it cannot modify
1109 itself */
bellardea1c1802004-06-14 18:56:36 +00001110 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001111 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001112 cpu_resume_from_signal(env, NULL);
bellard9fa3e852004-01-04 18:06:42 +00001113 }
1114#endif
1115}
1116
/* len must be <= 8 and start must be a multiple of len */
/* Fast path for a CPU write of 'len' bytes at physical address 'start':
   when the page has a code bitmap, only fall back to the full (slow)
   invalidation if the written bytes actually overlap translated code. */
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                  cpu_single_env->mem_io_vaddr, len,
                  cpu_single_env->eip,
                  cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        /* extract the 'len' bitmap bits covering the written bytes */
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}
1143
bellard9fa3e852004-01-04 18:06:42 +00001144#if !defined(CONFIG_SOFTMMU)
Paul Brook41c1b1c2010-03-12 16:54:58 +00001145static void tb_invalidate_phys_page(tb_page_addr_t addr,
bellardd720b932004-04-25 17:57:43 +00001146 unsigned long pc, void *puc)
bellard9fa3e852004-01-04 18:06:42 +00001147{
aliguori6b917542008-11-18 19:46:41 +00001148 TranslationBlock *tb;
bellard9fa3e852004-01-04 18:06:42 +00001149 PageDesc *p;
aliguori6b917542008-11-18 19:46:41 +00001150 int n;
bellardd720b932004-04-25 17:57:43 +00001151#ifdef TARGET_HAS_PRECISE_SMC
aliguori6b917542008-11-18 19:46:41 +00001152 TranslationBlock *current_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001153 CPUState *env = cpu_single_env;
aliguori6b917542008-11-18 19:46:41 +00001154 int current_tb_modified = 0;
1155 target_ulong current_pc = 0;
1156 target_ulong current_cs_base = 0;
1157 int current_flags = 0;
bellardd720b932004-04-25 17:57:43 +00001158#endif
bellard9fa3e852004-01-04 18:06:42 +00001159
1160 addr &= TARGET_PAGE_MASK;
1161 p = page_find(addr >> TARGET_PAGE_BITS);
ths5fafdf22007-09-16 21:08:06 +00001162 if (!p)
bellardfd6ce8f2003-05-14 19:00:11 +00001163 return;
1164 tb = p->first_tb;
bellardd720b932004-04-25 17:57:43 +00001165#ifdef TARGET_HAS_PRECISE_SMC
1166 if (tb && pc != 0) {
1167 current_tb = tb_find_pc(pc);
1168 }
1169#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001170 while (tb != NULL) {
bellard9fa3e852004-01-04 18:06:42 +00001171 n = (long)tb & 3;
1172 tb = (TranslationBlock *)((long)tb & ~3);
bellardd720b932004-04-25 17:57:43 +00001173#ifdef TARGET_HAS_PRECISE_SMC
1174 if (current_tb == tb &&
pbrook2e70f6e2008-06-29 01:03:05 +00001175 (current_tb->cflags & CF_COUNT_MASK) != 1) {
bellardd720b932004-04-25 17:57:43 +00001176 /* If we are modifying the current TB, we must stop
1177 its execution. We could be more precise by checking
1178 that the modification is after the current PC, but it
1179 would require a specialized function to partially
1180 restore the CPU state */
ths3b46e622007-09-17 08:09:54 +00001181
bellardd720b932004-04-25 17:57:43 +00001182 current_tb_modified = 1;
Stefan Weil618ba8e2011-04-18 06:39:53 +00001183 cpu_restore_state(current_tb, env, pc);
aliguori6b917542008-11-18 19:46:41 +00001184 cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
1185 &current_flags);
bellardd720b932004-04-25 17:57:43 +00001186 }
1187#endif /* TARGET_HAS_PRECISE_SMC */
bellard9fa3e852004-01-04 18:06:42 +00001188 tb_phys_invalidate(tb, addr);
1189 tb = tb->page_next[n];
bellardfd6ce8f2003-05-14 19:00:11 +00001190 }
1191 p->first_tb = NULL;
bellardd720b932004-04-25 17:57:43 +00001192#ifdef TARGET_HAS_PRECISE_SMC
1193 if (current_tb_modified) {
1194 /* we generate a block containing just the instruction
1195 modifying the memory. It will ensure that it cannot modify
1196 itself */
bellardea1c1802004-06-14 18:56:36 +00001197 env->current_tb = NULL;
pbrook2e70f6e2008-06-29 01:03:05 +00001198 tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
bellardd720b932004-04-25 17:57:43 +00001199 cpu_resume_from_signal(env, puc);
1200 }
1201#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001202}
bellard9fa3e852004-01-04 18:06:42 +00001203#endif
bellardfd6ce8f2003-05-14 19:00:11 +00001204
/* add the tb in the target page and protect it if necessary */
/* 'n' selects which of the TB's (up to two) pages is being linked;
   the index is stored in the low bits of the page-list pointer. */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    /* remember, before we prepend, whether this page already held code */
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        /* a host page may cover several target pages: protect them all */
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find (addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}
1262
bellard9fa3e852004-01-04 18:06:42 +00001263/* add a new TB and link it to the physical page tables. phys_page2 is
1264 (-1) to indicate that only one page contains the TB. */
Paul Brook41c1b1c2010-03-12 16:54:58 +00001265void tb_link_page(TranslationBlock *tb,
1266 tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
bellardd4e81642003-05-25 16:46:15 +00001267{
bellard9fa3e852004-01-04 18:06:42 +00001268 unsigned int h;
1269 TranslationBlock **ptb;
1270
pbrookc8a706f2008-06-02 16:16:42 +00001271 /* Grab the mmap lock to stop another thread invalidating this TB
1272 before we are done. */
1273 mmap_lock();
bellard9fa3e852004-01-04 18:06:42 +00001274 /* add in the physical hash table */
1275 h = tb_phys_hash_func(phys_pc);
1276 ptb = &tb_phys_hash[h];
1277 tb->phys_hash_next = *ptb;
1278 *ptb = tb;
bellardfd6ce8f2003-05-14 19:00:11 +00001279
1280 /* add in the page list */
bellard9fa3e852004-01-04 18:06:42 +00001281 tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
1282 if (phys_page2 != -1)
1283 tb_alloc_page(tb, 1, phys_page2);
1284 else
1285 tb->page_addr[1] = -1;
bellard9fa3e852004-01-04 18:06:42 +00001286
bellardd4e81642003-05-25 16:46:15 +00001287 tb->jmp_first = (TranslationBlock *)((long)tb | 2);
1288 tb->jmp_next[0] = NULL;
1289 tb->jmp_next[1] = NULL;
1290
1291 /* init original jump addresses */
1292 if (tb->tb_next_offset[0] != 0xffff)
1293 tb_reset_jump(tb, 0);
1294 if (tb->tb_next_offset[1] != 0xffff)
1295 tb_reset_jump(tb, 1);
bellard8a40a182005-11-20 10:35:40 +00001296
1297#ifdef DEBUG_TB_CHECK
1298 tb_page_check();
1299#endif
pbrookc8a706f2008-06-02 16:16:42 +00001300 mmap_unlock();
bellardfd6ce8f2003-05-14 19:00:11 +00001301}
1302
bellarda513fe12003-05-27 23:29:48 +00001303/* find the TB 'tb' such that tb[0].tc_ptr <= tc_ptr <
1304 tb[1].tc_ptr. Return NULL if not found */
1305TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1306{
1307 int m_min, m_max, m;
1308 unsigned long v;
1309 TranslationBlock *tb;
1310
1311 if (nb_tbs <= 0)
1312 return NULL;
1313 if (tc_ptr < (unsigned long)code_gen_buffer ||
1314 tc_ptr >= (unsigned long)code_gen_ptr)
1315 return NULL;
1316 /* binary search (cf Knuth) */
1317 m_min = 0;
1318 m_max = nb_tbs - 1;
1319 while (m_min <= m_max) {
1320 m = (m_min + m_max) >> 1;
1321 tb = &tbs[m];
1322 v = (unsigned long)tb->tc_ptr;
1323 if (v == tc_ptr)
1324 return tb;
1325 else if (tc_ptr < v) {
1326 m_max = m - 1;
1327 } else {
1328 m_min = m + 1;
1329 }
ths5fafdf22007-09-16 21:08:06 +00001330 }
bellarda513fe12003-05-27 23:29:48 +00001331 return &tbs[m_max];
1332}
bellard75012672003-06-21 13:11:07 +00001333
bellardea041c02003-06-25 16:16:50 +00001334static void tb_reset_jump_recursive(TranslationBlock *tb);
1335
/* Unchain jump slot 'n' of 'tb': find the destination TB, unlink 'tb'
   from the destination's circular incoming-jump list, repatch the
   generated code, then recursively unchain the destination's own
   outgoing jumps. */
static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure now that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}
1374
/* Unchain both outgoing jump slots of 'tb' (and, transitively, those
   of the TBs it jumps to). */
static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
1380
bellard1fddef42005-04-17 19:16:13 +00001381#if defined(TARGET_HAS_ICE)
Paul Brook94df27f2010-02-28 23:47:45 +00001382#if defined(CONFIG_USER_ONLY)
/* User-mode variant: guest virtual addresses map directly onto the
   page table, so invalidate the TBs covering 'pc' in place. */
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
1387#else
bellardd720b932004-04-25 17:57:43 +00001388static void breakpoint_invalidate(CPUState *env, target_ulong pc)
1389{
Anthony Liguoric227f092009-10-01 16:12:16 -05001390 target_phys_addr_t addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00001391 target_ulong pd;
Anthony Liguoric227f092009-10-01 16:12:16 -05001392 ram_addr_t ram_addr;
pbrookc2f07f82006-04-08 17:14:56 +00001393 PhysPageDesc *p;
bellardd720b932004-04-25 17:57:43 +00001394
pbrookc2f07f82006-04-08 17:14:56 +00001395 addr = cpu_get_phys_page_debug(env, pc);
1396 p = phys_page_find(addr >> TARGET_PAGE_BITS);
1397 if (!p) {
1398 pd = IO_MEM_UNASSIGNED;
1399 } else {
1400 pd = p->phys_offset;
1401 }
1402 ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
pbrook706cd4b2006-04-08 17:36:21 +00001403 tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
bellardd720b932004-04-25 17:57:43 +00001404}
bellardc27004e2005-01-03 23:35:10 +00001405#endif
Paul Brook94df27f2010-02-28 23:47:45 +00001406#endif /* TARGET_HAS_ICE */
bellardd720b932004-04-25 17:57:43 +00001407
Paul Brookc527ee82010-03-01 03:31:14 +00001408#if defined(CONFIG_USER_ONLY)
/* User-mode stub: watchpoints are not supported, nothing to remove. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)

{
}
1413
/* User-mode stub: watchpoints are not supported in this configuration. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
1419#else
pbrook6658ffb2007-03-16 23:58:11 +00001420/* Add a watchpoint. */
aliguoria1d1bb32008-11-18 20:07:32 +00001421int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
1422 int flags, CPUWatchpoint **watchpoint)
pbrook6658ffb2007-03-16 23:58:11 +00001423{
aliguorib4051332008-11-18 20:14:20 +00001424 target_ulong len_mask = ~(len - 1);
aliguoric0ce9982008-11-25 22:13:57 +00001425 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001426
aliguorib4051332008-11-18 20:14:20 +00001427 /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
1428 if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
1429 fprintf(stderr, "qemu: tried to set invalid watchpoint at "
1430 TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
1431 return -EINVAL;
1432 }
aliguoria1d1bb32008-11-18 20:07:32 +00001433 wp = qemu_malloc(sizeof(*wp));
pbrook6658ffb2007-03-16 23:58:11 +00001434
aliguoria1d1bb32008-11-18 20:07:32 +00001435 wp->vaddr = addr;
aliguorib4051332008-11-18 20:14:20 +00001436 wp->len_mask = len_mask;
aliguoria1d1bb32008-11-18 20:07:32 +00001437 wp->flags = flags;
1438
aliguori2dc9f412008-11-18 20:56:59 +00001439 /* keep all GDB-injected watchpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001440 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001441 QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001442 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001443 QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001444
pbrook6658ffb2007-03-16 23:58:11 +00001445 tlb_flush_page(env, addr);
aliguoria1d1bb32008-11-18 20:07:32 +00001446
1447 if (watchpoint)
1448 *watchpoint = wp;
1449 return 0;
pbrook6658ffb2007-03-16 23:58:11 +00001450}
1451
aliguoria1d1bb32008-11-18 20:07:32 +00001452/* Remove a specific watchpoint. */
1453int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
1454 int flags)
pbrook6658ffb2007-03-16 23:58:11 +00001455{
aliguorib4051332008-11-18 20:14:20 +00001456 target_ulong len_mask = ~(len - 1);
aliguoria1d1bb32008-11-18 20:07:32 +00001457 CPUWatchpoint *wp;
pbrook6658ffb2007-03-16 23:58:11 +00001458
Blue Swirl72cf2d42009-09-12 07:36:22 +00001459 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguorib4051332008-11-18 20:14:20 +00001460 if (addr == wp->vaddr && len_mask == wp->len_mask
aliguori6e140f22008-11-18 20:37:55 +00001461 && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
aliguoria1d1bb32008-11-18 20:07:32 +00001462 cpu_watchpoint_remove_by_ref(env, wp);
pbrook6658ffb2007-03-16 23:58:11 +00001463 return 0;
1464 }
1465 }
aliguoria1d1bb32008-11-18 20:07:32 +00001466 return -ENOENT;
pbrook6658ffb2007-03-16 23:58:11 +00001467}
1468
aliguoria1d1bb32008-11-18 20:07:32 +00001469/* Remove a specific watchpoint by reference. */
1470void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
1471{
Blue Swirl72cf2d42009-09-12 07:36:22 +00001472 QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);
edgar_igl7d03f822008-05-17 18:58:29 +00001473
aliguoria1d1bb32008-11-18 20:07:32 +00001474 tlb_flush_page(env, watchpoint->vaddr);
1475
1476 qemu_free(watchpoint);
edgar_igl7d03f822008-05-17 18:58:29 +00001477}
1478
/* Remove all watchpoints whose flags intersect 'mask'. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    /* _SAFE variant is required: the current element is freed inside
       the loop body by cpu_watchpoint_remove_by_ref(). */
    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
Paul Brookc527ee82010-03-01 03:31:14 +00001489#endif
aliguoria1d1bb32008-11-18 20:07:32 +00001490
1491/* Add a breakpoint. */
1492int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
1493 CPUBreakpoint **breakpoint)
bellard4c3a88a2003-07-26 12:06:08 +00001494{
bellard1fddef42005-04-17 19:16:13 +00001495#if defined(TARGET_HAS_ICE)
aliguoric0ce9982008-11-25 22:13:57 +00001496 CPUBreakpoint *bp;
ths3b46e622007-09-17 08:09:54 +00001497
aliguoria1d1bb32008-11-18 20:07:32 +00001498 bp = qemu_malloc(sizeof(*bp));
aliguoria1d1bb32008-11-18 20:07:32 +00001499
1500 bp->pc = pc;
1501 bp->flags = flags;
1502
aliguori2dc9f412008-11-18 20:56:59 +00001503 /* keep all GDB-injected breakpoints in front */
aliguoric0ce9982008-11-25 22:13:57 +00001504 if (flags & BP_GDB)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001505 QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
aliguoric0ce9982008-11-25 22:13:57 +00001506 else
Blue Swirl72cf2d42009-09-12 07:36:22 +00001507 QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);
aliguoria1d1bb32008-11-18 20:07:32 +00001508
1509 breakpoint_invalidate(env, pc);
1510
1511 if (breakpoint)
1512 *breakpoint = bp;
1513 return 0;
1514#else
1515 return -ENOSYS;
1516#endif
1517}
1518
/* Remove a specific breakpoint.  Returns 0 on success, -ENOENT if no
   breakpoint matches pc and flags exactly, -ENOSYS without ICE support. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}
1536
/* Remove a specific breakpoint by reference.  Frees the breakpoint. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    /* force retranslation of the affected page so the trap goes away */
    breakpoint_invalidate(env, breakpoint->pc);

    qemu_free(breakpoint);
#endif
}
1548
/* Remove all breakpoints whose flags intersect 'mask'. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    /* _SAFE variant is required: the current element is freed inside
       the loop body by cpu_breakpoint_remove_by_ref(). */
    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}
1561
/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0); /* push new state to KVM */
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}
1579
/* enable or disable low levels log */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        /* first enable: open the log file; once it has been opened
           before (log_append set), reopen in append mode */
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        /* later reopens append rather than truncate */
        log_append = 1;
    }
    if (!loglevel && logfile) {
        /* logging fully disabled: release the file */
        fclose(logfile);
        logfile = NULL;
    }
}
1607
/* Change the log file name; reopens the log immediately if logging is
   active.  NOTE(review): the previously strdup'd name is never freed
   (small leak on repeated calls) and strdup failure is unchecked —
   confirm whether the default logfilename may be a string literal
   before adding a free() here. */
void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        /* close so the next cpu_set_log() reopens under the new name */
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}
bellardc33a3462003-07-29 20:50:33 +00001617
/* Unchain the TB the CPU is currently executing (if any) so control
   returns to the CPU loop instead of following chained TB links. */
static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TB */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
1637
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001638#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case its halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        /* icount mode: force the instruction-count decrementer to expire
           so the request is examined at the next TB boundary */
        env->icount_decr.u16.high = 0xffff;
        /* raising a *new* interrupt bit outside an I/O instruction would
           make icount non-deterministic, so abort loudly */
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}

/* Interrupt delivery is routed through this function pointer; it
   defaults to the TCG implementation above. */
CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;
1668
Jan Kiszka97ffbd82011-04-13 01:32:56 +02001669#else /* CONFIG_USER_ONLY */
1670
/* User-mode emulation variant: record the request and break TB chaining. */
void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
1676#endif /* CONFIG_USER_ONLY */
1677
/* Clear previously raised interrupt request bits. */
void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}
1682
/* Ask the CPU to leave its execution loop as soon as possible. */
void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}
1688
/* Table of supported log categories: mask bit, user-visible name, help
   text.  Terminated by a { 0, NULL, NULL } sentinel; consumed by
   cpu_str_to_log_mask(). */
const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o ports accesses" },
#endif
    { 0, NULL, NULL }, /* sentinel */
};
1720
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02001721#ifndef CONFIG_USER_ONLY
/* List of registered CPUPhysMemoryClients, each notified of changes to
   the physical memory map. */
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

/* Notify every registered client that [start_addr, start_addr + size)
   now maps to phys_offset; log_dirty is forwarded unchanged to each
   client's set_memory hook. */
static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}
1735
/* Ask each client to sync its dirty bitmap for [start, end); stops at
   and returns the first negative error code, 0 on success. */
static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}
1747
/* Propagate a migration-log enable/disable to every client; stops at
   and returns the first negative error code, 0 on success. */
static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}
1758
/* Accumulator used while walking the physical page table: adjacent,
   physically-contiguous pages are coalesced into a single
   (start_addr, size, phys_offset) run before being reported. */
struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;          /* 0 means "no run pending" */
    ram_addr_t phys_offset;
};
1764
/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels vary based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        /* leaf level: a table of PhysPageDescs */
        PhysPageDesc *pd = *lp;
        /* shift the accumulated table offsets into page-address position */
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                /* extend the pending run if this page is contiguous
                   with it both in guest-physical and offset space... */
                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    /* ...otherwise flush the pending run first */
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                /* start a new run at this page */
                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        /* intermediate level: recurse into each sub-table, folding this
           level's index into the accumulated address */
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}
1811
/* Walk the whole l1_phys_map and report every mapped range to 'client',
   coalescing adjacent pages via struct last_map. */
static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };  /* size == 0: no run pending */

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    /* flush the final pending run, if any */
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}
1826
/* Register a client and immediately replay all current mappings to it. */
void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}
1832
/* Unregister a previously registered physical-memory client. */
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
1837#endif
1838
/* Return nonzero iff the NUL-terminated string s2 has length n and
   matches the first n bytes of s1. */
static int cmp1(const char *s1, int n, const char *s2)
{
    return strlen(s2) == (size_t)n && memcmp(s1, s2, n) == 0;
}
ths3b46e622007-09-17 08:09:54 +00001845
bellardf193c792004-03-21 17:06:25 +00001846/* takes a comma separated list of log masks. Return 0 if error. */
1847int cpu_str_to_log_mask(const char *str)
1848{
blueswir1c7cd6a32008-10-02 18:27:46 +00001849 const CPULogItem *item;
bellardf193c792004-03-21 17:06:25 +00001850 int mask;
1851 const char *p, *p1;
1852
1853 p = str;
1854 mask = 0;
1855 for(;;) {
1856 p1 = strchr(p, ',');
1857 if (!p1)
1858 p1 = p + strlen(p);
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001859 if(cmp1(p,p1-p,"all")) {
1860 for(item = cpu_log_items; item->mask != 0; item++) {
1861 mask |= item->mask;
1862 }
1863 } else {
1864 for(item = cpu_log_items; item->mask != 0; item++) {
1865 if (cmp1(p, p1 - p, item->name))
1866 goto found;
1867 }
1868 return 0;
bellardf193c792004-03-21 17:06:25 +00001869 }
bellardf193c792004-03-21 17:06:25 +00001870 found:
1871 mask |= item->mask;
1872 if (*p1 != ',')
1873 break;
1874 p = p1 + 1;
1875 }
1876 return mask;
1877}
bellardea041c02003-06-25 16:16:50 +00001878
/* Print a fatal error message plus CPU state to stderr (and to the qemu
   log, if enabled), then abort().  Never returns. */
void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    /* ap is consumed by the stderr vfprintf below; ap2 is a copy kept
       for the optional qemu_log output */
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        /* make sure everything reaches the log file before aborting */
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        /* reset SIGABRT to the default handler before calling abort() */
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}
1918
thsc5be9f02007-02-28 20:20:53 +00001919CPUState *cpu_copy(CPUState *env)
1920{
ths01ba9812007-12-09 02:22:57 +00001921 CPUState *new_env = cpu_init(env->cpu_model_str);
thsc5be9f02007-02-28 20:20:53 +00001922 CPUState *next_cpu = new_env->next_cpu;
1923 int cpu_index = new_env->cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001924#if defined(TARGET_HAS_ICE)
1925 CPUBreakpoint *bp;
1926 CPUWatchpoint *wp;
1927#endif
1928
thsc5be9f02007-02-28 20:20:53 +00001929 memcpy(new_env, env, sizeof(CPUState));
aliguori5a38f082009-01-15 20:16:51 +00001930
1931 /* Preserve chaining and index. */
thsc5be9f02007-02-28 20:20:53 +00001932 new_env->next_cpu = next_cpu;
1933 new_env->cpu_index = cpu_index;
aliguori5a38f082009-01-15 20:16:51 +00001934
1935 /* Clone all break/watchpoints.
1936 Note: Once we support ptrace with hw-debug register access, make sure
1937 BP_CPU break/watchpoints are handled correctly on clone. */
Blue Swirl72cf2d42009-09-12 07:36:22 +00001938 QTAILQ_INIT(&env->breakpoints);
1939 QTAILQ_INIT(&env->watchpoints);
aliguori5a38f082009-01-15 20:16:51 +00001940#if defined(TARGET_HAS_ICE)
Blue Swirl72cf2d42009-09-12 07:36:22 +00001941 QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001942 cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
1943 }
Blue Swirl72cf2d42009-09-12 07:36:22 +00001944 QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
aliguori5a38f082009-01-15 20:16:51 +00001945 cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
1946 wp->flags, NULL);
1947 }
1948#endif
1949
thsc5be9f02007-02-28 20:20:53 +00001950 return new_env;
1951}
1952
bellard01243112004-01-04 15:48:17 +00001953#if !defined(CONFIG_USER_ONLY)
1954
/* Zap the per-page tb_jmp_cache buckets that could reference TBs
   overlapping 'addr' (the page itself and the preceding one). */
static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset (&env->tb_jmp_cache[i], 0,
            TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}
1969
/* Canonical invalid TLB entry (all fields -1), assigned when flushing. */
static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read = -1,
    .addr_write = -1,
    .addr_code = -1,
    .addend = -1,
};
1976
/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    /* invalidate every slot in every MMU mode */
    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    /* the cached TB lookups must be dropped along with the TLB */
    memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    /* forget any pending large-page tracking (see tlb_add_large_page) */
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++; /* statistics */
}
2003
/* Invalidate tlb_entry if any of its read/write/code addresses matches
   the page-aligned 'addr' (TLB_INVALID_MASK is kept in the comparison
   so already-invalid entries do not spuriously match). */
static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}
2015
/* Flush all TLB entries (in every MMU mode) mapping the page at 'addr'. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    /* the page can occupy only one slot per MMU mode */
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    /* drop cached TB lookups that may reference this page */
    tlb_flush_jmp_cache(env, addr);
}
2045
/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    /* clearing CODE_DIRTY_FLAG routes writes to the page through the
       notdirty slow path (see tlb_reset_dirty_range / TLB_NOTDIRTY) */
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}
2054
/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    /* env and vaddr are unused here; setting the dirty flag is enough */
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
2062
/* If tlb_entry maps RAM and its backing host address lies within
   [start, start + length), set TLB_NOTDIRTY so the next write takes the
   slow path (where dirty bits are updated). */
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* addend converts the guest page address to a host address */
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        /* unsigned compare performs the range check in one test */
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}
2074
/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    /* clear the requested dirty bits over the whole range */
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    /* mark matching TLB entries TLB_NOTDIRTY in every CPU and MMU mode */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
2110
aliguori74576192008-10-06 14:02:03 +00002111int cpu_physical_memory_set_dirty_tracking(int enable)
2112{
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002113 int ret = 0;
aliguori74576192008-10-06 14:02:03 +00002114 in_migration = enable;
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002115 ret = cpu_notify_migration_log(!!enable);
2116 return ret;
aliguori74576192008-10-06 14:02:03 +00002117}
2118
/* Query whether dirty-memory tracking is currently enabled. */
int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}
2123
Anthony Liguoric227f092009-10-01 16:12:16 -05002124int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
2125 target_phys_addr_t end_addr)
aliguori2bec46d2008-11-24 20:21:41 +00002126{
Michael S. Tsirkin7b8f3b72010-01-27 22:07:21 +02002127 int ret;
Jan Kiszka151f7742009-05-01 20:52:47 +02002128
Michael S. Tsirkinf6f3fbc2010-01-27 22:06:57 +02002129 ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
Jan Kiszka151f7742009-05-01 20:52:47 +02002130 return ret;
aliguori2bec46d2008-11-24 20:21:41 +00002131}
2132
/* Notify clients that dirty logging starts for [start_addr, +size).
   Clients without a log_start hook are skipped; the first negative
   return value aborts the walk and is returned. */
int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}
2147
/* Notify clients that dirty logging stops for [start_addr, +size).
   Clients without a log_stop hook are skipped; the first negative
   return value aborts the walk and is returned. */
int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}
2162
/* Re-arm write trapping on a RAM TLB entry whose backing page is no
   longer marked dirty, so the next write goes through the slow path. */
static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        /* recover the ram_addr from the host pointer stored in the TLB */
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
2177
2178/* update the TLB according to the current state of the dirty bits */
2179void cpu_tlb_update_dirty(CPUState *env)
2180{
2181 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002182 int mmu_idx;
2183 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
2184 for(i = 0; i < CPU_TLB_SIZE; i++)
2185 tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
2186 }
bellard3a7d9292005-08-21 09:26:42 +00002187}
2188
pbrook0f459d12008-06-09 00:20:13 +00002189static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002190{
pbrook0f459d12008-06-09 00:20:13 +00002191 if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
2192 tlb_entry->addr_write = vaddr;
bellard1ccde1c2004-02-06 19:46:14 +00002193}
2194
pbrook0f459d12008-06-09 00:20:13 +00002195/* update the TLB corresponding to virtual page vaddr
2196 so that it is no longer dirty */
2197static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
bellard1ccde1c2004-02-06 19:46:14 +00002198{
bellard1ccde1c2004-02-06 19:46:14 +00002199 int i;
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002200 int mmu_idx;
bellard1ccde1c2004-02-06 19:46:14 +00002201
pbrook0f459d12008-06-09 00:20:13 +00002202 vaddr &= TARGET_PAGE_MASK;
bellard1ccde1c2004-02-06 19:46:14 +00002203 i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
Isaku Yamahatacfde4bd2009-05-20 11:31:43 +09002204 for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
2205 tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
bellard9fa3e852004-01-04 18:06:42 +00002206}
2207
/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.
   'size' is assumed to be a power of two (mask = ~(size - 1)). */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    /* tlb_flush_addr == -1 means no large-page region is recorded yet;
       start a fresh one covering just this page. */
    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    /* Widen the mask until both the recorded region and vaddr fall into
       the same aligned block. */
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
2230
/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page.
   'prot' is a PAGE_READ/PAGE_WRITE/PAGE_EXEC bitmask; 'mmu_idx'
   selects which per-mode TLB table receives the entry. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        /* Record the large page so future invalidations flush it fully. */
        tlb_add_large_page(env, vaddr, size);
    }
    /* Look up the physical page descriptor; absent pages are treated
       as unassigned I/O. */
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address.  */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    /* Fill in the entry; iotlb and addend are stored as offsets that
       can simply be added to the virtual address at lookup time. */
    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            /* Clean RAM page: trap the first write so the dirty bitmap
               can be updated. */
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
2336
bellard01243112004-01-04 15:48:17 +00002337#else
2338
/* User-mode-only build (see the #else above): there is no softmmu TLB,
   so this is an intentional no-op kept for interface compatibility. */
void tlb_flush(CPUState *env, int flush_global)
{
}
2342
/* User-mode-only build: no softmmu TLB to flush -- intentional no-op. */
void tlb_flush_page(CPUState *env, target_ulong addr)
{
}
2346
/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

/* Accumulator threaded through the recursive page-table walk. */
struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;  /* callback invoked per contiguous region */
    void *priv;                 /* opaque argument forwarded to fn */
    unsigned long start;        /* start of the open region, -1ul if none */
    int prot;                   /* protection flags of the open region */
};
bellard9fa3e852004-01-04 18:06:42 +00002359
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002360static int walk_memory_regions_end(struct walk_memory_regions_data *data,
Paul Brookb480d9b2010-03-12 23:23:29 +00002361 abi_ulong end, int new_prot)
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002362{
2363 if (data->start != -1ul) {
2364 int rc = data->fn(data->priv, data->start, end, data->prot);
2365 if (rc != 0) {
2366 return rc;
bellard9fa3e852004-01-04 18:06:42 +00002367 }
bellard33417e72003-08-10 21:47:01 +00002368 }
Richard Henderson5cd2c5b2010-03-10 15:53:37 -08002369
2370 data->start = (new_prot ? end : -1ul);
2371 data->prot = new_prot;
2372
2373 return 0;
2374}
2375
/* Recursive helper for walk_memory_regions(): walk one node of the
   multi-level page table rooted at *lp, covering guest addresses that
   start at 'base'.  Level 0 nodes hold PageDesc leaves; higher levels
   hold child pointers.  Returns the first non-zero callback result, or
   0 when the subtree was fully walked. */
static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    /* An absent node means an unmapped range: close any open region. */
    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            /* A protection change ends the current region and starts
               a new one. */
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}
2413
/* Walk all mapped guest memory regions, invoking 'fn(priv, start, end,
   prot)' for each maximal run of pages sharing the same protection.
   Returns the first non-zero value returned by 'fn', else 0. */
int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;   /* no region open yet */
    data.prot = 0;

    /* Visit every top-level (L1) page-table slot in address order. */
    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    /* Flush the final open region, if any. */
    return walk_memory_regions_end(&data, 0, 0);
}
2434
Paul Brookb480d9b2010-03-12 23:23:29 +00002435static int dump_region(void *priv, abi_ulong start,
2436 abi_ulong end, unsigned long prot)
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002437{
2438 FILE *f = (FILE *)priv;
2439
Paul Brookb480d9b2010-03-12 23:23:29 +00002440 (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
2441 " "TARGET_ABI_FMT_lx" %c%c%c\n",
Mika Westerbergedf8e2a2009-04-07 09:57:11 +03002442 start, end, end - start,
2443 ((prot & PAGE_READ) ? 'r' : '-'),
2444 ((prot & PAGE_WRITE) ? 'w' : '-'),
2445 ((prot & PAGE_EXEC) ? 'x' : '-'));
2446
2447 return (0);
2448}
2449
/* dump memory mappings: write a header line followed by one line per
   mapped region (via dump_region) to stream 'f'. */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
            "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
2457
pbrook53a59602006-03-25 19:31:22 +00002458int page_get_flags(target_ulong address)
bellard33417e72003-08-10 21:47:01 +00002459{
bellard9fa3e852004-01-04 18:06:42 +00002460 PageDesc *p;
2461
2462 p = page_find(address >> TARGET_PAGE_BITS);
bellard33417e72003-08-10 21:47:01 +00002463 if (!p)
bellard9fa3e852004-01-04 18:06:42 +00002464 return 0;
2465 return p->flags;
bellard33417e72003-08-10 21:47:01 +00002466}
2467
/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        /* Remember the original writability so write protection added
           for translated code can later be undone (page_unprotect). */
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside.  */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}
2505
ths3d97b402007-11-02 19:02:07 +00002506int page_check_range(target_ulong start, target_ulong len, int flags)
2507{
2508 PageDesc *p;
2509 target_ulong end;
2510 target_ulong addr;
2511
Richard Henderson376a7902010-03-10 15:57:04 -08002512 /* This function should never be called with addresses outside the
2513 guest address space. If this assert fires, it probably indicates
2514 a missing call to h2g_valid. */
Blue Swirl338e9e62010-03-13 09:48:08 +00002515#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
2516 assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
Richard Henderson376a7902010-03-10 15:57:04 -08002517#endif
2518
Richard Henderson3e0650a2010-03-29 10:54:42 -07002519 if (len == 0) {
2520 return 0;
2521 }
Richard Henderson376a7902010-03-10 15:57:04 -08002522 if (start + len - 1 < start) {
2523 /* We've wrapped around. */
balrog55f280c2008-10-28 10:24:11 +00002524 return -1;
Richard Henderson376a7902010-03-10 15:57:04 -08002525 }
balrog55f280c2008-10-28 10:24:11 +00002526
ths3d97b402007-11-02 19:02:07 +00002527 end = TARGET_PAGE_ALIGN(start+len); /* must do before we loose bits in the next step */
2528 start = start & TARGET_PAGE_MASK;
2529
Richard Henderson376a7902010-03-10 15:57:04 -08002530 for (addr = start, len = end - start;
2531 len != 0;
2532 len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
ths3d97b402007-11-02 19:02:07 +00002533 p = page_find(addr >> TARGET_PAGE_BITS);
2534 if( !p )
2535 return -1;
2536 if( !(p->flags & PAGE_VALID) )
2537 return -1;
2538
bellarddae32702007-11-14 10:51:00 +00002539 if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
ths3d97b402007-11-02 19:02:07 +00002540 return -1;
bellarddae32702007-11-14 10:51:00 +00002541 if (flags & PAGE_WRITE) {
2542 if (!(p->flags & PAGE_WRITE_ORG))
2543 return -1;
2544 /* unprotect the page if it was put read-only because it
2545 contains translated code */
2546 if (!(p->flags & PAGE_WRITE)) {
2547 if (!page_unprotect(addr, 0, NULL))
2548 return -1;
2549 }
2550 return 0;
2551 }
ths3d97b402007-11-02 19:02:07 +00002552 }
2553 return 0;
2554}
2555
/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled.
   'pc' and 'puc' are forwarded to tb_invalidate_phys_page so a TB
   being executed at the faulting pc can be handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        /* mprotect works at host-page granularity, which may span
           several target pages; handle the whole host page. */
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start ; addr < host_end ; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;   /* accumulate protections of all target pages */

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2603
/* User-mode-only build: no softmmu TLB, so dirty marking is a no-op. */
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
bellard9fa3e852004-01-04 18:06:42 +00002608#endif /* defined(CONFIG_USER_ONLY) */
2609
pbrooke2eef172008-06-08 01:09:01 +00002610#if !defined(CONFIG_USER_ONLY)
pbrook8da3ff12008-12-01 18:59:50 +00002611
/* A subpage splits a single target page into ranges that dispatch to
   different I/O handlers.  SUBPAGE_IDX yields the byte offset of an
   address within its page. */
#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;                    /* page-aligned base address */
    /* NOTE(review): both arrays appear to be indexed by byte offset
       within the page -- confirm against subpage_register/subpage_init. */
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
/* Compute the sub-page range [start_addr2, end_addr2] occupied by the
   interval [start_addr, start_addr + orig_size) inside the page at
   'addr', setting need_subpage when the interval does not cover the
   whole page.  NOTE(review): the macro is not hygienic -- it reads
   'orig_size' from the caller's scope and assigns through its
   start_addr2/end_addr2/need_subpage arguments. */
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)
2643
/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ.
   'log_dirty' is passed through to the memory-client notification. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;   /* also read implicitly by CHECK_SUBPAGE */
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            /* Page already mapped: may need to split it into a subpage
               if the new range covers it only partially. */
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    /* Page is already a subpage: reuse it. */
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                /* For RAM/ROM(D) pages, advance the backing offset page
                   by page; I/O pages keep the same handler value. */
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            /* Previously unmapped page: allocate a descriptor. */
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}
2737
bellardba863452006-09-24 18:41:10 +00002738/* XXX: temporary until new memory mapping API */
Anthony Liguoric227f092009-10-01 16:12:16 -05002739ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
bellardba863452006-09-24 18:41:10 +00002740{
2741 PhysPageDesc *p;
2742
2743 p = phys_page_find(addr >> TARGET_PAGE_BITS);
2744 if (!p)
2745 return IO_MEM_UNASSIGNED;
2746 return p->phys_offset;
2747}
2748
Anthony Liguoric227f092009-10-01 16:12:16 -05002749void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002750{
2751 if (kvm_enabled())
2752 kvm_coalesce_mmio_region(addr, size);
2753}
2754
Anthony Liguoric227f092009-10-01 16:12:16 -05002755void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
aliguorif65ed4c2008-12-09 20:09:57 +00002756{
2757 if (kvm_enabled())
2758 kvm_uncoalesce_mmio_region(addr, size);
2759}
2760
/* Drain KVM's coalesced-MMIO ring buffer; no-op without KVM. */
void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled()) {
        kvm_flush_coalesced_mmio_buffer();
    }
}
2766
Marcelo Tosattic9027602010-03-01 20:25:08 -03002767#if defined(__linux__) && !defined(TARGET_S390X)
2768
2769#include <sys/vfs.h>
2770
2771#define HUGETLBFS_MAGIC 0x958458f6
2772
/* Return the huge-page size (filesystem block size) of the hugetlbfs
   mount at 'path', or 0 on statfs failure.  If 'path' is not on
   hugetlbfs, only a warning is printed and f_bsize is still returned. */
static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    /* Retry statfs when interrupted by a signal. */
    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}
2792
/* Back 'memory' bytes of guest RAM with a huge-page file created under
   'path' (a hugetlbfs mount).  On success the mapped area is returned
   and the (already unlinked) file descriptor is stored in block->fd;
   on any failure NULL is returned and the caller must fall back to
   ordinary allocation. */
static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    /* Requests smaller than one huge page cannot use this path. */
    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    /* Unlink immediately: the fd keeps the file alive, and it vanishes
       automatically when QEMU exits. */
    unlink(filename);
    free(filename);

    /* Round the size up to a whole number of huge pages. */
    memory = (memory+hpagesize-1) & ~(hpagesize-1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return (NULL);
    }
    block->fd = fd;
    return area;
}
2861#endif
2862
Alex Williamsond17b5282010-06-25 11:08:38 -06002863static ram_addr_t find_ram_offset(ram_addr_t size)
2864{
Alex Williamson04b16652010-07-02 11:13:17 -06002865 RAMBlock *block, *next_block;
Blue Swirl09d7ae92010-07-07 19:37:53 +00002866 ram_addr_t offset = 0, mingap = ULONG_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06002867
2868 if (QLIST_EMPTY(&ram_list.blocks))
2869 return 0;
2870
2871 QLIST_FOREACH(block, &ram_list.blocks, next) {
2872 ram_addr_t end, next = ULONG_MAX;
2873
2874 end = block->offset + block->length;
2875
2876 QLIST_FOREACH(next_block, &ram_list.blocks, next) {
2877 if (next_block->offset >= end) {
2878 next = MIN(next, next_block->offset);
2879 }
2880 }
2881 if (next - end >= size && next - end < mingap) {
2882 offset = end;
2883 mingap = next - end;
2884 }
2885 }
2886 return offset;
2887}
2888
2889static ram_addr_t last_ram_offset(void)
2890{
Alex Williamsond17b5282010-06-25 11:08:38 -06002891 RAMBlock *block;
2892 ram_addr_t last = 0;
2893
2894 QLIST_FOREACH(block, &ram_list.blocks, next)
2895 last = MAX(last, block->offset + block->length);
2896
2897 return last;
2898}
2899
/* Register a new RAM block of 'size' bytes and return its guest offset.
 *
 * dev/name form a unique id string for the block (used by migration);
 * registering the same id twice aborts.  If 'host' is non-NULL, the
 * caller provides (and owns) the backing memory; otherwise memory is
 * allocated here, via -mem-path file mapping, a fixed S390 KVM mmap,
 * Xen, or plain qemu_vmalloc() depending on the build/runtime config.
 * The new block's pages are marked fully dirty in ram_list.phys_dirty. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = qemu_mallocz(sizeof(*new_block));

    /* Prefix the id with the device's qdev path when one is available,
       so blocks from identical devices on different buses stay unique. */
    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            qemu_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    /* Duplicate ids would break migration's block lookup: refuse them. */
    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        /* Caller-provided memory: remember we must not free it ourselves. */
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                /* File-backed allocation failed: fall back to anonymous
                   memory so the guest can still start. */
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system defined value, which is at least 256GB. Larger systems
               have larger values. We put the guest between the end of data
               segment (system break) and this value. We use 32GB as a base to
               have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                /* Xen grants the memory itself; block->host stays NULL and
                   is mapped lazily via the map cache. */
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    /* Grow the dirty bitmap to cover the new end of RAM and mark every
       page of the new block dirty (0xff = all dirty flag bits set). */
    ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                       last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}
2979
Alex Williamson1724f042010-06-25 11:09:35 -06002980ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
pbrook94a6b542009-04-11 17:15:54 +00002981{
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09002982 return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
pbrook94a6b542009-04-11 17:15:54 +00002983}
bellarde9a1ab12007-02-08 23:08:38 +00002984
Alex Williamson1f2e98b2011-05-03 12:48:09 -06002985void qemu_ram_free_from_ptr(ram_addr_t addr)
2986{
2987 RAMBlock *block;
2988
2989 QLIST_FOREACH(block, &ram_list.blocks, next) {
2990 if (addr == block->offset) {
2991 QLIST_REMOVE(block, next);
2992 qemu_free(block);
2993 return;
2994 }
2995 }
2996}
2997
/* Unregister the RAM block starting at 'addr' and release its backing
 * memory through whichever mechanism allocated it: nothing for
 * caller-preallocated blocks, munmap/close or qemu_vfree for -mem-path
 * blocks, munmap for the S390 KVM mapping, Xen map-cache invalidation,
 * or qemu_vfree otherwise.  No-op when no block starts at 'addr'. */
void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ; /* caller owns the memory; nothing to free here */
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    /* file-backed mapping from file_ram_alloc() */
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    /* fallback anonymous allocation */
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            qemu_free(block);
            return;
        }
    }

}
3035
#ifndef _WIN32
/* Re-establish the host mapping for the guest RAM range [addr, addr+length),
 * discarding its current contents.  Used e.g. to recover from hardware
 * memory errors: the poisoned pages are unmapped and replaced with a fresh
 * mapping at the same virtual address (MAP_FIXED), using the same kind of
 * mapping the block was originally created with.  Exits on failure. */
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* unsigned wrap makes this false when addr < block->offset */
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ; /* caller-owned memory: not ours to remap */
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        /* Prefault pages when -mem-prealloc was given. */
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                /* MAP_FIXED should land exactly at vaddr; anything else
                   (including MAP_FAILED) leaves the guest unusable. */
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: %lx@%lx\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */
3095
pbrookdc828ca2009-04-09 22:21:07 +00003096/* Return a host pointer to ram allocated with qemu_ram_alloc.
pbrook5579c7f2009-04-11 14:47:08 +00003097 With the exception of the softmmu code in this file, this should
3098 only be used for local memory (e.g. video ram) that the device owns,
3099 and knows it isn't going to access beyond the end of the block.
3100
3101 It should not be used for general purpose DMA.
3102 Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
3103 */
/* Translate a guest ram_addr_t into a host pointer within its RAM block.
 * Moves the matching block to the front of ram_list (MRU) to speed up
 * repeated lookups; under Xen, maps the block lazily through the map
 * cache.  Aborts if 'addr' is not covered by any registered block. */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* unsigned wrap makes this false when addr < block->offset */
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list.  */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3136
Michael S. Tsirkinb2e0a132010-11-22 19:52:34 +02003137/* Return a host pointer to ram allocated with qemu_ram_alloc.
3138 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
3139 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* unsigned wrap makes this false when addr < block->offset */
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    /* No registered block covers 'addr': fatal internal error. */
    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}
3167
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01003168/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
3169 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    /* Nothing to map for a zero-length request. */
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        /* Xen: always go through the map cache, locking the mapping. */
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                /* Clamp *size so the returned span stays inside the block. */
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}
3192
/* Release a pointer obtained from qemu_get_ram_ptr()/qemu_ram_ptr_length().
   Currently only emits a trace event; kept so callers balance get/put. */
void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}
3197
/* Reverse lookup: translate a host pointer back into a guest ram_addr_t.
 * Stores the result in *ram_addr and returns 0 on success, -1 when the
 * pointer does not fall inside any registered RAM block.  Under Xen the
 * translation is delegated to the map cache. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped (Xen-style lazy
           mapping leaves host == NULL). */
        if (block->host == NULL) {
            continue;
        }
        /* NOTE(review): relies on the negative ptrdiff (host below
           block->host) converting to a huge unsigned value, making the
           comparison false — confirm ram_addr_t is at least as wide as
           ptrdiff_t on all supported hosts. */
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}
Alex Williamsonf471a172010-06-11 11:11:42 -06003221
Marcelo Tosattie8902612010-10-11 15:31:19 -03003222/* Some of the softmmu routines need to translate from a host pointer
3223 (typically a TLB entry) back to a ram offset. */
3224ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
3225{
3226 ram_addr_t ram_addr;
Alex Williamsonf471a172010-06-11 11:11:42 -06003227
Marcelo Tosattie8902612010-10-11 15:31:19 -03003228 if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
3229 fprintf(stderr, "Bad ram pointer %p\n", ptr);
3230 abort();
3231 }
3232 return ram_addr;
pbrook5579c7f2009-04-11 14:47:08 +00003233}
3234
/* Byte read from an address with no RAM or device mapped: optionally log,
   raise a target-specific unassigned-access fault on CPUs that model one,
   and read back as zero. */
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 1);
#endif
    return 0;
}
3245
/* 16-bit variant of unassigned_mem_readb: fault (where modeled), read 0. */
static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 2);
#endif
    return 0;
}
3256
/* 32-bit variant of unassigned_mem_readb: fault (where modeled), read 0. */
static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 0, 0, 0, 4);
#endif
    return 0;
}
3267
/* Byte write to an address with no RAM or device mapped: optionally log,
   raise a target-specific fault where modeled, otherwise discard. */
static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 1);
#endif
}
3277
/* 16-bit variant of unassigned_mem_writeb: fault where modeled, else drop. */
static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 2);
#endif
}
3287
/* 32-bit variant of unassigned_mem_writeb: fault where modeled, else drop. */
static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    do_unassigned_access(addr, 1, 0, 0, 4);
#endif
}
3297
/* Dispatch table (byte/word/long) for reads from unassigned addresses. */
static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};
3303
/* Dispatch table (byte/word/long) for writes to unassigned addresses. */
static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};
3309
/* Byte write to a RAM page being dirty-tracked: invalidate any translated
   code on the page first, perform the store, then mark the page dirty.
   Once all dirty bits are set the slow path is no longer needed and the
   TLB entry is switched back to a plain RAM mapping. */
static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        /* page may contain translated code: drop the affected TBs, then
           re-read the flags which the invalidation may have changed */
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3329
/* 16-bit variant of notdirty_mem_writeb (same invalidate/store/mark-dirty
   protocol, 2-byte access). */
static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3349
/* 32-bit variant of notdirty_mem_writeb (same invalidate/store/mark-dirty
   protocol, 4-byte access). */
static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}
3369
/* Placeholder read table paired with notdirty_mem_write: the notdirty
   I/O slot is only ever written through, so reads must never happen. */
static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};
3375
/* Dispatch table (byte/word/long) for dirty-tracking RAM writes. */
static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};
3381
pbrook0f459d12008-06-09 00:20:13 +00003382/* Generate a debug exception if a watchpoint has been hit. */
/* Check whether the memory access at mem_io_vaddr+offset hits one of the
 * current CPU's watchpoints matching 'flags' (BP_MEM_READ/WRITE); len_mask
 * selects the access width.  On a hit the current TB is invalidated and
 * either EXCP_DEBUG is raised (stop-before-access) or a single-instruction
 * TB is generated so the access completes before the debug stop. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        /* match if either range (access vs. watchpoint) contains the other */
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                /* restore guest CPU state to the faulting instruction,
                   then drop the TB so it gets retranslated */
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    /* let the access finish: translate just one insn,
                       then the re-entry path above raises the debug irq */
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}
3426
pbrook6658ffb2007-03-16 23:58:11 +00003427/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
3428 so these check for a hit then pass through to the normal out-of-line
3429 phys routines. */
/* Byte read through a watchpointed page: run the watchpoint check, then
   forward to the normal physical-memory read path. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}
3435
/* 16-bit watchpointed read: check, then forward to lduw_phys. */
static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}
3441
/* 32-bit watchpointed read: check, then forward to ldl_phys. */
static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}
3447
/* Byte write through a watchpointed page: check, then forward to stb_phys. */
static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}
3454
/* 16-bit watchpointed write: check, then forward to stw_phys. */
static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}
3461
/* 32-bit watchpointed write: check, then forward to stl_phys. */
static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}
3468
/* Dispatch table (byte/word/long) for watchpointed reads. */
static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};
3474
/* Dispatch table (byte/word/long) for watchpointed writes. */
static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};
pbrook6658ffb2007-03-16 23:58:11 +00003480
/* Read of 2^len bytes from a subpage-split page: look up which registered
   handler covers this sub-region, apply its region offset, and dispatch
   through the global io_mem_read table. */
static inline uint32_t subpage_readlen (subpage_t *mmio,
                                        target_phys_addr_t addr,
                                        unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}
3495
/* Write counterpart of subpage_readlen: dispatch a 2^len-byte write to
   the handler registered for this sub-region of the page. */
static inline void subpage_writelen (subpage_t *mmio, target_phys_addr_t addr,
                                     uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}
3509
/* Byte (len index 0) subpage read wrapper. */
static uint32_t subpage_readb (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}
3514
/* Byte (len index 0) subpage write wrapper. */
static void subpage_writeb (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}
3520
/* 16-bit (len index 1) subpage read wrapper. */
static uint32_t subpage_readw (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}
3525
/* 16-bit (len index 1) subpage write wrapper. */
static void subpage_writew (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}
3531
/* 32-bit (len index 2) subpage read wrapper. */
static uint32_t subpage_readl (void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}
3536
/* 32-bit (len index 2) subpage write wrapper. */
static void subpage_writel (void *opaque, target_phys_addr_t addr,
                            uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}
3542
/* Dispatch table (byte/word/long) for subpage reads. */
static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};
3548
/* Dispatch table (byte/word/long) for subpage writes. */
static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};
3554
/* Register the I/O handler 'memory' for the [start, end] byte range
 * (page-relative, inclusive) of a subpage, remembering 'region_offset'
 * for address translation.  Returns -1 when the range exceeds the page,
 * 0 on success. */
static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    /* RAM cannot be accessed through the subpage slow path; treat it as
       unassigned within a subpage. */
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    /* reduce to a bare io_mem table index */
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}
3578
/* Create a subpage container for the page at 'base': register its
 * read/write dispatchers as a new I/O memory slot, store the resulting
 * descriptor (tagged IO_MEM_SUBPAGE) in *phys, and initialize the whole
 * page to 'orig_memory'/'region_offset'.  Returns the new subpage. */
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = qemu_mallocz(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}
3600
aliguori88715652009-02-11 15:20:58 +00003601static int get_free_io_mem_idx(void)
3602{
3603 int i;
3604
3605 for (i = 0; i<IO_MEM_NB_ENTRIES; i++)
3606 if (!io_mem_used[i]) {
3607 io_mem_used[i] = 1;
3608 return i;
3609 }
Riku Voipioc6703b42009-12-03 15:56:05 +02003610 fprintf(stderr, "RAN out out io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
aliguori88715652009-02-11 15:20:58 +00003611 return -1;
3612}
3613
/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

/* Wrapper state installed by swapendian_init(): saves a slot's original
 * read/write callbacks and opaque pointer so the swapendian_* shims can
 * forward to them after byte-swapping. */
typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];   /* original handlers by size: byte/word/long */
    CPUWriteMemoryFunc *write[3];
    void *opaque;                 /* original device opaque */
} SwapEndianContainer;
3632
3633static uint32_t swapendian_mem_readb (void *opaque, target_phys_addr_t addr)
3634{
3635 uint32_t val;
3636 SwapEndianContainer *c = opaque;
3637 val = c->read[0](c->opaque, addr);
3638 return val;
3639}
3640
3641static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
3642{
3643 uint32_t val;
3644 SwapEndianContainer *c = opaque;
3645 val = bswap16(c->read[1](c->opaque, addr));
3646 return val;
3647}
3648
3649static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
3650{
3651 uint32_t val;
3652 SwapEndianContainer *c = opaque;
3653 val = bswap32(c->read[2](c->opaque, addr));
3654 return val;
3655}
3656
/* Byte-swapping read shims, indexed by access size (0=byte, 1=word, 2=long). */
static CPUReadMemoryFunc * const swapendian_readfn[3]={
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};
3662
3663static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
3664 uint32_t val)
3665{
3666 SwapEndianContainer *c = opaque;
3667 c->write[0](c->opaque, addr, val);
3668}
3669
3670static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
3671 uint32_t val)
3672{
3673 SwapEndianContainer *c = opaque;
3674 c->write[1](c->opaque, addr, bswap16(val));
3675}
3676
3677static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
3678 uint32_t val)
3679{
3680 SwapEndianContainer *c = opaque;
3681 c->write[2](c->opaque, addr, bswap32(val));
3682}
3683
/* Byte-swapping write shims, indexed by access size (0=byte, 1=word, 2=long). */
static CPUWriteMemoryFunc * const swapendian_writefn[3]={
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};
3689
3690static void swapendian_init(int io_index)
3691{
3692 SwapEndianContainer *c = qemu_malloc(sizeof(SwapEndianContainer));
3693 int i;
3694
3695 /* Swap mmio for big endian targets */
3696 c->opaque = io_mem_opaque[io_index];
3697 for (i = 0; i < 3; i++) {
3698 c->read[i] = io_mem_read[io_index][i];
3699 c->write[i] = io_mem_write[io_index][i];
3700
3701 io_mem_read[io_index][i] = swapendian_readfn[i];
3702 io_mem_write[io_index][i] = swapendian_writefn[i];
3703 }
3704 io_mem_opaque[io_index] = c;
3705}
3706
3707static void swapendian_del(int io_index)
3708{
3709 if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
3710 qemu_free(io_mem_opaque[io_index]);
3711 }
3712}
3713
/* mem_read and mem_write are arrays of functions giving the handler for
   byte (index 0), word (index 1) and dword (index 2) accesses.  Entries
   may be omitted with a NULL function pointer, in which case the
   unassigned_mem_* handler is used.
   If io_index is non zero, the corresponding io zone is modified.  If it
   is zero, a new io zone is allocated.  The return value can be used with
   cpu_register_physical_memory().  (-1) is returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        /* Allocate a fresh slot from the io_mem tables. */
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        /* Caller passed a pre-shifted IO_MEM_* value; recover the index. */
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    /* Populate the dispatch tables, falling back to unassigned handlers. */
    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    /* Install byte-swap shims when device endianness differs from the
       target CPU's (see the endianness table above SwapEndianContainer). */
    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}
bellard61382a52003-10-27 21:22:23 +00003766
/* Public entry point: allocate a new io zone (io_index 0 means "allocate").
   See cpu_register_io_memory_fixed() for the handler-table conventions. */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}
3773
aliguori88715652009-02-11 15:20:58 +00003774void cpu_unregister_io_memory(int io_table_address)
3775{
3776 int i;
3777 int io_index = io_table_address >> IO_MEM_SHIFT;
3778
Alexander Grafdd310532010-12-08 12:05:36 +01003779 swapendian_del(io_index);
3780
aliguori88715652009-02-11 15:20:58 +00003781 for (i=0;i < 3; i++) {
3782 io_mem_read[io_index][i] = unassigned_mem_read[i];
3783 io_mem_write[io_index][i] = unassigned_mem_write[i];
3784 }
3785 io_mem_opaque[io_index] = NULL;
3786 io_mem_used[io_index] = 0;
3787}
3788
/* Register the fixed built-in io zones (ROM, unassigned, notdirty) at their
   reserved indices, reserve the low slots, and register the watchpoint zone. */
static void io_mem_init(void)
{
    int i;

    cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
                                 unassigned_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
                                 notdirty_mem_write, NULL,
                                 DEVICE_NATIVE_ENDIAN);
    /* Mark slots 0..4 as taken so get_free_io_mem_idx() never hands them
       out — presumably covering the fixed IO_MEM_* indices above plus the
       RAM/subpage slots; confirm against the IO_MEM_* definitions. */
    for (i=0; i<5; i++)
        io_mem_used[i] = 1;

    io_mem_watch = cpu_register_io_memory(watch_mem_read,
                                          watch_mem_write, NULL,
                                          DEVICE_NATIVE_ENDIAN);
}
3809
pbrooke2eef172008-06-08 01:09:01 +00003810#endif /* !defined(CONFIG_USER_ONLY) */
3811
bellard13eb76e2004-01-24 15:23:36 +00003812/* physical memory access (slow version, mainly for debug) */
3813#if defined(CONFIG_USER_ONLY)
/* Debugger access to guest memory (user-mode emulation variant).
 * Copies 'len' bytes between 'buf' and guest virtual address 'addr',
 * page by page, honouring the page protection flags.
 * Returns 0 on success, -1 if any page is invalid or lacks the needed
 * read/write permission (a partial transfer may already have happened).
 */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void * p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp the chunk to the end of the current guest page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            /* Length 0: nothing was written back to guest memory. */
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
bellard8df1cd02005-01-28 22:37:22 +00003852
bellard13eb76e2004-01-24 15:23:36 +00003853#else
/* Copy 'len' bytes between 'buf' and guest physical address 'addr',
 * splitting the transfer at page boundaries.  RAM pages are accessed via
 * memcpy on the host mapping; MMIO pages are dispatched through the
 * io_mem handler tables in the largest naturally-aligned units possible
 * (4, 2, then 1 byte).  Writes to RAM invalidate any translated code on
 * the touched range and update the dirty bitmap.
 */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this chunk to the end of the current guest page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                /* MMIO write: dispatch through the io_mem handler. */
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                unsigned long addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
bellard8df1cd02005-01-28 22:37:22 +00003951
/* used for ROM loading : can write in RAM and ROM.
 * Like the write path of cpu_physical_memory_rw() but also writes into
 * ROM-backed pages; MMIO pages are silently skipped.  Note: does NOT
 * update the dirty bitmap or invalidate translated code. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        /* Clamp this chunk to the end of the current guest page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing: page is MMIO, not RAM/ROM backed */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
3991
/* Single global bounce buffer used by cpu_physical_memory_map() when the
 * target range is not RAM-backed.  'buffer' == NULL means it is free. */
typedef struct {
    void *buffer;            /* host allocation, one page max */
    target_phys_addr_t addr; /* guest physical address it shadows */
    target_phys_addr_t len;  /* bytes covered */
} BounceBuffer;

static BounceBuffer bounce;
3999
/* Callback registration for callers waiting on the bounce buffer:
 * each entry is notified (and then removed) by cpu_notify_map_clients(). */
typedef struct MapClient {
    void *opaque;                    /* passed back to the callback */
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004008
4009void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
4010{
4011 MapClient *client = qemu_malloc(sizeof(*client));
4012
4013 client->opaque = opaque;
4014 client->callback = callback;
Blue Swirl72cf2d42009-09-12 07:36:22 +00004015 QLIST_INSERT_HEAD(&map_client_list, client, link);
aliguoriba223c22009-01-22 16:59:16 +00004016 return client;
4017}
4018
4019void cpu_unregister_map_client(void *_client)
4020{
4021 MapClient *client = (MapClient *)_client;
4022
Blue Swirl72cf2d42009-09-12 07:36:22 +00004023 QLIST_REMOVE(client, link);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004024 qemu_free(client);
aliguoriba223c22009-01-22 16:59:16 +00004025}
4026
4027static void cpu_notify_map_clients(void)
4028{
4029 MapClient *client;
4030
Blue Swirl72cf2d42009-09-12 07:36:22 +00004031 while (!QLIST_EMPTY(&map_client_list)) {
4032 client = QLIST_FIRST(&map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00004033 client->callback(client->opaque);
Isaku Yamahata34d5e942009-06-26 18:57:18 +09004034 cpu_unregister_map_client(client);
aliguoriba223c22009-01-22 16:59:16 +00004035 }
4036}
4037
/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 *
 * RAM-backed ranges are mapped directly via qemu_ram_ptr_length(); a
 * non-RAM page is served from the single global bounce buffer (at most
 * one page, at most one user at a time).
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;   /* bytes of contiguous RAM gathered so far */
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = ULONG_MAX;  /* ram address of the first RAM page */
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            /* Non-RAM page: stop if we already gathered RAM, or if the
               bounce buffer is busy with another mapping. */
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                /* Pre-fill the bounce buffer for the read case. */
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}
4098
/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 *
 * A direct RAM mapping gets dirty-bitmap/TB maintenance per page; the
 * bounce buffer path flushes the data back to guest memory, releases the
 * buffer and wakes any registered map clients.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        /* Direct RAM mapping. */
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    /* Bounce buffer: write back what the caller touched, then free it. */
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
bellardd0ecd2a2006-04-23 17:14:48 +00004137
bellard8df1cd02005-01-28 22:37:22 +00004138/* warning: addr must be aligned */
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004139static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
4140 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00004141{
4142 int io_index;
4143 uint8_t *ptr;
4144 uint32_t val;
4145 unsigned long pd;
4146 PhysPageDesc *p;
4147
4148 p = phys_page_find(addr >> TARGET_PAGE_BITS);
4149 if (!p) {
4150 pd = IO_MEM_UNASSIGNED;
4151 } else {
4152 pd = p->phys_offset;
4153 }
ths3b46e622007-09-17 08:09:54 +00004154
ths5fafdf22007-09-16 21:08:06 +00004155 if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
bellard2a4188a2006-06-25 21:54:59 +00004156 !(pd & IO_MEM_ROMD)) {
bellard8df1cd02005-01-28 22:37:22 +00004157 /* I/O case */
4158 io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
pbrook8da3ff12008-12-01 18:59:50 +00004159 if (p)
4160 addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
bellard8df1cd02005-01-28 22:37:22 +00004161 val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004162#if defined(TARGET_WORDS_BIGENDIAN)
4163 if (endian == DEVICE_LITTLE_ENDIAN) {
4164 val = bswap32(val);
4165 }
4166#else
4167 if (endian == DEVICE_BIG_ENDIAN) {
4168 val = bswap32(val);
4169 }
4170#endif
bellard8df1cd02005-01-28 22:37:22 +00004171 } else {
4172 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00004173 ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
bellard8df1cd02005-01-28 22:37:22 +00004174 (addr & ~TARGET_PAGE_MASK);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004175 switch (endian) {
4176 case DEVICE_LITTLE_ENDIAN:
4177 val = ldl_le_p(ptr);
4178 break;
4179 case DEVICE_BIG_ENDIAN:
4180 val = ldl_be_p(ptr);
4181 break;
4182 default:
4183 val = ldl_p(ptr);
4184 break;
4185 }
bellard8df1cd02005-01-28 22:37:22 +00004186 }
4187 return val;
4188}
4189
/* Load a 32-bit value in the guest CPU's native byte order. */
uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 32-bit little-endian value. */
uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 32-bit big-endian value. */
uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
4204
/* warning: addr must be aligned.
 * Load a 64-bit value from guest physical memory in the byte order
 * requested by 'endian'.  MMIO is split into two 32-bit reads. */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}
4256
/* Load a 64-bit value in the guest CPU's native byte order. */
uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 64-bit little-endian value. */
uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 64-bit big-endian value. */
uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
4271
/* XXX: optimize */
/* Load one byte from guest physical memory (no alignment constraint;
   a single byte has no endianness). */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}
4279
/* warning: addr must be aligned.
 * Load a 16-bit value from guest physical memory in the byte order
 * requested by 'endian'. */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    /* NOTE(review): declared uint64_t though only 16-bit values are
       stored here — harmless, but uint32_t would match the return type. */
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
        /* io_mem handlers return target-native order; swap if needed. */
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}
4331
/* Load a 16-bit value in the guest CPU's native byte order. */
uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

/* Load a 16-bit little-endian value. */
uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

/* Load a 16-bit big-endian value. */
uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
4346
/* warning: addr must be aligned. The ram page is not masked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* MMIO: dispatch a 32-bit write through the io_mem handler. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        /* During migration the dirty bitmap must still be maintained so
           the page is re-sent; code invalidation happens then as well. */
        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}
4385
/* Store a 64-bit value to guest physical memory without marking the
   RAM page dirty or invalidating translated code (see
   stl_phys_notdirty).  addr must be aligned. */
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;   /* physical offset + IO-type bits for the page */
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* MMIO: the 64-bit store is split into two 4-byte accesses
           ([2] is the 4-byte handler); the half written first depends
           on the target's byte order so that the memory image matches
           a native 64-bit store. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        /* RAM: direct host store; no dirty tracking by design. */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}
4417
/* Store a 32-bit value to guest physical memory with the requested
   device endianness, handling both MMIO and RAM pages and keeping the
   dirty bitmap / translated-code state consistent.
   warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;   /* physical offset + IO-type bits for the page */
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* MMIO: handlers expect the value in target byte order, so
           byte-swap only when the requested endianness differs from
           the target's. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        /* Pick the store primitive matching the requested endianness;
           the default case uses the target's native order. */
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        /* First write to a clean page: drop any translated code that
           was derived from it, then set every dirty flag except
           CODE_DIRTY_FLAG. */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4473
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004474void stl_phys(target_phys_addr_t addr, uint32_t val)
4475{
4476 stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4477}
4478
4479void stl_le_phys(target_phys_addr_t addr, uint32_t val)
4480{
4481 stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4482}
4483
4484void stl_be_phys(target_phys_addr_t addr, uint32_t val)
4485{
4486 stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4487}
4488
bellardaab33092005-10-30 20:48:42 +00004489/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004490void stb_phys(target_phys_addr_t addr, uint32_t val)
bellardaab33092005-10-30 20:48:42 +00004491{
4492 uint8_t v = val;
4493 cpu_physical_memory_write(addr, &v, 1);
4494}
4495
/* Store a 16-bit value to guest physical memory with the requested
   device endianness, handling both MMIO and RAM pages and keeping the
   dirty bitmap / translated-code state consistent.
   warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;   /* physical offset + IO-type bits for the page */
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        /* MMIO: [1] selects the 2-byte access handler.  Handlers
           expect the value in target byte order, so byte-swap only
           when the requested endianness differs from the target's. */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        /* Pick the store primitive matching the requested endianness;
           the default case uses the target's native order. */
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        /* First write to a clean page: drop any translated code that
           was derived from it, then set every dirty flag except
           CODE_DIRTY_FLAG. */
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}
4551
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004552void stw_phys(target_phys_addr_t addr, uint32_t val)
4553{
4554 stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
4555}
4556
4557void stw_le_phys(target_phys_addr_t addr, uint32_t val)
4558{
4559 stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
4560}
4561
4562void stw_be_phys(target_phys_addr_t addr, uint32_t val)
4563{
4564 stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
4565}
4566
bellardaab33092005-10-30 20:48:42 +00004567/* XXX: optimize */
Anthony Liguoric227f092009-10-01 16:12:16 -05004568void stq_phys(target_phys_addr_t addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00004569{
4570 val = tswap64(val);
Stefan Weil71d2b722011-03-26 21:06:56 +01004571 cpu_physical_memory_write(addr, &val, 8);
bellardaab33092005-10-30 20:48:42 +00004572}
4573
Alexander Graf1e78bcc2011-07-06 09:09:23 +02004574void stq_le_phys(target_phys_addr_t addr, uint64_t val)
4575{
4576 val = cpu_to_le64(val);
4577 cpu_physical_memory_write(addr, &val, 8);
4578}
4579
4580void stq_be_phys(target_phys_addr_t addr, uint64_t val)
4581{
4582 val = cpu_to_be64(val);
4583 cpu_physical_memory_write(addr, &val, 8);
4584}
4585
/* virtual memory access for debug (includes writing to ROM) */
/* Walk the guest virtual range [addr, addr+len) page by page,
   translating each page with cpu_get_phys_page_debug() and copying
   at most one page's worth of data per iteration.  Writes go through
   cpu_physical_memory_write_rom() so that ROM regions can be patched
   (e.g. to plant breakpoints).  Returns 0 on success, -1 if any page
   in the range has no physical mapping. */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;                       /* bytes handled this iteration */
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        /* Clamp the chunk to the end of the current page. */
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
Paul Brooka68fe892010-03-01 00:08:59 +00004614#endif
bellard13eb76e2004-01-24 15:23:36 +00004615
/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
/* Called when an I/O access happens mid-TB under icount: restore the
   CPU state to the faulting instruction, discard the current TB and
   regenerate it so that it ends exactly on the I/O instruction
   (CF_LAST_IO), then resume execution.  retaddr is the host return
   address inside the generated code. */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    /* Map the host PC back to the TB that was executing. */
    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    /* icount budget at TB entry = remaining count + TB's insn count. */
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        /* Back up over the branch and give the insn count back. */
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    /* Retranslate the block capped at n instructions, forcing the I/O
       insn to be the last one (CF_LAST_IO). */
    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}
4674
Paul Brookb3755a92010-03-12 16:54:58 +00004675#if !defined(CONFIG_USER_ONLY)
4676
/* Print translation-cache statistics (TB counts, sizes, jump-chaining
   and flush counters) to stream f using cpu_fprintf, then append the
   TCG backend's own statistics.  Intended for the monitor's
   "info jit" style output. */
void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    /* Scan every live TB and accumulate the statistics. */
    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for(i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        /* TB spans two guest pages. */
        if (tb->page_addr[1] != -1)
            cross_page++;
        /* 0xffff marks an unpatched jump slot; anything else means the
           TB was chained directly to a successor. */
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles ? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    /* Let the TCG backend report its own counters as well. */
    tcg_dump_info(f, cpu_fprintf);
}
4728
bellard61382a52003-10-27 21:22:23 +00004729#define MMUSUFFIX _cmmu
4730#define GETPC() NULL
4731#define env cpu_single_env
bellardb769d8f2004-10-03 15:07:13 +00004732#define SOFTMMU_CODE_ACCESS
bellard61382a52003-10-27 21:22:23 +00004733
4734#define SHIFT 0
4735#include "softmmu_template.h"
4736
4737#define SHIFT 1
4738#include "softmmu_template.h"
4739
4740#define SHIFT 2
4741#include "softmmu_template.h"
4742
4743#define SHIFT 3
4744#include "softmmu_template.h"
4745
4746#undef env
4747
4748#endif