/*
 * virtual page mapping and translated block handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifdef _WIN32
#include <windows.h>
#else
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "osdep.h"
#include "kvm.h"
#include "hw/xen.h"
#include "qemu-timer.h"
#include "memory.h"
#include "exec-memory.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
#include <sys/param.h>
#if __FreeBSD_version >= 700104
#define HAVE_KINFO_GETVMMAP
#define sigqueue sigqueue_freebsd  /* avoid redefinition */
#include <sys/time.h>
#include <sys/proc.h>
#include <machine/profile.h>
#define _KERNEL
#include <sys/user.h>
#undef _KERNEL
#undef sigqueue
#include <libutil.h>
#endif
#endif
#else /* !CONFIG_USER_ONLY */
#include "xen-mapcache.h"
#include "trace.h"
#endif

//#define DEBUG_TB_INVALIDATE
//#define DEBUG_FLUSH
//#define DEBUG_TLB
//#define DEBUG_UNASSIGNED

/* make various TB consistency checks */
//#define DEBUG_TB_CHECK
//#define DEBUG_TLB_CHECK

//#define DEBUG_IOPORT
//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
/* TB consistency checks only implemented for usermode emulation.  */
#undef DEBUG_TB_CHECK
#endif

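/* Number of write-triggered invalidation lookups a page must see before
   a per-page code bitmap is built (see build_page_bitmap() below), so
   that later writes which do not overlap translated code can skip TB
   invalidation.  */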
#define SMC_BITMAP_USE_THRESHOLD 10

static TranslationBlock *tbs;
static int code_gen_max_blocks;
TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
static int nb_tbs;
/* any access to the tbs or the page table must use this lock */
spinlock_t tb_lock = SPIN_LOCK_UNLOCKED;

#if defined(__arm__) || defined(__sparc_v9__)
/* The prologue must be reachable with a direct jump.  ARM and Sparc64
   have limited branch ranges (possibly also PPC), so place it in a
   section close to the code segment. */
#define code_gen_section                            \
    __attribute__((__section__(".gen_code")))       \
    __attribute__((aligned (32)))
#elif defined(_WIN32)
/* Maximum alignment for Win32 is 16. */
#define code_gen_section                            \
    __attribute__((aligned (16)))
#else
#define code_gen_section                            \
    __attribute__((aligned (32)))
#endif

uint8_t code_gen_prologue[1024] code_gen_section;
static uint8_t *code_gen_buffer;
static unsigned long code_gen_buffer_size;
/* threshold to flush the translated code buffer */
static unsigned long code_gen_buffer_max_size;
static uint8_t *code_gen_ptr;
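/* Note: code_gen_buffer_max_size is a fill threshold rather than the true
   end of the buffer; code_gen_alloc() below reserves TCG_MAX_OP_SIZE *
   OPC_BUF_SIZE bytes of headroom so that a single translation in progress
   cannot run off the end of the buffer.  */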

#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
static int in_migration;

RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

#endif

CPUState *first_cpu;
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, cpu_single_env);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount = 0;

typedef struct PageDesc {
    /* list of TBs intersecting this ram page */
    TranslationBlock *first_tb;
    /* to optimize self-modifying code handling, we count the number of
       invalidation lookups on a given page so we can switch to a code
       bitmap */
    unsigned int code_write_count;
    uint8_t *code_bitmap;
#if defined(CONFIG_USER_ONLY)
    unsigned long flags;
#endif
} PageDesc;

/* In system mode we want L1_MAP to be based on ram offsets,
   while in user mode we want it to be based on virtual addresses.  */
#if !defined(CONFIG_USER_ONLY)
#if HOST_LONG_BITS < TARGET_PHYS_ADDR_SPACE_BITS
# define L1_MAP_ADDR_SPACE_BITS  HOST_LONG_BITS
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_PHYS_ADDR_SPACE_BITS
#endif
#else
# define L1_MAP_ADDR_SPACE_BITS  TARGET_VIRT_ADDR_SPACE_BITS
#endif

/* Size of the L2 (and L3, etc) page tables.  */
#define L2_BITS 10
#define L2_SIZE (1 << L2_BITS)

/* The bits remaining after N lower levels of page tables.  */
#define P_L1_BITS_REM \
    ((TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)
#define V_L1_BITS_REM \
    ((L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS) % L2_BITS)

/* Size of the L1 page table.  Avoid silly small sizes.  */
#if P_L1_BITS_REM < 4
#define P_L1_BITS  (P_L1_BITS_REM + L2_BITS)
#else
#define P_L1_BITS  P_L1_BITS_REM
#endif

#if V_L1_BITS_REM < 4
#define V_L1_BITS  (V_L1_BITS_REM + L2_BITS)
#else
#define V_L1_BITS  V_L1_BITS_REM
#endif

#define P_L1_SIZE  ((target_phys_addr_t)1 << P_L1_BITS)
#define V_L1_SIZE  ((target_ulong)1 << V_L1_BITS)

#define P_L1_SHIFT (TARGET_PHYS_ADDR_SPACE_BITS - TARGET_PAGE_BITS - P_L1_BITS)
#define V_L1_SHIFT (L1_MAP_ADDR_SPACE_BITS - TARGET_PAGE_BITS - V_L1_BITS)
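
/* Worked example of the arithmetic above (illustrative values only; the
   real ones are target-dependent): with TARGET_PAGE_BITS == 12,
   L2_BITS == 10 and TARGET_PHYS_ADDR_SPACE_BITS == 36, a physical page
   index has 36 - 12 = 24 bits.  24 % 10 leaves P_L1_BITS_REM == 4,
   which is not "silly small", so P_L1_BITS == 4 and P_L1_SHIFT == 20:
   the index is split 4/10/10 across one 16-entry L1 table and two
   1024-entry lower levels.  */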

unsigned long qemu_real_host_page_size;
unsigned long qemu_host_page_size;
unsigned long qemu_host_page_mask;

/* This is a multi-level map on the virtual address space.
   The bottom level has pointers to PageDesc.  */
static void *l1_map[V_L1_SIZE];

#if !defined(CONFIG_USER_ONLY)
typedef struct PhysPageDesc {
    /* offset in host memory of the page + io_index in the low bits */
    ram_addr_t phys_offset;
    ram_addr_t region_offset;
} PhysPageDesc;

/* This is a multi-level map on the physical address space.
   The bottom level has pointers to PhysPageDesc.  */
static void *l1_phys_map[P_L1_SIZE];

static void io_mem_init(void);
static void memory_map_init(void);

/* io memory support */
CPUWriteMemoryFunc *io_mem_write[IO_MEM_NB_ENTRIES][4];
CPUReadMemoryFunc *io_mem_read[IO_MEM_NB_ENTRIES][4];
void *io_mem_opaque[IO_MEM_NB_ENTRIES];
static char io_mem_used[IO_MEM_NB_ENTRIES];
static int io_mem_watch;
#endif

/* log support */
#ifdef WIN32
static const char *logfilename = "qemu.log";
#else
static const char *logfilename = "/tmp/qemu.log";
#endif
FILE *logfile;
int loglevel;
static int log_append = 0;

/* statistics */
#if !defined(CONFIG_USER_ONLY)
static int tlb_flush_count;
#endif
static int tb_flush_count;
static int tb_phys_invalidate_count;

#ifdef _WIN32
static void map_exec(void *addr, long size)
{
    DWORD old_protect;
    VirtualProtect(addr, size,
                   PAGE_EXECUTE_READWRITE, &old_protect);
}
#else
static void map_exec(void *addr, long size)
{
    unsigned long start, end, page_size;

    page_size = getpagesize();
    start = (unsigned long)addr;
    start &= ~(page_size - 1);

    end = (unsigned long)addr + size;
    end += page_size - 1;
    end &= ~(page_size - 1);

    mprotect((void *)start, end - start,
             PROT_READ | PROT_WRITE | PROT_EXEC);
}
#endif

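/* Record the host page geometry and, on BSD user-mode builds, mark the
   address ranges already used by the host as PAGE_RESERVED so the guest
   cannot map over them.  */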
static void page_init(void)
{
    /* NOTE: we can always assume that qemu_host_page_size >=
       TARGET_PAGE_SIZE */
#ifdef _WIN32
    {
        SYSTEM_INFO system_info;

        GetSystemInfo(&system_info);
        qemu_real_host_page_size = system_info.dwPageSize;
    }
#else
    qemu_real_host_page_size = getpagesize();
#endif
    if (qemu_host_page_size == 0)
        qemu_host_page_size = qemu_real_host_page_size;
    if (qemu_host_page_size < TARGET_PAGE_SIZE)
        qemu_host_page_size = TARGET_PAGE_SIZE;
    qemu_host_page_mask = ~(qemu_host_page_size - 1);

#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
    {
#ifdef HAVE_KINFO_GETVMMAP
        struct kinfo_vmentry *freep;
        int i, cnt;

        freep = kinfo_getvmmap(getpid(), &cnt);
        if (freep) {
            mmap_lock();
            for (i = 0; i < cnt; i++) {
                unsigned long startaddr, endaddr;

                startaddr = freep[i].kve_start;
                endaddr = freep[i].kve_end;
                if (h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                    } else {
#if TARGET_ABI_BITS <= L1_MAP_ADDR_SPACE_BITS
                        endaddr = ~0ul;
                        page_set_flags(startaddr, endaddr, PAGE_RESERVED);
#endif
                    }
                }
            }
            free(freep);
            mmap_unlock();
        }
#else
        FILE *f;

        last_brk = (unsigned long)sbrk(0);

        f = fopen("/compat/linux/proc/self/maps", "r");
        if (f) {
            mmap_lock();

            do {
                unsigned long startaddr, endaddr;
                int n;

                n = fscanf(f, "%lx-%lx %*[^\n]\n", &startaddr, &endaddr);

                if (n == 2 && h2g_valid(startaddr)) {
                    startaddr = h2g(startaddr) & TARGET_PAGE_MASK;

                    if (h2g_valid(endaddr)) {
                        endaddr = h2g(endaddr);
                    } else {
                        endaddr = ~0ul;
                    }
                    page_set_flags(startaddr, endaddr, PAGE_RESERVED);
                }
            } while (!feof(f));

            fclose(f);
            mmap_unlock();
        }
#endif
    }
#endif
}

static PageDesc *page_find_alloc(tb_page_addr_t index, int alloc)
{
    PageDesc *pd;
    void **lp;
    int i;

#if defined(CONFIG_USER_ONLY)
    /* We can't use g_malloc because it may recurse into a locked mutex. */
# define ALLOC(P, SIZE)                                 \
    do {                                                \
        P = mmap(NULL, SIZE, PROT_READ | PROT_WRITE,    \
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);   \
    } while (0)
#else
# define ALLOC(P, SIZE) \
    do { P = g_malloc0(SIZE); } while (0)
#endif

    /* Level 1.  Always allocated.  */
    lp = l1_map + ((index >> V_L1_SHIFT) & (V_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = V_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;

        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            ALLOC(p, sizeof(void *) * L2_SIZE);
            *lp = p;
        }

        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        if (!alloc) {
            return NULL;
        }
        ALLOC(pd, sizeof(PageDesc) * L2_SIZE);
        *lp = pd;
    }

#undef ALLOC

    return pd + (index & (L2_SIZE - 1));
}

static inline PageDesc *page_find(tb_page_addr_t index)
{
    return page_find_alloc(index, 0);
}

#if !defined(CONFIG_USER_ONLY)
static PhysPageDesc *phys_page_find_alloc(target_phys_addr_t index, int alloc)
{
    PhysPageDesc *pd;
    void **lp;
    int i;

    /* Level 1.  Always allocated.  */
    lp = l1_phys_map + ((index >> P_L1_SHIFT) & (P_L1_SIZE - 1));

    /* Level 2..N-1.  */
    for (i = P_L1_SHIFT / L2_BITS - 1; i > 0; i--) {
        void **p = *lp;
        if (p == NULL) {
            if (!alloc) {
                return NULL;
            }
            *lp = p = g_malloc0(sizeof(void *) * L2_SIZE);
        }
        lp = p + ((index >> (i * L2_BITS)) & (L2_SIZE - 1));
    }

    pd = *lp;
    if (pd == NULL) {
        int i;

        if (!alloc) {
            return NULL;
        }

        *lp = pd = g_malloc(sizeof(PhysPageDesc) * L2_SIZE);

        for (i = 0; i < L2_SIZE; i++) {
            pd[i].phys_offset = IO_MEM_UNASSIGNED;
            pd[i].region_offset = (index + i) << TARGET_PAGE_BITS;
        }
    }

    return pd + (index & (L2_SIZE - 1));
}

static inline PhysPageDesc *phys_page_find(target_phys_addr_t index)
{
    return phys_page_find_alloc(index, 0);
}

static void tlb_protect_code(ram_addr_t ram_addr);
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr);
#define mmap_lock() do { } while (0)
#define mmap_unlock() do { } while (0)
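/* In system mode there is no user-space mmap bookkeeping to protect
   (the lock matters only for user-mode emulation), so the two macros
   above degenerate to no-ops.  */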
#endif

#define DEFAULT_CODE_GEN_BUFFER_SIZE (32 * 1024 * 1024)

#if defined(CONFIG_USER_ONLY)
/* Currently it is not recommended to allocate big chunks of data in
   user mode.  This will change when a dedicated libc is used.  */
#define USE_STATIC_CODE_GEN_BUFFER
#endif

#ifdef USE_STATIC_CODE_GEN_BUFFER
static uint8_t static_code_gen_buffer[DEFAULT_CODE_GEN_BUFFER_SIZE]
               __attribute__((aligned (CODE_GEN_ALIGN)));
#endif

static void code_gen_alloc(unsigned long tb_size)
{
#ifdef USE_STATIC_CODE_GEN_BUFFER
    code_gen_buffer = static_code_gen_buffer;
    code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
    map_exec(code_gen_buffer, code_gen_buffer_size);
#else
    code_gen_buffer_size = tb_size;
    if (code_gen_buffer_size == 0) {
#if defined(CONFIG_USER_ONLY)
        code_gen_buffer_size = DEFAULT_CODE_GEN_BUFFER_SIZE;
#else
        /* XXX: needs adjustments */
        code_gen_buffer_size = (unsigned long)(ram_size / 4);
#endif
    }
    if (code_gen_buffer_size < MIN_CODE_GEN_BUFFER_SIZE)
        code_gen_buffer_size = MIN_CODE_GEN_BUFFER_SIZE;
    /* The code gen buffer location may have constraints depending on
       the host cpu and OS */
#if defined(__linux__)
    {
        int flags;
        void *start = NULL;

        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        flags |= MAP_32BIT;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024))
            code_gen_buffer_size = (512 * 1024 * 1024);
#elif defined(__arm__)
        /* Map the buffer below 32M, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        start = (void *) 0x01000000UL;
        if (code_gen_buffer_size > 16 * 1024 * 1024)
            code_gen_buffer_size = 16 * 1024 * 1024;
#elif defined(__s390x__)
        /* Map the buffer so that we can use direct calls and branches.  */
        /* We have a +- 4GB range on the branches; leave some slop.  */
        if (code_gen_buffer_size > (3ul * 1024 * 1024 * 1024)) {
            code_gen_buffer_size = 3ul * 1024 * 1024 * 1024;
        }
        start = (void *)0x90000000UL;
#endif
        code_gen_buffer = mmap(start, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
    || defined(__DragonFly__) || defined(__OpenBSD__) \
    || defined(__NetBSD__)
    {
        int flags;
        void *addr = NULL;
        flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(__x86_64__)
        /* FreeBSD doesn't have MAP_32BIT, use MAP_FIXED and assume
         * 0x40000000 is free */
        flags |= MAP_FIXED;
        addr = (void *)0x40000000;
        /* Cannot map more than that */
        if (code_gen_buffer_size > (800 * 1024 * 1024))
            code_gen_buffer_size = (800 * 1024 * 1024);
#elif defined(__sparc_v9__)
        /* Map the buffer below 2G, so we can use direct calls and branches */
        flags |= MAP_FIXED;
        addr = (void *) 0x60000000UL;
        if (code_gen_buffer_size > (512 * 1024 * 1024)) {
            code_gen_buffer_size = (512 * 1024 * 1024);
        }
#endif
        code_gen_buffer = mmap(addr, code_gen_buffer_size,
                               PROT_WRITE | PROT_READ | PROT_EXEC,
                               flags, -1, 0);
        if (code_gen_buffer == MAP_FAILED) {
            fprintf(stderr, "Could not allocate dynamic translator buffer\n");
            exit(1);
        }
    }
#else
    code_gen_buffer = g_malloc(code_gen_buffer_size);
    map_exec(code_gen_buffer, code_gen_buffer_size);
#endif
#endif /* !USE_STATIC_CODE_GEN_BUFFER */
    map_exec(code_gen_prologue, sizeof(code_gen_prologue));
    code_gen_buffer_max_size = code_gen_buffer_size -
        (TCG_MAX_OP_SIZE * OPC_BUF_SIZE);
    code_gen_max_blocks = code_gen_buffer_size / CODE_GEN_AVG_BLOCK_SIZE;
    tbs = g_malloc(code_gen_max_blocks * sizeof(TranslationBlock));
}
563
564/* Must be called before using the QEMU cpus. 'tb_size' is the size
565 (in bytes) allocated to the translation buffer. Zero means default
566 size. */
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200567void tcg_exec_init(unsigned long tb_size)
bellard26a5f132008-05-28 12:30:31 +0000568{
bellard26a5f132008-05-28 12:30:31 +0000569 cpu_gen_init();
570 code_gen_alloc(tb_size);
571 code_gen_ptr = code_gen_buffer;
bellard43694152008-05-29 09:35:57 +0000572 page_init();
Richard Henderson9002ec72010-05-06 08:50:41 -0700573#if !defined(CONFIG_USER_ONLY) || !defined(CONFIG_USE_GUEST_BASE)
574 /* There's no guest base to take into account, so go ahead and
575 initialize the prologue now. */
576 tcg_prologue_init(&tcg_ctx);
577#endif
bellard26a5f132008-05-28 12:30:31 +0000578}
579
Jan Kiszkad5ab9712011-08-02 16:10:21 +0200580bool tcg_enabled(void)
581{
582 return code_gen_buffer != NULL;
583}
584
585void cpu_exec_init_all(void)
586{
587#if !defined(CONFIG_USER_ONLY)
588 memory_map_init();
589 io_mem_init();
590#endif
591}

#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *env = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    env->interrupt_request &= ~0x01;
    tlb_flush(env, 1);

    return 0;
}

static const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField []) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    }
};
#endif

CPUState *qemu_get_cpu(int cpu)
{
    CPUState *env = first_cpu;

    while (env) {
        if (env->cpu_index == cpu)
            break;
        env = env->next_cpu;
    }

    return env;
}

void cpu_exec_init(CPUState *env)
{
    CPUState **penv;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    env->next_cpu = NULL;
    penv = &first_cpu;
    cpu_index = 0;
    while (*penv != NULL) {
        penv = &(*penv)->next_cpu;
        cpu_index++;
    }
    env->cpu_index = cpu_index;
    env->numa_node = 0;
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#ifndef CONFIG_USER_ONLY
    env->thread_id = qemu_get_thread_id();
#endif
    *penv = env;
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    vmstate_register(NULL, cpu_index, &vmstate_cpu_common, env);
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
#endif
}

/* Allocate a new translation block. Flush the translation buffer if
   too many translation blocks or too much generated code. */
static TranslationBlock *tb_alloc(target_ulong pc)
{
    TranslationBlock *tb;

    if (nb_tbs >= code_gen_max_blocks ||
        (code_gen_ptr - code_gen_buffer) >= code_gen_buffer_max_size)
        return NULL;
    tb = &tbs[nb_tbs++];
    tb->pc = pc;
    tb->cflags = 0;
    return tb;
}

void tb_free(TranslationBlock *tb)
{
    /* In practice this is mostly used for single-use temporary TBs.
       Ignore the hard cases and just back up if this TB happens to
       be the last one generated.  */
    if (nb_tbs > 0 && tb == &tbs[nb_tbs - 1]) {
        code_gen_ptr = tb->tc_ptr;
        nb_tbs--;
    }
}

static inline void invalidate_page_bitmap(PageDesc *p)
{
    if (p->code_bitmap) {
        g_free(p->code_bitmap);
        p->code_bitmap = NULL;
    }
    p->code_write_count = 0;
}

/* Set to NULL all the 'first_tb' fields in all PageDescs. */

static void page_flush_tb_1(int level, void **lp)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pd[i].first_tb = NULL;
            invalidate_page_bitmap(pd + i);
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            page_flush_tb_1(level - 1, pp + i);
        }
    }
}

static void page_flush_tb(void)
{
    int i;
    for (i = 0; i < V_L1_SIZE; i++) {
        page_flush_tb_1(V_L1_SHIFT / L2_BITS - 1, l1_map + i);
    }
}
733/* flush all the translation blocks */
bellardd4e81642003-05-25 16:46:15 +0000734/* XXX: tb_flush is currently not thread safe */
bellard6a00d602005-11-21 23:25:50 +0000735void tb_flush(CPUState *env1)
bellardfd6ce8f2003-05-14 19:00:11 +0000736{
bellard6a00d602005-11-21 23:25:50 +0000737 CPUState *env;
bellard01243112004-01-04 15:48:17 +0000738#if defined(DEBUG_FLUSH)
blueswir1ab3d1722007-11-04 07:31:40 +0000739 printf("qemu: flush code_size=%ld nb_tbs=%d avg_tb_size=%ld\n",
740 (unsigned long)(code_gen_ptr - code_gen_buffer),
741 nb_tbs, nb_tbs > 0 ?
742 ((unsigned long)(code_gen_ptr - code_gen_buffer)) / nb_tbs : 0);
bellardfd6ce8f2003-05-14 19:00:11 +0000743#endif
bellard26a5f132008-05-28 12:30:31 +0000744 if ((unsigned long)(code_gen_ptr - code_gen_buffer) > code_gen_buffer_size)
pbrooka208e542008-03-31 17:07:36 +0000745 cpu_abort(env1, "Internal error: code buffer overflow\n");
746
bellardfd6ce8f2003-05-14 19:00:11 +0000747 nb_tbs = 0;
ths3b46e622007-09-17 08:09:54 +0000748
bellard6a00d602005-11-21 23:25:50 +0000749 for(env = first_cpu; env != NULL; env = env->next_cpu) {
750 memset (env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));
751 }
bellard9fa3e852004-01-04 18:06:42 +0000752
bellard8a8a6082004-10-03 13:36:49 +0000753 memset (tb_phys_hash, 0, CODE_GEN_PHYS_HASH_SIZE * sizeof (void *));
bellardfd6ce8f2003-05-14 19:00:11 +0000754 page_flush_tb();
bellard9fa3e852004-01-04 18:06:42 +0000755
bellardfd6ce8f2003-05-14 19:00:11 +0000756 code_gen_ptr = code_gen_buffer;
bellardd4e81642003-05-25 16:46:15 +0000757 /* XXX: flush processor icache at this point if cache flush is
758 expensive */
bellarde3db7222005-01-26 22:00:47 +0000759 tb_flush_count++;
bellardfd6ce8f2003-05-14 19:00:11 +0000760}
761
762#ifdef DEBUG_TB_CHECK
763
j_mayerbc98a7e2007-04-04 07:55:12 +0000764static void tb_invalidate_check(target_ulong address)
bellardfd6ce8f2003-05-14 19:00:11 +0000765{
766 TranslationBlock *tb;
767 int i;
768 address &= TARGET_PAGE_MASK;
pbrook99773bd2006-04-16 15:14:59 +0000769 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
770 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000771 if (!(address + TARGET_PAGE_SIZE <= tb->pc ||
772 address >= tb->pc + tb->size)) {
Blue Swirl0bf9e312009-07-20 17:19:25 +0000773 printf("ERROR invalidate: address=" TARGET_FMT_lx
774 " PC=%08lx size=%04x\n",
pbrook99773bd2006-04-16 15:14:59 +0000775 address, (long)tb->pc, tb->size);
bellardfd6ce8f2003-05-14 19:00:11 +0000776 }
777 }
778 }
779}
780
781/* verify that all the pages have correct rights for code */
782static void tb_page_check(void)
783{
784 TranslationBlock *tb;
785 int i, flags1, flags2;
ths3b46e622007-09-17 08:09:54 +0000786
pbrook99773bd2006-04-16 15:14:59 +0000787 for(i = 0;i < CODE_GEN_PHYS_HASH_SIZE; i++) {
788 for(tb = tb_phys_hash[i]; tb != NULL; tb = tb->phys_hash_next) {
bellardfd6ce8f2003-05-14 19:00:11 +0000789 flags1 = page_get_flags(tb->pc);
790 flags2 = page_get_flags(tb->pc + tb->size - 1);
791 if ((flags1 & PAGE_WRITE) || (flags2 & PAGE_WRITE)) {
792 printf("ERROR page flags: PC=%08lx size=%04x f1=%x f2=%x\n",
pbrook99773bd2006-04-16 15:14:59 +0000793 (long)tb->pc, tb->size, flags1, flags2);
bellardfd6ce8f2003-05-14 19:00:11 +0000794 }
795 }
796 }
797}
798
799#endif
800
801/* invalidate one TB */
802static inline void tb_remove(TranslationBlock **ptb, TranslationBlock *tb,
803 int next_offset)
804{
805 TranslationBlock *tb1;
806 for(;;) {
807 tb1 = *ptb;
808 if (tb1 == tb) {
809 *ptb = *(TranslationBlock **)((char *)tb1 + next_offset);
810 break;
811 }
812 ptb = (TranslationBlock **)((char *)tb1 + next_offset);
813 }
814}
815
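/* TBs are linked through tb->page_next[] and tb->jmp_next[] with the low
   two bits of each pointer used as a tag: values 0 and 1 select which of
   the (up to two) pages or jump slots the link belongs to, and 2 marks
   the head of a circular jump list.  Hence the recurring
   "(long)tb & 3" / "(long)tb & ~3" decoding in the helpers below.  */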
static inline void tb_page_remove(TranslationBlock **ptb, TranslationBlock *tb)
{
    TranslationBlock *tb1;
    unsigned int n1;

    for(;;) {
        tb1 = *ptb;
        n1 = (long)tb1 & 3;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        if (tb1 == tb) {
            *ptb = tb1->page_next[n1];
            break;
        }
        ptb = &tb1->page_next[n1];
    }
}

static inline void tb_jmp_remove(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, **ptb;
    unsigned int n1;

    ptb = &tb->jmp_next[n];
    tb1 = *ptb;
    if (tb1) {
        /* find tb(n) in circular list */
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            if (n1 == 2) {
                ptb = &tb1->jmp_first;
            } else {
                ptb = &tb1->jmp_next[n1];
            }
        }
        /* now we can suppress tb(n) from the list */
        *ptb = tb->jmp_next[n];

        tb->jmp_next[n] = NULL;
    }
}

/* reset the jump entry 'n' of a TB so that it is not chained to
   another TB */
static inline void tb_reset_jump(TranslationBlock *tb, int n)
{
    tb_set_jmp_target(tb, n, (unsigned long)(tb->tc_ptr + tb->tb_next_offset[n]));
}

void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
{
    CPUState *env;
    PageDesc *p;
    unsigned int h, n1;
    tb_page_addr_t phys_pc;
    TranslationBlock *tb1, *tb2;

    /* remove the TB from the hash list */
    phys_pc = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
    h = tb_phys_hash_func(phys_pc);
    tb_remove(&tb_phys_hash[h], tb,
              offsetof(TranslationBlock, phys_hash_next));

    /* remove the TB from the page list */
    if (tb->page_addr[0] != page_addr) {
        p = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }
    if (tb->page_addr[1] != -1 && tb->page_addr[1] != page_addr) {
        p = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
        tb_page_remove(&p->first_tb, tb);
        invalidate_page_bitmap(p);
    }

    tb_invalidated_flag = 1;

    /* remove the TB from the hash list */
    h = tb_jmp_cache_hash_func(tb->pc);
    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        if (env->tb_jmp_cache[h] == tb)
            env->tb_jmp_cache[h] = NULL;
    }

    /* suppress this TB from the two jump lists */
    tb_jmp_remove(tb, 0);
    tb_jmp_remove(tb, 1);

    /* suppress any remaining jumps to this TB */
    tb1 = tb->jmp_first;
    for(;;) {
        n1 = (long)tb1 & 3;
        if (n1 == 2)
            break;
        tb1 = (TranslationBlock *)((long)tb1 & ~3);
        tb2 = tb1->jmp_next[n1];
        tb_reset_jump(tb1, n1);
        tb1->jmp_next[n1] = NULL;
        tb1 = tb2;
    }
    tb->jmp_first = (TranslationBlock *)((long)tb | 2); /* fail safe */

    tb_phys_invalidate_count++;
}

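/* set_bits() marks bits [start, start + len) in the byte array 'tab'.
   Worked example: set_bits(tab, 5, 7) takes the else branch (the range
   crosses a byte boundary) and sets bits 5..7 of tab[0] via the mask
   0xff << 5, then bits 0..3 of tab[1] via ~(0xff << 4).  */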
924static inline void set_bits(uint8_t *tab, int start, int len)
925{
926 int end, mask, end1;
927
928 end = start + len;
929 tab += start >> 3;
930 mask = 0xff << (start & 7);
931 if ((start & ~7) == (end & ~7)) {
932 if (start < end) {
933 mask &= ~(0xff << (end & 7));
934 *tab |= mask;
935 }
936 } else {
937 *tab++ |= mask;
938 start = (start + 8) & ~7;
939 end1 = end & ~7;
940 while (start < end1) {
941 *tab++ = 0xff;
942 start += 8;
943 }
944 if (start < end) {
945 mask = ~(0xff << (end & 7));
946 *tab |= mask;
947 }
948 }
949}
950
951static void build_page_bitmap(PageDesc *p)
952{
953 int n, tb_start, tb_end;
954 TranslationBlock *tb;
ths3b46e622007-09-17 08:09:54 +0000955
Anthony Liguori7267c092011-08-20 22:09:37 -0500956 p->code_bitmap = g_malloc0(TARGET_PAGE_SIZE / 8);
bellard9fa3e852004-01-04 18:06:42 +0000957
958 tb = p->first_tb;
959 while (tb != NULL) {
960 n = (long)tb & 3;
961 tb = (TranslationBlock *)((long)tb & ~3);
962 /* NOTE: this is subtle as a TB may span two physical pages */
963 if (n == 0) {
964 /* NOTE: tb_end may be after the end of the page, but
965 it is not a problem */
966 tb_start = tb->pc & ~TARGET_PAGE_MASK;
967 tb_end = tb_start + tb->size;
968 if (tb_end > TARGET_PAGE_SIZE)
969 tb_end = TARGET_PAGE_SIZE;
970 } else {
971 tb_start = 0;
972 tb_end = ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
973 }
974 set_bits(p->code_bitmap, tb_start, tb_end - tb_start);
975 tb = tb->page_next[n];
976 }
977}
978
TranslationBlock *tb_gen_code(CPUState *env,
                              target_ulong pc, target_ulong cs_base,
                              int flags, int cflags)
{
    TranslationBlock *tb;
    uint8_t *tc_ptr;
    tb_page_addr_t phys_pc, phys_page2;
    target_ulong virt_page2;
    int code_gen_size;

    phys_pc = get_page_addr_code(env, pc);
    tb = tb_alloc(pc);
    if (!tb) {
        /* flush must be done */
        tb_flush(env);
        /* cannot fail at this point */
        tb = tb_alloc(pc);
        /* Don't forget to invalidate previous TB info. */
        tb_invalidated_flag = 1;
    }
    tc_ptr = code_gen_ptr;
    tb->tc_ptr = tc_ptr;
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));

    /* check next page if needed */
    virt_page2 = (pc + tb->size - 1) & TARGET_PAGE_MASK;
    phys_page2 = -1;
    if ((pc & TARGET_PAGE_MASK) != virt_page2) {
        phys_page2 = get_page_addr_code(env, virt_page2);
    }
    tb_link_page(tb, phys_pc, phys_page2);
    return tb;
}

/* invalidate all TBs which intersect with the target physical page
   starting in range [start, end).  NOTE: start and end must refer to
   the same physical page.  'is_cpu_write_access' should be true if called
   from a real cpu write access: the virtual CPU will exit the current
   TB if code is modified inside this TB. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access)
{
    TranslationBlock *tb, *tb_next, *saved_tb;
    CPUState *env = cpu_single_env;
    tb_page_addr_t tb_start, tb_end;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    int current_tb_not_found = is_cpu_write_access;
    TranslationBlock *current_tb = NULL;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif /* TARGET_HAS_PRECISE_SMC */

    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (!p->code_bitmap &&
        ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD &&
        is_cpu_write_access) {
        /* build code bitmap */
        build_page_bitmap(p);
    }

    /* we remove all the TBs in the range [start, end) */
    /* XXX: see if in some cases it could be faster to invalidate all the code */
    tb = p->first_tb;
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
        tb_next = tb->page_next[n];
        /* NOTE: this is subtle as a TB may span two physical pages */
        if (n == 0) {
            /* NOTE: tb_end may be after the end of the page, but
               it is not a problem */
            tb_start = tb->page_addr[0] + (tb->pc & ~TARGET_PAGE_MASK);
            tb_end = tb_start + tb->size;
        } else {
            tb_start = tb->page_addr[1];
            tb_end = tb_start + ((tb->pc + tb->size) & ~TARGET_PAGE_MASK);
        }
        if (!(tb_end <= start || tb_start >= end)) {
#ifdef TARGET_HAS_PRECISE_SMC
            if (current_tb_not_found) {
                current_tb_not_found = 0;
                current_tb = NULL;
                if (env->mem_io_pc) {
                    /* now we have a real cpu fault */
                    current_tb = tb_find_pc(env->mem_io_pc);
                }
            }
            if (current_tb == tb &&
                (current_tb->cflags & CF_COUNT_MASK) != 1) {
                /* If we are modifying the current TB, we must stop
                   its execution. We could be more precise by checking
                   that the modification is after the current PC, but it
                   would require a specialized function to partially
                   restore the CPU state */

                current_tb_modified = 1;
                cpu_restore_state(current_tb, env, env->mem_io_pc);
                cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                     &current_flags);
            }
#endif /* TARGET_HAS_PRECISE_SMC */
            /* we need to do that to handle the case where a signal
               occurs while doing tb_phys_invalidate() */
            saved_tb = NULL;
            if (env) {
                saved_tb = env->current_tb;
                env->current_tb = NULL;
            }
            tb_phys_invalidate(tb, -1);
            if (env) {
                env->current_tb = saved_tb;
                if (env->interrupt_request && env->current_tb)
                    cpu_interrupt(env, env->interrupt_request);
            }
        }
        tb = tb_next;
    }
#if !defined(CONFIG_USER_ONLY)
    /* if no code remaining, no need to continue to use slow writes */
    if (!p->first_tb) {
        invalidate_page_bitmap(p);
        if (is_cpu_write_access) {
            tlb_unprotect_code_phys(env, start, env->mem_io_vaddr);
        }
    }
#endif
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, NULL);
    }
#endif
}

/* len must be <= 8 and start must be a multiple of len */
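/* Because start is len-aligned and, in practice, len is a power-of-two
   access size, the probed range fits within one bitmap byte: shifting
   that byte right by (start & 7) puts the range at bit 0, so any of the
   low 'len' bits being set means the write overlaps translated code.  */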
static inline void tb_invalidate_phys_page_fast(tb_page_addr_t start, int len)
{
    PageDesc *p;
    int offset, b;
#if 0
    if (1) {
        qemu_log("modifying code at 0x%x size=%d EIP=%x PC=%08x\n",
                 cpu_single_env->mem_io_vaddr, len,
                 cpu_single_env->eip,
                 cpu_single_env->eip + (long)cpu_single_env->segs[R_CS].base);
    }
#endif
    p = page_find(start >> TARGET_PAGE_BITS);
    if (!p)
        return;
    if (p->code_bitmap) {
        offset = start & ~TARGET_PAGE_MASK;
        b = p->code_bitmap[offset >> 3] >> (offset & 7);
        if (b & ((1 << len) - 1))
            goto do_invalidate;
    } else {
    do_invalidate:
        tb_invalidate_phys_page_range(start, start + len, 1);
    }
}

#if !defined(CONFIG_SOFTMMU)
static void tb_invalidate_phys_page(tb_page_addr_t addr,
                                    unsigned long pc, void *puc)
{
    TranslationBlock *tb;
    PageDesc *p;
    int n;
#ifdef TARGET_HAS_PRECISE_SMC
    TranslationBlock *current_tb = NULL;
    CPUState *env = cpu_single_env;
    int current_tb_modified = 0;
    target_ulong current_pc = 0;
    target_ulong current_cs_base = 0;
    int current_flags = 0;
#endif

    addr &= TARGET_PAGE_MASK;
    p = page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return;
    tb = p->first_tb;
#ifdef TARGET_HAS_PRECISE_SMC
    if (tb && pc != 0) {
        current_tb = tb_find_pc(pc);
    }
#endif
    while (tb != NULL) {
        n = (long)tb & 3;
        tb = (TranslationBlock *)((long)tb & ~3);
#ifdef TARGET_HAS_PRECISE_SMC
        if (current_tb == tb &&
            (current_tb->cflags & CF_COUNT_MASK) != 1) {
            /* If we are modifying the current TB, we must stop
               its execution. We could be more precise by checking
               that the modification is after the current PC, but it
               would require a specialized function to partially
               restore the CPU state */

            current_tb_modified = 1;
            cpu_restore_state(current_tb, env, pc);
            cpu_get_tb_cpu_state(env, &current_pc, &current_cs_base,
                                 &current_flags);
        }
#endif /* TARGET_HAS_PRECISE_SMC */
        tb_phys_invalidate(tb, addr);
        tb = tb->page_next[n];
    }
    p->first_tb = NULL;
#ifdef TARGET_HAS_PRECISE_SMC
    if (current_tb_modified) {
        /* we generate a block containing just the instruction
           modifying the memory. It will ensure that it cannot modify
           itself */
        env->current_tb = NULL;
        tb_gen_code(env, current_pc, current_cs_base, current_flags, 1);
        cpu_resume_from_signal(env, puc);
    }
#endif
}
#endif

/* add the tb in the target page and protect it if necessary */
static inline void tb_alloc_page(TranslationBlock *tb,
                                 unsigned int n, tb_page_addr_t page_addr)
{
    PageDesc *p;
#ifndef CONFIG_USER_ONLY
    bool page_already_protected;
#endif

    tb->page_addr[n] = page_addr;
    p = page_find_alloc(page_addr >> TARGET_PAGE_BITS, 1);
    tb->page_next[n] = p->first_tb;
#ifndef CONFIG_USER_ONLY
    page_already_protected = p->first_tb != NULL;
#endif
    p->first_tb = (TranslationBlock *)((long)tb | n);
    invalidate_page_bitmap(p);

#if defined(TARGET_HAS_SMC) || 1

#if defined(CONFIG_USER_ONLY)
    if (p->flags & PAGE_WRITE) {
        target_ulong addr;
        PageDesc *p2;
        int prot;

        /* force the host page as non writable (writes will have a
           page fault + mprotect overhead) */
        page_addr &= qemu_host_page_mask;
        prot = 0;
        for(addr = page_addr; addr < page_addr + qemu_host_page_size;
            addr += TARGET_PAGE_SIZE) {

            p2 = page_find(addr >> TARGET_PAGE_BITS);
            if (!p2)
                continue;
            prot |= p2->flags;
            p2->flags &= ~PAGE_WRITE;
        }
        mprotect(g2h(page_addr), qemu_host_page_size,
                 (prot & PAGE_BITS) & ~PAGE_WRITE);
#ifdef DEBUG_TB_INVALIDATE
        printf("protecting code page: 0x" TARGET_FMT_lx "\n",
               page_addr);
#endif
    }
#else
    /* if some code is already present, then the pages are already
       protected. So we handle the case where only the first TB is
       allocated in a physical page */
    if (!page_already_protected) {
        tlb_protect_code(page_addr);
    }
#endif

#endif /* TARGET_HAS_SMC */
}

/* add a new TB and link it to the physical page tables. phys_page2 is
   (-1) to indicate that only one page contains the TB. */
void tb_link_page(TranslationBlock *tb,
                  tb_page_addr_t phys_pc, tb_page_addr_t phys_page2)
{
    unsigned int h;
    TranslationBlock **ptb;

    /* Grab the mmap lock to stop another thread invalidating this TB
       before we are done.  */
    mmap_lock();
    /* add in the physical hash table */
    h = tb_phys_hash_func(phys_pc);
    ptb = &tb_phys_hash[h];
    tb->phys_hash_next = *ptb;
    *ptb = tb;

    /* add in the page list */
    tb_alloc_page(tb, 0, phys_pc & TARGET_PAGE_MASK);
    if (phys_page2 != -1)
        tb_alloc_page(tb, 1, phys_page2);
    else
        tb->page_addr[1] = -1;

    tb->jmp_first = (TranslationBlock *)((long)tb | 2);
    tb->jmp_next[0] = NULL;
    tb->jmp_next[1] = NULL;

    /* init original jump addresses */
    if (tb->tb_next_offset[0] != 0xffff)
        tb_reset_jump(tb, 0);
    if (tb->tb_next_offset[1] != 0xffff)
        tb_reset_jump(tb, 1);

#ifdef DEBUG_TB_CHECK
    tb_page_check();
#endif
    mmap_unlock();
}

1314 tb[1].tc_ptr. Return NULL if not found */
1315TranslationBlock *tb_find_pc(unsigned long tc_ptr)
1316{
1317 int m_min, m_max, m;
1318 unsigned long v;
1319 TranslationBlock *tb;
1320
1321 if (nb_tbs <= 0)
1322 return NULL;
1323 if (tc_ptr < (unsigned long)code_gen_buffer ||
1324 tc_ptr >= (unsigned long)code_gen_ptr)
1325 return NULL;
1326 /* binary search (cf Knuth) */
1327 m_min = 0;
1328 m_max = nb_tbs - 1;
1329 while (m_min <= m_max) {
1330 m = (m_min + m_max) >> 1;
1331 tb = &tbs[m];
1332 v = (unsigned long)tb->tc_ptr;
1333 if (v == tc_ptr)
1334 return tb;
1335 else if (tc_ptr < v) {
1336 m_max = m - 1;
1337 } else {
1338 m_min = m + 1;
1339 }
ths5fafdf22007-09-16 21:08:06 +00001340 }
bellarda513fe12003-05-27 23:29:48 +00001341 return &tbs[m_max];
1342}
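
/* The binary search above relies on tbs[] being sorted by tc_ptr, which
   holds because translated code is carved out of code_gen_buffer in
   increasing address order.  When tc_ptr falls between two entries, the
   loop exits with m_max naming the last TB that starts at or below
   tc_ptr, i.e. the block containing the host PC. */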

static void tb_reset_jump_recursive(TranslationBlock *tb);

static inline void tb_reset_jump_recursive2(TranslationBlock *tb, int n)
{
    TranslationBlock *tb1, *tb_next, **ptb;
    unsigned int n1;

    tb1 = tb->jmp_next[n];
    if (tb1 != NULL) {
        /* find head of list */
        for(;;) {
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == 2)
                break;
            tb1 = tb1->jmp_next[n1];
        }
        /* we are now sure that tb jumps to tb1 */
        tb_next = tb1;

        /* remove tb from the jmp_first list */
        ptb = &tb_next->jmp_first;
        for(;;) {
            tb1 = *ptb;
            n1 = (long)tb1 & 3;
            tb1 = (TranslationBlock *)((long)tb1 & ~3);
            if (n1 == n && tb1 == tb)
                break;
            ptb = &tb1->jmp_next[n1];
        }
        *ptb = tb->jmp_next[n];
        tb->jmp_next[n] = NULL;

        /* suppress the jump to next tb in generated code */
        tb_reset_jump(tb, n);

        /* suppress jumps in the tb on which we could have jumped */
        tb_reset_jump_recursive(tb_next);
    }
}

static void tb_reset_jump_recursive(TranslationBlock *tb)
{
    tb_reset_jump_recursive2(tb, 0);
    tb_reset_jump_recursive2(tb, 1);
}
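
/* Encoding of the circular jmp_first/jmp_next lists walked above: each
   link stores a tagged pointer whose low two bits select the successor
   field.  A tag of 0 or 1 means "continue via tb1->jmp_next[tag]"; the
   tag value 2 (set by tb_link_page on jmp_first) marks the list head,
   i.e. the TB that is jumped to, which is why the head-finding loop
   stops on n1 == 2. */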

#if defined(TARGET_HAS_ICE)
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *env, target_ulong pc)
{
    target_phys_addr_t addr;
    target_ulong pd;
    ram_addr_t ram_addr;
    PhysPageDesc *p;

    addr = cpu_get_phys_page_debug(env, pc);
    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
    ram_addr = (pd & TARGET_PAGE_MASK) | (pc & ~TARGET_PAGE_MASK);
    tb_invalidate_phys_page_range(ram_addr, ram_addr + 1, 0);
}
#endif
#endif /* TARGET_HAS_ICE */

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
}

int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint. */
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    /* sanity checks: allow power-of-2 lengths, deny unaligned watchpoints */
    if ((len != 1 && len != 2 && len != 4 && len != 8) || (addr & ~len_mask)) {
        fprintf(stderr, "qemu: tried to set invalid watchpoint at "
                TARGET_FMT_lx ", len=" TARGET_FMT_lu "\n", addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len_mask = len_mask;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->watchpoints, wp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->watchpoints, wp, entry);

    tlb_flush_page(env, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}
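
/* Illustrative use, with hypothetical values and assuming the
   BP_MEM_WRITE flag from cpu-all.h: watching an 8-byte, 8-aligned guest
   variable could look like

       CPUWatchpoint *wp;
       if (cpu_watchpoint_insert(env, 0x1000, 8,
                                 BP_MEM_WRITE | BP_GDB, &wp) < 0) {
           ... report failure ...
       }

   Note that len_mask = ~(len - 1) doubles as the alignment check: for
   len == 8, ~len_mask is 7, so any addr with one of its low three bits
   set fails the (addr & ~len_mask) test above. */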

/* Remove a specific watchpoint. */
int cpu_watchpoint_remove(CPUState *env, target_ulong addr, target_ulong len,
                          int flags)
{
    target_ulong len_mask = ~(len - 1);
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (addr == wp->vaddr && len_mask == wp->len_mask
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(env, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference. */
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&env->watchpoints, watchpoint, entry);

    tlb_flush_page(env, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints. */
void cpu_watchpoint_remove_all(CPUState *env, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &env->watchpoints, entry, next) {
        if (wp->flags & mask)
            cpu_watchpoint_remove_by_ref(env, wp);
    }
}
#endif

/* Add a breakpoint. */
int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB)
        QTAILQ_INSERT_HEAD(&env->breakpoints, bp, entry);
    else
        QTAILQ_INSERT_TAIL(&env->breakpoints, bp, entry);

    breakpoint_invalidate(env, pc);

    if (breakpoint)
        *breakpoint = bp;
    return 0;
#else
    return -ENOSYS;
#endif
}
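
/* Sketch of the matching insert/remove pairing (pc hypothetical):

       CPUBreakpoint *bp;
       cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
       ...
       cpu_breakpoint_remove_by_ref(env, bp);   // or by (pc, flags)

   breakpoint_invalidate() is what forces the TBs covering the target
   page to be retranslated, so the generated code actually raises the
   debug exception at pc. */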

/* Remove a specific breakpoint. */
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(env, bp);
            return 0;
        }
    }
    return -ENOENT;
#else
    return -ENOSYS;
#endif
}

/* Remove a specific breakpoint by reference. */
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint)
{
#if defined(TARGET_HAS_ICE)
    QTAILQ_REMOVE(&env->breakpoints, breakpoint, entry);

    breakpoint_invalidate(env, breakpoint->pc);

    g_free(breakpoint);
#endif
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *env, int mask)
{
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &env->breakpoints, entry, next) {
        if (bp->flags & mask)
            cpu_breakpoint_remove_by_ref(env, bp);
    }
#endif
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *env, int enabled)
{
#if defined(TARGET_HAS_ICE)
    if (env->singlestep_enabled != enabled) {
        env->singlestep_enabled = enabled;
        if (kvm_enabled())
            kvm_update_guest_debug(env, 0);
        else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            tb_flush(env);
        }
    }
#endif
}

/* enable or disable low-level logging */
void cpu_set_log(int log_flags)
{
    loglevel = log_flags;
    if (loglevel && !logfile) {
        logfile = fopen(logfilename, log_append ? "a" : "w");
        if (!logfile) {
            perror(logfilename);
            _exit(1);
        }
#if !defined(CONFIG_SOFTMMU)
        /* must avoid mmap() usage of glibc by setting a buffer "by hand" */
        {
            static char logfile_buf[4096];
            setvbuf(logfile, logfile_buf, _IOLBF, sizeof(logfile_buf));
        }
#elif !defined(_WIN32)
        /* Win32 doesn't support line-buffering and requires size >= 2 */
        setvbuf(logfile, NULL, _IOLBF, 0);
#endif
        log_append = 1;
    }
    if (!loglevel && logfile) {
        fclose(logfile);
        logfile = NULL;
    }
}

void cpu_set_log_filename(const char *filename)
{
    logfilename = strdup(filename);
    if (logfile) {
        fclose(logfile);
        logfile = NULL;
    }
    cpu_set_log(loglevel);
}

static void cpu_unlink_tb(CPUState *env)
{
    /* FIXME: TB unchaining isn't SMP safe.  For now just ignore the
       problem and hope the cpu will stop of its own accord.  For userspace
       emulation this often isn't actually as bad as it sounds.  Often
       signals are used primarily to interrupt blocking syscalls.  */
    TranslationBlock *tb;
    static spinlock_t interrupt_lock = SPIN_LOCK_UNLOCKED;

    spin_lock(&interrupt_lock);
    tb = env->current_tb;
    /* if the cpu is currently executing code, we must unlink it and
       all the potentially executing TBs */
    if (tb) {
        env->current_tb = NULL;
        tb_reset_jump_recursive(tb);
    }
    spin_unlock(&interrupt_lock);
}
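
/* Unlinking rather than forcibly stopping works because a TB whose
   outgoing jumps have been reset falls back to the main execution loop,
   where interrupt_request and exit_request are re-examined.  The
   spinlock only serialises concurrent unlinkers; as the FIXME above
   notes, it does not make unlinking safe against a CPU that is
   executing the chain at the same time. */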

#ifndef CONFIG_USER_ONLY
/* mask must never be zero, except for A20 change call */
static void tcg_handle_interrupt(CPUState *env, int mask)
{
    int old_mask;

    old_mask = env->interrupt_request;
    env->interrupt_request |= mask;

    /*
     * If called from iothread context, wake the target cpu in
     * case it's halted.
     */
    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
        return;
    }

    if (use_icount) {
        env->icount_decr.u16.high = 0xffff;
        if (!can_do_io(env)
            && (mask & ~old_mask) != 0) {
            cpu_abort(env, "Raised interrupt while not in I/O function");
        }
    } else {
        cpu_unlink_tb(env);
    }
}
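
/* In system emulation cpu_interrupt() dispatches through this hook, so
   an accelerator (KVM, for instance) can presumably install its own
   handler in place of the TCG one above. */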

CPUInterruptHandler cpu_interrupt_handler = tcg_handle_interrupt;

#else /* CONFIG_USER_ONLY */

void cpu_interrupt(CPUState *env, int mask)
{
    env->interrupt_request |= mask;
    cpu_unlink_tb(env);
}
#endif /* CONFIG_USER_ONLY */

void cpu_reset_interrupt(CPUState *env, int mask)
{
    env->interrupt_request &= ~mask;
}

void cpu_exit(CPUState *env)
{
    env->exit_request = 1;
    cpu_unlink_tb(env);
}

const CPULogItem cpu_log_items[] = {
    { CPU_LOG_TB_OUT_ASM, "out_asm",
      "show generated host assembly code for each compiled TB" },
    { CPU_LOG_TB_IN_ASM, "in_asm",
      "show target assembly code for each compiled TB" },
    { CPU_LOG_TB_OP, "op",
      "show micro ops for each compiled TB" },
    { CPU_LOG_TB_OP_OPT, "op_opt",
      "show micro ops "
#ifdef TARGET_I386
      "before eflags optimization and "
#endif
      "after liveness analysis" },
    { CPU_LOG_INT, "int",
      "show interrupts/exceptions in short format" },
    { CPU_LOG_EXEC, "exec",
      "show trace before each executed TB (lots of logs)" },
    { CPU_LOG_TB_CPU, "cpu",
      "show CPU state before block translation" },
#ifdef TARGET_I386
    { CPU_LOG_PCALL, "pcall",
      "show protected mode far calls/returns/exceptions" },
    { CPU_LOG_RESET, "cpu_reset",
      "show CPU state before CPU resets" },
#endif
#ifdef DEBUG_IOPORT
    { CPU_LOG_IOPORT, "ioport",
      "show all i/o port accesses" },
#endif
    { 0, NULL, NULL },
};

#ifndef CONFIG_USER_ONLY
static QLIST_HEAD(memory_client_list, CPUPhysMemoryClient) memory_client_list
    = QLIST_HEAD_INITIALIZER(memory_client_list);

static void cpu_notify_set_memory(target_phys_addr_t start_addr,
                                  ram_addr_t size,
                                  ram_addr_t phys_offset,
                                  bool log_dirty)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        client->set_memory(client, start_addr, size, phys_offset, log_dirty);
    }
}

static int cpu_notify_sync_dirty_bitmap(target_phys_addr_t start,
                                        target_phys_addr_t end)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->sync_dirty_bitmap(client, start, end);
        if (r < 0)
            return r;
    }
    return 0;
}

static int cpu_notify_migration_log(int enable)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        int r = client->migration_log(client, enable);
        if (r < 0)
            return r;
    }
    return 0;
}

struct last_map {
    target_phys_addr_t start_addr;
    ram_addr_t size;
    ram_addr_t phys_offset;
};

/* The l1_phys_map provides the upper P_L1_BITs of the guest physical
 * address.  Each intermediate table provides the next L2_BITs of guest
 * physical address space.  The number of levels varies based on host and
 * guest configuration, making it efficient to build the final guest
 * physical address by seeding the L1 offset and shifting and adding in
 * each L2 offset as we recurse through them. */
static void phys_page_for_each_1(CPUPhysMemoryClient *client, int level,
                                 void **lp, target_phys_addr_t addr,
                                 struct last_map *map)
{
    int i;

    if (*lp == NULL) {
        return;
    }
    if (level == 0) {
        PhysPageDesc *pd = *lp;
        addr <<= L2_BITS + TARGET_PAGE_BITS;
        for (i = 0; i < L2_SIZE; ++i) {
            if (pd[i].phys_offset != IO_MEM_UNASSIGNED) {
                target_phys_addr_t start_addr = addr | i << TARGET_PAGE_BITS;

                if (map->size &&
                    start_addr == map->start_addr + map->size &&
                    pd[i].phys_offset == map->phys_offset + map->size) {

                    map->size += TARGET_PAGE_SIZE;
                    continue;
                } else if (map->size) {
                    client->set_memory(client, map->start_addr,
                                       map->size, map->phys_offset, false);
                }

                map->start_addr = start_addr;
                map->size = TARGET_PAGE_SIZE;
                map->phys_offset = pd[i].phys_offset;
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            phys_page_for_each_1(client, level - 1, pp + i,
                                 (addr << L2_BITS) | i, map);
        }
    }
}

static void phys_page_for_each(CPUPhysMemoryClient *client)
{
    int i;
    struct last_map map = { };

    for (i = 0; i < P_L1_SIZE; ++i) {
        phys_page_for_each_1(client, P_L1_SHIFT / L2_BITS - 1,
                             l1_phys_map + i, i, &map);
    }
    if (map.size) {
        client->set_memory(client, map.start_addr, map.size, map.phys_offset,
                           false);
    }
}
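
/* The last_map accumulator above exists purely to batch the callbacks:
   physically contiguous pages whose phys_offsets are also contiguous
   get merged into a single set_memory() call instead of one call per
   TARGET_PAGE_SIZE page, which matters when replaying a large guest RAM
   layout to a newly registered client. */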

void cpu_register_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_INSERT_HEAD(&memory_client_list, client, list);
    phys_page_for_each(client);
}

void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *client)
{
    QLIST_REMOVE(client, list);
}
#endif

static int cmp1(const char *s1, int n, const char *s2)
{
    if (strlen(s2) != n)
        return 0;
    return memcmp(s1, s2, n) == 0;
}

/* Takes a comma-separated list of log masks. Returns 0 on error. */
int cpu_str_to_log_mask(const char *str)
{
    const CPULogItem *item;
    int mask;
    const char *p, *p1;

    p = str;
    mask = 0;
    for(;;) {
        p1 = strchr(p, ',');
        if (!p1)
            p1 = p + strlen(p);
        if (cmp1(p, p1 - p, "all")) {
            for (item = cpu_log_items; item->mask != 0; item++) {
                mask |= item->mask;
            }
        } else {
            for (item = cpu_log_items; item->mask != 0; item++) {
                if (cmp1(p, p1 - p, item->name))
                    goto found;
            }
            return 0;
        }
    found:
        mask |= item->mask;
        if (*p1 != ',')
            break;
        p = p1 + 1;
    }
    return mask;
}
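
/* Example: cpu_str_to_log_mask("in_asm,op") returns
   CPU_LOG_TB_IN_ASM | CPU_LOG_TB_OP, "all" ORs in every entry of
   cpu_log_items[], and any unrecognised name makes the whole call
   return 0. */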

void cpu_abort(CPUState *env, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
#ifdef TARGET_I386
    cpu_dump_state(env, stderr, fprintf, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
    cpu_dump_state(env, stderr, fprintf, 0);
#endif
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
#ifdef TARGET_I386
        log_cpu_state(env, X86_DUMP_FPU | X86_DUMP_CCOP);
#else
        log_cpu_state(env, 0);
#endif
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

CPUState *cpu_copy(CPUState *env)
{
    CPUState *new_env = cpu_init(env->cpu_model_str);
    CPUState *next_cpu = new_env->next_cpu;
    int cpu_index = new_env->cpu_index;
#if defined(TARGET_HAS_ICE)
    CPUBreakpoint *bp;
    CPUWatchpoint *wp;
#endif

    memcpy(new_env, env, sizeof(CPUState));

    /* Preserve chaining and index. */
    new_env->next_cpu = next_cpu;
    new_env->cpu_index = cpu_index;

    /* Clone all break/watchpoints.
       Note: Once we support ptrace with hw-debug register access, make sure
       BP_CPU break/watchpoints are handled correctly on clone. */
    QTAILQ_INIT(&env->breakpoints);
    QTAILQ_INIT(&env->watchpoints);
#if defined(TARGET_HAS_ICE)
    QTAILQ_FOREACH(bp, &env->breakpoints, entry) {
        cpu_breakpoint_insert(new_env, bp->pc, bp->flags, NULL);
    }
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        cpu_watchpoint_insert(new_env, wp->vaddr, (~wp->len_mask) + 1,
                              wp->flags, NULL);
    }
#endif

    return new_env;
}

#if !defined(CONFIG_USER_ONLY)

static inline void tlb_flush_jmp_cache(CPUState *env, target_ulong addr)
{
    unsigned int i;

    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    i = tb_jmp_cache_hash_page(addr - TARGET_PAGE_SIZE);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));

    i = tb_jmp_cache_hash_page(addr);
    memset(&env->tb_jmp_cache[i], 0,
           TB_JMP_PAGE_SIZE * sizeof(TranslationBlock *));
}

static CPUTLBEntry s_cputlb_empty_entry = {
    .addr_read  = -1,
    .addr_write = -1,
    .addr_code  = -1,
    .addend     = -1,
};

/* NOTE: if flush_global is true, also flush global entries (not
   implemented yet) */
void tlb_flush(CPUState *env, int flush_global)
{
    int i;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    for(i = 0; i < CPU_TLB_SIZE; i++) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            env->tlb_table[mmu_idx][i] = s_cputlb_empty_entry;
        }
    }

    memset(env->tb_jmp_cache, 0, TB_JMP_CACHE_SIZE * sizeof (void *));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
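
/* A full flush simply rewrites every entry of every MMU mode with
   s_cputlb_empty_entry (all fields -1, which can never match a
   page-aligned address comparison) and clears the TB jump cache;
   nothing is freed, so the cost is a bounded memset-like sweep. */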

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        *tlb_entry = s_cputlb_empty_entry;
    }
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    env->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);

    tlb_flush_jmp_cache(env, addr);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
static void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    CODE_DIRTY_FLAG);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
                                    target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}

static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
                                         unsigned long start, unsigned long length)
{
    unsigned long addr;
    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write = (tlb_entry->addr_write & TARGET_PAGE_MASK) | TLB_NOTDIRTY;
        }
    }
}

/* Note: start and end must be within the same ram block. */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags)
{
    CPUState *env;
    unsigned long length, start1;
    int i;

    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    length = end - start;
    if (length == 0)
        return;
    cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);

    /* we modify the TLB cache so that the dirty bit will be set again
       when accessing the range */
    start1 = (unsigned long)qemu_safe_ram_ptr(start);
    /* Check that we don't span multiple blocks - this breaks the
       address comparisons below.  */
    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
        != (end - 1) - start) {
        abort();
    }

    for(env = first_cpu; env != NULL; env = env->next_cpu) {
        int mmu_idx;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            for(i = 0; i < CPU_TLB_SIZE; i++)
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
        }
    }
}
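
/* After the dirty bits are cleared, every TLB write entry covering
   [start, end) is re-armed with TLB_NOTDIRTY, so the next guest store
   to those pages takes the slow path and can set the dirty flags again.
   start1 is the host address of the range: tlb_entry->addend converts a
   guest virtual page address to a host address, which is why the range
   comparison in tlb_reset_dirty_range() is done in host terms. */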

int cpu_physical_memory_set_dirty_tracking(int enable)
{
    int ret = 0;
    in_migration = enable;
    ret = cpu_notify_migration_log(!!enable);
    return ret;
}

int cpu_physical_memory_get_dirty_tracking(void)
{
    return in_migration;
}

int cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr,
                                   target_phys_addr_t end_addr)
{
    int ret;

    ret = cpu_notify_sync_dirty_bitmap(start_addr, end_addr);
    return ret;
}

int cpu_physical_log_start(target_phys_addr_t start_addr,
                           ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_start) {
            int r = client->log_start(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

int cpu_physical_log_stop(target_phys_addr_t start_addr,
                          ram_addr_t size)
{
    CPUPhysMemoryClient *client;
    QLIST_FOREACH(client, &memory_client_list, list) {
        if (client->log_stop) {
            int r = client->log_stop(client, start_addr, size);
            if (r < 0) {
                return r;
            }
        }
    }
    return 0;
}

static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
{
    ram_addr_t ram_addr;
    void *p;

    if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
        p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
            + tlb_entry->addend);
        ram_addr = qemu_ram_addr_from_host_nofail(p);
        if (!cpu_physical_memory_is_dirty(ram_addr)) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

/* update the TLB according to the current state of the dirty bits */
void cpu_tlb_update_dirty(CPUState *env)
{
    int i;
    int mmu_idx;
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        for(i = 0; i < CPU_TLB_SIZE; i++)
            tlb_update_dirty(&env->tlb_table[mmu_idx][i]);
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY))
        tlb_entry->addr_write = vaddr;
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
static inline void tlb_set_dirty(CPUState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++)
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
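
/* Worked example (hypothetical numbers): after a 2 MB page is entered
   at vaddr 0x00400000, tlb_flush_addr = 0x00400000 and tlb_flush_mask =
   ~0x1fffff.  If a 4 MB page at 0x00800000 follows, the loop widens the
   mask until both regions share a common prefix, ending with mask
   ~0x00ffffff and addr 0x00000000: one 16 MB region covering both
   large pages, at the cost of occasional spurious full flushes. */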

/* Add a new TLB entry. At most one entry for a given virtual address
   is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
   supplied size is only used by tlb_flush_page. */
void tlb_set_page(CPUState *env, target_ulong vaddr,
                  target_phys_addr_t paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    PhysPageDesc *p;
    unsigned long pd;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    unsigned long addend;
    CPUTLBEntry *te;
    CPUWatchpoint *wp;
    target_phys_addr_t iotlb;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }
    p = phys_page_find(paddr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }
#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d pd=0x%08lx\n",
           vaddr, paddr, prot, mmu_idx, pd);
#endif

    address = vaddr;
    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM && !(pd & IO_MEM_ROMD)) {
        /* IO memory case (romd handled later) */
        address |= TLB_MMIO;
    }
    addend = (unsigned long)qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
    if ((pd & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
        /* Normal RAM. */
        iotlb = pd & TARGET_PAGE_MASK;
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
            iotlb |= IO_MEM_NOTDIRTY;
        else
            iotlb |= IO_MEM_ROM;
    } else {
        /* IO handlers are currently passed a physical address.
           It would be nice to pass an offset from the base address
           of that region.  This would avoid having to special case RAM,
           and avoid full address decoding in every device.
           We can't use the high bits of pd for this because
           IO_MEM_ROMD uses these as a ram address. */
        iotlb = (pd & ~TARGET_PAGE_MASK);
        if (p) {
            iotlb += p->region_offset;
        } else {
            iotlb += paddr;
        }
    }

    code_address = address;
    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines. */
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if (vaddr == (wp->vaddr & TARGET_PAGE_MASK)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = io_mem_watch + paddr;
                address |= TLB_MMIO;
                break;
            }
        }
    }

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_ROM ||
            (pd & IO_MEM_ROMD)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if ((pd & ~TARGET_PAGE_MASK) == IO_MEM_RAM &&
                   !cpu_physical_memory_is_dirty(pd)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
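
/* Summary of the entry just built: addr_read/addr_write/addr_code hold
   the virtual page number plus flag bits in the sub-page bits --
   TLB_MMIO routes the access to an I/O handler, TLB_NOTDIRTY forces RAM
   writes through the slow path until the page is marked dirty -- while
   iotlb holds either the ram_addr (ORed with IO_MEM_NOTDIRTY or
   IO_MEM_ROM) or the I/O region index with its region_offset folded in.
   Both iotlb and addend are stored biased by -vaddr so the fast path
   can simply add the faulting virtual address back. */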

#else

void tlb_flush(CPUState *env, int flush_global)
{
}

void tlb_flush_page(CPUState *env, target_ulong addr)
{
}

/*
 * Walks guest process memory "regions" one by one
 * and calls callback function 'fn' for each region.
 */

struct walk_memory_regions_data
{
    walk_memory_regions_fn fn;
    void *priv;
    unsigned long start;
    int prot;
};

static int walk_memory_regions_end(struct walk_memory_regions_data *data,
                                   abi_ulong end, int new_prot)
{
    if (data->start != -1ul) {
        int rc = data->fn(data->priv, data->start, end, data->prot);
        if (rc != 0) {
            return rc;
        }
    }

    data->start = (new_prot ? end : -1ul);
    data->prot = new_prot;

    return 0;
}

static int walk_memory_regions_1(struct walk_memory_regions_data *data,
                                 abi_ulong base, int level, void **lp)
{
    abi_ulong pa;
    int i, rc;

    if (*lp == NULL) {
        return walk_memory_regions_end(data, base, 0);
    }

    if (level == 0) {
        PageDesc *pd = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            int prot = pd[i].flags;

            pa = base | (i << TARGET_PAGE_BITS);
            if (prot != data->prot) {
                rc = walk_memory_regions_end(data, pa, prot);
                if (rc != 0) {
                    return rc;
                }
            }
        }
    } else {
        void **pp = *lp;
        for (i = 0; i < L2_SIZE; ++i) {
            pa = base | ((abi_ulong)i <<
                         (TARGET_PAGE_BITS + L2_BITS * level));
            rc = walk_memory_regions_1(data, pa, level - 1, pp + i);
            if (rc != 0) {
                return rc;
            }
        }
    }

    return 0;
}

int walk_memory_regions(void *priv, walk_memory_regions_fn fn)
{
    struct walk_memory_regions_data data;
    unsigned long i;

    data.fn = fn;
    data.priv = priv;
    data.start = -1ul;
    data.prot = 0;

    for (i = 0; i < V_L1_SIZE; i++) {
        int rc = walk_memory_regions_1(&data, (abi_ulong)i << V_L1_SHIFT,
                                       V_L1_SHIFT / L2_BITS - 1, l1_map + i);
        if (rc != 0) {
            return rc;
        }
    }

    return walk_memory_regions_end(&data, 0, 0);
}

static int dump_region(void *priv, abi_ulong start,
                       abi_ulong end, unsigned long prot)
{
    FILE *f = (FILE *)priv;

    (void) fprintf(f, TARGET_ABI_FMT_lx"-"TARGET_ABI_FMT_lx
                   " "TARGET_ABI_FMT_lx" %c%c%c\n",
                   start, end, end - start,
                   ((prot & PAGE_READ) ? 'r' : '-'),
                   ((prot & PAGE_WRITE) ? 'w' : '-'),
                   ((prot & PAGE_EXEC) ? 'x' : '-'));

    return (0);
}

/* dump memory mappings */
void page_dump(FILE *f)
{
    (void) fprintf(f, "%-8s %-8s %-8s %s\n",
                   "start", "end", "size", "prot");
    walk_memory_regions(f, dump_region);
}
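
/* The resulting listing is one region per line; with a hypothetical
   guest mapping it might read:

       start    end      size     prot
       00010000-00090000 00080000 r-x
       00090000-000a0000 00010000 rw-

   (addresses illustrative; field widths follow TARGET_ABI_FMT_lx). */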

int page_get_flags(target_ulong address)
{
    PageDesc *p;

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p)
        return 0;
    return p->flags;
}

/* Modify the flags of a page and invalidate the code if necessary.
   The flag PAGE_WRITE_ORG is positioned automatically depending
   on PAGE_WRITE.  The mmap_lock should already be held. */
void page_set_flags(target_ulong start, target_ulong end, int flags)
{
    target_ulong addr, len;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(end < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif
    assert(start < end);

    start = start & TARGET_PAGE_MASK;
    end = TARGET_PAGE_ALIGN(end);

    if (flags & PAGE_WRITE) {
        flags |= PAGE_WRITE_ORG;
    }

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        PageDesc *p = page_find_alloc(addr >> TARGET_PAGE_BITS, 1);

        /* If the write protection bit is set, then we invalidate
           the code inside. */
        if (!(p->flags & PAGE_WRITE) &&
            (flags & PAGE_WRITE) &&
            p->first_tb) {
            tb_invalidate_phys_page(addr, 0, NULL);
        }
        p->flags = flags;
    }
}

int page_check_range(target_ulong start, target_ulong len, int flags)
{
    PageDesc *p;
    target_ulong end;
    target_ulong addr;

    /* This function should never be called with addresses outside the
       guest address space.  If this assert fires, it probably indicates
       a missing call to h2g_valid.  */
#if TARGET_ABI_BITS > L1_MAP_ADDR_SPACE_BITS
    assert(start < ((abi_ulong)1 << L1_MAP_ADDR_SPACE_BITS));
#endif

    if (len == 0) {
        return 0;
    }
    if (start + len - 1 < start) {
        /* We've wrapped around. */
        return -1;
    }

    /* must do this before we lose bits in the next step */
    end = TARGET_PAGE_ALIGN(start + len);
    start = start & TARGET_PAGE_MASK;

    for (addr = start, len = end - start;
         len != 0;
         len -= TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
        p = page_find(addr >> TARGET_PAGE_BITS);
        if (!p)
            return -1;
        if (!(p->flags & PAGE_VALID))
            return -1;

        if ((flags & PAGE_READ) && !(p->flags & PAGE_READ))
            return -1;
        if (flags & PAGE_WRITE) {
            if (!(p->flags & PAGE_WRITE_ORG))
                return -1;
            /* unprotect the page if it was put read-only because it
               contains translated code */
            if (!(p->flags & PAGE_WRITE)) {
                if (!page_unprotect(addr, 0, NULL))
                    return -1;
            }
            return 0;
        }
    }
    return 0;
}

/* called from signal handler: invalidate the code and unprotect the
   page. Return TRUE if the fault was successfully handled. */
int page_unprotect(target_ulong address, unsigned long pc, void *puc)
{
    unsigned int prot;
    PageDesc *p;
    target_ulong host_start, host_end, addr;

    /* Technically this isn't safe inside a signal handler.  However we
       know this only ever happens in a synchronous SEGV handler, so in
       practice it seems to be ok.  */
    mmap_lock();

    p = page_find(address >> TARGET_PAGE_BITS);
    if (!p) {
        mmap_unlock();
        return 0;
    }

    /* if the page was really writable, then we change its
       protection back to writable */
    if ((p->flags & PAGE_WRITE_ORG) && !(p->flags & PAGE_WRITE)) {
        host_start = address & qemu_host_page_mask;
        host_end = host_start + qemu_host_page_size;

        prot = 0;
        for (addr = host_start; addr < host_end; addr += TARGET_PAGE_SIZE) {
            p = page_find(addr >> TARGET_PAGE_BITS);
            p->flags |= PAGE_WRITE;
            prot |= p->flags;

            /* and since the content will be modified, we must invalidate
               the corresponding translated code. */
            tb_invalidate_phys_page(addr, pc, puc);
#ifdef DEBUG_TB_CHECK
            tb_invalidate_check(addr);
#endif
        }
        mprotect((void *)g2h(host_start), qemu_host_page_size,
                 prot & PAGE_BITS);

        mmap_unlock();
        return 1;
    }
    mmap_unlock();
    return 0;
}
2613
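/* Illustrative sketch (assumption, not code from this file): the shape of
   the host SIGSEGV path that ends up here.  In the real tree this logic
   lives in the per-host signal code; the simplified handler below only
   shows the page_unprotect() contract. */
#if 0
static void example_segv_handler(int sig, siginfo_t *info, void *puc)
{
    unsigned long host_addr = (unsigned long)info->si_addr;
    unsigned long pc = 0; /* would be taken from the signal context */

    /* If the fault hit a page we write-protected to guard translated
       code, page_unprotect() flushes the TBs and restores PROT_WRITE;
       returning lets the faulting store retry and succeed. */
    if (page_unprotect(h2g(host_addr), pc, puc)) {
        return;
    }
    /* Otherwise it is a real guest fault and must be forwarded. */
    abort();
}
#endif
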
static inline void tlb_set_dirty(CPUState *env,
                                 unsigned long addr, target_ulong vaddr)
{
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    target_phys_addr_t base;
    ram_addr_t sub_io_index[TARGET_PAGE_SIZE];
    ram_addr_t region_offset[TARGET_PAGE_SIZE];
} subpage_t;

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             ram_addr_t memory, ram_addr_t region_offset);
static subpage_t *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
                                ram_addr_t orig_memory,
                                ram_addr_t region_offset);
#define CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2, \
                      need_subpage)                                     \
    do {                                                                \
        if (addr > start_addr)                                          \
            start_addr2 = 0;                                            \
        else {                                                          \
            start_addr2 = start_addr & ~TARGET_PAGE_MASK;               \
            if (start_addr2 > 0)                                        \
                need_subpage = 1;                                       \
        }                                                               \
                                                                        \
        if ((start_addr + orig_size) - addr >= TARGET_PAGE_SIZE)        \
            end_addr2 = TARGET_PAGE_SIZE - 1;                           \
        else {                                                          \
            end_addr2 = (start_addr + orig_size - 1) & ~TARGET_PAGE_MASK; \
            if (end_addr2 < TARGET_PAGE_SIZE - 1)                       \
                need_subpage = 1;                                       \
        }                                                               \
    } while (0)

/* register physical memory.
   For RAM, 'size' must be a multiple of the target page size.
   If (phys_offset & ~TARGET_PAGE_MASK) != 0, then it is an
   io memory page.  The address used when calling the IO function is
   the offset from the start of the region, plus region_offset.  Both
   start_addr and region_offset are rounded down to a page boundary
   before calculating this offset.  This should not be a problem unless
   the low bits of start_addr and region_offset differ. */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty)
{
    target_phys_addr_t addr, end_addr;
    PhysPageDesc *p;
    CPUState *env;
    ram_addr_t orig_size = size;
    subpage_t *subpage;

    assert(size);
    cpu_notify_set_memory(start_addr, size, phys_offset, log_dirty);

    if (phys_offset == IO_MEM_UNASSIGNED) {
        region_offset = start_addr;
    }
    region_offset &= TARGET_PAGE_MASK;
    size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
    end_addr = start_addr + (target_phys_addr_t)size;

    addr = start_addr;
    do {
        p = phys_page_find(addr >> TARGET_PAGE_BITS);
        if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
            ram_addr_t orig_memory = p->phys_offset;
            target_phys_addr_t start_addr2, end_addr2;
            int need_subpage = 0;

            CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
                          need_subpage);
            if (need_subpage) {
                if (!(orig_memory & IO_MEM_SUBPAGE)) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, orig_memory,
                                           p->region_offset);
                } else {
                    subpage = io_mem_opaque[(orig_memory & ~TARGET_PAGE_MASK)
                                            >> IO_MEM_SHIFT];
                }
                subpage_register(subpage, start_addr2, end_addr2, phys_offset,
                                 region_offset);
                p->region_offset = 0;
            } else {
                p->phys_offset = phys_offset;
                if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                    (phys_offset & IO_MEM_ROMD))
                    phys_offset += TARGET_PAGE_SIZE;
            }
        } else {
            p = phys_page_find_alloc(addr >> TARGET_PAGE_BITS, 1);
            p->phys_offset = phys_offset;
            p->region_offset = region_offset;
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM ||
                (phys_offset & IO_MEM_ROMD)) {
                phys_offset += TARGET_PAGE_SIZE;
            } else {
                target_phys_addr_t start_addr2, end_addr2;
                int need_subpage = 0;

                CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
                              end_addr2, need_subpage);

                if (need_subpage) {
                    subpage = subpage_init((addr & TARGET_PAGE_MASK),
                                           &p->phys_offset, IO_MEM_UNASSIGNED,
                                           addr & TARGET_PAGE_MASK);
                    subpage_register(subpage, start_addr2, end_addr2,
                                     phys_offset, region_offset);
                    p->region_offset = 0;
                }
            }
        }
        region_offset += TARGET_PAGE_SIZE;
        addr += TARGET_PAGE_SIZE;
    } while (addr != end_addr);

    /* since each CPU stores ram addresses in its TLB cache, we must
       reset the modified entries */
    /* XXX: slow ! */
    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        tlb_flush(env, 1);
    }
}

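/* Illustrative sketch (assumption): a board model wiring RAM and a ROM
   page through the cpu_register_physical_memory() wrapper, which in this
   era forwards to cpu_register_physical_memory_log() with dirty logging
   disabled.  The addresses and sizes are invented for the example. */
#if 0
static void example_board_map(void)
{
    ram_addr_t ram = qemu_ram_alloc(NULL, "example.ram", 0x100000);

    /* Plain RAM: the low bits of phys_offset are zero, so accesses take
       the fast RAM path rather than an I/O callback. */
    cpu_register_physical_memory(0x00000000, 0x100000, ram | IO_MEM_RAM);

    /* ROM: same backing storage idea, but guest writes are discarded. */
    cpu_register_physical_memory(0x10000000, TARGET_PAGE_SIZE,
                                 ram | IO_MEM_ROM);
}
#endif
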
/* XXX: temporary until new memory mapping API */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr)
{
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p)
        return IO_MEM_UNASSIGNED;
    return p->phys_offset;
}

void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_coalesce_mmio_region(addr, size);
}

void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
{
    if (kvm_enabled())
        kvm_uncoalesce_mmio_region(addr, size);
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

#if defined(__linux__) && !defined(TARGET_S390X)

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

static long gethugepagesize(const char *path)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        perror(path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

static void *file_ram_alloc(RAMBlock *block,
                            ram_addr_t memory,
                            const char *path)
{
    char *filename;
    void *area;
    int fd;
#ifdef MAP_POPULATE
    int flags;
#endif
    unsigned long hpagesize;

    hpagesize = gethugepagesize(path);
    if (!hpagesize) {
        return NULL;
    }

    if (memory < hpagesize) {
        return NULL;
    }

    if (kvm_enabled() && !kvm_has_sync_mmu()) {
        fprintf(stderr, "host lacks kvm mmu notifiers, -mem-path unsupported\n");
        return NULL;
    }

    if (asprintf(&filename, "%s/qemu_back_mem.XXXXXX", path) == -1) {
        return NULL;
    }

    fd = mkstemp(filename);
    if (fd < 0) {
        perror("unable to create backing store for hugepages");
        free(filename);
        return NULL;
    }
    unlink(filename);
    free(filename);

    memory = (memory + hpagesize - 1) & ~(hpagesize - 1);

    /*
     * ftruncate is not supported by hugetlbfs in older
     * hosts, so don't bother bailing out on errors.
     * If anything goes wrong with it under other filesystems,
     * mmap will fail.
     */
    if (ftruncate(fd, memory))
        perror("ftruncate");

#ifdef MAP_POPULATE
    /* NB: MAP_POPULATE won't exhaustively alloc all phys pages in the case
     * MAP_PRIVATE is requested.  For mem_prealloc we mmap as MAP_SHARED
     * to sidestep this quirk.
     */
    flags = mem_prealloc ? MAP_POPULATE | MAP_SHARED : MAP_PRIVATE;
    area = mmap(0, memory, PROT_READ | PROT_WRITE, flags, fd, 0);
#else
    area = mmap(0, memory, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
    if (area == MAP_FAILED) {
        perror("file_ram_alloc: can't mmap RAM pages");
        close(fd);
        return NULL;
    }
    block->fd = fd;
    return area;
}
#endif

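/* Illustrative sketch (assumption): the rounding that file_ram_alloc()
   applies before mapping, shown with concrete numbers.  The sizes below
   are examples only. */
#if 0
static void example_hugepage_rounding(void)
{
    unsigned long hpagesize = 2 * 1024 * 1024;  /* typical 2 MB huge page */
    ram_addr_t request = 3 * 1024 * 1024;       /* guest asks for 3 MB */

    /* Same mask trick as file_ram_alloc(): rounds 3 MB up to 4 MB so the
       mapping covers a whole number of huge pages. */
    ram_addr_t mapped = (request + hpagesize - 1) & ~(hpagesize - 1);

    assert(mapped == 4 * 1024 * 1024);
}
#endif
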
static ram_addr_t find_ram_offset(ram_addr_t size)
{
    RAMBlock *block, *next_block;
    ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;

    if (QLIST_EMPTY(&ram_list.blocks))
        return 0;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        ram_addr_t end, next = RAM_ADDR_MAX;

        end = block->offset + block->length;

        QLIST_FOREACH(next_block, &ram_list.blocks, next) {
            if (next_block->offset >= end) {
                next = MIN(next, next_block->offset);
            }
        }
        if (next - end >= size && next - end < mingap) {
            offset = end;
            mingap = next - end;
        }
    }

    if (offset == RAM_ADDR_MAX) {
        fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
                (uint64_t)size);
        abort();
    }

    return offset;
}

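/* Illustrative sketch (assumption): find_ram_offset() is a best-fit scan
   over the gaps between registered RAM blocks.  With blocks at
   [0, 0x1000) and [0x3000, 0x4000), a 0x1000-byte request lands at
   offset 0x1000, the start of the smallest gap that still fits. */
#if 0
static void example_best_fit(void)
{
    /* layout:  block A      hole            block B
     *          [0..0x1000)  [0x1000..0x3000)  [0x3000..0x4000) */
    ram_addr_t offset = find_ram_offset(0x1000);
    assert(offset == 0x1000);
}
#endif
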
static ram_addr_t last_ram_offset(void)
{
    RAMBlock *block;
    ram_addr_t last = 0;

    QLIST_FOREACH(block, &ram_list.blocks, next)
        last = MAX(last, block->offset + block->length);

    return last;
}

ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host)
{
    RAMBlock *new_block, *block;

    size = TARGET_PAGE_ALIGN(size);
    new_block = g_malloc0(sizeof(*new_block));

    if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
        char *id = dev->parent_bus->info->get_dev_path(dev);
        if (id) {
            snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
            g_free(id);
        }
    }
    pstrcat(new_block->idstr, sizeof(new_block->idstr), name);

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (!strcmp(block->idstr, new_block->idstr)) {
            fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
                    new_block->idstr);
            abort();
        }
    }

    new_block->offset = find_ram_offset(size);
    if (host) {
        new_block->host = host;
        new_block->flags |= RAM_PREALLOC_MASK;
    } else {
        if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
            new_block->host = file_ram_alloc(new_block, size, mem_path);
            if (!new_block->host) {
                new_block->host = qemu_vmalloc(size);
                qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
            }
#else
            fprintf(stderr, "-mem-path option unsupported\n");
            exit(1);
#endif
        } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
            /* S390 KVM requires the topmost vma of the RAM to be smaller than
               a system-defined value, which is at least 256GB.  Larger systems
               have larger values.  We put the guest between the end of the
               data segment (system break) and this value.  We use 32GB as a
               base to have enough room for the system break to grow. */
            new_block->host = mmap((void*)0x800000000, size,
                                   PROT_EXEC|PROT_READ|PROT_WRITE,
                                   MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
            if (new_block->host == MAP_FAILED) {
                fprintf(stderr, "Allocating RAM failed\n");
                abort();
            }
#else
            if (xen_enabled()) {
                xen_ram_alloc(new_block->offset, size);
            } else {
                new_block->host = qemu_vmalloc(size);
            }
#endif
            qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
        }
    }
    new_block->length = size;

    QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);

    ram_list.phys_dirty = g_realloc(ram_list.phys_dirty,
                                    last_ram_offset() >> TARGET_PAGE_BITS);
    memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
           0xff, size >> TARGET_PAGE_BITS);

    if (kvm_enabled())
        kvm_setup_guest_memory(new_block->host, size);

    return new_block->offset;
}

ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
{
    return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
}

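/* Illustrative sketch (assumption): a device model allocating its video
   RAM.  Passing the DeviceState makes the RAMBlock idstr unique per
   device instance, which is what migration keys on.  The name and size
   are invented for the example. */
#if 0
static ram_addr_t example_vga_init(DeviceState *dev)
{
    /* 8 MB framebuffer backed by a fresh host allocation. */
    return qemu_ram_alloc(dev, "vga.vram", 8 * 1024 * 1024);
}
#endif
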
void qemu_ram_free_from_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            g_free(block);
            return;
        }
    }
}

void qemu_ram_free(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr == block->offset) {
            QLIST_REMOVE(block, next);
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else if (mem_path) {
#if defined (__linux__) && !defined(TARGET_S390X)
                if (block->fd) {
                    munmap(block->host, block->length);
                    close(block->fd);
                } else {
                    qemu_vfree(block->host);
                }
#else
                abort();
#endif
            } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                munmap(block->host, block->length);
#else
                if (xen_enabled()) {
                    xen_invalidate_map_cache_entry(block->host);
                } else {
                    qemu_vfree(block->host);
                }
#endif
            }
            g_free(block);
            return;
        }
    }
}

#ifndef _WIN32
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
{
    RAMBlock *block;
    ram_addr_t offset;
    int flags;
    void *area, *vaddr;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        offset = addr - block->offset;
        if (offset < block->length) {
            vaddr = block->host + offset;
            if (block->flags & RAM_PREALLOC_MASK) {
                ;
            } else {
                flags = MAP_FIXED;
                munmap(vaddr, length);
                if (mem_path) {
#if defined(__linux__) && !defined(TARGET_S390X)
                    if (block->fd) {
#ifdef MAP_POPULATE
                        flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
                            MAP_PRIVATE;
#else
                        flags |= MAP_PRIVATE;
#endif
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, block->fd, offset);
                    } else {
                        flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                        area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                    flags, -1, 0);
                    }
#else
                    abort();
#endif
                } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
                    flags |= MAP_SHARED | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
                                flags, -1, 0);
#else
                    flags |= MAP_PRIVATE | MAP_ANONYMOUS;
                    area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
                                flags, -1, 0);
#endif
                }
                if (area != vaddr) {
                    fprintf(stderr, "Could not remap addr: "
                            RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
                            length, addr);
                    exit(1);
                }
                qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
            }
            return;
        }
    }
}
#endif /* !_WIN32 */

/* Return a host pointer to ram allocated with qemu_ram_alloc.
   With the exception of the softmmu code in this file, this should
   only be used for local memory (e.g. video ram) that the device owns,
   and knows it isn't going to access beyond the end of the block.

   It should not be used for general purpose DMA.
   Use cpu_physical_memory_map/cpu_physical_memory_rw instead.
 */
void *qemu_get_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            /* Move this entry to the start of the list. */
            if (block != QLIST_FIRST(&ram_list.blocks)) {
                QLIST_REMOVE(block, next);
                QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
            }
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

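/* Illustrative sketch (assumption): the intended use is device-local
   memory, e.g. a display update reading its own framebuffer.  DMA into
   guest memory should go through cpu_physical_memory_map()/..._rw()
   instead, as the comment above says.  render_scanlines() is a
   hypothetical renderer. */
#if 0
static void example_display_update(ram_addr_t vram_offset, int vram_size)
{
    /* Fine: the device owns this block and stays inside it. */
    uint8_t *fb = qemu_get_ram_ptr(vram_offset);
    render_scanlines(fb, vram_size);
}
#endif
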
/* Return a host pointer to ram allocated with qemu_ram_alloc.
 * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
 */
void *qemu_safe_ram_ptr(ram_addr_t addr)
{
    RAMBlock *block;

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->length) {
            if (xen_enabled()) {
                /* We need to check if the requested address is in the RAM
                 * because we don't want to map the entire memory in QEMU.
                 * In that case just map until the end of the page.
                 */
                if (block->offset == 0) {
                    return xen_map_cache(addr, 0, 0);
                } else if (block->host == NULL) {
                    block->host =
                        xen_map_cache(block->offset, block->length, 1);
                }
            }
            return block->host + (addr - block->offset);
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

    return NULL;
}

/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
 * but takes a size argument */
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size)
{
    if (*size == 0) {
        return NULL;
    }
    if (xen_enabled()) {
        return xen_map_cache(addr, *size, 1);
    } else {
        RAMBlock *block;

        QLIST_FOREACH(block, &ram_list.blocks, next) {
            if (addr - block->offset < block->length) {
                if (addr - block->offset + *size > block->length)
                    *size = block->length - addr + block->offset;
                return block->host + (addr - block->offset);
            }
        }

        fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
        abort();
    }
}

void qemu_put_ram_ptr(void *addr)
{
    trace_qemu_put_ram_ptr(addr);
}

int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
    RAMBlock *block;
    uint8_t *host = ptr;

    if (xen_enabled()) {
        *ram_addr = xen_ram_addr_from_mapcache(ptr);
        return 0;
    }

    QLIST_FOREACH(block, &ram_list.blocks, next) {
        /* This case appears when the block is not mapped. */
        if (block->host == NULL) {
            continue;
        }
        if (host - block->host < block->length) {
            *ram_addr = block->offset + (host - block->host);
            return 0;
        }
    }

    return -1;
}

/* Some of the softmmu routines need to translate from a host pointer
   (typically a TLB entry) back to a ram offset. */
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 1);
#endif
    return 0;
}

static uint32_t unassigned_mem_readw(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 2);
#endif
    return 0;
}

static uint32_t unassigned_mem_readl(void *opaque, target_phys_addr_t addr)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 0, 0, 0, 4);
#endif
    return 0;
}

static void unassigned_mem_writeb(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 1);
#endif
}

static void unassigned_mem_writew(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 2);
#endif
}

static void unassigned_mem_writel(void *opaque, target_phys_addr_t addr, uint32_t val)
{
#ifdef DEBUG_UNASSIGNED
    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%x\n", addr, val);
#endif
#if defined(TARGET_ALPHA) || defined(TARGET_SPARC) || defined(TARGET_MICROBLAZE)
    cpu_unassigned_access(cpu_single_env, addr, 1, 0, 0, 4);
#endif
}

static CPUReadMemoryFunc * const unassigned_mem_read[3] = {
    unassigned_mem_readb,
    unassigned_mem_readw,
    unassigned_mem_readl,
};

static CPUWriteMemoryFunc * const unassigned_mem_write[3] = {
    unassigned_mem_writeb,
    unassigned_mem_writew,
    unassigned_mem_writel,
};

static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 1);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stb_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 2);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stw_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
                                uint32_t val)
{
    int dirty_flags;
    dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
    if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
        tb_invalidate_phys_page_fast(ram_addr, 4);
        dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
    }
    stl_p(qemu_get_ram_ptr(ram_addr), val);
    dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
    cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
    /* we remove the notdirty callback only if the code has been
       flushed */
    if (dirty_flags == 0xff)
        tlb_set_dirty(cpu_single_env, cpu_single_env->mem_io_vaddr);
}

static CPUReadMemoryFunc * const error_mem_read[3] = {
    NULL, /* never used */
    NULL, /* never used */
    NULL, /* never used */
};

static CPUWriteMemoryFunc * const notdirty_mem_write[3] = {
    notdirty_mem_writeb,
    notdirty_mem_writew,
    notdirty_mem_writel,
};

/* Generate a debug exception if a watchpoint has been hit. */
static void check_watchpoint(int offset, int len_mask, int flags)
{
    CPUState *env = cpu_single_env;
    target_ulong pc, cs_base;
    TranslationBlock *tb;
    target_ulong vaddr;
    CPUWatchpoint *wp;
    int cpu_flags;

    if (env->watchpoint_hit) {
        /* We re-entered the check after replacing the TB. Now raise
         * the debug interrupt so that it will trigger after the
         * current instruction. */
        cpu_interrupt(env, CPU_INTERRUPT_DEBUG);
        return;
    }
    vaddr = (env->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
    QTAILQ_FOREACH(wp, &env->watchpoints, entry) {
        if ((vaddr == (wp->vaddr & len_mask) ||
             (vaddr & wp->len_mask) == wp->vaddr) && (wp->flags & flags)) {
            wp->flags |= BP_WATCHPOINT_HIT;
            if (!env->watchpoint_hit) {
                env->watchpoint_hit = wp;
                tb = tb_find_pc(env->mem_io_pc);
                if (!tb) {
                    cpu_abort(env, "check_watchpoint: could not find TB for "
                              "pc=%p", (void *)env->mem_io_pc);
                }
                cpu_restore_state(tb, env, env->mem_io_pc);
                tb_phys_invalidate(tb, -1);
                if (wp->flags & BP_STOP_BEFORE_ACCESS) {
                    env->exception_index = EXCP_DEBUG;
                } else {
                    cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
                    tb_gen_code(env, pc, cs_base, cpu_flags, 1);
                }
                cpu_resume_from_signal(env, NULL);
            }
        } else {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }
}

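/* Illustrative sketch (assumption): a gdbstub-style caller inserting the
   watchpoint that check_watchpoint() later services.  The TLB trick means
   any access to the watched page is routed through the watch_mem_*
   handlers below. */
#if 0
static void example_set_write_watch(CPUState *env, target_ulong addr)
{
    CPUWatchpoint *wp;

    /* Watch 4 bytes for writes; BP_GDB marks it as debugger-owned. */
    if (cpu_watchpoint_insert(env, addr, 4, BP_GDB | BP_MEM_WRITE, &wp) < 0) {
        fprintf(stderr, "could not insert watchpoint\n");
    }
}
#endif
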
/* Watchpoint access routines.  Watchpoints are inserted using TLB tricks,
   so these check for a hit then pass through to the normal out-of-line
   phys routines. */
static uint32_t watch_mem_readb(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_READ);
    return ldub_phys(addr);
}

static uint32_t watch_mem_readw(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_READ);
    return lduw_phys(addr);
}

static uint32_t watch_mem_readl(void *opaque, target_phys_addr_t addr)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_READ);
    return ldl_phys(addr);
}

static void watch_mem_writeb(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x0, BP_MEM_WRITE);
    stb_phys(addr, val);
}

static void watch_mem_writew(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x1, BP_MEM_WRITE);
    stw_phys(addr, val);
}

static void watch_mem_writel(void *opaque, target_phys_addr_t addr,
                             uint32_t val)
{
    check_watchpoint(addr & ~TARGET_PAGE_MASK, ~0x3, BP_MEM_WRITE);
    stl_phys(addr, val);
}

static CPUReadMemoryFunc * const watch_mem_read[3] = {
    watch_mem_readb,
    watch_mem_readw,
    watch_mem_readl,
};

static CPUWriteMemoryFunc * const watch_mem_write[3] = {
    watch_mem_writeb,
    watch_mem_writew,
    watch_mem_writel,
};

static inline uint32_t subpage_readlen(subpage_t *mmio,
                                       target_phys_addr_t addr,
                                       unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d\n", __func__,
           mmio, len, addr, idx);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    return io_mem_read[idx][len](io_mem_opaque[idx], addr);
}

static inline void subpage_writelen(subpage_t *mmio, target_phys_addr_t addr,
                                    uint32_t value, unsigned int len)
{
    unsigned int idx = SUBPAGE_IDX(addr);
#if defined(DEBUG_SUBPAGE)
    printf("%s: subpage %p len %d addr " TARGET_FMT_plx " idx %d value %08x\n",
           __func__, mmio, len, addr, idx, value);
#endif

    addr += mmio->region_offset[idx];
    idx = mmio->sub_io_index[idx];
    io_mem_write[idx][len](io_mem_opaque[idx], addr, value);
}

static uint32_t subpage_readb(void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 0);
}

static void subpage_writeb(void *opaque, target_phys_addr_t addr,
                           uint32_t value)
{
    subpage_writelen(opaque, addr, value, 0);
}

static uint32_t subpage_readw(void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 1);
}

static void subpage_writew(void *opaque, target_phys_addr_t addr,
                           uint32_t value)
{
    subpage_writelen(opaque, addr, value, 1);
}

static uint32_t subpage_readl(void *opaque, target_phys_addr_t addr)
{
    return subpage_readlen(opaque, addr, 2);
}

static void subpage_writel(void *opaque, target_phys_addr_t addr,
                           uint32_t value)
{
    subpage_writelen(opaque, addr, value, 2);
}

static CPUReadMemoryFunc * const subpage_read[] = {
    &subpage_readb,
    &subpage_readw,
    &subpage_readl,
};

static CPUWriteMemoryFunc * const subpage_write[] = {
    &subpage_writeb,
    &subpage_writew,
    &subpage_writel,
};

static int subpage_register(subpage_t *mmio, uint32_t start, uint32_t end,
                            ram_addr_t memory, ram_addr_t region_offset)
{
    int idx, eidx;

    if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
        return -1;
    idx = SUBPAGE_IDX(start);
    eidx = SUBPAGE_IDX(end);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p start %08x end %08x idx %08x eidx %08x mem %ld\n", __func__,
           mmio, start, end, idx, eidx, memory);
#endif
    if ((memory & ~TARGET_PAGE_MASK) == IO_MEM_RAM)
        memory = IO_MEM_UNASSIGNED;
    memory = (memory >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
    for (; idx <= eidx; idx++) {
        mmio->sub_io_index[idx] = memory;
        mmio->region_offset[idx] = region_offset;
    }

    return 0;
}

static subpage_t *subpage_init(target_phys_addr_t base, ram_addr_t *phys,
                               ram_addr_t orig_memory,
                               ram_addr_t region_offset)
{
    subpage_t *mmio;
    int subpage_memory;

    mmio = g_malloc0(sizeof(subpage_t));

    mmio->base = base;
    subpage_memory = cpu_register_io_memory(subpage_read, subpage_write, mmio,
                                            DEVICE_NATIVE_ENDIAN);
#if defined(DEBUG_SUBPAGE)
    printf("%s: %p base " TARGET_FMT_plx " len %08x %d\n", __func__,
           mmio, base, TARGET_PAGE_SIZE, subpage_memory);
#endif
    *phys = subpage_memory | IO_MEM_SUBPAGE;
    subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, orig_memory, region_offset);

    return mmio;
}

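/* Illustrative sketch (assumption): why subpages exist.  Registering a
   small MMIO region that does not cover a whole target page forces the
   page to be split, so each offset within the page can resolve to its
   own handler.  The address and size are invented. */
#if 0
static void example_subpage_split(int small_mmio_index)
{
    /* A 0x100-byte device at a non page-aligned physical address: the
       containing page keeps its old mapping everywhere except these
       0x100 offsets, which subpage_register() redirects to the device. */
    cpu_register_physical_memory(0x10000400, 0x100, small_mmio_index);
}
#endif
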
static int get_free_io_mem_idx(void)
{
    int i;

    for (i = 0; i < IO_MEM_NB_ENTRIES; i++)
        if (!io_mem_used[i]) {
            io_mem_used[i] = 1;
            return i;
        }
    fprintf(stderr, "Ran out of io_mem_idx, max %d !\n", IO_MEM_NB_ENTRIES);
    return -1;
}

/*
 * Usually, devices operate in little endian mode. There are devices out
 * there that operate in big endian too. Each device gets byte swapped
 * mmio if plugged onto a CPU that does the other endianness.
 *
 * CPU          Device           swap?
 *
 * little       little           no
 * little       big              yes
 * big          little           yes
 * big          big              no
 */

typedef struct SwapEndianContainer {
    CPUReadMemoryFunc *read[3];
    CPUWriteMemoryFunc *write[3];
    void *opaque;
} SwapEndianContainer;

static uint32_t swapendian_mem_readb(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = c->read[0](c->opaque, addr);
    return val;
}

static uint32_t swapendian_mem_readw(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap16(c->read[1](c->opaque, addr));
    return val;
}

static uint32_t swapendian_mem_readl(void *opaque, target_phys_addr_t addr)
{
    uint32_t val;
    SwapEndianContainer *c = opaque;
    val = bswap32(c->read[2](c->opaque, addr));
    return val;
}

static CPUReadMemoryFunc * const swapendian_readfn[3] = {
    swapendian_mem_readb,
    swapendian_mem_readw,
    swapendian_mem_readl
};

static void swapendian_mem_writeb(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[0](c->opaque, addr, val);
}

static void swapendian_mem_writew(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[1](c->opaque, addr, bswap16(val));
}

static void swapendian_mem_writel(void *opaque, target_phys_addr_t addr,
                                  uint32_t val)
{
    SwapEndianContainer *c = opaque;
    c->write[2](c->opaque, addr, bswap32(val));
}

static CPUWriteMemoryFunc * const swapendian_writefn[3] = {
    swapendian_mem_writeb,
    swapendian_mem_writew,
    swapendian_mem_writel
};

static void swapendian_init(int io_index)
{
    SwapEndianContainer *c = g_malloc(sizeof(SwapEndianContainer));
    int i;

    /* Swap mmio for big endian targets */
    c->opaque = io_mem_opaque[io_index];
    for (i = 0; i < 3; i++) {
        c->read[i] = io_mem_read[io_index][i];
        c->write[i] = io_mem_write[io_index][i];

        io_mem_read[io_index][i] = swapendian_readfn[i];
        io_mem_write[io_index][i] = swapendian_writefn[i];
    }
    io_mem_opaque[io_index] = c;
}

static void swapendian_del(int io_index)
{
    if (io_mem_read[io_index][0] == swapendian_readfn[0]) {
        g_free(io_mem_opaque[io_index]);
    }
}

/* mem_read and mem_write are arrays of functions containing the
   function to access byte (index 0), word (index 1) and dword (index
   2). Functions can be omitted with a NULL function pointer.
   If io_index is non-zero, the corresponding io zone is
   modified. If it is zero, a new io zone is allocated. The return
   value can be used with cpu_register_physical_memory(). (-1) is
   returned on error. */
static int cpu_register_io_memory_fixed(int io_index,
                                        CPUReadMemoryFunc * const *mem_read,
                                        CPUWriteMemoryFunc * const *mem_write,
                                        void *opaque, enum device_endian endian)
{
    int i;

    if (io_index <= 0) {
        io_index = get_free_io_mem_idx();
        if (io_index == -1)
            return io_index;
    } else {
        io_index >>= IO_MEM_SHIFT;
        if (io_index >= IO_MEM_NB_ENTRIES)
            return -1;
    }

    for (i = 0; i < 3; ++i) {
        io_mem_read[io_index][i]
            = (mem_read[i] ? mem_read[i] : unassigned_mem_read[i]);
    }
    for (i = 0; i < 3; ++i) {
        io_mem_write[io_index][i]
            = (mem_write[i] ? mem_write[i] : unassigned_mem_write[i]);
    }
    io_mem_opaque[io_index] = opaque;

    switch (endian) {
    case DEVICE_BIG_ENDIAN:
#ifndef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_LITTLE_ENDIAN:
#ifdef TARGET_WORDS_BIGENDIAN
        swapendian_init(io_index);
#endif
        break;
    case DEVICE_NATIVE_ENDIAN:
    default:
        break;
    }

    return (io_index << IO_MEM_SHIFT);
}

int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian)
{
    return cpu_register_io_memory_fixed(0, mem_read, mem_write, opaque, endian);
}

aliguori88715652009-02-11 15:20:58 +00003792void cpu_unregister_io_memory(int io_table_address)
3793{
3794 int i;
3795 int io_index = io_table_address >> IO_MEM_SHIFT;
3796
Alexander Grafdd310532010-12-08 12:05:36 +01003797 swapendian_del(io_index);
3798
aliguori88715652009-02-11 15:20:58 +00003799 for (i=0;i < 3; i++) {
3800 io_mem_read[io_index][i] = unassigned_mem_read[i];
3801 io_mem_write[io_index][i] = unassigned_mem_write[i];
3802 }
3803 io_mem_opaque[io_index] = NULL;
3804 io_mem_used[io_index] = 0;
3805}
3806
Avi Kivitye9179ce2009-06-14 11:38:52 +03003807static void io_mem_init(void)
3808{
3809 int i;
3810
Alexander Graf2507c122010-12-08 12:05:37 +01003811 cpu_register_io_memory_fixed(IO_MEM_ROM, error_mem_read,
3812 unassigned_mem_write, NULL,
3813 DEVICE_NATIVE_ENDIAN);
3814 cpu_register_io_memory_fixed(IO_MEM_UNASSIGNED, unassigned_mem_read,
3815 unassigned_mem_write, NULL,
3816 DEVICE_NATIVE_ENDIAN);
3817 cpu_register_io_memory_fixed(IO_MEM_NOTDIRTY, error_mem_read,
3818 notdirty_mem_write, NULL,
3819 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003820 for (i=0; i<5; i++)
3821 io_mem_used[i] = 1;
3822
3823 io_mem_watch = cpu_register_io_memory(watch_mem_read,
Alexander Graf2507c122010-12-08 12:05:37 +01003824 watch_mem_write, NULL,
3825 DEVICE_NATIVE_ENDIAN);
Avi Kivitye9179ce2009-06-14 11:38:52 +03003826}
3827
Avi Kivity62152b82011-07-26 14:26:14 +03003828static void memory_map_init(void)
3829{
Anthony Liguori7267c092011-08-20 22:09:37 -05003830 system_memory = g_malloc(sizeof(*system_memory));
Avi Kivity8417ceb2011-08-03 11:56:14 +03003831 memory_region_init(system_memory, "system", INT64_MAX);
Avi Kivity62152b82011-07-26 14:26:14 +03003832 set_system_memory_map(system_memory);
Avi Kivity309cb472011-08-08 16:09:03 +03003833
Anthony Liguori7267c092011-08-20 22:09:37 -05003834 system_io = g_malloc(sizeof(*system_io));
Avi Kivity309cb472011-08-08 16:09:03 +03003835 memory_region_init(system_io, "io", 65536);
3836 set_system_io_map(system_io);
Avi Kivity62152b82011-07-26 14:26:14 +03003837}
3838
3839MemoryRegion *get_system_memory(void)
3840{
3841 return system_memory;
3842}
3843
Avi Kivity309cb472011-08-08 16:09:03 +03003844MemoryRegion *get_system_io(void)
3845{
3846 return system_io;
3847}
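
/* Usage sketch (assumes the new MemoryRegion API from memory.h): board
 * code attaches regions into the guest address space as subregions of
 * the system memory region returned above, e.g.
 *
 *     memory_region_add_subregion(get_system_memory(), 0x10000000,
 *                                 &s->mmio);
 *
 * where s->mmio is a MemoryRegion the caller already initialized and the
 * address is illustrative.
 */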

#endif /* !defined(CONFIG_USER_ONLY) */

/* physical memory access (slow version, mainly for debug) */
#if defined(CONFIG_USER_ONLY)
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l, flags;
    target_ulong page;
    void *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        flags = page_get_flags(page);
        if (!(flags & PAGE_VALID))
            return -1;
        if (is_write) {
            if (!(flags & PAGE_WRITE))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
                return -1;
            memcpy(p, buf, l);
            unlock_user(p, addr, l);
        } else {
            if (!(flags & PAGE_READ))
                return -1;
            /* XXX: this code should not depend on lock_user */
            if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
                return -1;
            memcpy(buf, p, l);
            unlock_user(p, addr, 0);
        }
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}

#else
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write)
{
    int l, io_index;
    uint8_t *ptr;
    uint32_t val;
    target_phys_addr_t page;
    ram_addr_t pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if (is_write) {
            if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
                target_phys_addr_t addr1 = addr;
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                /* XXX: could force cpu_single_env to NULL to avoid
                   potential bugs */
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit write access */
                    val = ldl_p(buf);
                    io_mem_write[io_index][2](io_mem_opaque[io_index], addr1, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit write access */
                    val = lduw_p(buf);
                    io_mem_write[io_index][1](io_mem_opaque[io_index], addr1, val);
                    l = 2;
                } else {
                    /* 8 bit write access */
                    val = ldub_p(buf);
                    io_mem_write[io_index][0](io_mem_opaque[io_index], addr1, val);
                    l = 1;
                }
            } else {
                ram_addr_t addr1;
                addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
                /* RAM case */
                ptr = qemu_get_ram_ptr(addr1);
                memcpy(ptr, buf, l);
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                qemu_put_ram_ptr(ptr);
            }
        } else {
            if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
                !(pd & IO_MEM_ROMD)) {
                target_phys_addr_t addr1 = addr;
                /* I/O case */
                io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
                if (p)
                    addr1 = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
                if (l >= 4 && ((addr1 & 3) == 0)) {
                    /* 32 bit read access */
                    val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr1);
                    stl_p(buf, val);
                    l = 4;
                } else if (l >= 2 && ((addr1 & 1) == 0)) {
                    /* 16 bit read access */
                    val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr1);
                    stw_p(buf, val);
                    l = 2;
                } else {
                    /* 8 bit read access */
                    val = io_mem_read[io_index][0](io_mem_opaque[io_index], addr1);
                    stb_p(buf, val);
                    l = 1;
                }
            } else {
                /* RAM case */
                ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK);
                memcpy(buf, ptr + (addr & ~TARGET_PAGE_MASK), l);
                qemu_put_ram_ptr(ptr);
            }
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
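
/* Usage sketch: device emulation typically goes through the
 * cpu_physical_memory_read()/cpu_physical_memory_write() wrappers, which
 * funnel into the function above, e.g. fetching a DMA descriptor:
 *
 *     uint8_t desc[16];
 *     cpu_physical_memory_read(desc_paddr, desc, sizeof(desc));
 *
 * desc_paddr here is a hypothetical guest physical address.
 */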

/* used for ROM loading: can write in RAM and ROM */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len)
{
    int l;
    uint8_t *ptr;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM &&
            (pd & ~TARGET_PAGE_MASK) != IO_MEM_ROM &&
            !(pd & IO_MEM_ROMD)) {
            /* do nothing */
        } else {
            unsigned long addr1;
            addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
            /* ROM/RAM case */
            ptr = qemu_get_ram_ptr(addr1);
            memcpy(ptr, buf, l);
            qemu_put_ram_ptr(ptr);
        }
        len -= l;
        buf += l;
        addr += l;
    }
}
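
/* Usage sketch: firmware loading code relies on this to install images
 * even where the guest sees ROM, e.g. (illustrative names)
 *
 *     cpu_physical_memory_write_rom(bios_base, bios_data, bios_size);
 */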

typedef struct {
    void *buffer;
    target_phys_addr_t addr;
    target_phys_addr_t len;
} BounceBuffer;

static BounceBuffer bounce;

typedef struct MapClient {
    void *opaque;
    void (*callback)(void *opaque);
    QLIST_ENTRY(MapClient) link;
} MapClient;

static QLIST_HEAD(map_client_list, MapClient) map_client_list
    = QLIST_HEAD_INITIALIZER(map_client_list);

void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque))
{
    MapClient *client = g_malloc(sizeof(*client));

    client->opaque = opaque;
    client->callback = callback;
    QLIST_INSERT_HEAD(&map_client_list, client, link);
    return client;
}

void cpu_unregister_map_client(void *_client)
{
    MapClient *client = (MapClient *)_client;

    QLIST_REMOVE(client, link);
    g_free(client);
}

static void cpu_notify_map_clients(void)
{
    MapClient *client;

    while (!QLIST_EMPTY(&map_client_list)) {
        client = QLIST_FIRST(&map_client_list);
        client->callback(client->opaque);
        cpu_unregister_map_client(client);
    }
}

/* Map a physical memory region into a host virtual address.
 * May map a subset of the requested range, given by and returned in *plen.
 * May return NULL if resources needed to perform the mapping are exhausted.
 * Use only for reads OR writes - not for read-modify-write operations.
 * Use cpu_register_map_client() to know when retrying the map operation is
 * likely to succeed.
 */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write)
{
    target_phys_addr_t len = *plen;
    target_phys_addr_t todo = 0;
    int l;
    target_phys_addr_t page;
    unsigned long pd;
    PhysPageDesc *p;
    ram_addr_t raddr = RAM_ADDR_MAX;
    ram_addr_t rlen;
    void *ret;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        p = phys_page_find(page >> TARGET_PAGE_BITS);
        if (!p) {
            pd = IO_MEM_UNASSIGNED;
        } else {
            pd = p->phys_offset;
        }

        if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
            if (todo || bounce.buffer) {
                break;
            }
            bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, TARGET_PAGE_SIZE);
            bounce.addr = addr;
            bounce.len = l;
            if (!is_write) {
                cpu_physical_memory_read(addr, bounce.buffer, l);
            }

            *plen = l;
            return bounce.buffer;
        }
        if (!todo) {
            raddr = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        }

        len -= l;
        addr += l;
        todo += l;
    }
    rlen = todo;
    ret = qemu_ram_ptr_length(raddr, &rlen);
    *plen = rlen;
    return ret;
}

/* Unmaps a memory region previously mapped by cpu_physical_memory_map().
 * Will also mark the memory as dirty if is_write == 1. access_len gives
 * the amount of memory that was actually read or written by the caller.
 */
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len)
{
    if (buffer != bounce.buffer) {
        if (is_write) {
            ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
            while (access_len) {
                unsigned l;
                l = TARGET_PAGE_SIZE;
                if (l > access_len)
                    l = access_len;
                if (!cpu_physical_memory_is_dirty(addr1)) {
                    /* invalidate code */
                    tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
                    /* set dirty bit */
                    cpu_physical_memory_set_dirty_flags(
                        addr1, (0xff & ~CODE_DIRTY_FLAG));
                }
                addr1 += l;
                access_len -= l;
            }
        }
        if (xen_enabled()) {
            xen_invalidate_map_cache_entry(buffer);
        }
        return;
    }
    if (is_write) {
        cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
    }
    qemu_vfree(bounce.buffer);
    bounce.buffer = NULL;
    cpu_notify_map_clients();
}
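
/* Usage sketch (illustrative, mirrors how DMA users drive this API):
 * map a guest range, access it directly, then unmap; when the map fails,
 * register a map client so the transfer can be retried once the bounce
 * buffer frees up:
 *
 *     target_phys_addr_t plen = size;
 *     void *host = cpu_physical_memory_map(paddr, &plen, is_write);
 *     if (!host) {
 *         cpu_register_map_client(s, mydev_retry);   (retry callback)
 *     } else {
 *         ... read or write host[0..plen-1] ...      (plen may be < size)
 *         cpu_physical_memory_unmap(host, plen, is_write, plen);
 *     }
 *
 * s and mydev_retry are hypothetical device state and callback.
 */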

/* warning: addr must be aligned */
static inline uint32_t ldl_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint32_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldl_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldl_be_p(ptr);
            break;
        default:
            val = ldl_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t ldl_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t ldl_le_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t ldl_be_phys(target_phys_addr_t addr)
{
    return ldl_phys_internal(addr, DEVICE_BIG_ENDIAN);
}
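
/* Usage sketch: the _le/_be variants read with a fixed byte order
 * regardless of target endianness, which suits structures with a defined
 * layout in guest memory (addresses here are illustrative):
 *
 *     uint32_t flags = ldl_le_phys(ring_base + 8);
 *     uint32_t magic = ldl_be_phys(fw_paddr);
 *
 * ldl_phys() keeps the target's native byte order.
 */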

/* warning: addr must be aligned */
static inline uint64_t ldq_phys_internal(target_phys_addr_t addr,
                                         enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;

        /* XXX This is broken when device endian != cpu endian.
           Fix and add "endian" variable check */
#ifdef TARGET_WORDS_BIGENDIAN
        val = (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr) << 32;
        val |= io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4);
#else
        val = io_mem_read[io_index][2](io_mem_opaque[io_index], addr);
        val |= (uint64_t)io_mem_read[io_index][2](io_mem_opaque[io_index], addr + 4) << 32;
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = ldq_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = ldq_be_p(ptr);
            break;
        default:
            val = ldq_p(ptr);
            break;
        }
    }
    return val;
}

uint64_t ldq_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint64_t ldq_le_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint64_t ldq_be_phys(target_phys_addr_t addr)
{
    return ldq_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
uint32_t ldub_phys(target_phys_addr_t addr)
{
    uint8_t val;
    cpu_physical_memory_read(addr, &val, 1);
    return val;
}

/* warning: addr must be aligned */
static inline uint32_t lduw_phys_internal(target_phys_addr_t addr,
                                          enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    uint64_t val;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) > IO_MEM_ROM &&
        !(pd & IO_MEM_ROMD)) {
        /* I/O case */
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        val = io_mem_read[io_index][1](io_mem_opaque[io_index], addr);
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
    } else {
        /* RAM case */
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            val = lduw_le_p(ptr);
            break;
        case DEVICE_BIG_ENDIAN:
            val = lduw_be_p(ptr);
            break;
        default:
            val = lduw_p(ptr);
            break;
        }
    }
    return val;
}

uint32_t lduw_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_NATIVE_ENDIAN);
}

uint32_t lduw_le_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_LITTLE_ENDIAN);
}

uint32_t lduw_be_phys(target_phys_addr_t addr)
{
    return lduw_phys_internal(addr, DEVICE_BIG_ENDIAN);
}

/* warning: addr must be aligned. The ram page is not marked as dirty
   and the code inside is not invalidated. It is useful if the dirty
   bits are used to track modified PTEs */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        ptr = qemu_get_ram_ptr(addr1);
        stl_p(ptr, val);

        if (unlikely(in_migration)) {
            if (!cpu_physical_memory_is_dirty(addr1)) {
                /* invalidate code */
                tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
                /* set dirty bit */
                cpu_physical_memory_set_dirty_flags(
                    addr1, (0xff & ~CODE_DIRTY_FLAG));
            }
        }
    }
}

void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#ifdef TARGET_WORDS_BIGENDIAN
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val >> 32);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val);
#else
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr + 4, val >> 32);
#endif
    } else {
        ptr = qemu_get_ram_ptr(pd & TARGET_PAGE_MASK) +
            (addr & ~TARGET_PAGE_MASK);
        stq_p(ptr, val);
    }
}

/* warning: addr must be aligned */
static inline void stl_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap32(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap32(val);
        }
#endif
        io_mem_write[io_index][2](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stl_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stl_be_p(ptr, val);
            break;
        default:
            stl_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stl_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stl_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stl_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stl_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}
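
/* Usage sketch: the write-side counterparts of the loads above, e.g.
 * completing a little-endian descriptor (illustrative address):
 *
 *     stl_le_phys(desc_paddr + 8, status);
 *
 * stl_phys_notdirty() above is the variant page-table walkers use to
 * update PTEs without triggering code invalidation.
 */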

/* XXX: optimize */
void stb_phys(target_phys_addr_t addr, uint32_t val)
{
    uint8_t v = val;
    cpu_physical_memory_write(addr, &v, 1);
}

/* warning: addr must be aligned */
static inline void stw_phys_internal(target_phys_addr_t addr, uint32_t val,
                                     enum device_endian endian)
{
    int io_index;
    uint8_t *ptr;
    unsigned long pd;
    PhysPageDesc *p;

    p = phys_page_find(addr >> TARGET_PAGE_BITS);
    if (!p) {
        pd = IO_MEM_UNASSIGNED;
    } else {
        pd = p->phys_offset;
    }

    if ((pd & ~TARGET_PAGE_MASK) != IO_MEM_RAM) {
        io_index = (pd >> IO_MEM_SHIFT) & (IO_MEM_NB_ENTRIES - 1);
        if (p)
            addr = (addr & ~TARGET_PAGE_MASK) + p->region_offset;
#if defined(TARGET_WORDS_BIGENDIAN)
        if (endian == DEVICE_LITTLE_ENDIAN) {
            val = bswap16(val);
        }
#else
        if (endian == DEVICE_BIG_ENDIAN) {
            val = bswap16(val);
        }
#endif
        io_mem_write[io_index][1](io_mem_opaque[io_index], addr, val);
    } else {
        unsigned long addr1;
        addr1 = (pd & TARGET_PAGE_MASK) + (addr & ~TARGET_PAGE_MASK);
        /* RAM case */
        ptr = qemu_get_ram_ptr(addr1);
        switch (endian) {
        case DEVICE_LITTLE_ENDIAN:
            stw_le_p(ptr, val);
            break;
        case DEVICE_BIG_ENDIAN:
            stw_be_p(ptr, val);
            break;
        default:
            stw_p(ptr, val);
            break;
        }
        if (!cpu_physical_memory_is_dirty(addr1)) {
            /* invalidate code */
            tb_invalidate_phys_page_range(addr1, addr1 + 2, 0);
            /* set dirty bit */
            cpu_physical_memory_set_dirty_flags(addr1,
                (0xff & ~CODE_DIRTY_FLAG));
        }
    }
}

void stw_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_NATIVE_ENDIAN);
}

void stw_le_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_LITTLE_ENDIAN);
}

void stw_be_phys(target_phys_addr_t addr, uint32_t val)
{
    stw_phys_internal(addr, val, DEVICE_BIG_ENDIAN);
}

/* XXX: optimize */
void stq_phys(target_phys_addr_t addr, uint64_t val)
{
    val = tswap64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_le_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_le64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

void stq_be_phys(target_phys_addr_t addr, uint64_t val)
{
    val = cpu_to_be64(val);
    cpu_physical_memory_write(addr, &val, 8);
}

/* virtual memory access for debug (includes writing to ROM) */
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write)
{
    int l;
    target_phys_addr_t phys_addr;
    target_ulong page;

    while (len > 0) {
        page = addr & TARGET_PAGE_MASK;
        phys_addr = cpu_get_phys_page_debug(env, page);
        /* if no physical page mapped, return an error */
        if (phys_addr == -1)
            return -1;
        l = (page + TARGET_PAGE_SIZE) - addr;
        if (l > len)
            l = len;
        phys_addr += (addr & ~TARGET_PAGE_MASK);
        if (is_write)
            cpu_physical_memory_write_rom(phys_addr, buf, l);
        else
            cpu_physical_memory_rw(phys_addr, buf, l, is_write);
        len -= l;
        buf += l;
        addr += l;
    }
    return 0;
}
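
/* Usage sketch (as the gdb stub does): read guest memory by virtual
 * address while the CPU is stopped, translated page by page via
 * cpu_get_phys_page_debug(), e.g.
 *
 *     uint8_t buf[64];
 *     if (cpu_memory_rw_debug(env, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ... report an unmapped address to the debugger ...
 *     }
 */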
#endif

/* in deterministic execution mode, instructions doing device I/Os
   must be at the end of the TB */
void cpu_io_recompile(CPUState *env, void *retaddr)
{
    TranslationBlock *tb;
    uint32_t n, cflags;
    target_ulong pc, cs_base;
    uint64_t flags;

    tb = tb_find_pc((unsigned long)retaddr);
    if (!tb) {
        cpu_abort(env, "cpu_io_recompile: could not find TB for pc=%p",
                  retaddr);
    }
    n = env->icount_decr.u16.low + tb->icount;
    cpu_restore_state(tb, env, (unsigned long)retaddr);
    /* Calculate how many instructions had been executed before the fault
       occurred. */
    n = n - env->icount_decr.u16.low;
    /* Generate a new TB ending on the I/O insn. */
    n++;
    /* On MIPS and SH, delay slot instructions can only be restarted if
       they were already the first instruction in the TB. If this is not
       the first instruction in a TB then re-execute the preceding
       branch. */
#if defined(TARGET_MIPS)
    if ((env->hflags & MIPS_HFLAG_BMASK) != 0 && n > 1) {
        env->active_tc.PC -= 4;
        env->icount_decr.u16.low++;
        env->hflags &= ~MIPS_HFLAG_BMASK;
    }
#elif defined(TARGET_SH4)
    if ((env->flags & ((DELAY_SLOT | DELAY_SLOT_CONDITIONAL))) != 0
        && n > 1) {
        env->pc -= 2;
        env->icount_decr.u16.low++;
        env->flags &= ~(DELAY_SLOT | DELAY_SLOT_CONDITIONAL);
    }
#endif
    /* This should never happen. */
    if (n > CF_COUNT_MASK)
        cpu_abort(env, "TB too big during recompile");

    cflags = n | CF_LAST_IO;
    pc = tb->pc;
    cs_base = tb->cs_base;
    flags = tb->flags;
    tb_phys_invalidate(tb, -1);
    /* FIXME: In theory this could raise an exception. In practice
       we have already translated the block once so it's probably ok. */
    tb_gen_code(env, pc, cs_base, flags, cflags);
    /* TODO: If env->pc != tb->pc (i.e. the faulting instruction was not
       the first in the TB) then we end up generating a whole new TB and
       repeating the fault, which is horribly inefficient.
       Better would be to execute just this insn uncached, or generate a
       second new TB. */
    cpu_resume_from_signal(env, NULL);
}

#if !defined(CONFIG_USER_ONLY)

void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
    int i, target_code_size, max_target_code_size;
    int direct_jmp_count, direct_jmp2_count, cross_page;
    TranslationBlock *tb;

    target_code_size = 0;
    max_target_code_size = 0;
    cross_page = 0;
    direct_jmp_count = 0;
    direct_jmp2_count = 0;
    for (i = 0; i < nb_tbs; i++) {
        tb = &tbs[i];
        target_code_size += tb->size;
        if (tb->size > max_target_code_size)
            max_target_code_size = tb->size;
        if (tb->page_addr[1] != -1)
            cross_page++;
        if (tb->tb_next_offset[0] != 0xffff) {
            direct_jmp_count++;
            if (tb->tb_next_offset[1] != 0xffff) {
                direct_jmp2_count++;
            }
        }
    }
    /* XXX: avoid using doubles? */
    cpu_fprintf(f, "Translation buffer state:\n");
    cpu_fprintf(f, "gen code size %td/%ld\n",
                code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
    cpu_fprintf(f, "TB count %d/%d\n",
                nb_tbs, code_gen_max_blocks);
    cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
                nb_tbs ? target_code_size / nb_tbs : 0,
                max_target_code_size);
    cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
                nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
                target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
    cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
                cross_page,
                nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    cpu_fprintf(f, "direct jump count %d (%d%%) (2 jumps=%d %d%%)\n",
                direct_jmp_count,
                nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0,
                direct_jmp2_count,
                nb_tbs ? (direct_jmp2_count * 100) / nb_tbs : 0);
    cpu_fprintf(f, "\nStatistics:\n");
    cpu_fprintf(f, "TB flush count %d\n", tb_flush_count);
    cpu_fprintf(f, "TB invalidate count %d\n", tb_phys_invalidate_count);
    cpu_fprintf(f, "TLB flush count %d\n", tlb_flush_count);
    tcg_dump_info(f, cpu_fprintf);
}

#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() NULL
#define env cpu_single_env
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

#undef env

#endif