/*
 * Virtual page mapping
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "config.h"
#ifndef _WIN32
#include <sys/types.h>
#include <sys/mman.h>
#endif

#include "qemu-common.h"
#include "cpu.h"
#include "tcg.h"
#include "hw/hw.h"
#if !defined(CONFIG_USER_ONLY)
#include "hw/boards.h"
#endif
#include "hw/qdev.h"
#include "qemu/osdep.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "hw/xen/xen.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "exec/memory.h"
#include "sysemu/dma.h"
#include "exec/address-spaces.h"
#if defined(CONFIG_USER_ONLY)
#include <qemu.h>
#else /* !CONFIG_USER_ONLY */
#include "sysemu/xen-mapcache.h"
#include "trace.h"
#endif
#include "exec/cpu-all.h"
#include "qemu/rcu_queue.h"
#include "exec/cputlb.h"
#include "translate-all.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"

#include "qemu/range.h"

//#define DEBUG_SUBPAGE

#if !defined(CONFIG_USER_ONLY)
static bool in_migration;

/* ram_list is read under rcu_read_lock()/rcu_read_unlock().  Writes
 * are protected by the ramlist lock.
 */
RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list.blocks) };

static MemoryRegion *system_memory;
static MemoryRegion *system_io;

AddressSpace address_space_io;
AddressSpace address_space_memory;

MemoryRegion io_mem_rom, io_mem_notdirty;
static MemoryRegion io_mem_unassigned;

/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
#define RAM_PREALLOC   (1 << 0)

/* RAM is mmap-ed with MAP_SHARED */
#define RAM_SHARED     (1 << 1)

/* Only a portion of RAM (used_length) is actually used, and migrated.
 * This used_length size can change across reboots.
 */
#define RAM_RESIZEABLE (1 << 2)

#endif

struct CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);
/* current CPU in the current thread. It is only valid inside
   cpu_exec() */
DEFINE_TLS(CPUState *, current_cpu);
/* 0 = Do not count executed instructions.
   1 = Precise instruction counting.
   2 = Adaptive rate instruction counting.  */
int use_icount;

#if !defined(CONFIG_USER_ONLY)

typedef struct PhysPageEntry PhysPageEntry;

struct PhysPageEntry {
    /* How many bits skip to next level (in units of L2_SIZE). 0 for a leaf. */
    uint32_t skip : 6;
    /* index into phys_sections (!skip) or phys_map_nodes (skip) */
    uint32_t ptr : 26;
};

#define PHYS_MAP_NODE_NIL (((uint32_t)~0) >> 6)

/* Size of the L2 (and L3, etc) page tables.  */
#define ADDR_SPACE_BITS 64

#define P_L2_BITS 9
#define P_L2_SIZE (1 << P_L2_BITS)

#define P_L2_LEVELS (((ADDR_SPACE_BITS - TARGET_PAGE_BITS - 1) / P_L2_BITS) + 1)

typedef PhysPageEntry Node[P_L2_SIZE];

typedef struct PhysPageMap {
    struct rcu_head rcu;

    unsigned sections_nb;
    unsigned sections_nb_alloc;
    unsigned nodes_nb;
    unsigned nodes_nb_alloc;
    Node *nodes;
    MemoryRegionSection *sections;
} PhysPageMap;

struct AddressSpaceDispatch {
    struct rcu_head rcu;

    /* This is a multi-level map on the physical address space.
     * The bottom level has pointers to MemoryRegionSections.
     */
    PhysPageEntry phys_map;
    PhysPageMap map;
    AddressSpace *as;
};

#define SUBPAGE_IDX(addr) ((addr) & ~TARGET_PAGE_MASK)
typedef struct subpage_t {
    MemoryRegion iomem;
    AddressSpace *as;
    hwaddr base;
    uint16_t sub_section[TARGET_PAGE_SIZE];
} subpage_t;

#define PHYS_SECTION_UNASSIGNED 0
#define PHYS_SECTION_NOTDIRTY 1
#define PHYS_SECTION_ROM 2
#define PHYS_SECTION_WATCH 3

static void io_mem_init(void);
static void memory_map_init(void);
static void tcg_commit(MemoryListener *listener);

static MemoryRegion io_mem_watch;
#endif

#if !defined(CONFIG_USER_ONLY)

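/* Make sure the node pool of @map can hold at least @nodes more L2 tables,
 * growing the backing array if necessary.
 */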
static void phys_map_node_reserve(PhysPageMap *map, unsigned nodes)
{
    if (map->nodes_nb + nodes > map->nodes_nb_alloc) {
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc * 2, 16);
        map->nodes_nb_alloc = MAX(map->nodes_nb_alloc, map->nodes_nb + nodes);
        map->nodes = g_renew(Node, map->nodes, map->nodes_nb_alloc);
    }
}

static uint32_t phys_map_node_alloc(PhysPageMap *map)
{
    unsigned i;
    uint32_t ret;

    ret = map->nodes_nb++;
    assert(ret != PHYS_MAP_NODE_NIL);
    assert(ret != map->nodes_nb_alloc);
    for (i = 0; i < P_L2_SIZE; ++i) {
        map->nodes[ret][i].skip = 1;
        map->nodes[ret][i].ptr = PHYS_MAP_NODE_NIL;
    }
    return ret;
}

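/* Recursively populate the page map: walk down @level levels and point the
 * entries covering [*index, *index + *nb) at section @leaf, allocating
 * intermediate nodes as needed.
 */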
static void phys_page_set_level(PhysPageMap *map, PhysPageEntry *lp,
                                hwaddr *index, hwaddr *nb, uint16_t leaf,
                                int level)
{
    PhysPageEntry *p;
    int i;
    hwaddr step = (hwaddr)1 << (level * P_L2_BITS);

    if (lp->skip && lp->ptr == PHYS_MAP_NODE_NIL) {
        lp->ptr = phys_map_node_alloc(map);
        p = map->nodes[lp->ptr];
        if (level == 0) {
            for (i = 0; i < P_L2_SIZE; i++) {
                p[i].skip = 0;
                p[i].ptr = PHYS_SECTION_UNASSIGNED;
            }
        }
    } else {
        p = map->nodes[lp->ptr];
    }
    lp = &p[(*index >> (level * P_L2_BITS)) & (P_L2_SIZE - 1)];

    while (*nb && lp < &p[P_L2_SIZE]) {
        if ((*index & (step - 1)) == 0 && *nb >= step) {
            lp->skip = 0;
            lp->ptr = leaf;
            *index += step;
            *nb -= step;
        } else {
            phys_page_set_level(map, lp, index, nb, leaf, level - 1);
        }
        ++lp;
    }
}

static void phys_page_set(AddressSpaceDispatch *d,
                          hwaddr index, hwaddr nb,
                          uint16_t leaf)
{
    /* Wildly overreserve - it doesn't matter much. */
    phys_map_node_reserve(&d->map, 3 * P_L2_LEVELS);

    phys_page_set_level(&d->map, &d->phys_map, &index, &nb, leaf, P_L2_LEVELS - 1);
}

/* Compact a non leaf page entry. Simply detect that the entry has a single child,
 * and update our entry so we can skip it and go directly to the destination.
 */
static void phys_page_compact(PhysPageEntry *lp, Node *nodes, unsigned long *compacted)
{
    unsigned valid_ptr = P_L2_SIZE;
    int valid = 0;
    PhysPageEntry *p;
    int i;

    if (lp->ptr == PHYS_MAP_NODE_NIL) {
        return;
    }

    p = nodes[lp->ptr];
    for (i = 0; i < P_L2_SIZE; i++) {
        if (p[i].ptr == PHYS_MAP_NODE_NIL) {
            continue;
        }

        valid_ptr = i;
        valid++;
        if (p[i].skip) {
            phys_page_compact(&p[i], nodes, compacted);
        }
    }

    /* We can only compress if there's only one child. */
    if (valid != 1) {
        return;
    }

    assert(valid_ptr < P_L2_SIZE);

    /* Don't compress if it won't fit in the # of bits we have. */
    if (lp->skip + p[valid_ptr].skip >= (1 << 3)) {
        return;
    }

    lp->ptr = p[valid_ptr].ptr;
    if (!p[valid_ptr].skip) {
        /* If our only child is a leaf, make this a leaf. */
        /* By design, we should have made this node a leaf to begin with so we
         * should never reach here.
         * But since it's so simple to handle this, let's do it just in case we
         * change this rule.
         */
        lp->skip = 0;
    } else {
        lp->skip += p[valid_ptr].skip;
    }
}

static void phys_page_compact_all(AddressSpaceDispatch *d, int nodes_nb)
{
    DECLARE_BITMAP(compacted, nodes_nb);

    if (d->phys_map.skip) {
        phys_page_compact(&d->phys_map, d->map.nodes, compacted);
    }
}

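/* Walk the multi-level map starting at @lp and return the section covering
 * @addr, or the unassigned section if there is no mapping.
 */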
static MemoryRegionSection *phys_page_find(PhysPageEntry lp, hwaddr addr,
                                           Node *nodes, MemoryRegionSection *sections)
{
    PhysPageEntry *p;
    hwaddr index = addr >> TARGET_PAGE_BITS;
    int i;

    for (i = P_L2_LEVELS; lp.skip && (i -= lp.skip) >= 0;) {
        if (lp.ptr == PHYS_MAP_NODE_NIL) {
            return &sections[PHYS_SECTION_UNASSIGNED];
        }
        p = nodes[lp.ptr];
        lp = p[(index >> (i * P_L2_BITS)) & (P_L2_SIZE - 1)];
    }

    if (sections[lp.ptr].size.hi ||
        range_covers_byte(sections[lp.ptr].offset_within_address_space,
                          sections[lp.ptr].size.lo, addr)) {
        return &sections[lp.ptr];
    } else {
        return &sections[PHYS_SECTION_UNASSIGNED];
    }
}

bool memory_region_is_unassigned(MemoryRegion *mr)
{
    return mr != &io_mem_rom && mr != &io_mem_notdirty && !mr->rom_device
        && mr != &io_mem_watch;
}

/* Called from RCU critical section */
static MemoryRegionSection *address_space_lookup_region(AddressSpaceDispatch *d,
                                                        hwaddr addr,
                                                        bool resolve_subpage)
{
    MemoryRegionSection *section;
    subpage_t *subpage;

    section = phys_page_find(d->phys_map, addr, d->map.nodes, d->map.sections);
    if (resolve_subpage && section->mr->subpage) {
        subpage = container_of(section->mr, subpage_t, iomem);
        section = &d->map.sections[subpage->sub_section[SUBPAGE_IDX(addr)]];
    }
    return section;
}

/* Called from RCU critical section */
static MemoryRegionSection *
address_space_translate_internal(AddressSpaceDispatch *d, hwaddr addr, hwaddr *xlat,
                                 hwaddr *plen, bool resolve_subpage)
{
    MemoryRegionSection *section;
    Int128 diff;

    section = address_space_lookup_region(d, addr, resolve_subpage);
    /* Compute offset within MemoryRegionSection */
    addr -= section->offset_within_address_space;

    /* Compute offset within MemoryRegion */
    *xlat = addr + section->offset_within_region;

    diff = int128_sub(section->mr->size, int128_make64(addr));
    *plen = int128_get64(int128_min(diff, int128_make64(*plen)));
    return section;
}

static inline bool memory_access_is_direct(MemoryRegion *mr, bool is_write)
{
    if (memory_region_is_ram(mr)) {
        return !(is_write && mr->readonly);
    }
    if (memory_region_is_romd(mr)) {
        return !is_write;
    }

    return false;
}

/* Called from RCU critical section */
MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
                                      hwaddr *xlat, hwaddr *plen,
                                      bool is_write)
{
    IOMMUTLBEntry iotlb;
    MemoryRegionSection *section;
    MemoryRegion *mr;

    for (;;) {
        AddressSpaceDispatch *d = atomic_rcu_read(&as->dispatch);
        section = address_space_translate_internal(d, addr, &addr, plen, true);
        mr = section->mr;

        if (!mr->iommu_ops) {
            break;
        }

        iotlb = mr->iommu_ops->translate(mr, addr, is_write);
        addr = ((iotlb.translated_addr & ~iotlb.addr_mask)
                | (addr & iotlb.addr_mask));
        *plen = MIN(*plen, (addr | iotlb.addr_mask) - addr + 1);
        if (!(iotlb.perm & (1 << is_write))) {
            mr = &io_mem_unassigned;
            break;
        }

        as = iotlb.target_as;
    }

    if (xen_enabled() && memory_access_is_direct(mr, is_write)) {
        hwaddr page = ((addr & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE) - addr;
        *plen = MIN(page, *plen);
    }

    *xlat = addr;
    return mr;
}

/* Called from RCU critical section */
MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen)
{
    MemoryRegionSection *section;
    section = address_space_translate_internal(cpu->memory_dispatch,
                                                addr, xlat, plen, false);

    assert(!section->mr->iommu_ops);
    return section;
}
#endif

#if !defined(CONFIG_USER_ONLY)

static int cpu_common_post_load(void *opaque, int version_id)
{
    CPUState *cpu = opaque;

    /* 0x01 was CPU_INTERRUPT_EXIT. This line can be removed when the
       version_id is increased. */
    cpu->interrupt_request &= ~0x01;
    tlb_flush(cpu, 1);

    return 0;
}

static int cpu_common_pre_load(void *opaque)
{
    CPUState *cpu = opaque;

    cpu->exception_index = -1;

    return 0;
}

static bool cpu_common_exception_index_needed(void *opaque)
{
    CPUState *cpu = opaque;

    return tcg_enabled() && cpu->exception_index != -1;
}

static const VMStateDescription vmstate_cpu_common_exception_index = {
    .name = "cpu_common/exception_index",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT32(exception_index, CPUState),
        VMSTATE_END_OF_LIST()
    }
};

const VMStateDescription vmstate_cpu_common = {
    .name = "cpu_common",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = cpu_common_pre_load,
    .post_load = cpu_common_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(halted, CPUState),
        VMSTATE_UINT32(interrupt_request, CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (VMStateSubsection[]) {
        {
            .vmsd = &vmstate_cpu_common_exception_index,
            .needed = cpu_common_exception_index_needed,
        } , {
            /* empty */
        }
    }
};

#endif

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as)
{
    /* We only support one address space per cpu at the moment.  */
    assert(cpu->as == as);

    if (cpu->tcg_as_listener) {
        memory_listener_unregister(cpu->tcg_as_listener);
    } else {
        cpu->tcg_as_listener = g_new0(MemoryListener, 1);
    }
    cpu->tcg_as_listener->commit = tcg_commit;
    memory_listener_register(cpu->tcg_as_listener, as);
}
#endif

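/* Add the new CPU to the global list, assign it a cpu_index and register
 * its common migration state.
 */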
void cpu_exec_init(CPUArchState *env)
{
    CPUState *cpu = ENV_GET_CPU(env);
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUState *some_cpu;
    int cpu_index;

#if defined(CONFIG_USER_ONLY)
    cpu_list_lock();
#endif
    cpu_index = 0;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    cpu->cpu_index = cpu_index;
    cpu->numa_node = 0;
    QTAILQ_INIT(&cpu->breakpoints);
    QTAILQ_INIT(&cpu->watchpoints);
#ifndef CONFIG_USER_ONLY
    cpu->as = &address_space_memory;
    cpu->thread_id = qemu_get_thread_id();
    cpu_reload_memory_map(cpu);
#endif
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
#if defined(CONFIG_USER_ONLY)
    cpu_list_unlock();
#endif
    if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
        vmstate_register(NULL, cpu_index, &vmstate_cpu_common, cpu);
    }
#if defined(CPU_SAVE_VERSION) && !defined(CONFIG_USER_ONLY)
    register_savevm(NULL, "cpu", cpu_index, CPU_SAVE_VERSION,
                    cpu_save, cpu_load, env);
    assert(cc->vmsd == NULL);
    assert(qdev_get_vmsd(DEVICE(cpu)) == NULL);
#endif
    if (cc->vmsd != NULL) {
        vmstate_register(NULL, cpu_index, cc->vmsd, cpu);
    }
}

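/* Invalidate any translated code block that contains the breakpoint address. */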
#if defined(CONFIG_USER_ONLY)
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    tb_invalidate_phys_page_range(pc, pc + 1, 0);
}
#else
static void breakpoint_invalidate(CPUState *cpu, target_ulong pc)
{
    hwaddr phys = cpu_get_phys_page_debug(cpu, pc);
    if (phys != -1) {
        tb_invalidate_phys_addr(cpu->as,
                                phys | (pc & ~TARGET_PAGE_MASK));
    }
}
#endif

#if defined(CONFIG_USER_ONLY)
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
}

int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    return -ENOSYS;
}

void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
}

int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    return -ENOSYS;
}
#else
/* Add a watchpoint.  */
int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
                          int flags, CPUWatchpoint **watchpoint)
{
    CPUWatchpoint *wp;

    /* forbid ranges which are empty or run off the end of the address space */
    if (len == 0 || (addr + len - 1) < addr) {
        error_report("tried to set invalid watchpoint at %"
                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
        return -EINVAL;
    }
    wp = g_malloc(sizeof(*wp));

    wp->vaddr = addr;
    wp->len = len;
    wp->flags = flags;

    /* keep all GDB-injected watchpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
    }

    tlb_flush_page(cpu, addr);

    if (watchpoint)
        *watchpoint = wp;
    return 0;
}

/* Remove a specific watchpoint.  */
int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
                          int flags)
{
    CPUWatchpoint *wp;

    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (addr == wp->vaddr && len == wp->len
                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific watchpoint by reference.  */
void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
{
    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);

    tlb_flush_page(cpu, watchpoint->vaddr);

    g_free(watchpoint);
}

/* Remove all matching watchpoints.  */
void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
{
    CPUWatchpoint *wp, *next;

    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
        if (wp->flags & mask) {
            cpu_watchpoint_remove_by_ref(cpu, wp);
        }
    }
}

/* Return true if this watchpoint address matches the specified
 * access (ie the address range covered by the watchpoint overlaps
 * partially or completely with the address range covered by the
 * access).
 */
static inline bool cpu_watchpoint_address_matches(CPUWatchpoint *wp,
                                                  vaddr addr,
                                                  vaddr len)
{
    /* We know the lengths are non-zero, but a little caution is
     * required to avoid errors in the case where the range ends
     * exactly at the top of the address space and so addr + len
     * wraps round to zero.
     */
    vaddr wpend = wp->vaddr + wp->len - 1;
    vaddr addrend = addr + len - 1;

    return !(addr > wpend || wp->vaddr > addrend);
}

#endif

/* Add a breakpoint.  */
int cpu_breakpoint_insert(CPUState *cpu, vaddr pc, int flags,
                          CPUBreakpoint **breakpoint)
{
    CPUBreakpoint *bp;

    bp = g_malloc(sizeof(*bp));

    bp->pc = pc;
    bp->flags = flags;

    /* keep all GDB-injected breakpoints in front */
    if (flags & BP_GDB) {
        QTAILQ_INSERT_HEAD(&cpu->breakpoints, bp, entry);
    } else {
        QTAILQ_INSERT_TAIL(&cpu->breakpoints, bp, entry);
    }

    breakpoint_invalidate(cpu, pc);

    if (breakpoint) {
        *breakpoint = bp;
    }
    return 0;
}

/* Remove a specific breakpoint.  */
int cpu_breakpoint_remove(CPUState *cpu, vaddr pc, int flags)
{
    CPUBreakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        if (bp->pc == pc && bp->flags == flags) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
            return 0;
        }
    }
    return -ENOENT;
}

/* Remove a specific breakpoint by reference.  */
void cpu_breakpoint_remove_by_ref(CPUState *cpu, CPUBreakpoint *breakpoint)
{
    QTAILQ_REMOVE(&cpu->breakpoints, breakpoint, entry);

    breakpoint_invalidate(cpu, breakpoint->pc);

    g_free(breakpoint);
}

/* Remove all matching breakpoints. */
void cpu_breakpoint_remove_all(CPUState *cpu, int mask)
{
    CPUBreakpoint *bp, *next;

    QTAILQ_FOREACH_SAFE(bp, &cpu->breakpoints, entry, next) {
        if (bp->flags & mask) {
            cpu_breakpoint_remove_by_ref(cpu, bp);
        }
    }
}

/* enable or disable single step mode. EXCP_DEBUG is returned by the
   CPU loop after each instruction */
void cpu_single_step(CPUState *cpu, int enabled)
{
    if (cpu->singlestep_enabled != enabled) {
        cpu->singlestep_enabled = enabled;
        if (kvm_enabled()) {
            kvm_update_guest_debug(cpu, 0);
        } else {
            /* must flush all the translated code to avoid inconsistencies */
            /* XXX: only flush what is necessary */
            CPUArchState *env = cpu->env_ptr;
            tb_flush(env);
        }
    }
}

void cpu_abort(CPUState *cpu, const char *fmt, ...)
{
    va_list ap;
    va_list ap2;

    va_start(ap, fmt);
    va_copy(ap2, ap);
    fprintf(stderr, "qemu: fatal: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_FPU | CPU_DUMP_CCOP);
    if (qemu_log_enabled()) {
        qemu_log("qemu: fatal: ");
        qemu_log_vprintf(fmt, ap2);
        qemu_log("\n");
        log_cpu_state(cpu, CPU_DUMP_FPU | CPU_DUMP_CCOP);
        qemu_log_flush();
        qemu_log_close();
    }
    va_end(ap2);
    va_end(ap);
#if defined(CONFIG_USER_ONLY)
    {
        struct sigaction act;
        sigfillset(&act.sa_mask);
        act.sa_handler = SIG_DFL;
        sigaction(SIGABRT, &act, NULL);
    }
#endif
    abort();
}

#if !defined(CONFIG_USER_ONLY)
/* Called from RCU critical section */
static RAMBlock *qemu_get_ram_block(ram_addr_t addr)
{
    RAMBlock *block;

    block = atomic_rcu_read(&ram_list.mru_block);
    if (block && addr - block->offset < block->max_length) {
        goto found;
    }
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        if (addr - block->offset < block->max_length) {
            goto found;
        }
    }

    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
    abort();

found:
    /* It is safe to write mru_block outside the iothread lock.  This
     * is what happens:
     *
     *     mru_block = xxx
     *     rcu_read_unlock()
     *                                        xxx removed from list
     *                  rcu_read_lock()
     *                  read mru_block
     *                                        mru_block = NULL;
     *                                        call_rcu(reclaim_ramblock, xxx);
     *                  rcu_read_unlock()
     *
     * atomic_rcu_set is not needed here.  The block was already published
     * when it was placed into the list.  Here we're just making an extra
     * copy of the pointer.
     */
    ram_list.mru_block = block;
    return block;
}

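/* Make the soft TLB trap writes to the given RAM range again so that dirty
 * tracking sees them.  The range must lie within a single RAMBlock.
 */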
static void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length)
{
    ram_addr_t start1;
    RAMBlock *block;
    ram_addr_t end;

    end = TARGET_PAGE_ALIGN(start + length);
    start &= TARGET_PAGE_MASK;

    rcu_read_lock();
    block = qemu_get_ram_block(start);
    assert(block == qemu_get_ram_block(end - 1));
    start1 = (uintptr_t)ramblock_ptr(block, start - block->offset);
    cpu_tlb_reset_dirty_all(start1, length);
    rcu_read_unlock();
}

/* Note: start and end must be within the same ram block.  */
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t length,
                                     unsigned client)
{
    if (length == 0)
        return;
    cpu_physical_memory_clear_dirty_range_type(start, length, client);

    if (tcg_enabled()) {
        tlb_reset_dirty_range_all(start, length);
    }
}

static void cpu_physical_memory_set_dirty_tracking(bool enable)
{
    in_migration = enable;
}

/* Called from RCU critical section */
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address)
{
    hwaddr iotlb;
    CPUWatchpoint *wp;

    if (memory_region_is_ram(section->mr)) {
        /* Normal RAM.  */
        iotlb = (memory_region_get_ram_addr(section->mr) & TARGET_PAGE_MASK)
            + xlat;
        if (!section->readonly) {
            iotlb |= PHYS_SECTION_NOTDIRTY;
        } else {
            iotlb |= PHYS_SECTION_ROM;
        }
    } else {
        iotlb = section - section->address_space->dispatch->map.sections;
        iotlb += xlat;
    }

    /* Make accesses to pages with watchpoints go via the
       watchpoint trap routines.  */
    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
        if (cpu_watchpoint_address_matches(wp, vaddr, TARGET_PAGE_SIZE)) {
            /* Avoid trapping reads of pages with a write breakpoint. */
            if ((prot & PAGE_WRITE) || (wp->flags & BP_MEM_READ)) {
                iotlb = PHYS_SECTION_WATCH + paddr;
                *address |= TLB_MMIO;
                break;
            }
        }
    }

    return iotlb;
}
#endif /* defined(CONFIG_USER_ONLY) */

#if !defined(CONFIG_USER_ONLY)

static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
                             uint16_t section);
static subpage_t *subpage_init(AddressSpace *as, hwaddr base);

static void *(*phys_mem_alloc)(size_t size, uint64_t *align) =
                               qemu_anon_ram_alloc;

/*
 * Set a custom physical guest memory allocator.
 * Accelerators with unusual needs may need this.  Hopefully, we can
 * get rid of it eventually.
 */
void phys_mem_set_alloc(void *(*alloc)(size_t, uint64_t *align))
{
    phys_mem_alloc = alloc;
}

static uint16_t phys_section_add(PhysPageMap *map,
                                 MemoryRegionSection *section)
{
    /* The physical section number is ORed with a page-aligned
     * pointer to produce the iotlb entries.  Thus it should
     * never overflow into the page-aligned value.
     */
    assert(map->sections_nb < TARGET_PAGE_SIZE);

    if (map->sections_nb == map->sections_nb_alloc) {
        map->sections_nb_alloc = MAX(map->sections_nb_alloc * 2, 16);
        map->sections = g_renew(MemoryRegionSection, map->sections,
                                map->sections_nb_alloc);
    }
    map->sections[map->sections_nb] = *section;
    memory_region_ref(section->mr);
    return map->sections_nb++;
}

static void phys_section_destroy(MemoryRegion *mr)
{
    memory_region_unref(mr);

    if (mr->subpage) {
        subpage_t *subpage = container_of(mr, subpage_t, iomem);
        object_unref(OBJECT(&subpage->iomem));
        g_free(subpage);
    }
}

static void phys_sections_free(PhysPageMap *map)
{
    while (map->sections_nb > 0) {
        MemoryRegionSection *section = &map->sections[--map->sections_nb];
        phys_section_destroy(section->mr);
    }
    g_free(map->sections);
    g_free(map->nodes);
}

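/* Register a section that does not cover whole target pages: route it
 * through a subpage_t so accesses are dispatched per sub-page offset.
 */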
static void register_subpage(AddressSpaceDispatch *d, MemoryRegionSection *section)
{
    subpage_t *subpage;
    hwaddr base = section->offset_within_address_space
        & TARGET_PAGE_MASK;
    MemoryRegionSection *existing = phys_page_find(d->phys_map, base,
                                                   d->map.nodes, d->map.sections);
    MemoryRegionSection subsection = {
        .offset_within_address_space = base,
        .size = int128_make64(TARGET_PAGE_SIZE),
    };
    hwaddr start, end;

    assert(existing->mr->subpage || existing->mr == &io_mem_unassigned);

    if (!(existing->mr->subpage)) {
        subpage = subpage_init(d->as, base);
        subsection.address_space = d->as;
        subsection.mr = &subpage->iomem;
        phys_page_set(d, base >> TARGET_PAGE_BITS, 1,
                      phys_section_add(&d->map, &subsection));
    } else {
        subpage = container_of(existing->mr, subpage_t, iomem);
    }
    start = section->offset_within_address_space & ~TARGET_PAGE_MASK;
    end = start + int128_get64(section->size) - 1;
    subpage_register(subpage, start, end,
                     phys_section_add(&d->map, section));
}

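/* Register a page-aligned section whose size is a multiple of the target
 * page size directly in the page map.
 */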
static void register_multipage(AddressSpaceDispatch *d,
                               MemoryRegionSection *section)
{
    hwaddr start_addr = section->offset_within_address_space;
    uint16_t section_index = phys_section_add(&d->map, section);
    uint64_t num_pages = int128_get64(int128_rshift(section->size,
                                                    TARGET_PAGE_BITS));

    assert(num_pages);
    phys_page_set(d, start_addr >> TARGET_PAGE_BITS, num_pages, section_index);
}

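/* MemoryListener callback: split the incoming section into an unaligned
 * head, whole pages and an unaligned tail, and register each piece.
 */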
static void mem_add(MemoryListener *listener, MemoryRegionSection *section)
{
    AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
    AddressSpaceDispatch *d = as->next_dispatch;
    MemoryRegionSection now = *section, remain = *section;
    Int128 page_size = int128_make64(TARGET_PAGE_SIZE);

    if (now.offset_within_address_space & ~TARGET_PAGE_MASK) {
        uint64_t left = TARGET_PAGE_ALIGN(now.offset_within_address_space)
                        - now.offset_within_address_space;

        now.size = int128_min(int128_make64(left), now.size);
        register_subpage(d, &now);
    } else {
        now.size = int128_zero();
    }
    while (int128_ne(remain.size, now.size)) {
        remain.size = int128_sub(remain.size, now.size);
        remain.offset_within_address_space += int128_get64(now.size);
        remain.offset_within_region += int128_get64(now.size);
        now = remain;
        if (int128_lt(remain.size, page_size)) {
            register_subpage(d, &now);
        } else if (remain.offset_within_address_space & ~TARGET_PAGE_MASK) {
            now.size = page_size;
            register_subpage(d, &now);
        } else {
            now.size = int128_and(now.size, int128_neg(page_size));
            register_multipage(d, &now);
        }
    }
}

void qemu_flush_coalesced_mmio_buffer(void)
{
    if (kvm_enabled())
        kvm_flush_coalesced_mmio_buffer();
}

void qemu_mutex_lock_ramlist(void)
{
    qemu_mutex_lock(&ram_list.mutex);
}

void qemu_mutex_unlock_ramlist(void)
{
    qemu_mutex_unlock(&ram_list.mutex);
}

#ifdef __linux__

#include <sys/vfs.h>

#define HUGETLBFS_MAGIC       0x958458f6

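/* Return the block size of the filesystem backing @path (the huge page size
 * for a hugetlbfs mount); on failure set @errp and return 0.
 */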
static long gethugepagesize(const char *path, Error **errp)
{
    struct statfs fs;
    int ret;

    do {
        ret = statfs(path, &fs);
    } while (ret != 0 && errno == EINTR);

    if (ret != 0) {
        error_setg_errno(errp, errno, "failed to get page size of file %s",
                         path);
        return 0;
    }

    if (fs.f_type != HUGETLBFS_MAGIC)
        fprintf(stderr, "Warning: path not on HugeTLBFS: %s\n", path);

    return fs.f_bsize;
}

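/* Allocate guest RAM for @block from a file created under @path (normally a
 * hugetlbfs mount): create, size and mmap the backing file, preallocating it
 * if requested; @errp is set on failure.
 */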
Alex Williamson04b16652010-07-02 11:13:17 -06001098static void *file_ram_alloc(RAMBlock *block,
1099 ram_addr_t memory,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001100 const char *path,
1101 Error **errp)
Marcelo Tosattic9027602010-03-01 20:25:08 -03001102{
1103 char *filename;
Peter Feiner8ca761f2013-03-04 13:54:25 -05001104 char *sanitized_name;
1105 char *c;
Hu Tao557529d2014-09-09 13:28:00 +08001106 void *area = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001107 int fd;
Hu Tao557529d2014-09-09 13:28:00 +08001108 uint64_t hpagesize;
Hu Taofc7a5802014-09-09 13:28:01 +08001109 Error *local_err = NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001110
Hu Taofc7a5802014-09-09 13:28:01 +08001111 hpagesize = gethugepagesize(path, &local_err);
1112 if (local_err) {
1113 error_propagate(errp, local_err);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001114 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001115 }
Igor Mammedova2b257d2014-10-31 16:38:37 +00001116 block->mr->align = hpagesize;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001117
1118 if (memory < hpagesize) {
Hu Tao557529d2014-09-09 13:28:00 +08001119 error_setg(errp, "memory size 0x" RAM_ADDR_FMT " must be equal to "
1120 "or larger than huge page size 0x%" PRIx64,
1121 memory, hpagesize);
1122 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001123 }
1124
1125 if (kvm_enabled() && !kvm_has_sync_mmu()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001126 error_setg(errp,
1127 "host lacks kvm mmu notifiers, -mem-path unsupported");
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001128 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001129 }
1130
Peter Feiner8ca761f2013-03-04 13:54:25 -05001131 /* Make name safe to use with mkstemp by replacing '/' with '_'. */
Peter Crosthwaite83234bf2014-08-14 23:54:29 -07001132 sanitized_name = g_strdup(memory_region_name(block->mr));
Peter Feiner8ca761f2013-03-04 13:54:25 -05001133 for (c = sanitized_name; *c != '\0'; c++) {
1134 if (*c == '/')
1135 *c = '_';
1136 }
1137
1138 filename = g_strdup_printf("%s/qemu_back_mem.%s.XXXXXX", path,
1139 sanitized_name);
1140 g_free(sanitized_name);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001141
1142 fd = mkstemp(filename);
1143 if (fd < 0) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001144 error_setg_errno(errp, errno,
1145 "unable to create backing store for hugepages");
Stefan Weile4ada482013-01-16 18:37:23 +01001146 g_free(filename);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001147 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001148 }
1149 unlink(filename);
Stefan Weile4ada482013-01-16 18:37:23 +01001150 g_free(filename);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001151
1152 memory = (memory+hpagesize-1) & ~(hpagesize-1);
1153
1154 /*
1155 * ftruncate is not supported by hugetlbfs in older
1156 * hosts, so don't bother bailing out on errors.
1157 * If anything goes wrong with it under other filesystems,
1158 * mmap will fail.
1159 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001160 if (ftruncate(fd, memory)) {
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001161 perror("ftruncate");
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001162 }
Marcelo Tosattic9027602010-03-01 20:25:08 -03001163
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001164 area = mmap(0, memory, PROT_READ | PROT_WRITE,
1165 (block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE),
1166 fd, 0);
Marcelo Tosattic9027602010-03-01 20:25:08 -03001167 if (area == MAP_FAILED) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001168 error_setg_errno(errp, errno,
1169 "unable to map backing store for hugepages");
Yoshiaki Tamura9742bf22010-08-18 13:30:13 +09001170 close(fd);
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001171 goto error;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001172 }
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001173
1174 if (mem_prealloc) {
Paolo Bonzini38183312014-05-14 17:43:21 +08001175 os_mem_prealloc(fd, area, memory);
Marcelo Tosattief36fa12013-10-28 18:51:46 -02001176 }
1177
Alex Williamson04b16652010-07-02 11:13:17 -06001178 block->fd = fd;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001179 return area;
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001180
1181error:
1182 if (mem_prealloc) {
Gonglei81b07352015-02-25 12:22:31 +08001183 error_report("%s", error_get_pretty(*errp));
Marcelo Tosattif9a49df2014-02-04 13:41:53 -05001184 exit(1);
1185 }
1186 return NULL;
Marcelo Tosattic9027602010-03-01 20:25:08 -03001187}
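/*
 * In short, the hugetlbfs backing set up above boils down to the
 * following sketch (error handling and flag details simplified):
 *
 *     fd = mkstemp(filename);       // temp file under the -mem-path dir
 *     unlink(filename);             // name gone, fd keeps the file alive
 *     ftruncate(fd, memory);        // may fail on old hugetlbfs; ignored
 *     area = mmap(0, memory, PROT_READ | PROT_WRITE,
 *                 block->flags & RAM_SHARED ? MAP_SHARED : MAP_PRIVATE,
 *                 fd, 0);
 *
 * The descriptor is kept in block->fd so qemu_ram_remap() below can
 * recreate the mapping against the same backing file.
 */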
1188#endif
1189
Mike Day0dc3f442013-09-05 14:41:35 -04001190/* Called with the ramlist lock held. */
Alex Williamsond17b5282010-06-25 11:08:38 -06001191static ram_addr_t find_ram_offset(ram_addr_t size)
1192{
Alex Williamson04b16652010-07-02 11:13:17 -06001193 RAMBlock *block, *next_block;
Alex Williamson3e837b22011-10-31 08:54:09 -06001194 ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001195
Stefan Hajnoczi49cd9ac2013-03-11 10:20:21 +01001196    assert(size != 0); /* it would hand out the same offset multiple times */
1197
Mike Day0dc3f442013-09-05 14:41:35 -04001198 if (QLIST_EMPTY_RCU(&ram_list.blocks)) {
Alex Williamson04b16652010-07-02 11:13:17 -06001199 return 0;
Mike Day0d53d9f2015-01-21 13:45:24 +01001200 }
Alex Williamson04b16652010-07-02 11:13:17 -06001201
Mike Day0dc3f442013-09-05 14:41:35 -04001202 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001203 ram_addr_t end, next = RAM_ADDR_MAX;
Alex Williamson04b16652010-07-02 11:13:17 -06001204
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001205 end = block->offset + block->max_length;
Alex Williamson04b16652010-07-02 11:13:17 -06001206
Mike Day0dc3f442013-09-05 14:41:35 -04001207 QLIST_FOREACH_RCU(next_block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001208 if (next_block->offset >= end) {
1209 next = MIN(next, next_block->offset);
1210 }
1211 }
1212 if (next - end >= size && next - end < mingap) {
Alex Williamson3e837b22011-10-31 08:54:09 -06001213 offset = end;
Alex Williamson04b16652010-07-02 11:13:17 -06001214 mingap = next - end;
1215 }
1216 }
Alex Williamson3e837b22011-10-31 08:54:09 -06001217
1218 if (offset == RAM_ADDR_MAX) {
1219 fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
1220 (uint64_t)size);
1221 abort();
1222 }
1223
Alex Williamson04b16652010-07-02 11:13:17 -06001224 return offset;
1225}
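/*
 * find_ram_offset() is a best-fit search over the gaps between existing
 * blocks: for each block it finds the closest block that starts after it
 * and remembers the smallest such gap that still fits the request.
 * With made-up numbers (purely illustrative): blocks at [0, 0x8000) and
 * [0x10000, 0x20000) leave a 0x8000-byte hole; a request for 0x4000 is
 * placed at 0x8000, inside the hole, rather than after the last block.
 */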
1226
Juan Quintela652d7ec2012-07-20 10:37:54 +02001227ram_addr_t last_ram_offset(void)
Alex Williamson04b16652010-07-02 11:13:17 -06001228{
Alex Williamsond17b5282010-06-25 11:08:38 -06001229 RAMBlock *block;
1230 ram_addr_t last = 0;
1231
Mike Day0dc3f442013-09-05 14:41:35 -04001232 rcu_read_lock();
1233 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001234 last = MAX(last, block->offset + block->max_length);
Mike Day0d53d9f2015-01-21 13:45:24 +01001235 }
Mike Day0dc3f442013-09-05 14:41:35 -04001236 rcu_read_unlock();
Alex Williamsond17b5282010-06-25 11:08:38 -06001237 return last;
1238}
1239
Jason Baronddb97f12012-08-02 15:44:16 -04001240static void qemu_ram_setup_dump(void *addr, ram_addr_t size)
1241{
1242 int ret;
Jason Baronddb97f12012-08-02 15:44:16 -04001243
 1244    /* Use MADV_DONTDUMP if the user doesn't want the guest memory in the core dump */
Marcel Apfelbaum47c8ca52015-02-04 17:43:54 +02001245 if (!machine_dump_guest_core(current_machine)) {
Jason Baronddb97f12012-08-02 15:44:16 -04001246 ret = qemu_madvise(addr, size, QEMU_MADV_DONTDUMP);
1247 if (ret) {
1248 perror("qemu_madvise");
1249 fprintf(stderr, "madvise doesn't support MADV_DONTDUMP, "
1250 "but dump_guest_core=off specified\n");
1251 }
1252 }
1253}
1254
Mike Day0dc3f442013-09-05 14:41:35 -04001255/* Called within an RCU critical section, or while the ramlist lock
1256 * is held.
1257 */
Hu Tao20cfe882014-04-02 15:13:26 +08001258static RAMBlock *find_ram_block(ram_addr_t addr)
Cam Macdonell84b89d72010-07-26 18:10:57 -06001259{
Hu Tao20cfe882014-04-02 15:13:26 +08001260 RAMBlock *block;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001261
Mike Day0dc3f442013-09-05 14:41:35 -04001262 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001263 if (block->offset == addr) {
Hu Tao20cfe882014-04-02 15:13:26 +08001264 return block;
Avi Kivityc5705a72011-12-20 15:59:12 +02001265 }
1266 }
Hu Tao20cfe882014-04-02 15:13:26 +08001267
1268 return NULL;
1269}
1270
Mike Dayae3a7042013-09-05 14:41:35 -04001271/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001272void qemu_ram_set_idstr(ram_addr_t addr, const char *name, DeviceState *dev)
1273{
Mike Dayae3a7042013-09-05 14:41:35 -04001274 RAMBlock *new_block, *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001275
Mike Day0dc3f442013-09-05 14:41:35 -04001276 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001277 new_block = find_ram_block(addr);
Avi Kivityc5705a72011-12-20 15:59:12 +02001278 assert(new_block);
1279 assert(!new_block->idstr[0]);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001280
Anthony Liguori09e5ab62012-02-03 12:28:43 -06001281 if (dev) {
1282 char *id = qdev_get_dev_path(dev);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001283 if (id) {
1284 snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
Anthony Liguori7267c092011-08-20 22:09:37 -05001285 g_free(id);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001286 }
1287 }
1288 pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
1289
Mike Day0dc3f442013-09-05 14:41:35 -04001290 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Avi Kivityc5705a72011-12-20 15:59:12 +02001291 if (block != new_block && !strcmp(block->idstr, new_block->idstr)) {
Cam Macdonell84b89d72010-07-26 18:10:57 -06001292 fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
1293 new_block->idstr);
1294 abort();
1295 }
1296 }
Mike Day0dc3f442013-09-05 14:41:35 -04001297 rcu_read_unlock();
Avi Kivityc5705a72011-12-20 15:59:12 +02001298}
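/*
 * Typical use, as a sketch (the "pc.ram" name and the NULL device are
 * illustrative, not taken from this file):
 *
 *     qemu_ram_set_idstr(addr, "pc.ram", NULL);
 *
 * Passing a DeviceState instead of NULL prepends its qdev path to the
 * name, which keeps the idstr unique when several devices register
 * blocks under the same region name; the duplicate check above aborts
 * if two blocks would otherwise end up indistinguishable.
 */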
1299
Mike Dayae3a7042013-09-05 14:41:35 -04001300/* Called with iothread lock held. */
Hu Tao20cfe882014-04-02 15:13:26 +08001301void qemu_ram_unset_idstr(ram_addr_t addr)
1302{
Mike Dayae3a7042013-09-05 14:41:35 -04001303 RAMBlock *block;
Hu Tao20cfe882014-04-02 15:13:26 +08001304
Mike Dayae3a7042013-09-05 14:41:35 -04001305    /* FIXME: arch_init.c assumes that this is not called during
 1306     * migration.  Ignore the problem since hot-unplug during migration
1307 * does not work anyway.
1308 */
1309
Mike Day0dc3f442013-09-05 14:41:35 -04001310 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001311 block = find_ram_block(addr);
Hu Tao20cfe882014-04-02 15:13:26 +08001312 if (block) {
1313 memset(block->idstr, 0, sizeof(block->idstr));
1314 }
Mike Day0dc3f442013-09-05 14:41:35 -04001315 rcu_read_unlock();
Hu Tao20cfe882014-04-02 15:13:26 +08001316}
1317
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001318static int memory_try_enable_merging(void *addr, size_t len)
1319{
Marcel Apfelbaum75cc7f02015-02-04 17:43:55 +02001320 if (!machine_mem_merge(current_machine)) {
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001321 /* disabled by the user */
1322 return 0;
1323 }
1324
1325 return qemu_madvise(addr, len, QEMU_MADV_MERGEABLE);
1326}
1327
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001328/* Only legal before the guest might have detected the memory size: e.g. on
 1329 * incoming migration, or right after reset.
 1330 *
 1331 * As the memory core doesn't know how memory is accessed, it is up to the
 1332 * resize callback to update device state and/or add assertions to detect
 1333 * misuse, if necessary.
 1334 */
1335int qemu_ram_resize(ram_addr_t base, ram_addr_t newsize, Error **errp)
1336{
1337 RAMBlock *block = find_ram_block(base);
1338
1339 assert(block);
1340
Michael S. Tsirkin129ddaf2015-02-17 10:15:30 +01001341 newsize = TARGET_PAGE_ALIGN(newsize);
1342
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001343 if (block->used_length == newsize) {
1344 return 0;
1345 }
1346
1347 if (!(block->flags & RAM_RESIZEABLE)) {
1348 error_setg_errno(errp, EINVAL,
1349 "Length mismatch: %s: 0x" RAM_ADDR_FMT
1350 " in != 0x" RAM_ADDR_FMT, block->idstr,
1351 newsize, block->used_length);
1352 return -EINVAL;
1353 }
1354
1355 if (block->max_length < newsize) {
1356 error_setg_errno(errp, EINVAL,
1357 "Length too large: %s: 0x" RAM_ADDR_FMT
1358 " > 0x" RAM_ADDR_FMT, block->idstr,
1359 newsize, block->max_length);
1360 return -EINVAL;
1361 }
1362
1363 cpu_physical_memory_clear_dirty_range(block->offset, block->used_length);
1364 block->used_length = newsize;
1365 cpu_physical_memory_set_dirty_range(block->offset, block->used_length);
1366 memory_region_set_size(block->mr, newsize);
1367 if (block->resized) {
1368 block->resized(block->idstr, newsize, block->host);
1369 }
1370 return 0;
1371}
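/*
 * A minimal caller sketch, assuming 'base' names a block created with
 * RAM_RESIZEABLE (identifiers here are illustrative):
 *
 *     Error *err = NULL;
 *     if (qemu_ram_resize(base, new_size, &err) < 0) {
 *         error_report("%s", error_get_pretty(err));
 *         error_free(err);
 *     }
 *
 * Growth is capped by max_length and shrinking only reduces used_length;
 * the host mapping itself always stays at max_length.
 */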
1372
Hu Taoef701d72014-09-09 13:27:54 +08001373static ram_addr_t ram_block_add(RAMBlock *new_block, Error **errp)
Avi Kivityc5705a72011-12-20 15:59:12 +02001374{
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001375 RAMBlock *block;
Mike Day0d53d9f2015-01-21 13:45:24 +01001376 RAMBlock *last_block = NULL;
Juan Quintela2152f5c2013-10-08 13:52:02 +02001377 ram_addr_t old_ram_size, new_ram_size;
1378
1379 old_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
Avi Kivityc5705a72011-12-20 15:59:12 +02001380
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001381 qemu_mutex_lock_ramlist();
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001382 new_block->offset = find_ram_offset(new_block->max_length);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001383
1384 if (!new_block->host) {
1385 if (xen_enabled()) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001386 xen_ram_alloc(new_block->offset, new_block->max_length,
1387 new_block->mr);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001388 } else {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001389 new_block->host = phys_mem_alloc(new_block->max_length,
Igor Mammedova2b257d2014-10-31 16:38:37 +00001390 &new_block->mr->align);
Markus Armbruster39228252013-07-31 15:11:11 +02001391 if (!new_block->host) {
Hu Taoef701d72014-09-09 13:27:54 +08001392 error_setg_errno(errp, errno,
1393 "cannot set up guest memory '%s'",
1394 memory_region_name(new_block->mr));
1395 qemu_mutex_unlock_ramlist();
1396 return -1;
Markus Armbruster39228252013-07-31 15:11:11 +02001397 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001398 memory_try_enable_merging(new_block->host, new_block->max_length);
Yoshiaki Tamura6977dfe2010-08-18 15:41:49 +09001399 }
1400 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001401
Mike Day0d53d9f2015-01-21 13:45:24 +01001402 /* Keep the list sorted from biggest to smallest block. Unlike QTAILQ,
1403 * QLIST (which has an RCU-friendly variant) does not have insertion at
1404 * tail, so save the last element in last_block.
1405 */
Mike Day0dc3f442013-09-05 14:41:35 -04001406 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Mike Day0d53d9f2015-01-21 13:45:24 +01001407 last_block = block;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001408 if (block->max_length < new_block->max_length) {
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001409 break;
1410 }
1411 }
1412 if (block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001413 QLIST_INSERT_BEFORE_RCU(block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001414 } else if (last_block) {
Mike Day0dc3f442013-09-05 14:41:35 -04001415 QLIST_INSERT_AFTER_RCU(last_block, new_block, next);
Mike Day0d53d9f2015-01-21 13:45:24 +01001416 } else { /* list is empty */
Mike Day0dc3f442013-09-05 14:41:35 -04001417 QLIST_INSERT_HEAD_RCU(&ram_list.blocks, new_block, next);
Paolo Bonziniabb26d62012-11-14 16:00:51 +01001418 }
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001419 ram_list.mru_block = NULL;
Cam Macdonell84b89d72010-07-26 18:10:57 -06001420
Mike Day0dc3f442013-09-05 14:41:35 -04001421 /* Write list before version */
1422 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001423 ram_list.version++;
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001424 qemu_mutex_unlock_ramlist();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001425
Juan Quintela2152f5c2013-10-08 13:52:02 +02001426 new_ram_size = last_ram_offset() >> TARGET_PAGE_BITS;
1427
1428 if (new_ram_size > old_ram_size) {
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001429 int i;
Mike Dayae3a7042013-09-05 14:41:35 -04001430
1431 /* ram_list.dirty_memory[] is protected by the iothread lock. */
Juan Quintela1ab4c8c2013-10-08 16:14:39 +02001432 for (i = 0; i < DIRTY_MEMORY_NUM; i++) {
1433 ram_list.dirty_memory[i] =
1434 bitmap_zero_extend(ram_list.dirty_memory[i],
1435 old_ram_size, new_ram_size);
1436 }
Juan Quintela2152f5c2013-10-08 13:52:02 +02001437 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001438 cpu_physical_memory_set_dirty_range(new_block->offset,
1439 new_block->used_length);
Cam Macdonell84b89d72010-07-26 18:10:57 -06001440
Paolo Bonzinia904c912015-01-21 16:18:35 +01001441 if (new_block->host) {
1442 qemu_ram_setup_dump(new_block->host, new_block->max_length);
1443 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
1444 qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
1445 if (kvm_enabled()) {
1446 kvm_setup_guest_memory(new_block->host, new_block->max_length);
1447 }
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001448 }
Cam Macdonell84b89d72010-07-26 18:10:57 -06001449
1450 return new_block->offset;
1451}
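/*
 * The list handling above is the writer side of the RCU scheme used for
 * ram_list.blocks: insertion happens under the ramlist mutex, the list
 * update is ordered before the version bump by smp_wmb(), and readers
 * walk the list under rcu_read_lock() without taking the mutex. Sorting
 * by decreasing max_length keeps the largest (and usually hottest)
 * blocks at the front of those linear walks.
 */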
1452
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001453#ifdef __linux__
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001454ram_addr_t qemu_ram_alloc_from_file(ram_addr_t size, MemoryRegion *mr,
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001455 bool share, const char *mem_path,
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001456 Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001457{
1458 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001459 ram_addr_t addr;
1460 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001461
1462 if (xen_enabled()) {
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001463 error_setg(errp, "-mem-path not supported with Xen");
1464 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001465 }
1466
1467 if (phys_mem_alloc != qemu_anon_ram_alloc) {
1468 /*
1469 * file_ram_alloc() needs to allocate just like
1470 * phys_mem_alloc, but we haven't bothered to provide
1471 * a hook there.
1472 */
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001473 error_setg(errp,
1474 "-mem-path not supported with this accelerator");
1475 return -1;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001476 }
1477
1478 size = TARGET_PAGE_ALIGN(size);
1479 new_block = g_malloc0(sizeof(*new_block));
1480 new_block->mr = mr;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001481 new_block->used_length = size;
1482 new_block->max_length = size;
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001483 new_block->flags = share ? RAM_SHARED : 0;
Paolo Bonzini7f56e742014-05-14 17:43:20 +08001484 new_block->host = file_ram_alloc(new_block, size,
1485 mem_path, errp);
1486 if (!new_block->host) {
1487 g_free(new_block);
1488 return -1;
1489 }
1490
Hu Taoef701d72014-09-09 13:27:54 +08001491 addr = ram_block_add(new_block, &local_err);
1492 if (local_err) {
1493 g_free(new_block);
1494 error_propagate(errp, local_err);
1495 return -1;
1496 }
1497 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001498}
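/*
 * Sketch of a caller (the mount point is illustrative, not taken from
 * this file):
 *
 *     Error *err = NULL;
 *     ram_addr_t addr = qemu_ram_alloc_from_file(size, mr, true,
 *                                                "/dev/hugepages", &err);
 *     if (err) {
 *         error_propagate(errp, err);
 *     }
 *
 * share=true makes file_ram_alloc() map the file MAP_SHARED, so other
 * processes mapping the same file (e.g. vhost-user backends) observe
 * the guest's writes.
 */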
Paolo Bonzini0b183fc2014-05-14 17:43:19 +08001499#endif
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001500
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001501static
1502ram_addr_t qemu_ram_alloc_internal(ram_addr_t size, ram_addr_t max_size,
1503 void (*resized)(const char*,
1504 uint64_t length,
1505 void *host),
1506 void *host, bool resizeable,
Hu Taoef701d72014-09-09 13:27:54 +08001507 MemoryRegion *mr, Error **errp)
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001508{
1509 RAMBlock *new_block;
Hu Taoef701d72014-09-09 13:27:54 +08001510 ram_addr_t addr;
1511 Error *local_err = NULL;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001512
1513 size = TARGET_PAGE_ALIGN(size);
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001514 max_size = TARGET_PAGE_ALIGN(max_size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001515 new_block = g_malloc0(sizeof(*new_block));
1516 new_block->mr = mr;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001517 new_block->resized = resized;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001518 new_block->used_length = size;
1519 new_block->max_length = max_size;
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001520 assert(max_size >= size);
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001521 new_block->fd = -1;
1522 new_block->host = host;
1523 if (host) {
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001524 new_block->flags |= RAM_PREALLOC;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001525 }
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001526 if (resizeable) {
1527 new_block->flags |= RAM_RESIZEABLE;
1528 }
Hu Taoef701d72014-09-09 13:27:54 +08001529 addr = ram_block_add(new_block, &local_err);
1530 if (local_err) {
1531 g_free(new_block);
1532 error_propagate(errp, local_err);
1533 return -1;
1534 }
1535 return addr;
Paolo Bonzinie1c57ab2014-05-14 17:43:18 +08001536}
1537
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001538ram_addr_t qemu_ram_alloc_from_ptr(ram_addr_t size, void *host,
1539 MemoryRegion *mr, Error **errp)
1540{
1541 return qemu_ram_alloc_internal(size, size, NULL, host, false, mr, errp);
1542}
1543
Hu Taoef701d72014-09-09 13:27:54 +08001544ram_addr_t qemu_ram_alloc(ram_addr_t size, MemoryRegion *mr, Error **errp)
pbrook94a6b542009-04-11 17:15:54 +00001545{
Michael S. Tsirkin62be4e32014-11-12 14:27:41 +02001546 return qemu_ram_alloc_internal(size, size, NULL, NULL, false, mr, errp);
1547}
1548
1549ram_addr_t qemu_ram_alloc_resizeable(ram_addr_t size, ram_addr_t maxsz,
1550 void (*resized)(const char*,
1551 uint64_t length,
1552 void *host),
1553 MemoryRegion *mr, Error **errp)
1554{
1555 return qemu_ram_alloc_internal(size, maxsz, resized, NULL, true, mr, errp);
pbrook94a6b542009-04-11 17:15:54 +00001556}
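/*
 * The three wrappers above only differ in what they hand to
 * qemu_ram_alloc_internal(): qemu_ram_alloc() for plain fixed-size RAM,
 * qemu_ram_alloc_from_ptr() for memory somebody else allocated (flagged
 * RAM_PREALLOC), and qemu_ram_alloc_resizeable(), which reserves
 * max_length up front but marks only 'size' bytes as used. In practice
 * they are usually reached through the memory_region_init_ram*() helpers
 * rather than called directly (an assumption about callers outside this
 * file).
 */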
bellarde9a1ab12007-02-08 23:08:38 +00001557
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001558void qemu_ram_free_from_ptr(ram_addr_t addr)
1559{
1560 RAMBlock *block;
1561
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001562 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001563 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001564 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001565 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001566 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001567 /* Write list before version */
1568 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001569 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001570 g_free_rcu(block, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001571 break;
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001572 }
1573 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001574 qemu_mutex_unlock_ramlist();
Alex Williamson1f2e98b2011-05-03 12:48:09 -06001575}
1576
Paolo Bonzini43771532013-09-09 17:58:40 +02001577static void reclaim_ramblock(RAMBlock *block)
1578{
1579 if (block->flags & RAM_PREALLOC) {
1580 ;
1581 } else if (xen_enabled()) {
1582 xen_invalidate_map_cache_entry(block->host);
1583#ifndef _WIN32
1584 } else if (block->fd >= 0) {
1585 munmap(block->host, block->max_length);
1586 close(block->fd);
1587#endif
1588 } else {
1589 qemu_anon_ram_free(block->host, block->max_length);
1590 }
1591 g_free(block);
1592}
1593
Anthony Liguoric227f092009-10-01 16:12:16 -05001594void qemu_ram_free(ram_addr_t addr)
bellarde9a1ab12007-02-08 23:08:38 +00001595{
Alex Williamson04b16652010-07-02 11:13:17 -06001596 RAMBlock *block;
1597
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001598 qemu_mutex_lock_ramlist();
Mike Day0dc3f442013-09-05 14:41:35 -04001599 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Alex Williamson04b16652010-07-02 11:13:17 -06001600 if (addr == block->offset) {
Mike Day0dc3f442013-09-05 14:41:35 -04001601 QLIST_REMOVE_RCU(block, next);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001602 ram_list.mru_block = NULL;
Mike Day0dc3f442013-09-05 14:41:35 -04001603 /* Write list before version */
1604 smp_wmb();
Umesh Deshpandef798b072011-08-18 11:41:17 -07001605 ram_list.version++;
Paolo Bonzini43771532013-09-09 17:58:40 +02001606 call_rcu(block, reclaim_ramblock, rcu);
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001607 break;
Alex Williamson04b16652010-07-02 11:13:17 -06001608 }
1609 }
Umesh Deshpandeb2a86582011-08-17 00:01:33 -07001610 qemu_mutex_unlock_ramlist();
bellarde9a1ab12007-02-08 23:08:38 +00001611}
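/*
 * Note the asymmetry with ram_block_add(): unlinking happens under the
 * ramlist mutex, but the block is only reclaimed after a grace period
 * via call_rcu(), so readers that found it inside an RCU critical
 * section never see freed memory. reclaim_ramblock() then releases
 * whichever mapping was used: munmap()+close() for file-backed blocks,
 * xen_invalidate_map_cache_entry() under Xen, qemu_anon_ram_free() for
 * anonymous memory, and nothing at all for RAM_PREALLOC blocks.
 */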
1612
Huang Yingcd19cfa2011-03-02 08:56:19 +01001613#ifndef _WIN32
1614void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
1615{
1616 RAMBlock *block;
1617 ram_addr_t offset;
1618 int flags;
1619 void *area, *vaddr;
1620
Mike Day0dc3f442013-09-05 14:41:35 -04001621 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001622 offset = addr - block->offset;
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001623 if (offset < block->max_length) {
Michael S. Tsirkin1240be22014-11-12 11:44:41 +02001624 vaddr = ramblock_ptr(block, offset);
Paolo Bonzini7bd4f432014-05-14 17:43:22 +08001625 if (block->flags & RAM_PREALLOC) {
Huang Yingcd19cfa2011-03-02 08:56:19 +01001626 ;
Markus Armbrusterdfeaf2a2013-07-31 15:11:05 +02001627 } else if (xen_enabled()) {
1628 abort();
Huang Yingcd19cfa2011-03-02 08:56:19 +01001629 } else {
1630 flags = MAP_FIXED;
Markus Armbruster3435f392013-07-31 15:11:07 +02001631 if (block->fd >= 0) {
Paolo Bonzinidbcb8982014-06-10 19:15:24 +08001632 flags |= (block->flags & RAM_SHARED ?
1633 MAP_SHARED : MAP_PRIVATE);
Markus Armbruster3435f392013-07-31 15:11:07 +02001634 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1635 flags, block->fd, offset);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001636 } else {
Markus Armbruster2eb9fba2013-07-31 15:11:09 +02001637 /*
1638 * Remap needs to match alloc. Accelerators that
1639 * set phys_mem_alloc never remap. If they did,
1640 * we'd need a remap hook here.
1641 */
1642 assert(phys_mem_alloc == qemu_anon_ram_alloc);
1643
Huang Yingcd19cfa2011-03-02 08:56:19 +01001644 flags |= MAP_PRIVATE | MAP_ANONYMOUS;
1645 area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
1646 flags, -1, 0);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001647 }
1648 if (area != vaddr) {
Anthony PERARDf15fbc42011-07-20 08:17:42 +00001649 fprintf(stderr, "Could not remap addr: "
1650 RAM_ADDR_FMT "@" RAM_ADDR_FMT "\n",
Huang Yingcd19cfa2011-03-02 08:56:19 +01001651 length, addr);
1652 exit(1);
1653 }
Luiz Capitulino8490fc72012-09-05 16:50:16 -03001654 memory_try_enable_merging(vaddr, length);
Jason Baronddb97f12012-08-02 15:44:16 -04001655 qemu_ram_setup_dump(vaddr, length);
Huang Yingcd19cfa2011-03-02 08:56:19 +01001656 }
Huang Yingcd19cfa2011-03-02 08:56:19 +01001657 }
1658 }
1659}
1660#endif /* !_WIN32 */
1661
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001662int qemu_get_ram_fd(ram_addr_t addr)
1663{
Mike Dayae3a7042013-09-05 14:41:35 -04001664 RAMBlock *block;
1665 int fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001666
Mike Day0dc3f442013-09-05 14:41:35 -04001667 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001668 block = qemu_get_ram_block(addr);
1669 fd = block->fd;
Mike Day0dc3f442013-09-05 14:41:35 -04001670 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001671 return fd;
Paolo Bonzinia35ba7b2014-06-10 19:15:23 +08001672}
1673
Damjan Marion3fd74b82014-06-26 23:01:32 +02001674void *qemu_get_ram_block_host_ptr(ram_addr_t addr)
1675{
Mike Dayae3a7042013-09-05 14:41:35 -04001676 RAMBlock *block;
1677 void *ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001678
Mike Day0dc3f442013-09-05 14:41:35 -04001679 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001680 block = qemu_get_ram_block(addr);
1681 ptr = ramblock_ptr(block, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001682 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001683 return ptr;
Damjan Marion3fd74b82014-06-26 23:01:32 +02001684}
1685
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001686/* Return a host pointer to ram allocated with qemu_ram_alloc.
Mike Dayae3a7042013-09-05 14:41:35 -04001687 * This should not be used for general purpose DMA. Use address_space_map
1688 * or address_space_rw instead. For local memory (e.g. video ram) that the
1689 * device owns, use memory_region_get_ram_ptr.
Mike Day0dc3f442013-09-05 14:41:35 -04001690 *
1691 * By the time this function returns, the returned pointer is not protected
1692 * by RCU anymore. If the caller is not within an RCU critical section and
1693 * does not hold the iothread lock, it must have other means of protecting the
1694 * pointer, such as a reference to the region that includes the incoming
1695 * ram_addr_t.
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001696 */
1697void *qemu_get_ram_ptr(ram_addr_t addr)
1698{
Mike Dayae3a7042013-09-05 14:41:35 -04001699 RAMBlock *block;
1700 void *ptr;
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001701
Mike Day0dc3f442013-09-05 14:41:35 -04001702 rcu_read_lock();
Mike Dayae3a7042013-09-05 14:41:35 -04001703 block = qemu_get_ram_block(addr);
1704
1705 if (xen_enabled() && block->host == NULL) {
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001706 /* We need to check if the requested address is in the RAM
1707 * because we don't want to map the entire memory in QEMU.
1708 * In that case just map until the end of the page.
1709 */
1710 if (block->offset == 0) {
Mike Dayae3a7042013-09-05 14:41:35 -04001711 ptr = xen_map_cache(addr, 0, 0);
Mike Day0dc3f442013-09-05 14:41:35 -04001712 goto unlock;
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001713 }
Mike Dayae3a7042013-09-05 14:41:35 -04001714
1715 block->host = xen_map_cache(block->offset, block->max_length, 1);
Paolo Bonzini0d6d3c82012-11-14 15:45:02 +01001716 }
Mike Dayae3a7042013-09-05 14:41:35 -04001717 ptr = ramblock_ptr(block, addr - block->offset);
1718
Mike Day0dc3f442013-09-05 14:41:35 -04001719unlock:
1720 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001721 return ptr;
pbrookdc828ca2009-04-09 22:21:07 +00001722}
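/*
 * A caller that is neither inside an RCU critical section nor holding a
 * reference on the owning region can protect the pointer like this
 * (sketch; 'addr', 'buf' and 'len' are illustrative):
 *
 *     rcu_read_lock();
 *     void *host = qemu_get_ram_ptr(addr);
 *     memcpy(host, buf, len);
 *     rcu_read_unlock();
 *
 * since nothing else would stop a concurrent qemu_ram_free() from
 * reclaiming the block while the pointer is in use.
 */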
1723
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001724/* Return a host pointer to guest's ram. Similar to qemu_get_ram_ptr
Mike Dayae3a7042013-09-05 14:41:35 -04001725 * but takes a size argument.
Mike Day0dc3f442013-09-05 14:41:35 -04001726 *
1727 * By the time this function returns, the returned pointer is not protected
1728 * by RCU anymore. If the caller is not within an RCU critical section and
1729 * does not hold the iothread lock, it must have other means of protecting the
1730 * pointer, such as a reference to the region that includes the incoming
1731 * ram_addr_t.
Mike Dayae3a7042013-09-05 14:41:35 -04001732 */
Peter Maydellcb85f7a2013-07-08 09:44:04 +01001733static void *qemu_ram_ptr_length(ram_addr_t addr, hwaddr *size)
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001734{
Mike Dayae3a7042013-09-05 14:41:35 -04001735 void *ptr;
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01001736 if (*size == 0) {
1737 return NULL;
1738 }
Jan Kiszka868bb332011-06-21 22:59:09 +02001739 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001740 return xen_map_cache(addr, *size, 1);
Jan Kiszka868bb332011-06-21 22:59:09 +02001741 } else {
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001742 RAMBlock *block;
Mike Day0dc3f442013-09-05 14:41:35 -04001743 rcu_read_lock();
1744 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001745 if (addr - block->offset < block->max_length) {
1746 if (addr - block->offset + *size > block->max_length)
1747 *size = block->max_length - addr + block->offset;
Mike Dayae3a7042013-09-05 14:41:35 -04001748 ptr = ramblock_ptr(block, addr - block->offset);
Mike Day0dc3f442013-09-05 14:41:35 -04001749 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001750 return ptr;
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001751 }
1752 }
1753
1754 fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
1755 abort();
Stefano Stabellini38bee5d2011-05-19 18:35:45 +01001756 }
1757}
1758
Paolo Bonzini7443b432013-06-03 12:44:02 +02001759/* Some of the softmmu routines need to translate from a host pointer
Mike Dayae3a7042013-09-05 14:41:35 -04001760 * (typically a TLB entry) back to a ram offset.
1761 *
1762 * By the time this function returns, the returned pointer is not protected
1763 * by RCU anymore. If the caller is not within an RCU critical section and
1764 * does not hold the iothread lock, it must have other means of protecting the
1765 * pointer, such as a reference to the region that includes the incoming
1766 * ram_addr_t.
1767 */
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001768MemoryRegion *qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
pbrook5579c7f2009-04-11 14:47:08 +00001769{
pbrook94a6b542009-04-11 17:15:54 +00001770 RAMBlock *block;
1771 uint8_t *host = ptr;
Mike Dayae3a7042013-09-05 14:41:35 -04001772 MemoryRegion *mr;
pbrook94a6b542009-04-11 17:15:54 +00001773
Jan Kiszka868bb332011-06-21 22:59:09 +02001774 if (xen_enabled()) {
Mike Day0dc3f442013-09-05 14:41:35 -04001775 rcu_read_lock();
Jan Kiszkae41d7c62011-06-21 22:59:08 +02001776 *ram_addr = xen_ram_addr_from_mapcache(ptr);
Mike Dayae3a7042013-09-05 14:41:35 -04001777 mr = qemu_get_ram_block(*ram_addr)->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001778 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001779 return mr;
Stefano Stabellini712c2b42011-05-19 18:35:46 +01001780 }
1781
Mike Day0dc3f442013-09-05 14:41:35 -04001782 rcu_read_lock();
1783 block = atomic_rcu_read(&ram_list.mru_block);
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001784 if (block && block->host && host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001785 goto found;
1786 }
1787
Mike Day0dc3f442013-09-05 14:41:35 -04001788 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Jun Nakajima432d2682010-08-31 16:41:25 +01001789        /* This case happens when the block is not mapped. */
1790 if (block->host == NULL) {
1791 continue;
1792 }
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02001793 if (host - block->host < block->max_length) {
Paolo Bonzini23887b72013-05-06 14:28:39 +02001794 goto found;
Alex Williamsonf471a172010-06-11 11:11:42 -06001795 }
pbrook94a6b542009-04-11 17:15:54 +00001796 }
Jun Nakajima432d2682010-08-31 16:41:25 +01001797
Mike Day0dc3f442013-09-05 14:41:35 -04001798 rcu_read_unlock();
Paolo Bonzini1b5ec232013-05-06 14:36:15 +02001799 return NULL;
Paolo Bonzini23887b72013-05-06 14:28:39 +02001800
1801found:
1802 *ram_addr = block->offset + (host - block->host);
Mike Dayae3a7042013-09-05 14:41:35 -04001803 mr = block->mr;
Mike Day0dc3f442013-09-05 14:41:35 -04001804 rcu_read_unlock();
Mike Dayae3a7042013-09-05 14:41:35 -04001805 return mr;
Marcelo Tosattie8902612010-10-11 15:31:19 -03001806}
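/*
 * Reverse-lookup sketch: given a host pointer (typically from a TLB
 * entry), recover the ram_addr_t and the owning MemoryRegion:
 *
 *     ram_addr_t ram_addr;
 *     MemoryRegion *mr = qemu_ram_addr_from_host(host, &ram_addr);
 *     if (mr == NULL) {
 *         // host pointer does not belong to guest RAM
 *     }
 *
 * The mru_block cache handles the common case without walking the list;
 * otherwise the blocks are scanned under rcu_read_lock().
 */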
Alex Williamsonf471a172010-06-11 11:11:42 -06001807
Avi Kivitya8170e52012-10-23 12:30:10 +02001808static void notdirty_mem_write(void *opaque, hwaddr ram_addr,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001809 uint64_t val, unsigned size)
bellard1ccde1c2004-02-06 19:46:14 +00001810{
Juan Quintela52159192013-10-08 12:44:04 +02001811 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001812 tb_invalidate_phys_page_fast(ram_addr, size);
bellard3a7d9292005-08-21 09:26:42 +00001813 }
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001814 switch (size) {
1815 case 1:
1816 stb_p(qemu_get_ram_ptr(ram_addr), val);
1817 break;
1818 case 2:
1819 stw_p(qemu_get_ram_ptr(ram_addr), val);
1820 break;
1821 case 4:
1822 stl_p(qemu_get_ram_ptr(ram_addr), val);
1823 break;
1824 default:
1825 abort();
1826 }
Paolo Bonzini68868672014-07-21 16:45:18 +02001827 cpu_physical_memory_set_dirty_range_nocode(ram_addr, size);
bellardf23db162005-08-21 19:12:28 +00001828 /* we remove the notdirty callback only if the code has been
1829 flushed */
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02001830 if (!cpu_physical_memory_is_clean(ram_addr)) {
Andreas Färber4917cf42013-05-27 05:17:50 +02001831 CPUArchState *env = current_cpu->env_ptr;
Andreas Färber93afead2013-08-26 03:41:01 +02001832 tlb_set_dirty(env, current_cpu->mem_io_vaddr);
Andreas Färber4917cf42013-05-27 05:17:50 +02001833 }
bellard1ccde1c2004-02-06 19:46:14 +00001834}
1835
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001836static bool notdirty_mem_accepts(void *opaque, hwaddr addr,
1837 unsigned size, bool is_write)
1838{
1839 return is_write;
1840}
1841
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001842static const MemoryRegionOps notdirty_mem_ops = {
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001843 .write = notdirty_mem_write,
Paolo Bonzinib018ddf2013-05-24 14:48:38 +02001844 .valid.accepts = notdirty_mem_accepts,
Avi Kivity0e0df1e2012-01-02 00:32:15 +02001845 .endianness = DEVICE_NATIVE_ENDIAN,
bellard1ccde1c2004-02-06 19:46:14 +00001846};
1847
pbrook0f459d12008-06-09 00:20:13 +00001848/* Generate a debug exception if a watchpoint has been hit. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001849static void check_watchpoint(int offset, int len, MemTxAttrs attrs, int flags)
pbrook0f459d12008-06-09 00:20:13 +00001850{
Andreas Färber93afead2013-08-26 03:41:01 +02001851 CPUState *cpu = current_cpu;
1852 CPUArchState *env = cpu->env_ptr;
aliguori06d55cc2008-11-18 20:24:06 +00001853 target_ulong pc, cs_base;
pbrook0f459d12008-06-09 00:20:13 +00001854 target_ulong vaddr;
aliguoria1d1bb32008-11-18 20:07:32 +00001855 CPUWatchpoint *wp;
aliguori06d55cc2008-11-18 20:24:06 +00001856 int cpu_flags;
pbrook0f459d12008-06-09 00:20:13 +00001857
Andreas Färberff4700b2013-08-26 18:23:18 +02001858 if (cpu->watchpoint_hit) {
aliguori06d55cc2008-11-18 20:24:06 +00001859 /* We re-entered the check after replacing the TB. Now raise
 1860         * the debug interrupt so that it will trigger after the
1861 * current instruction. */
Andreas Färber93afead2013-08-26 03:41:01 +02001862 cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
aliguori06d55cc2008-11-18 20:24:06 +00001863 return;
1864 }
Andreas Färber93afead2013-08-26 03:41:01 +02001865 vaddr = (cpu->mem_io_vaddr & TARGET_PAGE_MASK) + offset;
Andreas Färberff4700b2013-08-26 18:23:18 +02001866 QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
Peter Maydell05068c02014-09-12 14:06:48 +01001867 if (cpu_watchpoint_address_matches(wp, vaddr, len)
1868 && (wp->flags & flags)) {
Peter Maydell08225672014-09-12 14:06:48 +01001869 if (flags == BP_MEM_READ) {
1870 wp->flags |= BP_WATCHPOINT_HIT_READ;
1871 } else {
1872 wp->flags |= BP_WATCHPOINT_HIT_WRITE;
1873 }
1874 wp->hitaddr = vaddr;
Peter Maydell66b9b432015-04-26 16:49:24 +01001875 wp->hitattrs = attrs;
Andreas Färberff4700b2013-08-26 18:23:18 +02001876 if (!cpu->watchpoint_hit) {
1877 cpu->watchpoint_hit = wp;
Andreas Färber239c51a2013-09-01 17:12:23 +02001878 tb_check_watchpoint(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001879 if (wp->flags & BP_STOP_BEFORE_ACCESS) {
Andreas Färber27103422013-08-26 08:31:06 +02001880 cpu->exception_index = EXCP_DEBUG;
Andreas Färber5638d182013-08-27 17:52:12 +02001881 cpu_loop_exit(cpu);
aliguori6e140f22008-11-18 20:37:55 +00001882 } else {
1883 cpu_get_tb_cpu_state(env, &pc, &cs_base, &cpu_flags);
Andreas Färber648f0342013-09-01 17:43:17 +02001884 tb_gen_code(cpu, pc, cs_base, cpu_flags, 1);
Andreas Färber0ea8cb82013-09-03 02:12:23 +02001885 cpu_resume_from_signal(cpu, NULL);
aliguori6e140f22008-11-18 20:37:55 +00001886 }
aliguori06d55cc2008-11-18 20:24:06 +00001887 }
aliguori6e140f22008-11-18 20:37:55 +00001888 } else {
1889 wp->flags &= ~BP_WATCHPOINT_HIT;
pbrook0f459d12008-06-09 00:20:13 +00001890 }
1891 }
1892}
1893
pbrook6658ffb2007-03-16 23:58:11 +00001894/* Watchpoint access routines. Watchpoints are inserted using TLB tricks,
1895 so these check for a hit then pass through to the normal out-of-line
1896 phys routines. */
Peter Maydell66b9b432015-04-26 16:49:24 +01001897static MemTxResult watch_mem_read(void *opaque, hwaddr addr, uint64_t *pdata,
1898 unsigned size, MemTxAttrs attrs)
pbrook6658ffb2007-03-16 23:58:11 +00001899{
Peter Maydell66b9b432015-04-26 16:49:24 +01001900 MemTxResult res;
1901 uint64_t data;
pbrook6658ffb2007-03-16 23:58:11 +00001902
Peter Maydell66b9b432015-04-26 16:49:24 +01001903 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_READ);
Avi Kivity1ec9b902012-01-02 12:47:48 +02001904 switch (size) {
Max Filippov67364152012-01-29 00:01:40 +04001905 case 1:
Peter Maydell66b9b432015-04-26 16:49:24 +01001906 data = address_space_ldub(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001907 break;
1908 case 2:
Peter Maydell66b9b432015-04-26 16:49:24 +01001909 data = address_space_lduw(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001910 break;
1911 case 4:
Peter Maydell66b9b432015-04-26 16:49:24 +01001912 data = address_space_ldl(&address_space_memory, addr, attrs, &res);
Max Filippov67364152012-01-29 00:01:40 +04001913 break;
Avi Kivity1ec9b902012-01-02 12:47:48 +02001914 default: abort();
1915 }
Peter Maydell66b9b432015-04-26 16:49:24 +01001916 *pdata = data;
1917 return res;
1918}
1919
1920static MemTxResult watch_mem_write(void *opaque, hwaddr addr,
1921 uint64_t val, unsigned size,
1922 MemTxAttrs attrs)
1923{
1924 MemTxResult res;
1925
1926 check_watchpoint(addr & ~TARGET_PAGE_MASK, size, attrs, BP_MEM_WRITE);
1927 switch (size) {
1928 case 1:
1929 address_space_stb(&address_space_memory, addr, val, attrs, &res);
1930 break;
1931 case 2:
1932 address_space_stw(&address_space_memory, addr, val, attrs, &res);
1933 break;
1934 case 4:
1935 address_space_stl(&address_space_memory, addr, val, attrs, &res);
1936 break;
1937 default: abort();
1938 }
1939 return res;
pbrook6658ffb2007-03-16 23:58:11 +00001940}
1941
Avi Kivity1ec9b902012-01-02 12:47:48 +02001942static const MemoryRegionOps watch_mem_ops = {
Peter Maydell66b9b432015-04-26 16:49:24 +01001943 .read_with_attrs = watch_mem_read,
1944 .write_with_attrs = watch_mem_write,
Avi Kivity1ec9b902012-01-02 12:47:48 +02001945 .endianness = DEVICE_NATIVE_ENDIAN,
pbrook6658ffb2007-03-16 23:58:11 +00001946};
pbrook6658ffb2007-03-16 23:58:11 +00001947
Peter Maydellf25a49e2015-04-26 16:49:24 +01001948static MemTxResult subpage_read(void *opaque, hwaddr addr, uint64_t *data,
1949 unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001950{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001951 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001952 uint8_t buf[8];
Peter Maydell5c9eb022015-04-26 16:49:24 +01001953 MemTxResult res;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02001954
blueswir1db7b5422007-05-26 17:36:03 +00001955#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001956 printf("%s: subpage %p len %u addr " TARGET_FMT_plx "\n", __func__,
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001957 subpage, len, addr);
blueswir1db7b5422007-05-26 17:36:03 +00001958#endif
Peter Maydell5c9eb022015-04-26 16:49:24 +01001959 res = address_space_read(subpage->as, addr + subpage->base,
1960 attrs, buf, len);
1961 if (res) {
1962 return res;
Peter Maydellf25a49e2015-04-26 16:49:24 +01001963 }
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001964 switch (len) {
1965 case 1:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001966 *data = ldub_p(buf);
1967 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001968 case 2:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001969 *data = lduw_p(buf);
1970 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001971 case 4:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001972 *data = ldl_p(buf);
1973 return MEMTX_OK;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001974 case 8:
Peter Maydellf25a49e2015-04-26 16:49:24 +01001975 *data = ldq_p(buf);
1976 return MEMTX_OK;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001977 default:
1978 abort();
1979 }
blueswir1db7b5422007-05-26 17:36:03 +00001980}
1981
Peter Maydellf25a49e2015-04-26 16:49:24 +01001982static MemTxResult subpage_write(void *opaque, hwaddr addr,
1983 uint64_t value, unsigned len, MemTxAttrs attrs)
blueswir1db7b5422007-05-26 17:36:03 +00001984{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001985 subpage_t *subpage = opaque;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01001986 uint8_t buf[8];
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001987
blueswir1db7b5422007-05-26 17:36:03 +00001988#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08001989 printf("%s: subpage %p len %u addr " TARGET_FMT_plx
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001990 " value %"PRIx64"\n",
1991 __func__, subpage, len, addr, value);
blueswir1db7b5422007-05-26 17:36:03 +00001992#endif
Jan Kiszkaacc9d802013-05-26 21:55:37 +02001993 switch (len) {
1994 case 1:
1995 stb_p(buf, value);
1996 break;
1997 case 2:
1998 stw_p(buf, value);
1999 break;
2000 case 4:
2001 stl_p(buf, value);
2002 break;
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002003 case 8:
2004 stq_p(buf, value);
2005 break;
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002006 default:
2007 abort();
2008 }
Peter Maydell5c9eb022015-04-26 16:49:24 +01002009 return address_space_write(subpage->as, addr + subpage->base,
2010 attrs, buf, len);
blueswir1db7b5422007-05-26 17:36:03 +00002011}
2012
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002013static bool subpage_accepts(void *opaque, hwaddr addr,
Amos Kong016e9d62013-09-27 09:25:38 +08002014 unsigned len, bool is_write)
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002015{
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002016 subpage_t *subpage = opaque;
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002017#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002018 printf("%s: subpage %p %c len %u addr " TARGET_FMT_plx "\n",
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002019 __func__, subpage, is_write ? 'w' : 'r', len, addr);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002020#endif
2021
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002022 return address_space_access_valid(subpage->as, addr + subpage->base,
Amos Kong016e9d62013-09-27 09:25:38 +08002023 len, is_write);
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002024}
2025
Avi Kivity70c68e42012-01-02 12:32:48 +02002026static const MemoryRegionOps subpage_ops = {
Peter Maydellf25a49e2015-04-26 16:49:24 +01002027 .read_with_attrs = subpage_read,
2028 .write_with_attrs = subpage_write,
Paolo Bonziniff6cff72014-12-22 13:11:39 +01002029 .impl.min_access_size = 1,
2030 .impl.max_access_size = 8,
2031 .valid.min_access_size = 1,
2032 .valid.max_access_size = 8,
Paolo Bonzinic353e4c2013-05-24 14:02:39 +02002033 .valid.accepts = subpage_accepts,
Avi Kivity70c68e42012-01-02 12:32:48 +02002034 .endianness = DEVICE_NATIVE_ENDIAN,
blueswir1db7b5422007-05-26 17:36:03 +00002035};
2036
Anthony Liguoric227f092009-10-01 16:12:16 -05002037static int subpage_register (subpage_t *mmio, uint32_t start, uint32_t end,
Avi Kivity5312bd82012-02-12 18:32:55 +02002038 uint16_t section)
blueswir1db7b5422007-05-26 17:36:03 +00002039{
2040 int idx, eidx;
2041
2042 if (start >= TARGET_PAGE_SIZE || end >= TARGET_PAGE_SIZE)
2043 return -1;
2044 idx = SUBPAGE_IDX(start);
2045 eidx = SUBPAGE_IDX(end);
2046#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002047 printf("%s: %p start %08x end %08x idx %08x eidx %08x section %d\n",
2048 __func__, mmio, start, end, idx, eidx, section);
blueswir1db7b5422007-05-26 17:36:03 +00002049#endif
blueswir1db7b5422007-05-26 17:36:03 +00002050 for (; idx <= eidx; idx++) {
Avi Kivity5312bd82012-02-12 18:32:55 +02002051 mmio->sub_section[idx] = section;
blueswir1db7b5422007-05-26 17:36:03 +00002052 }
2053
2054 return 0;
2055}
2056
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002057static subpage_t *subpage_init(AddressSpace *as, hwaddr base)
blueswir1db7b5422007-05-26 17:36:03 +00002058{
Anthony Liguoric227f092009-10-01 16:12:16 -05002059 subpage_t *mmio;
blueswir1db7b5422007-05-26 17:36:03 +00002060
Anthony Liguori7267c092011-08-20 22:09:37 -05002061 mmio = g_malloc0(sizeof(subpage_t));
aliguori1eec6142009-02-05 22:06:18 +00002062
Jan Kiszkaacc9d802013-05-26 21:55:37 +02002063 mmio->as = as;
aliguori1eec6142009-02-05 22:06:18 +00002064 mmio->base = base;
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002065 memory_region_init_io(&mmio->iomem, NULL, &subpage_ops, mmio,
Peter Crosthwaiteb4fefef2014-06-05 23:15:52 -07002066 NULL, TARGET_PAGE_SIZE);
Avi Kivityb3b00c72012-01-02 13:20:11 +02002067 mmio->iomem.subpage = true;
blueswir1db7b5422007-05-26 17:36:03 +00002068#if defined(DEBUG_SUBPAGE)
Amos Kong016e9d62013-09-27 09:25:38 +08002069 printf("%s: %p base " TARGET_FMT_plx " len %08x\n", __func__,
2070 mmio, base, TARGET_PAGE_SIZE);
blueswir1db7b5422007-05-26 17:36:03 +00002071#endif
Liu Ping Fanb41aac42013-05-29 11:09:17 +02002072 subpage_register(mmio, 0, TARGET_PAGE_SIZE-1, PHYS_SECTION_UNASSIGNED);
blueswir1db7b5422007-05-26 17:36:03 +00002073
2074 return mmio;
2075}
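/*
 * Subpages handle target pages that are split between several
 * MemoryRegionSections, e.g. a small MMIO region that does not start or
 * end on a page boundary. register_subpage() points the page's dispatch
 * entry at one subpage_t, and subpage_read()/subpage_write() forward
 * each access to the owning address space at 'base + addr', so the
 * per-section handlers still see the offsets they expect.
 */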
2076
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002077static uint16_t dummy_section(PhysPageMap *map, AddressSpace *as,
2078 MemoryRegion *mr)
Avi Kivity5312bd82012-02-12 18:32:55 +02002079{
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002080 assert(as);
Avi Kivity5312bd82012-02-12 18:32:55 +02002081 MemoryRegionSection section = {
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002082 .address_space = as,
Avi Kivity5312bd82012-02-12 18:32:55 +02002083 .mr = mr,
2084 .offset_within_address_space = 0,
2085 .offset_within_region = 0,
Paolo Bonzini052e87b2013-05-27 10:08:27 +02002086 .size = int128_2_64(),
Avi Kivity5312bd82012-02-12 18:32:55 +02002087 };
2088
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002089 return phys_section_add(map, &section);
Avi Kivity5312bd82012-02-12 18:32:55 +02002090}
2091
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002092MemoryRegion *iotlb_to_region(CPUState *cpu, hwaddr index)
Avi Kivityaa102232012-03-08 17:06:55 +02002093{
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002094 AddressSpaceDispatch *d = atomic_rcu_read(&cpu->memory_dispatch);
2095 MemoryRegionSection *sections = d->map.sections;
Paolo Bonzini9d82b5a2013-08-16 08:26:30 +02002096
2097 return sections[index & ~TARGET_PAGE_MASK].mr;
Avi Kivityaa102232012-03-08 17:06:55 +02002098}
2099
Avi Kivitye9179ce2009-06-14 11:38:52 +03002100static void io_mem_init(void)
2101{
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002102 memory_region_init_io(&io_mem_rom, NULL, &unassigned_mem_ops, NULL, NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002103 memory_region_init_io(&io_mem_unassigned, NULL, &unassigned_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002104 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002105 memory_region_init_io(&io_mem_notdirty, NULL, &notdirty_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002106 NULL, UINT64_MAX);
Paolo Bonzini2c9b15c2013-06-06 05:41:28 -04002107 memory_region_init_io(&io_mem_watch, NULL, &watch_mem_ops, NULL,
Paolo Bonzini1f6245e2014-06-13 10:48:06 +02002108 NULL, UINT64_MAX);
Avi Kivitye9179ce2009-06-14 11:38:52 +03002109}
2110
Avi Kivityac1970f2012-10-03 16:22:53 +02002111static void mem_begin(MemoryListener *listener)
2112{
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002113 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002114 AddressSpaceDispatch *d = g_new0(AddressSpaceDispatch, 1);
2115 uint16_t n;
2116
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002117 n = dummy_section(&d->map, as, &io_mem_unassigned);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002118 assert(n == PHYS_SECTION_UNASSIGNED);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002119 n = dummy_section(&d->map, as, &io_mem_notdirty);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002120 assert(n == PHYS_SECTION_NOTDIRTY);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002121 n = dummy_section(&d->map, as, &io_mem_rom);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002122 assert(n == PHYS_SECTION_ROM);
Peter Crosthwaitea656e222014-06-02 19:08:44 -07002123 n = dummy_section(&d->map, as, &io_mem_watch);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002124 assert(n == PHYS_SECTION_WATCH);
Paolo Bonzini00752702013-05-29 12:13:54 +02002125
Michael S. Tsirkin9736e552013-11-11 14:42:43 +02002126 d->phys_map = (PhysPageEntry) { .ptr = PHYS_MAP_NODE_NIL, .skip = 1 };
Paolo Bonzini00752702013-05-29 12:13:54 +02002127 d->as = as;
2128 as->next_dispatch = d;
2129}
2130
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002131static void address_space_dispatch_free(AddressSpaceDispatch *d)
2132{
2133 phys_sections_free(&d->map);
2134 g_free(d);
2135}
2136
Paolo Bonzini00752702013-05-29 12:13:54 +02002137static void mem_commit(MemoryListener *listener)
2138{
2139 AddressSpace *as = container_of(listener, AddressSpace, dispatch_listener);
Paolo Bonzini0475d942013-05-29 12:28:21 +02002140 AddressSpaceDispatch *cur = as->dispatch;
2141 AddressSpaceDispatch *next = as->next_dispatch;
Avi Kivityac1970f2012-10-03 16:22:53 +02002142
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002143 phys_page_compact_all(next, next->map.nodes_nb);
Michael S. Tsirkinb35ba302013-11-11 17:52:07 +02002144
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002145 atomic_rcu_set(&as->dispatch, next);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002146 if (cur) {
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002147 call_rcu(cur, address_space_dispatch_free, rcu);
Marcel Apfelbaum53cb28c2013-12-01 14:02:23 +02002148 }
Paolo Bonzini9affd6f2013-05-29 12:09:47 +02002149}
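/*
 * The begin/commit pair is a read-copy-update of the dispatch tables:
 * mem_begin() builds a fresh AddressSpaceDispatch on the side, the
 * listener fills it in through mem_add()/region_add, and mem_commit()
 * publishes it with atomic_rcu_set() while the previous table is freed
 * only after a grace period. Readers that raced with the update keep
 * using the old, still-valid tables until they leave their RCU critical
 * section.
 */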
2150
Avi Kivity1d711482012-10-02 18:54:45 +02002151static void tcg_commit(MemoryListener *listener)
Avi Kivity50c1e142012-02-08 21:36:02 +02002152{
Andreas Färber182735e2013-05-29 22:29:20 +02002153 CPUState *cpu;
Avi Kivity117712c2012-02-12 21:23:17 +02002154
2155 /* since each CPU stores ram addresses in its TLB cache, we must
2156 reset the modified entries */
2157 /* XXX: slow ! */
Andreas Färberbdc44642013-06-24 23:50:24 +02002158 CPU_FOREACH(cpu) {
Edgar E. Iglesias33bde2e2013-11-21 19:06:30 +01002159        /* FIXME: Disentangle the cpu.h circular file dependencies so we can
 2160           directly get the right CPU from the listener.  */
2161 if (cpu->tcg_as_listener != listener) {
2162 continue;
2163 }
Paolo Bonzini76e5c762015-01-15 12:46:47 +01002164 cpu_reload_memory_map(cpu);
Avi Kivity117712c2012-02-12 21:23:17 +02002165 }
Avi Kivity50c1e142012-02-08 21:36:02 +02002166}
2167
Avi Kivity93632742012-02-08 16:54:16 +02002168static void core_log_global_start(MemoryListener *listener)
2169{
Juan Quintela981fdf22013-10-10 11:54:09 +02002170 cpu_physical_memory_set_dirty_tracking(true);
Avi Kivity93632742012-02-08 16:54:16 +02002171}
2172
2173static void core_log_global_stop(MemoryListener *listener)
2174{
Juan Quintela981fdf22013-10-10 11:54:09 +02002175 cpu_physical_memory_set_dirty_tracking(false);
Avi Kivity93632742012-02-08 16:54:16 +02002176}
2177
Avi Kivity93632742012-02-08 16:54:16 +02002178static MemoryListener core_memory_listener = {
Avi Kivity93632742012-02-08 16:54:16 +02002179 .log_global_start = core_log_global_start,
2180 .log_global_stop = core_log_global_stop,
Avi Kivityac1970f2012-10-03 16:22:53 +02002181 .priority = 1,
Avi Kivity93632742012-02-08 16:54:16 +02002182};
2183
Avi Kivityac1970f2012-10-03 16:22:53 +02002184void address_space_init_dispatch(AddressSpace *as)
2185{
Paolo Bonzini00752702013-05-29 12:13:54 +02002186 as->dispatch = NULL;
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002187 as->dispatch_listener = (MemoryListener) {
Avi Kivityac1970f2012-10-03 16:22:53 +02002188 .begin = mem_begin,
Paolo Bonzini00752702013-05-29 12:13:54 +02002189 .commit = mem_commit,
Avi Kivityac1970f2012-10-03 16:22:53 +02002190 .region_add = mem_add,
2191 .region_nop = mem_add,
2192 .priority = 0,
2193 };
Paolo Bonzini89ae3372013-06-02 10:39:07 +02002194 memory_listener_register(&as->dispatch_listener, as);
Avi Kivityac1970f2012-10-03 16:22:53 +02002195}
2196
Paolo Bonzini6e48e8f2015-02-10 10:25:44 -07002197void address_space_unregister(AddressSpace *as)
2198{
2199 memory_listener_unregister(&as->dispatch_listener);
2200}
2201
Avi Kivity83f3c252012-10-07 12:59:55 +02002202void address_space_destroy_dispatch(AddressSpace *as)
2203{
2204 AddressSpaceDispatch *d = as->dispatch;
2205
Paolo Bonzini79e2b9a2015-01-21 12:09:14 +01002206 atomic_rcu_set(&as->dispatch, NULL);
2207 if (d) {
2208 call_rcu(d, address_space_dispatch_free, rcu);
2209 }
Avi Kivity83f3c252012-10-07 12:59:55 +02002210}
2211
Avi Kivity62152b82011-07-26 14:26:14 +03002212static void memory_map_init(void)
2213{
Anthony Liguori7267c092011-08-20 22:09:37 -05002214 system_memory = g_malloc(sizeof(*system_memory));
Paolo Bonzini03f49952013-11-07 17:14:36 +01002215
Paolo Bonzini57271d62013-11-07 17:14:37 +01002216 memory_region_init(system_memory, NULL, "system", UINT64_MAX);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002217 address_space_init(&address_space_memory, system_memory, "memory");
Avi Kivity309cb472011-08-08 16:09:03 +03002218
Anthony Liguori7267c092011-08-20 22:09:37 -05002219 system_io = g_malloc(sizeof(*system_io));
Jan Kiszka3bb28b722013-09-02 18:43:30 +02002220 memory_region_init_io(system_io, NULL, &unassigned_io_ops, NULL, "io",
2221 65536);
Alexey Kardashevskiy7dca8042013-04-29 16:25:51 +00002222 address_space_init(&address_space_io, system_io, "I/O");
Avi Kivity93632742012-02-08 16:54:16 +02002223
Avi Kivityf6790af2012-10-02 20:13:51 +02002224 memory_listener_register(&core_memory_listener, &address_space_memory);
Avi Kivity62152b82011-07-26 14:26:14 +03002225}
2226
2227MemoryRegion *get_system_memory(void)
2228{
2229 return system_memory;
2230}
2231
Avi Kivity309cb472011-08-08 16:09:03 +03002232MemoryRegion *get_system_io(void)
2233{
2234 return system_io;
2235}
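/*
 * Usage sketch: board code typically fetches the flat system memory
 * region created above and maps its own regions into it.  This is a
 * minimal illustration only; "ram", "ram_size" and RAM_BASE are made-up
 * names, and memory_region_init_ram()'s exact signature varies between
 * QEMU versions.
 *
 *     MemoryRegion *sysmem = get_system_memory();
 *     MemoryRegion *ram = g_new(MemoryRegion, 1);
 *
 *     memory_region_init_ram(ram, NULL, "board.ram", ram_size,
 *                            &error_abort);
 *     memory_region_add_subregion(sysmem, RAM_BASE, ram);
 */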
2236
pbrooke2eef172008-06-08 01:09:01 +00002237#endif /* !defined(CONFIG_USER_ONLY) */
2238
bellard13eb76e2004-01-24 15:23:36 +00002239/* physical memory access (slow version, mainly for debug) */
2240#if defined(CONFIG_USER_ONLY)
Andreas Färberf17ec442013-06-29 19:40:58 +02002241int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brooka68fe892010-03-01 00:08:59 +00002242 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00002243{
2244 int l, flags;
2245 target_ulong page;
pbrook53a59602006-03-25 19:31:22 +00002246 void * p;
bellard13eb76e2004-01-24 15:23:36 +00002247
2248 while (len > 0) {
2249 page = addr & TARGET_PAGE_MASK;
2250 l = (page + TARGET_PAGE_SIZE) - addr;
2251 if (l > len)
2252 l = len;
2253 flags = page_get_flags(page);
2254 if (!(flags & PAGE_VALID))
Paul Brooka68fe892010-03-01 00:08:59 +00002255 return -1;
bellard13eb76e2004-01-24 15:23:36 +00002256 if (is_write) {
2257 if (!(flags & PAGE_WRITE))
Paul Brooka68fe892010-03-01 00:08:59 +00002258 return -1;
bellard579a97f2007-11-11 14:26:47 +00002259 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002260 if (!(p = lock_user(VERIFY_WRITE, addr, l, 0)))
Paul Brooka68fe892010-03-01 00:08:59 +00002261 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002262 memcpy(p, buf, l);
2263 unlock_user(p, addr, l);
bellard13eb76e2004-01-24 15:23:36 +00002264 } else {
2265 if (!(flags & PAGE_READ))
Paul Brooka68fe892010-03-01 00:08:59 +00002266 return -1;
bellard579a97f2007-11-11 14:26:47 +00002267 /* XXX: this code should not depend on lock_user */
aurel3272fb7da2008-04-27 23:53:45 +00002268 if (!(p = lock_user(VERIFY_READ, addr, l, 1)))
Paul Brooka68fe892010-03-01 00:08:59 +00002269 return -1;
aurel3272fb7da2008-04-27 23:53:45 +00002270 memcpy(buf, p, l);
aurel325b257572008-04-28 08:54:59 +00002271 unlock_user(p, addr, 0);
bellard13eb76e2004-01-24 15:23:36 +00002272 }
2273 len -= l;
2274 buf += l;
2275 addr += l;
2276 }
Paul Brooka68fe892010-03-01 00:08:59 +00002277 return 0;
bellard13eb76e2004-01-24 15:23:36 +00002278}
bellard8df1cd02005-01-28 22:37:22 +00002279
bellard13eb76e2004-01-24 15:23:36 +00002280#else
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002281
Avi Kivitya8170e52012-10-23 12:30:10 +02002282static void invalidate_and_set_dirty(hwaddr addr,
2283 hwaddr length)
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002284{
Peter Maydellf874bf92014-11-16 19:44:21 +00002285 if (cpu_physical_memory_range_includes_clean(addr, length)) {
2286 tb_invalidate_phys_range(addr, addr + length, 0);
Paolo Bonzini68868672014-07-21 16:45:18 +02002287 cpu_physical_memory_set_dirty_range_nocode(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002288 }
Anthony PERARDe2269392012-10-03 13:49:22 +00002289 xen_modified_memory(addr, length);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002290}
2291
Richard Henderson23326162013-07-08 14:55:59 -07002292static int memory_access_size(MemoryRegion *mr, unsigned l, hwaddr addr)
Paolo Bonzini82f25632013-05-24 11:59:43 +02002293{
Paolo Bonzinie1622f42013-07-17 13:17:41 +02002294 unsigned access_size_max = mr->ops->valid.max_access_size;
Richard Henderson23326162013-07-08 14:55:59 -07002295
2296 /* Regions are assumed to support 1-4 byte accesses unless
2297 otherwise specified. */
Richard Henderson23326162013-07-08 14:55:59 -07002298 if (access_size_max == 0) {
2299 access_size_max = 4;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002300 }
Richard Henderson23326162013-07-08 14:55:59 -07002301
2302 /* Bound the maximum access by the alignment of the address. */
2303 if (!mr->ops->impl.unaligned) {
2304 unsigned align_size_max = addr & -addr;
2305 if (align_size_max != 0 && align_size_max < access_size_max) {
2306 access_size_max = align_size_max;
2307 }
2308 }
2309
2310 /* Don't attempt accesses larger than the maximum. */
2311 if (l > access_size_max) {
2312 l = access_size_max;
2313 }
Paolo Bonzini098178f2013-07-29 14:27:39 +02002314 if (l & (l - 1)) {
2315 l = 1 << (qemu_fls(l) - 1);
2316 }
Richard Henderson23326162013-07-08 14:55:59 -07002317
2318 return l;
Paolo Bonzini82f25632013-05-24 11:59:43 +02002319}
2320
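/*
 * Worked example for memory_access_size(): assume an MMIO region with
 * ->ops->valid.max_access_size == 4 and ->ops->impl.unaligned false.
 * For a request of l == 6 bytes at addr == 0x1002, the alignment bound
 * is addr & -addr == 0x2, so access_size_max drops from 4 to 2 and l is
 * clamped to 2 (already a power of two).  The loop in address_space_rw()
 * below then issues the transfer as a series of 16-bit accesses.
 */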
Peter Maydell5c9eb022015-04-26 16:49:24 +01002321MemTxResult address_space_rw(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2322 uint8_t *buf, int len, bool is_write)
bellard13eb76e2004-01-24 15:23:36 +00002323{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002324 hwaddr l;
bellard13eb76e2004-01-24 15:23:36 +00002325 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002326 uint64_t val;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002327 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002328 MemoryRegion *mr;
Peter Maydell3b643492015-04-26 16:49:23 +01002329 MemTxResult result = MEMTX_OK;
ths3b46e622007-09-17 08:09:54 +00002330
Paolo Bonzini41063e12015-03-18 14:21:43 +01002331 rcu_read_lock();
bellard13eb76e2004-01-24 15:23:36 +00002332 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002333 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002334 mr = address_space_translate(as, addr, &addr1, &l, is_write);
ths3b46e622007-09-17 08:09:54 +00002335
bellard13eb76e2004-01-24 15:23:36 +00002336 if (is_write) {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002337 if (!memory_access_is_direct(mr, is_write)) {
2338 l = memory_access_size(mr, l, addr1);
Andreas Färber4917cf42013-05-27 05:17:50 +02002339 /* XXX: could force current_cpu to NULL to avoid
bellard6a00d602005-11-21 23:25:50 +00002340 potential bugs */
Richard Henderson23326162013-07-08 14:55:59 -07002341 switch (l) {
2342 case 8:
2343 /* 64 bit write access */
2344 val = ldq_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002345 result |= memory_region_dispatch_write(mr, addr1, val, 8,
2346 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002347 break;
2348 case 4:
bellard1c213d12005-09-03 10:49:04 +00002349 /* 32 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002350 val = ldl_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002351 result |= memory_region_dispatch_write(mr, addr1, val, 4,
2352 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002353 break;
2354 case 2:
bellard1c213d12005-09-03 10:49:04 +00002355 /* 16 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002356 val = lduw_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002357 result |= memory_region_dispatch_write(mr, addr1, val, 2,
2358 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002359 break;
2360 case 1:
bellard1c213d12005-09-03 10:49:04 +00002361 /* 8 bit write access */
bellardc27004e2005-01-03 23:35:10 +00002362 val = ldub_p(buf);
Peter Maydell3b643492015-04-26 16:49:23 +01002363 result |= memory_region_dispatch_write(mr, addr1, val, 1,
2364 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002365 break;
2366 default:
2367 abort();
bellard13eb76e2004-01-24 15:23:36 +00002368 }
Paolo Bonzini2bbfa052013-05-24 12:29:54 +02002369 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002370 addr1 += memory_region_get_ram_addr(mr);
bellard13eb76e2004-01-24 15:23:36 +00002371 /* RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002372 ptr = qemu_get_ram_ptr(addr1);
bellard13eb76e2004-01-24 15:23:36 +00002373 memcpy(ptr, buf, l);
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00002374 invalidate_and_set_dirty(addr1, l);
bellard13eb76e2004-01-24 15:23:36 +00002375 }
2376 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002377 if (!memory_access_is_direct(mr, is_write)) {
bellard13eb76e2004-01-24 15:23:36 +00002378 /* I/O case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002379 l = memory_access_size(mr, l, addr1);
Richard Henderson23326162013-07-08 14:55:59 -07002380 switch (l) {
2381 case 8:
2382 /* 64 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002383 result |= memory_region_dispatch_read(mr, addr1, &val, 8,
2384 attrs);
Richard Henderson23326162013-07-08 14:55:59 -07002385 stq_p(buf, val);
2386 break;
2387 case 4:
bellard13eb76e2004-01-24 15:23:36 +00002388 /* 32 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002389 result |= memory_region_dispatch_read(mr, addr1, &val, 4,
2390 attrs);
bellardc27004e2005-01-03 23:35:10 +00002391 stl_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002392 break;
2393 case 2:
bellard13eb76e2004-01-24 15:23:36 +00002394 /* 16 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002395 result |= memory_region_dispatch_read(mr, addr1, &val, 2,
2396 attrs);
bellardc27004e2005-01-03 23:35:10 +00002397 stw_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002398 break;
2399 case 1:
bellard1c213d12005-09-03 10:49:04 +00002400 /* 8 bit read access */
Peter Maydell3b643492015-04-26 16:49:23 +01002401 result |= memory_region_dispatch_read(mr, addr1, &val, 1,
2402 attrs);
bellardc27004e2005-01-03 23:35:10 +00002403 stb_p(buf, val);
Richard Henderson23326162013-07-08 14:55:59 -07002404 break;
2405 default:
2406 abort();
bellard13eb76e2004-01-24 15:23:36 +00002407 }
2408 } else {
2409 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002410 ptr = qemu_get_ram_ptr(mr->ram_addr + addr1);
Avi Kivityf3705d52012-03-08 16:16:34 +02002411 memcpy(buf, ptr, l);
bellard13eb76e2004-01-24 15:23:36 +00002412 }
2413 }
2414 len -= l;
2415 buf += l;
2416 addr += l;
2417 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002418 rcu_read_unlock();
Paolo Bonzinifd8aaa72013-05-21 09:56:55 +02002419
Peter Maydell3b643492015-04-26 16:49:23 +01002420 return result;
bellard13eb76e2004-01-24 15:23:36 +00002421}
bellard8df1cd02005-01-28 22:37:22 +00002422
Peter Maydell5c9eb022015-04-26 16:49:24 +01002423MemTxResult address_space_write(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2424 const uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002425{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002426 return address_space_rw(as, addr, attrs, (uint8_t *)buf, len, true);
Avi Kivityac1970f2012-10-03 16:22:53 +02002427}
2428
Peter Maydell5c9eb022015-04-26 16:49:24 +01002429MemTxResult address_space_read(AddressSpace *as, hwaddr addr, MemTxAttrs attrs,
2430 uint8_t *buf, int len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002431{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002432 return address_space_rw(as, addr, attrs, buf, len, false);
Avi Kivityac1970f2012-10-03 16:22:53 +02002433}
2434
2435
Avi Kivitya8170e52012-10-23 12:30:10 +02002436void cpu_physical_memory_rw(hwaddr addr, uint8_t *buf,
Avi Kivityac1970f2012-10-03 16:22:53 +02002437 int len, int is_write)
2438{
Peter Maydell5c9eb022015-04-26 16:49:24 +01002439 address_space_rw(&address_space_memory, addr, MEMTXATTRS_UNSPECIFIED,
2440 buf, len, is_write);
Avi Kivityac1970f2012-10-03 16:22:53 +02002441}
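/*
 * Usage sketch: a device model that DMAs a buffer into guest memory and
 * wants to see bus errors checks the MemTxResult; the address below is
 * purely illustrative.
 *
 *     uint8_t buf[64] = { 0 };
 *     MemTxResult r = address_space_write(&address_space_memory,
 *                                         0x40000000ULL,
 *                                         MEMTXATTRS_UNSPECIFIED,
 *                                         buf, sizeof(buf));
 *     if (r != MEMTX_OK) {
 *         ... report a DMA error instead of silently ignoring it ...
 *     }
 *
 * cpu_physical_memory_rw() above is the legacy wrapper that always uses
 * address_space_memory and unspecified transaction attributes.
 */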
2442
Alexander Graf582b55a2013-12-11 14:17:44 +01002443enum write_rom_type {
2444 WRITE_DATA,
2445 FLUSH_CACHE,
2446};
2447
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002448static inline void cpu_physical_memory_write_rom_internal(AddressSpace *as,
Alexander Graf582b55a2013-12-11 14:17:44 +01002449 hwaddr addr, const uint8_t *buf, int len, enum write_rom_type type)
bellardd0ecd2a2006-04-23 17:14:48 +00002450{
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002451 hwaddr l;
bellardd0ecd2a2006-04-23 17:14:48 +00002452 uint8_t *ptr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002453 hwaddr addr1;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002454 MemoryRegion *mr;
ths3b46e622007-09-17 08:09:54 +00002455
Paolo Bonzini41063e12015-03-18 14:21:43 +01002456 rcu_read_lock();
bellardd0ecd2a2006-04-23 17:14:48 +00002457 while (len > 0) {
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002458 l = len;
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002459 mr = address_space_translate(as, addr, &addr1, &l, true);
ths3b46e622007-09-17 08:09:54 +00002460
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002461 if (!(memory_region_is_ram(mr) ||
2462 memory_region_is_romd(mr))) {
bellardd0ecd2a2006-04-23 17:14:48 +00002463 /* do nothing */
2464 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002465 addr1 += memory_region_get_ram_addr(mr);
bellardd0ecd2a2006-04-23 17:14:48 +00002466 /* ROM/RAM case */
pbrook5579c7f2009-04-11 14:47:08 +00002467 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf582b55a2013-12-11 14:17:44 +01002468 switch (type) {
2469 case WRITE_DATA:
2470 memcpy(ptr, buf, l);
2471 invalidate_and_set_dirty(addr1, l);
2472 break;
2473 case FLUSH_CACHE:
2474 flush_icache_range((uintptr_t)ptr, (uintptr_t)ptr + l);
2475 break;
2476 }
bellardd0ecd2a2006-04-23 17:14:48 +00002477 }
2478 len -= l;
2479 buf += l;
2480 addr += l;
2481 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002482 rcu_read_unlock();
bellardd0ecd2a2006-04-23 17:14:48 +00002483}
2484
Alexander Graf582b55a2013-12-11 14:17:44 +01002485/* used for ROM loading: can write in RAM and ROM */
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002486void cpu_physical_memory_write_rom(AddressSpace *as, hwaddr addr,
Alexander Graf582b55a2013-12-11 14:17:44 +01002487 const uint8_t *buf, int len)
2488{
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002489 cpu_physical_memory_write_rom_internal(as, addr, buf, len, WRITE_DATA);
Alexander Graf582b55a2013-12-11 14:17:44 +01002490}
2491
2492void cpu_flush_icache_range(hwaddr start, int len)
2493{
2494 /*
2495 * This function should do the same thing as an icache flush that was
2496 * triggered from within the guest. For TCG we are always cache coherent,
2497 * so there is no need to flush anything. For KVM / Xen we need to flush
2498 * the host's instruction cache at least.
2499 */
2500 if (tcg_enabled()) {
2501 return;
2502 }
2503
Edgar E. Iglesias2a221652013-12-13 16:28:52 +10002504 cpu_physical_memory_write_rom_internal(&address_space_memory,
2505 start, NULL, len, FLUSH_CACHE);
Alexander Graf582b55a2013-12-11 14:17:44 +01002506}
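/*
 * Usage sketch: a firmware loader that copies a blob into a ROM or RAM
 * region and then makes the host instruction cache consistent for
 * KVM/Xen might do the following; "blob", "blob_size" and ROM_BASE are
 * illustrative names.
 *
 *     cpu_physical_memory_write_rom(&address_space_memory, ROM_BASE,
 *                                   blob, blob_size);
 *     cpu_flush_icache_range(ROM_BASE, blob_size);
 */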
2507
aliguori6d16c2f2009-01-22 16:59:11 +00002508typedef struct {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002509 MemoryRegion *mr;
aliguori6d16c2f2009-01-22 16:59:11 +00002510 void *buffer;
Avi Kivitya8170e52012-10-23 12:30:10 +02002511 hwaddr addr;
2512 hwaddr len;
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002513 bool in_use;
aliguori6d16c2f2009-01-22 16:59:11 +00002514} BounceBuffer;
2515
2516static BounceBuffer bounce;
2517
aliguoriba223c22009-01-22 16:59:16 +00002518typedef struct MapClient {
Fam Zhenge95205e2015-03-16 17:03:37 +08002519 QEMUBH *bh;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002520 QLIST_ENTRY(MapClient) link;
aliguoriba223c22009-01-22 16:59:16 +00002521} MapClient;
2522
Fam Zheng38e047b2015-03-16 17:03:35 +08002523QemuMutex map_client_list_lock;
Blue Swirl72cf2d42009-09-12 07:36:22 +00002524static QLIST_HEAD(map_client_list, MapClient) map_client_list
2525 = QLIST_HEAD_INITIALIZER(map_client_list);
aliguoriba223c22009-01-22 16:59:16 +00002526
Fam Zhenge95205e2015-03-16 17:03:37 +08002527static void cpu_unregister_map_client_do(MapClient *client)
aliguoriba223c22009-01-22 16:59:16 +00002528{
Blue Swirl72cf2d42009-09-12 07:36:22 +00002529 QLIST_REMOVE(client, link);
Anthony Liguori7267c092011-08-20 22:09:37 -05002530 g_free(client);
aliguoriba223c22009-01-22 16:59:16 +00002531}
2532
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002533static void cpu_notify_map_clients_locked(void)
aliguoriba223c22009-01-22 16:59:16 +00002534{
2535 MapClient *client;
2536
Blue Swirl72cf2d42009-09-12 07:36:22 +00002537 while (!QLIST_EMPTY(&map_client_list)) {
2538 client = QLIST_FIRST(&map_client_list);
Fam Zhenge95205e2015-03-16 17:03:37 +08002539 qemu_bh_schedule(client->bh);
2540 cpu_unregister_map_client_do(client);
aliguoriba223c22009-01-22 16:59:16 +00002541 }
2542}
2543
Fam Zhenge95205e2015-03-16 17:03:37 +08002544void cpu_register_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002545{
2546 MapClient *client = g_malloc(sizeof(*client));
2547
Fam Zheng38e047b2015-03-16 17:03:35 +08002548 qemu_mutex_lock(&map_client_list_lock);
Fam Zhenge95205e2015-03-16 17:03:37 +08002549 client->bh = bh;
bellardd0ecd2a2006-04-23 17:14:48 +00002550 QLIST_INSERT_HEAD(&map_client_list, client, link);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002551 if (!atomic_read(&bounce.in_use)) {
2552 cpu_notify_map_clients_locked();
2553 }
Fam Zheng38e047b2015-03-16 17:03:35 +08002554 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002555}
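/*
 * Usage sketch: a caller whose address_space_map() failed because the
 * bounce buffer was busy can register a bottom half to be told when a
 * retry is likely to succeed; "retry_dma_bh" and "opaque" are
 * hypothetical.
 *
 *     QEMUBH *bh = qemu_bh_new(retry_dma_bh, opaque);
 *     cpu_register_map_client(bh);
 *
 * The bottom half is scheduled, and the registration dropped, as soon as
 * the bounce buffer is released (or immediately if it is already free).
 */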
2556
Fam Zheng38e047b2015-03-16 17:03:35 +08002557void cpu_exec_init_all(void)
2558{
2559 qemu_mutex_init(&ram_list.mutex);
2560 memory_map_init();
2561 io_mem_init();
2562 qemu_mutex_init(&map_client_list_lock);
2563}
2564
Fam Zhenge95205e2015-03-16 17:03:37 +08002565void cpu_unregister_map_client(QEMUBH *bh)
bellardd0ecd2a2006-04-23 17:14:48 +00002566{
Fam Zhenge95205e2015-03-16 17:03:37 +08002567 MapClient *client;
bellardd0ecd2a2006-04-23 17:14:48 +00002568
Fam Zhenge95205e2015-03-16 17:03:37 +08002569 qemu_mutex_lock(&map_client_list_lock);
2570 QLIST_FOREACH(client, &map_client_list, link) {
2571 if (client->bh == bh) {
2572 cpu_unregister_map_client_do(client);
2573 break;
2574 }
2575 }
2576 qemu_mutex_unlock(&map_client_list_lock);
bellardd0ecd2a2006-04-23 17:14:48 +00002577}
2578
2579static void cpu_notify_map_clients(void)
2580{
Fam Zheng38e047b2015-03-16 17:03:35 +08002581 qemu_mutex_lock(&map_client_list_lock);
Fam Zheng33b6c2e2015-03-16 17:03:36 +08002582 cpu_notify_map_clients_locked();
Fam Zheng38e047b2015-03-16 17:03:35 +08002583 qemu_mutex_unlock(&map_client_list_lock);
aliguori6d16c2f2009-01-22 16:59:11 +00002584}
2585
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002586bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write)
2587{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002588 MemoryRegion *mr;
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002589 hwaddr l, xlat;
2590
Paolo Bonzini41063e12015-03-18 14:21:43 +01002591 rcu_read_lock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002592 while (len > 0) {
2593 l = len;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002594 mr = address_space_translate(as, addr, &xlat, &l, is_write);
2595 if (!memory_access_is_direct(mr, is_write)) {
2596 l = memory_access_size(mr, l, addr);
2597 if (!memory_region_access_valid(mr, xlat, l, is_write)) {
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002598 return false;
2599 }
2600 }
2601
2602 len -= l;
2603 addr += l;
2604 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002605 rcu_read_unlock();
Paolo Bonzini51644ab2013-04-11 15:40:59 +02002606 return true;
2607}
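/*
 * Usage sketch: a device can probe whether a whole DMA window is
 * accessible before starting the transfer, rather than failing halfway
 * through; the address and length are illustrative.
 *
 *     if (!address_space_access_valid(&address_space_memory,
 *                                     0x40000000ULL, 4096, true)) {
 *         ... reject the request up front ...
 *     }
 */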
2608
aliguori6d16c2f2009-01-22 16:59:11 +00002609/* Map a physical memory region into a host virtual address.
2610 * May map a subset of the requested range, given by and returned in *plen.
2611 * May return NULL if resources needed to perform the mapping are exhausted.
2612 * Use only for reads OR writes - not for read-modify-write operations.
aliguoriba223c22009-01-22 16:59:16 +00002613 * Use cpu_register_map_client() to know when retrying the map operation is
2614 * likely to succeed.
aliguori6d16c2f2009-01-22 16:59:11 +00002615 */
Avi Kivityac1970f2012-10-03 16:22:53 +02002616void *address_space_map(AddressSpace *as,
Avi Kivitya8170e52012-10-23 12:30:10 +02002617 hwaddr addr,
2618 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002619 bool is_write)
aliguori6d16c2f2009-01-22 16:59:11 +00002620{
Avi Kivitya8170e52012-10-23 12:30:10 +02002621 hwaddr len = *plen;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002622 hwaddr done = 0;
2623 hwaddr l, xlat, base;
2624 MemoryRegion *mr, *this_mr;
2625 ram_addr_t raddr;
aliguori6d16c2f2009-01-22 16:59:11 +00002626
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002627 if (len == 0) {
2628 return NULL;
2629 }
aliguori6d16c2f2009-01-22 16:59:11 +00002630
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002631 l = len;
Paolo Bonzini41063e12015-03-18 14:21:43 +01002632 rcu_read_lock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002633 mr = address_space_translate(as, addr, &xlat, &l, is_write);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002634
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002635 if (!memory_access_is_direct(mr, is_write)) {
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002636 if (atomic_xchg(&bounce.in_use, true)) {
Paolo Bonzini41063e12015-03-18 14:21:43 +01002637 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002638 return NULL;
aliguori6d16c2f2009-01-22 16:59:11 +00002639 }
Kevin Wolfe85d9db2013-07-22 14:30:23 +02002640 /* Avoid unbounded allocations */
2641 l = MIN(l, TARGET_PAGE_SIZE);
2642 bounce.buffer = qemu_memalign(TARGET_PAGE_SIZE, l);
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002643 bounce.addr = addr;
2644 bounce.len = l;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002645
2646 memory_region_ref(mr);
2647 bounce.mr = mr;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002648 if (!is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002649 address_space_read(as, addr, MEMTXATTRS_UNSPECIFIED,
2650 bounce.buffer, l);
Stefano Stabellini8ab934f2011-06-27 18:26:06 +01002651 }
aliguori6d16c2f2009-01-22 16:59:11 +00002652
Paolo Bonzini41063e12015-03-18 14:21:43 +01002653 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002654 *plen = l;
2655 return bounce.buffer;
2656 }
2657
2658 base = xlat;
2659 raddr = memory_region_get_ram_addr(mr);
2660
2661 for (;;) {
aliguori6d16c2f2009-01-22 16:59:11 +00002662 len -= l;
2663 addr += l;
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002664 done += l;
2665 if (len == 0) {
2666 break;
2667 }
2668
2669 l = len;
2670 this_mr = address_space_translate(as, addr, &xlat, &l, is_write);
2671 if (this_mr != mr || xlat != base + done) {
2672 break;
2673 }
aliguori6d16c2f2009-01-22 16:59:11 +00002674 }
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002675
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002676 memory_region_ref(mr);
Paolo Bonzini41063e12015-03-18 14:21:43 +01002677 rcu_read_unlock();
Paolo Bonzinie3127ae2013-06-28 17:29:27 +02002678 *plen = done;
2679 return qemu_ram_ptr_length(raddr + base, plen);
aliguori6d16c2f2009-01-22 16:59:11 +00002680}
2681
Avi Kivityac1970f2012-10-03 16:22:53 +02002682/* Unmaps a memory region previously mapped by address_space_map().
aliguori6d16c2f2009-01-22 16:59:11 +00002683 * Will also mark the memory as dirty if is_write == 1. access_len gives
2684 * the amount of memory that was actually read or written by the caller.
2685 */
Avi Kivitya8170e52012-10-23 12:30:10 +02002686void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
2687 int is_write, hwaddr access_len)
aliguori6d16c2f2009-01-22 16:59:11 +00002688{
2689 if (buffer != bounce.buffer) {
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002690 MemoryRegion *mr;
2691 ram_addr_t addr1;
2692
2693 mr = qemu_ram_addr_from_host(buffer, &addr1);
2694 assert(mr != NULL);
aliguori6d16c2f2009-01-22 16:59:11 +00002695 if (is_write) {
Paolo Bonzini68868672014-07-21 16:45:18 +02002696 invalidate_and_set_dirty(addr1, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002697 }
Jan Kiszka868bb332011-06-21 22:59:09 +02002698 if (xen_enabled()) {
Jan Kiszkae41d7c62011-06-21 22:59:08 +02002699 xen_invalidate_map_cache_entry(buffer);
Anthony PERARD050a0dd2010-09-16 13:57:49 +01002700 }
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002701 memory_region_unref(mr);
aliguori6d16c2f2009-01-22 16:59:11 +00002702 return;
2703 }
2704 if (is_write) {
Peter Maydell5c9eb022015-04-26 16:49:24 +01002705 address_space_write(as, bounce.addr, MEMTXATTRS_UNSPECIFIED,
2706 bounce.buffer, access_len);
aliguori6d16c2f2009-01-22 16:59:11 +00002707 }
Herve Poussineauf8a83242010-01-24 21:23:56 +00002708 qemu_vfree(bounce.buffer);
aliguori6d16c2f2009-01-22 16:59:11 +00002709 bounce.buffer = NULL;
Paolo Bonzinid3e71552013-06-28 17:33:29 +02002710 memory_region_unref(bounce.mr);
Fam Zhengc2cba0f2015-03-16 17:03:33 +08002711 atomic_mb_set(&bounce.in_use, false);
aliguoriba223c22009-01-22 16:59:16 +00002712 cpu_notify_map_clients();
aliguori6d16c2f2009-01-22 16:59:11 +00002713}
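/*
 * Usage sketch: the usual map/use/unmap pattern for zero-copy access to
 * guest memory.  The guest address is illustrative, and note that the
 * returned length may be smaller than what was asked for.
 *
 *     hwaddr gpa = 0x40000000ULL;
 *     hwaddr maplen = 4096;
 *     void *host = address_space_map(&address_space_memory, gpa,
 *                                    &maplen, true);
 *     if (host) {
 *         memset(host, 0, maplen);
 *         address_space_unmap(&address_space_memory, host, maplen,
 *                             true, maplen);
 *     }
 */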
bellardd0ecd2a2006-04-23 17:14:48 +00002714
Avi Kivitya8170e52012-10-23 12:30:10 +02002715void *cpu_physical_memory_map(hwaddr addr,
2716 hwaddr *plen,
Avi Kivityac1970f2012-10-03 16:22:53 +02002717 int is_write)
2718{
2719 return address_space_map(&address_space_memory, addr, plen, is_write);
2720}
2721
Avi Kivitya8170e52012-10-23 12:30:10 +02002722void cpu_physical_memory_unmap(void *buffer, hwaddr len,
2723 int is_write, hwaddr access_len)
Avi Kivityac1970f2012-10-03 16:22:53 +02002724{
2725 return address_space_unmap(&address_space_memory, buffer, len, is_write, access_len);
2726}
2727
bellard8df1cd02005-01-28 22:37:22 +00002728/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002729static inline uint32_t address_space_ldl_internal(AddressSpace *as, hwaddr addr,
2730 MemTxAttrs attrs,
2731 MemTxResult *result,
2732 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00002733{
bellard8df1cd02005-01-28 22:37:22 +00002734 uint8_t *ptr;
Paolo Bonzini791af8c2013-05-24 16:10:39 +02002735 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002736 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002737 hwaddr l = 4;
2738 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002739 MemTxResult r;
bellard8df1cd02005-01-28 22:37:22 +00002740
Paolo Bonzini41063e12015-03-18 14:21:43 +01002741 rcu_read_lock();
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002742 mr = address_space_translate(as, addr, &addr1, &l, false);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002743 if (l < 4 || !memory_access_is_direct(mr, false)) {
bellard8df1cd02005-01-28 22:37:22 +00002744 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002745 r = memory_region_dispatch_read(mr, addr1, &val, 4, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002746#if defined(TARGET_WORDS_BIGENDIAN)
2747 if (endian == DEVICE_LITTLE_ENDIAN) {
2748 val = bswap32(val);
2749 }
2750#else
2751 if (endian == DEVICE_BIG_ENDIAN) {
2752 val = bswap32(val);
2753 }
2754#endif
bellard8df1cd02005-01-28 22:37:22 +00002755 } else {
2756 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002757 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002758 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002759 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002760 switch (endian) {
2761 case DEVICE_LITTLE_ENDIAN:
2762 val = ldl_le_p(ptr);
2763 break;
2764 case DEVICE_BIG_ENDIAN:
2765 val = ldl_be_p(ptr);
2766 break;
2767 default:
2768 val = ldl_p(ptr);
2769 break;
2770 }
Peter Maydell50013112015-04-26 16:49:24 +01002771 r = MEMTX_OK;
2772 }
2773 if (result) {
2774 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00002775 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002776 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00002777 return val;
2778}
2779
Peter Maydell50013112015-04-26 16:49:24 +01002780uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
2781 MemTxAttrs attrs, MemTxResult *result)
2782{
2783 return address_space_ldl_internal(as, addr, attrs, result,
2784 DEVICE_NATIVE_ENDIAN);
2785}
2786
2787uint32_t address_space_ldl_le(AddressSpace *as, hwaddr addr,
2788 MemTxAttrs attrs, MemTxResult *result)
2789{
2790 return address_space_ldl_internal(as, addr, attrs, result,
2791 DEVICE_LITTLE_ENDIAN);
2792}
2793
2794uint32_t address_space_ldl_be(AddressSpace *as, hwaddr addr,
2795 MemTxAttrs attrs, MemTxResult *result)
2796{
2797 return address_space_ldl_internal(as, addr, attrs, result,
2798 DEVICE_BIG_ENDIAN);
2799}
2800
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002801uint32_t ldl_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002802{
Peter Maydell50013112015-04-26 16:49:24 +01002803 return address_space_ldl(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002804}
2805
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002806uint32_t ldl_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002807{
Peter Maydell50013112015-04-26 16:49:24 +01002808 return address_space_ldl_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002809}
2810
Edgar E. Iglesiasfdfba1a2013-11-15 14:46:38 +01002811uint32_t ldl_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002812{
Peter Maydell50013112015-04-26 16:49:24 +01002813 return address_space_ldl_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002814}
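/*
 * Usage sketch: loading a little-endian 32-bit value while checking the
 * transaction result; the address is illustrative.  ldl_phys() and the
 * other _phys wrappers are the convenience forms that ignore the result.
 *
 *     MemTxResult r;
 *     uint32_t v = address_space_ldl_le(&address_space_memory,
 *                                       0x10000000ULL,
 *                                       MEMTXATTRS_UNSPECIFIED, &r);
 *     if (r != MEMTX_OK) {
 *         ... the read did not complete normally, v is not usable ...
 *     }
 */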
2815
bellard84b7b8e2005-11-28 21:19:04 +00002816/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002817static inline uint64_t address_space_ldq_internal(AddressSpace *as, hwaddr addr,
2818 MemTxAttrs attrs,
2819 MemTxResult *result,
2820 enum device_endian endian)
bellard84b7b8e2005-11-28 21:19:04 +00002821{
bellard84b7b8e2005-11-28 21:19:04 +00002822 uint8_t *ptr;
2823 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002824 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002825 hwaddr l = 8;
2826 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002827 MemTxResult r;
bellard84b7b8e2005-11-28 21:19:04 +00002828
Paolo Bonzini41063e12015-03-18 14:21:43 +01002829 rcu_read_lock();
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002830 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002831 false);
2832 if (l < 8 || !memory_access_is_direct(mr, false)) {
bellard84b7b8e2005-11-28 21:19:04 +00002833 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002834 r = memory_region_dispatch_read(mr, addr1, &val, 8, attrs);
Paolo Bonzini968a5622013-05-24 17:58:37 +02002835#if defined(TARGET_WORDS_BIGENDIAN)
2836 if (endian == DEVICE_LITTLE_ENDIAN) {
2837 val = bswap64(val);
2838 }
2839#else
2840 if (endian == DEVICE_BIG_ENDIAN) {
2841 val = bswap64(val);
2842 }
2843#endif
bellard84b7b8e2005-11-28 21:19:04 +00002844 } else {
2845 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002846 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002847 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002848 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002849 switch (endian) {
2850 case DEVICE_LITTLE_ENDIAN:
2851 val = ldq_le_p(ptr);
2852 break;
2853 case DEVICE_BIG_ENDIAN:
2854 val = ldq_be_p(ptr);
2855 break;
2856 default:
2857 val = ldq_p(ptr);
2858 break;
2859 }
Peter Maydell50013112015-04-26 16:49:24 +01002860 r = MEMTX_OK;
2861 }
2862 if (result) {
2863 *result = r;
bellard84b7b8e2005-11-28 21:19:04 +00002864 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002865 rcu_read_unlock();
bellard84b7b8e2005-11-28 21:19:04 +00002866 return val;
2867}
2868
Peter Maydell50013112015-04-26 16:49:24 +01002869uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
2870 MemTxAttrs attrs, MemTxResult *result)
2871{
2872 return address_space_ldq_internal(as, addr, attrs, result,
2873 DEVICE_NATIVE_ENDIAN);
2874}
2875
2876uint64_t address_space_ldq_le(AddressSpace *as, hwaddr addr,
2877 MemTxAttrs attrs, MemTxResult *result)
2878{
2879 return address_space_ldq_internal(as, addr, attrs, result,
2880 DEVICE_LITTLE_ENDIAN);
2881}
2882
2883uint64_t address_space_ldq_be(AddressSpace *as, hwaddr addr,
2884 MemTxAttrs attrs, MemTxResult *result)
2885{
2886 return address_space_ldq_internal(as, addr, attrs, result,
2887 DEVICE_BIG_ENDIAN);
2888}
2889
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002890uint64_t ldq_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002891{
Peter Maydell50013112015-04-26 16:49:24 +01002892 return address_space_ldq(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002893}
2894
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002895uint64_t ldq_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002896{
Peter Maydell50013112015-04-26 16:49:24 +01002897 return address_space_ldq_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002898}
2899
Edgar E. Iglesias2c174492013-12-17 14:05:40 +10002900uint64_t ldq_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002901{
Peter Maydell50013112015-04-26 16:49:24 +01002902 return address_space_ldq_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002903}
2904
bellardaab33092005-10-30 20:48:42 +00002905/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01002906uint32_t address_space_ldub(AddressSpace *as, hwaddr addr,
2907 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00002908{
2909 uint8_t val;
Peter Maydell50013112015-04-26 16:49:24 +01002910 MemTxResult r;
2911
2912 r = address_space_rw(as, addr, attrs, &val, 1, 0);
2913 if (result) {
2914 *result = r;
2915 }
bellardaab33092005-10-30 20:48:42 +00002916 return val;
2917}
2918
Peter Maydell50013112015-04-26 16:49:24 +01002919uint32_t ldub_phys(AddressSpace *as, hwaddr addr)
2920{
2921 return address_space_ldub(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
2922}
2923
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002924/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01002925static inline uint32_t address_space_lduw_internal(AddressSpace *as,
2926 hwaddr addr,
2927 MemTxAttrs attrs,
2928 MemTxResult *result,
2929 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00002930{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002931 uint8_t *ptr;
2932 uint64_t val;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002933 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002934 hwaddr l = 2;
2935 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01002936 MemTxResult r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002937
Paolo Bonzini41063e12015-03-18 14:21:43 +01002938 rcu_read_lock();
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002939 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002940 false);
2941 if (l < 2 || !memory_access_is_direct(mr, false)) {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002942 /* I/O case */
Peter Maydell50013112015-04-26 16:49:24 +01002943 r = memory_region_dispatch_read(mr, addr1, &val, 2, attrs);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002944#if defined(TARGET_WORDS_BIGENDIAN)
2945 if (endian == DEVICE_LITTLE_ENDIAN) {
2946 val = bswap16(val);
2947 }
2948#else
2949 if (endian == DEVICE_BIG_ENDIAN) {
2950 val = bswap16(val);
2951 }
2952#endif
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002953 } else {
2954 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02002955 ptr = qemu_get_ram_ptr((memory_region_get_ram_addr(mr)
Avi Kivity06ef3522012-02-13 16:11:22 +02002956 & TARGET_PAGE_MASK)
Paolo Bonzini149f54b2013-05-24 12:59:37 +02002957 + addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02002958 switch (endian) {
2959 case DEVICE_LITTLE_ENDIAN:
2960 val = lduw_le_p(ptr);
2961 break;
2962 case DEVICE_BIG_ENDIAN:
2963 val = lduw_be_p(ptr);
2964 break;
2965 default:
2966 val = lduw_p(ptr);
2967 break;
2968 }
Peter Maydell50013112015-04-26 16:49:24 +01002969 r = MEMTX_OK;
2970 }
2971 if (result) {
2972 *result = r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002973 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01002974 rcu_read_unlock();
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03002975 return val;
bellardaab33092005-10-30 20:48:42 +00002976}
2977
Peter Maydell50013112015-04-26 16:49:24 +01002978uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
2979 MemTxAttrs attrs, MemTxResult *result)
2980{
2981 return address_space_lduw_internal(as, addr, attrs, result,
2982 DEVICE_NATIVE_ENDIAN);
2983}
2984
2985uint32_t address_space_lduw_le(AddressSpace *as, hwaddr addr,
2986 MemTxAttrs attrs, MemTxResult *result)
2987{
2988 return address_space_lduw_internal(as, addr, attrs, result,
2989 DEVICE_LITTLE_ENDIAN);
2990}
2991
2992uint32_t address_space_lduw_be(AddressSpace *as, hwaddr addr,
2993 MemTxAttrs attrs, MemTxResult *result)
2994{
2995 return address_space_lduw_internal(as, addr, attrs, result,
2996 DEVICE_BIG_ENDIAN);
2997}
2998
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10002999uint32_t lduw_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003000{
Peter Maydell50013112015-04-26 16:49:24 +01003001 return address_space_lduw(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003002}
3003
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003004uint32_t lduw_le_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003005{
Peter Maydell50013112015-04-26 16:49:24 +01003006 return address_space_lduw_le(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003007}
3008
Edgar E. Iglesias41701aa2013-12-17 14:33:56 +10003009uint32_t lduw_be_phys(AddressSpace *as, hwaddr addr)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003010{
Peter Maydell50013112015-04-26 16:49:24 +01003011 return address_space_lduw_be(as, addr, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003012}
3013
bellard8df1cd02005-01-28 22:37:22 +00003014/* warning: addr must be aligned. The RAM page is not marked as dirty
3015   and the code inside is not invalidated. This is useful if the dirty
3016   bits are used to track modified PTEs. */
Peter Maydell50013112015-04-26 16:49:24 +01003017void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
3018 MemTxAttrs attrs, MemTxResult *result)
bellard8df1cd02005-01-28 22:37:22 +00003019{
bellard8df1cd02005-01-28 22:37:22 +00003020 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003021 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003022 hwaddr l = 4;
3023 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003024 MemTxResult r;
bellard8df1cd02005-01-28 22:37:22 +00003025
Paolo Bonzini41063e12015-03-18 14:21:43 +01003026 rcu_read_lock();
Edgar E. Iglesias2198a122013-11-28 10:13:41 +01003027 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003028 true);
3029 if (l < 4 || !memory_access_is_direct(mr, true)) {
Peter Maydell50013112015-04-26 16:49:24 +01003030 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003031 } else {
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003032 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003033 ptr = qemu_get_ram_ptr(addr1);
bellard8df1cd02005-01-28 22:37:22 +00003034 stl_p(ptr, val);
aliguori74576192008-10-06 14:02:03 +00003035
3036 if (unlikely(in_migration)) {
Juan Quintelaa2cd8c82013-10-10 11:20:22 +02003037 if (cpu_physical_memory_is_clean(addr1)) {
aliguori74576192008-10-06 14:02:03 +00003038 /* invalidate code */
3039 tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
3040 /* set dirty bit */
Paolo Bonzini68868672014-07-21 16:45:18 +02003041 cpu_physical_memory_set_dirty_range_nocode(addr1, 4);
aliguori74576192008-10-06 14:02:03 +00003042 }
3043 }
Peter Maydell50013112015-04-26 16:49:24 +01003044 r = MEMTX_OK;
3045 }
3046 if (result) {
3047 *result = r;
bellard8df1cd02005-01-28 22:37:22 +00003048 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003049 rcu_read_unlock();
bellard8df1cd02005-01-28 22:37:22 +00003050}
3051
Peter Maydell50013112015-04-26 16:49:24 +01003052void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
3053{
3054 address_space_stl_notdirty(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
3055}
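/*
 * Usage sketch: a softmmu page-table walker that sets accessed/dirty
 * bits in a guest PTE uses the _notdirty variant so that updating the
 * PTE does not itself dirty the page; PG_ACCESSED_MASK and pte_addr are
 * borrowed from the x86 target purely for illustration.
 *
 *     pte |= PG_ACCESSED_MASK;
 *     stl_phys_notdirty(cs->as, pte_addr, pte);
 */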
3056
bellard8df1cd02005-01-28 22:37:22 +00003057/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003058static inline void address_space_stl_internal(AddressSpace *as,
3059 hwaddr addr, uint32_t val,
3060 MemTxAttrs attrs,
3061 MemTxResult *result,
3062 enum device_endian endian)
bellard8df1cd02005-01-28 22:37:22 +00003063{
bellard8df1cd02005-01-28 22:37:22 +00003064 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003065 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003066 hwaddr l = 4;
3067 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003068 MemTxResult r;
bellard8df1cd02005-01-28 22:37:22 +00003069
Paolo Bonzini41063e12015-03-18 14:21:43 +01003070 rcu_read_lock();
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003071 mr = address_space_translate(as, addr, &addr1, &l,
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003072 true);
3073 if (l < 4 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003074#if defined(TARGET_WORDS_BIGENDIAN)
3075 if (endian == DEVICE_LITTLE_ENDIAN) {
3076 val = bswap32(val);
3077 }
3078#else
3079 if (endian == DEVICE_BIG_ENDIAN) {
3080 val = bswap32(val);
3081 }
3082#endif
Peter Maydell50013112015-04-26 16:49:24 +01003083 r = memory_region_dispatch_write(mr, addr1, val, 4, attrs);
bellard8df1cd02005-01-28 22:37:22 +00003084 } else {
bellard8df1cd02005-01-28 22:37:22 +00003085 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003086 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
pbrook5579c7f2009-04-11 14:47:08 +00003087 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003088 switch (endian) {
3089 case DEVICE_LITTLE_ENDIAN:
3090 stl_le_p(ptr, val);
3091 break;
3092 case DEVICE_BIG_ENDIAN:
3093 stl_be_p(ptr, val);
3094 break;
3095 default:
3096 stl_p(ptr, val);
3097 break;
3098 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003099 invalidate_and_set_dirty(addr1, 4);
Peter Maydell50013112015-04-26 16:49:24 +01003100 r = MEMTX_OK;
bellard8df1cd02005-01-28 22:37:22 +00003101 }
Peter Maydell50013112015-04-26 16:49:24 +01003102 if (result) {
3103 *result = r;
3104 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003105 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003106}
3107
3108void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
3109 MemTxAttrs attrs, MemTxResult *result)
3110{
3111 address_space_stl_internal(as, addr, val, attrs, result,
3112 DEVICE_NATIVE_ENDIAN);
3113}
3114
3115void address_space_stl_le(AddressSpace *as, hwaddr addr, uint32_t val,
3116 MemTxAttrs attrs, MemTxResult *result)
3117{
3118 address_space_stl_internal(as, addr, val, attrs, result,
3119 DEVICE_LITTLE_ENDIAN);
3120}
3121
3122void address_space_stl_be(AddressSpace *as, hwaddr addr, uint32_t val,
3123 MemTxAttrs attrs, MemTxResult *result)
3124{
3125 address_space_stl_internal(as, addr, val, attrs, result,
3126 DEVICE_BIG_ENDIAN);
bellard8df1cd02005-01-28 22:37:22 +00003127}
3128
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003129void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003130{
Peter Maydell50013112015-04-26 16:49:24 +01003131 address_space_stl(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003132}
3133
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003134void stl_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003135{
Peter Maydell50013112015-04-26 16:49:24 +01003136 address_space_stl_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003137}
3138
Edgar E. Iglesiasab1da852013-12-17 15:07:29 +10003139void stl_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003140{
Peter Maydell50013112015-04-26 16:49:24 +01003141 address_space_stl_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003142}
3143
bellardaab33092005-10-30 20:48:42 +00003144/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003145void address_space_stb(AddressSpace *as, hwaddr addr, uint32_t val,
3146 MemTxAttrs attrs, MemTxResult *result)
bellardaab33092005-10-30 20:48:42 +00003147{
3148 uint8_t v = val;
Peter Maydell50013112015-04-26 16:49:24 +01003149 MemTxResult r;
3150
3151 r = address_space_rw(as, addr, attrs, &v, 1, 1);
3152 if (result) {
3153 *result = r;
3154 }
3155}
3156
3157void stb_phys(AddressSpace *as, hwaddr addr, uint32_t val)
3158{
3159 address_space_stb(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003160}
3161
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003162/* warning: addr must be aligned */
Peter Maydell50013112015-04-26 16:49:24 +01003163static inline void address_space_stw_internal(AddressSpace *as,
3164 hwaddr addr, uint32_t val,
3165 MemTxAttrs attrs,
3166 MemTxResult *result,
3167 enum device_endian endian)
bellardaab33092005-10-30 20:48:42 +00003168{
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003169 uint8_t *ptr;
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003170 MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003171 hwaddr l = 2;
3172 hwaddr addr1;
Peter Maydell50013112015-04-26 16:49:24 +01003173 MemTxResult r;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003174
Paolo Bonzini41063e12015-03-18 14:21:43 +01003175 rcu_read_lock();
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003176 mr = address_space_translate(as, addr, &addr1, &l, true);
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003177 if (l < 2 || !memory_access_is_direct(mr, true)) {
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003178#if defined(TARGET_WORDS_BIGENDIAN)
3179 if (endian == DEVICE_LITTLE_ENDIAN) {
3180 val = bswap16(val);
3181 }
3182#else
3183 if (endian == DEVICE_BIG_ENDIAN) {
3184 val = bswap16(val);
3185 }
3186#endif
Peter Maydell50013112015-04-26 16:49:24 +01003187 r = memory_region_dispatch_write(mr, addr1, val, 2, attrs);
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003188 } else {
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003189 /* RAM case */
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003190 addr1 += memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003191 ptr = qemu_get_ram_ptr(addr1);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003192 switch (endian) {
3193 case DEVICE_LITTLE_ENDIAN:
3194 stw_le_p(ptr, val);
3195 break;
3196 case DEVICE_BIG_ENDIAN:
3197 stw_be_p(ptr, val);
3198 break;
3199 default:
3200 stw_p(ptr, val);
3201 break;
3202 }
Anthony PERARD51d7a9e2012-10-03 13:49:05 +00003203 invalidate_and_set_dirty(addr1, 2);
Peter Maydell50013112015-04-26 16:49:24 +01003204 r = MEMTX_OK;
Michael S. Tsirkin733f0b02010-04-06 14:18:19 +03003205 }
Peter Maydell50013112015-04-26 16:49:24 +01003206 if (result) {
3207 *result = r;
3208 }
Paolo Bonzini41063e12015-03-18 14:21:43 +01003209 rcu_read_unlock();
Peter Maydell50013112015-04-26 16:49:24 +01003210}
3211
3212void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
3213 MemTxAttrs attrs, MemTxResult *result)
3214{
3215 address_space_stw_internal(as, addr, val, attrs, result,
3216 DEVICE_NATIVE_ENDIAN);
3217}
3218
3219void address_space_stw_le(AddressSpace *as, hwaddr addr, uint32_t val,
3220 MemTxAttrs attrs, MemTxResult *result)
3221{
3222 address_space_stw_internal(as, addr, val, attrs, result,
3223 DEVICE_LITTLE_ENDIAN);
3224}
3225
3226void address_space_stw_be(AddressSpace *as, hwaddr addr, uint32_t val,
3227 MemTxAttrs attrs, MemTxResult *result)
3228{
3229 address_space_stw_internal(as, addr, val, attrs, result,
3230 DEVICE_BIG_ENDIAN);
bellardaab33092005-10-30 20:48:42 +00003231}
3232
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003233void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003234{
Peter Maydell50013112015-04-26 16:49:24 +01003235 address_space_stw(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003236}
3237
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003238void stw_le_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003239{
Peter Maydell50013112015-04-26 16:49:24 +01003240 address_space_stw_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003241}
3242
Edgar E. Iglesias5ce59442013-12-17 15:22:06 +10003243void stw_be_phys(AddressSpace *as, hwaddr addr, uint32_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003244{
Peter Maydell50013112015-04-26 16:49:24 +01003245 address_space_stw_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003246}
3247
bellardaab33092005-10-30 20:48:42 +00003248/* XXX: optimize */
Peter Maydell50013112015-04-26 16:49:24 +01003249void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
3250 MemTxAttrs attrs, MemTxResult *result)
3251{
3252 MemTxResult r;
3253 val = tswap64(val);
3254 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3255 if (result) {
3256 *result = r;
3257 }
3258}
3259
3260void address_space_stq_le(AddressSpace *as, hwaddr addr, uint64_t val,
3261 MemTxAttrs attrs, MemTxResult *result)
3262{
3263 MemTxResult r;
3264 val = cpu_to_le64(val);
3265 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3266 if (result) {
3267 *result = r;
3268 }
3269}

3270void address_space_stq_be(AddressSpace *as, hwaddr addr, uint64_t val,
3271 MemTxAttrs attrs, MemTxResult *result)
3272{
3273 MemTxResult r;
3274 val = cpu_to_be64(val);
3275 r = address_space_rw(as, addr, attrs, (void *) &val, 8, 1);
3276 if (result) {
3277 *result = r;
3278 }
3279}
3280
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003281void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val)
bellardaab33092005-10-30 20:48:42 +00003282{
Peter Maydell50013112015-04-26 16:49:24 +01003283 address_space_stq(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
bellardaab33092005-10-30 20:48:42 +00003284}
3285
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003286void stq_le_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003287{
Peter Maydell50013112015-04-26 16:49:24 +01003288 address_space_stq_le(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003289}
3290
Edgar E. Iglesiasf6066042013-11-28 00:11:44 +01003291void stq_be_phys(AddressSpace *as, hwaddr addr, uint64_t val)
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003292{
Peter Maydell50013112015-04-26 16:49:24 +01003293 address_space_stq_be(as, addr, val, MEMTXATTRS_UNSPECIFIED, NULL);
Alexander Graf1e78bcc2011-07-06 09:09:23 +02003294}
3295
aliguori5e2972f2009-03-28 17:51:36 +00003296/* virtual memory access for debug (includes writing to ROM) */
Andreas Färberf17ec442013-06-29 19:40:58 +02003297int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
bellardb448f2f2004-02-25 23:24:04 +00003298 uint8_t *buf, int len, int is_write)
bellard13eb76e2004-01-24 15:23:36 +00003299{
3300 int l;
Avi Kivitya8170e52012-10-23 12:30:10 +02003301 hwaddr phys_addr;
j_mayer9b3c35e2007-04-07 11:21:28 +00003302 target_ulong page;
bellard13eb76e2004-01-24 15:23:36 +00003303
3304 while (len > 0) {
3305 page = addr & TARGET_PAGE_MASK;
Andreas Färberf17ec442013-06-29 19:40:58 +02003306 phys_addr = cpu_get_phys_page_debug(cpu, page);
bellard13eb76e2004-01-24 15:23:36 +00003307 /* if no physical page mapped, return an error */
3308 if (phys_addr == -1)
3309 return -1;
3310 l = (page + TARGET_PAGE_SIZE) - addr;
3311 if (l > len)
3312 l = len;
aliguori5e2972f2009-03-28 17:51:36 +00003313 phys_addr += (addr & ~TARGET_PAGE_MASK);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003314 if (is_write) {
3315 cpu_physical_memory_write_rom(cpu->as, phys_addr, buf, l);
3316 } else {
Peter Maydell5c9eb022015-04-26 16:49:24 +01003317 address_space_rw(cpu->as, phys_addr, MEMTXATTRS_UNSPECIFIED,
3318 buf, l, 0);
Edgar E. Iglesias2e388472013-12-13 16:31:02 +10003319 }
bellard13eb76e2004-01-24 15:23:36 +00003320 len -= l;
3321 buf += l;
3322 addr += l;
3323 }
3324 return 0;
3325}
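/*
 * Usage sketch: this is what the gdb stub and the monitor use to peek at
 * guest memory by virtual address; "vaddr" is illustrative.
 *
 *     uint8_t buf[16];
 *     if (cpu_memory_rw_debug(cpu, vaddr, buf, sizeof(buf), 0) < 0) {
 *         ... no physical page is mapped at vaddr ...
 *     }
 */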
Paul Brooka68fe892010-03-01 00:08:59 +00003326#endif
bellard13eb76e2004-01-24 15:23:36 +00003327
Blue Swirl8e4a4242013-01-06 18:30:17 +00003328/*
3329 * A helper function for the _utterly broken_ virtio device model to find out if
3330 * it's running on a big endian machine. Don't do this at home kids!
3331 */
Greg Kurz98ed8ec2014-06-24 19:26:29 +02003332bool target_words_bigendian(void);
3333bool target_words_bigendian(void)
Blue Swirl8e4a4242013-01-06 18:30:17 +00003334{
3335#if defined(TARGET_WORDS_BIGENDIAN)
3336 return true;
3337#else
3338 return false;
3339#endif
3340}
3341
Wen Congyang76f35532012-05-07 12:04:18 +08003342#ifndef CONFIG_USER_ONLY
Avi Kivitya8170e52012-10-23 12:30:10 +02003343bool cpu_physical_memory_is_io(hwaddr phys_addr)
Wen Congyang76f35532012-05-07 12:04:18 +08003344{
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003345    MemoryRegion *mr;
Paolo Bonzini149f54b2013-05-24 12:59:37 +02003346 hwaddr l = 1;
Paolo Bonzini41063e12015-03-18 14:21:43 +01003347 bool res;
Wen Congyang76f35532012-05-07 12:04:18 +08003348
Paolo Bonzini41063e12015-03-18 14:21:43 +01003349 rcu_read_lock();
Paolo Bonzini5c8a00c2013-05-29 12:42:00 +02003350 mr = address_space_translate(&address_space_memory,
3351 phys_addr, &phys_addr, &l, false);
Wen Congyang76f35532012-05-07 12:04:18 +08003352
Paolo Bonzini41063e12015-03-18 14:21:43 +01003353 res = !(memory_region_is_ram(mr) || memory_region_is_romd(mr));
3354 rcu_read_unlock();
3355 return res;
Wen Congyang76f35532012-05-07 12:04:18 +08003356}
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003357
3358void qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque)
3359{
3360 RAMBlock *block;
3361
Mike Day0dc3f442013-09-05 14:41:35 -04003362 rcu_read_lock();
3363 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
Michael S. Tsirkin9b8424d2014-12-15 22:55:32 +02003364 func(block->host, block->offset, block->used_length, opaque);
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003365 }
Mike Day0dc3f442013-09-05 14:41:35 -04003366 rcu_read_unlock();
Michael R. Hinesbd2fa512013-06-25 21:35:34 -04003367}
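/*
 * Usage sketch: a RAMBlockIterFunc callback matching the call above,
 * e.g. to add up how much RAM is currently registered; "count_ram" and
 * "total" are illustrative names.
 *
 *     static void count_ram(void *host, ram_addr_t offset,
 *                           ram_addr_t length, void *opaque)
 *     {
 *         uint64_t *total = opaque;
 *         *total += length;
 *     }
 *
 *     uint64_t total = 0;
 *     qemu_ram_foreach_block(count_ram, &total);
 */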
Peter Maydellec3f8c92013-06-27 20:53:38 +01003368#endif