/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "exec/cpu_ldst.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case. This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUState *cpu, int flush_global)
{
    CPUArchState *env = cpu->env_ptr;

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

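    /* Filling the tables with 0xff sets TLB_INVALID_MASK (among other bits)
     * in every addr_read/addr_write/addr_code field, so no page-aligned
     * guest address can match on the fast path: one memset empties both
     * the main TLB and the victim TLB. */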
    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
    memset(cpu->tb_jmp_cache, 0, sizeof(cpu->tb_jmp_cache));

    env->vtlb_index = 0;
    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    CPUArchState *env = cpu->env_ptr;
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages. */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(cpu, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
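    /* The main table is direct-mapped: in each MMU mode the page can only
     * occupy the one slot selected by its virtual page number, while the
     * fully associative victim TLB below has to be scanned in full. */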
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    /* check whether there are entries that need to be flushed in the vtlb */
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_flush_entry(&env->tlb_v_table[mmu_idx][k], addr);
        }
    }

    tb_flush_jmp_cache(cpu, addr);
}

/* update the TLBs so that writes to code in the physical page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'ram_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

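/* Writes through a TLB entry go straight to host RAM only when none of
 * TLB_INVALID_MASK, TLB_MMIO or TLB_NOTDIRTY is set; those are exactly
 * the entries that dirty tracking must find and intercept. */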
static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
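        /* Unsigned compare: if addr is below start, addr - start wraps to
           a huge value, so this single test checks both range bounds. */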
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUState *cpu;
    CPUArchState *env;

    CPU_FOREACH(cpu) {
        int mmu_idx;

        env = cpu->env_ptr;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }

            for (i = 0; i < CPU_VTLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_v_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
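    /* Widen the mask until tlb_flush_addr and the new vaddr agree in every
       bit it keeps, i.e. until one aligned power-of-two region covers both
       large pages. */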
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;
    unsigned vidx = env->vtlb_index++ % CPU_VTLB_SIZE;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(cpu, paddr, &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    qemu_log_mask(CPU_LOG_MMU,
           "tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(cpu, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    te = &env->tlb_table[mmu_idx][index];

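    /* The victim TLB is small and fully associative: the softmmu slow path
     * scans it on a main-table miss before falling back to a new page walk,
     * so a hot entry displaced from the direct-mapped table above can be
     * recovered cheaply. vidx advances round-robin (FIFO replacement). */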
    /* do not discard the translation in te, evict it into a victim tlb */
    env->tlb_v_table[mmu_idx][vidx] = *te;
    env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index];

    /* refill the tlb */
    env->iotlb[mmu_idx][index].addr = iotlb - vaddr;
    env->iotlb[mmu_idx][index].attrs = attrs;
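    /* Both the iotlb address and te->addend are stored biased by -vaddr:
     * the softmmu helpers add the full guest virtual address back, which
     * folds the page offset into the I/O address and, for RAM-backed pages,
     * turns te->addend into a direct guest-to-host pointer translation. */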
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback. */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}

/* Add a new TLB entry, but without specifying the memory
 * transaction attributes to be used.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;
    CPUState *cpu = ENV_GET_CPU(env1);

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
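        /* TLB miss: fetch one byte through the code-access load helper
           purely for its side effect of filling this page's TLB entry. */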
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index].addr & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(cpu, pd);
    if (memory_region_is_unassigned(mr)) {
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(cpu, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}

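/* Instantiate the softmmu load/store helpers for the four access sizes:
   each inclusion of softmmu_template.h expands with DATA_SIZE = 1 << SHIFT,
   i.e. the 1, 2, 4 and 8 byte variants. */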
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"
#undef MMUSUFFIX

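/* The _cmmu variants service code fetches (used while translating guest
   instructions) rather than data accesses; there is no host return address
   to unwind from, so GETRA/GETPC_ADJ are stubbed out as zero. */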
#define MMUSUFFIX _cmmu
#undef GETPC_ADJ
#define GETPC_ADJ 0
#undef GETRA
#define GETRA() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"