/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "config.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

#include "exec/cputlb.h"

#include "exec/memory-internal.h"

//#define DEBUG_TLB
//#define DEBUG_TLB_CHECK

/* statistics */
int tlb_flush_count;

/* NOTE:
 * If flush_global is true (the usual case), flush all tlb entries.
 * If flush_global is false, flush (at least) all tlb entries not
 * marked global.
 *
 * Since QEMU doesn't currently implement a global/not-global flag
 * for tlb entries, at the moment tlb_flush() will also flush all
 * tlb entries in the flush_global == false case.  This is OK because
 * CPU architectures generally permit an implementation to drop
 * entries from the TLB at any time, so flushing more entries than
 * required is only an efficiency issue, not a correctness issue.
 */
void tlb_flush(CPUArchState *env, int flush_global)
{
    CPUState *cpu = ENV_GET_CPU(env);

#if defined(DEBUG_TLB)
    printf("tlb_flush:\n");
#endif
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    memset(env->tlb_table, -1, sizeof(env->tlb_table));
    memset(env->tb_jmp_cache, 0, sizeof(env->tb_jmp_cache));

    env->tlb_flush_addr = -1;
    env->tlb_flush_mask = 0;
    tlb_flush_count++;
}
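
/* Illustrative usage (an assumption, not taken from this file): a target's
   MMU emulation typically requests a full flush when the entire address
   space changes, e.g. on a page-table base register write:

       tlb_flush(env, 1);

   Since flush_global is currently ignored (see the note above),
   tlb_flush(env, 0) behaves identically today. */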

static inline void tlb_flush_entry(CPUTLBEntry *tlb_entry, target_ulong addr)
{
    if (addr == (tlb_entry->addr_read &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_write &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK)) ||
        addr == (tlb_entry->addr_code &
                 (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
    }
}

void tlb_flush_page(CPUArchState *env, target_ulong addr)
{
    CPUState *cpu = ENV_GET_CPU(env);
    int i;
    int mmu_idx;

#if defined(DEBUG_TLB)
    printf("tlb_flush_page: " TARGET_FMT_lx "\n", addr);
#endif
    /* Check if we need to flush due to large pages.  */
    if ((addr & env->tlb_flush_mask) == env->tlb_flush_addr) {
#if defined(DEBUG_TLB)
        printf("tlb_flush_page: forced full flush ("
               TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
               env->tlb_flush_addr, env->tlb_flush_mask);
#endif
        tlb_flush(env, 1);
        return;
    }
    /* must reset current TB so that interrupts cannot modify the
       links while we are modifying them */
    cpu->current_tb = NULL;

    addr &= TARGET_PAGE_MASK;
    i = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_flush_entry(&env->tlb_table[mmu_idx][i], addr);
    }

    tb_flush_jmp_cache(env, addr);
}
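
/* Worked example (illustrative values, not from this file): with
   TARGET_PAGE_BITS == 12 and CPU_TLB_SIZE == 256, a virtual address of
   0x12345678 is masked down to 0x12345000 and selects TLB slot
   (0x12345000 >> 12) & 0xff == 0x45 in every MMU mode's table. */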

/* update the TLBs so that writes to code in the RAM page 'ram_addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_reset_dirty(ram_addr,
                                    ram_addr + TARGET_PAGE_SIZE,
                                    DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in the physical page 'ram_addr' are no
   longer tested for self modifying code */
void tlb_unprotect_code_phys(CPUArchState *env, ram_addr_t ram_addr,
                             target_ulong vaddr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}

static bool tlb_is_dirty_ram(CPUTLBEntry *tlbe)
{
    return (tlbe->addr_write & (TLB_INVALID_MASK|TLB_MMIO|TLB_NOTDIRTY)) == 0;
}

void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry, uintptr_t start,
                           uintptr_t length)
{
    uintptr_t addr;

    if (tlb_is_dirty_ram(tlb_entry)) {
        addr = (tlb_entry->addr_write & TARGET_PAGE_MASK) + tlb_entry->addend;
        if ((addr - start) < length) {
            tlb_entry->addr_write |= TLB_NOTDIRTY;
        }
    }
}
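
/* Note on the range check above (explanatory addition): since addr, start
   and length are all unsigned, the single comparison (addr - start) < length
   is equivalent to (start <= addr && addr < start + length); when
   addr < start, the subtraction wraps to a huge value and the test fails. */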

static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == NULL) {
        fprintf(stderr, "Bad ram pointer %p\n", ptr);
        abort();
    }
    return ram_addr;
}

void cpu_tlb_reset_dirty_all(ram_addr_t start1, ram_addr_t length)
{
    CPUState *cpu;
    CPUArchState *env;

    CPU_FOREACH(cpu) {
        int mmu_idx;

        env = cpu->env_ptr;
        for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
            unsigned int i;

            for (i = 0; i < CPU_TLB_SIZE; i++) {
                tlb_reset_dirty_range(&env->tlb_table[mmu_idx][i],
                                      start1, length);
            }
        }
    }
}

static inline void tlb_set_dirty1(CPUTLBEntry *tlb_entry, target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUArchState *env, target_ulong vaddr)
{
    int i;
    int mmu_idx;

    vaddr &= TARGET_PAGE_MASK;
    i = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1(&env->tlb_table[mmu_idx][i], vaddr);
    }
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, target_ulong vaddr,
                               target_ulong size)
{
    target_ulong mask = ~(size - 1);

    if (env->tlb_flush_addr == (target_ulong)-1) {
        env->tlb_flush_addr = vaddr & mask;
        env->tlb_flush_mask = mask;
        return;
    }
    /* Extend the existing region to include the new page.
       This is a compromise between unnecessary flushes and the cost
       of maintaining a full variable size TLB. */
    mask &= env->tlb_flush_mask;
    while (((env->tlb_flush_addr ^ vaddr) & mask) != 0) {
        mask <<= 1;
    }
    env->tlb_flush_addr &= mask;
    env->tlb_flush_mask = mask;
}
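
/* Worked example (illustrative values, assuming a 32-bit target): adding a
   64 KB page at vaddr 0x100000 records tlb_flush_addr = 0x100000 and
   tlb_flush_mask = 0xffff0000.  Adding another 64 KB page at 0x140000
   widens the mask one bit at a time until both addresses share a region:
   mask becomes 0xfff80000, covering 0x100000..0x17ffff.  From then on,
   tlb_flush_page() on any address in that range falls back to a full
   flush. */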

/* Add a new TLB entry.  At most one entry for a given virtual address
   is permitted.  Only a single TARGET_PAGE_SIZE region is mapped; the
   supplied size is only used by tlb_flush_page.  */
void tlb_set_page(CPUArchState *env, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong code_address;
    uintptr_t addend;
    CPUTLBEntry *te;
    hwaddr iotlb, xlat, sz;

    assert(size >= TARGET_PAGE_SIZE);
    if (size != TARGET_PAGE_SIZE) {
        tlb_add_large_page(env, vaddr, size);
    }

    sz = size;
    section = address_space_translate_for_iotlb(&address_space_memory, paddr,
                                                &xlat, &sz);
    assert(sz >= TARGET_PAGE_SIZE);

#if defined(DEBUG_TLB)
    printf("tlb_set_page: vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
           " prot=%x idx=%d\n",
           vaddr, paddr, prot, mmu_idx);
#endif

    address = vaddr;
    if (!memory_region_is_ram(section->mr) &&
        !memory_region_is_romd(section->mr)) {
        /* IO memory case */
        address |= TLB_MMIO;
        addend = 0;
    } else {
        /* TLB_MMIO for rom/romd handled below */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    }

    code_address = address;
    iotlb = memory_region_section_get_iotlb(env, section, vaddr, paddr, xlat,
                                            prot, &address);

    index = (vaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    env->iotlb[mmu_idx][index] = iotlb - vaddr;
    te = &env->tlb_table[mmu_idx][index];
    te->addend = addend - vaddr;
    if (prot & PAGE_READ) {
        te->addr_read = address;
    } else {
        te->addr_read = -1;
    }

    if (prot & PAGE_EXEC) {
        te->addr_code = code_address;
    } else {
        te->addr_code = -1;
    }
    if (prot & PAGE_WRITE) {
        if ((memory_region_is_ram(section->mr) && section->readonly)
            || memory_region_is_romd(section->mr)) {
            /* Write access calls the I/O callback.  */
            te->addr_write = address | TLB_MMIO;
        } else if (memory_region_is_ram(section->mr)
                   && cpu_physical_memory_is_clean(section->mr->ram_addr
                                                   + xlat)) {
            te->addr_write = address | TLB_NOTDIRTY;
        } else {
            te->addr_write = address;
        }
    } else {
        te->addr_write = -1;
    }
}
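
/* Note on the addend computation above (explanatory addition): for RAM
   pages, te->addend is set up so that a TLB hit can turn a guest virtual
   address into a host pointer with a single addition,
   host = vaddr + te->addend, because vaddr was already subtracted out.
   For I/O pages the addend is unused; the TLB_MMIO flag routes the access
   through the memory region's callbacks instead. */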

/* NOTE: this function can trigger an exception */
/* NOTE2: the returned address is not exactly the physical address: it
 * is actually a ram_addr_t (in system mode; the user mode emulation
 * version of this function returns a guest virtual address).
 */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    int mmu_idx, page_index, pd;
    void *p;
    MemoryRegion *mr;

    page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
    mmu_idx = cpu_mmu_index(env1);
    if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                 (addr & TARGET_PAGE_MASK))) {
        cpu_ldub_code(env1, addr);
    }
    pd = env1->iotlb[mmu_idx][page_index] & ~TARGET_PAGE_MASK;
    mr = iotlb_to_region(pd);
    if (memory_region_is_unassigned(mr)) {
        CPUState *cpu = ENV_GET_CPU(env1);
        CPUClass *cc = CPU_GET_CLASS(cpu);

        if (cc->do_unassigned_access) {
            cc->do_unassigned_access(cpu, addr, false, true, 0, 4);
        } else {
            cpu_abort(env1, "Trying to execute code outside RAM or ROM at 0x"
                      TARGET_FMT_lx "\n", addr);
        }
    }
    p = (void *)((uintptr_t)addr + env1->tlb_table[mmu_idx][page_index].addend);
    return qemu_ram_addr_from_host_nofail(p);
}
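
/* Flow summary (explanatory addition): a miss in the code TLB is repaired
   by the cpu_ldub_code() probe above; after that, addr plus the entry's
   addend yields a host pointer, which qemu_ram_addr_from_host_nofail()
   converts back to a ram_addr_t for the translator. */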
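/* Explanatory note (added): the includes below stamp out the code-access
   load helpers (hence the _cmmu MMUSUFFIX and SOFTMMU_CODE_ACCESS).  Each
   SHIFT value expands exec/softmmu_template.h for an access size of
   1 << SHIFT bytes, i.e. 1, 2, 4 and 8 bytes. */
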
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define SHIFT 0
#include "exec/softmmu_template.h"

#define SHIFT 1
#include "exec/softmmu_template.h"

#define SHIFT 2
#include "exec/softmmu_template.h"

#define SHIFT 3
#include "exec/softmmu_template.h"

#undef env