Add unified memory mapping sources from upstream.

This patch adds the sources of the unified memory API implementation
from QEMU 2.0.0, but doesn't change the rest of the sources to use
them yet.

Change-Id: I89d286e873114246e4b9a4c661a70ee2a77abcbb
diff --git a/Makefile.target b/Makefile.target
index 5b5bf2d..583d7df 100644
--- a/Makefile.target
+++ b/Makefile.target
@@ -246,6 +246,10 @@
     common_LOCAL_SRC_FILES += os-posix.c util/oslib-posix.c
 endif
 
+# Unified memory API sources
+common_LOCAL_SRC_FILES += \
+    memory.c \
+    memory_mapping.c
 
 ## one for 32-bit
 $(call start-emulator-library, emulator-target-$(EMULATOR_TARGET_CPU))
diff --git a/include/exec/address-spaces.h b/include/exec/address-spaces.h
new file mode 100644
index 0000000..3d12cdd
--- /dev/null
+++ b/include/exec/address-spaces.h
@@ -0,0 +1,41 @@
+/*
+ * Internal memory management interfaces
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef EXEC_MEMORY_H
+#define EXEC_MEMORY_H
+
+/*
+ * Internal interfaces between memory.c/exec.c/vl.c.  Do not #include unless
+ * you're one of them.
+ */
+
+#include "exec/memory.h"
+
+#ifndef CONFIG_USER_ONLY
+
+/* Get the root memory region.  This interface should only be used temporarily
+ * until a proper bus interface is available.
+ */
+MemoryRegion *get_system_memory(void);
+
+/* Get the root I/O port region.  This interface should only be used
+ * temporarily until a proper bus interface is available.
+ */
+MemoryRegion *get_system_io(void);
+
+extern AddressSpace address_space_memory;
+extern AddressSpace address_space_io;
+
+#endif
+
+#endif
diff --git a/include/exec/cpu-common.h b/include/exec/cpu-common.h
index 5164153..042f11f 100644
--- a/include/exec/cpu-common.h
+++ b/include/exec/cpu-common.h
@@ -58,6 +58,8 @@
 typedef void CPUWriteMemoryFunc(void *opaque, hwaddr addr, uint32_t value);
 typedef uint32_t CPUReadMemoryFunc(void *opaque, hwaddr addr);
 
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+
 void cpu_register_physical_memory_log(hwaddr start_addr,
                                       ram_addr_t size,
                                       ram_addr_t phys_offset,
diff --git a/include/exec/memory-internal.h b/include/exec/memory-internal.h
new file mode 100644
index 0000000..25c43c0
--- /dev/null
+++ b/include/exec/memory-internal.h
@@ -0,0 +1,34 @@
+/*
+ * Declarations for obsolete exec.c functions
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * later.  See the COPYING file in the top-level directory.
+ *
+ */
+
+/*
+ * This header is for use by exec.c and memory.c ONLY.  Do not include it.
+ * The functions declared here will be removed soon.
+ */
+
+#ifndef MEMORY_INTERNAL_H
+#define MEMORY_INTERNAL_H
+
+#ifndef CONFIG_USER_ONLY
+typedef struct AddressSpaceDispatch AddressSpaceDispatch;
+
+void address_space_init_dispatch(AddressSpace *as);
+void address_space_destroy_dispatch(AddressSpace *as);
+
+extern const MemoryRegionOps unassigned_mem_ops;
+
+bool memory_region_access_valid(MemoryRegion *mr, hwaddr addr,
+                                unsigned size, bool is_write);
+
+#endif
+#endif
diff --git a/include/exec/memory.h b/include/exec/memory.h
new file mode 100644
index 0000000..df0ad15
--- /dev/null
+++ b/include/exec/memory.h
@@ -0,0 +1,1058 @@
+/*
+ * Physical memory management API
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_H
+#define MEMORY_H
+
+#ifndef CONFIG_USER_ONLY
+
+#define DIRTY_MEMORY_VGA       0
+#define DIRTY_MEMORY_CODE      1
+#define DIRTY_MEMORY_MIGRATION 2
+#define DIRTY_MEMORY_NUM       3        /* num of dirty bits */
+
+#include <stdint.h>
+#include <stdbool.h>
+#include "qemu-common.h"
+#include "cpu.h"
+#include "exec/cpu-common.h"
+#ifndef CONFIG_USER_ONLY
+#include "exec/hwaddr.h"
+#endif
+#include "qemu/queue.h"
+#include "qemu/int128.h"
+#include "qemu/notify.h"
+
+#define MAX_PHYS_ADDR_SPACE_BITS 62
+#define MAX_PHYS_ADDR            (((hwaddr)1 << MAX_PHYS_ADDR_SPACE_BITS) - 1)
+
+typedef struct MemoryRegionOps MemoryRegionOps;
+typedef struct MemoryRegionMmio MemoryRegionMmio;
+
+struct MemoryRegionMmio {
+    CPUReadMemoryFunc *read[3];
+    CPUWriteMemoryFunc *write[3];
+};
+
+typedef struct IOMMUTLBEntry IOMMUTLBEntry;
+
+/* See address_space_translate: bit 0 is read, bit 1 is write.  */
+typedef enum {
+    IOMMU_NONE = 0,
+    IOMMU_RO   = 1,
+    IOMMU_WO   = 2,
+    IOMMU_RW   = 3,
+} IOMMUAccessFlags;
+
+struct IOMMUTLBEntry {
+    AddressSpace    *target_as;
+    hwaddr           iova;
+    hwaddr           translated_addr;
+    hwaddr           addr_mask;  /* 0xfff = 4k translation */
+    IOMMUAccessFlags perm;
+};
+
+/*
+ * Memory region callbacks
+ */
+struct MemoryRegionOps {
+    /* Read from the memory region. @addr is relative to @mr; @size is
+     * in bytes. */
+    uint64_t (*read)(void *opaque,
+                     hwaddr addr,
+                     unsigned size);
+    /* Write to the memory region. @addr is relative to @mr; @size is
+     * in bytes. */
+    void (*write)(void *opaque,
+                  hwaddr addr,
+                  uint64_t data,
+                  unsigned size);
+
+    enum device_endian endianness;
+    /* Guest-visible constraints: */
+    struct {
+        /* If nonzero, specifies bounds on access sizes beyond which a machine
+         * check is thrown.
+         */
+        unsigned min_access_size;
+        unsigned max_access_size;
+        /* If true, unaligned accesses are supported.  Otherwise unaligned
+         * accesses throw machine checks.
+         */
+        bool unaligned;
+        /*
+         * If present, and returns #false, the transaction is not accepted
+         * by the device (and results in machine dependent behaviour such
+         * as a machine check exception).
+         */
+        bool (*accepts)(void *opaque, hwaddr addr,
+                        unsigned size, bool is_write);
+    } valid;
+    /* Internal implementation constraints: */
+    struct {
+        /* If nonzero, specifies the minimum size implemented.  Smaller sizes
+         * will be rounded upwards and a partial result will be returned.
+         */
+        unsigned min_access_size;
+        /* If nonzero, specifies the maximum size implemented.  Larger sizes
+         * will be done as a series of accesses with smaller sizes.
+         */
+        unsigned max_access_size;
+        /* If true, unaligned accesses are supported.  Otherwise all accesses
+         * are converted to (possibly multiple) naturally aligned accesses.
+         */
+        bool unaligned;
+    } impl;
+
+    /* If .read and .write are not present, old_mmio may be used for
+     * backwards compatibility with old mmio registration
+     */
+    const MemoryRegionMmio old_mmio;
+};
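+
+/* Illustrative sketch (not part of the upstream API): a minimal
+ * MemoryRegionOps for a hypothetical register bank that only accepts
+ * aligned 4-byte accesses; my_dev_read/my_dev_write are assumed callbacks.
+ *
+ *     static const MemoryRegionOps my_dev_ops = {
+ *         .read = my_dev_read,
+ *         .write = my_dev_write,
+ *         .endianness = DEVICE_NATIVE_ENDIAN,
+ *         .valid = {
+ *             .min_access_size = 4,
+ *             .max_access_size = 4,
+ *         },
+ *     };
+ */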
+
+typedef struct MemoryRegionIOMMUOps MemoryRegionIOMMUOps;
+
+struct MemoryRegionIOMMUOps {
+    /* Return a TLB entry that contains a given address. */
+    IOMMUTLBEntry (*translate)(MemoryRegion *iommu, hwaddr addr);
+};
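+
+/* Illustrative sketch (hypothetical IOMMU, assumed names): a translate
+ * callback that returns an identity mapping for the 4k page containing
+ * @addr.
+ *
+ *     static IOMMUTLBEntry my_iommu_translate(MemoryRegion *iommu, hwaddr addr)
+ *     {
+ *         return (IOMMUTLBEntry) {
+ *             .target_as = &address_space_memory,
+ *             .iova = addr & ~(hwaddr)0xfff,
+ *             .translated_addr = addr & ~(hwaddr)0xfff,
+ *             .addr_mask = 0xfff,
+ *             .perm = IOMMU_RW,
+ *         };
+ *     }
+ */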
+
+typedef struct CoalescedMemoryRange CoalescedMemoryRange;
+typedef struct MemoryRegionIoeventfd MemoryRegionIoeventfd;
+
+struct MemoryRegion {
+    /* All fields are private - violators will be prosecuted */
+    const MemoryRegionOps *ops;
+    const MemoryRegionIOMMUOps *iommu_ops;
+    void *opaque;
+    struct Object *owner;
+    MemoryRegion *parent;
+    Int128 size;
+    hwaddr addr;
+    void (*destructor)(MemoryRegion *mr);
+    ram_addr_t ram_addr;
+    bool subpage;
+    bool terminates;
+    bool romd_mode;
+    bool ram;
+    bool readonly; /* For RAM regions */
+    bool enabled;
+    bool rom_device;
+    bool warning_printed; /* For reservations */
+    bool flush_coalesced_mmio;
+    MemoryRegion *alias;
+    hwaddr alias_offset;
+    int priority;
+    bool may_overlap;
+    QTAILQ_HEAD(subregions, MemoryRegion) subregions;
+    QTAILQ_ENTRY(MemoryRegion) subregions_link;
+    QTAILQ_HEAD(coalesced_ranges, CoalescedMemoryRange) coalesced;
+    const char *name;
+    uint8_t dirty_log_mask;
+    unsigned ioeventfd_nb;
+    MemoryRegionIoeventfd *ioeventfds;
+    NotifierList iommu_notify;
+};
+
+/**
+ * MemoryListener: callbacks structure for updates to the physical memory map
+ *
+ * Allows a component to adjust to changes in the guest-visible memory map.
+ * Use with memory_listener_register() and memory_listener_unregister().
+ */
+struct MemoryListener {
+    void (*begin)(MemoryListener *listener);
+    void (*commit)(MemoryListener *listener);
+    void (*region_add)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*region_del)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*region_nop)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_start)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_stop)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_sync)(MemoryListener *listener, MemoryRegionSection *section);
+    void (*log_global_start)(MemoryListener *listener);
+    void (*log_global_stop)(MemoryListener *listener);
+    void (*eventfd_add)(MemoryListener *listener, MemoryRegionSection *section,
+                        bool match_data, uint64_t data, EventNotifier *e);
+    void (*eventfd_del)(MemoryListener *listener, MemoryRegionSection *section,
+                        bool match_data, uint64_t data, EventNotifier *e);
+    void (*coalesced_mmio_add)(MemoryListener *listener, MemoryRegionSection *section,
+                               hwaddr addr, hwaddr len);
+    void (*coalesced_mmio_del)(MemoryListener *listener, MemoryRegionSection *section,
+                               hwaddr addr, hwaddr len);
+    /* Lower = earlier (during add), later (during del) */
+    unsigned priority;
+    AddressSpace *address_space_filter;
+    QTAILQ_ENTRY(MemoryListener) link;
+};
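+
+/* Illustrative sketch (my_region_add is an assumed handler): only the
+ * callbacks of interest need to be filled in; NULL callbacks are skipped.
+ *
+ *     static MemoryListener my_listener = {
+ *         .region_add = my_region_add,
+ *         .priority = 10,
+ *     };
+ *     memory_listener_register(&my_listener, &address_space_memory);
+ */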
+
+/**
+ * AddressSpace: describes a mapping of addresses to #MemoryRegion objects
+ */
+struct AddressSpace {
+    /* All fields are private. */
+    char *name;
+    MemoryRegion *root;
+    struct FlatView *current_map;
+    int ioeventfd_nb;
+    struct MemoryRegionIoeventfd *ioeventfds;
+    struct AddressSpaceDispatch *dispatch;
+    struct AddressSpaceDispatch *next_dispatch;
+    MemoryListener dispatch_listener;
+
+    QTAILQ_ENTRY(AddressSpace) address_spaces_link;
+};
+
+/**
+ * MemoryRegionSection: describes a fragment of a #MemoryRegion
+ *
+ * @mr: the region, or %NULL if empty
+ * @address_space: the address space the region is mapped in
+ * @offset_within_region: the beginning of the section, relative to @mr's start
+ * @size: the size of the section; will not exceed @mr's boundaries
+ * @offset_within_address_space: the address of the first byte of the section
+ *     relative to the region's address space
+ * @readonly: writes to this section are ignored
+ */
+struct MemoryRegionSection {
+    MemoryRegion *mr;
+    AddressSpace *address_space;
+    hwaddr offset_within_region;
+    Int128 size;
+    hwaddr offset_within_address_space;
+    bool readonly;
+};
+
+/**
+ * memory_region_init: Initialize a memory region
+ *
+ * The region typically acts as a container for other memory regions.  Use
+ * memory_region_add_subregion() to add subregions.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region; any subregions beyond this size will be clipped
+ */
+void memory_region_init(MemoryRegion *mr,
+                        struct Object *owner,
+                        const char *name,
+                        uint64_t size);
+
+/**
+ * memory_region_ref: Add 1 to a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug.  MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function adds a reference to the owner.
+ *
+ * All MemoryRegions must have an owner if they can disappear, even if the
+ * device they belong to operates exclusively under the BQL.  This is because
+ * the region could be returned at any time by memory_region_find, and this
+ * is usually under guest control.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_ref(MemoryRegion *mr);
+
+/**
+ * memory_region_unref: Remove 1 from a memory region's reference count
+ *
+ * Whenever memory regions are accessed outside the BQL, they need to be
+ * preserved against hot-unplug.  MemoryRegions actually do not have their
+ * own reference count; they piggyback on a QOM object, their "owner".
+ * This function removes a reference to the owner and possibly destroys it.
+ *
+ * @mr: the #MemoryRegion
+ */
+void memory_region_unref(MemoryRegion *mr);
+
+/**
+ * memory_region_init_io: Initialize an I/O memory region.
+ *
+ * Accesses into the region will cause the callbacks in @ops to be called.
+ * If @size is nonzero, subregions will be clipped to @size.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: a structure containing read and write callbacks to be used when
+ *       I/O is performed on the region.
+ * @opaque: passed to the read and write callbacks of the @ops structure.
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_io(MemoryRegion *mr,
+                           struct Object *owner,
+                           const MemoryRegionOps *ops,
+                           void *opaque,
+                           const char *name,
+                           uint64_t size);
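+
+/* Usage sketch (hypothetical device; my_dev_ops as in the sketch above):
+ * map a 4 KiB MMIO window whose accesses are dispatched to the callbacks.
+ *
+ *     memory_region_init_io(&dev->iomem, OBJECT(dev), &my_dev_ops,
+ *                           dev, "my-dev-mmio", 0x1000);
+ */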
+
+/**
+ * memory_region_init_ram:  Initialize RAM memory region.  Accesses into the
+ *                          region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ */
+void memory_region_init_ram(MemoryRegion *mr,
+                            struct Object *owner,
+                            const char *name,
+                            uint64_t size);
+
+/**
+ * memory_region_init_ram_ptr:  Initialize RAM memory region from a
+ *                              user-provided pointer.  Accesses into the
+ *                              region will modify memory directly.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: the name of the region.
+ * @size: size of the region.
+ * @ptr: memory to be mapped; must contain at least @size bytes.
+ */
+void memory_region_init_ram_ptr(MemoryRegion *mr,
+                                struct Object *owner,
+                                const char *name,
+                                uint64_t size,
+                                void *ptr);
+
+/**
+ * memory_region_init_alias: Initialize a memory region that aliases all or a
+ *                           part of another memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @orig: the region to be referenced; @mr will be equivalent to
+ *        @orig between @offset and @offset + @size - 1.
+ * @offset: start of the section in @orig to be referenced.
+ * @size: size of the region.
+ */
+void memory_region_init_alias(MemoryRegion *mr,
+                              struct Object *owner,
+                              const char *name,
+                              MemoryRegion *orig,
+                              hwaddr offset,
+                              uint64_t size);
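+
+/* Usage sketch (names and sizes are assumptions): expose the upper half of
+ * an 8 MiB RAM region at a second guest-physical address via an alias.
+ *
+ *     memory_region_init_alias(&alias, OBJECT(dev), "ram-hi",
+ *                              &ram, 0x400000, 0x400000);
+ *     memory_region_add_subregion(sysmem, 0x80000000, &alias);
+ */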
+
+/**
+ * memory_region_init_rom_device:  Initialize a ROM memory region.  Writes are
+ *                                 handled via callbacks.
+ *
+ * @mr: the #MemoryRegion to be initialized.
+ * @owner: the object that tracks the region's reference count
+ * @ops: callbacks for write access handling.
+ * @name: the name of the region.
+ * @size: size of the region.
+ */
+void memory_region_init_rom_device(MemoryRegion *mr,
+                                   struct Object *owner,
+                                   const MemoryRegionOps *ops,
+                                   void *opaque,
+                                   const char *name,
+                                   uint64_t size);
+
+/**
+ * memory_region_init_reservation: Initialize a memory region that reserves
+ *                                 I/O space.
+ *
+ * A reservation region primarily serves debugging purposes.  It claims I/O
+ * space that is not supposed to be handled by QEMU itself.  Any access via
+ * the memory API will cause an abort().
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_reservation(MemoryRegion *mr,
+                                    struct Object *owner,
+                                    const char *name,
+                                    uint64_t size);
+
+/**
+ * memory_region_init_iommu: Initialize a memory region that translates
+ * addresses
+ *
+ * An IOMMU region translates addresses and forwards accesses to a target
+ * memory region.
+ *
+ * @mr: the #MemoryRegion to be initialized
+ * @owner: the object that tracks the region's reference count
+ * @ops: a function that translates addresses into the @target region
+ * @name: used for debugging; not visible to the user or ABI
+ * @size: size of the region.
+ */
+void memory_region_init_iommu(MemoryRegion *mr,
+                              struct Object *owner,
+                              const MemoryRegionIOMMUOps *ops,
+                              const char *name,
+                              uint64_t size);
+
+/**
+ * memory_region_destroy: Destroy a memory region and reclaim all resources.
+ *
+ * @mr: the region to be destroyed.  May not currently be a subregion
+ *      (see memory_region_add_subregion()) or referenced in an alias
+ *      (see memory_region_init_alias()).
+ */
+void memory_region_destroy(MemoryRegion *mr);
+
+/**
+ * memory_region_owner: get a memory region's owner.
+ *
+ * @mr: the memory region being queried.
+ */
+struct Object *memory_region_owner(MemoryRegion *mr);
+
+/**
+ * memory_region_size: get a memory region's size.
+ *
+ * @mr: the memory region being queried.
+ */
+uint64_t memory_region_size(MemoryRegion *mr);
+
+/**
+ * memory_region_is_ram: check whether a memory region is random access
+ *
+ * Returns %true if a memory region is random access.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_ram(MemoryRegion *mr);
+
+/**
+ * memory_region_is_romd: check whether a memory region is in ROMD mode
+ *
+ * Returns %true if a memory region is a ROM device and currently set to allow
+ * direct reads.
+ *
+ * @mr: the memory region being queried
+ */
+static inline bool memory_region_is_romd(MemoryRegion *mr)
+{
+    return mr->rom_device && mr->romd_mode;
+}
+
+/**
+ * memory_region_is_iommu: check whether a memory region is an iommu
+ *
+ * Returns %true if a memory region is an iommu.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_iommu(MemoryRegion *mr);
+
+/**
+ * memory_region_notify_iommu: notify a change in an IOMMU translation entry.
+ *
+ * @mr: the memory region that was changed
+ * @entry: the new entry in the IOMMU translation table.  The entry
+ *         replaces all old entries for the same virtual I/O address range.
+ *         Deleted entries have .@perm == 0.
+ */
+void memory_region_notify_iommu(MemoryRegion *mr,
+                                IOMMUTLBEntry entry);
+
+/**
+ * memory_region_register_iommu_notifier: register a notifier for changes to
+ * IOMMU translation entries.
+ *
+ * @mr: the memory region to observe
+ * @n: the notifier to be added; the notifier receives a pointer to an
+ *     #IOMMUTLBEntry as the opaque value; the pointer ceases to be
+ *     valid on exit from the notifier.
+ */
+void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n);
+
+/**
+ * memory_region_unregister_iommu_notifier: unregister a notifier for
+ * changes to IOMMU translation entries.
+ *
+ * @n: the notifier to be removed.
+ */
+void memory_region_unregister_iommu_notifier(Notifier *n);
+
+/**
+ * memory_region_name: get a memory region's name
+ *
+ * Returns the string that was used to initialize the memory region.
+ *
+ * @mr: the memory region being queried
+ */
+const char *memory_region_name(MemoryRegion *mr);
+
+/**
+ * memory_region_is_logging: return whether a memory region is logging writes
+ *
+ * Returns %true if the memory region is logging writes
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_logging(MemoryRegion *mr);
+
+/**
+ * memory_region_is_rom: check whether a memory region is ROM
+ *
+ * Returns %true if a memory region is read-only memory.
+ *
+ * @mr: the memory region being queried
+ */
+bool memory_region_is_rom(MemoryRegion *mr);
+
+/**
+ * memory_region_get_ram_ptr: Get a pointer into a RAM memory region.
+ *
+ * Returns a host pointer to a RAM memory region (created with
+ * memory_region_init_ram() or memory_region_init_ram_ptr()).  Use with
+ * care.
+ *
+ * @mr: the memory region being queried.
+ */
+void *memory_region_get_ram_ptr(MemoryRegion *mr);
+
+/**
+ * memory_region_set_log: Turn dirty logging on or off for a region.
+ *
+ * Turns dirty logging on or off for a specified client (display, migration).
+ * Only meaningful for RAM regions.
+ *
+ * @mr: the memory region being updated.
+ * @log: whether dirty logging is to be enabled or disabled.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ *          %DIRTY_MEMORY_VGA.
+ */
+void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client);
+
+/**
+ * memory_region_get_dirty: Check whether a range of bytes is dirty
+ *                          for a specified client.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client.  Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ *          %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
+                             hwaddr size, unsigned client);
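+
+/* Usage sketch (illustrative; vram, ofs, len and redraw() are assumed
+ * names): VGA-style framebuffer tracking.
+ *
+ *     memory_region_set_log(&vram, true, DIRTY_MEMORY_VGA);
+ *     ...
+ *     if (memory_region_get_dirty(&vram, ofs, len, DIRTY_MEMORY_VGA)) {
+ *         redraw(ofs, len);
+ *         memory_region_reset_dirty(&vram, ofs, len, DIRTY_MEMORY_VGA);
+ *     }
+ */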
+
+/**
+ * memory_region_set_dirty: Mark a range of bytes as dirty in a memory region.
+ *
+ * Marks a range of bytes as dirty, after it has been dirtied outside
+ * guest code.
+ *
+ * @mr: the memory region being dirtied.
+ * @addr: the address (relative to the start of the region) being dirtied.
+ * @size: size of the range being dirtied.
+ */
+void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
+                             hwaddr size);
+
+/**
+ * memory_region_test_and_clear_dirty: Check whether a range of bytes is dirty
+ *                                     for a specified client, and clear it.
+ *
+ * Checks whether a range of bytes has been written to since the last
+ * call to memory_region_reset_dirty() with the same @client.  Dirty logging
+ * must be enabled.
+ *
+ * @mr: the memory region being queried.
+ * @addr: the address (relative to the start of the region) being queried.
+ * @size: the size of the range being queried.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ *          %DIRTY_MEMORY_VGA.
+ */
+bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
+                                        hwaddr size, unsigned client);
+/**
+ * memory_region_sync_dirty_bitmap: Synchronize a region's dirty bitmap with
+ *                                  any external TLBs (e.g. kvm)
+ *
+ * Flushes dirty information from accelerators such as kvm and vhost-net
+ * and makes it available to users of the memory API.
+ *
+ * @mr: the region being flushed.
+ */
+void memory_region_sync_dirty_bitmap(MemoryRegion *mr);
+
+/**
+ * memory_region_reset_dirty: Mark a range of pages as clean, for a specified
+ *                            client.
+ *
+ * Marks a range of pages as no longer dirty.
+ *
+ * @mr: the region being updated.
+ * @addr: the start of the subrange being cleaned.
+ * @size: the size of the subrange being cleaned.
+ * @client: the user of the logging information; %DIRTY_MEMORY_MIGRATION or
+ *          %DIRTY_MEMORY_VGA.
+ */
+void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
+                               hwaddr size, unsigned client);
+
+/**
+ * memory_region_set_readonly: Turn a memory region read-only (or read-write)
+ *
+ * Allows a memory region to be marked as read-only (turning it into a ROM).
+ * Only useful on RAM regions.
+ *
+ * @mr: the region being updated.
+ * @readonly: whether the region is to be ROM or RAM.
+ */
+void memory_region_set_readonly(MemoryRegion *mr, bool readonly);
+
+/**
+ * memory_region_rom_device_set_romd: enable/disable ROMD mode
+ *
+ * Allows a ROM device (initialized with memory_region_init_rom_device()) to
+ * be set to ROMD mode (default) or MMIO mode.  When it is in ROMD mode, the
+ * device is mapped to guest memory and satisfies read access directly.
+ * When in MMIO mode, reads are forwarded to the #MemoryRegion.read function.
+ * Writes are always handled by the #MemoryRegion.write function.
+ *
+ * @mr: the memory region to be updated
+ * @romd_mode: %true to put the region into ROMD mode
+ */
+void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode);
+
+/**
+ * memory_region_set_coalescing: Enable memory coalescing for the region.
+ *
+ * Enables writes to a region to be queued for later processing. MMIO ->write
+ * callbacks may be delayed until a non-coalesced MMIO is issued.
+ * Only useful for IO regions.  Roughly similar to write-combining hardware.
+ *
+ * @mr: the memory region to be write coalesced
+ */
+void memory_region_set_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_add_coalescing: Enable memory coalescing for a sub-range of
+ *                               a region.
+ *
+ * Like memory_region_set_coalescing(), but works on a sub-range of a region.
+ * Multiple calls can be issued to coalesce disjoint ranges.
+ *
+ * @mr: the memory region to be updated.
+ * @offset: the start of the range within the region to be coalesced.
+ * @size: the size of the subrange to be coalesced.
+ */
+void memory_region_add_coalescing(MemoryRegion *mr,
+                                  hwaddr offset,
+                                  uint64_t size);
+
+/**
+ * memory_region_clear_coalescing: Disable MMIO coalescing for the region.
+ *
+ * Disables any coalescing caused by memory_region_set_coalescing() or
+ * memory_region_add_coalescing().  Roughly equivalent to uncacheable memory
+ * hardware.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_coalescing(MemoryRegion *mr);
+
+/**
+ * memory_region_set_flush_coalesced: Enforce memory coalescing flush before
+ *                                    accesses.
+ *
+ * Ensures that pending coalesced MMIO requests are flushed before the memory
+ * region is accessed. This property is automatically enabled for all regions
+ * passed to memory_region_set_coalescing() and memory_region_add_coalescing().
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_set_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_clear_flush_coalesced: Disable memory coalescing flush before
+ *                                      accesses.
+ *
+ * Clear the automatic coalesced MMIO flushing enabled via
+ * memory_region_set_flush_coalesced. Note that this service has no effect on
+ * memory regions that have MMIO coalescing enabled for themselves. For them,
+ * automatic flushing will stop once coalescing is disabled.
+ *
+ * @mr: the memory region to be updated.
+ */
+void memory_region_clear_flush_coalesced(MemoryRegion *mr);
+
+/**
+ * memory_region_add_eventfd: Request an eventfd to be triggered when a word
+ *                            is written to a location.
+ *
+ * Marks a word in an IO region (initialized with memory_region_init_io())
+ * as a trigger for an eventfd event.  The I/O callback will not be called.
+ * The caller must be prepared to handle failure (that is, take the required
+ * action if the callback _is_ called).
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: event notifier to be triggered when @addr, @size, and @data all match.
+ */
+void memory_region_add_eventfd(MemoryRegion *mr,
+                               hwaddr addr,
+                               unsigned size,
+                               bool match_data,
+                               uint64_t data,
+                               EventNotifier *e);
+
+/**
+ * memory_region_del_eventfd: Cancel an eventfd.
+ *
+ * Cancels an eventfd trigger requested by a previous
+ * memory_region_add_eventfd() call.
+ *
+ * @mr: the memory region being updated.
+ * @addr: the address within @mr that is to be monitored
+ * @size: the size of the access to trigger the eventfd
+ * @match_data: whether to match against @data, instead of just @addr
+ * @data: the data to match against the guest write
+ * @e: event notifier to be triggered when @addr, @size, and @data all match.
+ */
+void memory_region_del_eventfd(MemoryRegion *mr,
+                               hwaddr addr,
+                               unsigned size,
+                               bool match_data,
+                               uint64_t data,
+                               EventNotifier *e);
+
+/**
+ * memory_region_add_subregion: Add a subregion to a container.
+ *
+ * Adds a subregion at @offset.  The subregion may not overlap with other
+ * subregions (except for those explicitly marked as overlapping).  A region
+ * may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ *      initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ */
+void memory_region_add_subregion(MemoryRegion *mr,
+                                 hwaddr offset,
+                                 MemoryRegion *subregion);
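+
+/* Sketch of a typical two-level hierarchy (names and sizes are illustrative):
+ *
+ *     memory_region_init(&container, NULL, "sysbus", UINT64_MAX);
+ *     memory_region_init_ram(&ram, NULL, "ram", ram_size);
+ *     memory_region_add_subregion(&container, 0, &ram);
+ */
+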
+/**
+ * memory_region_add_subregion_overlap: Add a subregion to a container
+ *                                      with overlap.
+ *
+ * Adds a subregion at @offset.  The subregion may overlap with other
+ * subregions.  Conflicts are resolved by having a higher @priority hide a
+ * lower @priority. Subregions without priority are taken as @priority 0.
+ * A region may only be added once as a subregion (unless removed with
+ * memory_region_del_subregion()); use memory_region_init_alias() if you
+ * want a region to be a subregion in multiple locations.
+ *
+ * @mr: the region to contain the new subregion; must be a container
+ *      initialized with memory_region_init().
+ * @offset: the offset relative to @mr where @subregion is added.
+ * @subregion: the subregion to be added.
+ * @priority: used for resolving overlaps; highest priority wins.
+ */
+void memory_region_add_subregion_overlap(MemoryRegion *mr,
+                                         hwaddr offset,
+                                         MemoryRegion *subregion,
+                                         int priority);
+
+/**
+ * memory_region_get_ram_addr: Get the ram address associated with a memory
+ *                             region
+ *
+ * DO NOT USE THIS FUNCTION.  This is a temporary workaround while the Xen
+ * code is being reworked.
+ */
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr);
+
+/**
+ * memory_region_del_subregion: Remove a subregion.
+ *
+ * Removes a subregion from its container.
+ *
+ * @mr: the container to be updated.
+ * @subregion: the region being removed; must be a current subregion of @mr.
+ */
+void memory_region_del_subregion(MemoryRegion *mr,
+                                 MemoryRegion *subregion);
+
+/*
+ * memory_region_set_enabled: dynamically enable or disable a region
+ *
+ * Enables or disables a memory region.  A disabled memory region
+ * ignores all accesses to itself and its subregions.  It does not
+ * obscure sibling subregions with lower priority - it simply behaves as
+ * if it was removed from the hierarchy.
+ *
+ * Regions default to being enabled.
+ *
+ * @mr: the region to be updated
+ * @enabled: whether to enable or disable the region
+ */
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled);
+
+/*
+ * memory_region_set_address: dynamically update the address of a region
+ *
+ * Dynamically updates the address of a region, relative to its parent.
+ * May be used on regions that are currently part of a memory hierarchy.
+ *
+ * @mr: the region to be updated
+ * @addr: new address, relative to parent region
+ */
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr);
+
+/*
+ * memory_region_set_alias_offset: dynamically update a memory alias's offset
+ *
+ * Dynamically updates the offset into the target region that an alias points
+ * to, as if the fourth argument to memory_region_init_alias() had changed.
+ *
+ * @mr: the #MemoryRegion to be updated; should be an alias.
+ * @offset: the new offset into the target memory region
+ */
+void memory_region_set_alias_offset(MemoryRegion *mr,
+                                    hwaddr offset);
+
+/**
+ * memory_region_present: checks if an address relative to @parent
+ * translates into a #MemoryRegion within @parent
+ *
+ * Answer whether a #MemoryRegion within @parent covers the address
+ * @addr.
+ *
+ * @parent: a #MemoryRegion within which @addr is a relative address
+ * @addr: the area within @parent to be searched
+ */
+bool memory_region_present(MemoryRegion *parent, hwaddr addr);
+
+/**
+ * memory_region_find: translate an address/size relative to a
+ * MemoryRegion into a #MemoryRegionSection.
+ *
+ * Locates the first #MemoryRegion within @mr that overlaps the range
+ * given by @addr and @size.
+ *
+ * Returns a #MemoryRegionSection that describes a contiguous overlap.
+ * It will have the following characteristics:
+ *    .@size = 0 iff no overlap was found
+ *    .@mr is non-%NULL iff an overlap was found
+ *
+ * Remember that in the return value the @offset_within_region is
+ * relative to the returned region (in the .@mr field), not to the
+ * @mr argument.
+ *
+ * Similarly, the .@offset_within_address_space is relative to the
+ * address space that contains both regions, the passed and the
+ * returned one.  However, in the special case where the @mr argument
+ * has no parent (and thus is the root of the address space), the
+ * following will hold:
+ *    .@offset_within_address_space >= @addr
+ *    .@offset_within_address_space + .@size <= @addr + @size
+ *
+ * @mr: a MemoryRegion within which @addr is a relative address
+ * @addr: start of the area within @mr to be searched
+ * @size: size of the area to be searched
+ */
+MemoryRegionSection memory_region_find(MemoryRegion *mr,
+                                       hwaddr addr, uint64_t size);
+
+/**
+ * address_space_sync_dirty_bitmap: synchronize the dirty log for all memory
+ *
+ * Synchronizes the dirty page log for an entire address space.
+ * @as: the address space that contains the memory being synchronized
+ */
+void address_space_sync_dirty_bitmap(AddressSpace *as);
+
+/**
+ * memory_region_transaction_begin: Start a transaction.
+ *
+ * During a transaction, changes will be accumulated and made visible
+ * only when the transaction ends (is committed).
+ */
+void memory_region_transaction_begin(void);
+
+/**
+ * memory_region_transaction_commit: Commit a transaction and make changes
+ *                                   visible to the guest.
+ */
+void memory_region_transaction_commit(void);
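+
+/* Usage sketch: batch several topology updates so listeners see one
+ * consistent change (region names are illustrative).
+ *
+ *     memory_region_transaction_begin();
+ *     memory_region_set_enabled(&bank0, false);
+ *     memory_region_set_address(&bank1, 0x100000);
+ *     memory_region_transaction_commit();
+ */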
+
+/**
+ * memory_listener_register: register callbacks to be called when memory
+ *                           sections are mapped or unmapped into an address
+ *                           space
+ *
+ * @listener: an object containing the callbacks to be called
+ * @filter: if non-%NULL, only regions in this address space will be observed
+ */
+void memory_listener_register(MemoryListener *listener, AddressSpace *filter);
+
+/**
+ * memory_listener_unregister: undo the effect of memory_listener_register()
+ *
+ * @listener: an object containing the callbacks to be removed
+ */
+void memory_listener_unregister(MemoryListener *listener);
+
+/**
+ * memory_global_dirty_log_start: begin dirty logging for all regions
+ */
+void memory_global_dirty_log_start(void);
+
+/**
+ * memory_global_dirty_log_stop: end dirty logging for all regions
+ */
+void memory_global_dirty_log_stop(void);
+
+void mtree_info(fprintf_function mon_printf, void *f);
+
+/**
+ * address_space_init: initializes an address space
+ *
+ * @as: an uninitialized #AddressSpace
+ * @root: a #MemoryRegion that routes addresses for the address space
+ * @name: an address space name.  The name is only used for debugging
+ *        output.
+ */
+void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name);
+
+
+/**
+ * address_space_destroy: destroy an address space
+ *
+ * Releases all resources associated with an address space.  After an address space
+ * is destroyed, its root memory region (given by address_space_init()) may be destroyed
+ * as well.
+ *
+ * @as: address space to be destroyed
+ */
+void address_space_destroy(AddressSpace *as);
+
+/**
+ * address_space_rw: read from or write to an address space.
+ *
+ * Return true if the operation hit any unassigned memory or encountered an
+ * IOMMU fault.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ * @is_write: indicates the transfer direction
+ */
+bool address_space_rw(AddressSpace *as, hwaddr addr, uint8_t *buf,
+                      int len, bool is_write);
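+
+/* Usage sketch (illustrative; report_bad_dma() is a hypothetical error
+ * handler): a DMA-style write into guest memory.  A true return value
+ * signals unassigned memory or an IOMMU fault.
+ *
+ *     if (address_space_rw(&address_space_memory, gpa, buf, len, true)) {
+ *         report_bad_dma(gpa);
+ *     }
+ */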
+
+/**
+ * address_space_write: write to address space.
+ *
+ * Return true if the operation hit any unassigned memory or encountered an
+ * IOMMU fault.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ */
+bool address_space_write(AddressSpace *as, hwaddr addr,
+                         const uint8_t *buf, int len);
+
+/**
+ * address_space_read: read from an address space.
+ *
+ * Return true if the operation hit any unassigned memory or encountered an
+ * IOMMU fault.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @buf: buffer with the data transferred
+ */
+bool address_space_read(AddressSpace *as, hwaddr addr, uint8_t *buf, int len);
+
+/* address_space_translate: translate an address range within an address space
+ * into a #MemoryRegion and an address range within that region
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @xlat: pointer to address within the returned memory region section's
+ * #MemoryRegion.
+ * @len: pointer to length
+ * @is_write: indicates the transfer direction
+ */
+MemoryRegion *address_space_translate(AddressSpace *as, hwaddr addr,
+                                      hwaddr *xlat, hwaddr *len,
+                                      bool is_write);
+
+/* address_space_access_valid: check for validity of accessing an address
+ * space range
+ *
+ * Check whether memory is assigned to the given address space range, and
+ * access is permitted by any IOMMU regions that are active for the address
+ * space.
+ *
+ * For now, addr and len should be aligned to a page size.  This limitation
+ * will be lifted in the future.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @len: length of the area to be checked
+ * @is_write: indicates the transfer direction
+ */
+bool address_space_access_valid(AddressSpace *as, hwaddr addr, int len, bool is_write);
+
+/* address_space_map: map a physical memory region into a host virtual address
+ *
+ * May map a subset of the requested range, given by and returned in @plen.
+ * May return %NULL if resources needed to perform the mapping are exhausted.
+ * Use only for reads OR writes - not for read-modify-write operations.
+ * Use cpu_register_map_client() to know when retrying the map operation is
+ * likely to succeed.
+ *
+ * @as: #AddressSpace to be accessed
+ * @addr: address within that address space
+ * @plen: pointer to length of buffer; updated on return
+ * @is_write: indicates the transfer direction
+ */
+void *address_space_map(AddressSpace *as, hwaddr addr,
+                        hwaddr *plen, bool is_write);
+
+/* address_space_unmap: Unmaps a memory region previously mapped by address_space_map()
+ *
+ * Will also mark the memory as dirty if @is_write == %true.  @access_len gives
+ * the amount of memory that was actually read or written by the caller.
+ *
+ * @as: #AddressSpace used
+ * @buffer: host pointer as returned by address_space_map()
+ * @len: buffer length as returned by address_space_map()
+ * @access_len: amount of data actually transferred
+ * @is_write: indicates the transfer direction
+ */
+void address_space_unmap(AddressSpace *as, void *buffer, hwaddr len,
+                         int is_write, hwaddr access_len);
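+
+/* Usage sketch of the map/unmap pairing (illustrative; retry handling for a
+ * partial or failed mapping is omitted):
+ *
+ *     hwaddr plen = len;
+ *     void *p = address_space_map(as, addr, &plen, true);
+ *     if (p) {
+ *         memcpy(p, data, plen);
+ *         address_space_unmap(as, p, plen, true, plen);
+ *     }
+ */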
+
+
+#endif
+
+#endif
diff --git a/include/exec/ram_addr.h b/include/exec/ram_addr.h
index 1ee6add..2c85a9b 100644
--- a/include/exec/ram_addr.h
+++ b/include/exec/ram_addr.h
@@ -37,7 +37,7 @@
 /* This should not be used by devices.  */
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
 void qemu_ram_free(ram_addr_t addr);
-void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
+void qemu_ram_free_from_ptr(ram_addr_t addr);
 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
 
 static inline int cpu_physical_memory_get_dirty(ram_addr_t start,
diff --git a/include/qemu/typedefs.h b/include/qemu/typedefs.h
index cb89dc7..3638e37 100644
--- a/include/qemu/typedefs.h
+++ b/include/qemu/typedefs.h
@@ -25,6 +25,7 @@
 typedef struct AddressSpace AddressSpace;
 typedef struct MemoryRegion MemoryRegion;
 typedef struct MemoryRegionSection MemoryRegionSection;
+typedef struct MemoryListener MemoryListener;
 
 typedef struct MemoryMappingList MemoryMappingList;
 
diff --git a/include/sysemu/memory_mapping.h b/include/sysemu/memory_mapping.h
new file mode 100644
index 0000000..a75d59a
--- /dev/null
+++ b/include/sysemu/memory_mapping.h
@@ -0,0 +1,82 @@
+/*
+ * QEMU memory mapping
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ *     Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef MEMORY_MAPPING_H
+#define MEMORY_MAPPING_H
+
+#include "qemu/queue.h"
+#include "qemu/typedefs.h"
+
+typedef struct GuestPhysBlock {
+    /* visible to guest, reflects PCI hole, etc */
+    hwaddr target_start;
+
+    /* implies size */
+    hwaddr target_end;
+
+    /* points into host memory */
+    uint8_t *host_addr;
+
+    QTAILQ_ENTRY(GuestPhysBlock) next;
+} GuestPhysBlock;
+
+/* point-in-time snapshot of guest-visible physical mappings */
+typedef struct GuestPhysBlockList {
+    unsigned num;
+    QTAILQ_HEAD(GuestPhysBlockHead, GuestPhysBlock) head;
+} GuestPhysBlockList;
+
+/* The physical and virtual addresses in the memory mapping are contiguous. */
+typedef struct MemoryMapping {
+    hwaddr phys_addr;
+    target_ulong virt_addr;
+    ram_addr_t length;
+    QTAILQ_ENTRY(MemoryMapping) next;
+} MemoryMapping;
+
+struct MemoryMappingList {
+    unsigned int num;
+    MemoryMapping *last_mapping;
+    QTAILQ_HEAD(, MemoryMapping) head;
+};
+
+/*
+ * Add or merge the memory region [phys_addr, phys_addr + length) into the
+ * memory mapping list.  The region's virtual address starts at virt_addr,
+ * and is contiguous. The list is sorted by phys_addr.
+ */
+void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
+                                          hwaddr phys_addr,
+                                          hwaddr virt_addr,
+                                          ram_addr_t length);
+
+void memory_mapping_list_free(MemoryMappingList *list);
+
+void memory_mapping_list_init(MemoryMappingList *list);
+
+void guest_phys_blocks_free(GuestPhysBlockList *list);
+void guest_phys_blocks_init(GuestPhysBlockList *list);
+void guest_phys_blocks_append(GuestPhysBlockList *list);
+
+void qemu_get_guest_memory_mapping(MemoryMappingList *list,
+                                   const GuestPhysBlockList *guest_phys_blocks,
+                                   Error **errp);
+
+/* Get the guest's memory mapping without doing paging (virtual address is 0). */
+void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
+                                  const GuestPhysBlockList *guest_phys_blocks);
+
+void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
+                           int64_t length);
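+
+/* Typical call sequence (illustrative sketch, not upstream documentation):
+ *
+ *     MemoryMappingList list;
+ *     memory_mapping_list_init(&list);
+ *     qemu_get_guest_simple_memory_mapping(&list, &guest_phys_blocks);
+ *     memory_mapping_filter(&list, begin, length);
+ *     ... walk list.head ...
+ *     memory_mapping_list_free(&list);
+ */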
+
+#endif
diff --git a/memory.c b/memory.c
new file mode 100644
index 0000000..3fd655a
--- /dev/null
+++ b/memory.c
@@ -0,0 +1,1892 @@
+/*
+ * Physical memory management
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates
+ *
+ * Authors:
+ *  Avi Kivity <avi@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Contributions after 2012-01-13 are licensed under the terms of the
+ * GNU GPL, version 2 or (at your option) any later version.
+ */
+
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
+#include "exec/ioport.h"
+#include "qemu/bitops.h"
+#include "qom/object.h"
+#include "trace.h"
+#include <assert.h>
+
+#include "exec/memory-internal.h"
+#include "exec/ram_addr.h"
+
+//#define DEBUG_UNASSIGNED
+
+static unsigned memory_region_transaction_depth;
+static bool memory_region_update_pending;
+static bool global_dirty_log = false;
+
+/* flat_view_mutex is taken around reading as->current_map; the critical
+ * section is extremely short, so I'm using a single mutex for every AS.
+ * We could also RCU for the read-side.
+ *
+ * The BQL is taken around transaction commits, hence both locks are taken
+ * while writing to as->current_map (with the BQL taken outside).
+ */
+static QemuMutex flat_view_mutex;
+
+static QTAILQ_HEAD(memory_listeners, MemoryListener) memory_listeners
+    = QTAILQ_HEAD_INITIALIZER(memory_listeners);
+
+static QTAILQ_HEAD(, AddressSpace) address_spaces
+    = QTAILQ_HEAD_INITIALIZER(address_spaces);
+
+static void memory_init(void)
+{
+    qemu_mutex_init(&flat_view_mutex);
+}
+
+typedef struct AddrRange AddrRange;
+
+/*
+ * Note using signed integers limits us to physical addresses at most
+ * 63 bits wide.  They are needed for negative offsetting in aliases
+ * (large MemoryRegion::alias_offset).
+ */
+struct AddrRange {
+    Int128 start;
+    Int128 size;
+};
+
+static AddrRange addrrange_make(Int128 start, Int128 size)
+{
+    return (AddrRange) { start, size };
+}
+
+static bool addrrange_equal(AddrRange r1, AddrRange r2)
+{
+    return int128_eq(r1.start, r2.start) && int128_eq(r1.size, r2.size);
+}
+
+static Int128 addrrange_end(AddrRange r)
+{
+    return int128_add(r.start, r.size);
+}
+
+static AddrRange addrrange_shift(AddrRange range, Int128 delta)
+{
+    int128_addto(&range.start, delta);
+    return range;
+}
+
+static bool addrrange_contains(AddrRange range, Int128 addr)
+{
+    return int128_ge(addr, range.start)
+        && int128_lt(addr, addrrange_end(range));
+}
+
+static bool addrrange_intersects(AddrRange r1, AddrRange r2)
+{
+    return addrrange_contains(r1, r2.start)
+        || addrrange_contains(r2, r1.start);
+}
+
+static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
+{
+    Int128 start = int128_max(r1.start, r2.start);
+    Int128 end = int128_min(addrrange_end(r1), addrrange_end(r2));
+    return addrrange_make(start, int128_sub(end, start));
+}
+
+enum ListenerDirection { Forward, Reverse };
+
+static bool memory_listener_match(MemoryListener *listener,
+                                  MemoryRegionSection *section)
+{
+    return !listener->address_space_filter
+        || listener->address_space_filter == section->address_space;
+}
+
+#define MEMORY_LISTENER_CALL_GLOBAL(_callback, _direction, _args...)    \
+    do {                                                                \
+        MemoryListener *_listener;                                      \
+                                                                        \
+        switch (_direction) {                                           \
+        case Forward:                                                   \
+            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
+                if (_listener->_callback) {                             \
+                    _listener->_callback(_listener, ##_args);           \
+                }                                                       \
+            }                                                           \
+            break;                                                      \
+        case Reverse:                                                   \
+            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
+                                   memory_listeners, link) {            \
+                if (_listener->_callback) {                             \
+                    _listener->_callback(_listener, ##_args);           \
+                }                                                       \
+            }                                                           \
+            break;                                                      \
+        default:                                                        \
+            abort();                                                    \
+        }                                                               \
+    } while (0)
+
+#define MEMORY_LISTENER_CALL(_callback, _direction, _section, _args...) \
+    do {                                                                \
+        MemoryListener *_listener;                                      \
+                                                                        \
+        switch (_direction) {                                           \
+        case Forward:                                                   \
+            QTAILQ_FOREACH(_listener, &memory_listeners, link) {        \
+                if (_listener->_callback                                \
+                    && memory_listener_match(_listener, _section)) {    \
+                    _listener->_callback(_listener, _section, ##_args); \
+                }                                                       \
+            }                                                           \
+            break;                                                      \
+        case Reverse:                                                   \
+            QTAILQ_FOREACH_REVERSE(_listener, &memory_listeners,        \
+                                   memory_listeners, link) {            \
+                if (_listener->_callback                                \
+                    && memory_listener_match(_listener, _section)) {    \
+                    _listener->_callback(_listener, _section, ##_args); \
+                }                                                       \
+            }                                                           \
+            break;                                                      \
+        default:                                                        \
+            abort();                                                    \
+        }                                                               \
+    } while (0)
+
+/* No need to ref/unref .mr, the FlatRange keeps it alive.  */
+#define MEMORY_LISTENER_UPDATE_REGION(fr, as, dir, callback)            \
+    MEMORY_LISTENER_CALL(callback, dir, (&(MemoryRegionSection) {       \
+        .mr = (fr)->mr,                                                 \
+        .address_space = (as),                                          \
+        .offset_within_region = (fr)->offset_in_region,                 \
+        .size = (fr)->addr.size,                                        \
+        .offset_within_address_space = int128_get64((fr)->addr.start),  \
+        .readonly = (fr)->readonly,                                     \
+              }))
+
+struct CoalescedMemoryRange {
+    AddrRange addr;
+    QTAILQ_ENTRY(CoalescedMemoryRange) link;
+};
+
+struct MemoryRegionIoeventfd {
+    AddrRange addr;
+    bool match_data;
+    uint64_t data;
+    EventNotifier *e;
+};
+
+static bool memory_region_ioeventfd_before(MemoryRegionIoeventfd a,
+                                           MemoryRegionIoeventfd b)
+{
+    if (int128_lt(a.addr.start, b.addr.start)) {
+        return true;
+    } else if (int128_gt(a.addr.start, b.addr.start)) {
+        return false;
+    } else if (int128_lt(a.addr.size, b.addr.size)) {
+        return true;
+    } else if (int128_gt(a.addr.size, b.addr.size)) {
+        return false;
+    } else if (a.match_data < b.match_data) {
+        return true;
+    } else  if (a.match_data > b.match_data) {
+        return false;
+    } else if (a.match_data) {
+        if (a.data < b.data) {
+            return true;
+        } else if (a.data > b.data) {
+            return false;
+        }
+    }
+    if (a.e < b.e) {
+        return true;
+    } else if (a.e > b.e) {
+        return false;
+    }
+    return false;
+}
+
+static bool memory_region_ioeventfd_equal(MemoryRegionIoeventfd a,
+                                          MemoryRegionIoeventfd b)
+{
+    return !memory_region_ioeventfd_before(a, b)
+        && !memory_region_ioeventfd_before(b, a);
+}
+
+typedef struct FlatRange FlatRange;
+typedef struct FlatView FlatView;
+
+/* Range of memory in the global map.  Addresses are absolute. */
+struct FlatRange {
+    MemoryRegion *mr;
+    hwaddr offset_in_region;
+    AddrRange addr;
+    uint8_t dirty_log_mask;
+    bool romd_mode;
+    bool readonly;
+};
+
+/* Flattened global view of current active memory hierarchy.  Kept in sorted
+ * order.
+ */
+struct FlatView {
+    unsigned ref;
+    FlatRange *ranges;
+    unsigned nr;
+    unsigned nr_allocated;
+};
+
+typedef struct AddressSpaceOps AddressSpaceOps;
+
+#define FOR_EACH_FLAT_RANGE(var, view)          \
+    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)
+
+static bool flatrange_equal(FlatRange *a, FlatRange *b)
+{
+    return a->mr == b->mr
+        && addrrange_equal(a->addr, b->addr)
+        && a->offset_in_region == b->offset_in_region
+        && a->romd_mode == b->romd_mode
+        && a->readonly == b->readonly;
+}
+
+static void flatview_init(FlatView *view)
+{
+    view->ref = 1;
+    view->ranges = NULL;
+    view->nr = 0;
+    view->nr_allocated = 0;
+}
+
+/* Insert a range into a given position.  Caller is responsible for maintaining
+ * sorting order.
+ */
+static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
+{
+    if (view->nr == view->nr_allocated) {
+        view->nr_allocated = MAX(2 * view->nr, 10);
+        view->ranges = g_realloc(view->ranges,
+                                    view->nr_allocated * sizeof(*view->ranges));
+    }
+    memmove(view->ranges + pos + 1, view->ranges + pos,
+            (view->nr - pos) * sizeof(FlatRange));
+    view->ranges[pos] = *range;
+    memory_region_ref(range->mr);
+    ++view->nr;
+}
+
+static void flatview_destroy(FlatView *view)
+{
+    int i;
+
+    for (i = 0; i < view->nr; i++) {
+        memory_region_unref(view->ranges[i].mr);
+    }
+    g_free(view->ranges);
+    g_free(view);
+}
+
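+/* FlatViews are reference counted: address_space_get_flatview() takes a
+ * reference under flat_view_mutex, and readers drop it with flatview_unref()
+ * once they are done iterating.
+ */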
+static void flatview_ref(FlatView *view)
+{
+    atomic_inc(&view->ref);
+}
+
+static void flatview_unref(FlatView *view)
+{
+    if (atomic_fetch_dec(&view->ref) == 1) {
+        flatview_destroy(view);
+    }
+}
+
+static bool can_merge(FlatRange *r1, FlatRange *r2)
+{
+    return int128_eq(addrrange_end(r1->addr), r2->addr.start)
+        && r1->mr == r2->mr
+        && int128_eq(int128_add(int128_make64(r1->offset_in_region),
+                                r1->addr.size),
+                     int128_make64(r2->offset_in_region))
+        && r1->dirty_log_mask == r2->dirty_log_mask
+        && r1->romd_mode == r2->romd_mode
+        && r1->readonly == r2->readonly;
+}
+
+/* Attempt to simplify a view by merging adjacent ranges */
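+/* Illustrative example: two FlatRanges over the same MemoryRegion covering
+ * [0x1000, 0x2000) and [0x2000, 0x3000) with region offsets 0x0 and 0x1000
+ * (and identical dirty/romd/readonly attributes) satisfy can_merge() and
+ * collapse into a single range covering [0x1000, 0x3000).
+ */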
+static void flatview_simplify(FlatView *view)
+{
+    unsigned i, j;
+
+    i = 0;
+    while (i < view->nr) {
+        j = i + 1;
+        while (j < view->nr
+               && can_merge(&view->ranges[j-1], &view->ranges[j])) {
+            int128_addto(&view->ranges[i].addr.size, view->ranges[j].addr.size);
+            ++j;
+        }
+        ++i;
+        memmove(&view->ranges[i], &view->ranges[j],
+                (view->nr - j) * sizeof(view->ranges[j]));
+        view->nr -= j - i;
+    }
+}
+
+static bool memory_region_big_endian(MemoryRegion *mr)
+{
+#ifdef TARGET_WORDS_BIGENDIAN
+    return mr->ops->endianness != DEVICE_LITTLE_ENDIAN;
+#else
+    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
+#endif
+}
+
+static bool memory_region_wrong_endianness(MemoryRegion *mr)
+{
+#ifdef TARGET_WORDS_BIGENDIAN
+    return mr->ops->endianness == DEVICE_LITTLE_ENDIAN;
+#else
+    return mr->ops->endianness == DEVICE_BIG_ENDIAN;
+#endif
+}
+
+static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
+{
+    if (memory_region_wrong_endianness(mr)) {
+        switch (size) {
+        case 1:
+            break;
+        case 2:
+            *data = bswap16(*data);
+            break;
+        case 4:
+            *data = bswap32(*data);
+            break;
+        case 8:
+            *data = bswap64(*data);
+            break;
+        default:
+            abort();
+        }
+    }
+}
+
+static void memory_region_oldmmio_read_accessor(MemoryRegion *mr,
+                                                hwaddr addr,
+                                                uint64_t *value,
+                                                unsigned size,
+                                                unsigned shift,
+                                                uint64_t mask)
+{
+    uint64_t tmp;
+
+    tmp = mr->ops->old_mmio.read[ctz32(size)](mr->opaque, addr);
+    trace_memory_region_ops_read(mr, addr, tmp, size);
+    *value |= (tmp & mask) << shift;
+}
+
+static void memory_region_read_accessor(MemoryRegion *mr,
+                                        hwaddr addr,
+                                        uint64_t *value,
+                                        unsigned size,
+                                        unsigned shift,
+                                        uint64_t mask)
+{
+    uint64_t tmp;
+
+    if (mr->flush_coalesced_mmio) {
+        qemu_flush_coalesced_mmio_buffer();
+    }
+    tmp = mr->ops->read(mr->opaque, addr, size);
+    trace_memory_region_ops_read(mr, addr, tmp, size);
+    *value |= (tmp & mask) << shift;
+}
+
+static void memory_region_oldmmio_write_accessor(MemoryRegion *mr,
+                                                 hwaddr addr,
+                                                 uint64_t *value,
+                                                 unsigned size,
+                                                 unsigned shift,
+                                                 uint64_t mask)
+{
+    uint64_t tmp;
+
+    tmp = (*value >> shift) & mask;
+    trace_memory_region_ops_write(mr, addr, tmp, size);
+    mr->ops->old_mmio.write[ctz32(size)](mr->opaque, addr, tmp);
+}
+
+static void memory_region_write_accessor(MemoryRegion *mr,
+                                         hwaddr addr,
+                                         uint64_t *value,
+                                         unsigned size,
+                                         unsigned shift,
+                                         uint64_t mask)
+{
+    uint64_t tmp;
+
+    if (mr->flush_coalesced_mmio) {
+        qemu_flush_coalesced_mmio_buffer();
+    }
+    tmp = (*value >> shift) & mask;
+    trace_memory_region_ops_write(mr, addr, tmp, size);
+    mr->ops->write(mr->opaque, addr, tmp, size);
+}
+
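+/* Split one guest access into device-sized pieces.  Sketch: an 8-byte read
+ * against a region with impl.max_access_size == 4 is issued as two 4-byte
+ * reads; their results are merged at bit shifts 0 and 32 on a little-endian
+ * region, or 32 and 0 on a big-endian one.
+ */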
+static void access_with_adjusted_size(hwaddr addr,
+                                      uint64_t *value,
+                                      unsigned size,
+                                      unsigned access_size_min,
+                                      unsigned access_size_max,
+                                      void (*access)(MemoryRegion *mr,
+                                                     hwaddr addr,
+                                                     uint64_t *value,
+                                                     unsigned size,
+                                                     unsigned shift,
+                                                     uint64_t mask),
+                                      MemoryRegion *mr)
+{
+    uint64_t access_mask;
+    unsigned access_size;
+    unsigned i;
+
+    if (!access_size_min) {
+        access_size_min = 1;
+    }
+    if (!access_size_max) {
+        access_size_max = 4;
+    }
+
+    /* FIXME: support unaligned access? */
+    access_size = MAX(MIN(size, access_size_max), access_size_min);
+    access_mask = -1ULL >> (64 - access_size * 8);
+    if (memory_region_big_endian(mr)) {
+        for (i = 0; i < size; i += access_size) {
+            access(mr, addr + i, value, access_size,
+                   (size - access_size - i) * 8, access_mask);
+        }
+    } else {
+        for (i = 0; i < size; i += access_size) {
+            access(mr, addr + i, value, access_size, i * 8, access_mask);
+        }
+    }
+}
+
+static AddressSpace *memory_region_to_address_space(MemoryRegion *mr)
+{
+    AddressSpace *as;
+
+    while (mr->parent) {
+        mr = mr->parent;
+    }
+    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+        if (mr == as->root) {
+            return as;
+        }
+    }
+    abort();
+}
+
+/* Render a memory region into the global view.  Ranges in @view obscure
+ * ranges in @mr.
+ */
+static void render_memory_region(FlatView *view,
+                                 MemoryRegion *mr,
+                                 Int128 base,
+                                 AddrRange clip,
+                                 bool readonly)
+{
+    MemoryRegion *subregion;
+    unsigned i;
+    hwaddr offset_in_region;
+    Int128 remain;
+    Int128 now;
+    FlatRange fr;
+    AddrRange tmp;
+
+    if (!mr->enabled) {
+        return;
+    }
+
+    int128_addto(&base, int128_make64(mr->addr));
+    readonly |= mr->readonly;
+
+    tmp = addrrange_make(base, mr->size);
+
+    if (!addrrange_intersects(tmp, clip)) {
+        return;
+    }
+
+    clip = addrrange_intersection(tmp, clip);
+
+    if (mr->alias) {
+        int128_subfrom(&base, int128_make64(mr->alias->addr));
+        int128_subfrom(&base, int128_make64(mr->alias_offset));
+        render_memory_region(view, mr->alias, base, clip, readonly);
+        return;
+    }
+
+    /* Render subregions in priority order. */
+    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
+        render_memory_region(view, subregion, base, clip, readonly);
+    }
+
+    if (!mr->terminates) {
+        return;
+    }
+
+    offset_in_region = int128_get64(int128_sub(clip.start, base));
+    base = clip.start;
+    remain = clip.size;
+
+    fr.mr = mr;
+    fr.dirty_log_mask = mr->dirty_log_mask;
+    fr.romd_mode = mr->romd_mode;
+    fr.readonly = readonly;
+
+    /* Render the region itself into any gaps left by the current view. */
+    for (i = 0; i < view->nr && int128_nz(remain); ++i) {
+        if (int128_ge(base, addrrange_end(view->ranges[i].addr))) {
+            continue;
+        }
+        if (int128_lt(base, view->ranges[i].addr.start)) {
+            now = int128_min(remain,
+                             int128_sub(view->ranges[i].addr.start, base));
+            fr.offset_in_region = offset_in_region;
+            fr.addr = addrrange_make(base, now);
+            flatview_insert(view, i, &fr);
+            ++i;
+            int128_addto(&base, now);
+            offset_in_region += int128_get64(now);
+            int128_subfrom(&remain, now);
+        }
+        now = int128_sub(int128_min(int128_add(base, remain),
+                                    addrrange_end(view->ranges[i].addr)),
+                         base);
+        int128_addto(&base, now);
+        offset_in_region += int128_get64(now);
+        int128_subfrom(&remain, now);
+    }
+    if (int128_nz(remain)) {
+        fr.offset_in_region = offset_in_region;
+        fr.addr = addrrange_make(base, remain);
+        flatview_insert(view, i, &fr);
+    }
+}
+
+/* Render a memory topology into a list of disjoint absolute ranges. */
+static FlatView *generate_memory_topology(MemoryRegion *mr)
+{
+    FlatView *view;
+
+    view = g_new(FlatView, 1);
+    flatview_init(view);
+
+    if (mr) {
+        render_memory_region(view, mr, int128_zero(),
+                             addrrange_make(int128_zero(), int128_2_64()), false);
+    }
+    flatview_simplify(view);
+
+    return view;
+}
+
+static void address_space_add_del_ioeventfds(AddressSpace *as,
+                                             MemoryRegionIoeventfd *fds_new,
+                                             unsigned fds_new_nb,
+                                             MemoryRegionIoeventfd *fds_old,
+                                             unsigned fds_old_nb)
+{
+    unsigned iold, inew;
+    MemoryRegionIoeventfd *fd;
+    MemoryRegionSection section;
+
+    /* Generate a symmetric difference of the old and new fd sets, adding
+     * and deleting as necessary.
+     */
+
+    iold = inew = 0;
+    while (iold < fds_old_nb || inew < fds_new_nb) {
+        if (iold < fds_old_nb
+            && (inew == fds_new_nb
+                || memory_region_ioeventfd_before(fds_old[iold],
+                                                  fds_new[inew]))) {
+            fd = &fds_old[iold];
+            section = (MemoryRegionSection) {
+                .address_space = as,
+                .offset_within_address_space = int128_get64(fd->addr.start),
+                .size = fd->addr.size,
+            };
+            MEMORY_LISTENER_CALL(eventfd_del, Forward, &section,
+                                 fd->match_data, fd->data, fd->e);
+            ++iold;
+        } else if (inew < fds_new_nb
+                   && (iold == fds_old_nb
+                       || memory_region_ioeventfd_before(fds_new[inew],
+                                                         fds_old[iold]))) {
+            fd = &fds_new[inew];
+            section = (MemoryRegionSection) {
+                .address_space = as,
+                .offset_within_address_space = int128_get64(fd->addr.start),
+                .size = fd->addr.size,
+            };
+            MEMORY_LISTENER_CALL(eventfd_add, Reverse, &section,
+                                 fd->match_data, fd->data, fd->e);
+            ++inew;
+        } else {
+            ++iold;
+            ++inew;
+        }
+    }
+}
+
+static FlatView *address_space_get_flatview(AddressSpace *as)
+{
+    FlatView *view;
+
+    qemu_mutex_lock(&flat_view_mutex);
+    view = as->current_map;
+    flatview_ref(view);
+    qemu_mutex_unlock(&flat_view_mutex);
+    return view;
+}
+
+static void address_space_update_ioeventfds(AddressSpace *as)
+{
+    FlatView *view;
+    FlatRange *fr;
+    unsigned ioeventfd_nb = 0;
+    MemoryRegionIoeventfd *ioeventfds = NULL;
+    AddrRange tmp;
+    unsigned i;
+
+    view = address_space_get_flatview(as);
+    FOR_EACH_FLAT_RANGE(fr, view) {
+        for (i = 0; i < fr->mr->ioeventfd_nb; ++i) {
+            tmp = addrrange_shift(fr->mr->ioeventfds[i].addr,
+                                  int128_sub(fr->addr.start,
+                                             int128_make64(fr->offset_in_region)));
+            if (addrrange_intersects(fr->addr, tmp)) {
+                ++ioeventfd_nb;
+                ioeventfds = g_realloc(ioeventfds,
+                                          ioeventfd_nb * sizeof(*ioeventfds));
+                ioeventfds[ioeventfd_nb-1] = fr->mr->ioeventfds[i];
+                ioeventfds[ioeventfd_nb-1].addr = tmp;
+            }
+        }
+    }
+
+    address_space_add_del_ioeventfds(as, ioeventfds, ioeventfd_nb,
+                                     as->ioeventfds, as->ioeventfd_nb);
+
+    g_free(as->ioeventfds);
+    as->ioeventfds = ioeventfds;
+    as->ioeventfd_nb = ioeventfd_nb;
+    flatview_unref(view);
+}
+
+static void address_space_update_topology_pass(AddressSpace *as,
+                                               const FlatView *old_view,
+                                               const FlatView *new_view,
+                                               bool adding)
+{
+    unsigned iold, inew;
+    FlatRange *frold, *frnew;
+
+    /* Generate a symmetric difference of the old and new memory maps.
+     * Kill ranges in the old map, and instantiate ranges in the new map.
+     */
+    iold = inew = 0;
+    while (iold < old_view->nr || inew < new_view->nr) {
+        if (iold < old_view->nr) {
+            frold = &old_view->ranges[iold];
+        } else {
+            frold = NULL;
+        }
+        if (inew < new_view->nr) {
+            frnew = &new_view->ranges[inew];
+        } else {
+            frnew = NULL;
+        }
+
+        if (frold
+            && (!frnew
+                || int128_lt(frold->addr.start, frnew->addr.start)
+                || (int128_eq(frold->addr.start, frnew->addr.start)
+                    && !flatrange_equal(frold, frnew)))) {
+            /* In old but not in new, or in both but attributes changed. */
+
+            if (!adding) {
+                MEMORY_LISTENER_UPDATE_REGION(frold, as, Reverse, region_del);
+            }
+
+            ++iold;
+        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
+            /* In both and unchanged (except logging may have changed) */
+
+            if (adding) {
+                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_nop);
+                if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
+                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Reverse, log_stop);
+                } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
+                    MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, log_start);
+                }
+            }
+
+            ++iold;
+            ++inew;
+        } else {
+            /* In new */
+
+            if (adding) {
+                MEMORY_LISTENER_UPDATE_REGION(frnew, as, Forward, region_add);
+            }
+
+            ++inew;
+        }
+    }
+}
+
+
+static void address_space_update_topology(AddressSpace *as)
+{
+    FlatView *old_view = address_space_get_flatview(as);
+    FlatView *new_view = generate_memory_topology(as->root);
+
+    address_space_update_topology_pass(as, old_view, new_view, false);
+    address_space_update_topology_pass(as, old_view, new_view, true);
+
+    qemu_mutex_lock(&flat_view_mutex);
+    flatview_unref(as->current_map);
+    as->current_map = new_view;
+    qemu_mutex_unlock(&flat_view_mutex);
+
+    /* Note that all the old MemoryRegions are still alive up to this
+     * point.  This relieves most MemoryListeners from the need to
+     * ref/unref the MemoryRegions they get---unless they use them
+     * outside the iothread mutex, in which case precise reference
+     * counting is necessary.
+     */
+    flatview_unref(old_view);
+
+    address_space_update_ioeventfds(as);
+}
+
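+/* Usage sketch (bank0/bank1 are hypothetical regions): batch several layout
+ * changes so listeners see one begin/commit cycle instead of one per call:
+ *
+ *     memory_region_transaction_begin();
+ *     memory_region_set_enabled(bank0, false);
+ *     memory_region_set_address(bank1, 0x10000);
+ *     memory_region_transaction_commit();
+ *
+ * The nested begin/commit pairs inside the setters only adjust the depth
+ * counter; the topology is rebuilt once, at the outermost commit.
+ */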
+void memory_region_transaction_begin(void)
+{
+    qemu_flush_coalesced_mmio_buffer();
+    ++memory_region_transaction_depth;
+}
+
+void memory_region_transaction_commit(void)
+{
+    AddressSpace *as;
+
+    assert(memory_region_transaction_depth);
+    --memory_region_transaction_depth;
+    if (!memory_region_transaction_depth && memory_region_update_pending) {
+        memory_region_update_pending = false;
+        MEMORY_LISTENER_CALL_GLOBAL(begin, Forward);
+
+        QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+            address_space_update_topology(as);
+        }
+
+        MEMORY_LISTENER_CALL_GLOBAL(commit, Forward);
+    }
+}
+
+static void memory_region_destructor_none(MemoryRegion *mr)
+{
+}
+
+static void memory_region_destructor_ram(MemoryRegion *mr)
+{
+    qemu_ram_free(mr->ram_addr);
+}
+
+static void memory_region_destructor_alias(MemoryRegion *mr)
+{
+    memory_region_unref(mr->alias);
+}
+
+static void memory_region_destructor_ram_from_ptr(MemoryRegion *mr)
+{
+    qemu_ram_free_from_ptr(mr->ram_addr);
+}
+
+static void memory_region_destructor_rom_device(MemoryRegion *mr)
+{
+    qemu_ram_free(mr->ram_addr & TARGET_PAGE_MASK);
+}
+
+void memory_region_init(MemoryRegion *mr,
+                        Object *owner,
+                        const char *name,
+                        uint64_t size)
+{
+    mr->ops = &unassigned_mem_ops;
+    mr->opaque = NULL;
+    mr->owner = owner;
+    mr->iommu_ops = NULL;
+    mr->parent = NULL;
+    mr->size = int128_make64(size);
+    if (size == UINT64_MAX) {
+        mr->size = int128_2_64();
+    }
+    mr->addr = 0;
+    mr->subpage = false;
+    mr->enabled = true;
+    mr->terminates = false;
+    mr->ram = false;
+    mr->romd_mode = true;
+    mr->readonly = false;
+    mr->rom_device = false;
+    mr->destructor = memory_region_destructor_none;
+    mr->priority = 0;
+    mr->may_overlap = false;
+    mr->alias = NULL;
+    QTAILQ_INIT(&mr->subregions);
+    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
+    QTAILQ_INIT(&mr->coalesced);
+    mr->name = g_strdup(name);
+    mr->dirty_log_mask = 0;
+    mr->ioeventfd_nb = 0;
+    mr->ioeventfds = NULL;
+    mr->flush_coalesced_mmio = false;
+}
+
+static uint64_t unassigned_mem_read(void *opaque, hwaddr addr,
+                                    unsigned size)
+{
+#ifdef DEBUG_UNASSIGNED
+    printf("Unassigned mem read " TARGET_FMT_plx "\n", addr);
+#endif
+    if (current_cpu != NULL) {
+        cpu_unassigned_access(current_cpu, addr, false, false, 0, size);
+    }
+    return 0;
+}
+
+static void unassigned_mem_write(void *opaque, hwaddr addr,
+                                 uint64_t val, unsigned size)
+{
+#ifdef DEBUG_UNASSIGNED
+    printf("Unassigned mem write " TARGET_FMT_plx " = 0x%"PRIx64"\n", addr, val);
+#endif
+    if (current_cpu != NULL) {
+        cpu_unassigned_access(current_cpu, addr, true, false, 0, size);
+    }
+}
+
+static bool unassigned_mem_accepts(void *opaque, hwaddr addr,
+                                   unsigned size, bool is_write)
+{
+    return false;
+}
+
+const MemoryRegionOps unassigned_mem_ops = {
+    .valid.accepts = unassigned_mem_accepts,
+    .endianness = DEVICE_NATIVE_ENDIAN,
+};
+
+bool memory_region_access_valid(MemoryRegion *mr,
+                                hwaddr addr,
+                                unsigned size,
+                                bool is_write)
+{
+    int access_size_min, access_size_max;
+    int access_size, i;
+
+    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
+        return false;
+    }
+
+    if (!mr->ops->valid.accepts) {
+        return true;
+    }
+
+    access_size_min = mr->ops->valid.min_access_size;
+    if (!mr->ops->valid.min_access_size) {
+        access_size_min = 1;
+    }
+
+    access_size_max = mr->ops->valid.max_access_size;
+    if (!mr->ops->valid.max_access_size) {
+        access_size_max = 4;
+    }
+
+    access_size = MAX(MIN(size, access_size_max), access_size_min);
+    for (i = 0; i < size; i += access_size) {
+        if (!mr->ops->valid.accepts(mr->opaque, addr + i, access_size,
+                                    is_write)) {
+            return false;
+        }
+    }
+
+    return true;
+}
+
+static uint64_t memory_region_dispatch_read1(MemoryRegion *mr,
+                                             hwaddr addr,
+                                             unsigned size)
+{
+    uint64_t data = 0;
+
+    if (mr->ops->read) {
+        access_with_adjusted_size(addr, &data, size,
+                                  mr->ops->impl.min_access_size,
+                                  mr->ops->impl.max_access_size,
+                                  memory_region_read_accessor, mr);
+    } else {
+        access_with_adjusted_size(addr, &data, size, 1, 4,
+                                  memory_region_oldmmio_read_accessor, mr);
+    }
+
+    return data;
+}
+
+static bool memory_region_dispatch_read(MemoryRegion *mr,
+                                        hwaddr addr,
+                                        uint64_t *pval,
+                                        unsigned size)
+{
+    if (!memory_region_access_valid(mr, addr, size, false)) {
+        *pval = unassigned_mem_read(mr, addr, size);
+        return true;
+    }
+
+    *pval = memory_region_dispatch_read1(mr, addr, size);
+    adjust_endianness(mr, pval, size);
+    return false;
+}
+
+static bool memory_region_dispatch_write(MemoryRegion *mr,
+                                         hwaddr addr,
+                                         uint64_t data,
+                                         unsigned size)
+{
+    if (!memory_region_access_valid(mr, addr, size, true)) {
+        unassigned_mem_write(mr, addr, data, size);
+        return true;
+    }
+
+    adjust_endianness(mr, &data, size);
+
+    if (mr->ops->write) {
+        access_with_adjusted_size(addr, &data, size,
+                                  mr->ops->impl.min_access_size,
+                                  mr->ops->impl.max_access_size,
+                                  memory_region_write_accessor, mr);
+    } else {
+        access_with_adjusted_size(addr, &data, size, 1, 4,
+                                  memory_region_oldmmio_write_accessor, mr);
+    }
+    return false;
+}
+
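+/* Usage sketch (my_dev_ops, my_dev_read/my_dev_write and s are illustrative):
+ *
+ *     static const MemoryRegionOps my_dev_ops = {
+ *         .read = my_dev_read,
+ *         .write = my_dev_write,
+ *         .endianness = DEVICE_NATIVE_ENDIAN,
+ *     };
+ *     memory_region_init_io(&s->iomem, OBJECT(s), &my_dev_ops, s,
+ *                           "my-dev", 0x1000);
+ */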
+void memory_region_init_io(MemoryRegion *mr,
+                           Object *owner,
+                           const MemoryRegionOps *ops,
+                           void *opaque,
+                           const char *name,
+                           uint64_t size)
+{
+    memory_region_init(mr, owner, name, size);
+    mr->ops = ops;
+    mr->opaque = opaque;
+    mr->terminates = true;
+    mr->ram_addr = ~(ram_addr_t)0;
+}
+
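+/* Usage sketch: allocate 64 KiB of guest RAM and map it into the system
+ * address space (addresses and names are illustrative):
+ *
+ *     MemoryRegion *ram = g_new0(MemoryRegion, 1);
+ *     memory_region_init_ram(ram, NULL, "test.ram", 0x10000);
+ *     memory_region_add_subregion(get_system_memory(), 0x40000000, ram);
+ */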
+void memory_region_init_ram(MemoryRegion *mr,
+                            Object *owner,
+                            const char *name,
+                            uint64_t size)
+{
+    memory_region_init(mr, owner, name, size);
+    mr->ram = true;
+    mr->terminates = true;
+    mr->destructor = memory_region_destructor_ram;
+#ifdef CONFIG_ANDROID
+    mr->ram_addr = qemu_ram_alloc(NULL, name, size);
+#else
+    mr->ram_addr = qemu_ram_alloc(size, mr);
+#endif
+}
+
+void memory_region_init_ram_ptr(MemoryRegion *mr,
+                                Object *owner,
+                                const char *name,
+                                uint64_t size,
+                                void *ptr)
+{
+    memory_region_init(mr, owner, name, size);
+    mr->ram = true;
+    mr->terminates = true;
+    mr->destructor = memory_region_destructor_ram_from_ptr;
+#ifdef CONFIG_ANDROID
+    mr->ram_addr = qemu_ram_alloc_from_ptr(NULL, name, size, ptr);
+#else
+    mr->ram_addr = qemu_ram_alloc_from_ptr(size, ptr, mr);
+#endif
+}
+
+void memory_region_init_alias(MemoryRegion *mr,
+                              Object *owner,
+                              const char *name,
+                              MemoryRegion *orig,
+                              hwaddr offset,
+                              uint64_t size)
+{
+    memory_region_init(mr, owner, name, size);
+    memory_region_ref(orig);
+    mr->destructor = memory_region_destructor_alias;
+    mr->alias = orig;
+    mr->alias_offset = offset;
+}
+
+void memory_region_init_rom_device(MemoryRegion *mr,
+                                   Object *owner,
+                                   const MemoryRegionOps *ops,
+                                   void *opaque,
+                                   const char *name,
+                                   uint64_t size)
+{
+    memory_region_init(mr, owner, name, size);
+    mr->ops = ops;
+    mr->opaque = opaque;
+    mr->terminates = true;
+    mr->rom_device = true;
+    mr->destructor = memory_region_destructor_rom_device;
+#ifdef CONFIG_ANDROID
+    mr->ram_addr = qemu_ram_alloc(NULL, name, size);
+#else
+    mr->ram_addr = qemu_ram_alloc(size, mr);
+#endif
+}
+
+void memory_region_init_iommu(MemoryRegion *mr,
+                              Object *owner,
+                              const MemoryRegionIOMMUOps *ops,
+                              const char *name,
+                              uint64_t size)
+{
+    memory_region_init(mr, owner, name, size);
+    mr->iommu_ops = ops;
+    mr->terminates = true;  /* then re-forwards */
+    notifier_list_init(&mr->iommu_notify);
+}
+
+void memory_region_init_reservation(MemoryRegion *mr,
+                                    Object *owner,
+                                    const char *name,
+                                    uint64_t size)
+{
+    memory_region_init_io(mr, owner, &unassigned_mem_ops, mr, name, size);
+}
+
+void memory_region_destroy(MemoryRegion *mr)
+{
+    assert(QTAILQ_EMPTY(&mr->subregions));
+    assert(memory_region_transaction_depth == 0);
+    mr->destructor(mr);
+    memory_region_clear_coalescing(mr);
+    g_free((char *)mr->name);
+    g_free(mr->ioeventfds);
+}
+
+Object *memory_region_owner(MemoryRegion *mr)
+{
+    return mr->owner;
+}
+
+void memory_region_ref(MemoryRegion *mr)
+{
+    if (mr && mr->owner) {
+        object_ref(mr->owner);
+    }
+}
+
+void memory_region_unref(MemoryRegion *mr)
+{
+    if (mr && mr->owner) {
+        object_unref(mr->owner);
+    }
+}
+
+uint64_t memory_region_size(MemoryRegion *mr)
+{
+    if (int128_eq(mr->size, int128_2_64())) {
+        return UINT64_MAX;
+    }
+    return int128_get64(mr->size);
+}
+
+const char *memory_region_name(MemoryRegion *mr)
+{
+    return mr->name;
+}
+
+bool memory_region_is_ram(MemoryRegion *mr)
+{
+    return mr->ram;
+}
+
+bool memory_region_is_logging(MemoryRegion *mr)
+{
+    return mr->dirty_log_mask;
+}
+
+bool memory_region_is_rom(MemoryRegion *mr)
+{
+    return mr->ram && mr->readonly;
+}
+
+bool memory_region_is_iommu(MemoryRegion *mr)
+{
+    return mr->iommu_ops;
+}
+
+void memory_region_register_iommu_notifier(MemoryRegion *mr, Notifier *n)
+{
+    notifier_list_add(&mr->iommu_notify, n);
+}
+
+void memory_region_unregister_iommu_notifier(Notifier *n)
+{
+    notifier_remove(n);
+}
+
+void memory_region_notify_iommu(MemoryRegion *mr,
+                                IOMMUTLBEntry entry)
+{
+    assert(memory_region_is_iommu(mr));
+    notifier_list_notify(&mr->iommu_notify, &entry);
+}
+
+void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
+{
+    uint8_t mask = 1 << client;
+
+    memory_region_transaction_begin();
+    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
+    memory_region_update_pending |= mr->enabled;
+    memory_region_transaction_commit();
+}
+
+bool memory_region_get_dirty(MemoryRegion *mr, hwaddr addr,
+                             hwaddr size, unsigned client)
+{
+    assert(mr->terminates);
+    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
+}
+
+void memory_region_set_dirty(MemoryRegion *mr, hwaddr addr,
+                             hwaddr size)
+{
+    assert(mr->terminates);
+    cpu_physical_memory_set_dirty_range(mr->ram_addr + addr, size);
+}
+
+bool memory_region_test_and_clear_dirty(MemoryRegion *mr, hwaddr addr,
+                                        hwaddr size, unsigned client)
+{
+    bool ret;
+    assert(mr->terminates);
+    ret = cpu_physical_memory_get_dirty(mr->ram_addr + addr, size, client);
+    if (ret) {
+        cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
+    }
+    return ret;
+}
+
+
+void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
+{
+    AddressSpace *as;
+    FlatRange *fr;
+
+    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+        FlatView *view = address_space_get_flatview(as);
+        FOR_EACH_FLAT_RANGE(fr, view) {
+            if (fr->mr == mr) {
+                MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
+            }
+        }
+        flatview_unref(view);
+    }
+}
+
+void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
+{
+    if (mr->readonly != readonly) {
+        memory_region_transaction_begin();
+        mr->readonly = readonly;
+        memory_region_update_pending |= mr->enabled;
+        memory_region_transaction_commit();
+    }
+}
+
+void memory_region_rom_device_set_romd(MemoryRegion *mr, bool romd_mode)
+{
+    if (mr->romd_mode != romd_mode) {
+        memory_region_transaction_begin();
+        mr->romd_mode = romd_mode;
+        memory_region_update_pending |= mr->enabled;
+        memory_region_transaction_commit();
+    }
+}
+
+void memory_region_reset_dirty(MemoryRegion *mr, hwaddr addr,
+                               hwaddr size, unsigned client)
+{
+    assert(mr->terminates);
+    cpu_physical_memory_reset_dirty(mr->ram_addr + addr, size, client);
+}
+
+void *memory_region_get_ram_ptr(MemoryRegion *mr)
+{
+    if (mr->alias) {
+        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
+    }
+
+    assert(mr->terminates);
+
+    return qemu_get_ram_ptr(mr->ram_addr & TARGET_PAGE_MASK);
+}
+
+static void memory_region_update_coalesced_range_as(MemoryRegion *mr, AddressSpace *as)
+{
+    FlatView *view;
+    FlatRange *fr;
+    CoalescedMemoryRange *cmr;
+    AddrRange tmp;
+    MemoryRegionSection section;
+
+    view = address_space_get_flatview(as);
+    FOR_EACH_FLAT_RANGE(fr, view) {
+        if (fr->mr == mr) {
+            section = (MemoryRegionSection) {
+                .address_space = as,
+                .offset_within_address_space = int128_get64(fr->addr.start),
+                .size = fr->addr.size,
+            };
+
+            MEMORY_LISTENER_CALL(coalesced_mmio_del, Reverse, &section,
+                                 int128_get64(fr->addr.start),
+                                 int128_get64(fr->addr.size));
+            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
+                tmp = addrrange_shift(cmr->addr,
+                                      int128_sub(fr->addr.start,
+                                                 int128_make64(fr->offset_in_region)));
+                if (!addrrange_intersects(tmp, fr->addr)) {
+                    continue;
+                }
+                tmp = addrrange_intersection(tmp, fr->addr);
+                MEMORY_LISTENER_CALL(coalesced_mmio_add, Forward, &section,
+                                     int128_get64(tmp.start),
+                                     int128_get64(tmp.size));
+            }
+        }
+    }
+    flatview_unref(view);
+}
+
+static void memory_region_update_coalesced_range(MemoryRegion *mr)
+{
+    AddressSpace *as;
+
+    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+        memory_region_update_coalesced_range_as(mr, as);
+    }
+}
+
+void memory_region_set_coalescing(MemoryRegion *mr)
+{
+    memory_region_clear_coalescing(mr);
+    memory_region_add_coalescing(mr, 0, int128_get64(mr->size));
+}
+
+void memory_region_add_coalescing(MemoryRegion *mr,
+                                  hwaddr offset,
+                                  uint64_t size)
+{
+    CoalescedMemoryRange *cmr = g_malloc(sizeof(*cmr));
+
+    cmr->addr = addrrange_make(int128_make64(offset), int128_make64(size));
+    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
+    memory_region_update_coalesced_range(mr);
+    memory_region_set_flush_coalesced(mr);
+}
+
+void memory_region_clear_coalescing(MemoryRegion *mr)
+{
+    CoalescedMemoryRange *cmr;
+
+    qemu_flush_coalesced_mmio_buffer();
+    mr->flush_coalesced_mmio = false;
+
+    while (!QTAILQ_EMPTY(&mr->coalesced)) {
+        cmr = QTAILQ_FIRST(&mr->coalesced);
+        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
+        g_free(cmr);
+    }
+    memory_region_update_coalesced_range(mr);
+}
+
+void memory_region_set_flush_coalesced(MemoryRegion *mr)
+{
+    mr->flush_coalesced_mmio = true;
+}
+
+void memory_region_clear_flush_coalesced(MemoryRegion *mr)
+{
+    qemu_flush_coalesced_mmio_buffer();
+    if (QTAILQ_EMPTY(&mr->coalesced)) {
+        mr->flush_coalesced_mmio = false;
+    }
+}
+
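+/* The ioeventfd array is kept sorted by memory_region_ioeventfd_before() so
+ * that address_space_add_del_ioeventfds() can diff the old and new sets in a
+ * single merge pass, signalling only the fds that actually changed.
+ */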
+void memory_region_add_eventfd(MemoryRegion *mr,
+                               hwaddr addr,
+                               unsigned size,
+                               bool match_data,
+                               uint64_t data,
+                               EventNotifier *e)
+{
+    MemoryRegionIoeventfd mrfd = {
+        .addr.start = int128_make64(addr),
+        .addr.size = int128_make64(size),
+        .match_data = match_data,
+        .data = data,
+        .e = e,
+    };
+    unsigned i;
+
+    adjust_endianness(mr, &mrfd.data, size);
+    memory_region_transaction_begin();
+    for (i = 0; i < mr->ioeventfd_nb; ++i) {
+        if (memory_region_ioeventfd_before(mrfd, mr->ioeventfds[i])) {
+            break;
+        }
+    }
+    ++mr->ioeventfd_nb;
+    mr->ioeventfds = g_realloc(mr->ioeventfds,
+                                  sizeof(*mr->ioeventfds) * mr->ioeventfd_nb);
+    memmove(&mr->ioeventfds[i+1], &mr->ioeventfds[i],
+            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb-1 - i));
+    mr->ioeventfds[i] = mrfd;
+    memory_region_update_pending |= mr->enabled;
+    memory_region_transaction_commit();
+}
+
+void memory_region_del_eventfd(MemoryRegion *mr,
+                               hwaddr addr,
+                               unsigned size,
+                               bool match_data,
+                               uint64_t data,
+                               EventNotifier *e)
+{
+    MemoryRegionIoeventfd mrfd = {
+        .addr.start = int128_make64(addr),
+        .addr.size = int128_make64(size),
+        .match_data = match_data,
+        .data = data,
+        .e = e,
+    };
+    unsigned i;
+
+    adjust_endianness(mr, &mrfd.data, size);
+    memory_region_transaction_begin();
+    for (i = 0; i < mr->ioeventfd_nb; ++i) {
+        if (memory_region_ioeventfd_equal(mrfd, mr->ioeventfds[i])) {
+            break;
+        }
+    }
+    assert(i != mr->ioeventfd_nb);
+    memmove(&mr->ioeventfds[i], &mr->ioeventfds[i+1],
+            sizeof(*mr->ioeventfds) * (mr->ioeventfd_nb - (i+1)));
+    --mr->ioeventfd_nb;
+    mr->ioeventfds = g_realloc(mr->ioeventfds,
+                                  sizeof(*mr->ioeventfds)*mr->ioeventfd_nb + 1);
+    memory_region_update_pending |= mr->enabled;
+    memory_region_transaction_commit();
+}
+
+static void memory_region_add_subregion_common(MemoryRegion *mr,
+                                               hwaddr offset,
+                                               MemoryRegion *subregion)
+{
+    MemoryRegion *other;
+
+    memory_region_transaction_begin();
+
+    assert(!subregion->parent);
+    memory_region_ref(subregion);
+    subregion->parent = mr;
+    subregion->addr = offset;
+    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
+        if (subregion->may_overlap || other->may_overlap) {
+            continue;
+        }
+        if (int128_ge(int128_make64(offset),
+                      int128_add(int128_make64(other->addr), other->size))
+            || int128_le(int128_add(int128_make64(offset), subregion->size),
+                         int128_make64(other->addr))) {
+            continue;
+        }
+#if 0
+        printf("warning: subregion collision %llx/%llx (%s) "
+               "vs %llx/%llx (%s)\n",
+               (unsigned long long)offset,
+               (unsigned long long)int128_get64(subregion->size),
+               subregion->name,
+               (unsigned long long)other->addr,
+               (unsigned long long)int128_get64(other->size),
+               other->name);
+#endif
+    }
+    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
+        if (subregion->priority >= other->priority) {
+            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
+            goto done;
+        }
+    }
+    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
+done:
+    memory_region_update_pending |= mr->enabled && subregion->enabled;
+    memory_region_transaction_commit();
+}
+
+
+void memory_region_add_subregion(MemoryRegion *mr,
+                                 hwaddr offset,
+                                 MemoryRegion *subregion)
+{
+    subregion->may_overlap = false;
+    subregion->priority = 0;
+    memory_region_add_subregion_common(mr, offset, subregion);
+}
+
+void memory_region_add_subregion_overlap(MemoryRegion *mr,
+                                         hwaddr offset,
+                                         MemoryRegion *subregion,
+                                         int priority)
+{
+    subregion->may_overlap = true;
+    subregion->priority = priority;
+    memory_region_add_subregion_common(mr, offset, subregion);
+}
+
+void memory_region_del_subregion(MemoryRegion *mr,
+                                 MemoryRegion *subregion)
+{
+    memory_region_transaction_begin();
+    assert(subregion->parent == mr);
+    subregion->parent = NULL;
+    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
+    memory_region_unref(subregion);
+    memory_region_update_pending |= mr->enabled && subregion->enabled;
+    memory_region_transaction_commit();
+}
+
+void memory_region_set_enabled(MemoryRegion *mr, bool enabled)
+{
+    if (enabled == mr->enabled) {
+        return;
+    }
+    memory_region_transaction_begin();
+    mr->enabled = enabled;
+    memory_region_update_pending = true;
+    memory_region_transaction_commit();
+}
+
+void memory_region_set_address(MemoryRegion *mr, hwaddr addr)
+{
+    MemoryRegion *parent = mr->parent;
+    int priority = mr->priority;
+    bool may_overlap = mr->may_overlap;
+
+    if (addr == mr->addr || !parent) {
+        mr->addr = addr;
+        return;
+    }
+
+    memory_region_transaction_begin();
+    memory_region_ref(mr);
+    memory_region_del_subregion(parent, mr);
+    if (may_overlap) {
+        memory_region_add_subregion_overlap(parent, addr, mr, priority);
+    } else {
+        memory_region_add_subregion(parent, addr, mr);
+    }
+    memory_region_unref(mr);
+    memory_region_transaction_commit();
+}
+
+void memory_region_set_alias_offset(MemoryRegion *mr, hwaddr offset)
+{
+    assert(mr->alias);
+
+    if (offset == mr->alias_offset) {
+        return;
+    }
+
+    memory_region_transaction_begin();
+    mr->alias_offset = offset;
+    memory_region_update_pending |= mr->enabled;
+    memory_region_transaction_commit();
+}
+
+ram_addr_t memory_region_get_ram_addr(MemoryRegion *mr)
+{
+    return mr->ram_addr;
+}
+
+static int cmp_flatrange_addr(const void *addr_, const void *fr_)
+{
+    const AddrRange *addr = addr_;
+    const FlatRange *fr = fr_;
+
+    if (int128_le(addrrange_end(*addr), fr->addr.start)) {
+        return -1;
+    } else if (int128_ge(addr->start, addrrange_end(fr->addr))) {
+        return 1;
+    }
+    return 0;
+}
+
+static FlatRange *flatview_lookup(FlatView *view, AddrRange addr)
+{
+    return bsearch(&addr, view->ranges, view->nr,
+                   sizeof(FlatRange), cmp_flatrange_addr);
+}
+
+bool memory_region_present(MemoryRegion *parent, hwaddr addr)
+{
+    MemoryRegion *mr = memory_region_find(parent, addr, 1).mr;
+    if (!mr || (mr == parent)) {
+        return false;
+    }
+    memory_region_unref(mr);
+    return true;
+}
+
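+/* The returned section holds a reference on its MemoryRegion; callers must
+ * drop it when done, e.g. (sketch):
+ *
+ *     MemoryRegionSection sec = memory_region_find(mr, addr, 4);
+ *     if (sec.mr) {
+ *         ... use sec ...
+ *         memory_region_unref(sec.mr);
+ *     }
+ */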
+MemoryRegionSection memory_region_find(MemoryRegion *mr,
+                                       hwaddr addr, uint64_t size)
+{
+    MemoryRegionSection ret = { .mr = NULL };
+    MemoryRegion *root;
+    AddressSpace *as;
+    AddrRange range;
+    FlatView *view;
+    FlatRange *fr;
+
+    addr += mr->addr;
+    for (root = mr; root->parent; ) {
+        root = root->parent;
+        addr += root->addr;
+    }
+
+    as = memory_region_to_address_space(root);
+    range = addrrange_make(int128_make64(addr), int128_make64(size));
+
+    view = address_space_get_flatview(as);
+    fr = flatview_lookup(view, range);
+    if (!fr) {
+        flatview_unref(view);
+        return ret;
+    }
+
+    while (fr > view->ranges && addrrange_intersects(fr[-1].addr, range)) {
+        --fr;
+    }
+
+    ret.mr = fr->mr;
+    ret.address_space = as;
+    range = addrrange_intersection(range, fr->addr);
+    ret.offset_within_region = fr->offset_in_region;
+    ret.offset_within_region += int128_get64(int128_sub(range.start,
+                                                        fr->addr.start));
+    ret.size = range.size;
+    ret.offset_within_address_space = int128_get64(range.start);
+    ret.readonly = fr->readonly;
+    memory_region_ref(ret.mr);
+
+    flatview_unref(view);
+    return ret;
+}
+
+void address_space_sync_dirty_bitmap(AddressSpace *as)
+{
+    FlatView *view;
+    FlatRange *fr;
+
+    view = address_space_get_flatview(as);
+    FOR_EACH_FLAT_RANGE(fr, view) {
+        MEMORY_LISTENER_UPDATE_REGION(fr, as, Forward, log_sync);
+    }
+    flatview_unref(view);
+}
+
+void memory_global_dirty_log_start(void)
+{
+    global_dirty_log = true;
+    MEMORY_LISTENER_CALL_GLOBAL(log_global_start, Forward);
+}
+
+void memory_global_dirty_log_stop(void)
+{
+    global_dirty_log = false;
+    MEMORY_LISTENER_CALL_GLOBAL(log_global_stop, Reverse);
+}
+
+static void listener_add_address_space(MemoryListener *listener,
+                                       AddressSpace *as)
+{
+    FlatView *view;
+    FlatRange *fr;
+
+    if (listener->address_space_filter
+        && listener->address_space_filter != as) {
+        return;
+    }
+
+    if (global_dirty_log) {
+        if (listener->log_global_start) {
+            listener->log_global_start(listener);
+        }
+    }
+
+    view = address_space_get_flatview(as);
+    FOR_EACH_FLAT_RANGE(fr, view) {
+        MemoryRegionSection section = {
+            .mr = fr->mr,
+            .address_space = as,
+            .offset_within_region = fr->offset_in_region,
+            .size = fr->addr.size,
+            .offset_within_address_space = int128_get64(fr->addr.start),
+            .readonly = fr->readonly,
+        };
+        if (listener->region_add) {
+            listener->region_add(listener, &section);
+        }
+    }
+    flatview_unref(view);
+}
+
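+/* Listeners are kept sorted by ->priority.  On registration, region_add (and
+ * log_global_start, if global dirty logging is active) is replayed for every
+ * range already present in the matching address spaces, so a late listener
+ * catches up with the current memory map.
+ */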
+void memory_listener_register(MemoryListener *listener, AddressSpace *filter)
+{
+    MemoryListener *other = NULL;
+    AddressSpace *as;
+
+    listener->address_space_filter = filter;
+    if (QTAILQ_EMPTY(&memory_listeners)
+        || listener->priority >= QTAILQ_LAST(&memory_listeners,
+                                             memory_listeners)->priority) {
+        QTAILQ_INSERT_TAIL(&memory_listeners, listener, link);
+    } else {
+        QTAILQ_FOREACH(other, &memory_listeners, link) {
+            if (listener->priority < other->priority) {
+                break;
+            }
+        }
+        QTAILQ_INSERT_BEFORE(other, listener, link);
+    }
+
+    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+        listener_add_address_space(listener, as);
+    }
+}
+
+void memory_listener_unregister(MemoryListener *listener)
+{
+    QTAILQ_REMOVE(&memory_listeners, listener, link);
+}
+
+void address_space_init(AddressSpace *as, MemoryRegion *root, const char *name)
+{
+    if (QTAILQ_EMPTY(&address_spaces)) {
+        memory_init();
+    }
+
+    memory_region_transaction_begin();
+    as->root = root;
+    as->current_map = g_new(FlatView, 1);
+    flatview_init(as->current_map);
+    as->ioeventfd_nb = 0;
+    as->ioeventfds = NULL;
+    QTAILQ_INSERT_TAIL(&address_spaces, as, address_spaces_link);
+    as->name = g_strdup(name ? name : "anonymous");
+    address_space_init_dispatch(as);
+    memory_region_update_pending |= root->enabled;
+    memory_region_transaction_commit();
+}
+
+void address_space_destroy(AddressSpace *as)
+{
+    /* Flush out anything from MemoryListeners listening in on this */
+    memory_region_transaction_begin();
+    as->root = NULL;
+    memory_region_transaction_commit();
+    QTAILQ_REMOVE(&address_spaces, as, address_spaces_link);
+    address_space_destroy_dispatch(as);
+    flatview_unref(as->current_map);
+    g_free(as->name);
+    g_free(as->ioeventfds);
+}
+
+bool io_mem_read(MemoryRegion *mr, hwaddr addr, uint64_t *pval, unsigned size)
+{
+    return memory_region_dispatch_read(mr, addr, pval, size);
+}
+
+bool io_mem_write(MemoryRegion *mr, hwaddr addr,
+                  uint64_t val, unsigned size)
+{
+    return memory_region_dispatch_write(mr, addr, val, size);
+}
+
+typedef struct MemoryRegionList MemoryRegionList;
+
+struct MemoryRegionList {
+    const MemoryRegion *mr;
+    bool printed;
+    QTAILQ_ENTRY(MemoryRegionList) queue;
+};
+
+typedef QTAILQ_HEAD(queue, MemoryRegionList) MemoryRegionListHead;
+
+static void mtree_print_mr(fprintf_function mon_printf, void *f,
+                           const MemoryRegion *mr, unsigned int level,
+                           hwaddr base,
+                           MemoryRegionListHead *alias_print_queue)
+{
+    MemoryRegionList *new_ml, *ml, *next_ml;
+    MemoryRegionListHead submr_print_queue;
+    const MemoryRegion *submr;
+    unsigned int i;
+
+    if (!mr || !mr->enabled) {
+        return;
+    }
+
+    for (i = 0; i < level; i++) {
+        mon_printf(f, "  ");
+    }
+
+    if (mr->alias) {
+        MemoryRegionList *ml;
+        bool found = false;
+
+        /* check if the alias is already in the queue */
+        QTAILQ_FOREACH(ml, alias_print_queue, queue) {
+            if (ml->mr == mr->alias && !ml->printed) {
+                found = true;
+            }
+        }
+
+        if (!found) {
+            ml = g_new(MemoryRegionList, 1);
+            ml->mr = mr->alias;
+            ml->printed = false;
+            QTAILQ_INSERT_TAIL(alias_print_queue, ml, queue);
+        }
+        mon_printf(f, TARGET_FMT_plx "-" TARGET_FMT_plx
+                   " (prio %d, %c%c): alias %s @%s " TARGET_FMT_plx
+                   "-" TARGET_FMT_plx "\n",
+                   base + mr->addr,
+                   base + mr->addr
+                   + (int128_nz(mr->size) ?
+                      (hwaddr)int128_get64(int128_sub(mr->size,
+                                                      int128_one())) : 0),
+                   mr->priority,
+                   mr->romd_mode ? 'R' : '-',
+                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
+                                                                       : '-',
+                   mr->name,
+                   mr->alias->name,
+                   mr->alias_offset,
+                   mr->alias_offset
+                   + (int128_nz(mr->size) ?
+                      (hwaddr)int128_get64(int128_sub(mr->size,
+                                                      int128_one())) : 0));
+    } else {
+        mon_printf(f,
+                   TARGET_FMT_plx "-" TARGET_FMT_plx " (prio %d, %c%c): %s\n",
+                   base + mr->addr,
+                   base + mr->addr
+                   + (int128_nz(mr->size) ?
+                      (hwaddr)int128_get64(int128_sub(mr->size,
+                                                      int128_one())) : 0),
+                   mr->priority,
+                   mr->romd_mode ? 'R' : '-',
+                   !mr->readonly && !(mr->rom_device && mr->romd_mode) ? 'W'
+                                                                       : '-',
+                   mr->name);
+    }
+
+    QTAILQ_INIT(&submr_print_queue);
+
+    QTAILQ_FOREACH(submr, &mr->subregions, subregions_link) {
+        new_ml = g_new(MemoryRegionList, 1);
+        new_ml->mr = submr;
+        QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
+            if (new_ml->mr->addr < ml->mr->addr ||
+                (new_ml->mr->addr == ml->mr->addr &&
+                 new_ml->mr->priority > ml->mr->priority)) {
+                QTAILQ_INSERT_BEFORE(ml, new_ml, queue);
+                new_ml = NULL;
+                break;
+            }
+        }
+        if (new_ml) {
+            QTAILQ_INSERT_TAIL(&submr_print_queue, new_ml, queue);
+        }
+    }
+
+    QTAILQ_FOREACH(ml, &submr_print_queue, queue) {
+        mtree_print_mr(mon_printf, f, ml->mr, level + 1, base + mr->addr,
+                       alias_print_queue);
+    }
+
+    QTAILQ_FOREACH_SAFE(ml, &submr_print_queue, queue, next_ml) {
+        g_free(ml);
+    }
+}
+
+void mtree_info(fprintf_function mon_printf, void *f)
+{
+    MemoryRegionListHead ml_head;
+    MemoryRegionList *ml, *ml2;
+    AddressSpace *as;
+
+    QTAILQ_INIT(&ml_head);
+
+    QTAILQ_FOREACH(as, &address_spaces, address_spaces_link) {
+        mon_printf(f, "%s\n", as->name);
+        mtree_print_mr(mon_printf, f, as->root, 0, 0, &ml_head);
+    }
+
+    mon_printf(f, "aliases\n");
+    /* print aliased regions */
+    QTAILQ_FOREACH(ml, &ml_head, queue) {
+        if (!ml->printed) {
+            mon_printf(f, "%s\n", ml->mr->name);
+            mtree_print_mr(mon_printf, f, ml->mr, 0, 0, &ml_head);
+        }
+    }
+
+    QTAILQ_FOREACH_SAFE(ml, &ml_head, queue, ml2) {
+        g_free(ml);
+    }
+}
diff --git a/memory_mapping.c b/memory_mapping.c
new file mode 100644
index 0000000..ebb7e03
--- /dev/null
+++ b/memory_mapping.c
@@ -0,0 +1,354 @@
+/*
+ * QEMU memory mapping
+ *
+ * Copyright Fujitsu, Corp. 2011, 2012
+ *
+ * Authors:
+ *     Wen Congyang <wency@cn.fujitsu.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include <glib.h>
+
+#include "cpu.h"
+#include "exec/cpu-all.h"
+#include "qapi/error.h"
+#include "sysemu/memory_mapping.h"
+#include "exec/memory.h"
+#include "exec/address-spaces.h"
+
+//#define DEBUG_GUEST_PHYS_REGION_ADD
+
+static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
+                                                   MemoryMapping *mapping)
+{
+    MemoryMapping *p;
+
+    QTAILQ_FOREACH(p, &list->head, next) {
+        if (p->phys_addr >= mapping->phys_addr) {
+            QTAILQ_INSERT_BEFORE(p, mapping, next);
+            return;
+        }
+    }
+    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
+}
+
+static void create_new_memory_mapping(MemoryMappingList *list,
+                                      hwaddr phys_addr,
+                                      hwaddr virt_addr,
+                                      ram_addr_t length)
+{
+    MemoryMapping *memory_mapping;
+
+    memory_mapping = g_malloc(sizeof(MemoryMapping));
+    memory_mapping->phys_addr = phys_addr;
+    memory_mapping->virt_addr = virt_addr;
+    memory_mapping->length = length;
+    list->last_mapping = memory_mapping;
+    list->num++;
+    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
+}
+
+static inline bool mapping_contiguous(MemoryMapping *map,
+                                      hwaddr phys_addr,
+                                      hwaddr virt_addr)
+{
+    return phys_addr == map->phys_addr + map->length &&
+           virt_addr == map->virt_addr + map->length;
+}
+
+/*
+ * Do [map->phys_addr, map->phys_addr + map->length) and
+ * [phys_addr, phys_addr + length) intersect?
+ */
+static inline bool mapping_have_same_region(MemoryMapping *map,
+                                            hwaddr phys_addr,
+                                            ram_addr_t length)
+{
+    return !(phys_addr + length < map->phys_addr ||
+             phys_addr >= map->phys_addr + map->length);
+}
+
+/*
+ * [map->phys_addr, map->phys_addr + map->length) and
+ * [phys_addr, phys_addr + length) intersect.  Do the virtual addresses in the
+ * intersection disagree (i.e. conflict)?
+ */
+static inline bool mapping_conflict(MemoryMapping *map,
+                                    hwaddr phys_addr,
+                                    hwaddr virt_addr)
+{
+    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
+}
+
+/*
+ * [map->virt_addr, map->virt_addr + map->length) and
+ * [virt_addr, virt_addr + length) intersect, and the physical addresses in
+ * the intersection match: extend @map to cover the union of the two virtual
+ * ranges.
+ */
+static inline void mapping_merge(MemoryMapping *map,
+                                 hwaddr virt_addr,
+                                 ram_addr_t length)
+{
+    if (virt_addr < map->virt_addr) {
+        map->length += map->virt_addr - virt_addr;
+        map->virt_addr = virt_addr;
+    }
+
+    if ((virt_addr + length) >
+        (map->virt_addr + map->length)) {
+        map->length = virt_addr + length - map->virt_addr;
+    }
+}
+
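+/* Illustrative example: adding (phys 0x3000, virt 0x7000, len 0x1000) to a
+ * list whose last mapping is (phys 0x2000, virt 0x6000, len 0x1000) is
+ * contiguous in both spaces, so the existing mapping simply grows to
+ * len 0x2000; a region that overlaps with conflicting virtual addresses is
+ * inserted as a separate mapping instead.
+ */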
+void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
+                                          hwaddr phys_addr,
+                                          hwaddr virt_addr,
+                                          ram_addr_t length)
+{
+    MemoryMapping *memory_mapping, *last_mapping;
+
+    if (QTAILQ_EMPTY(&list->head)) {
+        create_new_memory_mapping(list, phys_addr, virt_addr, length);
+        return;
+    }
+
+    last_mapping = list->last_mapping;
+    if (last_mapping) {
+        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
+            last_mapping->length += length;
+            return;
+        }
+    }
+
+    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
+        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
+            memory_mapping->length += length;
+            list->last_mapping = memory_mapping;
+            return;
+        }
+
+        if (phys_addr + length < memory_mapping->phys_addr) {
+            /* create a new region before memory_mapping */
+            break;
+        }
+
+        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
+            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
+                continue;
+            }
+
+            /* merge this region into memory_mapping */
+            mapping_merge(memory_mapping, virt_addr, length);
+            list->last_mapping = memory_mapping;
+            return;
+        }
+    }
+
+    /* This region cannot be merged into any existing memory mapping. */
+    create_new_memory_mapping(list, phys_addr, virt_addr, length);
+}
+
+void memory_mapping_list_free(MemoryMappingList *list)
+{
+    MemoryMapping *p, *q;
+
+    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
+        QTAILQ_REMOVE(&list->head, p, next);
+        g_free(p);
+    }
+
+    list->num = 0;
+    list->last_mapping = NULL;
+}
+
+void memory_mapping_list_init(MemoryMappingList *list)
+{
+    list->num = 0;
+    list->last_mapping = NULL;
+    QTAILQ_INIT(&list->head);
+}
+
+void guest_phys_blocks_free(GuestPhysBlockList *list)
+{
+    GuestPhysBlock *p, *q;
+
+    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
+        QTAILQ_REMOVE(&list->head, p, next);
+        g_free(p);
+    }
+    list->num = 0;
+}
+
+void guest_phys_blocks_init(GuestPhysBlockList *list)
+{
+    list->num = 0;
+    QTAILQ_INIT(&list->head);
+}
+
+typedef struct GuestPhysListener {
+    GuestPhysBlockList *list;
+    MemoryListener listener;
+} GuestPhysListener;
+
+static void guest_phys_blocks_region_add(MemoryListener *listener,
+                                         MemoryRegionSection *section)
+{
+    GuestPhysListener *g;
+    uint64_t section_size;
+    hwaddr target_start, target_end;
+    uint8_t *host_addr;
+    GuestPhysBlock *predecessor;
+
+    /* we only care about RAM */
+    if (!memory_region_is_ram(section->mr)) {
+        return;
+    }
+
+    g            = container_of(listener, GuestPhysListener, listener);
+    section_size = int128_get64(section->size);
+    target_start = section->offset_within_address_space;
+    target_end   = target_start + section_size;
+    host_addr    = memory_region_get_ram_ptr(section->mr) +
+                   section->offset_within_region;
+    predecessor  = NULL;
+
+    /* find continuity in guest physical address space */
+    if (!QTAILQ_EMPTY(&g->list->head)) {
+        hwaddr predecessor_size;
+
+        predecessor = QTAILQ_LAST(&g->list->head, GuestPhysBlockHead);
+        predecessor_size = predecessor->target_end - predecessor->target_start;
+
+        /* the memory API guarantees monotonically increasing traversal */
+        g_assert(predecessor->target_end <= target_start);
+
+        /* we want continuity in both guest-physical and host-virtual memory */
+        if (predecessor->target_end < target_start ||
+            predecessor->host_addr + predecessor_size != host_addr) {
+            predecessor = NULL;
+        }
+    }
+
+    if (predecessor == NULL) {
+        /* isolated mapping, allocate it and add it to the list */
+        GuestPhysBlock *block = g_malloc0(sizeof *block);
+
+        block->target_start = target_start;
+        block->target_end   = target_end;
+        block->host_addr    = host_addr;
+
+        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
+        ++g->list->num;
+    } else {
+        /* expand predecessor until @target_end; predecessor's start doesn't
+         * change
+         */
+        predecessor->target_end = target_end;
+    }
+
+#ifdef DEBUG_GUEST_PHYS_REGION_ADD
+    fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end="
+            TARGET_FMT_plx ": %s (count: %u)\n", __func__, target_start,
+            target_end, predecessor ? "joined" : "added", g->list->num);
+#endif
+}
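+
+/*
+ * For example, two adjacent 4 KiB RAM sections at guest-physical 0x0 and
+ * 0x1000 whose host buffers are also adjacent collapse into one
+ * GuestPhysBlock covering [0x0, 0x2000); if the host buffers are not
+ * adjacent, two blocks are kept even though the guest-physical range is
+ * contiguous.
+ */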
+
+void guest_phys_blocks_append(GuestPhysBlockList *list)
+{
+    GuestPhysListener g = { 0 };
+
+    g.list = list;
+    g.listener.region_add = &guest_phys_blocks_region_add;
+    memory_listener_register(&g.listener, &address_space_memory);
+    memory_listener_unregister(&g.listener);
+}
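+
+/*
+ * Registering the listener makes the memory core replay every section that
+ * is currently mapped into address_space_memory through
+ * guest_phys_blocks_region_add(), so the list is complete as soon as
+ * memory_listener_register() returns. Illustrative usage:
+ *
+ *     GuestPhysBlockList blocks;
+ *
+ *     guest_phys_blocks_init(&blocks);
+ *     guest_phys_blocks_append(&blocks);
+ *     ... walk blocks.head ...
+ *     guest_phys_blocks_free(&blocks);
+ */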
+
+#ifndef CONFIG_ANDROID
+static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
+{
+    CPUState *cpu;
+
+    CPU_FOREACH(cpu) {
+        if (cpu_paging_enabled(cpu)) {
+            return cpu;
+        }
+    }
+
+    return NULL;
+}
+
+void qemu_get_guest_memory_mapping(MemoryMappingList *list,
+                                   const GuestPhysBlockList *guest_phys_blocks,
+                                   Error **errp)
+{
+    CPUState *cpu, *first_paging_enabled_cpu;
+    GuestPhysBlock *block;
+    ram_addr_t offset, length;
+
+    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
+    if (first_paging_enabled_cpu) {
+        for (cpu = first_paging_enabled_cpu; cpu != NULL;
+             cpu = CPU_NEXT(cpu)) {
+            Error *err = NULL;
+            cpu_get_memory_mapping(cpu, list, &err);
+            if (err) {
+                error_propagate(errp, err);
+                return;
+            }
+        }
+        return;
+    }
+
+    /*
+     * If the guest doesn't use paging, each virtual address is equal to its
+     * physical address.
+     */
+    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+        offset = block->target_start;
+        length = block->target_end - block->target_start;
+        create_new_memory_mapping(list, offset, offset, length);
+    }
+}
+#endif
+
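+/*
+ * Like qemu_get_guest_memory_mapping(), but never walks the guest page
+ * tables: every block is recorded with virt_addr 0, which
+ * memory_mapping_filter() below treats as "no virtual address known".
+ */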
+void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
+                                   const GuestPhysBlockList *guest_phys_blocks)
+{
+    GuestPhysBlock *block;
+
+    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
+        create_new_memory_mapping(list, block->target_start, 0,
+                                  block->target_end - block->target_start);
+    }
+}
+
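+/*
+ * Drop every mapping that lies entirely outside [begin, begin + length) and
+ * clip the remaining mappings to that window.
+ */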
+void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
+                           int64_t length)
+{
+    MemoryMapping *cur, *next;
+
+    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
+        if (cur->phys_addr >= begin + length ||
+            cur->phys_addr + cur->length <= begin) {
+            QTAILQ_REMOVE(&list->head, cur, next);
+            list->num--;
+            continue;
+        }
+
+        if (cur->phys_addr < begin) {
+            cur->length -= begin - cur->phys_addr;
+            if (cur->virt_addr) {
+                cur->virt_addr += begin - cur->phys_addr;
+            }
+            cur->phys_addr = begin;
+        }
+
+        if (cur->phys_addr + cur->length > begin + length) {
+            cur->length -= cur->phys_addr + cur->length - begin - length;
+        }
+    }
+}
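+
+/*
+ * For example, filtering with begin 0x2000 and length 0x2000 (window
+ * [0x2000, 0x4000)) removes a mapping at phys [0x5000, 0x6000) entirely,
+ * clips a mapping at phys [0x1000, 0x3000) to [0x2000, 0x3000) (advancing
+ * its virt_addr by 0x1000 when one is set), and clips a mapping at
+ * phys [0x3000, 0x5000) to [0x3000, 0x4000).
+ */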