From b2432d1b415143a55083782712f11241f194c0ba Mon Sep 17 00:00:00 2001
From: Luke Shumaker <lukeshu@parabola.nu>
Date: Wed, 27 Dec 2017 20:05:52 -0500
Subject: [PATCH 05/10] linux-user: init_guest_space: Clarify page alignment
 logic

There are 3 parts to this change:
 - Add a comment showing the relative sizes and positions of the blocks of
   memory
 - Introduce and use new aligned_{start,size} instead of adjusting
   real_{start,size} (the underlying idiom is sketched below)
 - When we clean up (on failure), munmap(real_start, real_size) instead of
   munmap(aligned_start, aligned_size).  It *shouldn't* make any
   difference, but I will admit that this does mean we are making the
   syscall with different values, so this isn't quite a no-op patch.
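
For illustration, the over-allocate-then-align idiom behind the new
aligned_{start,size} names, sketched standalone (a hedged sketch, not
the QEMU code itself; reserve_aligned and its parameters are
hypothetical stand-ins, with align playing the role of
qemu_host_page_size):

    #include <stddef.h>
    #include <stdint.h>
    #include <sys/mman.h>

    /* Reserve 'size' bytes starting at an address aligned to 'align'
     * (a power of two, possibly coarser than the kernel page size).
     * Growing the reservation by a full 'align' guarantees that an
     * aligned start fits inside it wherever the kernel places the
     * mapping. */
    static void *reserve_aligned(size_t size, size_t align,
                                 uintptr_t *real_start_out,
                                 size_t *real_size_out)
    {
        size_t real_size = size + align;
        void *p = mmap(NULL, real_size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            return MAP_FAILED;
        }
        uintptr_t real_start = (uintptr_t)p;
        uintptr_t aligned_start =
            (real_start + align - 1) & ~((uintptr_t)align - 1);

        /* Report the *real* bounds: cleanup must munmap the whole
         * reservation, padding included (part 3 of this change). */
        *real_start_out = real_start;
        *real_size_out = real_size;
        return (void *)aligned_start;
    }

QEMU's actual loop additionally validates where the aligned block
landed and, on 32-bit ARM, whether the commpage still fits, retrying
at a new current_start otherwise.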

Signed-off-by: Luke Shumaker <lukeshu@parabola.nu>
---
 linux-user/elfload.c | 43 +++++++++++++++++++++++++++++++++----------
 1 file changed, 33 insertions(+), 10 deletions(-)
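
For concreteness, a standalone check of the arithmetic behind the
comment added below (hypothetical numbers; align stands in for
qemu_host_page_size, and this example is not part of the patch):

    #include <assert.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Hypothetical values: mmap returned a 4 KiB-aligned address,
         * but we need 64 KiB alignment. */
        uintptr_t real_start = 0x10001000;        /* from mmap */
        uintptr_t align      = 0x10000;           /* qemu_host_page_size */
        uintptr_t host_size  = 0x200000;          /* requested guest space */
        uintptr_t real_size  = host_size + align; /* one full extra page */

        uintptr_t aligned_start = (real_start + align - 1) & ~(align - 1);

        /* The aligned block fits inside the real reservation, with
         * < 1 page of padding on each side... */
        assert(aligned_start >= real_start);
        assert(aligned_start + host_size <= real_start + real_size);

        /* ...so cleanup must unmap [real_start, real_start + real_size)
         * or the padding leaks. */
        printf("real:    [%#" PRIxPTR ", %#" PRIxPTR ")\n",
               real_start, real_start + real_size);
        printf("aligned: [%#" PRIxPTR ", %#" PRIxPTR ")\n",
               aligned_start, aligned_start + host_size);
        return 0;
    }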

diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index f41cecc3cb..22f2632dfa 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -1827,7 +1827,7 @@ unsigned long init_guest_space(unsigned long host_start,
                                unsigned long guest_start,
                                bool fixed)
 {
-    unsigned long current_start, real_start;
+    unsigned long current_start, aligned_start;
     int flags;
 
     assert(host_start || host_size);
@@ -1853,7 +1853,8 @@ unsigned long init_guest_space(unsigned long host_start,
     /* Otherwise, a non-zero size region of memory needs to be mapped
      * and validated.  */
     while (1) {
-        unsigned long real_size = host_size;
+        unsigned long real_start, real_size, aligned_size;
+        aligned_size = real_size = host_size;
 
         /* Do not use mmap_find_vma here because that is limited to the
          * guest address space.  We are going to make the
@@ -1867,26 +1868,48 @@ unsigned long init_guest_space(unsigned long host_start,
 
         /* Ensure the address is properly aligned.  */
         if (real_start & ~qemu_host_page_mask) {
+            /* Ideally, we adjust like
+             *
+             *    pages: [  ][  ][  ][  ][  ]
+             *      old:   [   real   ]
+             *             [ aligned  ]
+             *      new:   [     real     ]
+             *               [ aligned  ]
+             *
+             * But if there is something else mapped right after it,
+             * then obviously it won't have room to grow, and the
+             * kernel will put the new larger real someplace else with
+             * unknown alignment (if we made it to here, then
+             * fixed=false).  That is why we grow real by a full page
+             * size, instead of by part of one: even if we get
+             * moved, we can still guarantee alignment.  But this does
+             * mean that there is padding of < 1 page both before
+             * and after the aligned range; the "after" padding could
+             * cause problems for ARM emulation, where it could butt
+             * into where we need to put the commpage.
+             */
             munmap((void *)real_start, host_size);
-            real_size = host_size + qemu_host_page_size;
+            real_size = aligned_size + qemu_host_page_size;
             real_start = (unsigned long)
                 mmap((void *)real_start, real_size, PROT_NONE, flags, -1, 0);
             if (real_start == (unsigned long)-1) {
                 return (unsigned long)-1;
             }
-            real_start = HOST_PAGE_ALIGN(real_start);
+            aligned_start = HOST_PAGE_ALIGN(real_start);
+        } else {
+            aligned_start = real_start;
         }
 
         /* Check to see if the address is valid.  */
-        if (!host_start || real_start == current_start) {
+        if (!host_start || aligned_start == current_start) {
 #if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
             /* On 32-bit ARM, we need to also be able to map the commpage.  */
-            int valid = init_guest_commpage(real_start - guest_start,
-                                            real_size + guest_start);
+            int valid = init_guest_commpage(aligned_start - guest_start,
+                                            aligned_size + guest_start);
             if (valid == 1) {
                 break;
             } else if (valid == -1) {
-                munmap((void *)real_start, host_size);
+                munmap((void *)real_start, real_size);
                 return (unsigned long)-1;
             }
             /* valid == 0, so try again. */
@@ -1905,7 +1928,7 @@ unsigned long init_guest_space(unsigned long host_start,
          * address space randomization put a shared library somewhere
          * inconvenient.
          */
-        munmap((void *)real_start, host_size);
+        munmap((void *)real_start, real_size);
         current_start += qemu_host_page_size;
         if (host_start == current_start) {
             /* Theoretically possible if host doesn't have any suitably
@@ -1917,7 +1940,7 @@ unsigned long init_guest_space(unsigned long host_start,
 
     qemu_log_mask(CPU_LOG_PAGE, "Reserved 0x%lx bytes of guest address space\n", host_size);
 
-    return real_start;
+    return aligned_start;
 }
 
 static void probe_guest_base(const char *image_name,
-- 
2.15.1