Diffstat (limited to 'libre/qemu-user-static/0010-linux-user-init_guest_space-Try-to-make-ARM-space-co.patch')
-rw-r--r--  libre/qemu-user-static/0010-linux-user-init_guest_space-Try-to-make-ARM-space-co.patch  99
1 file changed, 99 insertions, 0 deletions
diff --git a/libre/qemu-user-static/0010-linux-user-init_guest_space-Try-to-make-ARM-space-co.patch b/libre/qemu-user-static/0010-linux-user-init_guest_space-Try-to-make-ARM-space-co.patch
new file mode 100644
index 000000000..587293564
--- /dev/null
+++ b/libre/qemu-user-static/0010-linux-user-init_guest_space-Try-to-make-ARM-space-co.patch
@@ -0,0 +1,99 @@
+From f8ccd5e7353937e0a48ccd8bfbc07202dc85afac Mon Sep 17 00:00:00 2001
+From: Luke Shumaker <lukeshu@parabola.nu>
+Date: Thu, 28 Dec 2017 07:27:20 -0500
+Cc: paul@codesourcery.com
+Subject: [PATCH 10/10] linux-user: init_guest_space: Try to make ARM
+ space+commpage continuous
+
+For 32-bit ARM targets we also need to map a commpage at a fixed distance
+after the usable memory that init_guest_space maps. The normal
+init_guest_space logic does not take this into account when searching for an
+address range.
+
+If !host_start, then try to find a big continuous segment where we can put
+both the usable memory and the commpage; we then munmap that segment and
+set current_start to that address; and let the normal code mmap the usable
+memory and the commpage separately. That is: if we don't have a hint of
+where to start looking for memory, come up with one that is better than
+NULL. Depending on host_size and guest_start, there may or may not be a
+gap between the usable memory and the commpage, so this is slightly more
+restrictive than it needs to be; but it's only a hint, so that's OK.
+
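+For illustration only (not part of this patch), the reservation trick
+boils down to something like the following standalone sketch; the
+0xffff1000 size assumes 4 KiB host pages and guest_start == 0, and the
+real code below additionally retries with extra slack so that the
+result can be aligned to a host page boundary:
+
+    #include <stdio.h>
+    #include <sys/mman.h>
+
+    int main(void)
+    {
+        /* big enough for the guest RAM plus the commpage at 0xffff0f00 */
+        size_t full_size = 0xffff1000;
+        void *probe = mmap(NULL, full_size, PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           -1, 0);
+        if (probe == MAP_FAILED) {
+            perror("mmap");
+            return 1;
+        }
+        /* release the reservation; we only keep the address as a hint */
+        munmap(probe, full_size);
+        printf("hint for current_start: %p\n", probe);
+        return 0;
+    }
+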
+We only do that for !host_start, because if host_start, then either:
+ - we got an address passed in with -B, in which case we don't want to
+ interfere with what the user said;
+ - or host_start is based off of the ELF image's loaddr. The check "if
+ (host_start && real_start != current_start)" suggests that we really
+   want the lowest available address that is >= loaddr. I don't know why that
+ is, but I'm trusting that Paul Brook knew what he was doing when he
+ wrote the original version of that check in
+ c581deda322080e8beb88b2e468d4af54454e4b3 way back in 2010.
+
+Signed-off-by: Luke Shumaker <lukeshu@parabola.nu>
+---
+ linux-user/elfload.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 49 insertions(+)
+
+diff --git a/linux-user/elfload.c b/linux-user/elfload.c
+index 7736ea2c3a..cd3a7d877d 100644
+--- a/linux-user/elfload.c
++++ b/linux-user/elfload.c
+@@ -1857,6 +1857,55 @@ unsigned long init_guest_space(unsigned long host_start,
+
+ /* Otherwise, a non-zero size region of memory needs to be mapped
+ * and validated. */
++
++#if defined(TARGET_ARM) && !defined(TARGET_AARCH64)
++ /* On 32-bit ARM, we need to map not just the usable memory, but
++ * also the commpage. Try to find a suitable place by allocating
++ * a big chunk for all of it. If host_start, then the naive
++     * strategy is probably good enough.
++ */
++ if (!host_start) {
++ unsigned long guest_full_size, host_full_size, real_start;
++
++ guest_full_size =
++ (0xffff0f00 & qemu_host_page_mask) + qemu_host_page_size;
++ host_full_size = guest_full_size - guest_start;
++ real_start = (unsigned long)
++ mmap(NULL, host_full_size, PROT_NONE, flags, -1, 0);
++ if (real_start == (unsigned long)-1) {
++ if (host_size < host_full_size - qemu_host_page_size) {
++                /* We failed to map a continuous segment, but we're
++ * allowed to have a gap between the usable memory and
++ * the commpage where other things can be mapped.
++ * This sparseness gives us more flexibility to find
++ * an address range.
++ */
++ goto naive;
++ }
++ return (unsigned long)-1;
++ }
++ munmap((void *)real_start, host_full_size);
++ if (real_start & ~qemu_host_page_mask) {
++ /* The same thing again, but with an extra qemu_host_page_size
++             * of slack so we can align the result to a host page boundary.
++ */
++ unsigned long real_size = host_full_size + qemu_host_page_size;
++ real_start = (unsigned long)
++ mmap(NULL, real_size, PROT_NONE, flags, -1, 0);
++ if (real_start == (unsigned long)-1) {
++ if (host_size < host_full_size - qemu_host_page_size) {
++ goto naive;
++ }
++ return (unsigned long)-1;
++ }
++ munmap((void *)real_start, real_size);
++ real_start = HOST_PAGE_ALIGN(real_start);
++ }
++ current_start = real_start;
++ }
++ naive:
++#endif
++
+ while (1) {
+ unsigned long real_start, real_size, aligned_size;
+ aligned_size = real_size = host_size;
+--
+2.15.1
+