author     David P <megver83@parabola.nu>    2020-02-24 22:25:49 -0300
committer  David P <megver83@parabola.nu>    2020-02-24 22:25:49 -0300
commit     fb261dccc26646b9c46e8c2a080b202034aabc0e (patch)
tree       89cefe6ad76e99c50356c6e6bcddb3c98a291fa3 /libre/linux-libre-pae
parent     bd22097c9a7814644854982dc80e5c271332902c (diff)
download   abslibre-fb261dccc26646b9c46e8c2a080b202034aabc0e.tar.gz
           abslibre-fb261dccc26646b9c46e8c2a080b202034aabc0e.tar.bz2
           abslibre-fb261dccc26646b9c46e8c2a080b202034aabc0e.zip
updpkg: libre/linux-libre-pae 5.5.5-1
Signed-off-by: David P <megver83@parabola.nu>
Diffstat (limited to 'libre/linux-libre-pae')
16 files changed, 1196 insertions, 294 deletions
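The commit itself is a routine package bump: pkgver in the PKGBUILD moves from 5.5.2 to 5.5.5, the set of patches extracted from the Arch kernel tree grows from five to thirteen, and the sha512sums array is refreshed to match. As a rough sketch of how such an update is usually prepared (the exact commands are an assumption, not taken from this commit; updpkgsums comes from pacman-contrib):

    $ cd abslibre/libre/linux-libre-pae
    $ sed -i 's/^pkgver=.*/pkgver=5.5.5/' PKGBUILD   # bump to the new upstream release
    $ updpkgsums                                     # recompute sha512sums for the new sources and patches
    $ makepkg --verifysource                         # fetch sources and check signatures before building
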
diff --git a/libre/linux-libre-pae/0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch b/libre/linux-libre-pae/0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch index f7b1e9f54..0593d2420 100644 --- a/libre/linux-libre-pae/0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch +++ b/libre/linux-libre-pae/0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch @@ -1,7 +1,7 @@ -From 46190aab49a3591ebd824367627b47492653fa31 Mon Sep 17 00:00:00 2001 +From 5bb4af349a4a3f6393d5b491d8528d60088ef3c0 Mon Sep 17 00:00:00 2001 From: "Jan Alexander Steffens (heftig)" <jan.steffens@gmail.com> Date: Mon, 16 Sep 2019 04:53:20 +0200 -Subject: [PATCH 1/5] ZEN: Add sysctl and CONFIG to disallow unprivileged +Subject: [PATCH 01/13] ZEN: Add sysctl and CONFIG to disallow unprivileged CLONE_NEWUSER Our default behavior continues to match the vanilla kernel. @@ -128,5 +128,5 @@ index 8eadadc478f9..c36ecd19562c 100644 static DEFINE_MUTEX(userns_state_mutex); -- -2.25.0 +2.25.1 diff --git a/libre/linux-libre-pae/0002-iwlwifi-pcie-restore-support-for-Killer-Qu-C0-NICs.patch b/libre/linux-libre-pae/0002-iwlwifi-pcie-restore-support-for-Killer-Qu-C0-NICs.patch index 679faad2b..348c6c3a1 100644 --- a/libre/linux-libre-pae/0002-iwlwifi-pcie-restore-support-for-Killer-Qu-C0-NICs.patch +++ b/libre/linux-libre-pae/0002-iwlwifi-pcie-restore-support-for-Killer-Qu-C0-NICs.patch @@ -1,7 +1,7 @@ -From 013d00e92f514854803a9abcddee1255b4cc348e Mon Sep 17 00:00:00 2001 +From bc21f2c8df72a57532eb295f77763154263b16b6 Mon Sep 17 00:00:00 2001 From: "Jan Alexander Steffens (heftig)" <jan.steffens@gmail.com> Date: Tue, 24 Dec 2019 06:16:39 +0100 -Subject: [PATCH 2/5] iwlwifi: pcie: restore support for Killer Qu C0 NICs +Subject: [PATCH 02/13] iwlwifi: pcie: restore support for Killer Qu C0 NICs Commit 809805a820c6 ("iwlwifi: pcie: move some cfg mangling from trans_pcie_alloc to probe") refactored the cfg mangling. Unfortunately, @@ -38,5 +38,5 @@ index b0b7eca1754e..de62a6dc4e73 100644 /* same thing for QuZ... */ -- -2.25.0 +2.25.1 diff --git a/libre/linux-libre-pae/0003-ALSA-hda-Fix-DP-MST-support-for-NVIDIA-codecs.patch b/libre/linux-libre-pae/0003-ALSA-hda-Fix-DP-MST-support-for-NVIDIA-codecs.patch deleted file mode 100644 index 665022cd6..000000000 --- a/libre/linux-libre-pae/0003-ALSA-hda-Fix-DP-MST-support-for-NVIDIA-codecs.patch +++ /dev/null @@ -1,182 +0,0 @@ -From 6b43b6519a752c51657b9afa4eb844cad29d2b8c Mon Sep 17 00:00:00 2001 -From: Nikhil Mahale <nmahale@nvidia.com> -Date: Mon, 3 Feb 2020 15:36:17 +0530 -Subject: [PATCH 3/5] ALSA: hda - Fix DP-MST support for NVIDIA codecs - -If dyn_pcm_assign is set, different jack objects are being created -for pcm and pins. - -If dyn_pcm_assign is set, generic_hdmi_build_jack() calls into -add_hdmi_jack_kctl() to create and track separate jack object for -pcm. Like sync_eld_via_acomp(), hdmi_present_sense_via_verbs() also -need to report status change of the pcm jack. - -Rename pin_idx_to_jack() to pin_idx_to_pcm_jack(). The code to -report status change of pcm jack, move it to update_eld() which is -common for acomp and !acomp code paths. 
- -Fixes: 5398e94fb753 ALSA: hda - Add DP-MST support for NVIDIA codecs -Signed-off-by: Nikhil Mahale <nmahale@nvidia.com> ---- - sound/pci/hda/patch_hdmi.c | 94 +++++++++++++++++++------------------- - 1 file changed, 47 insertions(+), 47 deletions(-) - -diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c -index 630b1f5c276d..469b25065643 100644 ---- a/sound/pci/hda/patch_hdmi.c -+++ b/sound/pci/hda/patch_hdmi.c -@@ -1477,6 +1477,35 @@ static void hdmi_pcm_reset_pin(struct hdmi_spec *spec, - per_pin->channels = 0; - } - -+static struct snd_jack *pin_idx_to_pcm_jack(struct hda_codec *codec, -+ struct hdmi_spec_per_pin *per_pin) -+{ -+ struct hdmi_spec *spec = codec->spec; -+ struct snd_jack *jack = NULL; -+ struct hda_jack_tbl *jack_tbl; -+ -+ /* if !dyn_pcm_assign, get jack from hda_jack_tbl -+ * in !dyn_pcm_assign case, spec->pcm_rec[].jack is not -+ * NULL even after snd_hda_jack_tbl_clear() is called to -+ * free snd_jack. This may cause access invalid memory -+ * when calling snd_jack_report -+ */ -+ if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign) { -+ jack = spec->pcm_rec[per_pin->pcm_idx].jack; -+ } else if (!spec->dyn_pcm_assign) { -+ /* -+ * jack tbl doesn't support DP MST -+ * DP MST will use dyn_pcm_assign, -+ * so DP MST will never come here -+ */ -+ jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid, -+ per_pin->dev_id); -+ if (jack_tbl) -+ jack = jack_tbl->jack; -+ } -+ return jack; -+} -+ - /* update per_pin ELD from the given new ELD; - * setup info frame and notification accordingly - */ -@@ -1487,9 +1516,15 @@ static bool update_eld(struct hda_codec *codec, - struct hdmi_eld *pin_eld = &per_pin->sink_eld; - struct hdmi_spec *spec = codec->spec; - bool old_eld_valid = pin_eld->eld_valid; -+ struct snd_jack *pcm_jack; - bool eld_changed; - int pcm_idx; - -+ /* pcm_idx >=0 before update_eld() means it is in monitor -+ * disconnected event. Jack must be fetched before update_eld() -+ */ -+ pcm_jack = pin_idx_to_pcm_jack(codec, per_pin); -+ - /* for monitor disconnection, save pcm_idx firstly */ - pcm_idx = per_pin->pcm_idx; - if (spec->dyn_pcm_assign) { -@@ -1544,6 +1579,14 @@ static bool update_eld(struct hda_codec *codec, - SNDRV_CTL_EVENT_MASK_VALUE | - SNDRV_CTL_EVENT_MASK_INFO, - &get_hdmi_pcm(spec, pcm_idx)->eld_ctl->id); -+ -+ if (!pcm_jack) -+ pcm_jack = pin_idx_to_pcm_jack(codec, per_pin); -+ if (eld_changed && pcm_jack) -+ snd_jack_report(pcm_jack, -+ (eld->monitor_present && eld->eld_valid) ? -+ SND_JACK_AVOUT : 0); -+ - return eld_changed; - } - -@@ -1566,7 +1609,6 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, - * the unsolicited response to avoid custom WARs. - */ - int present; -- bool ret; - bool do_repoll = false; - - present = snd_hda_jack_pin_sense(codec, pin_nid, dev_id); -@@ -1600,45 +1642,14 @@ static bool hdmi_present_sense_via_verbs(struct hdmi_spec_per_pin *per_pin, - else - update_eld(codec, per_pin, eld); - -- ret = !repoll || !eld->monitor_present || eld->eld_valid; -- - jack = snd_hda_jack_tbl_get_mst(codec, pin_nid, per_pin->dev_id); - if (jack) { -- jack->block_report = !ret; -+ jack->block_report = do_repoll; - jack->pin_sense = (eld->monitor_present && eld->eld_valid) ? 
- AC_PINSENSE_PRESENCE : 0; - } - mutex_unlock(&per_pin->lock); -- return ret; --} -- --static struct snd_jack *pin_idx_to_jack(struct hda_codec *codec, -- struct hdmi_spec_per_pin *per_pin) --{ -- struct hdmi_spec *spec = codec->spec; -- struct snd_jack *jack = NULL; -- struct hda_jack_tbl *jack_tbl; -- -- /* if !dyn_pcm_assign, get jack from hda_jack_tbl -- * in !dyn_pcm_assign case, spec->pcm_rec[].jack is not -- * NULL even after snd_hda_jack_tbl_clear() is called to -- * free snd_jack. This may cause access invalid memory -- * when calling snd_jack_report -- */ -- if (per_pin->pcm_idx >= 0 && spec->dyn_pcm_assign) -- jack = spec->pcm_rec[per_pin->pcm_idx].jack; -- else if (!spec->dyn_pcm_assign) { -- /* -- * jack tbl doesn't support DP MST -- * DP MST will use dyn_pcm_assign, -- * so DP MST will never come here -- */ -- jack_tbl = snd_hda_jack_tbl_get_mst(codec, per_pin->pin_nid, -- per_pin->dev_id); -- if (jack_tbl) -- jack = jack_tbl->jack; -- } -- return jack; -+ return !do_repoll; - } - - /* update ELD and jack state via audio component */ -@@ -1647,8 +1658,6 @@ static void sync_eld_via_acomp(struct hda_codec *codec, - { - struct hdmi_spec *spec = codec->spec; - struct hdmi_eld *eld = &spec->temp_eld; -- struct snd_jack *jack = NULL; -- bool changed; - int size; - - mutex_lock(&per_pin->lock); -@@ -1671,17 +1680,8 @@ static void sync_eld_via_acomp(struct hda_codec *codec, - eld->eld_size = 0; - } - -- /* pcm_idx >=0 before update_eld() means it is in monitor -- * disconnected event. Jack must be fetched before update_eld() -- */ -- jack = pin_idx_to_jack(codec, per_pin); -- changed = update_eld(codec, per_pin, eld); -- if (jack == NULL) -- jack = pin_idx_to_jack(codec, per_pin); -- if (changed && jack) -- snd_jack_report(jack, -- (eld->monitor_present && eld->eld_valid) ? 
-- SND_JACK_AVOUT : 0); -+ update_eld(codec, per_pin, eld); -+ - mutex_unlock(&per_pin->lock); - } - --- -2.25.0 - diff --git a/libre/linux-libre-pae/0005-iwlwifi-mvm-Do-not-require-PHY_SKU-NVM-section-for-3.patch b/libre/linux-libre-pae/0003-iwlwifi-mvm-Do-not-require-PHY_SKU-NVM-section-for-3.patch index 14f03c105..5c309de77 100644 --- a/libre/linux-libre-pae/0005-iwlwifi-mvm-Do-not-require-PHY_SKU-NVM-section-for-3.patch +++ b/libre/linux-libre-pae/0003-iwlwifi-mvm-Do-not-require-PHY_SKU-NVM-section-for-3.patch @@ -1,8 +1,8 @@ -From 5686c24790e74b13b9ba70ed5602029613af13e9 Mon Sep 17 00:00:00 2001 +From d4f3675632b7c55981b2a062e05d3efcaa1e4c55 Mon Sep 17 00:00:00 2001 From: Dan Moulding <dmoulding@me.com> Date: Tue, 28 Jan 2020 02:31:07 -0700 -Subject: [PATCH 5/5] iwlwifi: mvm: Do not require PHY_SKU NVM section for 3168 - devices +Subject: [PATCH 03/13] iwlwifi: mvm: Do not require PHY_SKU NVM section for + 3168 devices The logic for checking required NVM sections was recently fixed in commit b3f20e098293 ("iwlwifi: mvm: fix NVM check for 3168 @@ -36,5 +36,5 @@ index 46128a2a9c6e..e98ce380c7b9 100644 "Can't parse phy_sku in B0, empty sections\n"); return NULL; -- -2.25.0 +2.25.1 diff --git a/libre/linux-libre-pae/0004-Btrfs-send-fix-emission-of-invalid-clone-operations-.patch b/libre/linux-libre-pae/0004-Btrfs-send-fix-emission-of-invalid-clone-operations-.patch deleted file mode 100644 index 26c10a8df..000000000 --- a/libre/linux-libre-pae/0004-Btrfs-send-fix-emission-of-invalid-clone-operations-.patch +++ /dev/null @@ -1,92 +0,0 @@ -From d0910be3493b40233c513c9ad01ac0878ae26a48 Mon Sep 17 00:00:00 2001 -From: Filipe Manana <fdmanana@suse.com> -Date: Wed, 29 Jan 2020 17:09:53 +0000 -Subject: [PATCH 4/5] Btrfs: send, fix emission of invalid clone operations - within the same file - -When doing an incremental send and a file has extents shared with itself -at different file offsets, it's possible for send to emit clone operations -that will fail at the destination because the source range goes beyond the -file's current size. This happens when the file size has increased in the -send snapshot, there is a hole between the shared extents and both shared -extents are at file offsets which are greater the file's size in the -parent snapshot. - -Example: - - $ mkfs.btrfs -f /dev/sdb - $ mount /dev/sdb /mnt/sdb - - $ xfs_io -f -c "pwrite -S 0xf1 0 64K" /mnt/sdb/foobar - $ btrfs subvolume snapshot -r /mnt/sdb /mnt/sdb/base - $ btrfs send -f /tmp/1.snap /mnt/sdb/base - - # Create a 320K extent at file offset 512K. - $ xfs_io -c "pwrite -S 0xab 512K 64K" /mnt/sdb/foobar - $ xfs_io -c "pwrite -S 0xcd 576K 64K" /mnt/sdb/foobar - $ xfs_io -c "pwrite -S 0xef 640K 64K" /mnt/sdb/foobar - $ xfs_io -c "pwrite -S 0x64 704K 64K" /mnt/sdb/foobar - $ xfs_io -c "pwrite -S 0x73 768K 64K" /mnt/sdb/foobar - - # Clone part of that 320K extent into a lower file offset (192K). - # This file offset is greater than the file's size in the parent - # snapshot (64K). Also the clone range is a bit behind the offset of - # the 320K extent so that we leave a hole between the shared extents. 
- $ xfs_io -c "reflink /mnt/sdb/foobar 448K 192K 192K" /mnt/sdb/foobar - - $ btrfs subvolume snapshot -r /mnt/sdb /mnt/sdb/incr - $ btrfs send -p /mnt/sdb/base -f /tmp/2.snap /mnt/sdb/incr - - $ mkfs.btrfs -f /dev/sdc - $ mount /dev/sdc /mnt/sdc - - $ btrfs receive -f /tmp/1.snap /mnt/sdc - $ btrfs receive -f /tmp/2.snap /mnt/sdc - ERROR: failed to clone extents to foobar: Invalid argument - -The problem is that after processing the extent at file offset 256K, which -refers to the first 128K of the 320K extent created by the buffered write -operations, we have 'cur_inode_next_write_offset' set to 384K, which -corresponds to the end offset of the partially shared extent (256K + 128K) -and to the current file size in the receiver. Then when we process the -extent at offset 512K, we do extent backreference iteration to figure out -if we can clone the extent from some other inode or from the same inode, -and we consider the extent at offset 256K of the same inode as a valid -source for a clone operation, which is not correct because at that point -the current file size in the receiver is 384K, which corresponds to the -end of last processed extent (at file offset 256K), so using a clone -source range from 256K to 256K + 320K is invalid because that goes past -the current size of the file (384K) - this makes the receiver get an --EINVAL error when attempting the clone operation. - -So fix this by excluding clone sources that have a range that goes beyond -the current file size in the receiver when iterating extent backreferences. - -A test case for fstests follows soon. - -Fixes: 11f2069c113e02 ("Btrfs: send, allow clone operations within the same file") -CC: stable@vger.kernel.org # 5.5+ -Reviewed-by: Josef Bacik <josef@toxicpanda.com> -Signed-off-by: Filipe Manana <fdmanana@suse.com> -Signed-off-by: David Sterba <dsterba@suse.com> ---- - fs/btrfs/send.c | 3 ++- - 1 file changed, 2 insertions(+), 1 deletion(-) - -diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c -index 091e5bc8c7ea..a055b657cb85 100644 ---- a/fs/btrfs/send.c -+++ b/fs/btrfs/send.c -@@ -1269,7 +1269,8 @@ static int __iterate_backrefs(u64 ino, u64 offset, u64 root, void *ctx_) - * destination of the stream. - */ - if (ino == bctx->cur_objectid && -- offset >= bctx->sctx->cur_inode_next_write_offset) -+ offset + bctx->extent_len > -+ bctx->sctx->cur_inode_next_write_offset) - return 0; - } - --- -2.25.0 - diff --git a/libre/linux-libre-pae/0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch b/libre/linux-libre-pae/0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch new file mode 100644 index 000000000..b4fe2c99b --- /dev/null +++ b/libre/linux-libre-pae/0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch @@ -0,0 +1,260 @@ +From 9765a866d7439523fdf06ea780498041505e5b32 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Sun, 2 Feb 2020 15:39:34 +0000 +Subject: [PATCH 04/13] drm/i915: Wean off drm_pci_alloc/drm_pci_free + +drm_pci_alloc and drm_pci_free are just very thin wrappers around +dma_alloc_coherent, with a note that we should be removing them. +Furthermore since + +commit de09d31dd38a50fdce106c15abd68432eebbd014 +Author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> +Date: Fri Jan 15 16:51:42 2016 -0800 + + page-flags: define PG_reserved behavior on compound pages + + As far as I can see there's no users of PG_reserved on compound pages. + Let's use PF_NO_COMPOUND here. + +drm_pci_alloc has been declared broken since it mixes GFP_COMP and +SetPageReserved. 
Avoid this conflict by weaning ourselves off using the +abstraction and using the dma functions directly. + +Reported-by: Taketo Kabe +Closes: https://gitlab.freedesktop.org/drm/intel/issues/1027 +Fixes: de09d31dd38a ("page-flags: define PG_reserved behavior on compound pages") +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Cc: <stable@vger.kernel.org> # v4.5+ +Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch> +Link: https://patchwork.freedesktop.org/patch/msgid/20200202153934.3899472-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/display/intel_display.c | 2 +- + .../gpu/drm/i915/gem/i915_gem_object_types.h | 3 - + drivers/gpu/drm/i915/gem/i915_gem_phys.c | 98 ++++++++++--------- + drivers/gpu/drm/i915/i915_gem.c | 8 +- + 4 files changed, 55 insertions(+), 56 deletions(-) + +diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c +index 301897791627..b670239a293b 100644 +--- a/drivers/gpu/drm/i915/display/intel_display.c ++++ b/drivers/gpu/drm/i915/display/intel_display.c +@@ -10731,7 +10731,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state) + u32 base; + + if (INTEL_INFO(dev_priv)->display.cursor_needs_physical) +- base = obj->phys_handle->busaddr; ++ base = sg_dma_address(obj->mm.pages->sgl); + else + base = intel_plane_ggtt_offset(plane_state); + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +index e3f3944fbd90..1078a76d6d84 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h +@@ -260,9 +260,6 @@ struct drm_i915_gem_object { + + void *gvt_info; + }; +- +- /** for phys allocated objects */ +- struct drm_dma_handle *phys_handle; + }; + + static inline struct drm_i915_gem_object * +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c +index 8043ff63d73f..5e2e0109c9ba 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c +@@ -22,88 +22,87 @@ + static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj) + { + struct address_space *mapping = obj->base.filp->f_mapping; +- struct drm_dma_handle *phys; +- struct sg_table *st; + struct scatterlist *sg; +- char *vaddr; ++ struct sg_table *st; ++ dma_addr_t dma; ++ void *vaddr; ++ void *dst; + int i; +- int err; + + if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj))) + return -EINVAL; + +- /* Always aligning to the object size, allows a single allocation ++ /* ++ * Always aligning to the object size, allows a single allocation + * to handle all possible callers, and given typical object sizes, + * the alignment of the buddy allocation will naturally match. 
+ */ +- phys = drm_pci_alloc(obj->base.dev, +- roundup_pow_of_two(obj->base.size), +- roundup_pow_of_two(obj->base.size)); +- if (!phys) ++ vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev, ++ roundup_pow_of_two(obj->base.size), ++ &dma, GFP_KERNEL); ++ if (!vaddr) + return -ENOMEM; + +- vaddr = phys->vaddr; ++ st = kmalloc(sizeof(*st), GFP_KERNEL); ++ if (!st) ++ goto err_pci; ++ ++ if (sg_alloc_table(st, 1, GFP_KERNEL)) ++ goto err_st; ++ ++ sg = st->sgl; ++ sg->offset = 0; ++ sg->length = obj->base.size; ++ ++ sg_assign_page(sg, (struct page *)vaddr); ++ sg_dma_address(sg) = dma; ++ sg_dma_len(sg) = obj->base.size; ++ ++ dst = vaddr; + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { + struct page *page; +- char *src; ++ void *src; + + page = shmem_read_mapping_page(mapping, i); +- if (IS_ERR(page)) { +- err = PTR_ERR(page); +- goto err_phys; +- } ++ if (IS_ERR(page)) ++ goto err_st; + + src = kmap_atomic(page); +- memcpy(vaddr, src, PAGE_SIZE); +- drm_clflush_virt_range(vaddr, PAGE_SIZE); ++ memcpy(dst, src, PAGE_SIZE); ++ drm_clflush_virt_range(dst, PAGE_SIZE); + kunmap_atomic(src); + + put_page(page); +- vaddr += PAGE_SIZE; ++ dst += PAGE_SIZE; + } + + intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt); + +- st = kmalloc(sizeof(*st), GFP_KERNEL); +- if (!st) { +- err = -ENOMEM; +- goto err_phys; +- } +- +- if (sg_alloc_table(st, 1, GFP_KERNEL)) { +- kfree(st); +- err = -ENOMEM; +- goto err_phys; +- } +- +- sg = st->sgl; +- sg->offset = 0; +- sg->length = obj->base.size; +- +- sg_dma_address(sg) = phys->busaddr; +- sg_dma_len(sg) = obj->base.size; +- +- obj->phys_handle = phys; +- + __i915_gem_object_set_pages(obj, st, sg->length); + + return 0; + +-err_phys: +- drm_pci_free(obj->base.dev, phys); +- +- return err; ++err_st: ++ kfree(st); ++err_pci: ++ dma_free_coherent(&obj->base.dev->pdev->dev, ++ roundup_pow_of_two(obj->base.size), ++ vaddr, dma); ++ return -ENOMEM; + } + + static void + i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, + struct sg_table *pages) + { ++ dma_addr_t dma = sg_dma_address(pages->sgl); ++ void *vaddr = sg_page(pages->sgl); ++ + __i915_gem_object_release_shmem(obj, pages, false); + + if (obj->mm.dirty) { + struct address_space *mapping = obj->base.filp->f_mapping; +- char *vaddr = obj->phys_handle->vaddr; ++ void *src = vaddr; + int i; + + for (i = 0; i < obj->base.size / PAGE_SIZE; i++) { +@@ -115,15 +114,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, + continue; + + dst = kmap_atomic(page); +- drm_clflush_virt_range(vaddr, PAGE_SIZE); +- memcpy(dst, vaddr, PAGE_SIZE); ++ drm_clflush_virt_range(src, PAGE_SIZE); ++ memcpy(dst, src, PAGE_SIZE); + kunmap_atomic(dst); + + set_page_dirty(page); + if (obj->mm.madv == I915_MADV_WILLNEED) + mark_page_accessed(page); + put_page(page); +- vaddr += PAGE_SIZE; ++ ++ src += PAGE_SIZE; + } + obj->mm.dirty = false; + } +@@ -131,7 +131,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj, + sg_free_table(pages); + kfree(pages); + +- drm_pci_free(obj->base.dev, obj->phys_handle); ++ dma_free_coherent(&obj->base.dev->pdev->dev, ++ roundup_pow_of_two(obj->base.size), ++ vaddr, dma); + } + + static void phys_release(struct drm_i915_gem_object *obj) +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 905890e3ac24..3f07948ea4da 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -154,7 +154,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, + struct drm_i915_gem_pwrite *args, + struct 
drm_file *file) + { +- void *vaddr = obj->phys_handle->vaddr + args->offset; ++ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset; + char __user *user_data = u64_to_user_ptr(args->data_ptr); + + /* +@@ -800,10 +800,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, + ret = i915_gem_gtt_pwrite_fast(obj, args); + + if (ret == -EFAULT || ret == -ENOSPC) { +- if (obj->phys_handle) +- ret = i915_gem_phys_pwrite(obj, args, file); +- else ++ if (i915_gem_object_has_struct_page(obj)) + ret = i915_gem_shmem_pwrite(obj, args); ++ else ++ ret = i915_gem_phys_pwrite(obj, args, file); + } + + i915_gem_object_unpin_pages(obj); +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0005-drm-Remove-PageReserved-manipulation-from-drm_pci_al.patch b/libre/linux-libre-pae/0005-drm-Remove-PageReserved-manipulation-from-drm_pci_al.patch new file mode 100644 index 000000000..173559c8c --- /dev/null +++ b/libre/linux-libre-pae/0005-drm-Remove-PageReserved-manipulation-from-drm_pci_al.patch @@ -0,0 +1,92 @@ +From 4c2060e15fe37c5fa9d62bc6cc59e29fd157c0c3 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Sun, 2 Feb 2020 17:16:31 +0000 +Subject: [PATCH 05/13] drm: Remove PageReserved manipulation from + drm_pci_alloc + +drm_pci_alloc/drm_pci_free are very thin wrappers around the core dma +facilities, and we have no special reason within the drm layer to behave +differently. In particular, since + +commit de09d31dd38a50fdce106c15abd68432eebbd014 +Author: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> +Date: Fri Jan 15 16:51:42 2016 -0800 + + page-flags: define PG_reserved behavior on compound pages + + As far as I can see there's no users of PG_reserved on compound pages. + Let's use PF_NO_COMPOUND here. + +it has been illegal to combine GFP_COMP with SetPageReserved, so lets +stop doing both and leave the dma layer to its own devices. + +Reported-by: Taketo Kabe +Bug: https://gitlab.freedesktop.org/drm/intel/issues/1027 +Fixes: de09d31dd38a ("page-flags: define PG_reserved behavior on compound pages") +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Cc: <stable@vger.kernel.org> # v4.5+ +Reviewed-by: Alex Deucher <alexander.deucher@amd.com> +Link: https://patchwork.freedesktop.org/patch/msgid/20200202171635.4039044-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/drm_pci.c | 23 ++--------------------- + 1 file changed, 2 insertions(+), 21 deletions(-) + +diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c +index a86a3ab2771c..235729f4aadb 100644 +--- a/drivers/gpu/drm/drm_pci.c ++++ b/drivers/gpu/drm/drm_pci.c +@@ -51,8 +51,6 @@ + drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align) + { + drm_dma_handle_t *dmah; +- unsigned long addr; +- size_t sz; + + /* pci_alloc_consistent only guarantees alignment to the smallest + * PAGE_SIZE order which is greater than or equal to the requested size. +@@ -68,20 +66,13 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali + dmah->size = size; + dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size, + &dmah->busaddr, +- GFP_KERNEL | __GFP_COMP); ++ GFP_KERNEL); + + if (dmah->vaddr == NULL) { + kfree(dmah); + return NULL; + } + +- /* XXX - Is virt_to_page() legal for consistent mem? 
*/ +- /* Reserve */ +- for (addr = (unsigned long)dmah->vaddr, sz = size; +- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { +- SetPageReserved(virt_to_page((void *)addr)); +- } +- + return dmah; + } + +@@ -94,19 +85,9 @@ EXPORT_SYMBOL(drm_pci_alloc); + */ + void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah) + { +- unsigned long addr; +- size_t sz; +- +- if (dmah->vaddr) { +- /* XXX - Is virt_to_page() legal for consistent mem? */ +- /* Unreserve */ +- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size; +- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) { +- ClearPageReserved(virt_to_page((void *)addr)); +- } ++ if (dmah->vaddr) + dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr, + dmah->busaddr); +- } + } + + /** +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0006-drm-i915-execlists-Always-force-a-context-reload-whe.patch b/libre/linux-libre-pae/0006-drm-i915-execlists-Always-force-a-context-reload-whe.patch new file mode 100644 index 000000000..fda8482df --- /dev/null +++ b/libre/linux-libre-pae/0006-drm-i915-execlists-Always-force-a-context-reload-whe.patch @@ -0,0 +1,145 @@ +From db1660f27f62a515dde34ccb192d7c63acf7fde9 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Fri, 7 Feb 2020 21:14:52 +0000 +Subject: [PATCH 06/13] drm/i915/execlists: Always force a context reload when + rewinding RING_TAIL + +If we rewind the RING_TAIL on a context, due to a preemption event, we +must force the context restore for the RING_TAIL update to be properly +handled. Rather than note which preemption events may cause us to rewind +the tail, compare the new request's tail with the previously submitted +RING_TAIL, as it turns out that timeslicing was causing unexpected +rewinds. + + <idle>-0 0d.s2 1280851190us : __execlists_submission_tasklet: 0000:00:02.0 rcs0: expired last=130:4698, prio=3, hint=3 + <idle>-0 0d.s2 1280851192us : __i915_request_unsubmit: 0000:00:02.0 rcs0: fence 66:119966, current 119964 + <idle>-0 0d.s2 1280851195us : __i915_request_unsubmit: 0000:00:02.0 rcs0: fence 130:4698, current 4695 + <idle>-0 0d.s2 1280851198us : __i915_request_unsubmit: 0000:00:02.0 rcs0: fence 130:4696, current 4695 +^---- Note we unwind 2 requests from the same context + + <idle>-0 0d.s2 1280851208us : __i915_request_submit: 0000:00:02.0 rcs0: fence 130:4696, current 4695 + <idle>-0 0d.s2 1280851213us : __i915_request_submit: 0000:00:02.0 rcs0: fence 134:1508, current 1506 +^---- But to apply the new timeslice, we have to replay the first request + before the new client can start -- the unexpected RING_TAIL rewind + + <idle>-0 0d.s2 1280851219us : trace_ports: 0000:00:02.0 rcs0: submit { 130:4696*, 134:1508 } + synmark2-5425 2..s. 1280851239us : process_csb: 0000:00:02.0 rcs0: cs-irq head=5, tail=0 + synmark2-5425 2..s. 1280851240us : process_csb: 0000:00:02.0 rcs0: csb[0]: status=0x00008002:0x00000000 +^---- Preemption event for the ELSP update; note the lite-restore + + synmark2-5425 2..s. 1280851243us : trace_ports: 0000:00:02.0 rcs0: preempted { 130:4698, 66:119966 } + synmark2-5425 2..s. 1280851246us : trace_ports: 0000:00:02.0 rcs0: promote { 130:4696*, 134:1508 } + synmark2-5425 2.... 1280851462us : __i915_request_commit: 0000:00:02.0 rcs0: fence 130:4700, current 4695 + synmark2-5425 2.... 
1280852111us : __i915_request_commit: 0000:00:02.0 rcs0: fence 130:4702, current 4695 + synmark2-5425 2.Ns1 1280852296us : process_csb: 0000:00:02.0 rcs0: cs-irq head=0, tail=2 + synmark2-5425 2.Ns1 1280852297us : process_csb: 0000:00:02.0 rcs0: csb[1]: status=0x00000814:0x00000000 + synmark2-5425 2.Ns1 1280852299us : trace_ports: 0000:00:02.0 rcs0: completed { 130:4696!, 134:1508 } + synmark2-5425 2.Ns1 1280852301us : process_csb: 0000:00:02.0 rcs0: csb[2]: status=0x00000818:0x00000040 + synmark2-5425 2.Ns1 1280852302us : trace_ports: 0000:00:02.0 rcs0: completed { 134:1508, 0:0 } + synmark2-5425 2.Ns1 1280852313us : process_csb: process_csb:2336 GEM_BUG_ON(!i915_request_completed(*execlists->active) && !reset_in_progress(execlists)) + +Fixes: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing") +Referenecs: 82c69bf58650 ("drm/i915/gt: Detect if we miss WaIdleLiteRestore") +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com> +Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com> +Cc: <stable@vger.kernel.org> # v5.4+ +Link: https://patchwork.freedesktop.org/patch/msgid/20200207211452.2860634-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gt/intel_lrc.c | 18 ++++++++---------- + drivers/gpu/drm/i915/gt/intel_ring.c | 1 + + drivers/gpu/drm/i915/gt/intel_ring.h | 8 ++++++++ + drivers/gpu/drm/i915/gt/intel_ring_types.h | 1 + + 4 files changed, 18 insertions(+), 10 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c +index d925a1035c9d..1b4784bfa7e5 100644 +--- a/drivers/gpu/drm/i915/gt/intel_lrc.c ++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c +@@ -1157,7 +1157,7 @@ static u64 execlists_update_context(struct i915_request *rq) + { + struct intel_context *ce = rq->hw_context; + u64 desc = ce->lrc_desc; +- u32 tail; ++ u32 tail, prev; + + /* + * WaIdleLiteRestore:bdw,skl +@@ -1170,9 +1170,15 @@ static u64 execlists_update_context(struct i915_request *rq) + * subsequent resubmissions (for lite restore). Should that fail us, + * and we try and submit the same tail again, force the context + * reload. ++ * ++ * If we need to return to a preempted context, we need to skip the ++ * lite-restore and force it to reload the RING_TAIL. Otherwise, the ++ * HW has a tendency to ignore us rewinding the TAIL to the end of ++ * an earlier request. + */ + tail = intel_ring_set_tail(rq->ring, rq->tail); +- if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail)) ++ prev = ce->lrc_reg_state[CTX_RING_TAIL]; ++ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0)) + desc |= CTX_DESC_FORCE_RESTORE; + ce->lrc_reg_state[CTX_RING_TAIL] = tail; + rq->tail = rq->wa_tail; +@@ -1651,14 +1657,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine) + */ + __unwind_incomplete_requests(engine); + +- /* +- * If we need to return to the preempted context, we +- * need to skip the lite-restore and force it to +- * reload the RING_TAIL. Otherwise, the HW has a +- * tendency to ignore us rewinding the TAIL to the +- * end of an earlier request. 
+- */ +- last->hw_context->lrc_desc |= CTX_DESC_FORCE_RESTORE; + last = NULL; + } else if (need_timeslice(engine, last) && + timer_expired(&engine->execlists.timer)) { +diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c +index 374b28f13ca0..6ff803f397c4 100644 +--- a/drivers/gpu/drm/i915/gt/intel_ring.c ++++ b/drivers/gpu/drm/i915/gt/intel_ring.c +@@ -145,6 +145,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size) + + kref_init(&ring->ref); + ring->size = size; ++ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size); + + /* + * Workaround an erratum on the i830 which causes a hang if +diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h +index ea2839d9e044..5bdce24994aa 100644 +--- a/drivers/gpu/drm/i915/gt/intel_ring.h ++++ b/drivers/gpu/drm/i915/gt/intel_ring.h +@@ -56,6 +56,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos) + return pos & (ring->size - 1); + } + ++static inline int intel_ring_direction(const struct intel_ring *ring, ++ u32 next, u32 prev) ++{ ++ typecheck(typeof(ring->size), next); ++ typecheck(typeof(ring->size), prev); ++ return (next - prev) << ring->wrap; ++} ++ + static inline bool + intel_ring_offset_valid(const struct intel_ring *ring, + unsigned int pos) +diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h +index d9f17f38e0cc..3cd7fec7fd8d 100644 +--- a/drivers/gpu/drm/i915/gt/intel_ring_types.h ++++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h +@@ -45,6 +45,7 @@ struct intel_ring { + + u32 space; + u32 size; ++ u32 wrap; + u32 effective_size; + }; + +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0007-drm-i915-Serialise-i915_active_acquire-with-__active.patch b/libre/linux-libre-pae/0007-drm-i915-Serialise-i915_active_acquire-with-__active.patch new file mode 100644 index 000000000..0b6743d43 --- /dev/null +++ b/libre/linux-libre-pae/0007-drm-i915-Serialise-i915_active_acquire-with-__active.patch @@ -0,0 +1,79 @@ +From 4111ed5d5b9bed12b5bf7929045ca3c10cb60521 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Thu, 5 Dec 2019 18:33:32 +0000 +Subject: [PATCH 07/13] drm/i915: Serialise i915_active_acquire() with + __active_retire() + +As __active_retire() does it's final atomic_dec() under the +ref->tree_lock spinlock, in order to prevent ourselves from reusing the +ref->cache and ref->tree as they are being destroyed, we need to +serialise with the retirement during i915_active_acquire(). + +[ +0.000005] kernel BUG at drivers/gpu/drm/i915/i915_active.c:157! 
+[ +0.000011] invalid opcode: 0000 [#1] SMP +[ +0.000004] CPU: 7 PID: 188 Comm: kworker/u16:4 Not tainted 5.4.0-rc8-03070-gac5e57322614 #89 +[ +0.000002] Hardware name: Razer Razer Blade Stealth 13 Late 2019/LY320, BIOS 1.02 09/10/2019 +[ +0.000082] Workqueue: events_unbound active_work [i915] +[ +0.000059] RIP: 0010:__active_retire+0x115/0x120 [i915] +[ +0.000003] Code: 75 28 48 8b 3d 8c 6e 1a 00 48 89 ee e8 e4 5f a5 c0 48 8b 44 24 10 65 48 33 04 25 28 00 00 00 75 0f 48 83 c4 18 5b 5d 41 5c c3 <0f> 0b 0f 0b 0f 0b e8 a0 90 87 c0 0f 1f 44 00 00 48 8b 3d 54 6e 1a +[ +0.000002] RSP: 0018:ffffb833003f7e48 EFLAGS: 00010286 +[ +0.000003] RAX: ffff8d6e8d726d00 RBX: ffff8d6f9db4e840 RCX: 0000000000000000 +[ +0.000001] RDX: ffffffff82605930 RSI: ffff8d6f9adc4908 RDI: ffff8d6e96cefe28 +[ +0.000002] RBP: ffff8d6e96cefe00 R08: 0000000000000000 R09: ffff8d6f9ffe9a50 +[ +0.000002] R10: 0000000000000048 R11: 0000000000000018 R12: ffff8d6f9adc4930 +[ +0.000001] R13: ffff8d6f9e04fb00 R14: 0000000000000000 R15: ffff8d6f9adc4988 +[ +0.000002] FS: 0000000000000000(0000) GS:ffff8d6f9ffc0000(0000) knlGS:0000000000000000 +[ +0.000002] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 +[ +0.000002] CR2: 000055eb5a34cf10 CR3: 000000018d609002 CR4: 0000000000760ee0 +[ +0.000002] PKRU: 55555554 +[ +0.000001] Call Trace: +[ +0.000010] process_one_work+0x1aa/0x350 +[ +0.000004] worker_thread+0x4d/0x3a0 +[ +0.000004] kthread+0xfb/0x130 +[ +0.000004] ? process_one_work+0x350/0x350 +[ +0.000003] ? kthread_park+0x90/0x90 +[ +0.000005] ret_from_fork+0x1f/0x40 + +Reported-by: Kenneth Graunke <kenneth@whitecape.org> +Fixes: c9ad602feabe ("drm/i915: Split i915_active.mutex into an irq-safe spinlock for the rbtree") +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com> +Cc: Kenneth Graunke <kenneth@whitecape.org> +Cc: Matthew Auld <matthew.auld@intel.com> +Tested-by: Kenneth Graunke <kenneth@whitecape.org> +Reviewed-by: Kenneth Graunke <kenneth@whitecape.org> +Link: https://patchwork.freedesktop.org/patch/msgid/20191205183332.801237-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/i915_active.c | 5 +++-- + 1 file changed, 3 insertions(+), 2 deletions(-) + +diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c +index a19e7d89bc8a..378b52d1ab74 100644 +--- a/drivers/gpu/drm/i915/i915_active.c ++++ b/drivers/gpu/drm/i915/i915_active.c +@@ -91,10 +91,9 @@ static void debug_active_init(struct i915_active *ref) + + static void debug_active_activate(struct i915_active *ref) + { +- spin_lock_irq(&ref->tree_lock); ++ lockdep_assert_held(&ref->tree_lock); + if (!atomic_read(&ref->count)) /* before the first inc */ + debug_object_activate(ref, &active_debug_desc); +- spin_unlock_irq(&ref->tree_lock); + } + + static void debug_active_deactivate(struct i915_active *ref) +@@ -407,8 +406,10 @@ int i915_active_acquire(struct i915_active *ref) + if (!atomic_read(&ref->count) && ref->active) + err = ref->active(ref); + if (!err) { ++ spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */ + debug_active_activate(ref); + atomic_inc(&ref->count); ++ spin_unlock_irq(&ref->tree_lock); + } + + mutex_unlock(&ref->mutex); +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0008-drm-i915-gem-Take-runtime-pm-wakeref-prior-to-unbind.patch b/libre/linux-libre-pae/0008-drm-i915-gem-Take-runtime-pm-wakeref-prior-to-unbind.patch new file mode 100644 index 000000000..2cca09b39 --- /dev/null +++ 
b/libre/linux-libre-pae/0008-drm-i915-gem-Take-runtime-pm-wakeref-prior-to-unbind.patch @@ -0,0 +1,130 @@ +From ff71a1213c20acd951584e14fa2f19e9133722d4 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Tue, 3 Dec 2019 10:13:46 +0000 +Subject: [PATCH 08/13] drm/i915/gem: Take runtime-pm wakeref prior to + unbinding + +Some machines require ACPI for runtime resume, and ACPI is quite kmalloc +happy. We cannot handle kmalloc from inside the vm->mutex, as they are +used by the shrinker, and so we must ensure the global runtime-pm is +awake prior to unbinding to avoid the potential inversion. + +<4> [57.121748] ====================================================== +<4> [57.121750] WARNING: possible circular locking dependency detected +<4> [57.121753] 5.4.0-rc8-CI-CI_DRM_7466+ #1 Tainted: G U +<4> [57.121754] ------------------------------------------------------ +<4> [57.121756] i915_pm_rpm/1105 is trying to acquire lock: +<4> [57.121758] ffffffff82263a40 (fs_reclaim){+.+.}, at: fs_reclaim_acquire.part.117+0x0/0x30 +<4> [57.121766] +but task is already holding lock: +<4> [57.121768] ffff888475a593c0 (&vm->mutex){+.+.}, at: i915_vma_unbind+0x21/0x50 [i915] +<4> [57.121868] +which lock already depends on the new lock. + +<4> [57.121869] +the existing dependency chain (in reverse order) is: +<4> [57.121871] +-> #1 (&vm->mutex){+.+.}: +<4> [57.121951] i915_gem_shrinker_taints_mutex+0xa2/0xd0 [i915] +<4> [57.122028] i915_address_space_init+0xa9/0x170 [i915] +<4> [57.122104] i915_ggtt_init_hw+0x47/0x130 [i915] +<4> [57.122150] i915_driver_probe+0xbb4/0x15f0 [i915] +<4> [57.122197] i915_pci_probe+0x43/0x1c0 [i915] +<4> [57.122202] pci_device_probe+0x9e/0x120 +<4> [57.122206] really_probe+0xea/0x420 +<4> [57.122209] driver_probe_device+0x10b/0x120 +<4> [57.122212] device_driver_attach+0x4a/0x50 +<4> [57.122214] __driver_attach+0x97/0x130 +<4> [57.122217] bus_for_each_dev+0x74/0xc0 +<4> [57.122220] bus_add_driver+0x142/0x220 +<4> [57.122222] driver_register+0x56/0xf0 +<4> [57.122226] do_one_initcall+0x58/0x2ff +<4> [57.122230] do_init_module+0x56/0x1f8 +<4> [57.122233] load_module+0x243e/0x29f0 +<4> [57.122236] __do_sys_finit_module+0xe9/0x110 +<4> [57.122239] do_syscall_64+0x4f/0x210 +<4> [57.122242] entry_SYSCALL_64_after_hwframe+0x49/0xbe +<4> [57.122244] +-> #0 (fs_reclaim){+.+.}: +<4> [57.122249] __lock_acquire+0x1328/0x15d0 +<4> [57.122251] lock_acquire+0xa7/0x1c0 +<4> [57.122254] fs_reclaim_acquire.part.117+0x24/0x30 +<4> [57.122257] __kmalloc+0x48/0x320 +<4> [57.122261] acpi_ns_internalize_name+0x44/0x9b +<4> [57.122264] acpi_ns_get_node_unlocked+0x6b/0xd3 +<4> [57.122267] acpi_ns_get_node+0x3b/0x50 +<4> [57.122271] acpi_get_handle+0x8a/0xb4 +<4> [57.122274] acpi_has_method+0x1c/0x40 +<4> [57.122278] acpi_pci_set_power_state+0x40/0xe0 +<4> [57.122281] pci_platform_power_transition+0x3e/0x90 +<4> [57.122284] pci_set_power_state+0x83/0xf0 +<4> [57.122287] pci_restore_standard_config+0x22/0x40 +<4> [57.122289] pci_pm_runtime_resume+0x23/0xc0 +<4> [57.122293] __rpm_callback+0xb1/0x110 +<4> [57.122296] rpm_callback+0x1a/0x70 +<4> [57.122299] rpm_resume+0x50e/0x790 +<4> [57.122302] __pm_runtime_resume+0x42/0x80 +<4> [57.122357] __intel_runtime_pm_get+0x15/0x60 [i915] +<4> [57.122435] ggtt_unbind_vma+0x24/0x60 [i915] +<4> [57.122514] __i915_vma_unbind.part.39+0xb5/0x500 [i915] +<4> [57.122593] i915_vma_unbind+0x2d/0x50 [i915] +<4> [57.122668] i915_gem_object_unbind+0x11c/0x260 [i915] +<4> [57.122740] i915_gem_object_set_cache_level+0x32/0x90 [i915] +<4> [57.122810] 
i915_gem_set_caching_ioctl+0x1f7/0x2f0 [i915] +<4> [57.122815] drm_ioctl_kernel+0xa7/0xf0 +<4> [57.122818] drm_ioctl+0x2e1/0x390 +<4> [57.122822] do_vfs_ioctl+0xa0/0x6f0 +<4> [57.122825] ksys_ioctl+0x35/0x60 +<4> [57.122828] __x64_sys_ioctl+0x11/0x20 +<4> [57.122830] do_syscall_64+0x4f/0x210 +<4> [57.122833] entry_SYSCALL_64_after_hwframe+0x49/0xbe + +Closes: https://gitlab.freedesktop.org/drm/intel/issues/711 +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Reviewed-by: Matthew Auld <matthew.auld@intel.com> +Link: https://patchwork.freedesktop.org/patch/msgid/20191203101347.2836057-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/i915_gem.c | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 3f07948ea4da..ed2436db5dd2 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -119,10 +119,23 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data, + int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + unsigned long flags) + { +- struct i915_vma *vma; ++ struct intel_runtime_pm *rpm = &to_i915(obj->base.dev)->runtime_pm; + LIST_HEAD(still_in_list); ++ intel_wakeref_t wakeref; ++ struct i915_vma *vma; + int ret = 0; + ++ if (!atomic_read(&obj->bind_count)) ++ return 0; ++ ++ /* ++ * As some machines use ACPI to handle runtime-resume callbacks, and ++ * ACPI is quite kmalloc happy, we cannot resume beneath the vm->mutex ++ * as they are required by the shrinker. Ergo, we wake the device up ++ * first just in case. ++ */ ++ wakeref = intel_runtime_pm_get(rpm); ++ + spin_lock(&obj->vma.lock); + while (!ret && (vma = list_first_entry_or_null(&obj->vma.list, + struct i915_vma, +@@ -146,6 +159,8 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + list_splice(&still_in_list, &obj->vma.list); + spin_unlock(&obj->vma.lock); + ++ intel_runtime_pm_put(rpm, wakeref); ++ + return ret; + } + +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0009-drm-i915-gem-Avoid-parking-the-vma-as-we-unbind.patch b/libre/linux-libre-pae/0009-drm-i915-gem-Avoid-parking-the-vma-as-we-unbind.patch new file mode 100644 index 000000000..18690528d --- /dev/null +++ b/libre/linux-libre-pae/0009-drm-i915-gem-Avoid-parking-the-vma-as-we-unbind.patch @@ -0,0 +1,63 @@ +From 2d5fd5d31dcd5c63c0c82a4a599443ee2a7144b7 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Tue, 3 Dec 2019 15:50:32 +0000 +Subject: [PATCH 09/13] drm/i915/gem: Avoid parking the vma as we unbind + +In order to avoid keeping a reference on the i915_vma (which is long +overdue!) we have to coordinate all the possible lifetimes and only use +the vma while we know it is alive. In this episode, we are reminded that +while idle, the closed vma are destroyed. So if the GT idles while we are +working with the vma, the vma itself becomes invalid. + +First class i915_vma here we come, but in the meantime keep piling on +the straw. 
+ +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Reviewed-by: Matthew Auld <matthew.auld@intel.com> +Link: https://patchwork.freedesktop.org/patch/msgid/20191203155032.3137263-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/i915_gem.c | 17 ++++++++++++++++- + 1 file changed, 16 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index ed2436db5dd2..739543812422 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -141,18 +141,33 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + struct i915_vma, + obj_link))) { + struct i915_address_space *vm = vma->vm; ++ bool awake = false; + +- ret = -EBUSY; ++ ret = -EAGAIN; + if (!i915_vm_tryopen(vm)) + break; + ++ /* Prevent vma being freed by i915_vma_parked as we unbind */ ++ if (intel_gt_pm_get_if_awake(vm->gt)) { ++ awake = true; ++ } else { ++ if (i915_vma_is_closed(vma)) { ++ spin_unlock(&obj->vma.lock); ++ goto err_vm; ++ } ++ } ++ + list_move_tail(&vma->obj_link, &still_in_list); + spin_unlock(&obj->vma.lock); + ++ ret = -EBUSY; + if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || + !i915_vma_is_active(vma)) + ret = i915_vma_unbind(vma); + ++ if (awake) ++ intel_gt_pm_put(vm->gt); ++err_vm: + i915_vm_close(vm); + spin_lock(&obj->vma.lock); + } +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0010-drm-i915-gem-Try-to-flush-pending-unbind-events.patch b/libre/linux-libre-pae/0010-drm-i915-gem-Try-to-flush-pending-unbind-events.patch new file mode 100644 index 000000000..8cff1cbf9 --- /dev/null +++ b/libre/linux-libre-pae/0010-drm-i915-gem-Try-to-flush-pending-unbind-events.patch @@ -0,0 +1,64 @@ +From ee4f0e7d05f4e20dec1a91b4751da983109dbe44 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Wed, 4 Dec 2019 12:35:56 +0000 +Subject: [PATCH 10/13] drm/i915/gem: Try to flush pending unbind events + +If we cannot handle a vma within the unbind loop, try to flush the +pending events (i915_vma_parked, i915_vm_release) and try again. This +avoids a round trip to userspace that is not guaranteed to make forward +progress, as the events we wait upon require being idle. 
+ +References: cb6c3d45f948 ("drm/i915/gem: Avoid parking the vma as we unbind") +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Cc: Matthew Auld <matthew.auld@intel.com> +Reviewed-by: Matthew Auld <matthew.auld@intel.com> +Link: https://patchwork.freedesktop.org/patch/msgid/20191204123556.3740002-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/i915_gem.c | 10 +++++++++- + 1 file changed, 9 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 739543812422..85b12228c2cf 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -123,7 +123,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + LIST_HEAD(still_in_list); + intel_wakeref_t wakeref; + struct i915_vma *vma; +- int ret = 0; ++ int ret; + + if (!atomic_read(&obj->bind_count)) + return 0; +@@ -136,6 +136,8 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + */ + wakeref = intel_runtime_pm_get(rpm); + ++try_again: ++ ret = 0; + spin_lock(&obj->vma.lock); + while (!ret && (vma = list_first_entry_or_null(&obj->vma.list, + struct i915_vma, +@@ -153,6 +155,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + } else { + if (i915_vma_is_closed(vma)) { + spin_unlock(&obj->vma.lock); ++ i915_vma_parked(vm->gt); + goto err_vm; + } + } +@@ -174,6 +177,11 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + list_splice(&still_in_list, &obj->vma.list); + spin_unlock(&obj->vma.lock); + ++ if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_ACTIVE) { ++ rcu_barrier(); /* flush the i915_vm_release() */ ++ goto try_again; ++ } ++ + intel_runtime_pm_put(rpm, wakeref); + + return ret; +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0011-drm-i915-gem-Reinitialise-the-local-list-before-repe.patch b/libre/linux-libre-pae/0011-drm-i915-gem-Reinitialise-the-local-list-before-repe.patch new file mode 100644 index 000000000..d947d9502 --- /dev/null +++ b/libre/linux-libre-pae/0011-drm-i915-gem-Reinitialise-the-local-list-before-repe.patch @@ -0,0 +1,34 @@ +From c13629b1458dc936a12611ec91925ab421ffa800 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Thu, 5 Dec 2019 13:29:12 +0000 +Subject: [PATCH 11/13] drm/i915/gem: Reinitialise the local list before + repeating + +As we may start the loop again, we require our local list of i915_vma +we've processed to be reinitialised. 
+ +Fixes: aa5e4453dc05 ("drm/i915/gem: Try to flush pending unbind events") +Closes: https://gitlab.freedesktop.org/drm/intel/issues/731 +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Reviewed-by: Andi Shyti <andi.shyti@intel.com> +Link: https://patchwork.freedesktop.org/patch/msgid/20191205132912.606868-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/i915_gem.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 85b12228c2cf..c2c025c4f4ad 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -174,7 +174,7 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + i915_vm_close(vm); + spin_lock(&obj->vma.lock); + } +- list_splice(&still_in_list, &obj->vma.list); ++ list_splice_init(&still_in_list, &obj->vma.list); + spin_unlock(&obj->vma.lock); + + if (ret == -EAGAIN && flags & I915_GEM_OBJECT_UNBIND_ACTIVE) { +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0012-drm-i915-Add-a-simple-is-bound-check-before-unbindin.patch b/libre/linux-libre-pae/0012-drm-i915-Add-a-simple-is-bound-check-before-unbindin.patch new file mode 100644 index 000000000..4c4bdaafc --- /dev/null +++ b/libre/linux-libre-pae/0012-drm-i915-Add-a-simple-is-bound-check-before-unbindin.patch @@ -0,0 +1,41 @@ +From 2b7ac37ef0d71002f3b7a9da6553049bc1ea9172 Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Sun, 22 Dec 2019 21:02:54 +0000 +Subject: [PATCH 12/13] drm/i915: Add a simple is-bound check before unbinding + +Only acquire the various atomic references required to unbind the vma if +we do need to unbind the vma. + +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Acked-by: Imre Deak <imre.deak@intel.com> +Link: https://patchwork.freedesktop.org/patch/msgid/20191222210256.2066451-1-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/i915_gem.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index c2c025c4f4ad..83eed642cbcd 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -145,6 +145,10 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + struct i915_address_space *vm = vma->vm; + bool awake = false; + ++ list_move_tail(&vma->obj_link, &still_in_list); ++ if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) ++ continue; ++ + ret = -EAGAIN; + if (!i915_vm_tryopen(vm)) + break; +@@ -160,7 +164,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + } + } + +- list_move_tail(&vma->obj_link, &still_in_list); + spin_unlock(&obj->vma.lock); + + ret = -EBUSY; +-- +2.25.1 + diff --git a/libre/linux-libre-pae/0013-drm-i915-Introduce-a-vma.kref.patch b/libre/linux-libre-pae/0013-drm-i915-Introduce-a-vma.kref.patch new file mode 100644 index 000000000..861889d73 --- /dev/null +++ b/libre/linux-libre-pae/0013-drm-i915-Introduce-a-vma.kref.patch @@ -0,0 +1,252 @@ +From 1e76c5d4a020453c26640572a2452456d68ebeea Mon Sep 17 00:00:00 2001 +From: Chris Wilson <chris@chris-wilson.co.uk> +Date: Sun, 22 Dec 2019 21:02:55 +0000 +Subject: [PATCH 13/13] drm/i915: Introduce a vma.kref + +Start introducing a kref on i915_vma in order to protect the vma unbind +(i915_gem_object_unbind) from a parallel destruction (i915_vma_parked). +Later, we will use the refcount to manage all access and turn i915_vma +into a first class container. 
+ +Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> +Cc: Imre Deak <imre.deak@intel.com> +Acked-by: Imre Deak <imre.deak@intel.com> +Link: https://patchwork.freedesktop.org/patch/msgid/20191222210256.2066451-2-chris@chris-wilson.co.uk +--- + drivers/gpu/drm/i915/gem/i915_gem_object.c | 2 +- + .../gpu/drm/i915/gem/selftests/huge_pages.c | 3 +-- + .../drm/i915/gem/selftests/i915_gem_mman.c | 4 +-- + drivers/gpu/drm/i915/i915_gem.c | 27 +++++++------------ + drivers/gpu/drm/i915/i915_gem_gtt.c | 5 ++-- + drivers/gpu/drm/i915/i915_vma.c | 9 ++++--- + drivers/gpu/drm/i915/i915_vma.h | 25 ++++++++++++++--- + 7 files changed, 44 insertions(+), 31 deletions(-) + +diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c +index a596548c07bf..b6937469ffd3 100644 +--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c ++++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c +@@ -174,7 +174,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, + GEM_BUG_ON(vma->obj != obj); + spin_unlock(&obj->vma.lock); + +- i915_vma_destroy(vma); ++ __i915_vma_put(vma); + + spin_lock(&obj->vma.lock); + } +diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +index 688c49a24f32..bd1e2c12de63 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c ++++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +@@ -1110,8 +1110,7 @@ static int __igt_write_huge(struct intel_context *ce, + out_vma_unpin: + i915_vma_unpin(vma); + out_vma_close: +- i915_vma_destroy(vma); +- ++ __i915_vma_put(vma); + return err; + } + +diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +index 29b2077b73d2..d226e55df8b2 100644 +--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c ++++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +@@ -161,7 +161,7 @@ static int check_partial_mapping(struct drm_i915_gem_object *obj, + kunmap(p); + + out: +- i915_vma_destroy(vma); ++ __i915_vma_put(vma); + return err; + } + +@@ -255,7 +255,7 @@ static int check_partial_mappings(struct drm_i915_gem_object *obj, + if (err) + return err; + +- i915_vma_destroy(vma); ++ __i915_vma_put(vma); + + if (igt_timeout(end_time, + "%s: timed out after tiling=%d stride=%d\n", +diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c +index 83eed642cbcd..0475a0343487 100644 +--- a/drivers/gpu/drm/i915/i915_gem.c ++++ b/drivers/gpu/drm/i915/i915_gem.c +@@ -143,7 +143,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + struct i915_vma, + obj_link))) { + struct i915_address_space *vm = vma->vm; +- bool awake = false; + + list_move_tail(&vma->obj_link, &still_in_list); + if (!i915_vma_is_bound(vma, I915_VMA_BIND_MASK)) +@@ -154,26 +153,18 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj, + break; + + /* Prevent vma being freed by i915_vma_parked as we unbind */ +- if (intel_gt_pm_get_if_awake(vm->gt)) { +- awake = true; +- } else { +- if (i915_vma_is_closed(vma)) { +- spin_unlock(&obj->vma.lock); +- i915_vma_parked(vm->gt); +- goto err_vm; +- } +- } +- ++ vma = __i915_vma_get(vma); + spin_unlock(&obj->vma.lock); + +- ret = -EBUSY; +- if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || +- !i915_vma_is_active(vma)) +- ret = i915_vma_unbind(vma); ++ if (vma) { ++ ret = -EBUSY; ++ if (flags & I915_GEM_OBJECT_UNBIND_ACTIVE || ++ !i915_vma_is_active(vma)) ++ ret = i915_vma_unbind(vma); ++ ++ __i915_vma_put(vma); ++ } + +- if (awake) +- intel_gt_pm_put(vm->gt); 
+-err_vm: + i915_vm_close(vm); + spin_lock(&obj->vma.lock); + } +diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c +index 44727806dfd7..dd2c20f7d4d2 100644 +--- a/drivers/gpu/drm/i915/i915_gem_gtt.c ++++ b/drivers/gpu/drm/i915/i915_gem_gtt.c +@@ -522,7 +522,7 @@ void __i915_vm_close(struct i915_address_space *vm) + + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); + WARN_ON(__i915_vma_unbind(vma)); +- i915_vma_destroy(vma); ++ __i915_vma_put(vma); + + i915_gem_object_put(obj); + } +@@ -1790,7 +1790,7 @@ static void gen6_ppgtt_cleanup(struct i915_address_space *vm) + { + struct gen6_ppgtt *ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm)); + +- i915_vma_destroy(ppgtt->vma); ++ __i915_vma_put(ppgtt->vma); + + gen6_ppgtt_free_pd(ppgtt); + free_scratch(vm); +@@ -1878,6 +1878,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size) + + i915_active_init(&vma->active, NULL, NULL); + ++ kref_init(&vma->ref); + mutex_init(&vma->pages_mutex); + vma->vm = i915_vm_get(&ggtt->vm); + vma->ops = &pd_vma_ops; +diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c +index 01c822256b39..00973017abba 100644 +--- a/drivers/gpu/drm/i915/i915_vma.c ++++ b/drivers/gpu/drm/i915/i915_vma.c +@@ -112,6 +112,7 @@ vma_create(struct drm_i915_gem_object *obj, + if (vma == NULL) + return ERR_PTR(-ENOMEM); + ++ kref_init(&vma->ref); + mutex_init(&vma->pages_mutex); + vma->vm = i915_vm_get(vm); + vma->ops = &vm->vma_ops; +@@ -978,8 +979,10 @@ void i915_vma_reopen(struct i915_vma *vma) + __i915_vma_remove_closed(vma); + } + +-void i915_vma_destroy(struct i915_vma *vma) ++void i915_vma_release(struct kref *ref) + { ++ struct i915_vma *vma = container_of(ref, typeof(*vma), ref); ++ + if (drm_mm_node_allocated(&vma->node)) { + mutex_lock(&vma->vm->mutex); + atomic_and(~I915_VMA_PIN_MASK, &vma->flags); +@@ -1027,7 +1030,7 @@ void i915_vma_parked(struct intel_gt *gt) + spin_unlock_irq(>->closed_lock); + + if (obj) { +- i915_vma_destroy(vma); ++ __i915_vma_put(vma); + i915_gem_object_put(obj); + } + +@@ -1192,7 +1195,7 @@ int __i915_vma_unbind(struct i915_vma *vma) + i915_vma_detach(vma); + vma_unbind_pages(vma); + +- drm_mm_remove_node(&vma->node); /* pairs with i915_vma_destroy() */ ++ drm_mm_remove_node(&vma->node); /* pairs with i915_vma_release() */ + return 0; + } + +diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h +index 465932813bc5..ce1db908ad69 100644 +--- a/drivers/gpu/drm/i915/i915_vma.h ++++ b/drivers/gpu/drm/i915/i915_vma.h +@@ -51,14 +51,19 @@ enum i915_cache_level; + */ + struct i915_vma { + struct drm_mm_node node; +- struct drm_i915_gem_object *obj; ++ + struct i915_address_space *vm; + const struct i915_vma_ops *ops; +- struct i915_fence_reg *fence; ++ ++ struct drm_i915_gem_object *obj; + struct dma_resv *resv; /** Alias of obj->resv */ ++ + struct sg_table *pages; + void __iomem *iomap; + void *private; /* owned by creator */ ++ ++ struct i915_fence_reg *fence; ++ + u64 size; + u64 display_alignment; + struct i915_page_sizes page_sizes; +@@ -71,6 +76,7 @@ struct i915_vma { + * handles (but same file) for execbuf, i.e. the number of aliases + * that exist in the ctx->handle_vmas LUT for this vma. 
+ */ ++ struct kref ref; + atomic_t open_count; + atomic_t flags; + /** +@@ -333,7 +339,20 @@ int __must_check i915_vma_unbind(struct i915_vma *vma); + void i915_vma_unlink_ctx(struct i915_vma *vma); + void i915_vma_close(struct i915_vma *vma); + void i915_vma_reopen(struct i915_vma *vma); +-void i915_vma_destroy(struct i915_vma *vma); ++ ++static inline struct i915_vma *__i915_vma_get(struct i915_vma *vma) ++{ ++ if (kref_get_unless_zero(&vma->ref)) ++ return vma; ++ ++ return NULL; ++} ++ ++void i915_vma_release(struct kref *ref); ++static inline void __i915_vma_put(struct i915_vma *vma) ++{ ++ kref_put(&vma->ref, i915_vma_release); ++} + + #define assert_vma_held(vma) dma_resv_assert_held((vma)->resv) + +-- +2.25.1 + diff --git a/libre/linux-libre-pae/PKGBUILD b/libre/linux-libre-pae/PKGBUILD index e3ca642b8..77bce7a92 100644 --- a/libre/linux-libre-pae/PKGBUILD +++ b/libre/linux-libre-pae/PKGBUILD @@ -9,7 +9,7 @@ _replacesoldkernels=() # '%' gets replaced with kernel suffix _replacesoldmodules=() # '%' gets replaced with kernel suffix pkgbase=linux-libre-pae -pkgver=5.5.2 +pkgver=5.5.5 pkgrel=1 pkgdesc='Linux-libre PAE (physical address extension)' url='https://linux-libre.fsfla.org/' @@ -38,9 +38,17 @@ source=( # extracted patches from Arch Linux kernel sources 0001-ZEN-Add-sysctl-and-CONFIG-to-disallow-unprivileged-C.patch 0002-iwlwifi-pcie-restore-support-for-Killer-Qu-C0-NICs.patch - 0003-ALSA-hda-Fix-DP-MST-support-for-NVIDIA-codecs.patch - 0004-Btrfs-send-fix-emission-of-invalid-clone-operations-.patch - 0005-iwlwifi-mvm-Do-not-require-PHY_SKU-NVM-section-for-3.patch + 0003-iwlwifi-mvm-Do-not-require-PHY_SKU-NVM-section-for-3.patch + 0004-drm-i915-Wean-off-drm_pci_alloc-drm_pci_free.patch + 0005-drm-Remove-PageReserved-manipulation-from-drm_pci_al.patch + 0006-drm-i915-execlists-Always-force-a-context-reload-whe.patch + 0007-drm-i915-Serialise-i915_active_acquire-with-__active.patch + 0008-drm-i915-gem-Take-runtime-pm-wakeref-prior-to-unbind.patch + 0009-drm-i915-gem-Avoid-parking-the-vma-as-we-unbind.patch + 0010-drm-i915-gem-Try-to-flush-pending-unbind-events.patch + 0011-drm-i915-gem-Reinitialise-the-local-list-before-repe.patch + 0012-drm-i915-Add-a-simple-is-bound-check-before-unbindin.patch + 0013-drm-i915-Introduce-a-vma.kref.patch ) validpgpkeys=( '474402C8C582DAFBE389C427BCB7CF877E7D47A7' # Alexandre Oliva @@ -48,7 +56,7 @@ validpgpkeys=( ) sha512sums=('187368a8fb4e04acfd7d18a024d6cdbc2841bcc06dcfbc3a053706e8512c3e3f573755228347c11bd791b296ec60eb2d67d5075ece2aef234a847e72f2b3e746' 'SKIP' - 'f4c6e0bc192dbb5b38db407443ab4dd7c352b7ed2e638abcfab90a80486625a962fd30d0aa1d98ec87a87ab2c0de71a2d9ab4246fcc5fa7685803b02b52ed23f' + 'd1ff0cd2089118da0311401c7f1f9f3d083af452899f3ffa7f9875d92aa9e85716fe87cbd8c59292d37cd6a37a0ebe68d8b1c0ea433277aaf636dd984fe0a7de' 'SKIP' '13cb5bc42542e7b8bb104d5f68253f6609e463b6799800418af33eb0272cc269aaa36163c3e6f0aacbdaaa1d05e2827a4a7c4a08a029238439ed08b89c564bb3' 'SKIP' @@ -59,11 +67,19 @@ sha512sums=('187368a8fb4e04acfd7d18a024d6cdbc2841bcc06dcfbc3a053706e8512c3e3f573 '9b8298a195857108453042090bfebf7736b373b177077926bda7091bc490aeda1872fb51cf126e021eae97096973dfab6d6c858bebf5d565a9df4ffe28d2df2f' '02af4dd2a007e41db0c63822c8ab3b80b5d25646af1906dc85d0ad9bb8bbf5236f8e381d7f91cf99ed4b0978c50aee37cb9567cdeef65b7ec3d91b882852b1af' 'b8fe56e14006ab866970ddbd501c054ae37186ddc065bb869cf7d18db8c0d455118d5bda3255fb66a0dde38b544655cfe9040ffe46e41d19830b47959b2fb168' - 
'cde74d205146ca872250ac4b21e7a931da7c64dd3024ebefd09a61e87fc8beaa17cd5fff388483ab39a9ced4eca77fcaa8ce1e59f632119c4749683d12fa1795' - '4c2454c3712788d7fd90455cd866e81aa444bf25b2670afd0732141ee39ad12c83bf5d0ae82faeabe1aa22d495d7542ac2b8db94c1627c39dc498b03f0dd4973' - '65f22d4996f9669e52efbf981c5a9955ea3eb6429ad5c12561c3862884ea12b10c58af1fbcb6bfb575a6c1be1ad3bff513459a32fb0a627541a0b6b97faaba16' - 'a4a545f29f21dc4e1ea59ab3361207f50769cb5c2d3056c169252623811f7087318e9d6f0f58e79124185637ab4c2036a9da88525ebf57846f63ae2c02f36017' - '6b895b7fa46463204ce5ea01b9a832ab24f510bcc5b998c6390a0b3c02ef5d5666db7034f428521aaa0c6e017a99103efc298cb6cf6b8123fbadce97587c3e7a') + 'c5006db63c01ad4f94786e4ed129470d74b9131cf2218c82dcbbc24af7de4881ce7e4c8144e3b2194d6015df328593c8f017d1372ae1e46fe49fe55acf5aac38' + 'ba13e423ef1bba3f46c20bb1c0f68b7533564cc4819aa17cabbf5b89552416341b83f7f65e97d4316412ac78365f25c612fde5c35cb29d27d612c351761a8385' + '28780156c1cdcc65d41a08cae0184bc6b42d7cf5f7c94fafea475f3e1f388faa28532216b2cdefcfa6a4d55f70cbb21329d48409c6ad9be78a72d212d0e80252' + '6c13d71552e1a651c7f1e753c6f94210b27f61dee58b8a855a11eed28dcd45afd168b361f2a8ac53b3899cc73322a102c1f99a04496e80b2ead229230883d261' + 'd9d807c2cb6680f494f52c86b54b8ad5b3fafb6bae8a6098a9ebe20383569704cfabded9ad442a9cc8aa05f5a280b76ff41e8b5d504523251e31d753930f3cc3' + '521a5320af6e56f85662a1fe0749aefa9759e4dc8fd21dd890d409256a67ed2da488dab5d54bb12eb71fbfc14142246f2df98ce4cfab061283a4019a8b748999' + 'b1605a93c97c01d255f19f5da20b0b41d9457a9aa473bb22d52feffab99bca0594f5f8b1cf5fb20862e65c84315ddc7bb162869c3337ff9ee795938856d583ba' + 'e24fa8a4bc8589274fc31edbf3e64cd1639737544982bc2c085e5f064daa16b1d56928115813733b46a758ed047dd415d14b87b7fcd958b7a73766066a7a6c32' + '2f961f57e8462615a2a01793d21f3ce49be956991cb537de68b80edbf8dd7123a790324c1a5aa81061436eda4eae30a9b710144d9b18a33c22b595710e30667e' + 'd29b49e87e90a773a0d101ff57688c6d55b1145cb6866da8706ed9313660d16e12c4debfd5005a585b33dad741a19de36fd9ccd8b5e18d9979a05e1845df993c' + '009b4280ba5da1ef78db7582eac04c9e290bfb2f0d7d8ba9574353b1cca9c5dd97c0bf1f85d228dede1f3ac342e35754ba776c2fe755382c65edf1b52f2d9eba' + '0c2cc5f04a577c42dece649d54e00bf8a1f45880a5f984328a3d4a51887b3d48e30dae3058f9da2fc651a95e204749b5512619752ecfc37227acb21e042e26ff' + 'f41929dd44a929dc3bf5f090f7f8c800112a07f1cc2c5ac1e282d4cff2821001c9daeb35987e8b47e70bde5cf5c06b010cac13b4f00fbcbb56b283069a5b30cc') _replacesarchkernel=("${_replacesarchkernel[@]/\%/${pkgbase#linux-libre}}") _replacesoldkernels=("${_replacesoldkernels[@]/\%/${pkgbase#linux-libre}}") |
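The PKGBUILD hunk above drops the three superseded patch entries, adds the 0003 through 0013 series to source=(), and replaces the matching sha512sums entries. As an illustrative aside (not part of this commit), the checksum array is normally regenerated rather than edited by hand; a minimal sketch of that workflow, assuming the new patch files already sit beside the PKGBUILD in libre/linux-libre-pae and that updpkgsums from pacman-contrib is available:

    # Sketch only: refresh the integrity data after changing source=()
    cd libre/linux-libre-pae
    updpkgsums              # rewrites the sha512sums=() array in place
    makepkg -g              # or: print generated checksums for manual pasting
    makepkg --verifysource  # fetch and verify all sources without building

The 'SKIP' entries belong to the detached signature sources, which are verified against the keys listed in validpgpkeys=() rather than by digest.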