// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "intel_ggtt_gmch.h"

#include <drm/intel/intel-gtt.h>

#include <linux/agp_backend.h>

#include "i915_drv.h"
#include "i915_utils.h"
#include "intel_gtt.h"
#include "intel_gt_regs.h"
#include "intel_gt.h"

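/*
 * Map a single page into the GGTT via the GMCH. On these legacy platforms
 * the pat_index carries the old cache-level value, so anything other than
 * I915_CACHE_NONE is inserted as cacheable AGP memory.
 */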
static void gmch_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  unsigned int pat_index,
				  u32 unused)
{
	unsigned int flags = (pat_index == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gmch_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

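/*
 * Read back the DMA address stored in a GGTT entry via the GMCH driver,
 * reporting whether the entry is present and whether it is local memory.
 */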
static dma_addr_t gmch_ggtt_read_entry(struct i915_address_space *vm,
				       u64 offset, bool *is_present, bool *is_local)
{
	return intel_gmch_gtt_read_entry(offset >> PAGE_SHIFT,
					 is_present, is_local);
}

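/*
 * Map a whole VMA into the GGTT by handing its scatterlist to the GMCH
 * driver, using the same pat_index to AGP flag translation as above.
 */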
static void gmch_ggtt_insert_entries(struct i915_address_space *vm,
				     struct i915_vma_resource *vma_res,
				     unsigned int pat_index,
				     u32 unused)
{
	unsigned int flags = (pat_index == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gmch_gtt_insert_sg_entries(vma_res->bi.pages, vma_res->start >> PAGE_SHIFT,
					 flags);
}

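/* Flush pending GGTT writes so they are visible to the GPU. */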
static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
{
	intel_gmch_gtt_flush();
}

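/* Clear a range of GGTT entries, converting the byte range into page indices. */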
static void gmch_ggtt_clear_range(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	intel_gmch_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}

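/* Tear down the GMCH mapping when the GGTT address space is cleaned up. */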
static void gmch_ggtt_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

/*
 * Certain Gen5 chipsets require idling the GPU before unmapping anything from
 * the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *i915)
{
	/*
	 * Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (!i915_vtd_active(i915))
		return false;

	if (GRAPHICS_VER(i915) == 5 && IS_MOBILE(i915))
		return true;

	return false;
}

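/**
 * intel_ggtt_gmch_probe - probe and set up a GMCH-managed GGTT
 * @ggtt: the GGTT to initialise
 *
 * Probe the GMCH through the intel-gtt driver, read back the GGTT size and
 * mappable aperture, and wire up the address-space operations so that all
 * PTE manipulation goes through the GMCH.
 *
 * Return: 0 on success, -EIO if the GMCH could not be probed.
 */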
int intel_ggtt_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *i915 = ggtt->vm.i915;
	phys_addr_t gmadr_base;
	int ret;

	ret = intel_gmch_probe(i915->gmch.pdev, to_pci_dev(i915->drm.dev), NULL);
	if (!ret) {
		drm_err(&i915->drm, "failed to set up gmch\n");
		return -EIO;
	}

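	/* The GMCH reports the total GGTT size and the mappable aperture. */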
	intel_gmch_gtt_get(&ggtt->vm.total, &gmadr_base, &ggtt->mappable_end);

	ggtt->gmadr = DEFINE_RES_MEM(gmadr_base, ggtt->mappable_end);

	ggtt->vm.alloc_pt_dma = alloc_pt_dma;
	ggtt->vm.alloc_scratch_dma = alloc_pt_dma;

	if (needs_idle_maps(i915)) {
		drm_notice(&i915->drm,
			   "Flushing DMA requests before IOMMU unmaps; performance may be degraded\n");
		ggtt->do_idle_maps = true;
	}

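	/* All GGTT PTE updates are routed through the GMCH helpers above. */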
	ggtt->vm.insert_page = gmch_ggtt_insert_page;
	ggtt->vm.insert_entries = gmch_ggtt_insert_entries;
	ggtt->vm.clear_range = gmch_ggtt_clear_range;
	ggtt->vm.scratch_range = gmch_ggtt_clear_range;
	ggtt->vm.read_entry = gmch_ggtt_read_entry;
	ggtt->vm.cleanup = gmch_ggtt_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	ggtt->vm.vma_ops.bind_vma = intel_ggtt_bind_vma;
	ggtt->vm.vma_ops.unbind_vma = intel_ggtt_unbind_vma;

	if (unlikely(ggtt->do_idle_maps))
		drm_notice(&i915->drm,
			   "Applying Ironlake quirks for intel_iommu\n");

	return 0;
}

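/**
 * intel_ggtt_gmch_enable_hw - enable the GTT through the GMCH
 * @i915: the i915 device
 *
 * Return: 0 on success, -EIO if the GMCH failed to enable the GTT.
 */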
int intel_ggtt_gmch_enable_hw(struct drm_i915_private *i915)
{
	if (!intel_gmch_enable_gtt())
		return -EIO;

	return 0;
}

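/**
 * intel_ggtt_gmch_flush - flush pending GGTT writes via the GMCH
 */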
void intel_ggtt_gmch_flush(void)
{
	intel_gmch_gtt_flush();
}