// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2006, Intel Corporation.
 *
 * Copyright (C) 2006-2008 Intel Corporation
 * Author: Ashok Raj <ashok.raj@intel.com>
 * Author: Shaohua Li <shaohua.li@intel.com>
 * Author: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *
 * This file implements early detection/parsing of Remapping Devices
 * reported to OS through BIOS via DMA remapping reporting (DMAR) ACPI
 * tables.
 *
 * These routines are used by both DMA-remapping and Interrupt-remapping
 */

#define pr_fmt(fmt)     "DMAR: " fmt

#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
#include <linux/timer.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/tboot.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/numa.h>
#include <linux/limits.h>
#include <asm/irq_remapping.h>

#include "iommu.h"
#include "../irq_remapping.h"
#include "../iommu-pages.h"
#include "perf.h"
#include "trace.h"
#include "perfmon.h"

typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
struct dmar_res_callback {
        dmar_res_handler_t      cb[ACPI_DMAR_TYPE_RESERVED];
        void                    *arg[ACPI_DMAR_TYPE_RESERVED];
        bool                    ignore_unhandled;
        bool                    print_entry;
};

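/*
 * A dmar_res_callback is filled in by the callers of
 * dmar_walk_remapping_entries(); for example, parse_dmar_table() below
 * installs one handler per DMAR structure type:
 *
 *	struct dmar_res_callback cb = {
 *		.print_entry = true,
 *		.ignore_unhandled = true,
 *		.cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
 *		...
 *	};
 *
 * Entries with no handler are either skipped (ignore_unhandled) or
 * treated as a parse error.
 */
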
|---|
/*
 * Assumptions:
 * 1) The hotplug framework guarantees that DMAR unit will be hot-added
 *    before IO devices managed by that unit.
 * 2) The hotplug framework guarantees that DMAR unit will be hot-removed
 *    after IO devices managed by that unit.
 * 3) Hotplug events are rare.
 *
 * Locking rules for DMA and interrupt remapping related global data structures:
 * 1) Use dmar_global_lock in process context
 * 2) Use RCU in interrupt context
 */
DECLARE_RWSEM(dmar_global_lock);
LIST_HEAD(dmar_drhd_units);

struct acpi_table_header * __initdata dmar_tbl;
static int dmar_dev_scope_status = 1;
static DEFINE_IDA(dmar_seq_ids);

static int alloc_iommu(struct dmar_drhd_unit *drhd);
static void free_iommu(struct intel_iommu *iommu);

static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
{
        /*
         * Add INCLUDE_ALL at the tail, so a scan of the list will find
         * it at the very end.
         */
        if (drhd->include_all)
                list_add_tail_rcu(&drhd->list, &dmar_drhd_units);
        else
                list_add_rcu(&drhd->list, &dmar_drhd_units);
}

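/*
 * dmar_alloc_dev_scope() makes a counting pass over the device-scope
 * entries of a DMAR structure and allocates a dmar_dev_scope array big
 * enough to hold every NAMESPACE, ENDPOINT and BRIDGE entry found.
 * The array starts out zeroed; callers populate the slots later, e.g.
 * via dmar_insert_dev_scope().
 */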
|---|
void *dmar_alloc_dev_scope(void *start, void *end, int *cnt)
{
        struct acpi_dmar_device_scope *scope;

        *cnt = 0;
        while (start < end) {
                scope = start;
                if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_NAMESPACE ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT ||
                    scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        (*cnt)++;
                else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC &&
                         scope->entry_type != ACPI_DMAR_SCOPE_TYPE_HPET) {
                        pr_warn("Unsupported device scope\n");
                }
                start += scope->length;
        }
        if (*cnt == 0)
                return NULL;

        return kcalloc(*cnt, sizeof(struct dmar_dev_scope), GFP_KERNEL);
}

void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt)
{
        int i;
        struct device *tmp_dev;

        if (*devices && *cnt) {
                for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
                        put_device(tmp_dev);
                kfree(*devices);
        }

        *devices = NULL;
        *cnt = 0;
}

/* Optimize out kzalloc()/kfree() for normal cases */
static char dmar_pci_notify_info_buf[64];

static struct dmar_pci_notify_info *
dmar_alloc_pci_notify_info(struct pci_dev *dev, unsigned long event)
{
        int level = 0;
        size_t size;
        struct pci_dev *tmp;
        struct dmar_pci_notify_info *info;

        /*
         * Ignore devices that have a domain number higher than what can
         * be looked up in DMAR, e.g. VMD subdevices with domain 0x10000
         */
        if (pci_domain_nr(dev->bus) > U16_MAX)
                return NULL;

        /* Only generate path[] for device addition event */
        if (event == BUS_NOTIFY_ADD_DEVICE)
                for (tmp = dev; tmp; tmp = tmp->bus->self)
                        level++;

        size = struct_size(info, path, level);
        if (size <= sizeof(dmar_pci_notify_info_buf)) {
                info = (struct dmar_pci_notify_info *)dmar_pci_notify_info_buf;
        } else {
                info = kzalloc(size, GFP_KERNEL);
                if (!info) {
                        if (dmar_dev_scope_status == 0)
                                dmar_dev_scope_status = -ENOMEM;
                        return NULL;
                }
        }

        info->event = event;
        info->dev = dev;
        info->seg = pci_domain_nr(dev->bus);
        info->level = level;
        if (event == BUS_NOTIFY_ADD_DEVICE) {
                for (tmp = dev; tmp; tmp = tmp->bus->self) {
                        level--;
                        info->path[level].bus = tmp->bus->number;
                        info->path[level].device = PCI_SLOT(tmp->devfn);
                        info->path[level].function = PCI_FUNC(tmp->devfn);
                        if (pci_is_root_bus(tmp->bus))
                                info->bus = tmp->bus->number;
                }
        }

        return info;
}

static inline void dmar_free_pci_notify_info(struct dmar_pci_notify_info *info)
{
        if ((void *)info != dmar_pci_notify_info_buf)
                kfree(info);
}

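/*
 * Match the PCI path recorded in @info against the path listed in a
 * DMAR device-scope entry. If the exact path does not match, fall back
 * to matching a single-entry path against the device's immediate
 * (bus, device, function) - a workaround for broken RMRR entries in
 * some firmware.
 */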
|---|
static bool dmar_match_pci_path(struct dmar_pci_notify_info *info, int bus,
                                struct acpi_dmar_pci_path *path, int count)
{
        int i;

        if (info->bus != bus)
                goto fallback;
        if (info->level != count)
                goto fallback;

        for (i = 0; i < count; i++) {
                if (path[i].device != info->path[i].device ||
                    path[i].function != info->path[i].function)
                        goto fallback;
        }

        return true;

fallback:

        if (count != 1)
                return false;

        i = info->level - 1;
        if (bus              == info->path[i].bus &&
            path[0].device   == info->path[i].device &&
            path[0].function == info->path[i].function) {
                pr_info(FW_BUG "RMRR entry for device %02x:%02x.%x is broken - applying workaround\n",
                        bus, path[0].device, path[0].function);
                return true;
        }

        return false;
}

/* Return: > 0 if match found, 0 if no match found, < 0 if error happens */
int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
                          void *start, void *end, u16 segment,
                          struct dmar_dev_scope *devices,
                          int devices_cnt)
{
        int i, level;
        struct device *tmp, *dev = &info->dev->dev;
        struct acpi_dmar_device_scope *scope;
        struct acpi_dmar_pci_path *path;

        if (segment != info->seg)
                return 0;

        for (; start < end; start += scope->length) {
                scope = start;
                if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                    scope->entry_type != ACPI_DMAR_SCOPE_TYPE_BRIDGE)
                        continue;

                path = (struct acpi_dmar_pci_path *)(scope + 1);
                level = (scope->length - sizeof(*scope)) / sizeof(*path);
                if (!dmar_match_pci_path(info, scope->bus, path, level))
                        continue;

                /*
                 * We expect devices with endpoint scope to have normal PCI
                 * headers, and devices with bridge scope to have bridge PCI
                 * headers.  However PCI NTB devices may be listed in the
                 * DMAR table with bridge scope, even though they have a
                 * normal PCI header.  NTB devices are identified by class
                 * "BRIDGE_OTHER" (0680h) - we don't declare a scope mismatch
                 * for this special case.
                 */
                if ((scope->entry_type == ACPI_DMAR_SCOPE_TYPE_ENDPOINT &&
                     info->dev->hdr_type != PCI_HEADER_TYPE_NORMAL) ||
                    (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE &&
                     (info->dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
                      info->dev->class >> 16 != PCI_BASE_CLASS_BRIDGE))) {
                        pr_warn("Device scope type does not match for %s\n",
                                pci_name(info->dev));
                        return -EINVAL;
                }

                for_each_dev_scope(devices, devices_cnt, i, tmp)
                        if (tmp == NULL) {
                                devices[i].bus = info->dev->bus->number;
                                devices[i].devfn = info->dev->devfn;
                                rcu_assign_pointer(devices[i].dev,
                                                   get_device(dev));
                                return 1;
                        }
                if (WARN_ON(i >= devices_cnt))
                        return -EINVAL;
        }

        return 0;
}

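/*
 * Removal follows the usual RCU publish/retire protocol: clear the
 * slot with RCU_INIT_POINTER(), wait for readers with
 * synchronize_rcu(), and only then drop the device reference taken by
 * dmar_insert_dev_scope().
 */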
|---|
int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, u16 segment,
                          struct dmar_dev_scope *devices, int count)
{
        int index;
        struct device *tmp;

        if (info->seg != segment)
                return 0;

        for_each_active_dev_scope(devices, count, index, tmp)
                if (tmp == &info->dev->dev) {
                        RCU_INIT_POINTER(devices[index].dev, NULL);
                        synchronize_rcu();
                        put_device(tmp);
                        return 1;
                }

        return 0;
}

static int dmar_pci_bus_add_dev(struct dmar_pci_notify_info *info)
{
        int ret = 0;
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        for_each_drhd_unit(dmaru) {
                if (dmaru->include_all)
                        continue;

                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit, header);
                ret = dmar_insert_dev_scope(info, (void *)(drhd + 1),
                                            ((void *)drhd) + drhd->header.length,
                                            dmaru->segment,
                                            dmaru->devices, dmaru->devices_cnt);
                if (ret)
                        break;
        }
        if (ret >= 0)
                ret = dmar_iommu_notify_scope_dev(info);
        if (ret < 0 && dmar_dev_scope_status == 0)
                dmar_dev_scope_status = ret;

        if (ret >= 0)
                intel_irq_remap_add_device(info);

        return ret;
}

static void dmar_pci_bus_del_dev(struct dmar_pci_notify_info *info)
{
        struct dmar_drhd_unit *dmaru;

        for_each_drhd_unit(dmaru)
                if (dmar_remove_dev_scope(info, dmaru->segment,
                                          dmaru->devices, dmaru->devices_cnt))
                        break;
        dmar_iommu_notify_scope_dev(info);
}

static inline void vf_inherit_msi_domain(struct pci_dev *pdev)
{
        struct pci_dev *physfn = pci_physfn(pdev);

        dev_set_msi_domain(&pdev->dev, dev_get_msi_domain(&physfn->dev));
}

static int dmar_pci_bus_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct pci_dev *pdev = to_pci_dev(data);
        struct dmar_pci_notify_info *info;

        /* Only care about add/remove events for physical functions.
         * For VFs we actually do the lookup based on the corresponding
         * PF in device_to_iommu() anyway. */
        if (pdev->is_virtfn) {
                /*
                 * Ensure that the VF device inherits the irq domain of the
                 * PF device. Ideally the device would inherit the domain
                 * from the bus, but DMAR can have multiple units per bus
                 * which makes this impossible. The VF 'bus' could inherit
                 * from the PF device, but that's yet another x86'sism to
                 * inflict on everybody else.
                 */
                if (action == BUS_NOTIFY_ADD_DEVICE)
                        vf_inherit_msi_domain(pdev);
                return NOTIFY_DONE;
        }

        if (action != BUS_NOTIFY_ADD_DEVICE &&
            action != BUS_NOTIFY_REMOVED_DEVICE)
                return NOTIFY_DONE;

        info = dmar_alloc_pci_notify_info(pdev, action);
        if (!info)
                return NOTIFY_DONE;

        down_write(&dmar_global_lock);
        if (action == BUS_NOTIFY_ADD_DEVICE)
                dmar_pci_bus_add_dev(info);
        else if (action == BUS_NOTIFY_REMOVED_DEVICE)
                dmar_pci_bus_del_dev(info);
        up_write(&dmar_global_lock);

        dmar_free_pci_notify_info(info);

        return NOTIFY_OK;
}

static struct notifier_block dmar_pci_bus_nb = {
        .notifier_call = dmar_pci_bus_notifier,
        .priority = 1,
};

static struct dmar_drhd_unit *
dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
{
        struct dmar_drhd_unit *dmaru;

        list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list,
                                dmar_rcu_check())
                if (dmaru->segment == drhd->segment &&
                    dmaru->reg_base_addr == drhd->address)
                        return dmaru;

        return NULL;
}

/*
 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
 * structure which uniquely represents one DMA remapping hardware unit
 * present in the platform
 */
static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct dmar_drhd_unit *dmaru;
        int ret;

        drhd = (struct acpi_dmar_hardware_unit *)header;
        dmaru = dmar_find_dmaru(drhd);
        if (dmaru)
                goto out;

        dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
        if (!dmaru)
                return -ENOMEM;

        /*
         * If header is allocated from slab by ACPI _DSM method, we need to
         * copy the content because the memory buffer will be freed on return.
         */
        dmaru->hdr = (void *)(dmaru + 1);
        memcpy(dmaru->hdr, header, header->length);
        dmaru->reg_base_addr = drhd->address;
        dmaru->segment = drhd->segment;
        /* The size of the register set is 2 ^ N 4 KB pages. */
        dmaru->reg_size = 1UL << (drhd->size + 12);
        dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
        dmaru->devices = dmar_alloc_dev_scope((void *)(drhd + 1),
                                              ((void *)drhd) + drhd->header.length,
                                              &dmaru->devices_cnt);
        if (dmaru->devices_cnt && dmaru->devices == NULL) {
                kfree(dmaru);
                return -ENOMEM;
        }

        ret = alloc_iommu(dmaru);
        if (ret) {
                dmar_free_dev_scope(&dmaru->devices,
                                    &dmaru->devices_cnt);
                kfree(dmaru);
                return ret;
        }
        dmar_register_drhd_unit(dmaru);

out:
        if (arg)
                (*(int *)arg)++;

        return 0;
}

static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
{
        if (dmaru->devices && dmaru->devices_cnt)
                dmar_free_dev_scope(&dmaru->devices, &dmaru->devices_cnt);
        if (dmaru->iommu)
                free_iommu(dmaru->iommu);
        kfree(dmaru);
}

static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
                                      void *arg)
{
        struct acpi_dmar_andd *andd = (void *)header;

        /* Check for NUL termination within the designated length */
        if (strnlen(andd->device_name, header->length - 8) == header->length - 8) {
                pr_warn(FW_BUG
                        "Your BIOS is broken; ANDD object name is not NUL-terminated\n"
                        "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                        dmi_get_system_info(DMI_BIOS_VENDOR),
                        dmi_get_system_info(DMI_BIOS_VERSION),
                        dmi_get_system_info(DMI_PRODUCT_VERSION));
                add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
                return -EINVAL;
        }
        pr_info("ANDD device: %x name: %s\n", andd->device_number,
                andd->device_name);

        return 0;
}

#ifdef CONFIG_ACPI_NUMA
static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
{
        struct acpi_dmar_rhsa *rhsa;
        struct dmar_drhd_unit *drhd;

        rhsa = (struct acpi_dmar_rhsa *)header;
        for_each_drhd_unit(drhd) {
                if (drhd->reg_base_addr == rhsa->base_address) {
                        int node = pxm_to_node(rhsa->proximity_domain);

                        if (node != NUMA_NO_NODE && !node_online(node))
                                node = NUMA_NO_NODE;
                        drhd->iommu->node = node;
                        return 0;
                }
        }
        pr_warn(FW_BUG
                "Your BIOS is broken; RHSA refers to non-existent DMAR unit at %llx\n"
                "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                rhsa->base_address,
                dmi_get_system_info(DMI_BIOS_VENDOR),
                dmi_get_system_info(DMI_BIOS_VERSION),
                dmi_get_system_info(DMI_PRODUCT_VERSION));
        add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);

        return 0;
}
#else
#define dmar_parse_one_rhsa		dmar_res_noop
#endif

static void
dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
{
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_reserved_memory *rmrr;
        struct acpi_dmar_atsr *atsr;
        struct acpi_dmar_rhsa *rhsa;
        struct acpi_dmar_satc *satc;

        switch (header->type) {
        case ACPI_DMAR_TYPE_HARDWARE_UNIT:
                drhd = container_of(header, struct acpi_dmar_hardware_unit,
                                    header);
                pr_info("DRHD base: %#016Lx flags: %#x\n",
                        (unsigned long long)drhd->address, drhd->flags);
                break;
        case ACPI_DMAR_TYPE_RESERVED_MEMORY:
                rmrr = container_of(header, struct acpi_dmar_reserved_memory,
                                    header);
                pr_info("RMRR base: %#016Lx end: %#016Lx\n",
                        (unsigned long long)rmrr->base_address,
                        (unsigned long long)rmrr->end_address);
                break;
        case ACPI_DMAR_TYPE_ROOT_ATS:
                atsr = container_of(header, struct acpi_dmar_atsr, header);
                pr_info("ATSR flags: %#x\n", atsr->flags);
                break;
        case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
                rhsa = container_of(header, struct acpi_dmar_rhsa, header);
                pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
                        (unsigned long long)rhsa->base_address,
                        rhsa->proximity_domain);
                break;
        case ACPI_DMAR_TYPE_NAMESPACE:
                /* We don't print this here because we need to sanity-check
                   it first. So print it in dmar_parse_one_andd() instead. */
                break;
        case ACPI_DMAR_TYPE_SATC:
                satc = container_of(header, struct acpi_dmar_satc, header);
                pr_info("SATC flags: 0x%x\n", satc->flags);
                break;
        }
}

/**
 * dmar_table_detect - checks to see if the platform supports DMAR devices
 */
static int __init dmar_table_detect(void)
{
        acpi_status status = AE_OK;

        /* if we could find DMAR table, then there are DMAR devices */
        status = acpi_get_table(ACPI_SIG_DMAR, 0, &dmar_tbl);

        if (ACPI_SUCCESS(status) && !dmar_tbl) {
                pr_warn("Unable to map DMAR\n");
                status = AE_NOT_FOUND;
        }

        return ACPI_SUCCESS(status) ? 0 : -ENOENT;
}

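/*
 * Walk the remapping structures packed after the DMAR table header.
 * Each entry carries its own length, so the walk is defensive: a
 * zero-length entry stops the loop (broken firmware), an entry running
 * past the table end is an error, and unknown types at or above
 * ACPI_DMAR_TYPE_RESERVED are skipped for forward compatibility.
 */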
|---|
static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
                                       size_t len, struct dmar_res_callback *cb)
{
        struct acpi_dmar_header *iter, *next;
        struct acpi_dmar_header *end = ((void *)start) + len;

        for (iter = start; iter < end; iter = next) {
                next = (void *)iter + iter->length;
                if (iter->length == 0) {
                        /* Avoid looping forever on bad ACPI tables */
                        pr_debug(FW_BUG "Invalid 0-length structure\n");
                        break;
                } else if (next > end) {
                        /* Avoid passing table end */
                        pr_warn(FW_BUG "Record passes table end\n");
                        return -EINVAL;
                }

                if (cb->print_entry)
                        dmar_table_print_dmar_entry(iter);

                if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
                        /* continue for forward compatibility */
                        pr_debug("Unknown DMAR structure type %d\n",
                                 iter->type);
                } else if (cb->cb[iter->type]) {
                        int ret;

                        ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
                        if (ret)
                                return ret;
                } else if (!cb->ignore_unhandled) {
                        pr_warn("No handler for DMAR structure type %d\n",
                                iter->type);
                        return -EINVAL;
                }
        }

        return 0;
}

static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
                                       struct dmar_res_callback *cb)
{
        return dmar_walk_remapping_entries((void *)(dmar + 1),
                        dmar->header.length - sizeof(*dmar), cb);
}

/**
 * parse_dmar_table - parses the DMA reporting table
 */
static int __init
parse_dmar_table(void)
{
        struct acpi_table_dmar *dmar;
        int drhd_count = 0;
        int ret;
        struct dmar_res_callback cb = {
                .print_entry = true,
                .ignore_unhandled = true,
                .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
                .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
                .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
                .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
                .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
                .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
                .cb[ACPI_DMAR_TYPE_SATC] = &dmar_parse_one_satc,
        };

        /*
         * Do it again; the earlier dmar_tbl mapping could have been made
         * with the fixed map.
         */
        dmar_table_detect();

        /*
         * ACPI tables may not be DMA protected by tboot, so use DMAR copy
         * SINIT saved in SinitMleData in TXT heap (which is DMA protected)
         */
        dmar_tbl = tboot_get_dmar_table(dmar_tbl);

        dmar = (struct acpi_table_dmar *)dmar_tbl;
        if (!dmar)
                return -ENODEV;

        if (dmar->width < PAGE_SHIFT - 1) {
                pr_warn("Invalid DMAR haw\n");
                return -EINVAL;
        }

        pr_info("Host address width %d\n", dmar->width + 1);
        ret = dmar_walk_dmar_table(dmar, &cb);
        if (ret == 0 && drhd_count == 0)
                pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");

        return ret;
}

static int dmar_pci_device_match(struct dmar_dev_scope devices[],
                                 int cnt, struct pci_dev *dev)
{
        int index;
        struct device *tmp;

        while (dev) {
                for_each_active_dev_scope(devices, cnt, index, tmp)
                        if (dev_is_pci(tmp) && dev == to_pci_dev(tmp))
                                return 1;

                /* Check our parent */
                dev = dev->bus->self;
        }

        return 0;
}

struct dmar_drhd_unit *
dmar_find_matched_drhd_unit(struct pci_dev *dev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;

        dev = pci_physfn(dev);

        rcu_read_lock();
        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                if (dmaru->include_all &&
                    drhd->segment == pci_domain_nr(dev->bus))
                        goto out;

                if (dmar_pci_device_match(dmaru->devices,
                                          dmaru->devices_cnt, dev))
                        goto out;
        }
        dmaru = NULL;
out:
        rcu_read_unlock();

        return dmaru;
}

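/*
 * ANDD structures name ACPI namespace devices rather than PCI ones.
 * dmar_acpi_insert_dev_scope() locates the DRHD whose NAMESPACE scope
 * entry carries the matching enumeration ID and records the ACPI
 * device in a free slot of that unit's device array, keyed by the
 * (bus, devfn) taken from the scope entry's PCI path.
 */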
|---|
static void __init dmar_acpi_insert_dev_scope(u8 device_number,
                                              struct acpi_device *adev)
{
        struct dmar_drhd_unit *dmaru;
        struct acpi_dmar_hardware_unit *drhd;
        struct acpi_dmar_device_scope *scope;
        struct device *tmp;
        int i;
        struct acpi_dmar_pci_path *path;

        for_each_drhd_unit(dmaru) {
                drhd = container_of(dmaru->hdr,
                                    struct acpi_dmar_hardware_unit,
                                    header);

                for (scope = (void *)(drhd + 1);
                     (unsigned long)scope < ((unsigned long)drhd) + drhd->header.length;
                     scope = ((void *)scope) + scope->length) {
                        if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_NAMESPACE)
                                continue;
                        if (scope->enumeration_id != device_number)
                                continue;

                        path = (void *)(scope + 1);
                        pr_info("ACPI device \"%s\" under DMAR at %llx as %02x:%02x.%d\n",
                                dev_name(&adev->dev), dmaru->reg_base_addr,
                                scope->bus, path->device, path->function);
                        for_each_dev_scope(dmaru->devices, dmaru->devices_cnt, i, tmp)
                                if (tmp == NULL) {
                                        dmaru->devices[i].bus = scope->bus;
                                        dmaru->devices[i].devfn = PCI_DEVFN(path->device,
                                                                            path->function);
                                        rcu_assign_pointer(dmaru->devices[i].dev,
                                                           get_device(&adev->dev));
                                        return;
                                }
                        BUG_ON(i >= dmaru->devices_cnt);
                }
        }
        pr_warn("No IOMMU scope found for ANDD enumeration ID %d (%s)\n",
                device_number, dev_name(&adev->dev));
}

static int __init dmar_acpi_dev_scope_init(void)
{
        struct acpi_dmar_andd *andd;

        if (dmar_tbl == NULL)
                return -ENODEV;

        for (andd = (void *)dmar_tbl + sizeof(struct acpi_table_dmar);
             ((unsigned long)andd) < ((unsigned long)dmar_tbl) + dmar_tbl->length;
             andd = ((void *)andd) + andd->header.length) {
                if (andd->header.type == ACPI_DMAR_TYPE_NAMESPACE) {
                        acpi_handle h;
                        struct acpi_device *adev;

                        if (!ACPI_SUCCESS(acpi_get_handle(ACPI_ROOT_OBJECT,
                                                          andd->device_name,
                                                          &h))) {
                                pr_err("Failed to find handle for ACPI object %s\n",
                                       andd->device_name);
                                continue;
                        }
                        adev = acpi_fetch_acpi_dev(h);
                        if (!adev) {
                                pr_err("Failed to get device for ACPI object %s\n",
                                       andd->device_name);
                                continue;
                        }
                        dmar_acpi_insert_dev_scope(andd->device_number, adev);
                }
        }
        return 0;
}

int __init dmar_dev_scope_init(void)
{
        struct pci_dev *dev = NULL;
        struct dmar_pci_notify_info *info;

        if (dmar_dev_scope_status != 1)
                return dmar_dev_scope_status;

        if (list_empty(&dmar_drhd_units)) {
                dmar_dev_scope_status = -ENODEV;
        } else {
                dmar_dev_scope_status = 0;

                dmar_acpi_dev_scope_init();

                for_each_pci_dev(dev) {
                        if (dev->is_virtfn)
                                continue;

                        info = dmar_alloc_pci_notify_info(dev,
                                        BUS_NOTIFY_ADD_DEVICE);
                        if (!info) {
                                pci_dev_put(dev);
                                return dmar_dev_scope_status;
                        } else {
                                dmar_pci_bus_add_dev(info);
                                dmar_free_pci_notify_info(info);
                        }
                }
        }

        return dmar_dev_scope_status;
}

void __init dmar_register_bus_notifier(void)
{
        bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
}

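/*
 * dmar_table_init() can be reached from several early-boot paths; the
 * static dmar_table_initialized flag caches the first result (1 on
 * success, a negative errno on failure) so the DMAR table is parsed
 * only once.
 */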
|---|
int __init dmar_table_init(void)
{
        static int dmar_table_initialized;
        int ret;

        if (dmar_table_initialized == 0) {
                ret = parse_dmar_table();
                if (ret < 0) {
                        if (ret != -ENODEV)
                                pr_info("Parse DMAR table failure.\n");
                } else if (list_empty(&dmar_drhd_units)) {
                        pr_info("No DMAR devices found\n");
                        ret = -ENODEV;
                }

                if (ret < 0)
                        dmar_table_initialized = ret;
                else
                        dmar_table_initialized = 1;
        }

        return dmar_table_initialized < 0 ? dmar_table_initialized : 0;
}

static void warn_invalid_dmar(u64 addr, const char *message)
{
        pr_warn_once(FW_BUG
                     "Your BIOS is broken; DMAR reported at address %llx%s!\n"
                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
                     addr, message,
                     dmi_get_system_info(DMI_BIOS_VENDOR),
                     dmi_get_system_info(DMI_BIOS_VERSION),
                     dmi_get_system_info(DMI_PRODUCT_VERSION));
        add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
}

static int __ref
dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
{
        struct acpi_dmar_hardware_unit *drhd;
        void __iomem *addr;
        u64 cap, ecap;

        drhd = (void *)entry;
        if (!drhd->address) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        if (arg)
                addr = ioremap(drhd->address, VTD_PAGE_SIZE);
        else
                addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
        if (!addr) {
                pr_warn("Can't validate DRHD address: %llx\n", drhd->address);
                return -EINVAL;
        }

        cap = dmar_readq(addr + DMAR_CAP_REG);
        ecap = dmar_readq(addr + DMAR_ECAP_REG);

        if (arg)
                iounmap(addr);
        else
                early_iounmap(addr, VTD_PAGE_SIZE);

        if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
                warn_invalid_dmar(drhd->address, " returns all ones");
                return -EINVAL;
        }

        return 0;
}

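/*
 * Early IOMMU detection: walk the DMAR table with
 * dmar_validate_one_drhd() as the only handler, sanity-checking every
 * DRHD's register block before committing to VT-d. On success, the
 * x86 init hooks are pointed at the Intel IOMMU implementation.
 */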
|---|
void __init detect_intel_iommu(void)
{
        int ret;
        struct dmar_res_callback validate_drhd_cb = {
                .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
                .ignore_unhandled = true,
        };

        down_write(&dmar_global_lock);
        ret = dmar_table_detect();
        if (!ret)
                ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
                                           &validate_drhd_cb);
        if (!ret && !no_iommu && !iommu_detected &&
            (!dmar_disabled || dmar_platform_optin())) {
                iommu_detected = 1;
                /* Make sure ACS will be enabled */
                pci_request_acs();
        }

        if (!ret) {
                x86_init.iommu.iommu_init = intel_iommu_init;
                x86_platform.iommu_shutdown = intel_iommu_shutdown;
        }

        if (dmar_tbl) {
                acpi_put_table(dmar_tbl);
                dmar_tbl = NULL;
        }
        up_write(&dmar_global_lock);
}

static void unmap_iommu(struct intel_iommu *iommu)
{
        iounmap(iommu->reg);
        release_mem_region(iommu->reg_phys, iommu->reg_size);
}

/**
 * map_iommu: map the iommu's registers
 * @iommu: the iommu to map
 * @drhd: DMA remapping hardware definition structure
 *
 * Memory map the iommu's registers.  Start w/ a single page, and
 * possibly expand if that turns out to be insufficient.
 */
static int map_iommu(struct intel_iommu *iommu, struct dmar_drhd_unit *drhd)
{
        u64 phys_addr = drhd->reg_base_addr;
        int map_size, err = 0;

        iommu->reg_phys = phys_addr;
        iommu->reg_size = drhd->reg_size;

        if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
                pr_err("Can't reserve memory\n");
                err = -EBUSY;
                goto out;
        }

        iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
        if (!iommu->reg) {
                pr_err("Can't map the region\n");
                err = -ENOMEM;
                goto release;
        }

        iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
        iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);

        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
                err = -EINVAL;
                warn_invalid_dmar(phys_addr, " returns all ones");
                goto unmap;
        }

        /* the registers might be more than one page */
        map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
                         cap_max_fault_reg_offset(iommu->cap));
        map_size = VTD_PAGE_ALIGN(map_size);
        if (map_size > iommu->reg_size) {
                iounmap(iommu->reg);
                release_mem_region(iommu->reg_phys, iommu->reg_size);
                iommu->reg_size = map_size;
                if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
                                        iommu->name)) {
                        pr_err("Can't reserve memory\n");
                        err = -EBUSY;
                        goto out;
                }
                iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
                if (!iommu->reg) {
                        pr_err("Can't map the region\n");
                        err = -ENOMEM;
                        goto release;
                }
        }

        if (cap_ecmds(iommu->cap)) {
                int i;

                for (i = 0; i < DMA_MAX_NUM_ECMDCAP; i++) {
                        iommu->ecmdcap[i] = dmar_readq(iommu->reg + DMAR_ECCAP_REG +
                                                       i * DMA_ECMD_REG_STEP);
                }
        }

        err = 0;
        goto out;

unmap:
        iounmap(iommu->reg);
release:
        release_mem_region(iommu->reg_phys, iommu->reg_size);
out:
        return err;
}

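/*
 * alloc_iommu() brings one DRHD's intel_iommu instance to life:
 * allocate a sequence ID, map the register block, read capabilities,
 * derive the supported address widths, and mirror the hardware's
 * current enable bits (TE/IRE/QIE) into iommu->gcmd. For hot-added
 * units (intel_iommu_enabled already set) it also registers the unit
 * with sysfs and the IOMMU core.
 */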
|---|
static int alloc_iommu(struct dmar_drhd_unit *drhd)
{
        struct intel_iommu *iommu;
        u32 ver, sts;
        int agaw = -1;
        int msagaw = -1;
        int err;

        if (!drhd->reg_base_addr) {
                warn_invalid_dmar(0, "");
                return -EINVAL;
        }

        iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
        if (!iommu)
                return -ENOMEM;

        iommu->seq_id = ida_alloc_range(&dmar_seq_ids, 0,
                                        DMAR_UNITS_SUPPORTED - 1, GFP_KERNEL);
        if (iommu->seq_id < 0) {
                pr_err("Failed to allocate seq_id\n");
                err = iommu->seq_id;
                goto error;
        }
        snprintf(iommu->name, sizeof(iommu->name), "dmar%d", iommu->seq_id);

        err = map_iommu(iommu, drhd);
        if (err) {
                pr_err("Failed to map %s\n", iommu->name);
                goto error_free_seq_id;
        }

        if (!cap_sagaw(iommu->cap) &&
            (!ecap_smts(iommu->ecap) || ecap_slts(iommu->ecap))) {
                pr_info("%s: No supported address widths. Not attempting DMA translation.\n",
                        iommu->name);
                drhd->ignored = 1;
        }

        if (!drhd->ignored) {
                agaw = iommu_calculate_agaw(iommu);
                if (agaw < 0) {
                        pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
                               iommu->seq_id);
                        drhd->ignored = 1;
                }
        }
        if (!drhd->ignored) {
                msagaw = iommu_calculate_max_sagaw(iommu);
                if (msagaw < 0) {
                        pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                               iommu->seq_id);
                        drhd->ignored = 1;
                        agaw = -1;
                }
        }
        iommu->agaw = agaw;
        iommu->msagaw = msagaw;
        iommu->segment = drhd->segment;
        iommu->device_rbtree = RB_ROOT;
        spin_lock_init(&iommu->device_rbtree_lock);
        mutex_init(&iommu->iopf_lock);
        iommu->node = NUMA_NO_NODE;
        spin_lock_init(&iommu->lock);
        ida_init(&iommu->domain_ida);
        mutex_init(&iommu->did_lock);

        ver = readl(iommu->reg + DMAR_VER_REG);
        pr_info("%s: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
                iommu->name,
                (unsigned long long)drhd->reg_base_addr,
                DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                (unsigned long long)iommu->cap,
                (unsigned long long)iommu->ecap);

        /* Reflect status in gcmd */
        sts = readl(iommu->reg + DMAR_GSTS_REG);
        if (sts & DMA_GSTS_IRES)
                iommu->gcmd |= DMA_GCMD_IRE;
        if (sts & DMA_GSTS_TES)
                iommu->gcmd |= DMA_GCMD_TE;
        if (sts & DMA_GSTS_QIES)
                iommu->gcmd |= DMA_GCMD_QIE;

        if (alloc_iommu_pmu(iommu))
                pr_debug("Cannot alloc PMU for iommu (seq_id = %d)\n", iommu->seq_id);

        raw_spin_lock_init(&iommu->register_lock);

        /*
         * A value of N in PSS field of eCap register indicates hardware
         * supports PASID field of N+1 bits.
         */
        if (pasid_supported(iommu))
                iommu->iommu.max_pasids = 2UL << ecap_pss(iommu->ecap);

        /*
         * This is only for hotplug; at boot time intel_iommu_enabled won't
         * be set yet. When intel_iommu_init() runs, it registers the units
         * present at boot time, then sets intel_iommu_enabled.
         */
        if (intel_iommu_enabled && !drhd->ignored) {
                err = iommu_device_sysfs_add(&iommu->iommu, NULL,
                                             intel_iommu_groups,
                                             "%s", iommu->name);
                if (err)
                        goto err_unmap;

                err = iommu_device_register(&iommu->iommu, &intel_iommu_ops, NULL);
                if (err)
                        goto err_sysfs;

                iommu_pmu_register(iommu);
        }

        drhd->iommu = iommu;
        iommu->drhd = drhd;

        return 0;

err_sysfs:
        iommu_device_sysfs_remove(&iommu->iommu);
err_unmap:
        free_iommu_pmu(iommu);
        unmap_iommu(iommu);
error_free_seq_id:
        ida_free(&dmar_seq_ids, iommu->seq_id);
error:
        kfree(iommu);
        return err;
}

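/*
 * Teardown mirrors alloc_iommu() in reverse: unregister from the IOMMU
 * core and sysfs, release the PMU, IRQs and queued-invalidation
 * resources, unmap the registers, and finally return the sequence ID.
 */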
|---|
static void free_iommu(struct intel_iommu *iommu)
{
        if (intel_iommu_enabled && !iommu->drhd->ignored) {
                iommu_pmu_unregister(iommu);
                iommu_device_unregister(&iommu->iommu);
                iommu_device_sysfs_remove(&iommu->iommu);
        }

        free_iommu_pmu(iommu);

        if (iommu->irq) {
                if (iommu->pr_irq) {
                        free_irq(iommu->pr_irq, iommu);
                        dmar_free_hwirq(iommu->pr_irq);
                        iommu->pr_irq = 0;
                }
                free_irq(iommu->irq, iommu);
                dmar_free_hwirq(iommu->irq);
                iommu->irq = 0;
        }

        if (iommu->qi) {
                iommu_free_pages(iommu->qi->desc);
                kfree(iommu->qi->desc_status);
                kfree(iommu->qi);
        }

        if (iommu->reg)
                unmap_iommu(iommu);

        ida_destroy(&iommu->domain_ida);
        ida_free(&dmar_seq_ids, iommu->seq_id);
        kfree(iommu);
}

/*
 * Reclaim all the submitted descriptors that have completed their work.
 */
static inline void reclaim_free_desc(struct q_inval *qi)
{
        while (qi->desc_status[qi->free_tail] == QI_FREE && qi->free_tail != qi->free_head) {
                qi->free_tail = (qi->free_tail + 1) % QI_LENGTH;
                qi->free_cnt++;
        }
}

static const char *qi_type_string(u8 type)
{
        switch (type) {
        case QI_CC_TYPE:
                return "Context-cache Invalidation";
        case QI_IOTLB_TYPE:
                return "IOTLB Invalidation";
        case QI_DIOTLB_TYPE:
                return "Device-TLB Invalidation";
        case QI_IEC_TYPE:
                return "Interrupt Entry Cache Invalidation";
        case QI_IWD_TYPE:
                return "Invalidation Wait";
        case QI_EIOTLB_TYPE:
                return "PASID-based IOTLB Invalidation";
        case QI_PC_TYPE:
                return "PASID-cache Invalidation";
        case QI_DEIOTLB_TYPE:
                return "PASID-based Device-TLB Invalidation";
        case QI_PGRP_RESP_TYPE:
                return "Page Group Response";
        default:
                return "UNKNOWN";
        }
}

static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
{
        unsigned int head = dmar_readl(iommu->reg + DMAR_IQH_REG);
        u64 iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG);
        struct qi_desc *desc = iommu->qi->desc + head;

        if (fault & DMA_FSTS_IQE)
                pr_err("VT-d detected Invalidation Queue Error: Reason %llx",
                       DMAR_IQER_REG_IQEI(iqe_err));
        if (fault & DMA_FSTS_ITE)
                pr_err("VT-d detected Invalidation Time-out Error: SID %llx",
                       DMAR_IQER_REG_ITESID(iqe_err));
        if (fault & DMA_FSTS_ICE)
                pr_err("VT-d detected Invalidation Completion Error: SID %llx",
                       DMAR_IQER_REG_ICESID(iqe_err));

        pr_err("QI HEAD: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
               qi_type_string(desc->qw0 & 0xf),
               (unsigned long long)desc->qw0,
               (unsigned long long)desc->qw1);

        head = ((head >> qi_shift(iommu)) + QI_LENGTH - 1) % QI_LENGTH;
        head <<= qi_shift(iommu);
        desc = iommu->qi->desc + head;

        pr_err("QI PRIOR: %s qw0 = 0x%llx, qw1 = 0x%llx\n",
               qi_type_string(desc->qw0 & 0xf),
               (unsigned long long)desc->qw0,
               (unsigned long long)desc->qw1);
}

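/*
 * qi_check_fault() inspects DMAR_FSTS_REG for the three queued-
 * invalidation error classes and recovers from each: an IQE is cleared
 * by overwriting the offending descriptor with the wait descriptor and
 * returning -EINVAL; an ITE aborts every pending wait descriptor
 * (callers retry via -EAGAIN, or give up with -ETIMEDOUT when the
 * target device is gone); an ICE only needs its status bit cleared.
 */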
|---|
| 1271 | static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index) | 
|---|
| 1272 | { | 
|---|
| 1273 | u32 fault; | 
|---|
| 1274 | int head, tail; | 
|---|
| 1275 | struct device *dev; | 
|---|
| 1276 | u64 iqe_err, ite_sid; | 
|---|
| 1277 | struct q_inval *qi = iommu->qi; | 
|---|
| 1278 | int shift = qi_shift(iommu); | 
|---|
| 1279 |  | 
|---|
| 1280 | if (qi->desc_status[wait_index] == QI_ABORT) | 
|---|
| 1281 | return -EAGAIN; | 
|---|
| 1282 |  | 
|---|
| 1283 | fault = readl(addr: iommu->reg + DMAR_FSTS_REG); | 
|---|
| 1284 | if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE)) | 
|---|
| 1285 | qi_dump_fault(iommu, fault); | 
|---|
| 1286 |  | 
|---|
| 1287 | /* | 
|---|
| 1288 | * If IQE happens, the head points to the descriptor associated | 
|---|
| 1289 | * with the error. No new descriptors are fetched until the IQE | 
|---|
| 1290 | * is cleared. | 
|---|
| 1291 | */ | 
|---|
| 1292 | if (fault & DMA_FSTS_IQE) { | 
|---|
| 1293 | head = readl(addr: iommu->reg + DMAR_IQH_REG); | 
|---|
| 1294 | if ((head >> shift) == index) { | 
|---|
| 1295 | struct qi_desc *desc = qi->desc + head; | 
|---|
| 1296 |  | 
|---|
| 1297 | /* | 
|---|
| 1298 | * desc->qw2 and desc->qw3 are either reserved or | 
|---|
| 1299 | * used by software as private data. We won't print | 
|---|
| 1300 | * out these two qw's for security consideration. | 
|---|
| 1301 | */ | 
|---|
| 1302 | memcpy(to: desc, from: qi->desc + (wait_index << shift), | 
|---|
| 1303 | len: 1 << shift); | 
|---|
| 1304 | writel(DMA_FSTS_IQE, addr: iommu->reg + DMAR_FSTS_REG); | 
|---|
| 1305 | pr_info( "Invalidation Queue Error (IQE) cleared\n"); | 
|---|
| 1306 | return -EINVAL; | 
|---|
| 1307 | } | 
|---|
| 1308 | } | 
|---|
| 1309 |  | 
|---|
| 1310 | /* | 
|---|
| 1311 | * If ITE happens, all pending wait_desc commands are aborted. | 
|---|
| 1312 | * No new descriptors are fetched until the ITE is cleared. | 
|---|
| 1313 | */ | 
|---|
| 1314 | if (fault & DMA_FSTS_ITE) { | 
|---|
| 1315 | head = readl(addr: iommu->reg + DMAR_IQH_REG); | 
|---|
| 1316 | head = ((head >> shift) - 1 + QI_LENGTH) % QI_LENGTH; | 
|---|
| 1317 | head |= 1; | 
|---|
| 1318 | tail = readl(addr: iommu->reg + DMAR_IQT_REG); | 
|---|
| 1319 | tail = ((tail >> shift) - 1 + QI_LENGTH) % QI_LENGTH; | 
|---|
| 1320 |  | 
|---|
| 1321 | /* | 
|---|
| 1322 | * SID field is valid only when the ITE field is Set in FSTS_REG | 
|---|
| 1323 | * see Intel VT-d spec r4.1, section 11.4.9.9 | 
|---|
| 1324 | */ | 
|---|
| 1325 | iqe_err = dmar_readq(iommu->reg + DMAR_IQER_REG); | 
|---|
| 1326 | ite_sid = DMAR_IQER_REG_ITESID(iqe_err); | 
|---|
| 1327 |  | 
|---|
| 1328 | writel(DMA_FSTS_ITE, addr: iommu->reg + DMAR_FSTS_REG); | 
|---|
| 1329 | pr_info( "Invalidation Time-out Error (ITE) cleared\n"); | 
|---|
| 1330 |  | 
|---|
| 1331 | do { | 
|---|
| 1332 | if (qi->desc_status[head] == QI_IN_USE) | 
|---|
| 1333 | qi->desc_status[head] = QI_ABORT; | 
|---|
| 1334 | head = (head - 2 + QI_LENGTH) % QI_LENGTH; | 
|---|
| 1335 | } while (head != tail); | 
|---|
| 1336 |  | 
|---|
| 1337 | /* | 
|---|
| 1338 | * If device was released or isn't present, no need to retry | 
|---|
| 1339 | * the ATS invalidate request anymore. | 
|---|
| 1340 | * | 
|---|
| 1341 | * 0 value of ite_sid means old VT-d device, no ite_sid value. | 
|---|
| 1342 | * see Intel VT-d spec r4.1, section 11.4.9.9 | 
|---|
| 1343 | */ | 
|---|
| 1344 | if (ite_sid) { | 
|---|
| 1345 | dev = device_rbtree_find(iommu, rid: ite_sid); | 
|---|
| 1346 | if (!dev || !dev_is_pci(dev) || | 
|---|
| 1347 | !pci_device_is_present(to_pci_dev(dev))) | 
|---|
| 1348 | return -ETIMEDOUT; | 
|---|
| 1349 | } | 
|---|
| 1350 | if (qi->desc_status[wait_index] == QI_ABORT) | 
|---|
| 1351 | return -EAGAIN; | 
|---|
| 1352 | } | 
|---|
| 1353 |  | 
|---|
| 1354 | if (fault & DMA_FSTS_ICE) { | 
|---|
| 1355 | writel(DMA_FSTS_ICE, addr: iommu->reg + DMAR_FSTS_REG); | 
|---|
| 1356 | pr_info( "Invalidation Completion Error (ICE) cleared\n"); | 
|---|
| 1357 | } | 
|---|
| 1358 |  | 
|---|
| 1359 | return 0; | 
|---|
| 1360 | } | 
|---|
| 1361 |  | 
|---|
| 1362 | /* | 
|---|
| 1363 | * Submit invalidation descriptors of all types to the queued | 
|---|
| 1364 | * invalidation interface (QI). Multiple descriptors can be submitted at | 
|---|
| 1365 | * a time; a wait descriptor is appended to each submission so the call | 
|---|
| 1366 | * returns only once hardware has completed the invalidation. Wait | 
|---|
| 1367 | * descriptors may be part of the submission, but those are not polled. | 
|---|
| 1368 | */ | 
|---|
| 1369 | int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc, | 
|---|
| 1370 | unsigned int count, unsigned long options) | 
|---|
| 1371 | { | 
|---|
| 1372 | struct q_inval *qi = iommu->qi; | 
|---|
| 1373 | s64 devtlb_start_ktime = 0; | 
|---|
| 1374 | s64 iotlb_start_ktime = 0; | 
|---|
| 1375 | s64 iec_start_ktime = 0; | 
|---|
| 1376 | struct qi_desc wait_desc; | 
|---|
| 1377 | int wait_index, index; | 
|---|
| 1378 | unsigned long flags; | 
|---|
| 1379 | int offset, shift; | 
|---|
| 1380 | int rc, i; | 
|---|
| 1381 | u64 type; | 
|---|
| 1382 |  | 
|---|
| 1383 | if (!qi) | 
|---|
| 1384 | return 0; | 
|---|
| 1385 |  | 
|---|
| 1386 | type = desc->qw0 & GENMASK_ULL(3, 0); | 
|---|
| 1387 |  | 
|---|
| 1388 | if ((type == QI_IOTLB_TYPE || type == QI_EIOTLB_TYPE) && | 
|---|
| 1389 | dmar_latency_enabled(iommu, type: DMAR_LATENCY_INV_IOTLB)) | 
|---|
| 1390 | iotlb_start_ktime = ktime_to_ns(kt: ktime_get()); | 
|---|
| 1391 |  | 
|---|
| 1392 | if ((type == QI_DIOTLB_TYPE || type == QI_DEIOTLB_TYPE) && | 
|---|
| 1393 | dmar_latency_enabled(iommu, type: DMAR_LATENCY_INV_DEVTLB)) | 
|---|
| 1394 | devtlb_start_ktime = ktime_to_ns(kt: ktime_get()); | 
|---|
| 1395 |  | 
|---|
| 1396 | if (type == QI_IEC_TYPE && | 
|---|
| 1397 | dmar_latency_enabled(iommu, type: DMAR_LATENCY_INV_IEC)) | 
|---|
| 1398 | iec_start_ktime = ktime_to_ns(kt: ktime_get()); | 
|---|
| 1399 |  | 
|---|
| 1400 | restart: | 
|---|
| 1401 | rc = 0; | 
|---|
| 1402 |  | 
|---|
| 1403 | raw_spin_lock_irqsave(&qi->q_lock, flags); | 
|---|
| 1404 | /* | 
|---|
| 1405 | * Check if we have enough empty slots in the queue to submit; | 
|---|
| 1406 | * the calculation is based on: | 
|---|
| 1407 | * # of desc + 1 wait desc + 1 space between head and tail | 
|---|
| 1408 | */ | 
|---|
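|  | /* | 
|---|
|  | * e.g. submitting count == 2 descriptors requires free_cnt >= 4: | 
|---|
|  | * two descriptor slots, one wait-descriptor slot, and one slot | 
|---|
|  | * kept empty so a full queue is distinguishable from an empty | 
|---|
|  | * one (head != tail). | 
|---|
|  | */ | 
|---|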
| 1409 | while (qi->free_cnt < count + 2) { | 
|---|
| 1410 | raw_spin_unlock_irqrestore(&qi->q_lock, flags); | 
|---|
| 1411 | cpu_relax(); | 
|---|
| 1412 | raw_spin_lock_irqsave(&qi->q_lock, flags); | 
|---|
| 1413 | } | 
|---|
| 1414 |  | 
|---|
| 1415 | index = qi->free_head; | 
|---|
| 1416 | wait_index = (index + count) % QI_LENGTH; | 
|---|
| 1417 | shift = qi_shift(iommu); | 
|---|
| 1418 |  | 
|---|
| 1419 | for (i = 0; i < count; i++) { | 
|---|
| 1420 | offset = ((index + i) % QI_LENGTH) << shift; | 
|---|
| 1421 | memcpy(to: qi->desc + offset, from: &desc[i], len: 1 << shift); | 
|---|
| 1422 | qi->desc_status[(index + i) % QI_LENGTH] = QI_IN_USE; | 
|---|
| 1423 | trace_qi_submit(iommu, qw0: desc[i].qw0, qw1: desc[i].qw1, | 
|---|
| 1424 | qw2: desc[i].qw2, qw3: desc[i].qw3); | 
|---|
| 1425 | } | 
|---|
| 1426 | qi->desc_status[wait_index] = QI_IN_USE; | 
|---|
| 1427 |  | 
|---|
| 1428 | wait_desc.qw0 = QI_IWD_STATUS_DATA(QI_DONE) | | 
|---|
| 1429 | QI_IWD_STATUS_WRITE | QI_IWD_TYPE; | 
|---|
| 1430 | if (options & QI_OPT_WAIT_DRAIN) | 
|---|
| 1431 | wait_desc.qw0 |= QI_IWD_PRQ_DRAIN; | 
|---|
| 1432 | wait_desc.qw1 = virt_to_phys(address: &qi->desc_status[wait_index]); | 
|---|
| 1433 | wait_desc.qw2 = 0; | 
|---|
| 1434 | wait_desc.qw3 = 0; | 
|---|
| 1435 |  | 
|---|
| 1436 | offset = wait_index << shift; | 
|---|
| 1437 | memcpy(to: qi->desc + offset, from: &wait_desc, len: 1 << shift); | 
|---|
| 1438 |  | 
|---|
| 1439 | qi->free_head = (qi->free_head + count + 1) % QI_LENGTH; | 
|---|
| 1440 | qi->free_cnt -= count + 1; | 
|---|
| 1441 |  | 
|---|
| 1442 | /* | 
|---|
| 1443 | * update the HW tail register indicating the presence of | 
|---|
| 1444 | * new descriptors. | 
|---|
| 1445 | */ | 
|---|
| 1446 | writel(val: qi->free_head << shift, addr: iommu->reg + DMAR_IQT_REG); | 
|---|
| 1447 |  | 
|---|
| 1448 | while (READ_ONCE(qi->desc_status[wait_index]) != QI_DONE) { | 
|---|
| 1449 | /* | 
|---|
| 1450 | * We leave interrupts disabled to prevent interrupt context from | 
|---|
| 1451 | * queueing another cmd while a cmd is already submitted and awaiting | 
|---|
| 1452 | * completion on this cpu. This avoids a deadlock where interrupt | 
|---|
| 1453 | * context could wait indefinitely for free slots in the | 
|---|
| 1454 | * queue. | 
|---|
| 1455 | */ | 
|---|
| 1456 | rc = qi_check_fault(iommu, index, wait_index); | 
|---|
| 1457 | if (rc) | 
|---|
| 1458 | break; | 
|---|
| 1459 |  | 
|---|
| 1460 | raw_spin_unlock(&qi->q_lock); | 
|---|
| 1461 | cpu_relax(); | 
|---|
| 1462 | raw_spin_lock(&qi->q_lock); | 
|---|
| 1463 | } | 
|---|
| 1464 |  | 
|---|
| 1465 | /* | 
|---|
| 1466 | * The reclaim code can free descriptors from multiple submissions | 
|---|
| 1467 | * starting from the tail of the queue. When count == 0, the | 
|---|
| 1468 | * status of the standalone wait descriptor at the tail of the queue | 
|---|
| 1469 | * must be set to QI_FREE to allow the reclaim code to proceed. | 
|---|
| 1470 | * It is also possible that descriptors from one of the previous | 
|---|
| 1471 | * submissions have to be reclaimed by a subsequent submission. | 
|---|
| 1472 | */ | 
|---|
| 1473 | for (i = 0; i <= count; i++) | 
|---|
| 1474 | qi->desc_status[(index + i) % QI_LENGTH] = QI_FREE; | 
|---|
| 1475 |  | 
|---|
| 1476 | reclaim_free_desc(qi); | 
|---|
| 1477 | raw_spin_unlock_irqrestore(&qi->q_lock, flags); | 
|---|
| 1478 |  | 
|---|
| 1479 | if (rc == -EAGAIN) | 
|---|
| 1480 | goto restart; | 
|---|
| 1481 |  | 
|---|
| 1482 | if (iotlb_start_ktime) | 
|---|
| 1483 | dmar_latency_update(iommu, type: DMAR_LATENCY_INV_IOTLB, | 
|---|
| 1484 | latency: ktime_to_ns(kt: ktime_get()) - iotlb_start_ktime); | 
|---|
| 1485 |  | 
|---|
| 1486 | if (devtlb_start_ktime) | 
|---|
| 1487 | dmar_latency_update(iommu, type: DMAR_LATENCY_INV_DEVTLB, | 
|---|
| 1488 | latency: ktime_to_ns(kt: ktime_get()) - devtlb_start_ktime); | 
|---|
| 1489 |  | 
|---|
| 1490 | if (iec_start_ktime) | 
|---|
| 1491 | dmar_latency_update(iommu, type: DMAR_LATENCY_INV_IEC, | 
|---|
| 1492 | latency: ktime_to_ns(kt: ktime_get()) - iec_start_ktime); | 
|---|
| 1493 |  | 
|---|
| 1494 | return rc; | 
|---|
| 1495 | } | 
|---|
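|  | /* | 
|---|
|  | * Illustrative sketch (hypothetical caller, not part of this file): | 
|---|
|  | * batching two invalidations in a single submission so that only one | 
|---|
|  | * wait descriptor is appended and polled. The did/sid/addr values are | 
|---|
|  | * assumed to come from the caller's context. | 
|---|
|  | * | 
|---|
|  | *	struct qi_desc desc[2] = {}; | 
|---|
|  | * | 
|---|
|  | *	qi_desc_iotlb(iommu, did, addr, size_order, DMA_TLB_PSI_FLUSH, | 
|---|
|  | *		      &desc[0]); | 
|---|
|  | *	qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, &desc[1]); | 
|---|
|  | *	qi_submit_sync(iommu, desc, 2, 0); | 
|---|
|  | */ | 
|---|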
| 1496 |  | 
|---|
| 1497 | /* | 
|---|
| 1498 | * Flush the global interrupt entry cache. | 
|---|
| 1499 | */ | 
|---|
| 1500 | void qi_global_iec(struct intel_iommu *iommu) | 
|---|
| 1501 | { | 
|---|
| 1502 | struct qi_desc desc; | 
|---|
| 1503 |  | 
|---|
| 1504 | desc.qw0 = QI_IEC_TYPE; | 
|---|
| 1505 | desc.qw1 = 0; | 
|---|
| 1506 | desc.qw2 = 0; | 
|---|
| 1507 | desc.qw3 = 0; | 
|---|
| 1508 |  | 
|---|
| 1509 | /* should never fail */ | 
|---|
| 1510 | qi_submit_sync(iommu, desc: &desc, count: 1, options: 0); | 
|---|
| 1511 | } | 
|---|
| 1512 |  | 
|---|
| 1513 | void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm, | 
|---|
| 1514 | u64 type) | 
|---|
| 1515 | { | 
|---|
| 1516 | struct qi_desc desc; | 
|---|
| 1517 |  | 
|---|
| 1518 | desc.qw0 = QI_CC_FM(fm) | QI_CC_SID(sid) | QI_CC_DID(did) | 
|---|
| 1519 | | QI_CC_GRAN(type) | QI_CC_TYPE; | 
|---|
| 1520 | desc.qw1 = 0; | 
|---|
| 1521 | desc.qw2 = 0; | 
|---|
| 1522 | desc.qw3 = 0; | 
|---|
| 1523 |  | 
|---|
| 1524 | qi_submit_sync(iommu, desc: &desc, count: 1, options: 0); | 
|---|
| 1525 | } | 
|---|
| 1526 |  | 
|---|
| 1527 | void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr, | 
|---|
| 1528 | unsigned int size_order, u64 type) | 
|---|
| 1529 | { | 
|---|
| 1530 | struct qi_desc desc; | 
|---|
| 1531 |  | 
|---|
| 1532 | qi_desc_iotlb(iommu, did, addr, size_order, type, desc: &desc); | 
|---|
| 1533 | qi_submit_sync(iommu, desc: &desc, count: 1, options: 0); | 
|---|
| 1534 | } | 
|---|
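|  | /* | 
|---|
|  | * Illustrative sketch (hypothetical caller): a domain-selective | 
|---|
|  | * flush drops every IOTLB entry tagged with domain id 'did': | 
|---|
|  | * | 
|---|
|  | *	qi_flush_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH); | 
|---|
|  | */ | 
|---|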
| 1535 |  | 
|---|
| 1536 | void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid, | 
|---|
| 1537 | u16 qdep, u64 addr, unsigned mask) | 
|---|
| 1538 | { | 
|---|
| 1539 | struct qi_desc desc; | 
|---|
| 1540 |  | 
|---|
| 1541 | /* | 
|---|
| 1542 | * VT-d spec, section 4.3: | 
|---|
| 1543 | * | 
|---|
| 1544 | * Software is recommended to not submit any Device-TLB invalidation | 
|---|
| 1545 | * requests while address remapping hardware is disabled. | 
|---|
| 1546 | */ | 
|---|
| 1547 | if (!(iommu->gcmd & DMA_GCMD_TE)) | 
|---|
| 1548 | return; | 
|---|
| 1549 |  | 
|---|
| 1550 | qi_desc_dev_iotlb(sid, pfsid, qdep, addr, mask, desc: &desc); | 
|---|
| 1551 | qi_submit_sync(iommu, desc: &desc, count: 1, options: 0); | 
|---|
| 1552 | } | 
|---|
| 1553 |  | 
|---|
| 1554 | /* PASID-based IOTLB invalidation */ | 
|---|
| 1555 | void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr, | 
|---|
| 1556 | unsigned long npages, bool ih) | 
|---|
| 1557 | { | 
|---|
| 1558 | struct qi_desc desc = {.qw2 = 0, .qw3 = 0}; | 
|---|
| 1559 |  | 
|---|
| 1560 | /* | 
|---|
| 1561 | * npages == -1 means a PASID-selective invalidation; any positive | 
|---|
| 1562 | * value means a Page-selective-within-PASID invalidation. | 
|---|
| 1563 | * 0 is not a valid input. | 
|---|
| 1564 | */ | 
|---|
| 1565 | if (WARN_ON(!npages)) { | 
|---|
| 1566 | pr_err( "Invalid input npages = %ld\n", npages); | 
|---|
| 1567 | return; | 
|---|
| 1568 | } | 
|---|
| 1569 |  | 
|---|
| 1570 | qi_desc_piotlb(did, pasid, addr, npages, ih, desc: &desc); | 
|---|
| 1571 | qi_submit_sync(iommu, desc: &desc, count: 1, options: 0); | 
|---|
| 1572 | } | 
|---|
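|  | /* | 
|---|
|  | * Illustrative sketch (hypothetical caller): a PASID-selective | 
|---|
|  | * flush drops every IOTLB entry tagged with 'pasid' in domain 'did': | 
|---|
|  | * | 
|---|
|  | *	qi_flush_piotlb(iommu, did, pasid, 0, (unsigned long)-1, false); | 
|---|
|  | */ | 
|---|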
| 1573 |  | 
|---|
| 1574 | /* PASID-based device IOTLB invalidation */ | 
|---|
| 1575 | void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, | 
|---|
| 1576 | u32 pasid, u16 qdep, u64 addr, unsigned int size_order) | 
|---|
| 1577 | { | 
|---|
| 1578 | struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0}; | 
|---|
| 1579 |  | 
|---|
| 1580 | /* | 
|---|
| 1581 | * VT-d spec, section 4.3: | 
|---|
| 1582 | * | 
|---|
| 1583 | * Software is recommended to not submit any Device-TLB invalidation | 
|---|
| 1584 | * requests while address remapping hardware is disabled. | 
|---|
| 1585 | */ | 
|---|
| 1586 | if (!(iommu->gcmd & DMA_GCMD_TE)) | 
|---|
| 1587 | return; | 
|---|
| 1588 |  | 
|---|
| 1589 | qi_desc_dev_iotlb_pasid(sid, pfsid, pasid, | 
|---|
| 1590 | qdep, addr, size_order, | 
|---|
| 1591 | desc: &desc); | 
|---|
| 1592 | qi_submit_sync(iommu, desc: &desc, count: 1, options: 0); | 
|---|
| 1593 | } | 
|---|
| 1594 |  | 
|---|
| 1595 | void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, | 
|---|
| 1596 | u64 granu, u32 pasid) | 
|---|
| 1597 | { | 
|---|
| 1598 | struct qi_desc desc = {.qw1 = 0, .qw2 = 0, .qw3 = 0}; | 
|---|
| 1599 |  | 
|---|
| 1600 | desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) | | 
|---|
| 1601 | QI_PC_GRAN(granu) | QI_PC_TYPE; | 
|---|
| 1602 | qi_submit_sync(iommu, desc: &desc, count: 1, options: 0); | 
|---|
| 1603 | } | 
|---|
| 1604 |  | 
|---|
| 1605 | /* | 
|---|
| 1606 | * Disable Queued Invalidation interface. | 
|---|
| 1607 | */ | 
|---|
| 1608 | void dmar_disable_qi(struct intel_iommu *iommu) | 
|---|
| 1609 | { | 
|---|
| 1610 | unsigned long flags; | 
|---|
| 1611 | u32 sts; | 
|---|
| 1612 | cycles_t start_time = get_cycles(); | 
|---|
| 1613 |  | 
|---|
| 1614 | if (!ecap_qis(iommu->ecap)) | 
|---|
| 1615 | return; | 
|---|
| 1616 |  | 
|---|
| 1617 | raw_spin_lock_irqsave(&iommu->register_lock, flags); | 
|---|
| 1618 |  | 
|---|
| 1619 | sts = readl(addr: iommu->reg + DMAR_GSTS_REG); | 
|---|
| 1620 | if (!(sts & DMA_GSTS_QIES)) | 
|---|
| 1621 | goto end; | 
|---|
| 1622 |  | 
|---|
| 1623 | /* | 
|---|
| 1624 | * Give the HW a chance to complete the pending invalidation requests. | 
|---|
| 1625 | */ | 
|---|
| 1626 | while ((readl(addr: iommu->reg + DMAR_IQT_REG) != | 
|---|
| 1627 | readl(addr: iommu->reg + DMAR_IQH_REG)) && | 
|---|
| 1628 | (DMAR_OPERATION_TIMEOUT > (get_cycles() - start_time))) | 
|---|
| 1629 | cpu_relax(); | 
|---|
| 1630 |  | 
|---|
| 1631 | iommu->gcmd &= ~DMA_GCMD_QIE; | 
|---|
| 1632 | writel(val: iommu->gcmd, addr: iommu->reg + DMAR_GCMD_REG); | 
|---|
| 1633 |  | 
|---|
| 1634 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, | 
|---|
| 1635 | !(sts & DMA_GSTS_QIES), sts); | 
|---|
| 1636 | end: | 
|---|
| 1637 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); | 
|---|
| 1638 | } | 
|---|
| 1639 |  | 
|---|
| 1640 | /* | 
|---|
| 1641 | * Enable queued invalidation. | 
|---|
| 1642 | */ | 
|---|
| 1643 | static void __dmar_enable_qi(struct intel_iommu *iommu) | 
|---|
| 1644 | { | 
|---|
| 1645 | u32 sts; | 
|---|
| 1646 | unsigned long flags; | 
|---|
| 1647 | struct q_inval *qi = iommu->qi; | 
|---|
| 1648 | u64 val = virt_to_phys(address: qi->desc); | 
|---|
| 1649 |  | 
|---|
| 1650 | qi->free_head = qi->free_tail = 0; | 
|---|
| 1651 | qi->free_cnt = QI_LENGTH; | 
|---|
| 1652 |  | 
|---|
| 1653 | /* | 
|---|
| 1654 | * Set DW=1 and QS=1 in IQA_REG when Scalable Mode capability | 
|---|
| 1655 | * is present. | 
|---|
| 1656 | */ | 
|---|
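|  | /* | 
|---|
|  | * Bit 11 is the DW (descriptor width) field, selecting 256-bit | 
|---|
|  | * descriptors; bit 0 sets the QS (queue size) field to 1, i.e. | 
|---|
|  | * 2^1 = 2 pages, matching the 8KB queue allocated in | 
|---|
|  | * dmar_enable_qi(). | 
|---|
|  | */ | 
|---|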
| 1657 | if (ecap_smts(iommu->ecap)) | 
|---|
| 1658 | val |= BIT_ULL(11) | BIT_ULL(0); | 
|---|
| 1659 |  | 
|---|
| 1660 | raw_spin_lock_irqsave(&iommu->register_lock, flags); | 
|---|
| 1661 |  | 
|---|
| 1662 | /* write zero to the tail reg */ | 
|---|
| 1663 | writel(val: 0, addr: iommu->reg + DMAR_IQT_REG); | 
|---|
| 1664 |  | 
|---|
| 1665 | dmar_writeq(iommu->reg + DMAR_IQA_REG, val); | 
|---|
| 1666 |  | 
|---|
| 1667 | iommu->gcmd |= DMA_GCMD_QIE; | 
|---|
| 1668 | writel(val: iommu->gcmd, addr: iommu->reg + DMAR_GCMD_REG); | 
|---|
| 1669 |  | 
|---|
| 1670 | /* Make sure hardware completes it */ | 
|---|
| 1671 | IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_QIES), sts); | 
|---|
| 1672 |  | 
|---|
| 1673 | raw_spin_unlock_irqrestore(&iommu->register_lock, flags); | 
|---|
| 1674 | } | 
|---|
| 1675 |  | 
|---|
| 1676 | /* | 
|---|
| 1677 | * Enable the Queued Invalidation interface. This is required to support | 
|---|
| 1678 | * interrupt remapping. It is also used by DMA remapping, where it | 
|---|
| 1679 | * replaces register-based IOTLB invalidation. | 
|---|
| 1680 | */ | 
|---|
| 1681 | int dmar_enable_qi(struct intel_iommu *iommu) | 
|---|
| 1682 | { | 
|---|
| 1683 | struct q_inval *qi; | 
|---|
| 1684 | void *desc; | 
|---|
| 1685 |  | 
|---|
| 1686 | if (!ecap_qis(iommu->ecap)) | 
|---|
| 1687 | return -ENOENT; | 
|---|
| 1688 |  | 
|---|
| 1689 | /* | 
|---|
| 1690 | * Queued invalidation is already set up and enabled. | 
|---|
| 1691 | */ | 
|---|
| 1692 | if (iommu->qi) | 
|---|
| 1693 | return 0; | 
|---|
| 1694 |  | 
|---|
| 1695 | iommu->qi = kmalloc(sizeof(*qi), GFP_ATOMIC); | 
|---|
| 1696 | if (!iommu->qi) | 
|---|
| 1697 | return -ENOMEM; | 
|---|
| 1698 |  | 
|---|
| 1699 | qi = iommu->qi; | 
|---|
| 1700 |  | 
|---|
| 1701 | /* | 
|---|
| 1702 | * Need two pages to accommodate 256 descriptors of 256 bits each | 
|---|
| 1703 | * if the remapping hardware supports scalable mode translation. | 
|---|
| 1704 | */ | 
|---|
| 1705 | desc = iommu_alloc_pages_node_sz(nid: iommu->node, GFP_ATOMIC, | 
|---|
| 1706 | ecap_smts(iommu->ecap) ? SZ_8K : | 
|---|
| 1707 | SZ_4K); | 
|---|
| 1708 | if (!desc) { | 
|---|
| 1709 | kfree(objp: qi); | 
|---|
| 1710 | iommu->qi = NULL; | 
|---|
| 1711 | return -ENOMEM; | 
|---|
| 1712 | } | 
|---|
| 1713 |  | 
|---|
| 1714 | qi->desc = desc; | 
|---|
| 1715 |  | 
|---|
| 1716 | qi->desc_status = kcalloc(QI_LENGTH, sizeof(int), GFP_ATOMIC); | 
|---|
| 1717 | if (!qi->desc_status) { | 
|---|
| 1718 | iommu_free_pages(virt: qi->desc); | 
|---|
| 1719 | kfree(objp: qi); | 
|---|
| 1720 | iommu->qi = NULL; | 
|---|
| 1721 | return -ENOMEM; | 
|---|
| 1722 | } | 
|---|
| 1723 |  | 
|---|
| 1724 | raw_spin_lock_init(&qi->q_lock); | 
|---|
| 1725 |  | 
|---|
| 1726 | __dmar_enable_qi(iommu); | 
|---|
| 1727 |  | 
|---|
| 1728 | return 0; | 
|---|
| 1729 | } | 
|---|
| 1730 |  | 
|---|
| 1731 | /* iommu interrupt handling. Most of it is MSI-like. */ | 
|---|
| 1732 |  | 
|---|
| 1733 | enum faulttype { | 
|---|
| 1734 | DMA_REMAP, | 
|---|
| 1735 | INTR_REMAP, | 
|---|
| 1736 | UNKNOWN, | 
|---|
| 1737 | }; | 
|---|
| 1738 |  | 
|---|
| 1739 | static const char *dma_remap_fault_reasons[] = | 
|---|
| 1740 | { | 
|---|
| 1741 | "Software", | 
|---|
| 1742 | "Present bit in root entry is clear", | 
|---|
| 1743 | "Present bit in context entry is clear", | 
|---|
| 1744 | "Invalid context entry", | 
|---|
| 1745 | "Access beyond MGAW", | 
|---|
| 1746 | "PTE Write access is not set", | 
|---|
| 1747 | "PTE Read access is not set", | 
|---|
| 1748 | "Next page table ptr is invalid", | 
|---|
| 1749 | "Root table address invalid", | 
|---|
| 1750 | "Context table ptr is invalid", | 
|---|
| 1751 | "non-zero reserved fields in RTP", | 
|---|
| 1752 | "non-zero reserved fields in CTP", | 
|---|
| 1753 | "non-zero reserved fields in PTE", | 
|---|
| 1754 | "PCE for translation request specifies blocking", | 
|---|
| 1755 | }; | 
|---|
| 1756 |  | 
|---|
| 1757 | static const char * const dma_remap_sm_fault_reasons[] = { | 
|---|
| 1758 | "SM: Invalid Root Table Address", | 
|---|
| 1759 | "SM: TTM 0 for request with PASID", | 
|---|
| 1760 | "SM: TTM 0 for page group request", | 
|---|
| 1761 | "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x33-0x37 */ | 
|---|
| 1762 | "SM: Error attempting to access Root Entry", | 
|---|
| 1763 | "SM: Present bit in Root Entry is clear", | 
|---|
| 1764 | "SM: Non-zero reserved field set in Root Entry", | 
|---|
| 1765 | "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x3B-0x3F */ | 
|---|
| 1766 | "SM: Error attempting to access Context Entry", | 
|---|
| 1767 | "SM: Present bit in Context Entry is clear", | 
|---|
| 1768 | "SM: Non-zero reserved field set in the Context Entry", | 
|---|
| 1769 | "SM: Invalid Context Entry", | 
|---|
| 1770 | "SM: DTE field in Context Entry is clear", | 
|---|
| 1771 | "SM: PASID Enable field in Context Entry is clear", | 
|---|
| 1772 | "SM: PASID is larger than the max in Context Entry", | 
|---|
| 1773 | "SM: PRE field in Context-Entry is clear", | 
|---|
| 1774 | "SM: RID_PASID field error in Context-Entry", | 
|---|
| 1775 | "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x49-0x4F */ | 
|---|
| 1776 | "SM: Error attempting to access the PASID Directory Entry", | 
|---|
| 1777 | "SM: Present bit in Directory Entry is clear", | 
|---|
| 1778 | "SM: Non-zero reserved field set in PASID Directory Entry", | 
|---|
| 1779 | "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x53-0x57 */ | 
|---|
| 1780 | "SM: Error attempting to access PASID Table Entry", | 
|---|
| 1781 | "SM: Present bit in PASID Table Entry is clear", | 
|---|
| 1782 | "SM: Non-zero reserved field set in PASID Table Entry", | 
|---|
| 1783 | "SM: Invalid Scalable-Mode PASID Table Entry", | 
|---|
| 1784 | "SM: ERE field is clear in PASID Table Entry", | 
|---|
| 1785 | "SM: SRE field is clear in PASID Table Entry", | 
|---|
| 1786 | "Unknown", "Unknown",/* 0x5E-0x5F */ | 
|---|
| 1787 | "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x60-0x67 */ | 
|---|
| 1788 | "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x68-0x6F */ | 
|---|
| 1789 | "SM: Error attempting to access first-level paging entry", | 
|---|
| 1790 | "SM: Present bit in first-level paging entry is clear", | 
|---|
| 1791 | "SM: Non-zero reserved field set in first-level paging entry", | 
|---|
| 1792 | "SM: Error attempting to access FL-PML4 entry", | 
|---|
| 1793 | "SM: First-level entry address beyond MGAW in Nested translation", | 
|---|
| 1794 | "SM: Read permission error in FL-PML4 entry in Nested translation", | 
|---|
| 1795 | "SM: Read permission error in first-level paging entry in Nested translation", | 
|---|
| 1796 | "SM: Write permission error in first-level paging entry in Nested translation", | 
|---|
| 1797 | "SM: Error attempting to access second-level paging entry", | 
|---|
| 1798 | "SM: Read/Write permission error in second-level paging entry", | 
|---|
| 1799 | "SM: Non-zero reserved field set in second-level paging entry", | 
|---|
| 1800 | "SM: Invalid second-level page table pointer", | 
|---|
| 1801 | "SM: A/D bit update needed in second-level entry when set up in no snoop", | 
|---|
| 1802 | "Unknown", "Unknown", "Unknown", /* 0x7D-0x7F */ | 
|---|
| 1803 | "SM: Address in first-level translation is not canonical", | 
|---|
| 1804 | "SM: U/S set 0 for first-level translation with user privilege", | 
|---|
| 1805 | "SM: No execute permission for request with PASID and ER=1", | 
|---|
| 1806 | "SM: Address beyond the DMA hardware max", | 
|---|
| 1807 | "SM: Second-level entry address beyond the max", | 
|---|
| 1808 | "SM: No write permission for Write/AtomicOp request", | 
|---|
| 1809 | "SM: No read permission for Read/AtomicOp request", | 
|---|
| 1810 | "SM: Invalid address-interrupt address", | 
|---|
| 1811 | "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", /* 0x88-0x8F */ | 
|---|
| 1812 | "SM: A/D bit update needed in first-level entry when set up in no snoop", | 
|---|
| 1813 | }; | 
|---|
| 1814 |  | 
|---|
| 1815 | static const char *irq_remap_fault_reasons[] = | 
|---|
| 1816 | { | 
|---|
| 1817 | "Detected reserved fields in the decoded interrupt-remapped request", | 
|---|
| 1818 | "Interrupt index exceeded the interrupt-remapping table size", | 
|---|
| 1819 | "Present field in the IRTE entry is clear", | 
|---|
| 1820 | "Error accessing interrupt-remapping table pointed by IRTA_REG", | 
|---|
| 1821 | "Detected reserved fields in the IRTE entry", | 
|---|
| 1822 | "Blocked a compatibility format interrupt request", | 
|---|
| 1823 | "Blocked an interrupt request due to source-id verification failure", | 
|---|
| 1824 | }; | 
|---|
| 1825 |  | 
|---|
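|  | /* | 
|---|
|  | * Fault reason codes are partitioned: 0x20 and up index the | 
|---|
|  | * interrupt-remapping table, 0x30 and up the scalable-mode table, | 
|---|
|  | * and codes below ARRAY_SIZE(dma_remap_fault_reasons) the legacy | 
|---|
|  | * DMA-remapping table. Anything else is reported as "Unknown". | 
|---|
|  | */ | 
|---|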
| 1826 | static const char *dmar_get_fault_reason(u8 fault_reason, int *fault_type) | 
|---|
| 1827 | { | 
|---|
| 1828 | if (fault_reason >= 0x20 && (fault_reason - 0x20 < | 
|---|
| 1829 | ARRAY_SIZE(irq_remap_fault_reasons))) { | 
|---|
| 1830 | *fault_type = INTR_REMAP; | 
|---|
| 1831 | return irq_remap_fault_reasons[fault_reason - 0x20]; | 
|---|
| 1832 | } else if (fault_reason >= 0x30 && (fault_reason - 0x30 < | 
|---|
| 1833 | ARRAY_SIZE(dma_remap_sm_fault_reasons))) { | 
|---|
| 1834 | *fault_type = DMA_REMAP; | 
|---|
| 1835 | return dma_remap_sm_fault_reasons[fault_reason - 0x30]; | 
|---|
| 1836 | } else if (fault_reason < ARRAY_SIZE(dma_remap_fault_reasons)) { | 
|---|
| 1837 | *fault_type = DMA_REMAP; | 
|---|
| 1838 | return dma_remap_fault_reasons[fault_reason]; | 
|---|
| 1839 | } else { | 
|---|
| 1840 | *fault_type = UNKNOWN; | 
|---|
| 1841 | return "Unknown"; | 
|---|
| 1842 | } | 
|---|
| 1843 | } | 
|---|
| 1844 |  | 
|---|
| 1845 |  | 
|---|
| 1846 | static inline int dmar_msi_reg(struct intel_iommu *iommu, int irq) | 
|---|
| 1847 | { | 
|---|
| 1848 | if (iommu->irq == irq) | 
|---|
| 1849 | return DMAR_FECTL_REG; | 
|---|
| 1850 | else if (iommu->pr_irq == irq) | 
|---|
| 1851 | return DMAR_PECTL_REG; | 
|---|
| 1852 | else if (iommu->perf_irq == irq) | 
|---|
| 1853 | return DMAR_PERFINTRCTL_REG; | 
|---|
| 1854 | else | 
|---|
| 1855 | BUG(); | 
|---|
| 1856 | } | 
|---|
| 1857 |  | 
|---|
| 1858 | void dmar_msi_unmask(struct irq_data *data) | 
|---|
| 1859 | { | 
|---|
| 1860 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(d: data); | 
|---|
| 1861 | int reg = dmar_msi_reg(iommu, irq: data->irq); | 
|---|
| 1862 | unsigned long flag; | 
|---|
| 1863 |  | 
|---|
| 1864 | /* unmask it */ | 
|---|
| 1865 | raw_spin_lock_irqsave(&iommu->register_lock, flag); | 
|---|
| 1866 | writel(val: 0, addr: iommu->reg + reg); | 
|---|
| 1867 | /* Read the reg back to force the posted write to be flushed */ | 
|---|
| 1868 | readl(addr: iommu->reg + reg); | 
|---|
| 1869 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); | 
|---|
| 1870 | } | 
|---|
| 1871 |  | 
|---|
| 1872 | void dmar_msi_mask(struct irq_data *data) | 
|---|
| 1873 | { | 
|---|
| 1874 | struct intel_iommu *iommu = irq_data_get_irq_handler_data(d: data); | 
|---|
| 1875 | int reg = dmar_msi_reg(iommu, irq: data->irq); | 
|---|
| 1876 | unsigned long flag; | 
|---|
| 1877 |  | 
|---|
| 1878 | /* mask it */ | 
|---|
| 1879 | raw_spin_lock_irqsave(&iommu->register_lock, flag); | 
|---|
| 1880 | writel(DMA_FECTL_IM, addr: iommu->reg + reg); | 
|---|
| 1881 | /* Read the reg back to force the posted write to be flushed */ | 
|---|
| 1882 | readl(addr: iommu->reg + reg); | 
|---|
| 1883 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); | 
|---|
| 1884 | } | 
|---|
| 1885 |  | 
|---|
| 1886 | void dmar_msi_write(int irq, struct msi_msg *msg) | 
|---|
| 1887 | { | 
|---|
| 1888 | struct intel_iommu *iommu = irq_get_handler_data(irq); | 
|---|
| 1889 | int reg = dmar_msi_reg(iommu, irq); | 
|---|
| 1890 | unsigned long flag; | 
|---|
| 1891 |  | 
|---|
| 1892 | raw_spin_lock_irqsave(&iommu->register_lock, flag); | 
|---|
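|  | /* | 
|---|
|  | * The data, address-low and address-high registers sit at fixed | 
|---|
|  | * offsets (+4, +8 and +12) from the corresponding control | 
|---|
|  | * register, e.g. DMAR_FEDATA_REG follows DMAR_FECTL_REG. | 
|---|
|  | */ | 
|---|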
| 1893 | writel(val: msg->data, addr: iommu->reg + reg + 4); | 
|---|
| 1894 | writel(val: msg->address_lo, addr: iommu->reg + reg + 8); | 
|---|
| 1895 | writel(val: msg->address_hi, addr: iommu->reg + reg + 12); | 
|---|
| 1896 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); | 
|---|
| 1897 | } | 
|---|
| 1898 |  | 
|---|
| 1899 | static int dmar_fault_do_one(struct intel_iommu *iommu, int type, | 
|---|
| 1900 | u8 fault_reason, u32 pasid, u16 source_id, | 
|---|
| 1901 | unsigned long long addr) | 
|---|
| 1902 | { | 
|---|
| 1903 | const char *reason; | 
|---|
| 1904 | int fault_type; | 
|---|
| 1905 |  | 
|---|
| 1906 | reason = dmar_get_fault_reason(fault_reason, fault_type: &fault_type); | 
|---|
| 1907 |  | 
|---|
| 1908 | if (fault_type == INTR_REMAP) { | 
|---|
| 1909 | pr_err( "[INTR-REMAP] Request device [%02x:%02x.%d] fault index 0x%llx [fault reason 0x%02x] %s\n", | 
|---|
| 1910 | source_id >> 8, PCI_SLOT(source_id & 0xFF), | 
|---|
| 1911 | PCI_FUNC(source_id & 0xFF), addr >> 48, | 
|---|
| 1912 | fault_reason, reason); | 
|---|
| 1913 |  | 
|---|
| 1914 | return 0; | 
|---|
| 1915 | } | 
|---|
| 1916 |  | 
|---|
| 1917 | if (pasid == IOMMU_PASID_INVALID) | 
|---|
| 1918 | pr_err( "[%s NO_PASID] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n", | 
|---|
| 1919 | type ? "DMA Read": "DMA Write", | 
|---|
| 1920 | source_id >> 8, PCI_SLOT(source_id & 0xFF), | 
|---|
| 1921 | PCI_FUNC(source_id & 0xFF), addr, | 
|---|
| 1922 | fault_reason, reason); | 
|---|
| 1923 | else | 
|---|
| 1924 | pr_err( "[%s PASID 0x%x] Request device [%02x:%02x.%d] fault addr 0x%llx [fault reason 0x%02x] %s\n", | 
|---|
| 1925 | type ? "DMA Read": "DMA Write", pasid, | 
|---|
| 1926 | source_id >> 8, PCI_SLOT(source_id & 0xFF), | 
|---|
| 1927 | PCI_FUNC(source_id & 0xFF), addr, | 
|---|
| 1928 | fault_reason, reason); | 
|---|
| 1929 |  | 
|---|
| 1930 | dmar_fault_dump_ptes(iommu, source_id, addr, pasid); | 
|---|
| 1931 |  | 
|---|
| 1932 | return 0; | 
|---|
| 1933 | } | 
|---|
| 1934 |  | 
|---|
| 1935 | #define PRIMARY_FAULT_REG_LEN (16) | 
|---|
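|  | /* | 
|---|
|  | * Each primary fault record is 16 bytes: the low qword holds the | 
|---|
|  | * faulting page address; the high qword carries the source-id, | 
|---|
|  | * PASID, fault reason, request type and the F (fault-logged) bit | 
|---|
|  | * that is written back to clear the record. | 
|---|
|  | */ | 
|---|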
| 1936 | irqreturn_t dmar_fault(int irq, void *dev_id) | 
|---|
| 1937 | { | 
|---|
| 1938 | struct intel_iommu *iommu = dev_id; | 
|---|
| 1939 | int reg, fault_index; | 
|---|
| 1940 | u32 fault_status; | 
|---|
| 1941 | unsigned long flag; | 
|---|
| 1942 | static DEFINE_RATELIMIT_STATE(rs, | 
|---|
| 1943 | DEFAULT_RATELIMIT_INTERVAL, | 
|---|
| 1944 | DEFAULT_RATELIMIT_BURST); | 
|---|
| 1945 |  | 
|---|
| 1946 | raw_spin_lock_irqsave(&iommu->register_lock, flag); | 
|---|
| 1947 | fault_status = readl(addr: iommu->reg + DMAR_FSTS_REG); | 
|---|
| 1948 | if (fault_status && __ratelimit(&rs)) | 
|---|
| 1949 | pr_err( "DRHD: handling fault status reg %x\n", fault_status); | 
|---|
| 1950 |  | 
|---|
| 1951 | /* TBD: ignore advanced fault log currently */ | 
|---|
| 1952 | if (!(fault_status & DMA_FSTS_PPF)) | 
|---|
| 1953 | goto unlock_exit; | 
|---|
| 1954 |  | 
|---|
| 1955 | fault_index = dma_fsts_fault_record_index(fault_status); | 
|---|
| 1956 | reg = cap_fault_reg_offset(iommu->cap); | 
|---|
| 1957 | while (1) { | 
|---|
| 1958 | /* Disable printing, simply clear the fault when ratelimited */ | 
|---|
| 1959 | bool ratelimited = !__ratelimit(&rs); | 
|---|
| 1960 | u8 fault_reason; | 
|---|
| 1961 | u16 source_id; | 
|---|
| 1962 | u64 guest_addr; | 
|---|
| 1963 | u32 pasid; | 
|---|
| 1964 | int type; | 
|---|
| 1965 | u32 data; | 
|---|
| 1966 | bool pasid_present; | 
|---|
| 1967 |  | 
|---|
| 1968 | /* highest 32 bits */ | 
|---|
| 1969 | data = readl(addr: iommu->reg + reg + | 
|---|
| 1970 | fault_index * PRIMARY_FAULT_REG_LEN + 12); | 
|---|
| 1971 | if (!(data & DMA_FRCD_F)) | 
|---|
| 1972 | break; | 
|---|
| 1973 |  | 
|---|
| 1974 | if (!ratelimited) { | 
|---|
| 1975 | fault_reason = dma_frcd_fault_reason(data); | 
|---|
| 1976 | type = dma_frcd_type(data); | 
|---|
| 1977 |  | 
|---|
| 1978 | pasid = dma_frcd_pasid_value(data); | 
|---|
| 1979 | data = readl(addr: iommu->reg + reg + | 
|---|
| 1980 | fault_index * PRIMARY_FAULT_REG_LEN + 8); | 
|---|
| 1981 | source_id = dma_frcd_source_id(data); | 
|---|
| 1982 |  | 
|---|
| 1983 | pasid_present = dma_frcd_pasid_present(data); | 
|---|
| 1984 | guest_addr = dmar_readq(iommu->reg + reg + | 
|---|
| 1985 | fault_index * PRIMARY_FAULT_REG_LEN); | 
|---|
| 1986 | guest_addr = dma_frcd_page_addr(guest_addr); | 
|---|
| 1987 | } | 
|---|
| 1988 |  | 
|---|
| 1989 | /* clear the fault */ | 
|---|
| 1990 | writel(DMA_FRCD_F, addr: iommu->reg + reg + | 
|---|
| 1991 | fault_index * PRIMARY_FAULT_REG_LEN + 12); | 
|---|
| 1992 |  | 
|---|
| 1993 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); | 
|---|
| 1994 |  | 
|---|
| 1995 | if (!ratelimited) | 
|---|
| 1996 | /* Use IOMMU_PASID_INVALID (-1) if no PASID is present */ | 
|---|
| 1997 | dmar_fault_do_one(iommu, type, fault_reason, | 
|---|
| 1998 | pasid: pasid_present ? pasid : IOMMU_PASID_INVALID, | 
|---|
| 1999 | source_id, addr: guest_addr); | 
|---|
| 2000 |  | 
|---|
| 2001 | fault_index++; | 
|---|
| 2002 | if (fault_index >= cap_num_fault_regs(iommu->cap)) | 
|---|
| 2003 | fault_index = 0; | 
|---|
| 2004 | raw_spin_lock_irqsave(&iommu->register_lock, flag); | 
|---|
| 2005 | } | 
|---|
| 2006 |  | 
|---|
| 2007 | writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO, | 
|---|
| 2008 | addr: iommu->reg + DMAR_FSTS_REG); | 
|---|
| 2009 |  | 
|---|
| 2010 | unlock_exit: | 
|---|
| 2011 | raw_spin_unlock_irqrestore(&iommu->register_lock, flag); | 
|---|
| 2012 | return IRQ_HANDLED; | 
|---|
| 2013 | } | 
|---|
| 2014 |  | 
|---|
| 2015 | int dmar_set_interrupt(struct intel_iommu *iommu) | 
|---|
| 2016 | { | 
|---|
| 2017 | int irq, ret; | 
|---|
| 2018 |  | 
|---|
| 2019 | /* | 
|---|
| 2020 | * Check if the fault interrupt is already initialized. | 
|---|
| 2021 | */ | 
|---|
| 2022 | if (iommu->irq) | 
|---|
| 2023 | return 0; | 
|---|
| 2024 |  | 
|---|
| 2025 | irq = dmar_alloc_hwirq(id: iommu->seq_id, node: iommu->node, arg: iommu); | 
|---|
| 2026 | if (irq > 0) { | 
|---|
| 2027 | iommu->irq = irq; | 
|---|
| 2028 | } else { | 
|---|
| 2029 | pr_err( "No free IRQ vectors\n"); | 
|---|
| 2030 | return -EINVAL; | 
|---|
| 2031 | } | 
|---|
| 2032 |  | 
|---|
| 2033 | ret = request_irq(irq, handler: dmar_fault, IRQF_NO_THREAD, name: iommu->name, dev: iommu); | 
|---|
| 2034 | if (ret) | 
|---|
| 2035 | pr_err( "Can't request irq\n"); | 
|---|
| 2036 | return ret; | 
|---|
| 2037 | } | 
|---|
| 2038 |  | 
|---|
| 2039 | int enable_drhd_fault_handling(unsigned int cpu) | 
|---|
| 2040 | { | 
|---|
| 2041 | struct dmar_drhd_unit *drhd; | 
|---|
| 2042 | struct intel_iommu *iommu; | 
|---|
| 2043 |  | 
|---|
| 2044 | /* | 
|---|
| 2045 | * Enable fault control interrupt. | 
|---|
| 2046 | */ | 
|---|
| 2047 | guard(rwsem_read)(T: &dmar_global_lock); | 
|---|
| 2048 | for_each_iommu(iommu, drhd) { | 
|---|
| 2049 | u32 fault_status; | 
|---|
| 2050 | int ret; | 
|---|
| 2051 |  | 
|---|
| 2052 | if (iommu->irq || iommu->node != cpu_to_node(cpu)) | 
|---|
| 2053 | continue; | 
|---|
| 2054 |  | 
|---|
| 2055 | ret = dmar_set_interrupt(iommu); | 
|---|
| 2056 |  | 
|---|
| 2057 | if (ret) { | 
|---|
| 2058 | pr_err( "DRHD %Lx: failed to enable fault interrupt, ret %d\n", | 
|---|
| 2059 | (unsigned long long)drhd->reg_base_addr, ret); | 
|---|
| 2060 | return -1; | 
|---|
| 2061 | } | 
|---|
| 2062 |  | 
|---|
| 2063 | /* | 
|---|
| 2064 | * Clear any previous faults. | 
|---|
| 2065 | */ | 
|---|
| 2066 | dmar_fault(irq: iommu->irq, dev_id: iommu); | 
|---|
| 2067 | fault_status = readl(addr: iommu->reg + DMAR_FSTS_REG); | 
|---|
| 2068 | writel(val: fault_status, addr: iommu->reg + DMAR_FSTS_REG); | 
|---|
| 2069 | } | 
|---|
| 2070 |  | 
|---|
| 2071 | return 0; | 
|---|
| 2072 | } | 
|---|
| 2073 |  | 
|---|
| 2074 | /* | 
|---|
| 2075 | * Re-enable Queued Invalidation interface. | 
|---|
| 2076 | */ | 
|---|
| 2077 | int dmar_reenable_qi(struct intel_iommu *iommu) | 
|---|
| 2078 | { | 
|---|
| 2079 | if (!ecap_qis(iommu->ecap)) | 
|---|
| 2080 | return -ENOENT; | 
|---|
| 2081 |  | 
|---|
| 2082 | if (!iommu->qi) | 
|---|
| 2083 | return -ENOENT; | 
|---|
| 2084 |  | 
|---|
| 2085 | /* | 
|---|
| 2086 | * First disable queued invalidation. | 
|---|
| 2087 | */ | 
|---|
| 2088 | dmar_disable_qi(iommu); | 
|---|
| 2089 | /* | 
|---|
| 2090 | * Then enable queued invalidation again. Since there are no pending | 
|---|
| 2091 | * invalidation requests now, it's safe to re-enable queued | 
|---|
| 2092 | * invalidation. | 
|---|
| 2093 | */ | 
|---|
| 2094 | __dmar_enable_qi(iommu); | 
|---|
| 2095 |  | 
|---|
| 2096 | return 0; | 
|---|
| 2097 | } | 
|---|
| 2098 |  | 
|---|
| 2099 | /* | 
|---|
| 2100 | * Check interrupt remapping support in DMAR table description. | 
|---|
| 2101 | */ | 
|---|
| 2102 | int __init dmar_ir_support(void) | 
|---|
| 2103 | { | 
|---|
| 2104 | struct acpi_table_dmar *dmar; | 
|---|
| 2105 | dmar = (struct acpi_table_dmar *)dmar_tbl; | 
|---|
| 2106 | if (!dmar) | 
|---|
| 2107 | return 0; | 
|---|
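|  | /* Bit 0 of the DMAR table flags is the interrupt remapping | 
|---|
|  | * support flag (ACPI_DMAR_INTR_REMAP). */ | 
|---|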
| 2108 | return dmar->flags & 0x1; | 
|---|
| 2109 | } | 
|---|
| 2110 |  | 
|---|
| 2111 | /* Check whether DMAR units are in use */ | 
|---|
| 2112 | static inline bool dmar_in_use(void) | 
|---|
| 2113 | { | 
|---|
| 2114 | return irq_remapping_enabled || intel_iommu_enabled; | 
|---|
| 2115 | } | 
|---|
| 2116 |  | 
|---|
| 2117 | static int __init dmar_free_unused_resources(void) | 
|---|
| 2118 | { | 
|---|
| 2119 | struct dmar_drhd_unit *dmaru, *dmaru_n; | 
|---|
| 2120 |  | 
|---|
| 2121 | if (dmar_in_use()) | 
|---|
| 2122 | return 0; | 
|---|
| 2123 |  | 
|---|
| 2124 | if (dmar_dev_scope_status != 1 && !list_empty(head: &dmar_drhd_units)) | 
|---|
| 2125 | bus_unregister_notifier(bus: &pci_bus_type, nb: &dmar_pci_bus_nb); | 
|---|
| 2126 |  | 
|---|
| 2127 | down_write(sem: &dmar_global_lock); | 
|---|
| 2128 | list_for_each_entry_safe(dmaru, dmaru_n, &dmar_drhd_units, list) { | 
|---|
| 2129 | list_del(entry: &dmaru->list); | 
|---|
| 2130 | dmar_free_drhd(dmaru); | 
|---|
| 2131 | } | 
|---|
| 2132 | up_write(sem: &dmar_global_lock); | 
|---|
| 2133 |  | 
|---|
| 2134 | return 0; | 
|---|
| 2135 | } | 
|---|
| 2136 |  | 
|---|
| 2137 | late_initcall(dmar_free_unused_resources); | 
|---|
| 2138 |  | 
|---|
| 2139 | /* | 
|---|
| 2140 | * DMAR Hotplug Support | 
|---|
| 2141 | * For more details, please refer to Intel(R) Virtualization Technology | 
|---|
| 2142 | * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8 | 
|---|
| 2143 | * "Remapping Hardware Unit Hot Plug". | 
|---|
| 2144 | */ | 
|---|
| 2145 | static guid_t dmar_hp_guid = | 
|---|
| 2146 | GUID_INIT(0xD8C1A3A6, 0xBE9B, 0x4C9B, | 
|---|
| 2147 | 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF); | 
|---|
| 2148 |  | 
|---|
| 2149 | /* | 
|---|
| 2150 | * Currently there's only one revision and BIOS will not check the revision id, | 
|---|
| 2151 | * so use 0 for safety. | 
|---|
| 2152 | */ | 
|---|
| 2153 | #define	DMAR_DSM_REV_ID			0 | 
|---|
| 2154 | #define	DMAR_DSM_FUNC_DRHD		1 | 
|---|
| 2155 | #define	DMAR_DSM_FUNC_ATSR		2 | 
|---|
| 2156 | #define	DMAR_DSM_FUNC_RHSA		3 | 
|---|
| 2157 | #define	DMAR_DSM_FUNC_SATC		4 | 
|---|
| 2158 |  | 
|---|
| 2159 | static inline bool dmar_detect_dsm(acpi_handle handle, int func) | 
|---|
| 2160 | { | 
|---|
| 2161 | return acpi_check_dsm(handle, guid: &dmar_hp_guid, DMAR_DSM_REV_ID, funcs: 1 << func); | 
|---|
| 2162 | } | 
|---|
| 2163 |  | 
|---|
| 2164 | static int dmar_walk_dsm_resource(acpi_handle handle, int func, | 
|---|
| 2165 | dmar_res_handler_t handler, void *arg) | 
|---|
| 2166 | { | 
|---|
| 2167 | int ret = -ENODEV; | 
|---|
| 2168 | union acpi_object *obj; | 
|---|
| 2169 | struct acpi_dmar_header *start; | 
|---|
| 2170 | struct dmar_res_callback callback; | 
|---|
| 2171 | static int res_type[] = { | 
|---|
| 2172 | [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT, | 
|---|
| 2173 | [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS, | 
|---|
| 2174 | [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY, | 
|---|
| 2175 | [DMAR_DSM_FUNC_SATC] = ACPI_DMAR_TYPE_SATC, | 
|---|
| 2176 | }; | 
|---|
| 2177 |  | 
|---|
| 2178 | if (!dmar_detect_dsm(handle, func)) | 
|---|
| 2179 | return 0; | 
|---|
| 2180 |  | 
|---|
| 2181 | obj = acpi_evaluate_dsm_typed(handle, guid: &dmar_hp_guid, DMAR_DSM_REV_ID, | 
|---|
| 2182 | func, NULL, ACPI_TYPE_BUFFER); | 
|---|
| 2183 | if (!obj) | 
|---|
| 2184 | return -ENODEV; | 
|---|
| 2185 |  | 
|---|
| 2186 | memset(s: &callback, c: 0, n: sizeof(callback)); | 
|---|
| 2187 | callback.cb[res_type[func]] = handler; | 
|---|
| 2188 | callback.arg[res_type[func]] = arg; | 
|---|
| 2189 | start = (struct acpi_dmar_header *)obj->buffer.pointer; | 
|---|
| 2190 | ret = dmar_walk_remapping_entries(start, len: obj->buffer.length, cb: &callback); | 
|---|
| 2191 |  | 
|---|
| 2192 | ACPI_FREE(obj); | 
|---|
| 2193 |  | 
|---|
| 2194 | return ret; | 
|---|
| 2195 | } | 
|---|
| 2196 |  | 
|---|
| 2197 | static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg) | 
|---|
| 2198 | { | 
|---|
| 2199 | int ret; | 
|---|
| 2200 | struct dmar_drhd_unit *dmaru; | 
|---|
| 2201 |  | 
|---|
| 2202 | dmaru = dmar_find_dmaru(drhd: (struct acpi_dmar_hardware_unit *)header); | 
|---|
| 2203 | if (!dmaru) | 
|---|
| 2204 | return -ENODEV; | 
|---|
| 2205 |  | 
|---|
| 2206 | ret = dmar_ir_hotplug(dmaru, insert: true); | 
|---|
| 2207 | if (ret == 0) | 
|---|
| 2208 | ret = dmar_iommu_hotplug(dmaru, insert: true); | 
|---|
| 2209 |  | 
|---|
| 2210 | return ret; | 
|---|
| 2211 | } | 
|---|
| 2212 |  | 
|---|
| 2213 | static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg) | 
|---|
| 2214 | { | 
|---|
| 2215 | int i, ret; | 
|---|
| 2216 | struct device *dev; | 
|---|
| 2217 | struct dmar_drhd_unit *dmaru; | 
|---|
| 2218 |  | 
|---|
| 2219 | dmaru = dmar_find_dmaru(drhd: (struct acpi_dmar_hardware_unit *)header); | 
|---|
| 2220 | if (!dmaru) | 
|---|
| 2221 | return 0; | 
|---|
| 2222 |  | 
|---|
| 2223 | /* | 
|---|
| 2224 | * All PCI devices managed by this unit should have been destroyed. | 
|---|
| 2225 | */ | 
|---|
| 2226 | if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt) { | 
|---|
| 2227 | for_each_active_dev_scope(dmaru->devices, | 
|---|
| 2228 | dmaru->devices_cnt, i, dev) | 
|---|
| 2229 | return -EBUSY; | 
|---|
| 2230 | } | 
|---|
| 2231 |  | 
|---|
| 2232 | ret = dmar_ir_hotplug(dmaru, insert: false); | 
|---|
| 2233 | if (ret == 0) | 
|---|
| 2234 | ret = dmar_iommu_hotplug(dmaru, insert: false); | 
|---|
| 2235 |  | 
|---|
| 2236 | return ret; | 
|---|
| 2237 | } | 
|---|
| 2238 |  | 
|---|
| 2239 | static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg) | 
|---|
| 2240 | { | 
|---|
| 2241 | struct dmar_drhd_unit *dmaru; | 
|---|
| 2242 |  | 
|---|
| 2243 | dmaru = dmar_find_dmaru(drhd: (struct acpi_dmar_hardware_unit *)header); | 
|---|
| 2244 | if (dmaru) { | 
|---|
| 2245 | list_del_rcu(entry: &dmaru->list); | 
|---|
| 2246 | synchronize_rcu(); | 
|---|
| 2247 | dmar_free_drhd(dmaru); | 
|---|
| 2248 | } | 
|---|
| 2249 |  | 
|---|
| 2250 | return 0; | 
|---|
| 2251 | } | 
|---|
| 2252 |  | 
|---|
| 2253 | static int dmar_hotplug_insert(acpi_handle handle) | 
|---|
| 2254 | { | 
|---|
| 2255 | int ret; | 
|---|
| 2256 | int drhd_count = 0; | 
|---|
| 2257 |  | 
|---|
| 2258 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | 
|---|
| 2259 | handler: &dmar_validate_one_drhd, arg: (void *)1); | 
|---|
| 2260 | if (ret) | 
|---|
| 2261 | goto out; | 
|---|
| 2262 |  | 
|---|
| 2263 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | 
|---|
| 2264 | handler: &dmar_parse_one_drhd, arg: (void *)&drhd_count); | 
|---|
| 2265 | if (ret == 0 && drhd_count == 0) { | 
|---|
| 2266 | pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n"); | 
|---|
| 2267 | goto out; | 
|---|
| 2268 | } else if (ret) { | 
|---|
| 2269 | goto release_drhd; | 
|---|
| 2270 | } | 
|---|
| 2271 |  | 
|---|
| 2272 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA, | 
|---|
| 2273 | handler: &dmar_parse_one_rhsa, NULL); | 
|---|
| 2274 | if (ret) | 
|---|
| 2275 | goto release_drhd; | 
|---|
| 2276 |  | 
|---|
| 2277 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | 
|---|
| 2278 | handler: &dmar_parse_one_atsr, NULL); | 
|---|
| 2279 | if (ret) | 
|---|
| 2280 | goto release_atsr; | 
|---|
| 2281 |  | 
|---|
| 2282 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | 
|---|
| 2283 | handler: &dmar_hp_add_drhd, NULL); | 
|---|
| 2284 | if (!ret) | 
|---|
| 2285 | return 0; | 
|---|
| 2286 |  | 
|---|
| 2287 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | 
|---|
| 2288 | handler: &dmar_hp_remove_drhd, NULL); | 
|---|
| 2289 | release_atsr: | 
|---|
| 2290 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | 
|---|
| 2291 | handler: &dmar_release_one_atsr, NULL); | 
|---|
| 2292 | release_drhd: | 
|---|
| 2293 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | 
|---|
| 2294 | handler: &dmar_hp_release_drhd, NULL); | 
|---|
| 2295 | out: | 
|---|
| 2296 | return ret; | 
|---|
| 2297 | } | 
|---|
| 2298 |  | 
|---|
| 2299 | static int dmar_hotplug_remove(acpi_handle handle) | 
|---|
| 2300 | { | 
|---|
| 2301 | int ret; | 
|---|
| 2302 |  | 
|---|
| 2303 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | 
|---|
| 2304 | handler: &dmar_check_one_atsr, NULL); | 
|---|
| 2305 | if (ret) | 
|---|
| 2306 | return ret; | 
|---|
| 2307 |  | 
|---|
| 2308 | ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | 
|---|
| 2309 | handler: &dmar_hp_remove_drhd, NULL); | 
|---|
| 2310 | if (ret == 0) { | 
|---|
| 2311 | WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR, | 
|---|
| 2312 | &dmar_release_one_atsr, NULL)); | 
|---|
| 2313 | WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | 
|---|
| 2314 | &dmar_hp_release_drhd, NULL)); | 
|---|
| 2315 | } else { | 
|---|
| 2316 | dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD, | 
|---|
| 2317 | handler: &dmar_hp_add_drhd, NULL); | 
|---|
| 2318 | } | 
|---|
| 2319 |  | 
|---|
| 2320 | return ret; | 
|---|
| 2321 | } | 
|---|
| 2322 |  | 
|---|
| 2323 | static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl, | 
|---|
| 2324 | void *context, void **retval) | 
|---|
| 2325 | { | 
|---|
| 2326 | acpi_handle *phdl = retval; | 
|---|
| 2327 |  | 
|---|
| 2328 | if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { | 
|---|
| 2329 | *phdl = handle; | 
|---|
| 2330 | return AE_CTRL_TERMINATE; | 
|---|
| 2331 | } | 
|---|
| 2332 |  | 
|---|
| 2333 | return AE_OK; | 
|---|
| 2334 | } | 
|---|
| 2335 |  | 
|---|
| 2336 | static int dmar_device_hotplug(acpi_handle handle, bool insert) | 
|---|
| 2337 | { | 
|---|
| 2338 | int ret; | 
|---|
| 2339 | acpi_handle tmp = NULL; | 
|---|
| 2340 | acpi_status status; | 
|---|
| 2341 |  | 
|---|
| 2342 | if (!dmar_in_use()) | 
|---|
| 2343 | return 0; | 
|---|
| 2344 |  | 
|---|
| 2345 | if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) { | 
|---|
| 2346 | tmp = handle; | 
|---|
| 2347 | } else { | 
|---|
| 2348 | status = acpi_walk_namespace(ACPI_TYPE_DEVICE, start_object: handle, | 
|---|
| 2349 | ACPI_UINT32_MAX, | 
|---|
| 2350 | descending_callback: dmar_get_dsm_handle, | 
|---|
| 2351 | NULL, NULL, return_value: &tmp); | 
|---|
| 2352 | if (ACPI_FAILURE(status)) { | 
|---|
| 2353 | pr_warn( "Failed to locate _DSM method.\n"); | 
|---|
| 2354 | return -ENXIO; | 
|---|
| 2355 | } | 
|---|
| 2356 | } | 
|---|
| 2357 | if (tmp == NULL) | 
|---|
| 2358 | return 0; | 
|---|
| 2359 |  | 
|---|
| 2360 | down_write(sem: &dmar_global_lock); | 
|---|
| 2361 | if (insert) | 
|---|
| 2362 | ret = dmar_hotplug_insert(handle: tmp); | 
|---|
| 2363 | else | 
|---|
| 2364 | ret = dmar_hotplug_remove(handle: tmp); | 
|---|
| 2365 | up_write(sem: &dmar_global_lock); | 
|---|
| 2366 |  | 
|---|
| 2367 | return ret; | 
|---|
| 2368 | } | 
|---|
| 2369 |  | 
|---|
| 2370 | int dmar_device_add(acpi_handle handle) | 
|---|
| 2371 | { | 
|---|
| 2372 | return dmar_device_hotplug(handle, insert: true); | 
|---|
| 2373 | } | 
|---|
| 2374 |  | 
|---|
| 2375 | int dmar_device_remove(acpi_handle handle) | 
|---|
| 2376 | { | 
|---|
| 2377 | return dmar_device_hotplug(handle, insert: false); | 
|---|
| 2378 | } | 
|---|
| 2379 |  | 
|---|
| 2380 | /* | 
|---|
| 2381 | * dmar_platform_optin - Is %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in DMAR table | 
|---|
| 2382 | * | 
|---|
| 2383 | * Returns true if the platform has %DMA_CTRL_PLATFORM_OPT_IN_FLAG set in | 
|---|
| 2384 | * the ACPI DMAR table. This means that the platform boot firmware has made | 
|---|
| 2385 | * sure no device can issue DMA outside of RMRR regions. | 
|---|
| 2386 | */ | 
|---|
| 2387 | bool dmar_platform_optin(void) | 
|---|
| 2388 | { | 
|---|
| 2389 | struct acpi_table_dmar *dmar; | 
|---|
| 2390 | acpi_status status; | 
|---|
| 2391 | bool ret; | 
|---|
| 2392 |  | 
|---|
| 2393 | status = acpi_get_table(ACPI_SIG_DMAR, instance: 0, | 
|---|
| 2394 | out_table: (struct acpi_table_header **)&dmar); | 
|---|
| 2395 | if (ACPI_FAILURE(status)) | 
|---|
| 2396 | return false; | 
|---|
| 2397 |  | 
|---|
| 2398 | ret = !!(dmar->flags & DMAR_PLATFORM_OPT_IN); | 
|---|
| 2399 | acpi_put_table(table: (struct acpi_table_header *)dmar); | 
|---|
| 2400 |  | 
|---|
| 2401 | return ret; | 
|---|
| 2402 | } | 
|---|
| 2403 | EXPORT_SYMBOL_GPL(dmar_platform_optin); | 
|---|
| 2404 |  | 
|---|