// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/iommu-dma.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

#define CREATE_TRACE_POINTS
#include <trace/events/dma.h>

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif

/*
 * Managed DMA API
 */
struct dma_devres {
        size_t          size;
        void            *vaddr;
        dma_addr_t      dma_handle;
        unsigned long   attrs;
};

static void dmam_release(struct device *dev, void *res)
{
        struct dma_devres *this = res;

        dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
                       this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
        struct dma_devres *this = res, *match = match_data;

        if (this->vaddr == match->vaddr) {
                WARN_ON(this->size != match->size ||
                        this->dma_handle != match->dma_handle);
                return 1;
        }
        return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                        dma_addr_t dma_handle)
{
        struct dma_devres match_data = { size, vaddr, dma_handle };

        WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
        dma_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non_coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                       gfp_t gfp, unsigned long attrs)
{
        struct dma_devres *dr;
        void *vaddr;

        dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
        if (!dr)
                return NULL;

        vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
        if (!vaddr) {
                devres_free(dr);
                return NULL;
        }

        dr->vaddr = vaddr;
        dr->dma_handle = *dma_handle;
        dr->size = size;
        dr->attrs = attrs;

        devres_add(dev, dr);

        return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
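
/*
 * Example (not part of this file): a minimal sketch of how a driver might
 * use the managed API above from its probe routine.  The driver, private
 * structure and buffer size below are hypothetical; the point is only that
 * the allocation is tied to the device and released automatically on driver
 * detach, so no explicit dmam_free_coherent() call is needed in the error
 * or remove paths.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_priv *priv;
 *
 *		priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
 *		if (!priv)
 *			return -ENOMEM;
 *
 *		priv->desc = dmam_alloc_attrs(&pdev->dev, SZ_4K,
 *					      &priv->desc_dma, GFP_KERNEL, 0);
 *		if (!priv->desc)
 *			return -ENOMEM;
 *
 *		return 0;
 *	}
 */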

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
                const struct dma_map_ops *ops)
{
        if (use_dma_iommu(dev))
                return false;

        if (likely(!ops))
                return true;

#ifdef CONFIG_DMA_OPS_BYPASS
        if (dev->dma_ops_bypass)
                return min_not_zero(mask, dev->bus_dma_limit) >=
                        dma_direct_get_required_mask(dev);
#endif
        return false;
}


/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
                const struct dma_map_ops *ops)
{
        return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
                const struct dma_map_ops *ops)
{
        return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        bool is_mmio = attrs & DMA_ATTR_MMIO;
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return DMA_MAPPING_ERROR;

        if (dma_map_direct(dev, ops) ||
            (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
                addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
        else if (use_dma_iommu(dev))
                addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
        else if (is_mmio) {
                if (!ops->map_resource)
                        return DMA_MAPPING_ERROR;

                addr = ops->map_resource(dev, phys, size, dir, attrs);
        } else {
                struct page *page = phys_to_page(phys);
                size_t offset = offset_in_page(phys);

                /*
                 * The dma_ops API contract for ops->map_page() requires
                 * kmappable memory, while ops->map_resource() does not.
                 */
                addr = ops->map_page(dev, page, offset, size, dir, attrs);
        }

        if (!is_mmio)
                kmsan_handle_dma(phys, size, dir);
        trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
        debug_dma_map_phys(dev, phys, size, dir, addr, attrs);

        return addr;
}
EXPORT_SYMBOL_GPL(dma_map_phys);

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
                size_t offset, size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        phys_addr_t phys = page_to_phys(page) + offset;

        if (unlikely(attrs & DMA_ATTR_MMIO))
                return DMA_MAPPING_ERROR;

        if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
            WARN_ON_ONCE(is_zone_device_page(page)))
                return DMA_MAPPING_ERROR;

        return dma_map_phys(dev, phys, size, dir, attrs);
}
EXPORT_SYMBOL(dma_map_page_attrs);
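
/*
 * Example (not part of this file): a minimal sketch of the streaming
 * mapping pattern built on dma_map_page_attrs()/dma_map_phys().  In
 * practice most drivers go through the dma_map_single()/dma_map_page()
 * wrappers, which pass attrs = 0.  The buffer, length and direction below
 * are hypothetical.
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hardware performs the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */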

void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        bool is_mmio = attrs & DMA_ATTR_MMIO;

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops) ||
            (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
                dma_direct_unmap_phys(dev, addr, size, dir, attrs);
        else if (use_dma_iommu(dev))
                iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
        else if (is_mmio) {
                if (ops->unmap_resource)
                        ops->unmap_resource(dev, addr, size, dir, attrs);
        } else
                ops->unmap_page(dev, addr, size, dir, attrs);
        trace_dma_unmap_phys(dev, addr, size, dir, attrs);
        debug_dma_unmap_phys(dev, addr, size, dir);
}
EXPORT_SYMBOL_GPL(dma_unmap_phys);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        if (unlikely(attrs & DMA_ATTR_MMIO))
                return;

        dma_unmap_phys(dev, addr, size, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int ents;

        BUG_ON(!valid_dma_direction(dir));

        if (WARN_ON_ONCE(!dev->dma_mask))
                return 0;

        if (dma_map_direct(dev, ops) ||
            arch_dma_map_sg_direct(dev, sg, nents))
                ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
        else if (use_dma_iommu(dev))
                ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
        else
                ents = ops->map_sg(dev, sg, nents, dir, attrs);

        if (ents > 0) {
                kmsan_handle_dma_sg(sg, nents, dir);
                trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
                debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
        } else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
                                ents != -EIO && ents != -EREMOTEIO)) {
                trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
                return -EIO;
        }

        return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The scatterlist describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success. Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir, unsigned long attrs)
{
        int ret;

        ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
        if (ret < 0)
                return 0;
        return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);
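
/*
 * Example (not part of this file): a minimal sketch of mapping a
 * scatterlist and programming a device from the mapped entries.  Note that
 * the returned count is used for iteration, while the original nents is
 * used for the unmap.  The scatterlist setup is omitted and the
 * hw_queue_segment() helper is hypothetical.
 *
 *	int i, count;
 *	struct scatterlist *s;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		hw_queue_segment(hw, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */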

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device. After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error. The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage. Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping. Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning. eg. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
 *			in the sg_table. This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
                enum dma_data_direction dir, unsigned long attrs)
{
        int nents;

        nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
        if (nents < 0)
                return nents;
        sgt->nents = nents;
        return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
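
/*
 * Example (not part of this file): a minimal sketch of the sg_table based
 * interface.  Unlike dma_map_sg_attrs(), the mapped entry count is stored
 * in sgt->nents, and iteration over the DMA addresses uses
 * for_each_sgtable_dma_sg().  Error handling around the table allocation
 * is omitted and program_hw_segment() is hypothetical.
 *
 *	struct sg_table sgt;
 *	struct scatterlist *s;
 *	int i, ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, nr_pages, 0,
 *					nr_pages << PAGE_SHIFT, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	ret = dma_map_sgtable(dev, &sgt, DMA_FROM_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(&sgt, s, i)
 *		program_hw_segment(hw, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sgtable(dev, &sgt, DMA_FROM_DEVICE, 0);
 */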

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (dma_map_direct(dev, ops) ||
            arch_dma_unmap_sg_direct(dev, sg, nents))
                dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
        else if (use_dma_iommu(dev))
                iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
        else if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
            WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
                return DMA_MAPPING_ERROR;

        return dma_map_phys(dev, phys_addr, size, dir, attrs | DMA_ATTR_MMIO);
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_unmap_phys(dev, addr, size, dir, attrs | DMA_ATTR_MMIO);
}
EXPORT_SYMBOL(dma_unmap_resource);
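
/*
 * Example (not part of this file): a minimal sketch of mapping an MMIO
 * region (for example a peripheral FIFO register used as a DMA slave
 * target) rather than RAM.  fifo_phys is a hypothetical physical address
 * taken from the peripheral's resource, and dma_dev is the device that
 * performs the DMA.
 *
 *	dma_addr_t fifo_dma;
 *
 *	fifo_dma = dma_map_resource(dma_dev, fifo_phys, 4, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dma_dev, fifo_dma))
 *		return -ENOMEM;
 *	... program the DMA engine with fifo_dma as the destination ...
 *	dma_unmap_resource(dma_dev, fifo_dma, 4, DMA_TO_DEVICE, 0);
 */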

#ifdef CONFIG_DMA_NEED_SYNC
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_single_for_cpu(dev, addr, size, dir);
        else if (use_dma_iommu(dev))
                iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
        else if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        trace_dma_sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_cpu);

void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_single_for_device(dev, addr, size, dir);
        else if (use_dma_iommu(dev))
                iommu_dma_sync_single_for_device(dev, addr, size, dir);
        else if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        trace_dma_sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_device);

void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
        else if (use_dma_iommu(dev))
                iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_cpu);

void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (dma_map_direct(dev, ops))
                dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
        else if (use_dma_iommu(dev))
                iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
        else if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_device);
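
/*
 * Example (not part of this file): these double-underscore helpers are
 * normally reached through the dma_sync_*() wrappers in
 * <linux/dma-mapping.h>, which skip the call when no syncing is needed.
 * A minimal sketch of a driver that keeps a DMA_FROM_DEVICE buffer mapped
 * across transfers and inspects it between them; the buffer variables and
 * process_rx_data() are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	process_rx_data(buf_virt, len);		// CPU owns the buffer here
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	// the device may write to the buffer again after this point
 */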

bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_map_direct(dev, ops))
                /*
                 * dma_skip_sync could've been reset on first SWIOTLB buffer
                 * mapping, but @dma_addr is not necessarily an SWIOTLB buffer.
                 * In this case, fall back to a more granular check.
                 */
                return dma_direct_need_sync(dev, dma_addr);
        return true;
}
EXPORT_SYMBOL_GPL(__dma_need_sync);

/**
 * dma_need_unmap - does this device need dma_unmap_* operations
 * @dev: device to check
 *
 * If this function returns %false, drivers can skip calling dma_unmap_* after
 * finishing an I/O.  This function must be called after all mappings that might
 * need to be unmapped have been performed.
 */
bool dma_need_unmap(struct device *dev)
{
        if (!dma_map_direct(dev, get_dma_ops(dev)))
                return true;
        if (!dev->dma_skip_sync)
                return true;
        return IS_ENABLED(CONFIG_DMA_API_DEBUG);
}
EXPORT_SYMBOL_GPL(dma_need_unmap);
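
/*
 * Example (not part of this file): a sketch of how a driver might use
 * dma_need_unmap() to avoid storing per-request DMA addresses when the
 * unmap would be a no-op anyway.  The request structure and its fields are
 * hypothetical.
 *
 *	if (dma_need_unmap(dev))
 *		req->dma_addr = dma;	// remember it for dma_unmap_page()
 *	...
 *	// completion path
 *	if (dma_need_unmap(dev))
 *		dma_unmap_page(dev, req->dma_addr, req->len, DMA_FROM_DEVICE);
 */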

static void dma_setup_need_sync(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
                /*
                 * dma_skip_sync will be reset to %false on first SWIOTLB buffer
                 * mapping, if any. During the device initialization, it's
                 * enough to check only for the DMA coherence.
                 */
                dev->dma_skip_sync = dev_is_dma_coherent(dev);
        else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
                 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
                /*
                 * Synchronization is not possible when none of DMA sync ops
                 * is set.
                 */
                dev->dma_skip_sync = true;
        else
                dev->dma_skip_sync = false;
}
#else /* !CONFIG_DMA_NEED_SYNC */
static inline void dma_setup_need_sync(struct device *dev) { }
#endif /* !CONFIG_DMA_NEED_SYNC */

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts an
 * sg_table.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
                                size, attrs);
        if (use_dma_iommu(dev))
                return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
                                size, attrs);
        if (!ops->get_sgtable)
                return -ENXIO;
        return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
        if (dev_is_dma_coherent(dev))
                return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
        if (attrs & DMA_ATTR_WRITE_COMBINE)
                return pgprot_writecombine(prot);
#endif
        return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_can_mmap(dev);
        if (use_dma_iommu(dev))
                return true;
        return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
                                attrs);
        if (use_dma_iommu(dev))
                return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
                                attrs);
        if (!ops->mmap)
                return -ENXIO;
        return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
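
/*
 * Example (not part of this file): a minimal sketch of an mmap file
 * operation that exposes a coherent buffer to user space.  The private
 * data layout is hypothetical; drivers should gate this on dma_can_mmap()
 * when the mapping is optional.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		if (!dma_can_mmap(priv->dev))
 *			return -ENODEV;
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_addr, priv->size);
 *	}
 */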

u64 dma_get_required_mask(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_alloc_direct(dev, ops))
                return dma_direct_get_required_mask(dev);

        if (use_dma_iommu(dev))
                return DMA_BIT_MASK(32);

        if (ops->get_required_mask)
                return ops->get_required_mask(dev);

        /*
         * We require every DMA ops implementation to at least support a 32-bit
         * DMA mask (and use bounce buffering if that isn't supported in
         * hardware).  As the direct mapping code has its own routine to
         * actually report an optimal mask we default to 32-bit here as that
         * is the right thing for most IOMMUs, and at least not actively
         * harmful in general.
         */
        return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
                gfp_t flag, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        WARN_ON_ONCE(!dev->coherent_dma_mask);

        /*
         * DMA allocations can never be turned back into a page pointer, so
         * requesting compound pages doesn't make sense (and can't even be
         * supported at all by various backends).
         */
        if (WARN_ON_ONCE(flag & __GFP_COMP))
                return NULL;

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
                trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
                                DMA_BIDIRECTIONAL, flag, attrs);
                return cpu_addr;
        }

        /* let the implementation decide on the zone to allocate from: */
        flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

        if (dma_alloc_direct(dev, ops)) {
                cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
        } else if (use_dma_iommu(dev)) {
                cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
        } else if (ops->alloc) {
                cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        } else {
                trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
                                attrs);
                return NULL;
        }

        trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
                        flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
        return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                dma_addr_t dma_handle, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;
        /*
         * On non-coherent platforms which implement DMA-coherent buffers via
         * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
         * this far in IRQ context is a) at risk of a BUG_ON() or trying to
         * sleep on some machines, and b) an indication that the driver is
         * probably misusing the coherent API anyway.
         */
        WARN_ON(irqs_disabled());

        trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
                       attrs);
        if (!cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        if (dma_alloc_direct(dev, ops))
                dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
        else if (use_dma_iommu(dev))
                iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
        else if (ops->free)
                ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
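
/*
 * Example (not part of this file): a minimal sketch of the coherent
 * allocation API that dma_alloc_attrs()/dma_free_attrs() back.  Most
 * callers use the dma_alloc_coherent()/dma_free_coherent() wrappers
 * (attrs = 0).  The ring structure and its fields are hypothetical.
 *
 *	ring->desc = dma_alloc_coherent(dev, ring->size, &ring->desc_dma,
 *					GFP_KERNEL);
 *	if (!ring->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring->size, ring->desc, ring->desc_dma);
 */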

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (WARN_ON_ONCE(!dev->coherent_dma_mask))
                return NULL;
        if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
                return NULL;
        if (WARN_ON_ONCE(gfp & __GFP_COMP))
                return NULL;

        size = PAGE_ALIGN(size);
        if (dma_alloc_direct(dev, ops))
                return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
        if (use_dma_iommu(dev))
                return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
        if (!ops->alloc_pages_op)
                return NULL;
        return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
                dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
        struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

        if (page) {
                trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
                                      size, dir, gfp, 0);
                debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
        } else {
                trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
        }
        return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        size = PAGE_ALIGN(size);
        if (dma_alloc_direct(dev, ops))
                dma_direct_free_pages(dev, size, page, dma_handle, dir);
        else if (use_dma_iommu(dev))
                dma_common_free_pages(dev, size, page, dma_handle, dir);
        else if (ops->free_pages)
                ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
                dma_addr_t dma_handle, enum dma_data_direction dir)
{
        trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
        debug_dma_free_pages(dev, page, size, dir, dma_handle);
        __dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);
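
/*
 * Example (not part of this file): a minimal sketch of the non-coherent
 * page allocator.  Unlike dma_alloc_coherent(), the memory may be cached,
 * so CPU accesses are bracketed with dma_sync_single_*() calls.
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	page = dma_alloc_pages(dev, PAGE_SIZE, &dma, DMA_FROM_DEVICE,
 *			       GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	vaddr = page_address(page);
 *	...
 *	dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *	// the CPU may read vaddr here
 *	dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *	...
 *	dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_FROM_DEVICE);
 */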

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct page *page)
{
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
                return -ENXIO;
        return remap_pfn_range(vma, vma->vm_start,
                        page_to_pfn(page) + vma->vm_pgoff,
                        vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
                enum dma_data_direction dir, gfp_t gfp)
{
        struct sg_table *sgt;
        struct page *page;

        sgt = kmalloc(sizeof(*sgt), gfp);
        if (!sgt)
                return NULL;
        if (sg_alloc_table(sgt, 1, gfp))
                goto out_free_sgt;
        page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
        if (!page)
                goto out_free_table;
        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        sg_dma_len(sgt->sgl) = sgt->sgl->length;
        return sgt;
out_free_table:
        sg_free_table(sgt);
out_free_sgt:
        kfree(sgt);
        return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
                enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
        struct sg_table *sgt;

        if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
                return NULL;
        if (WARN_ON_ONCE(gfp & __GFP_COMP))
                return NULL;

        if (use_dma_iommu(dev))
                sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
        else
                sgt = alloc_single_sgt(dev, size, dir, gfp);

        if (sgt) {
                sgt->nents = 1;
                trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
                debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
        } else {
                trace_dma_alloc_sgt_err(dev, NULL, 0, size, dir, gfp, attrs);
        }
        return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        __dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
                         dir);
        sg_free_table(sgt);
        kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt, enum dma_data_direction dir)
{
        trace_dma_free_sgt(dev, sgt, size, dir);
        debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);

        if (use_dma_iommu(dev))
                iommu_dma_free_noncontiguous(dev, size, sgt, dir);
        else
                free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
                struct sg_table *sgt)
{

        if (use_dma_iommu(dev))
                return iommu_dma_vmap_noncontiguous(dev, size, sgt);

        return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
        if (use_dma_iommu(dev))
                iommu_dma_vunmap_noncontiguous(dev, vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
                size_t size, struct sg_table *sgt)
{
        if (use_dma_iommu(dev))
                return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
        return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
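
/*
 * Example (not part of this file): a minimal sketch of the noncontiguous
 * allocator used as a streaming buffer with a kernel mapping.  The
 * dma_sync_sgtable_*() wrappers are used because the memory is not
 * allocated as coherent.
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_FROM_DEVICE,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr)
 *		goto err_free;
 *	...
 *	dma_sync_sgtable_for_cpu(dev, sgt, DMA_FROM_DEVICE);
 *	// the CPU may read vaddr here
 *	dma_sync_sgtable_for_device(dev, sgt, DMA_FROM_DEVICE);
 *	...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 * err_free:
 *	dma_free_noncontiguous(dev, size, sgt, DMA_FROM_DEVICE);
 */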

static int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (use_dma_iommu(dev)) {
                if (WARN_ON(ops))
                        return false;
                return true;
        }

        /*
         * ->dma_supported sets and clears the bypass flag, so ignore it here
         * and always call into the method if there is one.
         */
        if (ops) {
                if (!ops->dma_supported)
                        return true;
                return ops->dma_supported(dev, mask);
        }

        return dma_direct_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        /*
         * Note: dma_ops_bypass is not checked here because P2PDMA should
         * not be used with dma mapping ops that do not have support even
         * if the specific device is bypassing them.
         */

        /* if ops is not set, dma direct and default IOMMU support P2PDMA */
        return !ops;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        arch_dma_set_mask(dev, mask);
        *dev->dma_mask = mask;
        dma_setup_need_sync(dev);

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        /*
         * Truncate the mask to the actually supported dma_addr_t width to
         * avoid generating unsupportable addresses.
         */
        mask = (dma_addr_t)mask;

        if (!dma_supported(dev, mask))
                return -EIO;

        dev->coherent_dma_mask = mask;
        return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
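
/*
 * Example (not part of this file): the usual probe-time pattern built on
 * these two setters, via the dma_set_mask_and_coherent() wrapper.  The
 * 64/32-bit fallback shown here is conventional but device specific.
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return dev_err_probe(&pdev->dev, ret, "no usable DMA mask\n");
 */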

static bool __dma_addressing_limited(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
            dma_get_required_mask(dev))
                return true;

        if (unlikely(ops) || use_dma_iommu(dev))
                return false;
        return !dma_direct_all_ram_mapped(dev);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
bool dma_addressing_limited(struct device *dev)
{
        if (!__dma_addressing_limited(dev))
                return false;

        dev_dbg(dev, "device is DMA addressing limited\n");
        return true;
}
EXPORT_SYMBOL_GPL(dma_addressing_limited);

size_t dma_max_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (dma_map_direct(dev, ops))
                size = dma_direct_max_mapping_size(dev);
        else if (use_dma_iommu(dev))
                size = iommu_dma_max_mapping_size(dev);
        else if (ops && ops->max_mapping_size)
                size = ops->max_mapping_size(dev);

        return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        size_t size = SIZE_MAX;

        if (use_dma_iommu(dev))
                size = iommu_dma_opt_mapping_size();
        else if (ops && ops->opt_mapping_size)
                size = ops->opt_mapping_size();

        return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);
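
/*
 * Example (not part of this file): a sketch of a driver using these
 * limits to size its transfers; the hardware limit below is hypothetical.
 * dma_max_mapping_size() is treated as a hard cap on a single mapping,
 * while dma_opt_mapping_size() is only a performance hint.
 *
 *	size_t max_seg = min_t(size_t, HW_MAX_SEG_SIZE,
 *			       dma_max_mapping_size(dev));
 *	size_t opt_seg = min_t(size_t, max_seg, dma_opt_mapping_size(dev));
 *
 *	// advertise max_seg as the hard segment limit and prefer opt_seg
 *	// when splitting large requests
 */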

unsigned long dma_get_merge_boundary(struct device *dev)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (use_dma_iommu(dev))
                return iommu_dma_get_merge_boundary(dev);

        if (!ops || !ops->get_merge_boundary)
                return 0;	/* can't merge */

        return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);