| 1 | /* SPDX-License-Identifier: GPL-2.0-only */ | 
|---|
| 2 | #ifndef BLK_MQ_DMA_H | 
|---|
| 3 | #define BLK_MQ_DMA_H | 
|---|
| 4 |  | 
|---|
| 5 | #include <linux/blk-mq.h> | 
|---|
| 6 | #include <linux/pci-p2pdma.h> | 
|---|
| 7 |  | 
|---|
/*
 * Internal iterator state used when walking a request's data to build DMA
 * mappings.  Embedded in struct blk_dma_iter below.
 */
struct blk_map_iter {
	struct bvec_iter		iter;		/* position within @bio's bvecs */
	struct bio			*bio;		/* bio currently being iterated */
	struct bio_vec			*bvecs;		/* vector array being walked */
	bool				is_integrity;	/* NOTE(review): presumably true when
							 * iterating integrity (PI) payload
							 * rather than data — confirm at the
							 * iterator implementation */
};
|---|
| 14 |  | 
|---|
/*
 * Per-request DMA mapping iterator.  Callers read @addr/@len after each
 * successful blk_rq_dma_map_iter_* call; on failure @status holds the
 * reason.  The remaining members are private to the iterator functions.
 */
struct blk_dma_iter {
	/* Output address range for this iteration */
	dma_addr_t			addr;	/* bus/DMA address of current segment */
	u32				len;	/* length in bytes of current segment */

	/* Status code. Only valid when blk_rq_dma_map_iter_* returned false */
	blk_status_t			status;

	/* Internal to blk_rq_dma_map_iter_* */
	struct blk_map_iter		iter;	/* walk position over the request's bios */
	struct pci_p2pdma_map_state	p2pdma;	/* PCI peer-to-peer mapping state */
};
|---|
| 27 |  | 
|---|
/*
 * Iterate the DMA mapping of a request: _start begins the walk and produces
 * the first @iter->addr/@iter->len range, _next advances to the following
 * range.  Both return false when iteration stops; @iter->status then tells
 * whether that was an error or simply the end of the request.
 */
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
		struct dma_iova_state *state, struct blk_dma_iter *iter);
bool blk_rq_dma_map_iter_next(struct request *req, struct device *dma_dev,
		struct dma_iova_state *state, struct blk_dma_iter *iter);
|---|
| 32 |  | 
|---|
| 33 | /** | 
|---|
| 34 | * blk_rq_dma_map_coalesce - were all segments coalesced? | 
|---|
| 35 | * @state: DMA state to check | 
|---|
| 36 | * | 
|---|
| 37 | * Returns true if blk_rq_dma_map_iter_start coalesced all segments into a | 
|---|
| 38 | * single DMA range. | 
|---|
| 39 | */ | 
|---|
| 40 | static inline bool blk_rq_dma_map_coalesce(struct dma_iova_state *state) | 
|---|
| 41 | { | 
|---|
| 42 | return dma_use_iova(state); | 
|---|
| 43 | } | 
|---|
| 44 |  | 
|---|
| 45 | /** | 
|---|
| 46 | * blk_dma_unmap - try to DMA unmap a request | 
|---|
| 47 | * @req:	request to unmap | 
|---|
| 48 | * @dma_dev:	device to unmap from | 
|---|
| 49 | * @state:	DMA IOVA state | 
|---|
| 50 | * @mapped_len: number of bytes to unmap | 
|---|
| 51 | * @is_p2p:	true if mapped with PCI_P2PDMA_MAP_BUS_ADDR | 
|---|
| 52 | * | 
|---|
| 53 | * Returns %false if the callers need to manually unmap every DMA segment | 
|---|
| 54 | * mapped using @iter or %true if no work is left to be done. | 
|---|
| 55 | */ | 
|---|
| 56 | static inline bool blk_dma_unmap(struct request *req, struct device *dma_dev, | 
|---|
| 57 | struct dma_iova_state *state, size_t mapped_len, bool is_p2p) | 
|---|
| 58 | { | 
|---|
| 59 | if (is_p2p) | 
|---|
| 60 | return true; | 
|---|
| 61 |  | 
|---|
| 62 | if (dma_use_iova(state)) { | 
|---|
| 63 | dma_iova_destroy(dev: dma_dev, state, mapped_len, rq_dma_dir(req), | 
|---|
| 64 | attrs: 0); | 
|---|
| 65 | return true; | 
|---|
| 66 | } | 
|---|
| 67 |  | 
|---|
| 68 | return !dma_need_unmap(dev: dma_dev); | 
|---|
| 69 | } | 
|---|
| 70 |  | 
|---|
| 71 | static inline bool blk_rq_dma_unmap(struct request *req, struct device *dma_dev, | 
|---|
| 72 | struct dma_iova_state *state, size_t mapped_len) | 
|---|
| 73 | { | 
|---|
| 74 | return blk_dma_unmap(req, dma_dev, state, mapped_len, | 
|---|
| 75 | is_p2p: req->cmd_flags & REQ_P2PDMA); | 
|---|
| 76 | } | 
|---|
| 77 |  | 
|---|
| 78 | #endif /* BLK_MQ_DMA_H */ | 
|---|
| 79 |  | 
|---|