// SPDX-License-Identifier: GPL-2.0
/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more).  See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 */

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)
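
/*
 * Note: this mask is what do_dma_probe() below advertises through
 * dma_device.src_addr_widths and .dst_addr_widths, i.e. the bus widths
 * a dmaengine client may put into its dma_slave_config.
 */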

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}
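
/*
 * Note that dwc_tx_submit() above only places the descriptor on the
 * software queue; nothing touches the hardware until the client calls
 * dma_async_issue_pending(), which ends up in dwc_issue_pending()
 * below. This is the standard dmaengine submit/issue split.
 */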

static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;
	return desc;
}

static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->initialize_chan(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);
}

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * multi block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u8		lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long	was_soft_llp;

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}
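
/*
 * dwc_dostart() picks one of two modes: on controllers without working
 * LLP support (dwc->nollp) the block list is walked in software, one
 * block per XFER interrupt, via dwc_do_single_block(); otherwise the
 * whole chain of hardware descriptors is handed to the controller in
 * one go through the LLP register and the LLP_S_EN/LLP_D_EN bits.
 */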

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;
	struct dmaengine_desc_callback	cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

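	/*
	 * CTL_LO bits 6:4 hold SRC_TR_WIDTH (the value written by
	 * DWC_CTLL_SRC_WIDTH()); block2bytes() scales the block count
	 * from CTL_HI by that transfer width to get a byte count.
	 */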
	return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptors addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptors llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
		 "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

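/*
 * Bottom half: runs with channel interrupts masked (the hard IRQ handler
 * below clears the masks before scheduling us), scans each channel for
 * completed or faulted transfers, then re-enables the XFER/ERROR masks.
 */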
static void dw_dma_tasklet(struct tasklet_struct *t)
{
	struct dw_dma *dw = from_tasklet(dw, t, tasklet);
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	u8			m_master = dwc->dws.m_master;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	u32			ctllo, ctlhi;
	u8			lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
		 "%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
		 &dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = dw->prepare_ctllo(dwc)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, ctlhi);
		desc->len = xfer_count;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}
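
/*
 * Illustrative client-side use of the memcpy path above (not part of
 * this driver); a dmaengine consumer would do roughly:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * where dst/src are DMA addresses the caller has already mapped.
 */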

static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo, ctlhi;
	u8			lms = DWC_LLP_LMS(dwc->dws.m_master);
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, mem;
			size_t		dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(sconfig->src_addr_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctlhi, ctlhi);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, mem;
			size_t		dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctlhi, ctlhi);
			mem_width = __ffs(sconfig->dst_addr_width | mem);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}
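
/*
 * Illustrative slave-transfer setup from the client side (not part of
 * this driver). The FIFO address below is a made-up example:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,	// hypothetical device FIFO address
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */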

bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* permit channels in accordance with the channels mask */
	if (dws->channels && !(dws->channels & dwc->mask))
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);
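
/*
 * Typical use of the filter (illustrative): a client fills a struct
 * dw_dma_slave ("slave" below) and requests a matching channel:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &slave);
 */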

static int dwc_verify_maxburst(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	dwc->dma_sconfig.src_maxburst =
		clamp(dwc->dma_sconfig.src_maxburst, 1U, dwc->max_burst);
	dwc->dma_sconfig.dst_maxburst =
		clamp(dwc->dma_sconfig.dst_maxburst, 1U, dwc->max_burst);

	dwc->dma_sconfig.src_maxburst =
		rounddown_pow_of_two(dwc->dma_sconfig.src_maxburst);
	dwc->dma_sconfig.dst_maxburst =
		rounddown_pow_of_two(dwc->dma_sconfig.dst_maxburst);

	return 0;
}
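
/*
 * The controller encodes burst length as a power of two, so the values
 * requested via dma_slave_config are clamped to the channel's maximum
 * above and rounded down to the nearest power of two.
 */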

static int dwc_verify_p_buswidth(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	u32 reg_width, max_width;

	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
		reg_width = dwc->dma_sconfig.dst_addr_width;
	else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM)
		reg_width = dwc->dma_sconfig.src_addr_width;
	else /* DMA_MEM_TO_MEM */
		return 0;

	max_width = dw->pdata->data_width[dwc->dws.p_master];

	/* Fall-back to 1-byte transfer width if undefined */
	if (reg_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
		reg_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	else if (!is_power_of_2(reg_width) || reg_width > max_width)
		return -EINVAL;
	else /* bus width is valid */
		return 0;

	/* Update undefined addr width value */
	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV)
		dwc->dma_sconfig.dst_addr_width = reg_width;
	else /* DMA_DEV_TO_MEM */
		dwc->dma_sconfig.src_addr_width = reg_width;

	return 0;
}

static int dwc_verify_m_buswidth(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	u32 reg_width, reg_burst, mem_width;

	mem_width = dw->pdata->data_width[dwc->dws.m_master];

	/*
	 * It's possible to have a data portion locked in the DMA FIFO in
	 * case of the channel suspension. Subsequent channel disabling will
	 * cause the silent loss of that data. In order to prevent that,
	 * maintain the src and dst transfer widths coherency by means of
	 * the relation:
	 * (CTLx.SRC_TR_WIDTH * CTLx.SRC_MSIZE >= CTLx.DST_TR_WIDTH)
	 * Look for the details in the commit message that brings this change.
	 *
	 * Note the DMA configs utilized in the calculations below must
	 * already have been verified to have correct values by the time
	 * this method is called.
	 */
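	/*
	 * Worked example (illustrative): DMA_DEV_TO_MEM with a 2-byte
	 * device register width and a burst of 4 beats keeps up to 8
	 * bytes in the FIFO, so a 4-byte memory-side width is allowed:
	 * min(mem_width, 2 * 4) == 4.
	 */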
	if (dwc->dma_sconfig.direction == DMA_MEM_TO_DEV) {
		reg_width = dwc->dma_sconfig.dst_addr_width;
		if (mem_width < reg_width)
			return -EINVAL;

		dwc->dma_sconfig.src_addr_width = mem_width;
	} else if (dwc->dma_sconfig.direction == DMA_DEV_TO_MEM) {
		reg_width = dwc->dma_sconfig.src_addr_width;
		reg_burst = dwc->dma_sconfig.src_maxburst;

		dwc->dma_sconfig.dst_addr_width = min(mem_width, reg_width * reg_burst);
	}

	return 0;
}

static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	int ret;

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	ret = dwc_verify_maxburst(chan);
	if (ret)
		return ret;

	ret = dwc_verify_p_buswidth(chan);
	if (ret)
		return ret;

	ret = dwc_verify_m_buswidth(chan);
	if (ret)
		return ret;

	return 0;
}

static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int	count = 20;	/* timeout iterations */

	dw->suspend_chan(dwc, drain);

	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc_chan_pause(dwc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->resume_chan(dwc, drain);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc, *_desc;
	unsigned long		flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_pause(dwc, true);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc, true);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

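/*
 * Only the descriptor at the head of the active list has a residue that
 * changes as the hardware makes progress; anything behind it has not
 * been started yet, so its residue is still the full transfer length.
 */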
static u32 dwc_get_residue_and_status(struct dw_dma_chan *dwc, dma_cookie_t cookie,
				      enum dma_status *status)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
			if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
				*status = DMA_PAUSED;
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	enum dma_status		ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue_and_status(dwc, cookie, &ret));
	return ret;
}

static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/

void do_dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

void do_dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT:  channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		do_dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
		dwc->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was a last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		do_dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

static void dwc_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);

	caps->max_burst = dwc->max_burst;

	/*
	 * It might be crucial for some devices to have the hardware
	 * accelerated multi-block transfers supported, aka LLPs in DW DMAC
	 * notation. So if LLPs are supported then max_sg_burst is set to
	 * zero which means unlimited number of SG entries can be handled in a
	 * single DMA transaction, otherwise it's just one SG entry.
	 */
	if (dwc->nollp)
		caps->max_sg_burst = 1;
	else
		caps->max_sg_burst = 0;
}

int do_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_platform_data *pdata;
	bool			autocfg = false;
	unsigned int		dw_params;
	unsigned int		i;
	int			ret;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;

	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			ret = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		ret = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		ret = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	dw->disable(dw);

	/* Device and instance ID for IRQ and DMA pool */
	dw->set_device_name(dw, chip->id);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_pdata;
	}

	tasklet_setup(&dw->tasklet, dw_dma_tasklet);

	ret = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
	if (ret)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
				dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095.
			 */
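			/* i.e. block_size = (4 << v) - 1: v == 0x1 gives 7, v == 0x4 gives 63 */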
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;

			/*
			 * According to the DW DMA databook the true scatter-
			 * gather LLPs aren't available if either multi-block
			 * config is disabled (CHx_MULTI_BLK_EN == 0) or the
			 * LLP register is hard-coded to zeros
			 * (CHx_HC_LLP == 1).
			 */
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0 ||
				(dwc_params >> DWC_PARAMS_HC_LLP & 0x1) == 1;
			dwc->max_burst =
				(0x4 << (dwc_params >> DWC_PARAMS_MSIZE & 0x7));
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
			dwc->max_burst = pdata->max_burst[i] ?: DW_DMA_MAX_BURST;
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_caps = dwc_caps;
	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.min_burst = DW_DMA_MIN_BURST;
	dw->dma.max_burst = DW_DMA_MAX_BURST;
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	/*
	 * For now there is no hardware with non uniform maximum block size
	 * across all of the device channels, so we set the maximum segment
	 * size as the block size found for the very first channel.
	 */
	dma_set_max_seg_size(dw->dma.dev, dw->chan[0].block_size);

	ret = dma_async_device_register(&dw->dma);
	if (ret)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return ret;
}

int do_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma		*dw = chip->dw;
	struct dw_dma_chan	*dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	do_dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}

int do_dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->disable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_disable);

int do_dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->enable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_enable);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");