Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * arch-independent dma-mapping routines
4 : *
5 : * Copyright (c) 2006 SUSE Linux Products GmbH
6 : * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
7 : */
8 : #include <linux/memblock.h> /* for max_pfn */
9 : #include <linux/acpi.h>
10 : #include <linux/dma-map-ops.h>
11 : #include <linux/export.h>
12 : #include <linux/gfp.h>
13 : #include <linux/of_device.h>
14 : #include <linux/slab.h>
15 : #include <linux/vmalloc.h>
16 : #include "debug.h"
17 : #include "direct.h"
18 :
19 : bool dma_default_coherent;
20 :
21 : /*
22 : * Managed DMA API
23 : */
24 : struct dma_devres {
25 : size_t size;
26 : void *vaddr;
27 : dma_addr_t dma_handle;
28 : unsigned long attrs;
29 : };
30 :
31 0 : static void dmam_release(struct device *dev, void *res)
32 : {
33 0 : struct dma_devres *this = res;
34 :
35 0 : dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
36 : this->attrs);
37 0 : }
38 :
39 0 : static int dmam_match(struct device *dev, void *res, void *match_data)
40 : {
41 0 : struct dma_devres *this = res, *match = match_data;
42 :
43 0 : if (this->vaddr == match->vaddr) {
44 0 : WARN_ON(this->size != match->size ||
45 : this->dma_handle != match->dma_handle);
46 : return 1;
47 : }
48 : return 0;
49 : }
50 :
51 : /**
52 : * dmam_free_coherent - Managed dma_free_coherent()
53 : * @dev: Device to free coherent memory for
54 : * @size: Size of allocation
55 : * @vaddr: Virtual address of the memory to free
56 : * @dma_handle: DMA handle of the memory to free
57 : *
58 : * Managed dma_free_coherent().
59 : */
60 0 : void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
61 : dma_addr_t dma_handle)
62 : {
63 0 : struct dma_devres match_data = { size, vaddr, dma_handle };
64 :
65 0 : dma_free_coherent(dev, size, vaddr, dma_handle);
66 0 : WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
67 0 : }
68 : EXPORT_SYMBOL(dmam_free_coherent);
69 :
70 : /**
71 : * dmam_alloc_attrs - Managed dma_alloc_attrs()
72 : * @dev: Device to allocate non-coherent memory for
73 : * @size: Size of allocation
74 : * @dma_handle: Out argument for allocated DMA handle
75 : * @gfp: Allocation flags
76 : * @attrs: Flags in the DMA_ATTR_* namespace.
77 : *
78 : * Managed dma_alloc_attrs(). Memory allocated using this function will be
79 : * automatically released on driver detach.
80 : *
81 : * RETURNS:
82 : * Pointer to allocated memory on success, NULL on failure.
83 : */
84 0 : void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
85 : gfp_t gfp, unsigned long attrs)
86 : {
87 0 : struct dma_devres *dr;
88 0 : void *vaddr;
89 :
90 0 : dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
91 0 : if (!dr)
92 : return NULL;
93 :
94 0 : vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
95 0 : if (!vaddr) {
96 0 : devres_free(dr);
97 0 : return NULL;
98 : }
99 :
100 0 : dr->vaddr = vaddr;
101 0 : dr->dma_handle = *dma_handle;
102 0 : dr->size = size;
103 0 : dr->attrs = attrs;
104 :
105 0 : devres_add(dev, dr);
106 :
107 0 : return vaddr;
108 : }
109 : EXPORT_SYMBOL(dmam_alloc_attrs);
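
/*
 * Editor's usage sketch (not part of mapping.c): a hypothetical driver
 * allocates a descriptor ring with the managed API during probe; devres
 * frees it automatically on driver detach, so no explicit
 * dmam_free_coherent() is needed on the error or remove paths.
 * "foo", FOO_RING_BYTES and foo_probe() are illustrative names.
 */
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>

#define FOO_RING_BYTES	4096

struct foo {
	void *ring;
	dma_addr_t ring_dma;
};

static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;

	/* dmam_alloc_coherent() is dmam_alloc_attrs() with attrs == 0 */
	foo->ring = dmam_alloc_coherent(&pdev->dev, FOO_RING_BYTES,
					&foo->ring_dma, GFP_KERNEL);
	if (!foo->ring)
		return -ENOMEM;

	platform_set_drvdata(pdev, foo);
	return 0;
}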
110 :
111 0 : static bool dma_go_direct(struct device *dev, dma_addr_t mask,
112 : const struct dma_map_ops *ops)
113 : {
114 0 : if (likely(!ops))
115 0 : return true;
116 : #ifdef CONFIG_DMA_OPS_BYPASS
117 : if (dev->dma_ops_bypass)
118 : return min_not_zero(mask, dev->bus_dma_limit) >=
119 : dma_direct_get_required_mask(dev);
120 : #endif
121 : return false;
122 : }
123 :
124 :
125 : /*
126 : * Check if the device uses a direct mapping for streaming DMA operations.
127 : * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
128 : * enough.
129 : */
130 0 : static inline bool dma_alloc_direct(struct device *dev,
131 : const struct dma_map_ops *ops)
132 : {
133 0 : return dma_go_direct(dev, dev->coherent_dma_mask, ops);
134 : }
135 :
136 0 : static inline bool dma_map_direct(struct device *dev,
137 : const struct dma_map_ops *ops)
138 : {
139 0 : return dma_go_direct(dev, *dev->dma_mask, ops);
140 : }
141 :
142 0 : dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
143 : size_t offset, size_t size, enum dma_data_direction dir,
144 : unsigned long attrs)
145 : {
146 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
147 0 : dma_addr_t addr;
148 :
149 0 : BUG_ON(!valid_dma_direction(dir));
150 :
151 0 : if (WARN_ON_ONCE(!dev->dma_mask))
152 : return DMA_MAPPING_ERROR;
153 :
154 0 : if (dma_map_direct(dev, ops) ||
155 : arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))
156 0 : addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
157 : else
158 : addr = ops->map_page(dev, page, offset, size, dir, attrs);
159 0 : debug_dma_map_page(dev, page, offset, size, dir, addr);
160 :
161 0 : return addr;
162 : }
163 : EXPORT_SYMBOL(dma_map_page_attrs);
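
/*
 * Editor's usage sketch (not part of mapping.c): a streaming mapping of a
 * kmalloc()ed buffer for a device-to-memory transfer. dma_map_single() is
 * the usual wrapper around dma_map_page_attrs(); the returned handle must
 * be checked with dma_mapping_error() before it is handed to hardware.
 * Assumes <linux/dma-mapping.h>; "foo_start_rx" is an illustrative name.
 */
static int foo_start_rx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... program "addr" into the device and wait for completion ... */

	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}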
164 :
165 0 : void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
166 : enum dma_data_direction dir, unsigned long attrs)
167 : {
168 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
169 :
170 0 : BUG_ON(!valid_dma_direction(dir));
171 0 : if (dma_map_direct(dev, ops) ||
172 : arch_dma_unmap_page_direct(dev, addr + size))
173 0 : dma_direct_unmap_page(dev, addr, size, dir, attrs);
174 : else if (ops->unmap_page)
175 : ops->unmap_page(dev, addr, size, dir, attrs);
176 0 : debug_dma_unmap_page(dev, addr, size, dir);
177 0 : }
178 : EXPORT_SYMBOL(dma_unmap_page_attrs);
179 :
180 : /*
181 : * dma_map_sg_attrs returns 0 on error and > 0 on success.
182 : * It should never return a value < 0.
183 : */
184 0 : int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg, int nents,
185 : enum dma_data_direction dir, unsigned long attrs)
186 : {
187 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
188 0 : int ents;
189 :
190 0 : BUG_ON(!valid_dma_direction(dir));
191 :
192 0 : if (WARN_ON_ONCE(!dev->dma_mask))
193 : return 0;
194 :
195 0 : if (dma_map_direct(dev, ops) ||
196 : arch_dma_map_sg_direct(dev, sg, nents))
197 0 : ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
198 : else
199 : ents = ops->map_sg(dev, sg, nents, dir, attrs);
200 0 : BUG_ON(ents < 0);
201 0 : debug_dma_map_sg(dev, sg, nents, ents, dir);
202 :
203 : return ents;
204 : }
205 : EXPORT_SYMBOL(dma_map_sg_attrs);
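
/*
 * Editor's usage sketch (not part of mapping.c): mapping a scatterlist and
 * honouring the 0-on-error / >0-on-success convention noted above. An
 * IOMMU may merge segments, so the device is programmed with the returned
 * count, while dma_unmap_sg() still takes the original nents. Assumes
 * <linux/dma-mapping.h> and <linux/scatterlist.h>; names are illustrative.
 */
static int foo_map_request(struct device *dev, struct scatterlist *sgl,
			   int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!mapped)
		return -ENOMEM;

	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t addr = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		/* ... write one hardware descriptor for addr/len ... */
		dev_dbg(dev, "seg %d: %pad+%u\n", i, &addr, len);
	}

	/* ... run the transfer ... */

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}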
206 :
207 0 : void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
208 : int nents, enum dma_data_direction dir,
209 : unsigned long attrs)
210 : {
211 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
212 :
213 0 : BUG_ON(!valid_dma_direction(dir));
214 0 : debug_dma_unmap_sg(dev, sg, nents, dir);
215 0 : if (dma_map_direct(dev, ops) ||
216 : arch_dma_unmap_sg_direct(dev, sg, nents))
217 0 : dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
218 : else if (ops->unmap_sg)
219 : ops->unmap_sg(dev, sg, nents, dir, attrs);
220 0 : }
221 : EXPORT_SYMBOL(dma_unmap_sg_attrs);
222 :
223 0 : dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
224 : size_t size, enum dma_data_direction dir, unsigned long attrs)
225 : {
226 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
227 0 : dma_addr_t addr = DMA_MAPPING_ERROR;
228 :
229 0 : BUG_ON(!valid_dma_direction(dir));
230 :
231 0 : if (WARN_ON_ONCE(!dev->dma_mask))
232 : return DMA_MAPPING_ERROR;
233 :
234 : /* Don't allow RAM to be mapped */
235 0 : if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
236 : return DMA_MAPPING_ERROR;
237 :
238 0 : if (dma_map_direct(dev, ops))
239 0 : addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
240 : else if (ops->map_resource)
241 : addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
242 :
243 0 : debug_dma_map_resource(dev, phys_addr, size, dir, addr);
244 0 : return addr;
245 : }
246 : EXPORT_SYMBOL(dma_map_resource);
247 :
248 0 : void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
249 : enum dma_data_direction dir, unsigned long attrs)
250 : {
251 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
252 :
253 0 : BUG_ON(!valid_dma_direction(dir));
254 0 : if (!dma_map_direct(dev, ops) && ops->unmap_resource)
255 : ops->unmap_resource(dev, addr, size, dir, attrs);
256 0 : debug_dma_unmap_resource(dev, addr, size, dir);
257 0 : }
258 : EXPORT_SYMBOL(dma_unmap_resource);
259 :
260 0 : void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
261 : enum dma_data_direction dir)
262 : {
263 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
264 :
265 0 : BUG_ON(!valid_dma_direction(dir));
266 0 : if (dma_map_direct(dev, ops))
267 0 : dma_direct_sync_single_for_cpu(dev, addr, size, dir);
268 : else if (ops->sync_single_for_cpu)
269 : ops->sync_single_for_cpu(dev, addr, size, dir);
270 0 : debug_dma_sync_single_for_cpu(dev, addr, size, dir);
271 0 : }
272 : EXPORT_SYMBOL(dma_sync_single_for_cpu);
273 :
274 0 : void dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
275 : size_t size, enum dma_data_direction dir)
276 : {
277 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
278 :
279 0 : BUG_ON(!valid_dma_direction(dir));
280 0 : if (dma_map_direct(dev, ops))
281 0 : dma_direct_sync_single_for_device(dev, addr, size, dir);
282 : else if (ops->sync_single_for_device)
283 : ops->sync_single_for_device(dev, addr, size, dir);
284 0 : debug_dma_sync_single_for_device(dev, addr, size, dir);
285 0 : }
286 : EXPORT_SYMBOL(dma_sync_single_for_device);
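
/*
 * Editor's usage sketch (not part of mapping.c): reusing one streaming
 * mapping across transfers. The CPU may only touch the buffer between
 * dma_sync_single_for_cpu() and dma_sync_single_for_device(); outside
 * that window the device owns it. Names are illustrative.
 */
static void foo_recycle_rx_buffer(struct device *dev, void *buf,
				  dma_addr_t addr, size_t len)
{
	/* hand the buffer to the CPU and look at the received data */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);

	/* ... read the payload through "buf" ... */

	/* give it back to the device for the next transfer */
	dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}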
287 :
288 0 : void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
289 : int nelems, enum dma_data_direction dir)
290 : {
291 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
292 :
293 0 : BUG_ON(!valid_dma_direction(dir));
294 0 : if (dma_map_direct(dev, ops))
295 0 : dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
296 : else if (ops->sync_sg_for_cpu)
297 : ops->sync_sg_for_cpu(dev, sg, nelems, dir);
298 0 : debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
299 0 : }
300 : EXPORT_SYMBOL(dma_sync_sg_for_cpu);
301 :
302 0 : void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
303 : int nelems, enum dma_data_direction dir)
304 : {
305 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
306 :
307 0 : BUG_ON(!valid_dma_direction(dir));
308 0 : if (dma_map_direct(dev, ops))
309 0 : dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
310 : else if (ops->sync_sg_for_device)
311 : ops->sync_sg_for_device(dev, sg, nelems, dir);
312 0 : debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
313 0 : }
314 : EXPORT_SYMBOL(dma_sync_sg_for_device);
315 :
316 : /*
317 : * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
318 : * that the intention is to allow exporting memory allocated via the
319 : * coherent DMA APIs through the dma_buf API, which only accepts a
320 : * scatter/gather table. This presents a couple of problems:
321 : * 1. Not all memory allocated via the coherent DMA APIs is backed by
322 : * a struct page
323 : * 2. Passing coherent DMA memory into the streaming APIs is not allowed
324 : * as we will try to flush the memory through a different alias to that
325 : * actually being used (and the flushes are redundant.)
326 : */
327 0 : int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
328 : void *cpu_addr, dma_addr_t dma_addr, size_t size,
329 : unsigned long attrs)
330 : {
331 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
332 :
333 0 : if (dma_alloc_direct(dev, ops))
334 0 : return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
335 : size, attrs);
336 : if (!ops->get_sgtable)
337 : return -ENXIO;
338 : return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
339 : }
340 : EXPORT_SYMBOL(dma_get_sgtable_attrs);
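
/*
 * Editor's usage sketch (not part of mapping.c): the typical caller of
 * dma_get_sgtable() is a dma-buf exporter that needs an sg_table view of a
 * coherent allocation. Given the caveats above, the resulting table should
 * only describe the buffer and must not be fed back into the streaming
 * mapping API. dma_get_sgtable() is dma_get_sgtable_attrs() with
 * attrs == 0; "foo_export_sgt" is an illustrative name.
 */
static int foo_export_sgt(struct device *dev, struct sg_table *sgt,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = dma_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);

	if (ret < 0)
		return ret;

	/* ... hand *sgt to the importer; sg_free_table() when done ... */
	return 0;
}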
341 :
342 : #ifdef CONFIG_MMU
343 : /*
344 : * Return the page attributes used for mapping dma_alloc_* memory, either in
345 : * kernel space if remapping is needed, or to userspace through dma_mmap_*.
346 : */
347 0 : pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
348 : {
349 0 : if (force_dma_unencrypted(dev))
350 : prot = pgprot_decrypted(prot);
351 0 : if (dev_is_dma_coherent(dev))
352 0 : return prot;
353 : #ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
354 : if (attrs & DMA_ATTR_WRITE_COMBINE)
355 : return pgprot_writecombine(prot);
356 : #endif
357 : return pgprot_dmacoherent(prot);
358 : }
359 : #endif /* CONFIG_MMU */
360 :
361 : /**
362 : * dma_can_mmap - check if a given device supports dma_mmap_*
363 : * @dev: device to check
364 : *
365 : * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
366 : * map DMA allocations to userspace.
367 : */
368 0 : bool dma_can_mmap(struct device *dev)
369 : {
370 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
371 :
372 0 : if (dma_alloc_direct(dev, ops))
373 0 : return dma_direct_can_mmap(dev);
374 : return ops->mmap != NULL;
375 : }
376 : EXPORT_SYMBOL_GPL(dma_can_mmap);
377 :
378 : /**
379 : * dma_mmap_attrs - map a coherent DMA allocation into user space
380 : * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
381 : * @vma: vm_area_struct describing requested user mapping
382 : * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
383 : * @dma_addr: device-view address returned from dma_alloc_attrs
384 : * @size: size of memory originally requested in dma_alloc_attrs
385 : * @attrs: attributes of mapping properties requested in dma_alloc_attrs
386 : *
387 : * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
388 : * space. The coherent DMA buffer must not be freed by the driver until the
389 : * user space mapping has been released.
390 : */
391 0 : int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
392 : void *cpu_addr, dma_addr_t dma_addr, size_t size,
393 : unsigned long attrs)
394 : {
395 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
396 :
397 0 : if (dma_alloc_direct(dev, ops))
398 0 : return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
399 : attrs);
400 : if (!ops->mmap)
401 : return -ENXIO;
402 : return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
403 : }
404 : EXPORT_SYMBOL(dma_mmap_attrs);
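
/*
 * Editor's usage sketch (not part of mapping.c): a character-device mmap
 * handler exposing a coherent allocation to user space, guarded by
 * dma_can_mmap() as described above. dma_mmap_coherent() is
 * dma_mmap_attrs() with attrs == 0. Assumes <linux/fs.h> and
 * <linux/dma-mapping.h>; "struct foo_buf" and its fields are illustrative,
 * and the buffer must not be freed while the user mapping exists.
 */
struct foo_buf {
	struct device *dev;
	void *vaddr;
	dma_addr_t dma;
	size_t size;
};

static int foo_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct foo_buf *buf = file->private_data;

	if (!dma_can_mmap(buf->dev))
		return -ENXIO;
	if (vma->vm_end - vma->vm_start > buf->size)
		return -EINVAL;

	return dma_mmap_coherent(buf->dev, vma, buf->vaddr, buf->dma,
				 buf->size);
}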
405 :
406 0 : u64 dma_get_required_mask(struct device *dev)
407 : {
408 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
409 :
410 0 : if (dma_alloc_direct(dev, ops))
411 0 : return dma_direct_get_required_mask(dev);
412 : if (ops->get_required_mask)
413 : return ops->get_required_mask(dev);
414 :
415 : /*
416 : * We require every DMA ops implementation to at least support a 32-bit
417 : * DMA mask (and use bounce buffering if that isn't supported in
418 : * hardware). As the direct mapping code has its own routine to
419 : * actually report an optimal mask we default to 32-bit here as that
420 : * is the right thing for most IOMMUs, and at least not actively
421 : * harmful in general.
422 : */
423 : return DMA_BIT_MASK(32);
424 : }
425 : EXPORT_SYMBOL_GPL(dma_get_required_mask);
426 :
427 0 : void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
428 : gfp_t flag, unsigned long attrs)
429 : {
430 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
431 0 : void *cpu_addr;
432 :
433 0 : WARN_ON_ONCE(!dev->coherent_dma_mask);
434 :
435 0 : if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
436 : return cpu_addr;
437 :
438 : /* let the implementation decide on the zone to allocate from: */
439 0 : flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
440 :
441 0 : if (dma_alloc_direct(dev, ops))
442 0 : cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
443 : else if (ops->alloc)
444 : cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
445 : else
446 : return NULL;
447 :
448 0 : debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
449 0 : return cpu_addr;
450 : }
451 : EXPORT_SYMBOL(dma_alloc_attrs);
452 :
453 0 : void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
454 : dma_addr_t dma_handle, unsigned long attrs)
455 : {
456 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
457 :
458 0 : if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
459 : return;
460 : /*
461 : * On non-coherent platforms which implement DMA-coherent buffers via
462 : * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
463 : * this far in IRQ context is a) at risk of a BUG_ON() or trying to
464 : * sleep on some machines, and b) an indication that the driver is
465 : * probably misusing the coherent API anyway.
466 : */
467 0 : WARN_ON(irqs_disabled());
468 :
469 0 : if (!cpu_addr)
470 : return;
471 :
472 0 : debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
473 0 : if (dma_alloc_direct(dev, ops))
474 0 : dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
475 : else if (ops->free)
476 : ops->free(dev, size, cpu_addr, dma_handle, attrs);
477 : }
478 : EXPORT_SYMBOL(dma_free_attrs);
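
/*
 * Editor's usage sketch (not part of mapping.c): an unmanaged allocation
 * with an explicit attribute. Per the WARN_ON(irqs_disabled()) above, the
 * matching dma_free_attrs() must be called from sleepable context (e.g.
 * the driver's remove path), never from an interrupt handler. Names are
 * illustrative.
 */
static void *foo_alloc_fb(struct device *dev, size_t size, dma_addr_t *dma)
{
	return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
			       DMA_ATTR_WRITE_COMBINE);
}

static void foo_free_fb(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma)
{
	dma_free_attrs(dev, size, vaddr, dma, DMA_ATTR_WRITE_COMBINE);
}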
479 :
480 0 : struct page *dma_alloc_pages(struct device *dev, size_t size,
481 : dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
482 : {
483 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
484 0 : struct page *page;
485 :
486 0 : if (WARN_ON_ONCE(!dev->coherent_dma_mask))
487 : return NULL;
488 0 : if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
489 : return NULL;
490 :
491 0 : size = PAGE_ALIGN(size);
492 0 : if (dma_alloc_direct(dev, ops))
493 0 : page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
494 : else if (ops->alloc_pages)
495 : page = ops->alloc_pages(dev, size, dma_handle, dir, gfp);
496 : else
497 : return NULL;
498 :
499 0 : debug_dma_map_page(dev, page, 0, size, dir, *dma_handle);
500 :
501 0 : return page;
502 : }
503 : EXPORT_SYMBOL_GPL(dma_alloc_pages);
504 :
505 0 : void dma_free_pages(struct device *dev, size_t size, struct page *page,
506 : dma_addr_t dma_handle, enum dma_data_direction dir)
507 : {
508 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
509 :
510 0 : size = PAGE_ALIGN(size);
511 0 : debug_dma_unmap_page(dev, dma_handle, size, dir);
512 :
513 0 : if (dma_alloc_direct(dev, ops))
514 0 : dma_direct_free_pages(dev, size, page, dma_handle, dir);
515 : else if (ops->free_pages)
516 : ops->free_pages(dev, size, page, dma_handle, dir);
517 0 : }
518 : EXPORT_SYMBOL_GPL(dma_free_pages);
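
/*
 * Editor's usage sketch (not part of mapping.c): dma_alloc_pages() returns
 * struct page backed memory that is already mapped for one direction; the
 * matching dma_free_pages() must be called with the same size and
 * direction. Names are illustrative.
 */
static struct page *foo_alloc_rx_page(struct device *dev, dma_addr_t *dma)
{
	return dma_alloc_pages(dev, PAGE_SIZE, dma, DMA_FROM_DEVICE,
			       GFP_KERNEL);
}

static void foo_free_rx_page(struct device *dev, struct page *page,
			     dma_addr_t dma)
{
	dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_FROM_DEVICE);
}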
519 :
520 4 : int dma_supported(struct device *dev, u64 mask)
521 : {
522 2 : const struct dma_map_ops *ops = get_dma_ops(dev);
523 :
524 : /*
525 : * ->dma_supported sets the bypass flag, so we must always call
526 : * into the method here unless the device is truly direct mapped.
527 : */
528 4 : if (!ops)
529 4 : return dma_direct_supported(dev, mask);
530 : if (!ops->dma_supported)
531 : return 1;
532 : return ops->dma_supported(dev, mask);
533 : }
534 : EXPORT_SYMBOL(dma_supported);
535 :
536 : #ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
537 : void arch_dma_set_mask(struct device *dev, u64 mask);
538 : #else
539 : #define arch_dma_set_mask(dev, mask) do { } while (0)
540 : #endif
541 :
542 2 : int dma_set_mask(struct device *dev, u64 mask)
543 : {
544 : /*
545 : * Truncate the mask to the actually supported dma_addr_t width to
546 : * avoid generating unsupportable addresses.
547 : */
548 2 : mask = (dma_addr_t)mask;
549 :
550 4 : if (!dev->dma_mask || !dma_supported(dev, mask))
551 0 : return -EIO;
552 :
553 2 : arch_dma_set_mask(dev, mask);
554 2 : *dev->dma_mask = mask;
555 2 : return 0;
556 : }
557 : EXPORT_SYMBOL(dma_set_mask);
558 :
559 : #ifndef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
560 2 : int dma_set_coherent_mask(struct device *dev, u64 mask)
561 : {
562 : /*
563 : * Truncate the mask to the actually supported dma_addr_t width to
564 : * avoid generating unsupportable addresses.
565 : */
566 2 : mask = (dma_addr_t)mask;
567 :
568 2 : if (!dma_supported(dev, mask))
569 : return -EIO;
570 :
571 2 : dev->coherent_dma_mask = mask;
572 2 : return 0;
573 : }
574 : EXPORT_SYMBOL(dma_set_coherent_mask);
575 : #endif
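
/*
 * Editor's usage sketch (not part of mapping.c): the usual probe-time mask
 * negotiation. dma_set_mask_and_coherent() combines dma_set_mask() and
 * dma_set_coherent_mask(); a driver asks for the widest mask its hardware
 * supports and falls back to 32 bits if that is refused.
 */
static int foo_setup_dma(struct device *dev)
{
	int ret;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (ret)
		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	return ret;
}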
576 :
577 0 : size_t dma_max_mapping_size(struct device *dev)
578 : {
579 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
580 0 : size_t size = SIZE_MAX;
581 :
582 0 : if (dma_map_direct(dev, ops))
583 0 : size = dma_direct_max_mapping_size(dev);
584 : else if (ops && ops->max_mapping_size)
585 : size = ops->max_mapping_size(dev);
586 :
587 0 : return size;
588 : }
589 : EXPORT_SYMBOL_GPL(dma_max_mapping_size);
590 :
591 0 : bool dma_need_sync(struct device *dev, dma_addr_t dma_addr)
592 : {
593 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
594 :
595 0 : if (dma_map_direct(dev, ops))
596 0 : return dma_direct_need_sync(dev, dma_addr);
597 : return ops->sync_single_for_cpu || ops->sync_single_for_device;
598 : }
599 : EXPORT_SYMBOL_GPL(dma_need_sync);
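
/*
 * Editor's usage sketch (not part of mapping.c): dma_need_sync() lets a
 * buffer-recycling allocator skip per-buffer sync calls when the mapping
 * is known not to need them. "foo_refill" is an illustrative name.
 */
static void foo_refill(struct device *dev, dma_addr_t addr, size_t len)
{
	if (dma_need_sync(dev, addr))
		dma_sync_single_for_device(dev, addr, len, DMA_FROM_DEVICE);
}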
600 :
601 0 : unsigned long dma_get_merge_boundary(struct device *dev)
602 : {
603 0 : const struct dma_map_ops *ops = get_dma_ops(dev);
604 :
605 0 : if (!ops || !ops->get_merge_boundary)
606 0 : return 0; /* can't merge */
607 :
608 : return ops->get_merge_boundary(dev);
609 : }
610 : EXPORT_SYMBOL_GPL(dma_get_merge_boundary);