// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * Copyright 2001 David Brownell
 * Copyright 2007 Intel Corporation
 * Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 * Many older drivers still have their own code to do this.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
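
/*
 * A minimal usage sketch for orientation (hypothetical driver code, not part
 * of this file; "mydev" and the block sizes are invented for illustration):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t dma;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("frames", mydev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *	...
 *	dma_pool_free(pool, vaddr, dma);
 *	dma_pool_destroy(pool);
 */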

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/stat.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/wait.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t
show_pools(struct device *dev, struct device_attribute *attr, char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, 0444, show_pools, NULL);
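
/*
 * Reading the resulting "pools" sysfs attribute therefore prints, per pool:
 * name, blocks in use, total blocks, block size and page count.  A
 * hypothetical example (names and numbers invented for illustration):
 *
 *	poolinfo - 0.1
 *	buffer-2048        12   32 2048 16
 *	buffer-512          3    8  512  1
 */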

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
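
/*
 * A sketch of the parameter rounding above, with invented numbers: a request
 * for 60-byte blocks aligned to 32 bytes yields 64-byte blocks
 * (ALIGN(60, 32)), an allocation size of PAGE_SIZE, and, with a 4096-byte
 * boundary, no returned block straddling a 4 KiB address boundary:
 *
 *	pool = dma_pool_create("ep-descs", dev, 60, 32, 4096);
 */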

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
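
/*
 * The loop above stores, at the start of each free block, the offset of the
 * next free block, forming the page's free list.  With invented numbers
 * (size 1024, allocation and boundary both 4096) the chain reads:
 *
 *	offset    0 -> 1024
 *	offset 1024 -> 2048
 *	offset 2048 -> 4096	(>= allocation, i.e. end of chain)
 *
 * Note that the >= check also skips a candidate block that would end
 * exactly on the boundary (offset 3072 here), trading a little space for a
 * simpler test.
 */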

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}

static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}

static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p busy\n", __func__,
					pool->name, page->vaddr);
			else
				pr_err("%s %s, %p busy\n", __func__,
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev, "%s %s, %p (corrupted)\n",
					__func__, pool->name, retval);
			else
				pr_err("%s %s, %p (corrupted)\n",
				       __func__, pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
				       data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (want_init_on_alloc(mem_flags))
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
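
/*
 * A minimal caller-side sketch (hypothetical code, assuming a pool created
 * as above): the returned CPU pointer and the DMA handle name the same
 * block, and both must later be passed to dma_pool_free().
 *
 *	dma_addr_t dma;
 *	void *buf = dma_pool_alloc(pool, GFP_KERNEL, &dma);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// hand 'dma' to the device; use 'buf' from the CPU side
 */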

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p/%pad (bad dma)\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}

	offset = vaddr - page->vaddr;
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
#ifdef DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev, "%s %s, %p (bad vaddr)/%pad\n",
				__func__, pool->name, vaddr, &dma);
		else
			pr_err("%s %s, %p (bad vaddr)/%pad\n",
			       __func__, pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "%s %s, dma %pad already free\n",
					__func__, pool->name, &dma);
			else
				pr_err("%s %s, dma %pad already free\n",
				       __func__, pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
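
/*
 * Returning the block from the earlier sketch (hypothetical continuation):
 * both addresses are needed so pool_find_page() can locate the page and the
 * debug checks can verify the offset.
 *
 *	dma_pool_free(pool, buf, dma);
 */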

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
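
/*
 * A sketch of the managed variant in a probe path (hypothetical function;
 * "pdev" and the sizes are assumptions): no explicit destroy is needed, as
 * devres tears the pool down when the driver detaches.
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("cmds", &pdev->dev, 32, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		...
 *		return 0;
 *	}
 */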

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);