/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps two global lists, dma_device_list and dma_client_list.
 * Both of these are protected by a mutex, dma_list_mutex.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is simply set up by the driver.
 *
 * Each client is responsible for keeping track of the channels it uses.  See
 * the definition of dma_event_callback in dmaengine.h.
 *
 * Each device has a kref, which is initialized to 1 when the device is
 * registered.  A kref_get is done for each channel device registered.  When
 * the device is released, the corresponding kref_put is done in the release
 * method.  Every time one of the device's channels is allocated to a client,
 * a kref_get occurs.  When the channel is freed, the corresponding kref_put
 * happens.  The device's release function does a completion, so
 * unregister_device does a remove event, device_unregister, a kref_put
 * for the first reference, then waits on the completion for all other
 * references to finish.
 *
 * Each channel has an open-coded implementation of Rusty Russell's "bigref,"
 * with a kref and a per_cpu local_t.  A dma_chan_get is called when a client
 * signals that it wants to use a channel, and dma_chan_put is called when
 * a channel is removed or a client using it is unregistered.  A client can
 * take extra references per outstanding transaction, as is the case with
 * the NET DMA client.  The release function does a kref_put on the device.
 */
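/*
 * Example (editorial sketch, not from the original sources): the client-side
 * pattern the comment above describes.  The names "my_client" and
 * "my_event_callback" are hypothetical; struct dma_client, dma_event_callback
 * and the DMA_RESOURCE_AVAILABLE / DMA_ACK / DMA_NAK values come from
 * dmaengine.h.
 *
 *	static enum dma_state_client
 *	my_event_callback(struct dma_client *client, struct dma_chan *chan,
 *			  enum dma_state state)
 *	{
 *		if (state == DMA_RESOURCE_AVAILABLE)
 *			return DMA_ACK;		(use this channel)
 *		return DMA_NAK;			(stop offering channels)
 *	}
 *
 *	static struct dma_client my_client = {
 *		.event_callback = my_event_callback,
 *	};
 *
 *	dma_cap_set(DMA_MEMCPY, my_client.cap_mask);
 *	dma_async_client_register(&my_client);
 *	...
 *	dma_async_client_unregister(&my_client);
 */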
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>

static DEFINE_MUTEX(dma_list_mutex);
static LIST_HEAD(dma_device_list);
static LIST_HEAD(dma_client_list);
static long dmaengine_ref_count;
/* --- sysfs implementation --- */

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->memcpy_count;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);
	unsigned long count = 0;
	int i;

	for_each_possible_cpu(i)
		count += per_cpu_ptr(chan->local, i)->bytes_transferred;

	return sprintf(buf, "%lu\n", count);
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan = to_dma_chan(dev);

	return sprintf(buf, "%d\n", chan->client_count);
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void dma_async_device_cleanup(struct kref *kref);

static void dma_dev_release(struct device *dev)
{
	struct dma_chan *chan = to_dma_chan(dev);

	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= dma_dev_release,
};
/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan, NULL);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}
/**
 * dma_client_chan_alloc - try to allocate channels to a client
 * @client: &dma_client
 *
 * Called with dma_list_mutex held.
 */
static void dma_client_chan_alloc(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;
	enum dma_state_client ack;

	/* Find a channel */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		/* Does the client require a specific DMA controller? */
		if (client->slave && client->slave->dma_dev
				&& client->slave->dma_dev != device->dev)
			continue;
		if (!dma_device_satisfies_mask(device, client->cap_mask))
			continue;

		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			ack = client->event_callback(client, chan,
						     DMA_RESOURCE_AVAILABLE);

			/* we are done once this client rejects
			 * an available resource
			 */
			if (ack == DMA_NAK)
				return;
		}
	}
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);
/**
 * dma_chan_cleanup - release a DMA channel's resources
 * @kref: kernel reference structure that contains the DMA channel device
 */
void dma_chan_cleanup(struct kref *kref)
{
	struct dma_chan *chan = container_of(kref, struct dma_chan, refcount);

	kref_put(&chan->device->refcount, dma_async_device_cleanup);
}
EXPORT_SYMBOL(dma_chan_cleanup);

static void dma_chan_free_rcu(struct rcu_head *rcu)
{
	struct dma_chan *chan = container_of(rcu, struct dma_chan, rcu);

	kref_put(&chan->refcount, dma_chan_cleanup);
}

static void dma_chan_release(struct dma_chan *chan)
{
	call_rcu(&chan->rcu, dma_chan_free_rcu);
}
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
subsys_initcall(dma_channel_table_init);
/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	struct dma_chan *chan;
	int cpu;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	cpu = get_cpu();
	chan = per_cpu_ptr(channel_table[tx_type], cpu)->chan;
	put_cpu();

	return chan;
}
EXPORT_SYMBOL(dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	WARN_ONCE(dmaengine_ref_count == 0,
		  "client called %s without a reference", __func__);

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);
/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;
	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev)
{
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dev_name(&chan->dev));
			continue;
		}
		ret = chan;
		break;
	}

	return ret;
}
/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	enum dma_state_client ack;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device);
		if (!chan)
			continue;

		if (fn)
			ack = fn(chan, fn_param);
		else
			ack = DMA_ACK;

		if (ack == DMA_ACK) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dev_name(&chan->dev));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dev_name(&chan->dev), err);
			else
				break;
		} else if (ack == DMA_DUP) {
			pr_debug("%s: %s filter said DMA_DUP\n",
				 __func__, dev_name(&chan->dev));
		} else if (ack == DMA_NAK) {
			pr_debug("%s: %s filter said DMA_NAK\n",
				 __func__, dev_name(&chan->dev));
		} else
			WARN_ONCE(1, "filter_fn: unknown response?\n");
		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dev_name(&chan->dev) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
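/*
 * Example (editorial sketch, not from the original sources): requesting an
 * exclusive channel with a filter.  "my_filter" and "match_data" are
 * hypothetical; dma_request_channel() is the convenience wrapper in
 * dmaengine.h that passes the mask by reference to __dma_request_channel().
 *
 *	static enum dma_state_client my_filter(struct dma_chan *chan, void *param)
 *	{
 *		(claim only channels belonging to a specific device)
 *		return chan->device->dev == param ? DMA_ACK : DMA_DUP;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, match_data);
 *	if (chan) {
 *		(... use the channel exclusively ...)
 *		dma_release_channel(chan);
 *	}
 */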
void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
/**
 * dma_clients_notify_available - broadcast available channels to the clients
 */
static void dma_clients_notify_available(void)
{
	struct dma_client *client;

	mutex_lock(&dma_list_mutex);

	list_for_each_entry(client, &dma_client_list, global_node)
		dma_client_chan_alloc(client);

	mutex_unlock(&dma_list_mutex);
}

/**
 * dma_async_client_register - register a &dma_client
 * @client: ptr to a client structure with valid 'event_callback' and 'cap_mask'
 */
void dma_async_client_register(struct dma_client *client)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	/* validate client data */
	BUG_ON(dma_has_cap(DMA_SLAVE, client->cap_mask) &&
	       !client->slave);

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("dmaengine: failed to get %s: (%d)\n",
				       dev_name(&chan->dev), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	list_add_tail(&client->global_node, &dma_client_list);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_register);
/**
 * dma_async_client_unregister - unregister a client and free the &dma_client
 * @client: &dma_client to free
 *
 * Force frees any allocated DMA channels, frees the &dma_client memory
 */
void dma_async_client_unregister(struct dma_client *client)
{
	struct dma_device *device;
	struct dma_chan *chan;

	if (!client)
		return;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}

	list_del(&client->global_node);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_unregister);

/**
 * dma_async_client_chan_request - send all available channels to the
 *	client that satisfy the capability mask
 * @client - requester
 */
void dma_async_client_chan_request(struct dma_client *client)
{
	mutex_lock(&dma_list_mutex);
	dma_client_chan_alloc(client);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_client_chan_request);
/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	static int id;
	int chancnt = 0, rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_ZERO_SUM, device->cap_mask) &&
		!device->device_prep_dma_zero_sum);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_prep_slave_sg);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_terminate_all);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_is_tx_complete);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	init_completion(&device->done);
	kref_init(&device->refcount);

	mutex_lock(&dma_list_mutex);
	device->dev_id = id++;
	mutex_unlock(&dma_list_mutex);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			continue;

		chan->chan_id = chancnt++;
		chan->dev.class = &dma_devclass;
		chan->dev.parent = device->dev;
		dev_set_name(&chan->dev, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		/* One for the channel, one for the class device */
		kref_get(&device->refcount);
		kref_get(&device->refcount);
		kref_init(&chan->refcount);
		chan->client_count = 0;
		chan->slow_ref = 0;
		INIT_RCU_HEAD(&chan->rcu);
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dma_clients_notify_available();

	return 0;

err_out:
	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		kref_put(&device->refcount, dma_async_device_cleanup);
		device_unregister(&chan->dev);
		chancnt--;
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
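/*
 * Example (editorial sketch, not from the original sources): the shape of a
 * driver-side registration.  The "foo_*" names are hypothetical; the
 * operations and capability bits are the ones validated by the BUG_ON()
 * checks above.
 *
 *	dma_cap_set(DMA_MEMCPY, foo->dma.cap_mask);
 *	foo->dma.device_alloc_chan_resources = foo_alloc_chan_resources;
 *	foo->dma.device_free_chan_resources = foo_free_chan_resources;
 *	foo->dma.device_prep_dma_memcpy = foo_prep_memcpy;
 *	foo->dma.device_is_tx_complete = foo_is_tx_complete;
 *	foo->dma.device_issue_pending = foo_issue_pending;
 *	foo->dma.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&foo->dma.channels);
 *	(... add each struct dma_chan to foo->dma.channels ...)
 *
 *	err = dma_async_device_register(&foo->dma);
 */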
/**
 * dma_async_device_cleanup - function called when all references are released
 * @kref: kernel reference object
 */
static void dma_async_device_cleanup(struct kref *kref)
{
	struct dma_device *device;

	device = container_of(kref, struct dma_device, refcount);
	complete(&device->done);
}

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		device_unregister(&chan->dev);
		dma_chan_release(chan);
	}

	kref_put(&device->refcount, dma_async_device_cleanup);
	wait_for_completion(&device->done);
}
EXPORT_SYMBOL(dma_async_device_unregister);
/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
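/*
 * Example (editorial sketch, not from the original sources): offloading a
 * copy from client context.  "my_copy" is hypothetical; the caller is assumed
 * to already hold a dmaengine reference (e.g. it is a registered dma_client),
 * which is what dma_find_channel() expects.
 *
 *	static int my_copy(void *dst, void *src, size_t len)
 *	{
 *		struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 *		dma_cookie_t cookie;
 *
 *		if (!chan) {
 *			memcpy(dst, src, len);	(fall back to a cpu copy)
 *			return 0;
 *		}
 *
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *		if (cookie < 0)
 *			return cookie;
 *
 *		(dma_sync_wait() issues pending operations and polls)
 *		return dma_sync_wait(chan, cookie) == DMA_SUCCESS ? 0 : -EIO;
 *	}
 */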
/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
			  unsigned int dest_off, struct page *src_pg, unsigned int src_off,
			  size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int cpu;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len,
					 DMA_CTRL_ACK);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	cpu = get_cpu();
	per_cpu_ptr(chan->local, cpu)->bytes_transferred += len;
	per_cpu_ptr(chan->local, cpu)->memcpy_count++;
	put_cpu();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	spin_lock_init(&tx->lock);
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 *
 * This routine assumes that tx was obtained from a call to async_memcpy,
 * async_xor, async_memset, etc which ensures that tx is "in-flight" (prepped
 * and submitted).  Walking the parent chain is only meant to cover for DMA
 * drivers that do not implement the DMA_INTERRUPT capability and may race with
 * the driver's descriptor cleanup routine.
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	enum dma_status status;
	struct dma_async_tx_descriptor *iter;
	struct dma_async_tx_descriptor *parent;

	if (!tx)
		return DMA_SUCCESS;

	WARN_ONCE(tx->parent, "%s: speculatively walking dependency chain for"
		  " %s\n", __func__, dev_name(&tx->chan->dev));

	/* poll through the dependency chain, return when tx is complete */
	do {
		iter = tx;

		/* find the root of the unsubmitted dependency chain */
		do {
			parent = iter->parent;
			if (!parent)
				break;
			else
				iter = parent;
		} while (parent);

		/* there is a small window for ->parent == NULL and
		 * ->cookie == -EBUSY
		 */
		while (iter->cookie == -EBUSY)
			cpu_relax();

		status = dma_sync_wait(iter->chan, iter->cookie);
	} while (status == DMA_IN_PROGRESS || (iter != tx));

	return status;
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = tx->next;
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		spin_lock_bh(&dep->lock);
		dep->parent = NULL;
		dep_next = dep->next;
		if (dep_next && dep_next->chan == chan)
			dep->next = NULL; /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		spin_unlock_bh(&dep->lock);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
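/*
 * Example (editorial sketch, not from the original sources): a driver
 * typically calls dma_run_dependencies() from its descriptor clean-up path
 * once a descriptor completes, so that operations chained behind it are
 * submitted.  "foo_clean_descriptor" and "struct foo_desc" are hypothetical.
 *
 *	static void foo_clean_descriptor(struct foo_desc *desc)
 *	{
 *		struct dma_async_tx_descriptor *tx = &desc->txd;
 *
 *		if (tx->callback)
 *			tx->callback(tx->callback_param);
 *		dma_run_dependencies(tx);
 *	}
 */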
static int __init dma_bus_init(void)
{
	mutex_init(&dma_list_mutex);
	return class_register(&dma_devclass);
}
subsys_initcall(dma_bus_init);