/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine, which does asynchronous
 * copy operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include "dma.h"
#include "registers.h"
#include "hw.h"

#include "../dmaengine.h"

int ioat_pending_level = 4;
module_param(ioat_pending_level, int, 0644);
MODULE_PARM_DESC(ioat_pending_level,
		 "high-water mark for pushing ioat descriptors (default: 4)");
int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat+: allocate 2^n descriptors per channel (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat+: upper limit for ring size (default: 16)");
static char ioat_interrupt_style[32] = "msix";
module_param_string(ioat_interrupt_style, ioat_interrupt_style,
		    sizeof(ioat_interrupt_style), 0644);
MODULE_PARM_DESC(ioat_interrupt_style,
		 "set ioat interrupt style: msix (default), msi, intx");

/**
 * ioat_dma_do_interrupt - handler used for single vector interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
{
	struct ioatdma_device *instance = data;
	struct ioatdma_chan *ioat_chan;
	unsigned long attnstatus;
	int bit;
	u8 intrctrl;

	intrctrl = readb(instance->reg_base + IOAT_INTRCTRL_OFFSET);

	if (!(intrctrl & IOAT_INTRCTRL_MASTER_INT_EN))
		return IRQ_NONE;

	if (!(intrctrl & IOAT_INTRCTRL_INT_STATUS)) {
		writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
		return IRQ_NONE;
	}

	attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
	for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
		ioat_chan = ioat_chan_by_index(instance, bit);
		if (test_bit(IOAT_RUN, &ioat_chan->state))
			tasklet_schedule(&ioat_chan->cleanup_task);
	}

	writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
	return IRQ_HANDLED;
}

/**
 * ioat_dma_do_interrupt_msix - handler used for vector-per-channel interrupt mode
 * @irq: interrupt number
 * @data: interrupt data
 */
static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
	struct ioatdma_chan *ioat_chan = data;

	if (test_bit(IOAT_RUN, &ioat_chan->state))
		tasklet_schedule(&ioat_chan->cleanup_task);

	return IRQ_HANDLED;
}

/* common channel initialization */
void
ioat_init_channel(struct ioatdma_device *ioat_dma,
		  struct ioatdma_chan *ioat_chan, int idx)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c = &ioat_chan->dma_chan;
	unsigned long data = (unsigned long) c;

	ioat_chan->ioat_dma = ioat_dma;
	ioat_chan->reg_base = ioat_dma->reg_base + (0x80 * (idx + 1));
	spin_lock_init(&ioat_chan->cleanup_lock);
	ioat_chan->dma_chan.device = dma;
	dma_cookie_init(&ioat_chan->dma_chan);
	list_add_tail(&ioat_chan->dma_chan.device_node, &dma->channels);
	ioat_dma->idx[idx] = ioat_chan;
	init_timer(&ioat_chan->timer);
	ioat_chan->timer.function = ioat_dma->timer_fn;
	ioat_chan->timer.data = data;
	tasklet_init(&ioat_chan->cleanup_task, ioat_dma->cleanup_fn, data);
}

void ioat_stop(struct ioatdma_chan *ioat_chan)
{
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct pci_dev *pdev = ioat_dma->pdev;
	int chan_id = chan_num(ioat_chan);
	struct msix_entry *msix;

	/* 1/ stop irq from firing tasklets
	 * 2/ stop the tasklet from re-arming irqs
	 */
	clear_bit(IOAT_RUN, &ioat_chan->state);

	/* flush inflight interrupts */
	switch (ioat_dma->irq_mode) {
	case IOAT_MSIX:
		msix = &ioat_dma->msix_entries[chan_id];
		synchronize_irq(msix->vector);
		break;
	case IOAT_MSI:
	case IOAT_INTX:
		synchronize_irq(pdev->irq);
		break;
	default:
		break;
	}

	/* flush inflight timers */
	del_timer_sync(&ioat_chan->timer);

	/* flush inflight tasklet runs */
	tasklet_kill(&ioat_chan->cleanup_task);

	/* final cleanup now that everything is quiesced and can't re-arm */
	ioat_dma->cleanup_fn((unsigned long)&ioat_chan->dma_chan);
}

dma_addr_t ioat_get_current_completion(struct ioatdma_chan *ioat_chan)
{
	dma_addr_t phys_complete;
	u64 completion;

	completion = *ioat_chan->completion;
	phys_complete = ioat_chansts_to_addr(completion);

	dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
		(unsigned long long) phys_complete);

	if (is_ioat_halted(completion)) {
		u32 chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

		dev_err(to_dev(ioat_chan), "Channel halted, chanerr = %x\n",
			chanerr);

		/* TODO do something to salvage the situation */
	}

	return phys_complete;
}

bool ioat_cleanup_preamble(struct ioatdma_chan *ioat_chan,
			   dma_addr_t *phys_complete)
{
	*phys_complete = ioat_get_current_completion(ioat_chan);
	if (*phys_complete == ioat_chan->last_completion)
		return false;

	clear_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	return true;
}

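/*
 * ioat_dma_tx_status - poll the status of a transaction; if the cookie is
 * not yet complete, run the cleanup path once and check the cookie again
 */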
enum dma_status
ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	enum dma_status ret;

	ret = dma_cookie_status(c, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	ioat_dma->cleanup_fn((unsigned long) c);

	return dma_cookie_status(c, cookie, txstate);
}

/*
 * Perform an IOAT transaction to verify the HW works.
 */
#define IOAT_TEST_SIZE 2000

static void ioat_dma_test_callback(void *dma_async_param)
{
	struct completion *cmp = dma_async_param;

	complete(cmp);
}

/**
 * ioat_dma_self_test - Perform an IOAT transaction to verify the HW works.
 * @ioat_dma: dma device to be tested
 */
int ioat_dma_self_test(struct ioatdma_device *ioat_dma)
{
	int i;
	u8 *src;
	u8 *dest;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_chan *dma_chan;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	int err = 0;
	struct completion cmp;
	unsigned long tmo;
	unsigned long flags;

	src = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!src)
		return -ENOMEM;
	dest = kzalloc(sizeof(u8) * IOAT_TEST_SIZE, GFP_KERNEL);
	if (!dest) {
		kfree(src);
		return -ENOMEM;
	}

	/* Fill in src buffer */
	for (i = 0; i < IOAT_TEST_SIZE; i++)
		src[i] = (u8)i;

	/* Start copy, using first DMA channel */
	dma_chan = container_of(dma->channels.next, struct dma_chan,
				device_node);
	if (dma->device_alloc_chan_resources(dma_chan) < 1) {
		dev_err(dev, "selftest cannot allocate chan resource\n");
		err = -ENODEV;
		goto out;
	}

	dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma_src)) {
		dev_err(dev, "mapping src buffer failed\n");
		goto free_resources;
	}
	dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma_dest)) {
		dev_err(dev, "mapping dest buffer failed\n");
		goto unmap_src;
	}
	flags = DMA_PREP_INTERRUPT;
	tx = ioat_dma->dma_dev.device_prep_dma_memcpy(dma_chan, dma_dest,
						      dma_src, IOAT_TEST_SIZE,
						      flags);
	if (!tx) {
		dev_err(dev, "Self-test prep failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

	async_tx_ack(tx);
	init_completion(&cmp);
	tx->callback = ioat_dma_test_callback;
	tx->callback_param = &cmp;
	cookie = tx->tx_submit(tx);
	if (cookie < 0) {
		dev_err(dev, "Self-test setup failed, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	dma->device_issue_pending(dma_chan);

	tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000));

	if (tmo == 0 ||
	    dma->device_tx_status(dma_chan, cookie, NULL) != DMA_COMPLETE) {
		dev_err(dev, "Self-test copy timed out, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}
	if (memcmp(src, dest, IOAT_TEST_SIZE)) {
		dev_err(dev, "Self-test copy failed compare, disabling\n");
		err = -ENODEV;
		goto unmap_dma;
	}

unmap_dma:
	dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
unmap_src:
	dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
	dma->device_free_chan_resources(dma_chan);
out:
	kfree(src);
	kfree(dest);
	return err;
}

/**
 * ioat_dma_setup_interrupts - setup interrupt handler
 * @ioat_dma: ioat dma device
 */
int ioat_dma_setup_interrupts(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;
	struct msix_entry *msix;
	int i, j, msixcnt;
	int err = -EINVAL;
	u8 intrctrl = 0;

	if (!strcmp(ioat_interrupt_style, "msix"))
		goto msix;
	if (!strcmp(ioat_interrupt_style, "msi"))
		goto msi;
	if (!strcmp(ioat_interrupt_style, "intx"))
		goto intx;
	dev_err(dev, "invalid ioat_interrupt_style %s\n", ioat_interrupt_style);
	goto err_no_irq;

msix:
	/* The number of MSI-X vectors should equal the number of channels */
	msixcnt = ioat_dma->dma_dev.chancnt;
	for (i = 0; i < msixcnt; i++)
		ioat_dma->msix_entries[i].entry = i;

	err = pci_enable_msix_exact(pdev, ioat_dma->msix_entries, msixcnt);
	if (err)
		goto msi;

	for (i = 0; i < msixcnt; i++) {
		msix = &ioat_dma->msix_entries[i];
		ioat_chan = ioat_chan_by_index(ioat_dma, i);
		err = devm_request_irq(dev, msix->vector,
				       ioat_dma_do_interrupt_msix, 0,
				       "ioat-msix", ioat_chan);
		if (err) {
			for (j = 0; j < i; j++) {
				msix = &ioat_dma->msix_entries[j];
				ioat_chan = ioat_chan_by_index(ioat_dma, j);
				devm_free_irq(dev, msix->vector, ioat_chan);
			}
			goto msi;
		}
	}
	intrctrl |= IOAT_INTRCTRL_MSIX_VECTOR_CONTROL;
	ioat_dma->irq_mode = IOAT_MSIX;
	goto done;

msi:
	err = pci_enable_msi(pdev);
	if (err)
		goto intx;

	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt, 0,
			       "ioat-msi", ioat_dma);
	if (err) {
		pci_disable_msi(pdev);
		goto intx;
	}
	ioat_dma->irq_mode = IOAT_MSI;
	goto done;

intx:
	err = devm_request_irq(dev, pdev->irq, ioat_dma_do_interrupt,
			       IRQF_SHARED, "ioat-intx", ioat_dma);
	if (err)
		goto err_no_irq;

	ioat_dma->irq_mode = IOAT_INTX;
done:
	if (ioat_dma->intr_quirk)
		ioat_dma->intr_quirk(ioat_dma);
	intrctrl |= IOAT_INTRCTRL_MASTER_INT_EN;
	writeb(intrctrl, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	return 0;

err_no_irq:
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
	ioat_dma->irq_mode = IOAT_NOIRQ;
	dev_err(dev, "no usable interrupts\n");
	return err;
}
EXPORT_SYMBOL(ioat_dma_setup_interrupts);

static void ioat_disable_interrupts(struct ioatdma_device *ioat_dma)
{
	/* Disable all interrupt generation */
	writeb(0, ioat_dma->reg_base + IOAT_INTRCTRL_OFFSET);
}

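/*
 * ioat_probe - create the descriptor and completion DMA pools, enumerate the
 * device's channels, install interrupt handlers and run the HW self test
 */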
int ioat_probe(struct ioatdma_device *ioat_dma)
{
	int err = -ENODEV;
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct pci_dev *pdev = ioat_dma->pdev;
	struct device *dev = &pdev->dev;

	/* DMA coherent memory pool for DMA descriptor allocations */
	ioat_dma->dma_pool = pci_pool_create("dma_desc_pool", pdev,
					     sizeof(struct ioat_dma_descriptor),
					     64, 0);
	if (!ioat_dma->dma_pool) {
		err = -ENOMEM;
		goto err_dma_pool;
	}

	ioat_dma->completion_pool = pci_pool_create("completion_pool", pdev,
						    sizeof(u64),
						    SMP_CACHE_BYTES,
						    SMP_CACHE_BYTES);

	if (!ioat_dma->completion_pool) {
		err = -ENOMEM;
		goto err_completion_pool;
	}

	ioat_dma->enumerate_channels(ioat_dma);

	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma->dev = &pdev->dev;

	if (!dma->chancnt) {
		dev_err(dev, "channel enumeration error\n");
		goto err_setup_interrupts;
	}

	err = ioat_dma_setup_interrupts(ioat_dma);
	if (err)
		goto err_setup_interrupts;

	err = ioat_dma->self_test(ioat_dma);
	if (err)
		goto err_self_test;

	return 0;

err_self_test:
	ioat_disable_interrupts(ioat_dma);
err_setup_interrupts:
	pci_pool_destroy(ioat_dma->completion_pool);
err_completion_pool:
	pci_pool_destroy(ioat_dma->dma_pool);
err_dma_pool:
	return err;
}

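/* register with the dmaengine core, unwinding irqs and pools on failure */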
int ioat_register(struct ioatdma_device *ioat_dma)
{
	int err = dma_async_device_register(&ioat_dma->dma_dev);

	if (err) {
		ioat_disable_interrupts(ioat_dma);
		pci_pool_destroy(ioat_dma->completion_pool);
		pci_pool_destroy(ioat_dma->dma_pool);
	}

	return err;
}

static ssize_t cap_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;

	return sprintf(page, "copy%s%s%s%s%s\n",
		       dma_has_cap(DMA_PQ, dma->cap_mask) ? " pq" : "",
		       dma_has_cap(DMA_PQ_VAL, dma->cap_mask) ? " pq_val" : "",
		       dma_has_cap(DMA_XOR, dma->cap_mask) ? " xor" : "",
		       dma_has_cap(DMA_XOR_VAL, dma->cap_mask) ? " xor_val" : "",
		       dma_has_cap(DMA_INTERRUPT, dma->cap_mask) ? " intr" : "");
}
struct ioat_sysfs_entry ioat_cap_attr = __ATTR_RO(cap);

static ssize_t version_show(struct dma_chan *c, char *page)
{
	struct dma_device *dma = c->device;
	struct ioatdma_device *ioat_dma = to_ioatdma_device(dma);

	return sprintf(page, "%d.%d\n",
		       ioat_dma->version >> 4, ioat_dma->version & 0xf);
}
struct ioat_sysfs_entry ioat_version_attr = __ATTR_RO(version);

static ssize_t
ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct ioat_sysfs_entry *entry;
	struct ioatdma_chan *ioat_chan;

	entry = container_of(attr, struct ioat_sysfs_entry, attr);
	ioat_chan = container_of(kobj, struct ioatdma_chan, kobj);

	if (!entry->show)
		return -EIO;
	return entry->show(&ioat_chan->dma_chan, page);
}

const struct sysfs_ops ioat_sysfs_ops = {
	.show	= ioat_attr_show,
};

void ioat_kobject_add(struct ioatdma_device *ioat_dma, struct kobj_type *type)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
		struct kobject *parent = &c->dev->device.kobj;
		int err;

		err = kobject_init_and_add(&ioat_chan->kobj, type,
					   parent, "quickdata");
		if (err) {
			dev_warn(to_dev(ioat_chan),
				 "sysfs init error (%d), continuing...\n", err);
			kobject_put(&ioat_chan->kobj);
			set_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state);
		}
	}
}

void ioat_kobject_del(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;
	struct dma_chan *c;

	list_for_each_entry(c, &dma->channels, device_node) {
		struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

		if (!test_bit(IOAT_KOBJ_INIT_FAIL, &ioat_chan->state)) {
			kobject_del(&ioat_chan->kobj);
			kobject_put(&ioat_chan->kobj);
		}
	}
}

void ioat_dma_remove(struct ioatdma_device *ioat_dma)
{
	struct dma_device *dma = &ioat_dma->dma_dev;

	ioat_disable_interrupts(ioat_dma);

	ioat_kobject_del(ioat_dma);

	dma_async_device_unregister(dma);

	pci_pool_destroy(ioat_dma->dma_pool);
	pci_pool_destroy(ioat_dma->completion_pool);

	INIT_LIST_HEAD(&dma->channels);
}

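/*
 * __ioat_issue_pending - hand prepared descriptors to the hardware by writing
 * the updated descriptor count to the DMACOUNT register; caller holds prep_lock
 */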
void __ioat_issue_pending(struct ioatdma_chan *ioat_chan)
{
	ioat_chan->dmacount += ioat_ring_pending(ioat_chan);
	ioat_chan->issued = ioat_chan->head;
	writew(ioat_chan->dmacount,
	       ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);
}

void ioat_issue_pending(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	if (ioat_ring_pending(ioat_chan)) {
		spin_lock_bh(&ioat_chan->prep_lock);
		__ioat_issue_pending(ioat_chan);
		spin_unlock_bh(&ioat_chan->prep_lock);
	}
}

/**
 * ioat_update_pending - log pending descriptors
 * @ioat_chan: ioat+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark. Called with prep_lock held
 */
static void ioat_update_pending(struct ioatdma_chan *ioat_chan)
{
	if (ioat_ring_pending(ioat_chan) > ioat_pending_level)
		__ioat_issue_pending(ioat_chan);
}

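/*
 * __ioat_start_null_desc - post a NULL descriptor to (re)start the channel;
 * called with prep_lock held
 */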
static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;

	if (ioat_ring_space(ioat_chan) < 1) {
		dev_err(to_dev(ioat_chan),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail, ioat_chan->issued);
	desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat_set_chainaddr(ioat_chan, desc->txd.phys);
	dump_desc_dbg(ioat_chan, desc);
	/* make sure descriptors are written before we submit */
	wmb();
	ioat_chan->head += 1;
	__ioat_issue_pending(ioat_chan);
}

static void ioat_start_null_desc(struct ioatdma_chan *ioat_chan)
{
	spin_lock_bh(&ioat_chan->prep_lock);
	__ioat_start_null_desc(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);
}

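/*
 * __ioat_restart_chan - point the hardware back at the oldest uncompleted
 * descriptor and re-issue whatever is still pending after an error/reset
 */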
void __ioat_restart_chan(struct ioatdma_chan *ioat_chan)
{
	/* set the tail to be re-issued */
	ioat_chan->issued = ioat_chan->tail;
	ioat_chan->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &ioat_chan->state);
	mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(ioat_chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat_chan->head, ioat_chan->tail,
		ioat_chan->issued, ioat_chan->dmacount);

	if (ioat_ring_pending(ioat_chan)) {
		struct ioat_ring_ent *desc;

		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail);
		ioat_set_chainaddr(ioat_chan, desc->txd.phys);
		__ioat_issue_pending(ioat_chan);
	} else
		__ioat_start_null_desc(ioat_chan);
}

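/* suspend the channel and spin until it quiesces or @tmo (jiffies) expires */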
int ioat_quiesce(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(ioat_chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat_chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (tmo && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(ioat_chan);
		cpu_relax();
	}

	return err;
}

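/* issue a channel reset and wait (up to @tmo) for the reset to complete */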
int ioat_reset_sync(struct ioatdma_chan *ioat_chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(ioat_chan);
	while (ioat_reset_pending(ioat_chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		cpu_relax();
	}

	return err;
}

/**
 * ioat_enumerate_channels - find and initialize the device's channels
 * @ioat_dma: the ioat dma device to be enumerated
 */
int ioat_enumerate_channels(struct ioatdma_device *ioat_dma)
{
	struct ioatdma_chan *ioat_chan;
	struct device *dev = &ioat_dma->pdev->dev;
	struct dma_device *dma = &ioat_dma->dma_dev;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(ioat_dma->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(ioat_dma->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(ioat_dma->idx));
		dma->chancnt = ARRAY_SIZE(ioat_dma->idx);
	}
	xfercap_log = readb(ioat_dma->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	for (i = 0; i < dma->chancnt; i++) {
		ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL);
		if (!ioat_chan)
			break;

		ioat_init_channel(ioat_dma, ioat_chan, i);
		ioat_chan->xfercap_log = xfercap_log;
		spin_lock_init(&ioat_chan->prep_lock);
		if (ioat_dma->reset_hw(ioat_chan)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

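/*
 * ioat_tx_submit_unlock - assign a cookie and publish the new ring head;
 * drops the prep_lock taken in ioat_check_space_lock()
 */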
static dma_cookie_t ioat_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	dma_cookie_t cookie;

	cookie = dma_cookie_assign(tx);
	dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_CHAN_ACTIVE, &ioat_chan->state))
		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);

	/* make descriptor updates visible before advancing ioat->head,
	 * this is purposefully not smp_wmb() since we are also
	 * publishing the descriptor updates to a dma device
	 */
	wmb();

	ioat_chan->head += ioat_chan->produce;

	ioat_update_pending(ioat_chan);
	spin_unlock_bh(&ioat_chan->prep_lock);

	return cookie;
}

static struct ioat_ring_ent *
ioat_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *ioat_dma;
	dma_addr_t phys;

	ioat_dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(ioat_dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_zalloc(ioat_cache, flags);
	if (!desc) {
		pci_pool_free(ioat_dma->dma_pool, hw, phys);
		return NULL;
	}

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void
ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *ioat_dma;

	ioat_dma = to_ioatdma_device(chan->device);
	pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat_cache, desc);
}

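/*
 * ioat_alloc_ring - allocate the software descriptor array plus 2^order
 * hardware descriptors and link them into a circular chain
 */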
static struct ioat_ring_ent **
ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

/**
 * ioat_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
void ioat_free_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;
	struct ioat_ring_ent *desc;
	const int total_descs = 1 << ioat_chan->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat_chan->ring)
		return;

	ioat_stop(ioat_chan);
	ioat_dma->reset_hw(ioat_chan);

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	descs = ioat_ring_space(ioat_chan);
	dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head + i);
		ioat_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail + i);
		dump_desc_dbg(ioat_chan, desc);
		ioat_free_ring_ent(desc, c);
	}

	kfree(ioat_chan->ring);
	ioat_chan->ring = NULL;
	ioat_chan->alloc_order = 0;
	pci_pool_free(ioat_dma->completion_pool, ioat_chan->completion,
		      ioat_chan->completion_dma);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_chan->last_completion = 0;
	ioat_chan->completion_dma = 0;
	ioat_chan->dmacount = 0;
}

/* ioat_alloc_chan_resources - allocate/initialize ioat descriptor ring
 * @chan: channel to be initialized
 */
int ioat_alloc_chan_resources(struct dma_chan *c)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_ring_ent **ring;
	u64 status;
	int order;
	int i = 0;
	u32 chanerr;

	/* have we already been set up? */
	if (ioat_chan->ring)
		return 1 << ioat_chan->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, ioat_chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	ioat_chan->completion =
		pci_pool_alloc(ioat_chan->ioat_dma->completion_pool,
			       GFP_KERNEL, &ioat_chan->completion_dma);
	if (!ioat_chan->completion)
		return -ENOMEM;

	memset(ioat_chan->completion, 0, sizeof(*ioat_chan->completion));
	writel(((u64)ioat_chan->completion_dma) & 0x00000000FFFFFFFF,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64)ioat_chan->completion_dma) >> 32,
	       ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	ioat_chan->ring = ring;
	ioat_chan->head = 0;
	ioat_chan->issued = 0;
	ioat_chan->tail = 0;
	ioat_chan->alloc_order = order;
	set_bit(IOAT_RUN, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	ioat_start_null_desc(ioat_chan);

	/* check that we got off the ground */
	do {
		udelay(1);
		status = ioat_chansts(ioat_chan);
	} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));

	if (is_ioat_active(status) || is_ioat_idle(status))
		return 1 << ioat_chan->alloc_order;

	chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);

	dev_WARN(to_dev(ioat_chan),
		 "failed to start channel chanerr: %#x\n", chanerr);
	ioat_free_chan_resources(c);
	return -EFAULT;
}

bool reshape_ring(struct ioatdma_chan *ioat_chan, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct dma_chan *c = &ioat_chan->dma_chan;
	const u32 curr_size = ioat_ring_size(ioat_chan);
	const u16 active = ioat_ring_active(ioat_chan);
	const u32 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u32 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);

			ring[new_idx] = ioat_chan->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);

			ring[new_idx] = ioat_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat_chan->tail+i) &
						       (new_size-1);

					ioat_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);
			struct ioat_ring_ent *next =
				ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat_chan->tail+i) & (curr_size-1);
			u16 new_idx = (ioat_chan->tail+i) & (new_size-1);

			ring[new_idx] = ioat_chan->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat_get_ring_ent(ioat_chan, ioat_chan->tail+i);
			ioat_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat_chan->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat_chan->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(ioat_chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat_chan->ring);
	ioat_chan->ring = ring;
	ioat_chan->alloc_order = order;

	return true;
}

/**
 * ioat_check_space_lock - verify space and grab ring producer lock
 * @ioat_chan: ioat channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat_check_space_lock(struct ioatdma_chan *ioat_chan, int num_descs)
{
	bool retry;

retry:
	spin_lock_bh(&ioat_chan->prep_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	if (likely(ioat_ring_space(ioat_chan) > num_descs)) {
		dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
			__func__, num_descs, ioat_chan->head,
			ioat_chan->tail, ioat_chan->issued);
		ioat_chan->produce = num_descs;
		return 0;  /* with ioat->prep_lock held */
	}
	retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);

	/* is another cpu already trying to expand the ring? */
	if (retry)
		goto retry;

	spin_lock_bh(&ioat_chan->cleanup_lock);
	spin_lock_bh(&ioat_chan->prep_lock);
	retry = reshape_ring(ioat_chan, ioat_chan->alloc_order + 1);
	clear_bit(IOAT_RESHAPE_PENDING, &ioat_chan->state);
	spin_unlock_bh(&ioat_chan->prep_lock);
	spin_unlock_bh(&ioat_chan->cleanup_lock);

	/* if we were able to expand the ring retry the allocation */
	if (retry)
		goto retry;

	dev_dbg_ratelimited(to_dev(ioat_chan),
			    "%s: ring full! num_descs: %d (%x:%x:%x)\n",
			    __func__, num_descs, ioat_chan->head,
			    ioat_chan->tail, ioat_chan->issued);

	/* progress reclaim in the allocation failure case we may be
	 * called under bh_disabled so we need to trigger the timer
	 * event directly
	 */
	if (time_is_before_jiffies(ioat_chan->timer.expires)
	    && timer_pending(&ioat_chan->timer)) {
		struct ioatdma_device *ioat_dma = ioat_chan->ioat_dma;

		mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
		ioat_dma->timer_fn((unsigned long)ioat_chan);
	}

	return -ENOMEM;
}

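/*
 * ioat_dma_prep_memcpy_lock - prepare a memcpy, splitting @len across as many
 * descriptors as the channel's transfer cap requires; the prep_lock stays
 * held until the descriptor is submitted
 */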
struct dma_async_tx_descriptor *
ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			  dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs, idx, i;

	num_descs = ioat_xferlen_to_descs(ioat_chan, len);
	if (likely(num_descs) &&
	    ioat_check_space_lock(ioat_chan, num_descs) == 0)
		idx = ioat_chan->head;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat_chan->xfercap_log);

		desc = ioat_get_ring_ent(ioat_chan, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat_chan, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat_chan, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	return sprintf(page, "%d\n", (1 << ioat_chan->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioatdma_chan *ioat_chan = to_ioat_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat_ring_active(ioat_chan));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat_attrs,
};