/*
 * Intel I/OAT DMA Linux driver
 * Copyright(c) 2004 - 2009 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

/*
 * This driver supports an Intel I/OAT DMA engine (versions >= 2), which
 * does asynchronous data movement and checksumming operations.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include <linux/i7300_idle.h>
#include "dma.h"
#include "dma_v2.h"
#include "registers.h"
#include "hw.h"

int ioat_ring_alloc_order = 8;
module_param(ioat_ring_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_alloc_order,
		 "ioat2+: allocate 2^n descriptors per channel"
		 " (default: 8 max: 16)");
static int ioat_ring_max_alloc_order = IOAT_MAX_ORDER;
module_param(ioat_ring_max_alloc_order, int, 0644);
MODULE_PARM_DESC(ioat_ring_max_alloc_order,
		 "ioat2+: upper limit for ring size (default: 16)");

void __ioat2_issue_pending(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	ioat->dmacount += ioat2_ring_pending(ioat);
	ioat->issued = ioat->head;
	/* make descriptor updates globally visible before notifying channel */
	wmb();
	writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET);
	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);
}

void ioat2_issue_pending(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	if (ioat2_ring_pending(ioat)) {
		spin_lock_bh(&ioat->ring_lock);
		__ioat2_issue_pending(ioat);
		spin_unlock_bh(&ioat->ring_lock);
	}
}

/**
 * ioat2_update_pending - log pending descriptors
 * @ioat: ioat2+ channel
 *
 * Check if the number of unsubmitted descriptors has exceeded the
 * watermark.  Called with ring_lock held
 */
static void ioat2_update_pending(struct ioat2_dma_chan *ioat)
{
	if (ioat2_ring_pending(ioat) > ioat_pending_level)
		__ioat2_issue_pending(ioat);
}

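/**
 * __ioat2_start_null_desc - kick the ring with a NULL descriptor
 * @ioat: ioat2+ channel
 *
 * Programs a NULL (no-op) descriptor as the new chain address and issues
 * it, (re)starting the engine on an otherwise quiet ring.  Called with
 * ring_lock held.
 */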
static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	struct ioat_ring_ent *desc;
	struct ioat_dma_descriptor *hw;
	int idx;

	if (ioat2_ring_space(ioat) < 1) {
		dev_err(to_dev(&ioat->base),
			"Unable to start null desc - ring full\n");
		return;
	}

	dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);
	idx = ioat2_desc_alloc(ioat, 1);
	desc = ioat2_get_ring_ent(ioat, idx);

	hw = desc->hw;
	hw->ctl = 0;
	hw->ctl_f.null = 1;
	hw->ctl_f.int_en = 1;
	hw->ctl_f.compl_write = 1;
	/* set size to non-zero value (channel returns error when size is 0) */
	hw->size = NULL_DESC_BUFFER_SIZE;
	hw->src_addr = 0;
	hw->dst_addr = 0;
	async_tx_ack(&desc->txd);
	ioat2_set_chainaddr(ioat, desc->txd.phys);
	dump_desc_dbg(ioat, desc);
	__ioat2_issue_pending(ioat);
}

static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat)
{
	spin_lock_bh(&ioat->ring_lock);
	__ioat2_start_null_desc(ioat);
	spin_unlock_bh(&ioat->ring_lock);
}

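/**
 * __cleanup - reclaim used descriptors
 * @ioat: channel (ring) to clean
 * @phys_complete: last completed descriptor reported by the hardware
 *
 * Walks the ring from 'tail' up to the descriptor matching @phys_complete,
 * unmapping buffers and running client callbacks, then advances the tail.
 * Called with ring_lock and cleanup_lock held.
 */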
static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete)
{
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_async_tx_descriptor *tx;
	struct ioat_ring_ent *desc;
	bool seen_current = false;
	u16 active;
	int i;

	dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued);

	active = ioat2_ring_active(ioat);
	for (i = 0; i < active && !seen_current; i++) {
		prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1));
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		tx = &desc->txd;
		dump_desc_dbg(ioat, desc);
		if (tx->cookie) {
			ioat_dma_unmap(chan, tx->flags, desc->len, desc->hw);
			chan->completed_cookie = tx->cookie;
			tx->cookie = 0;
			if (tx->callback) {
				tx->callback(tx->callback_param);
				tx->callback = NULL;
			}
		}
		if (tx->phys == phys_complete)
			seen_current = true;
	}
	ioat->tail += i;
	BUG_ON(active && !seen_current); /* no active descs have written a completion? */

	chan->last_completion = phys_complete;
	if (ioat->head == ioat->tail) {
		dev_dbg(to_dev(chan), "%s: cancel completion timeout\n",
			__func__);
		clear_bit(IOAT_COMPLETION_PENDING, &chan->state);
		mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
}

/**
 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
 * @chan: ioat channel to be cleaned up
 */
static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	prefetch(chan->completion);

	if (!spin_trylock_bh(&chan->cleanup_lock))
		return;

	if (!ioat_cleanup_preamble(chan, &phys_complete)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	if (!spin_trylock_bh(&ioat->ring_lock)) {
		spin_unlock_bh(&chan->cleanup_lock);
		return;
	}

	__cleanup(ioat, phys_complete);
	spin_unlock_bh(&ioat->ring_lock);
	spin_unlock_bh(&chan->cleanup_lock);
}

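/* tasklet entry point: reap finished descriptors, then rewrite the RUN bit
 * in CHANCTRL to keep the channel armed
 */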
void ioat2_cleanup_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);

	ioat2_cleanup(ioat);
	writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}

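/**
 * __ioat2_restart_chan - re-seed the hardware after an error or reset
 * @ioat: ioat2+ channel
 *
 * Rewinds 'issued' to 'tail' and points the chain address at the first
 * unfinished descriptor (or a NULL descriptor when the ring is empty) so
 * the engine can resume.  Called with ring_lock held.
 */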
void __ioat2_restart_chan(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;

	/* set the tail to be re-issued */
	ioat->issued = ioat->tail;
	ioat->dmacount = 0;
	set_bit(IOAT_COMPLETION_PENDING, &chan->state);
	mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);

	dev_dbg(to_dev(chan),
		"%s: head: %#x tail: %#x issued: %#x count: %#x\n",
		__func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount);

	if (ioat2_ring_pending(ioat)) {
		struct ioat_ring_ent *desc;

		desc = ioat2_get_ring_ent(ioat, ioat->tail);
		ioat2_set_chainaddr(ioat, desc->txd.phys);
		__ioat2_issue_pending(ioat);
	} else
		__ioat2_start_null_desc(ioat);
}

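/**
 * ioat2_quiesce - suspend a channel and wait for it to stop
 * @chan: channel to quiesce
 * @tmo: timeout in jiffies (0 waits indefinitely)
 *
 * Returns 0 once the channel has left the active/idle state, or -ETIMEDOUT.
 */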
int ioat2_quiesce(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;
	u64 status;

	status = ioat_chansts(chan);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(chan);
	while (is_ioat_active(status) || is_ioat_idle(status)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		status = ioat_chansts(chan);
		cpu_relax();
	}

	return err;
}

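/**
 * ioat2_reset_sync - issue a channel reset and wait for it to complete
 * @chan: channel to reset
 * @tmo: timeout in jiffies (0 waits indefinitely)
 *
 * Returns 0 on success, or -ETIMEDOUT if the reset is still pending at @tmo.
 */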
int ioat2_reset_sync(struct ioat_chan_common *chan, unsigned long tmo)
{
	unsigned long end = jiffies + tmo;
	int err = 0;

	ioat_reset(chan);
	while (ioat_reset_pending(chan)) {
		if (end && time_after(jiffies, end)) {
			err = -ETIMEDOUT;
			break;
		}
		msleep(100);
	}

	return err;
}

static void ioat2_restart_channel(struct ioat2_dma_chan *ioat)
{
	struct ioat_chan_common *chan = &ioat->base;
	unsigned long phys_complete;

	ioat2_quiesce(chan, 0);
	if (ioat_cleanup_preamble(chan, &phys_complete))
		__cleanup(ioat, phys_complete);

	__ioat2_restart_chan(ioat);
}

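/**
 * ioat2_timer_event - watchdog / housekeeping timer
 * @data: channel, cast to unsigned long
 *
 * With completions pending: check for a halted channel, reap finished
 * descriptors, and restart the channel if no progress has been made since
 * the last acknowledged timeout.  With an idle ring: shrink an oversized
 * ring back toward the default allocation order.
 */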
void ioat2_timer_event(unsigned long data)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&chan->cleanup_lock);
	if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) {
		unsigned long phys_complete;
		u64 status;

		spin_lock_bh(&ioat->ring_lock);
		status = ioat_chansts(chan);

		/* when halted due to errors check for channel
		 * programming errors before advancing the completion state
		 */
		if (is_ioat_halted(status)) {
			u32 chanerr;

			chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
			dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
				__func__, chanerr);
			BUG_ON(is_ioat_bug(chanerr));
		}

		/* if we haven't made progress and we have already
		 * acknowledged a pending completion once, then be more
		 * forceful with a restart
		 */
		if (ioat_cleanup_preamble(chan, &phys_complete))
			__cleanup(ioat, phys_complete);
		else if (test_bit(IOAT_COMPLETION_ACK, &chan->state))
			ioat2_restart_channel(ioat);
		else {
			set_bit(IOAT_COMPLETION_ACK, &chan->state);
			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
		}
		spin_unlock_bh(&ioat->ring_lock);
	} else {
		u16 active;

		/* if the ring is idle, empty, and oversized try to step
		 * down the size
		 */
		spin_lock_bh(&ioat->ring_lock);
		active = ioat2_ring_active(ioat);
		if (active == 0 && ioat->alloc_order > ioat_get_alloc_order())
			reshape_ring(ioat, ioat->alloc_order-1);
		spin_unlock_bh(&ioat->ring_lock);

		/* keep shrinking until we get back to our minimum
		 * default size
		 */
		if (ioat->alloc_order > ioat_get_alloc_order())
			mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT);
	}
	spin_unlock_bh(&chan->cleanup_lock);
}

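/* quiesce the channel, clear any latched error bits, then reset it */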
static int ioat2_reset_hw(struct ioat_chan_common *chan)
{
	/* throw away whatever the channel was doing and get it initialized */
	u32 chanerr;

	ioat2_quiesce(chan, msecs_to_jiffies(100));

	chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
	writel(chanerr, chan->reg_base + IOAT_CHANERR_OFFSET);

	return ioat2_reset_sync(chan, msecs_to_jiffies(200));
}

/**
 * ioat2_enumerate_channels - find and initialize the device's channels
 * @device: the device to be enumerated
 */
int ioat2_enumerate_channels(struct ioatdma_device *device)
{
	struct ioat2_dma_chan *ioat;
	struct device *dev = &device->pdev->dev;
	struct dma_device *dma = &device->common;
	u8 xfercap_log;
	int i;

	INIT_LIST_HEAD(&dma->channels);
	dma->chancnt = readb(device->reg_base + IOAT_CHANCNT_OFFSET);
	dma->chancnt &= 0x1f; /* bits [4:0] valid */
	if (dma->chancnt > ARRAY_SIZE(device->idx)) {
		dev_warn(dev, "(%d) exceeds max supported channels (%zu)\n",
			 dma->chancnt, ARRAY_SIZE(device->idx));
		dma->chancnt = ARRAY_SIZE(device->idx);
	}
	xfercap_log = readb(device->reg_base + IOAT_XFERCAP_OFFSET);
	xfercap_log &= 0x1f; /* bits [4:0] valid */
	if (xfercap_log == 0)
		return 0;
	dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log);

	/* FIXME which i/oat version is i7300? */
#ifdef CONFIG_I7300_IDLE_IOAT_CHANNEL
	if (i7300_idle_platform_probe(NULL, NULL, 1) == 0)
		device->common.chancnt--;
#endif
	for (i = 0; i < dma->chancnt; i++) {
		ioat = devm_kzalloc(dev, sizeof(*ioat), GFP_KERNEL);
		if (!ioat)
			break;

		ioat_init_channel(device, &ioat->base, i);
		ioat->xfercap_log = xfercap_log;
		spin_lock_init(&ioat->ring_lock);
		if (device->reset_hw(&ioat->base)) {
			i = 0;
			break;
		}
	}
	dma->chancnt = i;
	return i;
}

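/* assign a cookie, arm the completion timer, and drop the ring_lock taken
 * by the corresponding ->prep routine (hence the _unlock suffix)
 */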
static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *c = tx->chan;
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	dma_cookie_t cookie = c->cookie;

	cookie++;
	if (cookie < 0)
		cookie = 1;
	tx->cookie = cookie;
	c->cookie = cookie;
	dev_dbg(to_dev(&ioat->base), "%s: cookie: %d\n", __func__, cookie);

	if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state))
		mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
	ioat2_update_pending(ioat);
	spin_unlock_bh(&ioat->ring_lock);
	return cookie;
}

static struct ioat_ring_ent *ioat2_alloc_ring_ent(struct dma_chan *chan, gfp_t flags)
{
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	struct ioatdma_device *dma;
	dma_addr_t phys;

	dma = to_ioatdma_device(chan->device);
	hw = pci_pool_alloc(dma->dma_pool, flags, &phys);
	if (!hw)
		return NULL;
	memset(hw, 0, sizeof(*hw));

	desc = kmem_cache_alloc(ioat2_cache, flags);
	if (!desc) {
		pci_pool_free(dma->dma_pool, hw, phys);
		return NULL;
	}
	memset(desc, 0, sizeof(*desc));

	dma_async_tx_descriptor_init(&desc->txd, chan);
	desc->txd.tx_submit = ioat2_tx_submit_unlock;
	desc->hw = hw;
	desc->txd.phys = phys;
	return desc;
}

static void ioat2_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan)
{
	struct ioatdma_device *dma;

	dma = to_ioatdma_device(chan->device);
	pci_pool_free(dma->dma_pool, desc->hw, desc->txd.phys);
	kmem_cache_free(ioat2_cache, desc);
}

static struct ioat_ring_ent **ioat2_alloc_ring(struct dma_chan *c, int order, gfp_t flags)
{
	struct ioat_ring_ent **ring;
	int descs = 1 << order;
	int i;

	if (order > ioat_get_max_alloc_order())
		return NULL;

	/* allocate the array to hold the software ring */
	ring = kcalloc(descs, sizeof(*ring), flags);
	if (!ring)
		return NULL;
	for (i = 0; i < descs; i++) {
		ring[i] = ioat2_alloc_ring_ent(c, flags);
		if (!ring[i]) {
			while (i--)
				ioat2_free_ring_ent(ring[i], c);
			kfree(ring);
			return NULL;
		}
		set_desc_id(ring[i], i);
	}

	/* link descs */
	for (i = 0; i < descs-1; i++) {
		struct ioat_ring_ent *next = ring[i+1];
		struct ioat_dma_descriptor *hw = ring[i]->hw;

		hw->next = next->txd.phys;
	}
	ring[i]->hw->next = ring[0]->txd.phys;

	return ring;
}

/* ioat2_alloc_chan_resources - allocate/initialize ioat2 descriptor ring
 * @chan: channel to be initialized
 */
int ioat2_alloc_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioat_ring_ent **ring;
	int order;

	/* have we already been set up? */
	if (ioat->ring)
		return 1 << ioat->alloc_order;

	/* Setup register to interrupt and write completion status on error */
	writew(IOAT_CHANCTRL_RUN, chan->reg_base + IOAT_CHANCTRL_OFFSET);

	/* allocate a completion writeback area */
	/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
	chan->completion = pci_pool_alloc(chan->device->completion_pool,
					  GFP_KERNEL, &chan->completion_dma);
	if (!chan->completion)
		return -ENOMEM;

	memset(chan->completion, 0, sizeof(*chan->completion));
	writel(((u64) chan->completion_dma) & 0x00000000FFFFFFFF,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_LOW);
	writel(((u64) chan->completion_dma) >> 32,
	       chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);

	order = ioat_get_alloc_order();
	ring = ioat2_alloc_ring(c, order, GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	spin_lock_bh(&ioat->ring_lock);
	ioat->ring = ring;
	ioat->head = 0;
	ioat->issued = 0;
	ioat->tail = 0;
	ioat->alloc_order = order;
	spin_unlock_bh(&ioat->ring_lock);

	tasklet_enable(&chan->cleanup_task);
	ioat2_start_null_desc(ioat);

	return 1 << ioat->alloc_order;
}

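/**
 * reshape_ring - grow or shrink the descriptor ring to 2^order entries
 * @ioat: ioat2+ channel
 * @order: new allocation order
 *
 * Returns true if the new ring was installed.  Called with ring_lock held.
 */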
bool reshape_ring(struct ioat2_dma_chan *ioat, int order)
{
	/* reshape differs from normal ring allocation in that we want
	 * to allocate a new software ring while only
	 * extending/truncating the hardware ring
	 */
	struct ioat_chan_common *chan = &ioat->base;
	struct dma_chan *c = &chan->common;
	const u16 curr_size = ioat2_ring_mask(ioat) + 1;
	const u16 active = ioat2_ring_active(ioat);
	const u16 new_size = 1 << order;
	struct ioat_ring_ent **ring;
	u16 i;

	if (order > ioat_get_max_alloc_order())
		return false;

	/* double check that we have at least 1 free descriptor */
	if (active == curr_size)
		return false;

	/* when shrinking, verify that we can hold the current active
	 * set in the new ring
	 */
	if (active >= new_size)
		return false;

	/* allocate the array to hold the software ring */
	ring = kcalloc(new_size, sizeof(*ring), GFP_NOWAIT);
	if (!ring)
		return false;

	/* allocate/trim descriptors as needed */
	if (new_size > curr_size) {
		/* copy current descriptors to the new ring */
		for (i = 0; i < curr_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* add new descriptors to the ring */
		for (i = curr_size; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat2_alloc_ring_ent(c, GFP_NOWAIT);
			if (!ring[new_idx]) {
				while (i--) {
					u16 new_idx = (ioat->tail+i) & (new_size-1);

					ioat2_free_ring_ent(ring[new_idx], c);
				}
				kfree(ring);
				return false;
			}
			set_desc_id(ring[new_idx], new_idx);
		}

		/* hw link new descriptors */
		for (i = curr_size-1; i < new_size; i++) {
			u16 new_idx = (ioat->tail+i) & (new_size-1);
			struct ioat_ring_ent *next = ring[(new_idx+1) & (new_size-1)];
			struct ioat_dma_descriptor *hw = ring[new_idx]->hw;

			hw->next = next->txd.phys;
		}
	} else {
		struct ioat_dma_descriptor *hw;
		struct ioat_ring_ent *next;

		/* copy current descriptors to the new ring, dropping the
		 * removed descriptors
		 */
		for (i = 0; i < new_size; i++) {
			u16 curr_idx = (ioat->tail+i) & (curr_size-1);
			u16 new_idx = (ioat->tail+i) & (new_size-1);

			ring[new_idx] = ioat->ring[curr_idx];
			set_desc_id(ring[new_idx], new_idx);
		}

		/* free deleted descriptors */
		for (i = new_size; i < curr_size; i++) {
			struct ioat_ring_ent *ent;

			ent = ioat2_get_ring_ent(ioat, ioat->tail+i);
			ioat2_free_ring_ent(ent, c);
		}

		/* fix up hardware ring */
		hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw;
		next = ring[(ioat->tail+new_size) & (new_size-1)];
		hw->next = next->txd.phys;
	}

	dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
		__func__, new_size);

	kfree(ioat->ring);
	ioat->ring = ring;
	ioat->alloc_order = order;

	return true;
}

/**
 * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops
 * @idx: gets starting descriptor index on successful allocation
 * @ioat: ioat2,3 channel (ring) to operate on
 * @num_descs: allocation length
 */
int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs)
{
	struct ioat_chan_common *chan = &ioat->base;

	spin_lock_bh(&ioat->ring_lock);
	/* never allow the last descriptor to be consumed, we need at
	 * least one free at all times to allow for on-the-fly ring
	 * resizing.
	 */
	while (unlikely(ioat2_ring_space(ioat) <= num_descs)) {
		if (reshape_ring(ioat, ioat->alloc_order + 1) &&
		    ioat2_ring_space(ioat) > num_descs)
			break;

		if (printk_ratelimit())
			dev_dbg(to_dev(chan),
				"%s: ring full! num_descs: %d (%x:%x:%x)\n",
				__func__, num_descs, ioat->head, ioat->tail,
				ioat->issued);
		spin_unlock_bh(&ioat->ring_lock);

		/* progress reclaim in the allocation failure case we
		 * may be called under bh_disabled so we need to trigger
		 * the timer event directly
		 */
		spin_lock_bh(&chan->cleanup_lock);
		if (jiffies > chan->timer.expires &&
		    timer_pending(&chan->timer)) {
			struct ioatdma_device *device = chan->device;

			mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT);
			spin_unlock_bh(&chan->cleanup_lock);
			device->timer_fn((unsigned long) &chan->common);
		} else
			spin_unlock_bh(&chan->cleanup_lock);
		return -ENOMEM;
	}

	dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n",
		__func__, num_descs, ioat->head, ioat->tail, ioat->issued);

	*idx = ioat2_desc_alloc(ioat, num_descs);
	return 0;  /* with ioat->ring_lock held */
}

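/* prepare a memcpy operation, splitting it into as many descriptors as the
 * channel's transfer cap requires; returns with ring_lock held (released by
 * ioat2_tx_submit_unlock) or NULL if descriptors could not be allocated
 */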
struct dma_async_tx_descriptor *
ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest,
			   dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_dma_descriptor *hw;
	struct ioat_ring_ent *desc;
	dma_addr_t dst = dma_dest;
	dma_addr_t src = dma_src;
	size_t total_len = len;
	int num_descs;
	u16 idx;
	int i;

	num_descs = ioat2_xferlen_to_descs(ioat, len);
	if (likely(num_descs) &&
	    ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0)
		/* pass */;
	else
		return NULL;
	i = 0;
	do {
		size_t copy = min_t(size_t, len, 1 << ioat->xfercap_log);

		desc = ioat2_get_ring_ent(ioat, idx + i);
		hw = desc->hw;

		hw->size = copy;
		hw->ctl = 0;
		hw->src_addr = src;
		hw->dst_addr = dst;

		len -= copy;
		dst += copy;
		src += copy;
		dump_desc_dbg(ioat, desc);
	} while (++i < num_descs);

	desc->txd.flags = flags;
	desc->len = total_len;
	hw->ctl_f.int_en = !!(flags & DMA_PREP_INTERRUPT);
	hw->ctl_f.fence = !!(flags & DMA_PREP_FENCE);
	hw->ctl_f.compl_write = 1;
	dump_desc_dbg(ioat, desc);
	/* we leave the channel locked to ensure in order submission */

	return &desc->txd;
}

/**
 * ioat2_free_chan_resources - release all the descriptors
 * @chan: the channel to be cleaned
 */
void ioat2_free_chan_resources(struct dma_chan *c)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);
	struct ioat_chan_common *chan = &ioat->base;
	struct ioatdma_device *device = chan->device;
	struct ioat_ring_ent *desc;
	const u16 total_descs = 1 << ioat->alloc_order;
	int descs;
	int i;

	/* Before freeing channel resources first check
	 * if they have been previously allocated for this channel.
	 */
	if (!ioat->ring)
		return;

	tasklet_disable(&chan->cleanup_task);
	del_timer_sync(&chan->timer);
	device->cleanup_fn((unsigned long) c);
	device->reset_hw(chan);

	spin_lock_bh(&ioat->ring_lock);
	descs = ioat2_ring_space(ioat);
	dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs);
	for (i = 0; i < descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->head + i);
		ioat2_free_ring_ent(desc, c);
	}

	if (descs < total_descs)
		dev_err(to_dev(chan), "Freeing %d in use descriptors!\n",
			total_descs - descs);

	for (i = 0; i < total_descs - descs; i++) {
		desc = ioat2_get_ring_ent(ioat, ioat->tail + i);
		dump_desc_dbg(ioat, desc);
		ioat2_free_ring_ent(desc, c);
	}

	kfree(ioat->ring);
	ioat->ring = NULL;
	ioat->alloc_order = 0;
	pci_pool_free(device->completion_pool, chan->completion,
		      chan->completion_dma);
	spin_unlock_bh(&ioat->ring_lock);

	chan->last_completion = 0;
	chan->completion_dma = 0;
	ioat->dmacount = 0;
}

static ssize_t ring_size_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	return sprintf(page, "%d\n", (1 << ioat->alloc_order) & ~1);
}
static struct ioat_sysfs_entry ring_size_attr = __ATTR_RO(ring_size);

static ssize_t ring_active_show(struct dma_chan *c, char *page)
{
	struct ioat2_dma_chan *ioat = to_ioat2_chan(c);

	/* ...taken outside the lock, no need to be precise */
	return sprintf(page, "%d\n", ioat2_ring_active(ioat));
}
static struct ioat_sysfs_entry ring_active_attr = __ATTR_RO(ring_active);

static struct attribute *ioat2_attrs[] = {
	&ring_size_attr.attr,
	&ring_active_attr.attr,
	&ioat_cap_attr.attr,
	&ioat_version_attr.attr,
	NULL,
};

struct kobj_type ioat2_ktype = {
	.sysfs_ops = &ioat_sysfs_ops,
	.default_attrs = ioat2_attrs,
};

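/**
 * ioat2_dma_probe - set up an ioat2 device and register it with dmaengine
 * @device: ioat device found by the PCI probe
 * @dca: whether to initialize direct cache access
 */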
int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca)
{
	struct pci_dev *pdev = device->pdev;
	struct dma_device *dma;
	struct dma_chan *c;
	struct ioat_chan_common *chan;
	int err;

	device->enumerate_channels = ioat2_enumerate_channels;
	device->reset_hw = ioat2_reset_hw;
	device->cleanup_fn = ioat2_cleanup_event;
	device->timer_fn = ioat2_timer_event;
	device->self_test = ioat_dma_self_test;
	dma = &device->common;
	dma->device_prep_dma_memcpy = ioat2_dma_prep_memcpy_lock;
	dma->device_issue_pending = ioat2_issue_pending;
	dma->device_alloc_chan_resources = ioat2_alloc_chan_resources;
	dma->device_free_chan_resources = ioat2_free_chan_resources;
	dma->device_is_tx_complete = ioat_is_dma_complete;

	err = ioat_probe(device);
	if (err)
		return err;
	ioat_set_tcp_copy_break(2048);

	list_for_each_entry(c, &dma->channels, device_node) {
		chan = to_chan_common(c);
		writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE | IOAT_DMA_DCA_ANY_CPU,
		       chan->reg_base + IOAT_DCACTRL_OFFSET);
	}

	err = ioat_register(device);
	if (err)
		return err;

	ioat_kobject_add(device, &ioat2_ktype);

	if (dca)
		device->dca = ioat2_dca_init(pdev, device->reg_base);

	return err;
}