/*
 * This file is based on code from OCTEON SDK by Cavium Networks.
 *
 * Copyright (c) 2003-2010 Cavium Networks
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/ratelimit.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <net/dst.h>
#ifdef CONFIG_XFRM
#include <linux/xfrm.h>
#include <net/xfrm.h>
#endif /* CONFIG_XFRM */

#include <linux/atomic.h>

#include <asm/octeon/octeon.h>

#include "ethernet-defines.h"
#include "octeon-ethernet.h"
#include "ethernet-tx.h"
#include "ethernet-util.h"

#include <asm/octeon/cvmx-wqe.h>
#include <asm/octeon/cvmx-fau.h>
#include <asm/octeon/cvmx-pip.h>
#include <asm/octeon/cvmx-pko.h>
#include <asm/octeon/cvmx-helper.h>

#include <asm/octeon/cvmx-gmxx-defs.h>

#define CVM_OCT_SKB_CB(skb)	((u64 *)((skb)->cb))
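
/*
 * skb->cb is a 48-byte scratch area, so CVM_OCT_SKB_CB() yields room for
 * six u64 values.  cvm_oct_xmit() below uses it to hold a PKO gather list
 * (one buffer pointer for the linear area plus one per page fragment),
 * which is why packets with more than five fragments are linearized.
 */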

/*
 * You can define GET_SKBUFF_QOS() to override how the skbuff output
 * function determines which output queue is used. The default
 * implementation always uses the base queue for the port. If, for
 * example, you wanted to use the skb->priority field, define
 * GET_SKBUFF_QOS as: #define GET_SKBUFF_QOS(skb) ((skb)->priority)
 */
#ifndef GET_SKBUFF_QOS
#define GET_SKBUFF_QOS(skb) 0
#endif

static void cvm_oct_tx_do_cleanup(unsigned long arg);
static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);

/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)

static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
	int32_t undo;

	undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
						   MAX_SKB_TO_FREE;
	if (undo > 0)
		cvmx_fau_atomic_add32(fau, -undo);
	skb_to_free = -skb_to_free > MAX_SKB_TO_FREE ? MAX_SKB_TO_FREE :
						       -skb_to_free;
	return skb_to_free;
}
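
/*
 * Worked example of the accounting above, using an illustrative
 * MAX_SKB_TO_FREE of 64.  The PKO hardware decrements the FAU counter
 * once per completed packet, so it normally sits at -(skbs pending
 * free), and the caller has just speculatively fetched-and-added
 * MAX_SKB_TO_FREE:
 *
 *   fetched value -6 (6 pending): undo = 58, so 58 is subtracted back
 *   and the counter returns to 0; the final clamp yields 6.
 *
 *   fetched value -70 (70 pending): undo = -6, nothing is subtracted;
 *   the clamp caps the result at 64 and the counter is left at -6, so
 *   the remaining skbs are collected on a later pass.
 */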

static void cvm_oct_kick_tx_poll_watchdog(void)
{
	union cvmx_ciu_timx ciu_timx;

	ciu_timx.u64 = 0;
	ciu_timx.s.one_shot = 1;
	ciu_timx.s.len = cvm_oct_tx_poll_interval;
	cvmx_write_csr(CVMX_CIU_TIMX(1), ciu_timx.u64);
}
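
/*
 * A note on the watchdog above: CIU timer 1 is programmed as a one-shot,
 * so each kick re-arms it for cvm_oct_tx_poll_interval ticks.  When it
 * expires, cvm_oct_tx_cleanup_watchdog() near the bottom of this file
 * disables it and schedules the cleanup tasklet, so queued TX skbs are
 * eventually freed even when no further transmits arrive to drive
 * cvm_oct_free_tx_skbs().
 */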

static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
	int32_t skb_to_free;
	int qos, queues_per_port;
	int total_freed = 0;
	int total_remaining = 0;
	unsigned long flags;
	struct octeon_ethernet *priv = netdev_priv(dev);

	queues_per_port = cvmx_pko_get_num_queues(priv->port);
	/* Drain any pending packets in the free list */
	for (qos = 0; qos < queues_per_port; qos++) {
		if (skb_queue_len(&priv->tx_free_list[qos]) == 0)
			continue;
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							 priv->fau + qos * 4);
		total_freed += skb_to_free;
		if (skb_to_free > 0) {
			struct sk_buff *to_free_list = NULL;

			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			while (skb_to_free > 0) {
				struct sk_buff *t;

				t = __skb_dequeue(&priv->tx_free_list[qos]);
				t->next = to_free_list;
				to_free_list = t;
				skb_to_free--;
			}
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			/* Do the actual freeing outside of the lock. */
			while (to_free_list) {
				struct sk_buff *t = to_free_list;

				to_free_list = to_free_list->next;
				dev_kfree_skb_any(t);
			}
		}
		total_remaining += skb_queue_len(&priv->tx_free_list[qos]);
	}
	if (total_remaining < MAX_OUT_QUEUE_DEPTH && netif_queue_stopped(dev))
		netif_wake_queue(dev);
	if (total_remaining)
		cvm_oct_kick_tx_poll_watchdog();
}

/**
 * cvm_oct_xmit - transmit a packet
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Return: Always NETDEV_TX_OK.
 */
int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
{
	cvmx_pko_command_word0_t pko_command;
	union cvmx_buf_ptr hw_buffer;
	u64 old_scratch;
	u64 old_scratch2;
	int qos;
	int i;
	enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct sk_buff *to_free_list;
	int32_t skb_to_free;
	int32_t buffers_to_free;
	u32 total_to_clean;
	unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
	unsigned char *fpa_head;
#endif

	/*
	 * Prefetch the private data structure.  It is larger than
	 * one cache line.
	 */
	prefetch(priv);

	/*
	 * The check on CVMX_PKO_QUEUES_PER_PORT_* is designed to
	 * completely remove "qos" in the event neither interface
	 * supports multiple queues per port.
	 */
	if ((CVMX_PKO_QUEUES_PER_PORT_INTERFACE0 > 1) ||
	    (CVMX_PKO_QUEUES_PER_PORT_INTERFACE1 > 1)) {
		qos = GET_SKBUFF_QOS(skb);
		if (qos <= 0)
			qos = 0;
		else if (qos >= cvmx_pko_get_num_queues(priv->port))
			qos = 0;
	} else {
		qos = 0;
	}

	if (USE_ASYNC_IOBDMA) {
		/* Save scratch in case userspace is using it */
		CVMX_SYNCIOBDMA;
		old_scratch = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		old_scratch2 = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);

		/*
		 * Fetch and increment the number of packets to be
		 * freed.
		 */
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH + 8,
					       FAU_NUM_PACKET_BUFFERS_TO_FREE,
					       0);
		cvmx_fau_async_fetch_and_add32(CVMX_SCR_SCRATCH,
					       priv->fau + qos * 4,
					       MAX_SKB_TO_FREE);
	}

	/*
	 * We have space for 6 segment pointers.  If there will be more
	 * than that, we must linearize.
	 */
	if (unlikely(skb_shinfo(skb)->nr_frags > 5)) {
		if (unlikely(__skb_linearize(skb))) {
			queue_type = QUEUE_DROP;
			if (USE_ASYNC_IOBDMA) {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware
				 */
				CVMX_SYNCIOBDMA;
				skb_to_free =
					cvmx_scratch_read64(CVMX_SCR_SCRATCH);
			} else {
				/*
				 * Get the number of skbuffs in use
				 * by the hardware
				 */
				skb_to_free = cvmx_fau_fetch_and_add32(
						priv->fau + qos * 4,
						MAX_SKB_TO_FREE);
			}
			skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
							priv->fau + qos * 4);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
			goto skip_xmit;
		}
	}

	/*
	 * The CN3XXX series of parts has an erratum (GMX-401) which
	 * causes the GMX block to hang if a collision occurs towards
	 * the end of a <68 byte packet. As a workaround for this, we
	 * pad packets to be 68 bytes whenever we are in half duplex
	 * mode. We don't handle the case of having a small packet but
	 * no room to add the padding.  The kernel should always give
	 * us at least a cache line.
	 */
	if ((skb->len < 64) && OCTEON_IS_MODEL(OCTEON_CN3XXX)) {
		union cvmx_gmxx_prtx_cfg gmx_prt_cfg;
		int interface = INTERFACE(priv->port);
		int index = INDEX(priv->port);

		if (interface < 2) {
			/* We only need to pad packet in half duplex mode */
			gmx_prt_cfg.u64 =
			    cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
			if (gmx_prt_cfg.s.duplex == 0) {
				int add_bytes = 64 - skb->len;

				if ((skb_tail_pointer(skb) + add_bytes) <=
				    skb_end_pointer(skb))
					memset(__skb_put(skb, add_bytes), 0,
					       add_bytes);
			}
		}
	}

	/* Build the PKO command */
	pko_command.u64 = 0;
#ifdef __LITTLE_ENDIAN
	pko_command.s.le = 1;
#endif
	pko_command.s.n2 = 1;	/* Don't pollute L2 with the outgoing packet */
	pko_command.s.segs = 1;
	pko_command.s.total_bytes = skb->len;
	pko_command.s.size0 = CVMX_FAU_OP_SIZE_32;
	pko_command.s.subone0 = 1;

	pko_command.s.dontfree = 1;

	/* Build the PKO buffer pointer */
	hw_buffer.u64 = 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb->len;
	} else {
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)skb->data);
		hw_buffer.s.pool = 0;
		hw_buffer.s.size = skb_headlen(skb);
		CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;

			hw_buffer.s.addr = XKPHYS_TO_PHYS(
				(u64)(page_address(fs->page.p) +
				fs->page_offset));
			hw_buffer.s.size = fs->size;
			CVM_OCT_SKB_CB(skb)[i + 1] = hw_buffer.u64;
		}
		hw_buffer.s.addr = XKPHYS_TO_PHYS((u64)CVM_OCT_SKB_CB(skb));
		hw_buffer.s.size = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.segs = skb_shinfo(skb)->nr_frags + 1;
		pko_command.s.gather = 1;
		goto dont_put_skbuff_in_hw;
	}
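
	/*
	 * In the gather case above, skb->cb now holds the list: entry 0
	 * covers the linear area (skb_headlen() bytes) and entries
	 * 1..nr_frags cover the page fragments.  hw_buffer is then
	 * repointed at the cb array itself, with .size reinterpreted as
	 * the number of list entries, which is how PKO consumes a
	 * gather-mode buffer pointer.
	 */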

	/*
	 * See if we can put this skb in the FPA pool. Any strange
	 * behavior from the Linux networking stack will most likely
	 * be caused by a bug in the following code. If some field is
	 * in use by the network stack and gets carried over when a
	 * buffer is reused, bad things may happen.  If in doubt and
	 * you don't need the absolute best performance, disable the
	 * define REUSE_SKBUFFS_WITHOUT_FREE. The reuse of buffers has
	 * shown a 25% increase in performance under some loads.
	 */
#if REUSE_SKBUFFS_WITHOUT_FREE
	fpa_head = skb->head + 256 - ((unsigned long)skb->head & 0x7f);
	if (unlikely(skb->data < fpa_head)) {
		/* TX buffer beginning can't meet FPA alignment constraints */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    ((skb_end_pointer(skb) - fpa_head) < CVMX_FPA_PACKET_POOL_SIZE)) {
		/* TX buffer isn't large enough for the FPA */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shared(skb))) {
		/* TX buffer sharing data with someone else */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_cloned(skb))) {
		/* TX buffer has been cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_header_cloned(skb))) {
		/* TX buffer header has been cloned */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb->destructor)) {
		/* TX buffer has a destructor */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely(skb_shinfo(skb)->nr_frags)) {
		/* TX buffer has fragments */
		goto dont_put_skbuff_in_hw;
	}
	if (unlikely
	    (skb->truesize != sizeof(*skb) + skb_end_offset(skb))) {
		/* TX buffer truesize has been changed */
		goto dont_put_skbuff_in_hw;
	}

	/*
	 * We can use this buffer in the FPA.  We don't need the FAU
	 * update anymore.
	 */
	pko_command.s.dontfree = 0;

	hw_buffer.s.back = ((unsigned long)skb->data >> 7) -
			   ((unsigned long)fpa_head >> 7);

	*(struct sk_buff **)(fpa_head - sizeof(void *)) = skb;
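
	/*
	 * hw_buffer.s.back is measured in 128-byte units (hence the >> 7):
	 * it tells the hardware how far skb->data sits past the start of
	 * the FPA buffer.  Stashing the skb pointer just below fpa_head
	 * lets the RX path recover and reuse this sk_buff when the
	 * hardware later hands the buffer back.
	 */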

	/*
	 * The skbuff will be reused without ever being freed. We must
	 * clean up a bunch of core things.
	 */
	dst_release(skb_dst(skb));
	skb_dst_set(skb, NULL);
#ifdef CONFIG_XFRM
	secpath_put(skb->sp);
	skb->sp = NULL;
#endif
	nf_reset(skb);

#ifdef CONFIG_NET_SCHED
	skb->tc_index = 0;
#ifdef CONFIG_NET_CLS_ACT
	skb->tc_verd = 0;
#endif /* CONFIG_NET_CLS_ACT */
#endif /* CONFIG_NET_SCHED */
#endif /* REUSE_SKBUFFS_WITHOUT_FREE */

dont_put_skbuff_in_hw:

	/* Check if we can use the hardware checksumming */
	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->version == 4) &&
	    (ip_hdr(skb)->ihl == 5) &&
	    ((ip_hdr(skb)->frag_off == 0) ||
	     (ip_hdr(skb)->frag_off == htons(1 << 14))) &&
	    ((ip_hdr(skb)->protocol == IPPROTO_TCP) ||
	     (ip_hdr(skb)->protocol == IPPROTO_UDP))) {
		/* Use hardware checksum calc */
		pko_command.s.ipoffp1 = skb_network_offset(skb) + 1;
	}
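
	/*
	 * ipoffp1 is the L3 header offset plus one (zero disables the
	 * feature), so a plain Ethernet frame yields 14 + 1 = 15.  The
	 * guards above restrict hardware checksumming to simple IPv4
	 * TCP/UDP packets: no IP options (ihl == 5) and not fragmented
	 * (frag_off of zero, or only the DF bit, 1 << 14, set).
	 */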

	if (USE_ASYNC_IOBDMA) {
		/* Get the number of skbuffs in use by the hardware */
		CVMX_SYNCIOBDMA;
		skb_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		buffers_to_free = cvmx_scratch_read64(CVMX_SCR_SCRATCH + 8);
	} else {
		/* Get the number of skbuffs in use by the hardware */
		skb_to_free = cvmx_fau_fetch_and_add32(priv->fau + qos * 4,
						       MAX_SKB_TO_FREE);
		buffers_to_free =
		    cvmx_fau_fetch_and_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, 0);
	}

	skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
						 priv->fau + qos * 4);

	/*
	 * If we're sending faster than the receive can free them then
	 * don't do the HW free.
	 */
	if ((buffers_to_free < -100) && !pko_command.s.dontfree)
		pko_command.s.dontfree = 1;
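
	/*
	 * Illustration of the accounting (an assumption drawn from how the
	 * counter is used in this file): FAU_NUM_PACKET_BUFFERS_TO_FREE is
	 * decremented once per hardware-freed packet (see QUEUE_HW below)
	 * and is presumably credited back as the receive side replenishes
	 * the pool, so a value below the -100 slack means the hardware owes
	 * the pool many buffers and we fall back to freeing from the core.
	 */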

	if (pko_command.s.dontfree) {
		queue_type = QUEUE_CORE;
		pko_command.s.reg0 = priv->fau + qos * 4;
	} else {
		queue_type = QUEUE_HW;
	}
	if (USE_ASYNC_IOBDMA)
		cvmx_fau_async_fetch_and_add32(
				CVMX_SCR_SCRATCH, FAU_TOTAL_TX_TO_CLEAN, 1);

	spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);

	/* Drop this packet if we have too many already queued to the HW */
	if (unlikely(skb_queue_len(&priv->tx_free_list[qos]) >=
		     MAX_OUT_QUEUE_DEPTH)) {
		if (dev->tx_queue_len != 0) {
			/* Drop the lock when notifying the core. */
			spin_unlock_irqrestore(&priv->tx_free_list[qos].lock,
					       flags);
			netif_stop_queue(dev);
			spin_lock_irqsave(&priv->tx_free_list[qos].lock,
					  flags);
		} else {
			/* If not using normal queueing, drop the packet. */
			queue_type = QUEUE_DROP;
			goto skip_xmit;
		}
	}

	cvmx_pko_send_packet_prepare(priv->port, priv->queue + qos,
				     CVMX_PKO_LOCK_NONE);

	/* Send the packet to the output queue */
	if (unlikely(cvmx_pko_send_packet_finish(priv->port,
						 priv->queue + qos,
						 pko_command, hw_buffer,
						 CVMX_PKO_LOCK_NONE))) {
		printk_ratelimited("%s: Failed to send the packet\n",
				   dev->name);
		queue_type = QUEUE_DROP;
	}
skip_xmit:
	to_free_list = NULL;

	switch (queue_type) {
	case QUEUE_DROP:
		skb->next = to_free_list;
		to_free_list = skb;
		priv->stats.tx_dropped++;
		break;
	case QUEUE_HW:
		cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE, -1);
		break;
	case QUEUE_CORE:
		__skb_queue_tail(&priv->tx_free_list[qos], skb);
		break;
	default:
		BUG();
	}

	while (skb_to_free > 0) {
		struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);

		t->next = to_free_list;
		to_free_list = t;
		skb_to_free--;
	}

	spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);

	/* Do the actual freeing outside of the lock. */
	while (to_free_list) {
		struct sk_buff *t = to_free_list;

		to_free_list = to_free_list->next;
		dev_kfree_skb_any(t);
	}

	if (USE_ASYNC_IOBDMA) {
		CVMX_SYNCIOBDMA;
		total_to_clean = cvmx_scratch_read64(CVMX_SCR_SCRATCH);
		/* Restore the scratch area */
		cvmx_scratch_write64(CVMX_SCR_SCRATCH, old_scratch);
		cvmx_scratch_write64(CVMX_SCR_SCRATCH + 8, old_scratch2);
	} else {
		total_to_clean = cvmx_fau_fetch_and_add32(
						FAU_TOTAL_TX_TO_CLEAN, 1);
	}

	if (total_to_clean & 0x3ff) {
		/*
		 * Schedule the cleanup tasklet every 1024 packets for
		 * the pathological case of high traffic on one port
		 * delaying clean up of packets on a different port
		 * that is blocked waiting for the cleanup.
		 */
		tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	}

	cvm_oct_kick_tx_poll_watchdog();

	return NETDEV_TX_OK;
}

/**
 * cvm_oct_xmit_pow - transmit a packet to the POW
 * @skb:    Packet to send
 * @dev:    Device info structure
 *
 * Return: Always 0.
 */
int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	void *packet_buffer;
	void *copy_location;

	/* Get a work queue entry */
	cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);

	if (unlikely(!work)) {
		printk_ratelimited("%s: Failed to allocate a work queue entry\n",
				   dev->name);
		priv->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* Get a packet buffer */
	packet_buffer = cvmx_fpa_alloc(CVMX_FPA_PACKET_POOL);
	if (unlikely(!packet_buffer)) {
		printk_ratelimited("%s: Failed to allocate a packet buffer\n",
				   dev->name);
		cvmx_fpa_free(work, CVMX_FPA_WQE_POOL, 1);
		priv->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return 0;
	}

	/*
	 * Calculate where we need to copy the data to. We need to
	 * leave 8 bytes for a next pointer (unused). We also need to
	 * include any configured skip. Then we need to align the IP
	 * packet src and dest into the same 64bit word. The below
	 * calculation may add a little extra, but that doesn't
	 * matter.
	 */
	copy_location = packet_buffer + sizeof(u64);
	copy_location += ((CVMX_HELPER_FIRST_MBUFF_SKIP + 7) & 0xfff8) + 6;
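
	/*
	 * Worked example, assuming CVMX_HELPER_FIRST_MBUFF_SKIP is 184:
	 * (184 + 7) & 0xfff8 rounds up to a multiple of 8 (still 184), so
	 * copy_location = packet_buffer + 8 + 184 + 6.  With the +6 skew,
	 * the IPv4 source address (Ethernet header 14 bytes + IP offset 12
	 * = byte 26 of the frame, and 26 + 6 is a multiple of 8) and the
	 * destination address right after it share a single aligned
	 * 64-bit word, as the comment above describes.
	 */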

	/*
	 * We have to copy the packet since whoever processes this
	 * packet will free it to a hardware pool. We can't use the
	 * trick of counting outstanding packets like in
	 * cvm_oct_xmit.
	 */
	memcpy(copy_location, skb->data, skb->len);

	/*
	 * Fill in some of the work queue fields. We may need to add
	 * more if the software at the other end needs them.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN68XX))
		work->word0.pip.cn38xx.hw_chksum = skb->csum;
	work->word1.len = skb->len;
	cvmx_wqe_set_port(work, priv->port);
	cvmx_wqe_set_qos(work, priv->port & 0x7);
	cvmx_wqe_set_grp(work, pow_send_group);
	work->word1.tag_type = CVMX_HELPER_INPUT_TAG_TYPE;
	work->word1.tag = pow_send_group;	/* FIXME */
	/* Default to zero; explicit zero assignments below are commented out */
	work->word2.u64 = 0;
	work->word2.s.bufs = 1;
	work->packet_ptr.u64 = 0;
	work->packet_ptr.s.addr = cvmx_ptr_to_phys(copy_location);
	work->packet_ptr.s.pool = CVMX_FPA_PACKET_POOL;
	work->packet_ptr.s.size = CVMX_FPA_PACKET_POOL_SIZE;
	work->packet_ptr.s.back = (copy_location - packet_buffer) >> 7;
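
	/*
	 * .back is expressed in 128-byte units (hence the >> 7): it tells
	 * the hardware how far packet_ptr.s.addr sits past the start of
	 * the FPA buffer, so the buffer can later be freed back to
	 * CVMX_FPA_PACKET_POOL from a middle-of-buffer pointer.
	 */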

	if (skb->protocol == htons(ETH_P_IP)) {
		work->word2.s.ip_offset = 14;
#if 0
		work->word2.s.vlan_valid = 0;	/* FIXME */
		work->word2.s.vlan_cfi = 0;	/* FIXME */
		work->word2.s.vlan_id = 0;	/* FIXME */
		work->word2.s.dec_ipcomp = 0;	/* FIXME */
#endif
		work->word2.s.tcp_or_udp =
		    (ip_hdr(skb)->protocol == IPPROTO_TCP) ||
		    (ip_hdr(skb)->protocol == IPPROTO_UDP);
#if 0
		/* No error, packet is internal */
		work->word2.s.dec_ipsec = 0;
		/* We only support IPv4 right now */
		work->word2.s.is_v6 = 0;
		/* Hardware would set to zero */
		work->word2.s.software = 0;
		/* No error, packet is internal */
		work->word2.s.L4_error = 0;
#endif
		work->word2.s.is_frag = !((ip_hdr(skb)->frag_off == 0) ||
					  (ip_hdr(skb)->frag_off ==
					   htons(1 << 14)));
#if 0
		/* Assume Linux is sending a good packet */
		work->word2.s.IP_exc = 0;
#endif
		work->word2.s.is_bcast = (skb->pkt_type == PACKET_BROADCAST);
		work->word2.s.is_mcast = (skb->pkt_type == PACKET_MULTICAST);
#if 0
		/* This is an IP packet */
		work->word2.s.not_IP = 0;
		/* No error, packet is internal */
		work->word2.s.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.s.err_code = 0;
#endif

		/*
		 * When copying the data, include 4 bytes of the
		 * ethernet header to align the same way hardware
		 * does.
		 */
		memcpy(work->packet_data, skb->data + 10,
		       sizeof(work->packet_data));
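
		/*
		 * Concretely: the Ethernet header is 14 bytes, so copying
		 * from skb->data + 10 places the last 4 bytes of it (the
		 * final two source-MAC bytes plus the 2-byte EtherType)
		 * ahead of the IP header in packet_data, matching the
		 * layout the PIP hardware produces on receive.
		 */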
	} else {
#if 0
		work->word2.snoip.vlan_valid = 0;	/* FIXME */
		work->word2.snoip.vlan_cfi = 0;	/* FIXME */
		work->word2.snoip.vlan_id = 0;	/* FIXME */
		work->word2.snoip.software = 0;	/* Hardware would set to zero */
#endif
		work->word2.snoip.is_rarp = skb->protocol == htons(ETH_P_RARP);
		work->word2.snoip.is_arp = skb->protocol == htons(ETH_P_ARP);
		work->word2.snoip.is_bcast =
		    (skb->pkt_type == PACKET_BROADCAST);
		work->word2.snoip.is_mcast =
		    (skb->pkt_type == PACKET_MULTICAST);
		work->word2.snoip.not_IP = 1;	/* IP was done up above */
#if 0
		/* No error, packet is internal */
		work->word2.snoip.rcv_error = 0;
		/* No error, packet is internal */
		work->word2.snoip.err_code = 0;
#endif
		memcpy(work->packet_data, skb->data, sizeof(work->packet_data));
	}

	/* Submit the packet to the POW */
	cvmx_pow_work_submit(work, work->word1.tag, work->word1.tag_type,
			     cvmx_wqe_get_qos(work), cvmx_wqe_get_grp(work));
	priv->stats.tx_packets++;
	priv->stats.tx_bytes += skb->len;
	dev_consume_skb_any(skb);
	return 0;
}

/**
 * cvm_oct_tx_shutdown_dev - free all skbs that are currently queued for TX.
 * @dev:    Device being shutdown
 */
void cvm_oct_tx_shutdown_dev(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	unsigned long flags;
	int qos;

	for (qos = 0; qos < 16; qos++) {
		spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
		while (skb_queue_len(&priv->tx_free_list[qos]))
			dev_kfree_skb_any(__skb_dequeue
					  (&priv->tx_free_list[qos]));
		spin_unlock_irqrestore(&priv->tx_free_list[qos].lock, flags);
	}
}

static void cvm_oct_tx_do_cleanup(unsigned long arg)
{
	int port;

	for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
		if (cvm_oct_device[port]) {
			struct net_device *dev = cvm_oct_device[port];

			cvm_oct_free_tx_skbs(dev);
		}
	}
}

static irqreturn_t cvm_oct_tx_cleanup_watchdog(int cpl, void *dev_id)
{
	/* Disable the interrupt. */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Do the work in the tasklet. */
	tasklet_schedule(&cvm_oct_tx_cleanup_tasklet);
	return IRQ_HANDLED;
}

void cvm_oct_tx_initialize(void)
{
	int i;

	/* Disable the interrupt. */
	cvmx_write_csr(CVMX_CIU_TIMX(1), 0);
	/* Register an IRQ handler to receive CIU_TIMX(1) interrupts */
	i = request_irq(OCTEON_IRQ_TIMER1,
			cvm_oct_tx_cleanup_watchdog, 0,
			"Ethernet", cvm_oct_device);

	if (i)
		panic("Could not acquire Ethernet IRQ %d\n", OCTEON_IRQ_TIMER1);
}

void cvm_oct_tx_shutdown(void)
{
	/* Free the interrupt handler */
	free_irq(OCTEON_IRQ_TIMER1, cvm_oct_device);
}