ixgbevf: move ring specific stats into ring specific structure
author:    Emil Tantilov <emil.s.tantilov@intel.com>
           Sat, 18 Jan 2014 02:30:00 +0000 (18:30 -0800)
committer: David S. Miller <davem@davemloft.net>
           Sat, 18 Jan 2014 03:15:10 +0000 (19:15 -0800)
This patch moves hot-path-specific statistics into the ring structure.
This allows us to drop the adapter structure from the argument list of
several hot-path functions and should help with performance.
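
For context, the synchronization pattern the per-ring counters rely on
looks roughly like this. This is a minimal sketch, not code from the
driver: the structures are trimmed to the stats fields, and
ring_account()/ring_snapshot() are hypothetical helper names standing in
for the clean-up and ndo_get_stats64 paths shown in the hunks below.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Trimmed-down view of the per-ring stats added by this patch. */
struct ixgbevf_stats {
	u64 packets;
	u64 bytes;
};

struct ixgbevf_ring {
	struct ixgbevf_stats stats;
	struct u64_stats_sync syncp;	/* guards 64-bit reads on 32-bit hosts */
};

/* Hot path (hypothetical helper): each queue updates only its own
 * ring, so no locks or cross-CPU cache-line bouncing are involved. */
static void ring_account(struct ixgbevf_ring *ring, u64 packets, u64 bytes)
{
	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets += packets;
	ring->stats.bytes += bytes;
	u64_stats_update_end(&ring->syncp);
}

/* Slow path (hypothetical helper): retry until a consistent snapshot
 * of both counters is seen, as ixgbevf_get_stats() does below. */
static void ring_snapshot(struct ixgbevf_ring *ring, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&ring->syncp);
		*packets = ring->stats.packets;
		*bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
}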

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Emil Tantilov <emil.s.tantilov@intel.com>
Tested-by: Phil Schmitt <phillip.j.schmitt@intel.com>
Signed-off-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c

diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index f4e0574..0769306 100644
@@ -411,15 +411,15 @@ static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
            tx_yields = 0, tx_cleaned = 0, tx_missed = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               rx_yields += adapter->rx_ring[i]->bp_yields;
-               rx_cleaned += adapter->rx_ring[i]->bp_cleaned;
-               rx_yields += adapter->rx_ring[i]->bp_yields;
+               rx_yields += adapter->rx_ring[i]->stats.yields;
+               rx_cleaned += adapter->rx_ring[i]->stats.cleaned;
+               rx_missed += adapter->rx_ring[i]->stats.misses;
        }
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               tx_yields += adapter->tx_ring[i]->bp_yields;
-               tx_cleaned += adapter->tx_ring[i]->bp_cleaned;
-               tx_yields += adapter->tx_ring[i]->bp_yields;
+               tx_yields += adapter->tx_ring[i]->stats.yields;
+               tx_cleaned += adapter->tx_ring[i]->stats.cleaned;
+               tx_missed += adapter->tx_ring[i]->stats.misses;
        }
 
        adapter->bp_rx_yields = rx_yields;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h b/drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
index 59a7574..0642bd2 100644
@@ -59,6 +59,29 @@ struct ixgbevf_rx_buffer {
        dma_addr_t dma;
 };
 
+struct ixgbevf_stats {
+       u64 packets;
+       u64 bytes;
+#ifdef BP_EXTENDED_STATS
+       u64 yields;
+       u64 misses;
+       u64 cleaned;
+#endif
+};
+
+struct ixgbevf_tx_queue_stats {
+       u64 restart_queue;
+       u64 tx_busy;
+       u64 tx_done_old;
+};
+
+struct ixgbevf_rx_queue_stats {
+       u64 non_eop_descs;
+       u64 alloc_rx_page_failed;
+       u64 alloc_rx_buff_failed;
+       u64 csum_err;
+};
+
 struct ixgbevf_ring {
        struct ixgbevf_ring *next;
        struct net_device *netdev;
@@ -70,22 +93,20 @@ struct ixgbevf_ring {
        unsigned int next_to_use;
        unsigned int next_to_clean;
 
-       int queue_index; /* needed for multiqueue queue management */
        union {
                struct ixgbevf_tx_buffer *tx_buffer_info;
                struct ixgbevf_rx_buffer *rx_buffer_info;
        };
 
-       u64                     total_bytes;
-       u64                     total_packets;
-       struct u64_stats_sync   syncp;
+       struct ixgbevf_stats stats;
+       struct u64_stats_sync syncp;
+       union {
+               struct ixgbevf_tx_queue_stats tx_stats;
+               struct ixgbevf_rx_queue_stats rx_stats;
+       };
+
        u64 hw_csum_rx_error;
        u64 hw_csum_rx_good;
-#ifdef BP_EXTENDED_STATS
-       u64 bp_yields;
-       u64 bp_misses;
-       u64 bp_cleaned;
-#endif
        u8 __iomem *tail;
 
        u16 reg_idx; /* holds the special value that gets the hardware register
@@ -93,6 +114,7 @@ struct ixgbevf_ring {
                      * for DCB and RSS modes */
 
        u16 rx_buf_len;
+       int queue_index; /* needed for multiqueue queue management */
 };
 
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
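
A given ring only ever services transmit or receive, never both, so the
tx_stats/rx_stats pair above can share storage through the anonymous
union without any runtime checks. A minimal illustration (the helper
name is hypothetical, not from the patch):

/* On an Rx ring only the rx_stats member of the union is ever
 * touched; the same bytes would hold tx_stats on a Tx ring. */
static inline void ixgbevf_count_csum_err(struct ixgbevf_ring *rx_ring)
{
	rx_ring->rx_stats.csum_err++;
}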
@@ -186,7 +208,7 @@ static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
                q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
                rc = false;
 #ifdef BP_EXTENDED_STATS
-               q_vector->tx.ring->bp_yields++;
+               q_vector->tx.ring->stats.yields++;
 #endif
        } else {
                /* we don't care if someone yielded */
@@ -221,7 +243,7 @@ static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
                q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
                rc = false;
 #ifdef BP_EXTENDED_STATS
-               q_vector->rx.ring->bp_yields++;
+               q_vector->rx.ring->stats.yields++;
 #endif
        } else {
                /* preserve yield marks */
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index a9dd678..69e6d27 100644
@@ -268,8 +268,8 @@ static bool ixgbevf_clean_tx_irq(struct ixgbevf_q_vector *q_vector,
        }
 
        u64_stats_update_begin(&tx_ring->syncp);
-       tx_ring->total_bytes += total_bytes;
-       tx_ring->total_packets += total_packets;
+       tx_ring->stats.bytes += total_bytes;
+       tx_ring->stats.packets += total_packets;
        u64_stats_update_end(&tx_ring->syncp);
        q_vector->tx.total_bytes += total_bytes;
        q_vector->tx.total_packets += total_packets;
@@ -343,7 +343,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
        /* if IP and error */
        if ((status_err & IXGBE_RXD_STAT_IPCS) &&
            (status_err & IXGBE_RXDADV_ERR_IPE)) {
-               ring->hw_csum_rx_error++;
+               ring->rx_stats.csum_err++;
                return;
        }
 
@@ -351,7 +351,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
                return;
 
        if (status_err & IXGBE_RXDADV_ERR_TCPE) {
-               ring->hw_csum_rx_error++;
+               ring->rx_stats.csum_err++;
                return;
        }
 
@@ -362,10 +362,9 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
 
 /**
  * ixgbevf_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: rx descriptor ring (for a specific queue) to setup buffers on
  **/
-static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
-                                    struct ixgbevf_ring *rx_ring,
+static void ixgbevf_alloc_rx_buffers(struct ixgbevf_ring *rx_ring,
                                     int cleaned_count)
 {
        union ixgbe_adv_rx_desc *rx_desc;
@@ -404,7 +403,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
        }
 
 no_buffers:
-       adapter->alloc_rx_buff_failed++;
+       rx_ring->rx_stats.alloc_rx_buff_failed++;
        if (rx_ring->next_to_use != i)
                ixgbevf_release_rx_desc(rx_ring, i);
 }
@@ -421,7 +420,6 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                                struct ixgbevf_ring *rx_ring,
                                int budget)
 {
-       struct ixgbevf_adapter *adapter = q_vector->adapter;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbevf_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
@@ -467,7 +465,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                if (!(staterr & IXGBE_RXD_STAT_EOP)) {
                        skb->next = next_buffer->skb;
                        IXGBE_CB(skb->next)->prev = skb;
-                       adapter->non_eop_descs++;
+                       rx_ring->rx_stats.non_eop_descs++;
                        goto next_desc;
                }
 
@@ -499,7 +497,7 @@ static int ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
                 * source pruning.
                 */
                if ((skb->pkt_type & (PACKET_BROADCAST | PACKET_MULTICAST)) &&
-                   ether_addr_equal(adapter->netdev->dev_addr,
+                   ether_addr_equal(rx_ring->netdev->dev_addr,
                                     eth_hdr(skb)->h_source)) {
                        dev_kfree_skb_irq(skb);
                        goto next_desc;
@@ -512,8 +510,7 @@ next_desc:
 
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
-                       ixgbevf_alloc_rx_buffers(adapter, rx_ring,
-                                                cleaned_count);
+                       ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
@@ -528,11 +525,11 @@ next_desc:
        cleaned_count = ixgbevf_desc_unused(rx_ring);
 
        if (cleaned_count)
-               ixgbevf_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+               ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
 
        u64_stats_update_begin(&rx_ring->syncp);
-       rx_ring->total_packets += total_rx_packets;
-       rx_ring->total_bytes += total_rx_bytes;
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
        u64_stats_update_end(&rx_ring->syncp);
        q_vector->rx.total_packets += total_rx_packets;
        q_vector->rx.total_bytes += total_rx_bytes;
@@ -637,9 +634,9 @@ static int ixgbevf_busy_poll_recv(struct napi_struct *napi)
                found = ixgbevf_clean_rx_irq(q_vector, ring, 4);
 #ifdef BP_EXTENDED_STATS
                if (found)
-                       ring->bp_cleaned += found;
+                       ring->stats.cleaned += found;
                else
-                       ring->bp_misses++;
+                       ring->stats.misses++;
 #endif
                if (found)
                        break;
@@ -1313,7 +1310,7 @@ static void ixgbevf_configure_rx_ring(struct ixgbevf_adapter *adapter,
        IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(reg_idx), rxdctl);
 
        ixgbevf_rx_desc_queue_enable(adapter, ring);
-       ixgbevf_alloc_rx_buffers(adapter, ring, ixgbevf_desc_unused(ring));
+       ixgbevf_alloc_rx_buffers(ring, ixgbevf_desc_unused(ring));
 }
 
 /**
@@ -3048,8 +3045,6 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
 
 static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 {
-       struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev);
-
        netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
@@ -3063,7 +3058,8 @@ static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
-       ++adapter->restart_queue;
+       ++tx_ring->tx_stats.restart_queue;
+
        return 0;
 }
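
For reference, the stop/wake race that restart_queue counts plays out in
the full function roughly as follows. This is a sketch reconstructed
around the hunk above, not the verbatim driver source: the smp_mb() is
the open-coded barrier the truncated comment refers to, and the recheck
guards against the clean-up path freeing descriptors between the stop
and the test.

static int __ixgbevf_maybe_stop_tx(struct ixgbevf_ring *tx_ring, int size)
{
	netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);

	/* Make the stopped state visible before rechecking; pairs with
	 * the barrier in the transmit clean-up path. */
	smp_mb();

	/* Another CPU may have freed descriptors in the meantime. */
	if (likely(ixgbevf_desc_unused(tx_ring) < size))
		return -EBUSY;

	/* A reprieve! - use start_queue because it doesn't call schedule */
	netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
	++tx_ring->tx_stats.restart_queue;

	return 0;
}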
 
@@ -3108,7 +3104,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        count += skb_shinfo(skb)->nr_frags;
 #endif
        if (ixgbevf_maybe_stop_tx(tx_ring, count + 3)) {
-               adapter->tx_busy++;
+               tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
 
@@ -3308,8 +3304,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
                ring = adapter->rx_ring[i];
                do {
                        start = u64_stats_fetch_begin_bh(&ring->syncp);
-                       bytes = ring->total_bytes;
-                       packets = ring->total_packets;
+                       bytes = ring->stats.bytes;
+                       packets = ring->stats.packets;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                stats->rx_bytes += bytes;
                stats->rx_packets += packets;
@@ -3319,8 +3315,8 @@ static struct rtnl_link_stats64 *ixgbevf_get_stats(struct net_device *netdev,
                ring = adapter->tx_ring[i];
                do {
                        start = u64_stats_fetch_begin_bh(&ring->syncp);
-                       bytes = ring->total_bytes;
-                       packets = ring->total_packets;
+                       bytes = ring->stats.bytes;
+                       packets = ring->stats.packets;
                } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
                stats->tx_bytes += bytes;
                stats->tx_packets += packets;