/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2014 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#ifndef _IXGBEVF_H_
#define _IXGBEVF_H_

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/u64_stats_sync.h>

#include "vf.h"

#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#define BP_EXTENDED_STATS
#endif

#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	BIT(IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)

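/* Illustrative worked example (not from the original source): with
 * IXGBE_MAX_TXD_PWR = 14 a single descriptor can map at most 16384 bytes,
 * so a 64KB fragment costs TXD_USE_COUNT(65536) =
 * DIV_ROUND_UP(65536, 16384) = 4 descriptors.  DESC_NEEDED budgets one
 * descriptor per possible skb fragment plus slack for head data and a
 * context descriptor.
 */
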
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbevf_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	struct sk_buff *skb;
	unsigned int bytecount;
	unsigned short gso_segs;
	__be16 protocol;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct ixgbevf_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	unsigned int page_offset;
};

struct ixgbevf_stats {
	u64 packets;
	u64 bytes;
#ifdef BP_EXTENDED_STATS
	u64 yields;
	u64 misses;
	u64 cleaned;
#endif
};

struct ixgbevf_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
};

struct ixgbevf_rx_queue_stats {
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
	u64 csum_err;
};

enum ixgbevf_ring_state_t {
	__IXGBEVF_TX_DETECT_HANG,
	__IXGBEVF_HANG_CHECK_ARMED,
};

#define check_for_tx_hang(ring) \
	test_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBEVF_TX_DETECT_HANG, &(ring)->state)

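/* Illustrative usage (not from the original source): the watchdog arms
 * hang detection on each ring and the Tx completion path tests it, e.g.
 *
 *	set_check_for_tx_hang(tx_ring);
 *	...
 *	if (check_for_tx_hang(tx_ring) && tx_work_pending(tx_ring))
 *		schedule_reset(adapter);
 *
 * tx_work_pending() and schedule_reset() are hypothetical stand-ins for
 * the driver's real hang test and recovery path.
 */
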
struct ixgbevf_ring {
	struct ixgbevf_ring *next;
	struct net_device *netdev;
	struct device *dev;
	void *desc;			/* descriptor ring memory */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */
	u16 count;			/* number of descriptors */
	u16 next_to_use;
	u16 next_to_clean;
	u16 next_to_alloc;

	union {
		struct ixgbevf_tx_buffer *tx_buffer_info;
		struct ixgbevf_rx_buffer *rx_buffer_info;
	};
	unsigned long state;
	struct ixgbevf_stats stats;
	struct u64_stats_sync syncp;
	union {
		struct ixgbevf_tx_queue_stats tx_stats;
		struct ixgbevf_rx_queue_stats rx_stats;
	};

	u64 hw_csum_rx_error;
	u8 __iomem *tail;
	struct sk_buff *skb;

	u16 reg_idx;		/* holds the special value that gets the
				 * hardware register offset associated with
				 * this ring, which is different for DCB and
				 * RSS modes */
	int queue_index;	/* needed for multiqueue queue management */
};

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define IXGBEVF_RX_BUFFER_WRITE	16	/* Must be power of 2 */

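/* Illustrative note (not from the original source): the Rx clean path
 * accumulates freed descriptors and refills in batches, e.g.
 *
 *	if (cleaned_count >= IXGBEVF_RX_BUFFER_WRITE) {
 *		ixgbevf_alloc_rx_buffers(rx_ring, cleaned_count);
 *		cleaned_count = 0;
 *	}
 *
 * so the ring tail register is written once per batch rather than once
 * per buffer, amortizing the MMIO cost.
 */
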
#define MAX_RX_QUEUES	IXGBE_VF_MAX_RX_QUEUES
#define MAX_TX_QUEUES	IXGBE_VF_MAX_TX_QUEUES
#define IXGBEVF_MAX_RSS_QUEUES	2

#define IXGBEVF_DEFAULT_TXD	1024
#define IXGBEVF_DEFAULT_RXD	512
#define IXGBEVF_MAX_TXD		4096
#define IXGBEVF_MIN_TXD		64
#define IXGBEVF_MAX_RXD		4096
#define IXGBEVF_MIN_RXD		64

/* Supported Rx Buffer Sizes */
#define IXGBEVF_RXBUFFER_256	256	/* Used for packet split */
#define IXGBEVF_RXBUFFER_2048	2048

#define IXGBEVF_RX_HDR_SIZE	IXGBEVF_RXBUFFER_256
#define IXGBEVF_RX_BUFSZ	IXGBEVF_RXBUFFER_2048

#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN)

#define IXGBE_TX_FLAGS_CSUM		(u32)(1)
#define IXGBE_TX_FLAGS_VLAN		(u32)(1 << 1)
#define IXGBE_TX_FLAGS_TSO		(u32)(1 << 2)
#define IXGBE_TX_FLAGS_IPV4		(u32)(1 << 3)
#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0x0000e000
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16

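/* Illustrative example (not from the original source): the 802.1Q TCI for
 * priority 5, VLAN ID 100 is (5 << 13) | 100 = 0xa064.  The transmit path
 * carries it in the upper half of tx_flags:
 *
 *	tx_flags |= 0xa064 << IXGBE_TX_FLAGS_VLAN_SHIFT;
 *	tx_flags |= IXGBE_TX_FLAGS_VLAN;
 *
 * IXGBE_TX_FLAGS_VLAN_MASK then recovers the shifted TCI, while
 * IXGBE_TX_FLAGS_VLAN_PRIO_MASK picks out the 3 priority bits of the
 * unshifted TCI.
 */
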
struct ixgbevf_ring_container {
	struct ixgbevf_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbevf_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

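/* Illustrative usage (not from the original source): iterate all Tx rings
 * attached to a queue vector, e.g. from the poll routine:
 *
 *	struct ixgbevf_ring *ring;
 *
 *	ixgbevf_for_each_ring(ring, q_vector->tx)
 *		clean_tx_irq(q_vector, ring);
 *
 * clean_tx_irq() is a hypothetical stand-in for the per-ring Tx
 * completion handler.
 */
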
/* MAX_MSIX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbevf_q_vector {
	struct ixgbevf_adapter *adapter;
	u16 v_idx;		/* index of q_vector within array, also used
				 * for finding the bit in EICR and friends
				 * that represents the vector for this ring */
	u16 itr;		/* Interrupt throttle rate written to EITR */
	struct napi_struct napi;
	struct ixgbevf_ring_container rx, tx;
	char name[IFNAMSIZ + 9];
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned int state;
#define IXGBEVF_QV_STATE_IDLE		0
#define IXGBEVF_QV_STATE_NAPI		1    /* NAPI owns this QV */
#define IXGBEVF_QV_STATE_POLL		2    /* poll owns this QV */
#define IXGBEVF_QV_STATE_DISABLED	4    /* QV is disabled */
#define IXGBEVF_QV_OWNED   (IXGBEVF_QV_STATE_NAPI | IXGBEVF_QV_STATE_POLL)
#define IXGBEVF_QV_LOCKED  (IXGBEVF_QV_OWNED | IXGBEVF_QV_STATE_DISABLED)
#define IXGBEVF_QV_STATE_NAPI_YIELD	8    /* NAPI yielded this QV */
#define IXGBEVF_QV_STATE_POLL_YIELD	16   /* poll yielded this QV */
#define IXGBEVF_QV_YIELD	(IXGBEVF_QV_STATE_NAPI_YIELD | \
				 IXGBEVF_QV_STATE_POLL_YIELD)
#define IXGBEVF_QV_USER_PEND	(IXGBEVF_QV_STATE_POLL | \
				 IXGBEVF_QV_STATE_POLL_YIELD)
	spinlock_t lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};

#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void ixgbevf_qv_init_lock(struct ixgbevf_q_vector *q_vector)
{
	spin_lock_init(&q_vector->lock);
	q_vector->state = IXGBEVF_QV_STATE_IDLE;
}

/* called from the device poll routine to get ownership of a q_vector */
static inline bool ixgbevf_qv_lock_napi(struct ixgbevf_q_vector *q_vector)
{
	bool rc = true;

	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_LOCKED) {
		WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
		q_vector->state |= IXGBEVF_QV_STATE_NAPI_YIELD;
		rc = false;
#ifdef BP_EXTENDED_STATS
		q_vector->tx.ring->stats.yields++;
#endif
	} else {
		/* we don't care if someone yielded */
		q_vector->state = IXGBEVF_QV_STATE_NAPI;
	}
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* returns true if someone tried to get the qv while napi had it */
static inline bool ixgbevf_qv_unlock_napi(struct ixgbevf_q_vector *q_vector)
{
	bool rc = false;

	spin_lock_bh(&q_vector->lock);
	WARN_ON(q_vector->state & (IXGBEVF_QV_STATE_POLL |
				   IXGBEVF_QV_STATE_NAPI_YIELD));
	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
		rc = true;
	/* reset state to idle, unless QV is disabled */
	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* called from ixgbevf_low_latency_poll() */
static inline bool ixgbevf_qv_lock_poll(struct ixgbevf_q_vector *q_vector)
{
	bool rc = true;

	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_LOCKED) {
		q_vector->state |= IXGBEVF_QV_STATE_POLL_YIELD;
		rc = false;
#ifdef BP_EXTENDED_STATS
		q_vector->rx.ring->stats.yields++;
#endif
	} else {
		/* preserve yield marks */
		q_vector->state |= IXGBEVF_QV_STATE_POLL;
	}
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* returns true if someone tried to get the qv while it was locked */
static inline bool ixgbevf_qv_unlock_poll(struct ixgbevf_q_vector *q_vector)
{
	bool rc = false;

	spin_lock_bh(&q_vector->lock);
	WARN_ON(q_vector->state & IXGBEVF_QV_STATE_NAPI);
	if (q_vector->state & IXGBEVF_QV_STATE_POLL_YIELD)
		rc = true;
	/* reset state to idle, unless QV is disabled */
	q_vector->state &= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}

/* true if a socket is polling, even if it did not get the lock */
static inline bool ixgbevf_qv_busy_polling(struct ixgbevf_q_vector *q_vector)
{
	WARN_ON(!(q_vector->state & IXGBEVF_QV_OWNED));
	return q_vector->state & IXGBEVF_QV_USER_PEND;
}

/* false if QV is currently owned */
static inline bool ixgbevf_qv_disable(struct ixgbevf_q_vector *q_vector)
{
	bool rc = true;

	spin_lock_bh(&q_vector->lock);
	if (q_vector->state & IXGBEVF_QV_OWNED)
		rc = false;
	q_vector->state |= IXGBEVF_QV_STATE_DISABLED;
	spin_unlock_bh(&q_vector->lock);
	return rc;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

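/* Illustrative sketch (not from the original source): the NAPI poll
 * routine takes QV ownership before cleaning rings and backs off when a
 * busy-polling socket already holds it:
 *
 *	if (!ixgbevf_qv_lock_napi(q_vector))
 *		return budget;		// busy poll owns the QV; retry later
 *	...clean Tx/Rx rings...
 *	ixgbevf_qv_unlock_napi(q_vector);
 */
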
/* microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_10K_ITR		400
#define IXGBE_8K_ITR		500

/* Helper macros to switch between ints/sec and what the register uses.
 * And yes, it's the same math going both ways.  The lowest value
 * supported by all of the ixgbe hardware is 8.
 */
#define EITR_INTS_PER_SEC_TO_REG(_eitr) \
	((_eitr) ? (1000000000 / ((_eitr) * 256)) : 8)
#define EITR_REG_TO_INTS_PER_SEC EITR_INTS_PER_SEC_TO_REG

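/* Illustrative worked example (not from the original source): for a
 * 20000 ints/sec target,
 *
 *	EITR_INTS_PER_SEC_TO_REG(20000) = 1000000000 / (20000 * 256) = 195
 *
 * and feeding 195 back through the same macro yields 20032 ints/sec,
 * which is why one definition serves both conversions.
 */
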
/* ixgbevf_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbevf_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					  const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}

static inline u16 ixgbevf_desc_unused(struct ixgbevf_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}

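/* Illustrative worked example (not from the original source): with
 * count = 512, next_to_use = 10 and next_to_clean = 5 the ring has
 * wrapped relative to the clean index, so unused = 512 + 5 - 10 - 1 = 506.
 * The trailing "- 1" keeps one slot permanently empty so a full ring is
 * never mistaken for an empty one.
 */
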
static inline void ixgbevf_write_tail(struct ixgbevf_ring *ring, u32 value)
{
	writel(value, ring->tail);
}

#define IXGBEVF_RX_DESC(R, i) \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_DESC(R, i) \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBEVF_TX_CTXTDESC(R, i) \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))

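/* Illustrative usage (not from the original source): the macros index the
 * raw descriptor memory of a ring, e.g.
 *
 *	union ixgbe_adv_rx_desc *rx_desc =
 *		IXGBEVF_RX_DESC(rx_ring, rx_ring->next_to_clean);
 *
 *	if (ixgbevf_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 *		...descriptor has been written back by hardware...
 *
 * IXGBE_RXD_STAT_DD is the descriptor-done status bit from the shared
 * ixgbe descriptor definitions.
 */
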
#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */

#define OTHER_VECTOR	1
#define NON_Q_VECTORS	(OTHER_VECTOR)

#define MAX_MSIX_Q_VECTORS	2

#define MIN_MSIX_Q_VECTORS	1
#define MIN_MSIX_COUNT		(MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

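/* Illustrative note (not from the original source): with the defaults
 * above a VF asks for MAX_MSIX_Q_VECTORS + NON_Q_VECTORS = 3 MSI-X
 * vectors (two for queue work, one for the mailbox/"other" interrupt),
 * and MIN_MSIX_COUNT = 2 is the smallest allocation it can run with.
 */
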
/* board specific private data structure */
struct ixgbevf_adapter {
	/* this field must be first, see ixgbevf_process_skb_fields */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];

	struct ixgbevf_q_vector *q_vector[MAX_MSIX_Q_VECTORS];

	/* Interrupt Throttle Rate */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	/* interrupt masks */
	u32 eims_enable_mask;
	u32 eims_other;

	/* TX */
	int num_tx_queues;
	struct ixgbevf_ring *tx_ring[MAX_TX_QUEUES]; /* One per active queue */
	u32 tx_timeout_count;

	/* RX */
	int num_rx_queues;
	struct ixgbevf_ring *rx_ring[MAX_RX_QUEUES]; /* One per active queue */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	int num_msix_vectors;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBEVF_FLAG_RESET_REQUESTED		(u32)(1)
#define IXGBEVF_FLAG_QUEUE_RESET_REQUESTED	(u32)(1 << 2)

	struct msix_entry *msix_entries;

	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* structs defined in ixgbe_vf.h */
	struct ixgbe_hw hw;
	/* Interrupt Throttle Rate */
	u32 eitr_param;

	struct ixgbevf_hw_stats stats;

	unsigned long state;
	unsigned int tx_ring_count;
	unsigned int rx_ring_count;

#ifdef BP_EXTENDED_STATS
	u64 bp_rx_yields;
	u64 bp_rx_cleaned;
	u64 bp_rx_missed;
	u64 bp_tx_yields;
	u64 bp_tx_cleaned;
	u64 bp_tx_missed;
#endif

	u8 __iomem *io_addr; /* Mainly for iounmap use */
	u32 link_speed;
	bool link_up;

	struct timer_list service_timer;
	struct work_struct service_task;

	spinlock_t mbx_lock;
	unsigned long last_reset;
};

enum ixgbevf_state_t {
	__IXGBEVF_TESTING,
	__IXGBEVF_RESETTING,
	__IXGBEVF_DOWN,
	__IXGBEVF_DISABLED,
	__IXGBEVF_REMOVING,
	__IXGBEVF_SERVICE_SCHED,
	__IXGBEVF_SERVICE_INITED,
};

enum ixgbevf_boards {
	board_82599_vf,
	board_X540_vf,
	board_X550_vf,
	board_X550EM_x_vf,
};

extern const struct ixgbevf_info ixgbevf_82599_vf_info;
extern const struct ixgbevf_info ixgbevf_X540_vf_info;
extern const struct ixgbevf_info ixgbevf_X550_vf_info;
extern const struct ixgbevf_info ixgbevf_X550EM_x_vf_info;
extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;

/* needed by ethtool.c */
extern const char ixgbevf_driver_name[];
extern const char ixgbevf_driver_version[];

void ixgbevf_up(struct ixgbevf_adapter *adapter);
void ixgbevf_down(struct ixgbevf_adapter *adapter);
void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter);
void ixgbevf_reset(struct ixgbevf_adapter *adapter);
void ixgbevf_set_ethtool_ops(struct net_device *netdev);
int ixgbevf_setup_rx_resources(struct ixgbevf_ring *);
int ixgbevf_setup_tx_resources(struct ixgbevf_ring *);
void ixgbevf_free_rx_resources(struct ixgbevf_ring *);
void ixgbevf_free_tx_resources(struct ixgbevf_ring *);
void ixgbevf_update_stats(struct ixgbevf_adapter *adapter);
int ethtool_ioctl(struct ifreq *ifr);

void ixgbevf_write_eitr(struct ixgbevf_q_vector *q_vector);

void ixgbe_napi_add_all(struct ixgbevf_adapter *adapter);
void ixgbe_napi_del_all(struct ixgbevf_adapter *adapter);

#ifdef DEBUG
char *ixgbevf_get_hw_dev_name(struct ixgbe_hw *hw);
#define hw_dbg(hw, format, arg...) \
	printk(KERN_DEBUG "%s: " format, ixgbevf_get_hw_dev_name(hw), ##arg)
#else
#define hw_dbg(hw, format, arg...) do {} while (0)
#endif

#endif /* _IXGBEVF_H_ */