/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#ifndef _VNIC_RQ_H_
#define _VNIC_RQ_H_

#include <linux/pci.h>
#include <linux/netdevice.h>

#include "vnic_dev.h"
#include "vnic_cq.h"

/* Receive queue control */
struct vnic_rq_ctrl {
	u64 ring_base;			/* 0x00 */
	u32 ring_size;			/* 0x08 */
	u32 pad0;
	u32 posted_index;		/* 0x10 */
	u32 pad1;
	u32 cq_index;			/* 0x18 */
	u32 pad2;
	u32 enable;			/* 0x20 */
	u32 pad3;
	u32 running;			/* 0x28 */
	u32 pad4;
	u32 fetch_index;		/* 0x30 */
	u32 pad5;
	u32 error_interrupt_enable;	/* 0x38 */
	u32 pad6;
	u32 error_interrupt_offset;	/* 0x40 */
	u32 pad7;
	u32 error_status;		/* 0x48 */
	u32 pad8;
	u32 dropped_packet_count;	/* 0x50 */
	u32 pad9;
	u32 dropped_packet_count_rc;	/* 0x58 */
	u32 pad10;
};

/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
#define VNIC_RQ_BUF_BLK_SZ(entries) \
	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)

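/* Worked example of the block math above: a 4096-entry ring is at or
 * above the 64-entry default, so VNIC_RQ_BUF_BLK_ENTRIES(4096) is 64 and
 * VNIC_RQ_BUF_BLKS_MAX = DIV_ROUND_UP(4096, 64) = 64 blocks. A small
 * ring of, say, 48 entries falls below the default and uses 32-entry
 * blocks instead: DIV_ROUND_UP(48, 32) = 2 blocks.
 */
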
struct vnic_rq_buf {
	struct vnic_rq_buf *next;
	dma_addr_t dma_addr;
	void *os_buf;
	unsigned int os_buf_index;
	unsigned int len;
	unsigned int index;
	void *desc;
	uint64_t wr_id;
};

struct vnic_rq {
	unsigned int index;
	struct vnic_dev *vdev;
	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
	struct vnic_dev_ring ring;
	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
	struct vnic_rq_buf *to_use;	/* next free buf to post */
	struct vnic_rq_buf *to_clean;	/* oldest posted buf, next to complete */
	void *os_buf_head;
	unsigned int pkts_outstanding;
#ifdef CONFIG_NET_RX_BUSY_POLL
#define ENIC_POLL_STATE_IDLE		0
#define ENIC_POLL_STATE_NAPI		(1 << 0) /* NAPI owns this poll */
#define ENIC_POLL_STATE_POLL		(1 << 1) /* poll owns this poll */
#define ENIC_POLL_STATE_NAPI_YIELD	(1 << 2) /* NAPI yielded this poll */
#define ENIC_POLL_STATE_POLL_YIELD	(1 << 3) /* poll yielded this poll */
#define ENIC_POLL_YIELD			(ENIC_POLL_STATE_NAPI_YIELD |	\
					 ENIC_POLL_STATE_POLL_YIELD)
#define ENIC_POLL_LOCKED		(ENIC_POLL_STATE_NAPI |		\
					 ENIC_POLL_STATE_POLL)
#define ENIC_POLL_USER_PEND		(ENIC_POLL_STATE_POLL |		\
					 ENIC_POLL_STATE_POLL_YIELD)
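	/* bpoll_state records who owns the RQ at the moment: NAPI, a
	 * busy-polling user context, or nobody (IDLE). The *_YIELD bits
	 * note that the other context tried to take ownership and backed
	 * off, so the current owner knows work is pending when it unlocks.
	 */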
	unsigned int bpoll_state;
	spinlock_t bpoll_lock;
#endif /* CONFIG_NET_RX_BUSY_POLL */
};

static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
{
	/* how many does SW own? */
	return rq->ring.desc_avail;
}

static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
{
	/* how many does HW own? */
	return rq->ring.desc_count - rq->ring.desc_avail - 1;
}

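/* Note that the two counts deliberately sum to desc_count - 1: one slot
 * is always held back, the usual ring-buffer trick, so the hardware's
 * fetch_index catching up to posted_index can only ever mean "empty",
 * never "full".
 */
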
static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
{
	return rq->to_use->desc;
}

static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
{
	return rq->to_use->index;
}

static inline void vnic_rq_post(struct vnic_rq *rq,
	void *os_buf, unsigned int os_buf_index,
	dma_addr_t dma_addr, unsigned int len,
	uint64_t wrid)
{
	struct vnic_rq_buf *buf = rq->to_use;

	buf->os_buf = os_buf;
	buf->os_buf_index = os_buf_index;
	buf->dma_addr = dma_addr;
	buf->len = len;
	buf->wr_id = wrid;

	buf = buf->next;
	rq->to_use = buf;
	rq->ring.desc_avail--;

	/* Move the posted_index every nth descriptor
	 */

#ifndef VNIC_RQ_RETURN_RATE
#define VNIC_RQ_RETURN_RATE		0xf	/* keep 2^n - 1 */
#endif

	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
		/* Adding write memory barrier prevents compiler and/or CPU
		 * reordering, thus avoiding descriptor posting before
		 * descriptor is initialized. Otherwise, hardware can read
		 * stale descriptor fields.
		 */
		wmb();
		iowrite32(buf->index, &rq->ctrl->posted_index);
	}
}

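/* Typical posting sequence, as a sketch (the descriptor encoder below is
 * hypothetical; enic's real one lives in rq_enet_desc.h): the caller
 * fills in the descriptor it got from vnic_rq_next_desc() first, then
 * hands the buffer over, so the wmb()/iowrite32 above publish a fully
 * initialized descriptor to the hardware:
 *
 *	desc = vnic_rq_next_desc(rq);
 *	my_rq_desc_enc(desc, dma_addr, len);	(hypothetical encoder)
 *	vnic_rq_post(rq, skb, 0, dma_addr, len, 0);
 */
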
static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
{
	rq->ring.desc_avail += count;
}

enum desc_return_options {
	VNIC_RQ_RETURN_DESC,
	VNIC_RQ_DEFER_RETURN_DESC,
};

static inline void vnic_rq_service(struct vnic_rq *rq,
	struct cq_desc *cq_desc, u16 completed_index,
	int desc_return, void (*buf_service)(struct vnic_rq *rq,
	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
	int skipped, void *opaque), void *opaque)
{
	struct vnic_rq_buf *buf;
	int skipped;

	buf = rq->to_clean;
	while (1) {

		skipped = (buf->index != completed_index);

		(*buf_service)(rq, cq_desc, buf, skipped, opaque);

		if (desc_return == VNIC_RQ_RETURN_DESC)
			rq->ring.desc_avail++;

		rq->to_clean = buf->next;

		if (!skipped)
			break;

		buf = rq->to_clean;
	}
}

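/* buf_service sees skipped != 0 for buffers lying between to_clean and
 * completed_index that never got a completion descriptor of their own.
 * A minimal callback shape (name hypothetical; enic's real service
 * routines live in enic_main.c):
 *
 *	static void my_rq_buf_service(struct vnic_rq *rq,
 *		struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
 *		int skipped, void *opaque)
 *	{
 *		(unmap buf->dma_addr, then free buf->os_buf if skipped,
 *		 or hand it up the stack if not)
 *	}
 */
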
static inline int vnic_rq_fill(struct vnic_rq *rq,
	int (*buf_fill)(struct vnic_rq *rq))
{
	int err;

	while (vnic_rq_desc_avail(rq) > 0) {
		err = (*buf_fill)(rq);
		if (err)
			return err;
	}

	return 0;
}

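/* Usage sketch: refill the ring until it is full or allocation fails.
 * The callback (enic's own is enic_rq_alloc_buf() in enic_main.c) is
 * expected to allocate, map and vnic_rq_post() one buffer per call:
 *
 *	err = vnic_rq_fill(rq, enic_rq_alloc_buf);
 */
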
#ifdef CONFIG_NET_RX_BUSY_POLL
static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
	spin_lock_init(&rq->bpoll_lock);
	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
}

static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
	bool rc = true;

	spin_lock(&rq->bpoll_lock);
	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
		WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
		rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
		rc = false;
	} else {
		rq->bpoll_state = ENIC_POLL_STATE_NAPI;
	}
	spin_unlock(&rq->bpoll_lock);

	return rc;
}

static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
{
	bool rc = false;

	spin_lock(&rq->bpoll_lock);
	WARN_ON(rq->bpoll_state &
		(ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
		rc = true;
	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
	spin_unlock(&rq->bpoll_lock);

	return rc;
}

static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
	bool rc = true;

	spin_lock_bh(&rq->bpoll_lock);
	if (rq->bpoll_state & ENIC_POLL_LOCKED) {
		rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
		rc = false;
	} else {
		rq->bpoll_state |= ENIC_POLL_STATE_POLL;
	}
	spin_unlock_bh(&rq->bpoll_lock);

	return rc;
}

static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
{
	bool rc = false;

	spin_lock_bh(&rq->bpoll_lock);
	WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
	if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
		rc = true;
	rq->bpoll_state = ENIC_POLL_STATE_IDLE;
	spin_unlock_bh(&rq->bpoll_lock);

	return rc;
}

static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
	WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
	return rq->bpoll_state & ENIC_POLL_USER_PEND;
}

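/* How the lock pair is meant to be used from the NAPI handler, as a
 * sketch (service_rqs() is hypothetical; enic's real poll routines are
 * in enic_main.c): bail out if a busy-poller owns the queue, and only
 * complete NAPI when no user context is waiting:
 *
 *	if (!enic_poll_lock_napi(rq))
 *		return budget;
 *	work_done = service_rqs(rq, budget);
 *	if (work_done < budget && !enic_poll_busy_polling(rq))
 *		napi_complete(napi);
 *	enic_poll_unlock_napi(rq);
 *	return work_done;
 */
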
#else

static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
{
}

static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
{
	return true;
}

static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
{
	return false;
}

static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
{
	return false;
}

static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
{
	return false;
}

static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
{
	return false;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

void vnic_rq_free(struct vnic_rq *rq);
int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
	unsigned int desc_count, unsigned int desc_size);
void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
	unsigned int error_interrupt_enable,
	unsigned int error_interrupt_offset);
unsigned int vnic_rq_error_status(struct vnic_rq *rq);
void vnic_rq_enable(struct vnic_rq *rq);
int vnic_rq_disable(struct vnic_rq *rq);
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));

#endif /* _VNIC_RQ_H_ */