/* drivers/net/ethernet/qlogic/qed/qed_spq.c */

/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"

/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)

/***************************************************************************
* Blocking Implementation (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
                                void *cookie,
                                union event_ring_data *data, u8 fw_return_code)
{
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)cookie;

        comp_done->done                 = 0x1;
        comp_done->fw_return_code       = fw_return_code;

        /* Make the update visible to the waiting thread; pairs with the
         * smp_rmb() in qed_spq_block() below.
         */
        smp_wmb();
}

static int __qed_spq_block(struct qed_hwfn *p_hwfn,
                           struct qed_spq_entry *p_ent,
                           u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct qed_spq_comp_done *comp_done;

        comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count--) {
                /* Validate we received the completion update; pairs with
                 * the smp_wmb() in qed_spq_blocking_cb().
                 */
                smp_rmb();
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return 0;
                }
                usleep_range(5000, 10000);
        }

        return -EBUSY;
}

static int qed_spq_block(struct qed_hwfn *p_hwfn,
                         struct qed_spq_entry *p_ent,
                         u8 *p_fw_ret)
{
        int rc;

        if (!__qed_spq_block(p_hwfn, p_ent, p_fw_ret))
                return 0;

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc)
                DP_NOTICE(p_hwfn, "MCP drain failed\n");

        /* Retry after drain */
        if (!__qed_spq_block(p_hwfn, p_ent, p_fw_ret))
                return 0;

        DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

        return -EBUSY;
}
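
/* Illustrative sketch (kept out of the build): how a slowpath client
 * would typically post a ramrod in EBLOCK mode and end up in the blocking
 * wait above. qed_sp_init_request() and struct qed_sp_init_data come from
 * qed_sp.h; the function name and the use of COMMON_RAMROD_EMPTY here are
 * only examples, real callers pass their own protocol's command id.
 */
#if 0
static int qed_spq_post_eblock_example(struct qed_hwfn *p_hwfn)
{
        struct qed_sp_init_data init_data;
        struct qed_spq_entry *p_ent;
        u8 fw_return_code;
        int rc;

        memset(&init_data, 0, sizeof(init_data));
        init_data.cid = qed_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = QED_SPQ_MODE_EBLOCK;

        rc = qed_sp_init_request(p_hwfn, &p_ent,
                                 COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                 &init_data);
        if (rc)
                return rc;

        /* Doorbells the ramrod; for EBLOCK, qed_spq_post() then calls
         * qed_spq_block() above to poll for the EQ completion.
         */
        return qed_spq_post(p_hwfn, p_ent, &fw_return_code);
}
#endif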

/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
                              struct qed_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case QED_SPQ_MODE_EBLOCK:
        case QED_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = qed_spq_blocking_cb;
                break;
        case QED_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid,
                   p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi,
                   p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
                           QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return 0;
}

/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                  struct qed_spq *p_spq)
{
        u16                             pq;
        struct qed_cxt_info             cxt_info;
        struct core_conn_context        *p_cxt;
        union qed_qm_pq_params          pq_params;
        int                             rc;

        cxt_info.iid = p_spq->cid;

        rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* QM physical queue */
        memset(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
                DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
                DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}
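
/* For reference: SET_FIELD() used above is the common HSI bitfield helper
 * built on <NAME>_MASK / <NAME>_SHIFT constant pairs. Conceptually,
 * SET_FIELD(flags, NAME, val) expands to roughly:
 *
 *      flags &= ~(NAME_MASK << NAME_SHIFT);
 *      flags |= (val & NAME_MASK) << NAME_SHIFT;
 *
 * (see the macro's actual definition in the HSI headers for the details).
 */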

static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
                           struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
        struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = qed_chain_get_prod_idx(p_chain);
        struct slow_path_element        *elem;
        struct core_db_data             db;

        p_ent->elem.hdr.echo    = cpu_to_le16(echo);
        elem = qed_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
                return -EINVAL;
        }

        *elem = p_ent->elem; /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        memset(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
        db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

        /* make sure the SPQE is updated before the doorbell */
        wmb();

        DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure the doorbell is rung */
        wmb();

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
                   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
                   p_spq->cid, db.params, db.agg_flags,
                   qed_chain_get_prod_idx(p_chain));

        return 0;
}

/***************************************************************************
* Asynchronous events
***************************************************************************/
static int
qed_async_event_completion(struct qed_hwfn *p_hwfn,
                           struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return qed_sriov_eqe_event(p_hwfn,
                                           p_eqe->opcode,
                                           p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return -EINVAL;
        }
}

/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
                   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        mmiowb();
}

int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
        struct qed_eq *p_eq = cookie;
        struct qed_chain *p_chain = &p_eq->chain;
        int rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee that the fw_cons index we use points to a
         * usable element of the chain, so that the chain macros operate
         * on a valid entry.
         */
        if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
            qed_chain_get_usable_per_page(p_chain))
                fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

        /* Complete current segment of eq entries */
        while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

                if (!p_eqe) {
                        rc = -EINVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,
                           p_eqe->protocol_id,
                           p_eqe->reserved0,
                           le16_to_cpu(p_eqe->echo),
                           p_eqe->fw_return_code,
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (qed_async_event_completion(p_hwfn, p_eqe))
                                rc = -EINVAL;
                } else if (qed_spq_completion(p_hwfn,
                                              p_eqe->echo,
                                              p_eqe->fw_return_code,
                                              &p_eqe->data)) {
                        rc = -EINVAL;
                }

                qed_chain_recycle_consumed(p_chain);
        }

        qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

        return rc;
}

struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
        struct qed_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
        if (!p_eq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_eq'\n");
                return NULL;
        }

        /* Allocate and initialize EQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            num_elem,
                            sizeof(union event_ring_element),
                            &p_eq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        qed_int_register_cb(p_hwfn, qed_eq_completion,
                            p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        qed_eq_free(p_hwfn, p_eq);
        return NULL;
}

void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
        qed_chain_reset(&p_eq->chain);
}

void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
        if (!p_eq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_eq->chain);
        kfree(p_eq);
}

/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
                              struct eth_slow_path_rx_cqe *cqe,
                              enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->cdev))
                return 0;

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
                           struct eth_slow_path_rx_cqe *cqe)
{
        int rc;

        rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc)
                DP_NOTICE(p_hwfn,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);

        return rc;
}

/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_virt = NULL;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        INIT_LIST_HEAD(&p_spq->pending);
        INIT_LIST_HEAD(&p_spq->completion_pending);
        INIT_LIST_HEAD(&p_spq->free_pool);
        INIT_LIST_HEAD(&p_spq->unlimited_pending);
        spin_lock_init(&p_spq->lock);

        /* SPQ empty pool: carve the coherent block allocated in
         * qed_spq_alloc() into entries; each entry's data pointer
         * references the ramrod data embedded in that same entry.
         */
        p_phys  = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
        p_virt  = p_spq->p_virt;

        capacity = qed_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                list_add_tail(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct qed_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count             = 0;
        p_spq->comp_count               = 0;
        p_spq->comp_sent_count          = 0;
        p_spq->unlimited_pending_count  = 0;

        bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        qed_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        qed_chain_reset(&p_spq->chain);
}

int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_spq_entry *p_virt = NULL;
        struct qed_spq *p_spq = NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
        if (!p_spq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_spq'\n");
                return -ENOMEM;
        }

        /* SPQ ring */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_SINGLE,
                            QED_CHAIN_CNT_TYPE_U16,
                            0,   /* N/A when the mode is SINGLE */
                            sizeof(struct slow_path_element),
                            &p_spq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = qed_chain_get_capacity(&p_spq->chain);
        p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    capacity *
                                    sizeof(struct qed_spq_entry),
                                    &p_phys, GFP_KERNEL);

        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;
        p_hwfn->p_spq = p_spq;

        return 0;

spq_allocate_fail:
        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
        return -ENOMEM;
}

void qed_spq_free(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = qed_chain_get_capacity(&p_spq->chain);
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  capacity *
                                  sizeof(struct qed_spq_entry),
                                  p_spq->p_virt, p_spq->p_phys);
        }

        qed_chain_free(p_hwfn->cdev, &p_spq->chain);
        kfree(p_spq);
}

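/* Typical lifecycle of the SPQ structures in this file (the call sites
 * live in the qed core, e.g. qed_resc_alloc()/qed_resc_setup()):
 *
 *      qed_spq_alloc(p_hwfn)   - allocate the SPQ, its chain and the
 *                                DMA-coherent entry array
 *      qed_spq_setup(p_hwfn)   - initialize lists, acquire the CID and
 *                                program the HW context
 *      ...ramrods are posted via qed_spq_post() and completed via the EQ...
 *      qed_spq_free(p_hwfn)    - release everything on teardown
 */
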
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;
        int rc = 0;

        spin_lock_bh(&p_spq->lock);

        if (list_empty(&p_spq->free_pool)) {
                p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
                if (!p_ent) {
                        DP_NOTICE(p_hwfn,
                                  "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = -ENOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = list_first_entry(&p_spq->free_pool,
                                         struct qed_spq_entry, list);
                list_del(&p_ent->list);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        spin_unlock_bh(&p_spq->lock);
        return rc;
}

/* Locked variant; should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
                                   struct qed_spq_entry *p_ent)
{
        list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
        spin_lock_bh(&p_hwfn->p_spq->lock);
        __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_hwfn->p_spq->lock);
}

/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
                             struct qed_spq_entry *p_ent,
                             enum spq_priority priority)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (list_empty(&p_spq->free_pool)) {
                        list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return 0;
                } else {
                        struct qed_spq_entry *p_en2;

                        p_en2 = list_first_entry(&p_spq->free_pool,
                                                 struct qed_spq_entry, list);
                        list_del(&p_en2->list);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated
                         * p_ent; see qed_spq_post().
                         */
                        if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
                                kfree(p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case QED_SPQ_PRIORITY_NORMAL:
                list_add_tail(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case QED_SPQ_PRIORITY_HIGH:
                list_add(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

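/* Worked example of the flow above: suppose every ring-backed entry
 * carved out in qed_spq_setup() is already in flight. A client posting
 * one more ramrod gets a kzalloc()ed entry from qed_spq_get_entry()
 * whose ->queue points at unlimited_pending, so qed_spq_add_entry()
 * parks it there. Once a completion returns an entry to the free_pool,
 * qed_spq_pend_post() re-adds the parked request, and the branch above
 * copies it over the freed ring entry while preserving that entry's
 * DMA data pointer.
 */
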
/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
* Posting new Ramrods
***************************************************************************/
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
                             struct list_head *head, u32 keep_reserve)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        int rc;

        while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !list_empty(head)) {
                struct qed_spq_entry *p_ent =
                        list_first_entry(head, struct qed_spq_entry, list);
                list_del(&p_ent->list);
                list_add_tail(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        list_del(&p_ent->list);
                        __qed_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return 0;
}

static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
        struct qed_spq *p_spq = p_hwfn->p_spq;
        struct qed_spq_entry *p_ent = NULL;

        while (!list_empty(&p_spq->free_pool)) {
                if (list_empty(&p_spq->unlimited_pending))
                        break;

                p_ent = list_first_entry(&p_spq->unlimited_pending,
                                         struct qed_spq_entry, list);
                list_del(&p_ent->list);

                qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return qed_spq_post_list(p_hwfn, &p_spq->pending,
                                 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

int qed_spq_post(struct qed_hwfn *p_hwfn,
                 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
        int rc = 0;
        struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return -EINVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
                return -EINVAL;
        }

        /* Complete the entry */
        rc = qed_spq_fill_entry(p_hwfn, p_ent);

        spin_lock_bh(&p_spq->lock);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = qed_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        spin_unlock_bh(&p_spq->lock);

        if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
                /* For entries in EBLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        kfree(p_ent);
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                qed_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        spin_lock_bh(&p_spq->lock);
        list_del(&p_ent->list);
        qed_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __qed_spq_return_entry(p_hwfn, p_ent);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

int qed_spq_completion(struct qed_hwfn *p_hwfn,
                       __le16 echo,
                       u8 fw_return_code,
                       union event_ring_data *p_data)
{
        struct qed_spq          *p_spq;
        struct qed_spq_entry    *p_ent = NULL;
        struct qed_spq_entry    *tmp;
        struct qed_spq_entry    *found = NULL;
        int                     rc;

        if (!p_hwfn)
                return -EINVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return -EINVAL;

        spin_lock_bh(&p_spq->lock);
        list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
                if (p_ent->elem.hdr.echo == echo) {
                        u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;

                        list_del(&p_ent->list);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        __set_bit(pos, p_spq->p_comp_bitmap);

                        while (test_bit(p_spq->comp_bitmap_idx,
                                        p_spq->p_comp_bitmap)) {
                                __clear_bit(p_spq->comp_bitmap_idx,
                                            p_spq->p_comp_bitmap);
                                p_spq->comp_bitmap_idx++;
                                qed_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is relatively uncommon - depends on scenarios
                 * which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
                           le16_to_cpu(echo),
                           le16_to_cpu(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        spin_unlock_bh(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn,
                          "Failed to find an entry this EQE completes\n");
                return -EEXIST;
        }

        DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Complete: func %p cookie %p\n",
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if ((found->comp_mode != QED_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                qed_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        spin_lock_bh(&p_spq->lock);
        rc = qed_spq_pend_post(p_hwfn);
        spin_unlock_bh(&p_spq->lock);

        return rc;
}

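/* Standalone sketch (illustrative, kept out of the build) of the
 * out-of-order completion scheme used in qed_spq_completion() above:
 * each completion marks its ring slot in a bitmap, and the consumer only
 * advances over a contiguous prefix of completed slots, so a late
 * completion of an older entry can never be stepped over. Names here are
 * hypothetical.
 */
#if 0
static void example_ooo_complete(unsigned long *bitmap, u16 *cons_idx,
                                 u16 completed_idx, u16 ring_size)
{
        /* Mark the slot that just completed */
        __set_bit(completed_idx % ring_size, bitmap);

        /* Advance the consumer over the completed prefix only */
        while (test_bit(*cons_idx % ring_size, bitmap)) {
                __clear_bit(*cons_idx % ring_size, bitmap);
                (*cons_idx)++;
        }
}
#endif
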
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
        if (!p_consq) {
                DP_NOTICE(p_hwfn, "Failed to allocate `struct qed_consq'\n");
                return NULL;
        }

        /* Allocate and initialize ConsQ chain */
        if (qed_chain_alloc(p_hwfn->cdev,
                            QED_CHAIN_USE_TO_PRODUCE,
                            QED_CHAIN_MODE_PBL,
                            QED_CHAIN_CNT_TYPE_U16,
                            QED_CHAIN_PAGE_SIZE / 0x80,
                            0x80, &p_consq->chain)) {
                DP_NOTICE(p_hwfn, "Failed to allocate consq chain\n");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        qed_consq_free(p_hwfn, p_consq);
        return NULL;
}

void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
        qed_chain_reset(&p_consq->chain);
}

void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
        if (!p_consq)
                return;
        qed_chain_free(p_hwfn->cdev, &p_consq->chain);
        kfree(p_consq);
}