2 * Linux network driver for QLogic BR-series Converged Network Adapter.
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
14 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
15 * Copyright (c) 2014-2015 QLogic Corporation
24 /* IOC local definitions */
26 /* Asic specific macros: see bfa_hw_cb.c and bfa_hw_ct.c for details. */
28 #define bfa_ioc_firmware_lock(__ioc) \
29 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
30 #define bfa_ioc_firmware_unlock(__ioc) \
31 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
32 #define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
33 #define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
34 #define bfa_ioc_notify_fail(__ioc) \
35 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
36 #define bfa_ioc_sync_start(__ioc) \
37 ((__ioc)->ioc_hwif->ioc_sync_start(__ioc))
38 #define bfa_ioc_sync_join(__ioc) \
39 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
40 #define bfa_ioc_sync_leave(__ioc) \
41 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
42 #define bfa_ioc_sync_ack(__ioc) \
43 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
44 #define bfa_ioc_sync_complete(__ioc) \
45 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
46 #define bfa_ioc_set_cur_ioc_fwstate(__ioc, __fwstate) \
47 ((__ioc)->ioc_hwif->ioc_set_fwstate(__ioc, __fwstate))
48 #define bfa_ioc_get_cur_ioc_fwstate(__ioc) \
49 ((__ioc)->ioc_hwif->ioc_get_fwstate(__ioc))
50 #define bfa_ioc_set_alt_ioc_fwstate(__ioc, __fwstate) \
51 ((__ioc)->ioc_hwif->ioc_set_alt_fwstate(__ioc, __fwstate))
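/* These wrappers dispatch through the per-ASIC ops table installed by
 * bfa_nw_ioc_set_ct_hwif()/bfa_nw_ioc_set_ct2_hwif() (see
 * bfa_nw_ioc_pci_init() below), keeping the common IOC state machine
 * code independent of the chip generation.
 */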
53 static bool bfa_nw_auto_recover = true;
56 * forward declarations
58 static void bfa_ioc_hw_sem_init(struct bfa_ioc *ioc);
59 static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
60 static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
61 static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
62 static void bfa_ioc_poll_fwinit(struct bfa_ioc *ioc);
63 static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
64 static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
65 static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
66 static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
67 static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
68 static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
69 static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
70 static void bfa_ioc_mbox_flush(struct bfa_ioc *ioc);
71 static void bfa_ioc_recover(struct bfa_ioc *ioc);
72 static void bfa_ioc_event_notify(struct bfa_ioc *, enum bfa_ioc_event);
73 static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
74 static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
75 static void bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc);
76 static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
77 static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
78 static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
79 static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
80 static void bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc);
81 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
82 static enum bfa_status bfa_ioc_boot(struct bfa_ioc *ioc,
83 enum bfi_fwboot_type boot_type, u32 boot_param);
84 static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
85 static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
87 static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
89 static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
91 static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
93 static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
95 static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
96 static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
98 /* IOC state machine definitions/declarations */
100 IOC_E_RESET = 1, /*!< IOC reset request */
101 IOC_E_ENABLE = 2, /*!< IOC enable request */
102 IOC_E_DISABLE = 3, /*!< IOC disable request */
103 IOC_E_DETACH = 4, /*!< driver detach cleanup */
104 IOC_E_ENABLED = 5, /*!< f/w enabled */
105 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
106 IOC_E_DISABLED = 7, /*!< f/w disabled */
107 IOC_E_PFFAILED = 8, /*!< failure notice by iocpf sm */
108 IOC_E_HBFAIL = 9, /*!< heartbeat failure */
109 IOC_E_HWERROR = 10, /*!< hardware error interrupt */
110 IOC_E_TIMEOUT = 11, /*!< timeout */
111 IOC_E_HWFAILED = 12, /*!< PCI mapping failure notice */
114 bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
115 bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
116 bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
117 bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
118 bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
119 bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
120 bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
121 bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
122 bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
123 bfa_fsm_state_decl(bfa_ioc, hwfail, struct bfa_ioc, enum ioc_event);
125 static struct bfa_sm_table ioc_sm_table[] = {
126 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
127 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
128 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
129 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
130 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
131 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
132 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
133 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
134 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
135 {BFA_SM(bfa_ioc_sm_hwfail), BFA_IOC_HWFAIL},
139 * Forward declarations for iocpf state machine
141 static void bfa_iocpf_enable(struct bfa_ioc *ioc);
142 static void bfa_iocpf_disable(struct bfa_ioc *ioc);
143 static void bfa_iocpf_fail(struct bfa_ioc *ioc);
144 static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
145 static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
146 static void bfa_iocpf_stop(struct bfa_ioc *ioc);
148 /* IOCPF state machine events */
150 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
151 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
152 IOCPF_E_STOP = 3, /*!< stop on driver detach */
153 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
154 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
155 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
156 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
157 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
158 IOCPF_E_GETATTRFAIL = 9, /*!< getattr fail notice by ioc sm */
159 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
160 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
161 IOCPF_E_SEM_ERROR = 12, /*!< h/w sem mapping error */
165 enum bfa_iocpf_state {
166 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
167 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
168 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
169 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
170 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
171 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
172 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
173 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
174 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from driver's */
177 bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
178 bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
179 bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
180 bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
181 bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
182 bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
183 bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
184 bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
186 bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
187 bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
188 bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
189 bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
190 bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
192 bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
194 static struct bfa_sm_table iocpf_sm_table[] = {
195 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
196 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
197 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
198 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
199 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
200 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
201 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
202 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
203 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
204 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
205 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
206 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
207 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
208 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
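/* Note that several handlers intentionally report the same state above
 * (fwcheck/mismatch both map to BFA_IOCPF_FWMISMATCH, hwinit/enabling to
 * BFA_IOCPF_HWINIT); the table only translates the active state handler
 * into a reportable enum value.
 */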
211 /* IOC State Machine */
213 /* Beginning state. IOC uninit state. */
215 bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
219 /* IOC is in uninit state. */
221 bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
225 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
233 /* Reset entry actions -- initialize state machine */
235 bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
237 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
240 /* IOC is in reset state. */
242 bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
246 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
250 bfa_ioc_disable_comp(ioc);
254 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
263 bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
265 bfa_iocpf_enable(ioc);
268 /* Host IOC function is being enabled, awaiting response from firmware.
269 * Semaphore is acquired.
272 bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
276 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
280 /* !!! fall through !!! */
282 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
283 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
284 if (event != IOC_E_PFFAILED)
285 bfa_iocpf_initfail(ioc);
289 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
290 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
294 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
298 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
310 /* Semaphore should be acquired for version check. */
312 bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
314 mod_timer(&ioc->ioc_timer, jiffies +
315 msecs_to_jiffies(BFA_IOC_TOV));
316 bfa_ioc_send_getattr(ioc);
319 /* IOC configuration in progress. Timer is active. */
321 bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
324 case IOC_E_FWRSP_GETATTR:
325 del_timer(&ioc->ioc_timer);
326 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
331 del_timer(&ioc->ioc_timer);
334 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
335 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
336 if (event != IOC_E_PFFAILED)
337 bfa_iocpf_getattrfail(ioc);
341 del_timer(&ioc->ioc_timer);
342 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
354 bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
356 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
357 bfa_ioc_event_notify(ioc, BFA_IOC_E_ENABLED);
358 bfa_ioc_hb_monitor(ioc);
362 bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
369 bfa_ioc_hb_stop(ioc);
370 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
375 bfa_ioc_hb_stop(ioc);
376 /* !!! fall through !!! */
378 if (ioc->iocpf.auto_recover)
379 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
381 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
383 bfa_ioc_fail_notify(ioc);
385 if (event != IOC_E_PFFAILED)
395 bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
397 bfa_iocpf_disable(ioc);
400 /* IOC is being disabled */
402 bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
406 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
411 * No state change here. The IOC moves to the disabled
412 * state only after the iocpf sm completes its failure
413 * processing.
419 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
420 bfa_ioc_disable_comp(ioc);
428 /* IOC disable completion entry. */
430 bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
432 bfa_ioc_disable_comp(ioc);
436 bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
440 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
444 ioc->cbfn->disable_cbfn(ioc->bfa);
448 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
458 bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
462 /* Hardware initialization retry. */
464 bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
468 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
474 * Initialization retry failed.
476 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
477 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
478 if (event != IOC_E_PFFAILED)
479 bfa_iocpf_initfail(ioc);
483 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
484 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwfail);
491 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
495 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
505 bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
511 bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
515 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
519 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
523 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
528 /* HB failure notification, ignore. */
537 bfa_ioc_sm_hwfail_entry(struct bfa_ioc *ioc)
543 bfa_ioc_sm_hwfail(struct bfa_ioc *ioc, enum ioc_event event)
548 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
552 ioc->cbfn->disable_cbfn(ioc->bfa);
556 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
564 /* IOCPF State Machine */
566 /* Reset entry actions -- initialize state machine */
568 bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
570 iocpf->fw_mismatch_notified = false;
571 iocpf->auto_recover = bfa_nw_auto_recover;
574 /* Beginning state. IOC is in reset state. */
576 bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
580 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
591 /* Semaphore should be acquired for version check. */
593 bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
595 bfa_ioc_hw_sem_init(iocpf->ioc);
596 bfa_ioc_hw_sem_get(iocpf->ioc);
599 /* Awaiting h/w semaphore to continue with version check. */
601 bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
603 struct bfa_ioc *ioc = iocpf->ioc;
606 case IOCPF_E_SEMLOCKED:
607 if (bfa_ioc_firmware_lock(ioc)) {
608 if (bfa_ioc_sync_start(ioc)) {
609 bfa_ioc_sync_join(ioc);
610 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
612 bfa_ioc_firmware_unlock(ioc);
613 bfa_nw_ioc_hw_sem_release(ioc);
614 mod_timer(&ioc->sem_timer, jiffies +
615 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
618 bfa_nw_ioc_hw_sem_release(ioc);
619 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
623 case IOCPF_E_SEM_ERROR:
624 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
625 bfa_ioc_pf_hwfailed(ioc);
628 case IOCPF_E_DISABLE:
629 bfa_ioc_hw_sem_get_cancel(ioc);
630 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
631 bfa_ioc_pf_disabled(ioc);
635 bfa_ioc_hw_sem_get_cancel(ioc);
636 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
644 /* Notify enable completion callback */
646 bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
648 /* Call only the first time sm enters fwmismatch state. */
649 if (!iocpf->fw_mismatch_notified)
650 bfa_ioc_pf_fwmismatch(iocpf->ioc);
652 iocpf->fw_mismatch_notified = true;
653 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
654 msecs_to_jiffies(BFA_IOC_TOV));
657 /* Awaiting firmware version match. */
659 bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
661 struct bfa_ioc *ioc = iocpf->ioc;
664 case IOCPF_E_TIMEOUT:
665 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
668 case IOCPF_E_DISABLE:
669 del_timer(&ioc->iocpf_timer);
670 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
671 bfa_ioc_pf_disabled(ioc);
675 del_timer(&ioc->iocpf_timer);
676 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
684 /* Request for semaphore. */
686 bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
688 bfa_ioc_hw_sem_get(iocpf->ioc);
691 /* Awaiting semaphore for h/w initialization. */
693 bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
695 struct bfa_ioc *ioc = iocpf->ioc;
698 case IOCPF_E_SEMLOCKED:
699 if (bfa_ioc_sync_complete(ioc)) {
700 bfa_ioc_sync_join(ioc);
701 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
703 bfa_nw_ioc_hw_sem_release(ioc);
704 mod_timer(&ioc->sem_timer, jiffies +
705 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
709 case IOCPF_E_SEM_ERROR:
710 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
711 bfa_ioc_pf_hwfailed(ioc);
714 case IOCPF_E_DISABLE:
715 bfa_ioc_hw_sem_get_cancel(ioc);
716 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
725 bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
727 iocpf->poll_time = 0;
728 bfa_ioc_reset(iocpf->ioc, false);
731 /* Hardware is being initialized. Interrupts are enabled.
732 * Holding hardware semaphore lock.
735 bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
737 struct bfa_ioc *ioc = iocpf->ioc;
740 case IOCPF_E_FWREADY:
741 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
744 case IOCPF_E_TIMEOUT:
745 bfa_nw_ioc_hw_sem_release(ioc);
746 bfa_ioc_pf_failed(ioc);
747 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
750 case IOCPF_E_DISABLE:
751 del_timer(&ioc->iocpf_timer);
752 bfa_ioc_sync_leave(ioc);
753 bfa_nw_ioc_hw_sem_release(ioc);
754 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
763 bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
765 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
766 msecs_to_jiffies(BFA_IOC_TOV));
768 * Enable Interrupts before sending fw IOC ENABLE cmd.
770 iocpf->ioc->cbfn->reset_cbfn(iocpf->ioc->bfa);
771 bfa_ioc_send_enable(iocpf->ioc);
774 /* Host IOC function is being enabled, awaiting response from firmware.
775 * Semaphore is acquired.
778 bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
780 struct bfa_ioc *ioc = iocpf->ioc;
783 case IOCPF_E_FWRSP_ENABLE:
784 del_timer(&ioc->iocpf_timer);
785 bfa_nw_ioc_hw_sem_release(ioc);
786 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
789 case IOCPF_E_INITFAIL:
790 del_timer(&ioc->iocpf_timer);
792 * !!! fall through !!!
794 case IOCPF_E_TIMEOUT:
795 bfa_nw_ioc_hw_sem_release(ioc);
796 if (event == IOCPF_E_TIMEOUT)
797 bfa_ioc_pf_failed(ioc);
798 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
801 case IOCPF_E_DISABLE:
802 del_timer(&ioc->iocpf_timer);
803 bfa_nw_ioc_hw_sem_release(ioc);
804 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
813 bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
815 bfa_ioc_pf_enabled(iocpf->ioc);
819 bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
822 case IOCPF_E_DISABLE:
823 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
826 case IOCPF_E_GETATTRFAIL:
827 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
831 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
840 bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
842 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
843 msecs_to_jiffies(BFA_IOC_TOV));
844 bfa_ioc_send_disable(iocpf->ioc);
847 /* IOC is being disabled */
849 bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
851 struct bfa_ioc *ioc = iocpf->ioc;
854 case IOCPF_E_FWRSP_DISABLE:
855 del_timer(&ioc->iocpf_timer);
856 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
860 del_timer(&ioc->iocpf_timer);
862 * !!! fall through !!!
865 case IOCPF_E_TIMEOUT:
866 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
867 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
870 case IOCPF_E_FWRSP_ENABLE:
879 bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
881 bfa_ioc_hw_sem_get(iocpf->ioc);
884 /* IOC hb ack request is being removed. */
886 bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
888 struct bfa_ioc *ioc = iocpf->ioc;
891 case IOCPF_E_SEMLOCKED:
892 bfa_ioc_sync_leave(ioc);
893 bfa_nw_ioc_hw_sem_release(ioc);
894 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
897 case IOCPF_E_SEM_ERROR:
898 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
899 bfa_ioc_pf_hwfailed(ioc);
910 /* IOC disable completion entry. */
912 bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
914 bfa_ioc_mbox_flush(iocpf->ioc);
915 bfa_ioc_pf_disabled(iocpf->ioc);
919 bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
921 struct bfa_ioc *ioc = iocpf->ioc;
925 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
929 bfa_ioc_firmware_unlock(ioc);
930 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
939 bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
941 bfa_nw_ioc_debug_save_ftrc(iocpf->ioc);
942 bfa_ioc_hw_sem_get(iocpf->ioc);
945 /* Hardware initialization failed. */
947 bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
949 struct bfa_ioc *ioc = iocpf->ioc;
952 case IOCPF_E_SEMLOCKED:
953 bfa_ioc_notify_fail(ioc);
954 bfa_ioc_sync_leave(ioc);
955 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
956 bfa_nw_ioc_hw_sem_release(ioc);
957 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
960 case IOCPF_E_SEM_ERROR:
961 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
962 bfa_ioc_pf_hwfailed(ioc);
965 case IOCPF_E_DISABLE:
966 bfa_ioc_hw_sem_get_cancel(ioc);
967 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
971 bfa_ioc_hw_sem_get_cancel(ioc);
972 bfa_ioc_firmware_unlock(ioc);
973 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
985 bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
989 /* Hardware initialization failed. */
991 bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
993 struct bfa_ioc *ioc = iocpf->ioc;
996 case IOCPF_E_DISABLE:
997 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1001 bfa_ioc_firmware_unlock(ioc);
1002 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1006 bfa_sm_fault(event);
1011 bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1014 * Mark IOC as failed in hardware and stop firmware.
1016 bfa_ioc_lpu_stop(iocpf->ioc);
1019 * Flush any queued up mailbox requests.
1021 bfa_ioc_mbox_flush(iocpf->ioc);
1022 bfa_ioc_hw_sem_get(iocpf->ioc);
1025 /* IOC is in failed state. */
1027 bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1029 struct bfa_ioc *ioc = iocpf->ioc;
1032 case IOCPF_E_SEMLOCKED:
1033 bfa_ioc_sync_ack(ioc);
1034 bfa_ioc_notify_fail(ioc);
1035 if (!iocpf->auto_recover) {
1036 bfa_ioc_sync_leave(ioc);
1037 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_FAIL);
1038 bfa_nw_ioc_hw_sem_release(ioc);
1039 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1041 if (bfa_ioc_sync_complete(ioc))
1042 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1044 bfa_nw_ioc_hw_sem_release(ioc);
1045 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1050 case IOCPF_E_SEM_ERROR:
1051 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1052 bfa_ioc_pf_hwfailed(ioc);
1055 case IOCPF_E_DISABLE:
1056 bfa_ioc_hw_sem_get_cancel(ioc);
1057 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1064 bfa_sm_fault(event);
1069 bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1073 /* IOC is in failed state. */
1075 bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1078 case IOCPF_E_DISABLE:
1079 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1083 bfa_sm_fault(event);
1087 /* BFA IOC private functions */
1089 /* Notify common modules registered for notification. */
1091 bfa_ioc_event_notify(struct bfa_ioc *ioc, enum bfa_ioc_event event)
1093 struct bfa_ioc_notify *notify;
1094 struct list_head *qe;
1096 list_for_each(qe, &ioc->notify_q) {
1097 notify = (struct bfa_ioc_notify *)qe;
1098 notify->cbfn(notify->cbarg, event);
1103 bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1105 ioc->cbfn->disable_cbfn(ioc->bfa);
1106 bfa_ioc_event_notify(ioc, BFA_IOC_E_DISABLED);
1110 bfa_nw_ioc_sem_get(void __iomem *sem_reg)
1114 #define BFA_SEM_SPINCNT 3000
1116 r32 = readl(sem_reg);
1118 while ((r32 & 1) && (cnt < BFA_SEM_SPINCNT)) {
1121 r32 = readl(sem_reg);
1131 bfa_nw_ioc_sem_release(void __iomem *sem_reg)
1137 /* Clear fwver hdr */
1139 bfa_ioc_fwver_clear(struct bfa_ioc *ioc)
1141 u32 pgnum, pgoff, loff = 0;
1144 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, loff);
1145 pgoff = PSS_SMEM_PGOFF(loff);
1146 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1148 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32)); i++) {
1149 writel(0, ioc->ioc_regs.smem_page_start + loff);
1150 loff += sizeof(u32);
1156 bfa_ioc_hw_sem_init(struct bfa_ioc *ioc)
1158 struct bfi_ioc_image_hdr fwhdr;
1161 /* Spin on init semaphore to serialize. */
1162 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1165 r32 = readl(ioc->ioc_regs.ioc_init_sem_reg);
1168 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1169 if (fwstate == BFI_IOC_UNINIT) {
1170 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1174 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1176 if (swab32(fwhdr.exec) == BFI_FWBOOT_TYPE_NORMAL) {
1177 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
1181 bfa_ioc_fwver_clear(ioc);
1182 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_UNINIT);
1183 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_UNINIT);
1186 * Try to lock and then unlock the semaphore.
1188 readl(ioc->ioc_regs.ioc_sem_reg);
1189 writel(1, ioc->ioc_regs.ioc_sem_reg);
1191 /* Unlock init semaphore */
1192 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
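/* The init semaphore taken above serializes first-time cleanup of a
 * stale firmware image: whichever function gets here first while the
 * f/w state is not UNINIT clears the version header and downgrades both
 * the current and the alternate f/w state to BFI_IOC_UNINIT.
 */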
1196 bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1201 * The first read of the semaphore register returns 0 (lock acquired);
1202 * subsequent reads return 1. The semaphore is released by writing 1 to the register.
1204 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1206 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEM_ERROR);
1210 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
1214 mod_timer(&ioc->sem_timer, jiffies +
1215 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
1219 bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
1221 writel(1, ioc->ioc_regs.ioc_sem_reg);
1225 bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1227 del_timer(&ioc->sem_timer);
1230 /* Initialize LPU local memory (aka secondary memory / SRAM) */
1232 bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1236 #define PSS_LMEM_INIT_TIME 10000
1238 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1239 pss_ctl &= ~__PSS_LMEM_RESET;
1240 pss_ctl |= __PSS_LMEM_INIT_EN;
1243 * i2c workaround 12.5khz clock
1245 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1246 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1249 * wait for memory initialization to be complete
1253 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1255 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1258 * If memory initialization is not successful, the IOC timeout will catch such failures.
1261 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1263 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1264 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
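/* The wait above is bounded to PSS_LMEM_INIT_TIME iterations; a stuck
 * __PSS_LMEM_INIT_DONE bit is treated as fatal via BUG_ON, since the
 * IOC cannot come up without its local memory initialized.
 */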
1268 bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1273 * Take processor out of reset.
1275 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1276 pss_ctl &= ~__PSS_LPU0_RESET;
1278 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1282 bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1287 * Put processors in reset.
1289 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1290 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1292 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1295 /* Get driver and firmware versions. */
1297 bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1302 u32 *fwsig = (u32 *) fwhdr;
1304 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
1305 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1307 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1310 swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1311 loff += sizeof(u32);
1316 bfa_ioc_fwver_md5_check(struct bfi_ioc_image_hdr *fwhdr_1,
1317 struct bfi_ioc_image_hdr *fwhdr_2)
1321 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1322 if (fwhdr_1->md5sum[i] != fwhdr_2->md5sum[i])
1329 /* Returns TRUE if the major, minor and maintenance versions are the same.
1330 * If the patch, phase and build versions also match, the MD5 checksums must match as well.
1333 bfa_ioc_fw_ver_compatible(struct bfi_ioc_image_hdr *drv_fwhdr,
1334 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
1336 if (drv_fwhdr->signature != fwhdr_to_cmp->signature)
1338 if (drv_fwhdr->fwver.major != fwhdr_to_cmp->fwver.major)
1340 if (drv_fwhdr->fwver.minor != fwhdr_to_cmp->fwver.minor)
1342 if (drv_fwhdr->fwver.maint != fwhdr_to_cmp->fwver.maint)
1344 if (drv_fwhdr->fwver.patch == fwhdr_to_cmp->fwver.patch &&
1345 drv_fwhdr->fwver.phase == fwhdr_to_cmp->fwver.phase &&
1346 drv_fwhdr->fwver.build == fwhdr_to_cmp->fwver.build)
1347 return bfa_ioc_fwver_md5_check(drv_fwhdr, fwhdr_to_cmp);
1353 bfa_ioc_flash_fwver_valid(struct bfi_ioc_image_hdr *flash_fwhdr)
1355 if (flash_fwhdr->fwver.major == 0 || flash_fwhdr->fwver.major == 0xFF)
1362 fwhdr_is_ga(struct bfi_ioc_image_hdr *fwhdr)
1364 if (fwhdr->fwver.phase == 0 &&
1365 fwhdr->fwver.build == 0)
1371 /* Returns how fwhdr_to_cmp compares to base_fwhdr: incompatible, better, older or the same. */
1372 static enum bfi_ioc_img_ver_cmp
1373 bfa_ioc_fw_ver_patch_cmp(struct bfi_ioc_image_hdr *base_fwhdr,
1374 struct bfi_ioc_image_hdr *fwhdr_to_cmp)
1376 if (!bfa_ioc_fw_ver_compatible(base_fwhdr, fwhdr_to_cmp))
1377 return BFI_IOC_IMG_VER_INCOMP;
1379 if (fwhdr_to_cmp->fwver.patch > base_fwhdr->fwver.patch)
1380 return BFI_IOC_IMG_VER_BETTER;
1381 else if (fwhdr_to_cmp->fwver.patch < base_fwhdr->fwver.patch)
1382 return BFI_IOC_IMG_VER_OLD;
1384 /* GA takes priority over internal builds of the same patch stream.
1385 * At this point the major, minor, maint and patch numbers are the same.
1387 if (fwhdr_is_ga(base_fwhdr))
1388 if (fwhdr_is_ga(fwhdr_to_cmp))
1389 return BFI_IOC_IMG_VER_SAME;
1391 return BFI_IOC_IMG_VER_OLD;
1393 if (fwhdr_is_ga(fwhdr_to_cmp))
1394 return BFI_IOC_IMG_VER_BETTER;
1396 if (fwhdr_to_cmp->fwver.phase > base_fwhdr->fwver.phase)
1397 return BFI_IOC_IMG_VER_BETTER;
1398 else if (fwhdr_to_cmp->fwver.phase < base_fwhdr->fwver.phase)
1399 return BFI_IOC_IMG_VER_OLD;
1401 if (fwhdr_to_cmp->fwver.build > base_fwhdr->fwver.build)
1402 return BFI_IOC_IMG_VER_BETTER;
1403 else if (fwhdr_to_cmp->fwver.build < base_fwhdr->fwver.build)
1404 return BFI_IOC_IMG_VER_OLD;
1406 /* All version numbers are equal.
1407 * The MD5 check is done as part of the compatibility check.
1409 return BFI_IOC_IMG_VER_SAME;
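/* Precedence used above when ranking two compatible images: patch
 * number first, then GA over internal builds of the same patch stream,
 * then phase, then build; full ties defer to the MD5 comparison
 * performed in the compatibility check.
 */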
1412 /* register definitions */
1413 #define FLI_CMD_REG 0x0001d000
1414 #define FLI_WRDATA_REG 0x0001d00c
1415 #define FLI_RDDATA_REG 0x0001d010
1416 #define FLI_ADDR_REG 0x0001d004
1417 #define FLI_DEV_STATUS_REG 0x0001d014
1419 #define BFA_FLASH_FIFO_SIZE 128 /* fifo size */
1420 #define BFA_FLASH_CHECK_MAX 10000 /* max # of status check */
1421 #define BFA_FLASH_BLOCKING_OP_MAX 1000000 /* max # of blocking op check */
1422 #define BFA_FLASH_WIP_MASK 0x01 /* write in progress bit mask */
1424 #define NFC_STATE_RUNNING 0x20000001
1425 #define NFC_STATE_PAUSED 0x00004560
1426 #define NFC_VER_VALID 0x147
1428 enum bfa_flash_cmd {
1429 BFA_FLASH_FAST_READ = 0x0b, /* fast read */
1430 BFA_FLASH_WRITE_ENABLE = 0x06, /* write enable */
1431 BFA_FLASH_SECTOR_ERASE = 0xd8, /* sector erase */
1432 BFA_FLASH_WRITE = 0x02, /* write */
1433 BFA_FLASH_READ_STATUS = 0x05, /* read status */
1436 /* hardware error definition */
1437 enum bfa_flash_err {
1438 BFA_FLASH_NOT_PRESENT = -1, /*!< flash not present */
1439 BFA_FLASH_UNINIT = -2, /*!< flash not initialized */
1440 BFA_FLASH_BAD = -3, /*!< flash bad */
1441 BFA_FLASH_BUSY = -4, /*!< flash busy */
1442 BFA_FLASH_ERR_CMD_ACT = -5, /*!< command active never cleared */
1443 BFA_FLASH_ERR_FIFO_CNT = -6, /*!< fifo count never cleared */
1444 BFA_FLASH_ERR_WIP = -7, /*!< write-in-progress never cleared */
1445 BFA_FLASH_ERR_TIMEOUT = -8, /*!< fli timeout */
1446 BFA_FLASH_ERR_LEN = -9, /*!< invalid length */
1449 /* flash command register data structure */
1450 union bfa_flash_cmd_reg {
1471 /* flash device status register data structure */
1472 union bfa_flash_dev_status_reg {
1495 /* flash address register data structure */
1496 union bfa_flash_addr_reg {
1509 /* Flash raw private functions */
1511 bfa_flash_set_cmd(void __iomem *pci_bar, u8 wr_cnt,
1512 u8 rd_cnt, u8 ad_cnt, u8 op)
1514 union bfa_flash_cmd_reg cmd;
1518 cmd.r.write_cnt = wr_cnt;
1519 cmd.r.read_cnt = rd_cnt;
1520 cmd.r.addr_cnt = ad_cnt;
1522 writel(cmd.i, (pci_bar + FLI_CMD_REG));
1526 bfa_flash_set_addr(void __iomem *pci_bar, u32 address)
1528 union bfa_flash_addr_reg addr;
1530 addr.r.addr = address & 0x00ffffff;
1532 writel(addr.i, (pci_bar + FLI_ADDR_REG));
1536 bfa_flash_cmd_act_check(void __iomem *pci_bar)
1538 union bfa_flash_cmd_reg cmd;
1540 cmd.i = readl(pci_bar + FLI_CMD_REG);
1543 return BFA_FLASH_ERR_CMD_ACT;
1548 /* Flush FLI data fifo. */
1550 bfa_flash_fifo_flush(void __iomem *pci_bar)
1554 union bfa_flash_dev_status_reg dev_status;
1556 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
1558 if (!dev_status.r.fifo_cnt)
1561 /* fifo counter in terms of words */
1562 for (i = 0; i < dev_status.r.fifo_cnt; i++)
1563 t = readl(pci_bar + FLI_RDDATA_REG);
1565 /* Check the device status. It may take some time. */
1566 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
1567 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
1568 if (!dev_status.r.fifo_cnt)
1572 if (dev_status.r.fifo_cnt)
1573 return BFA_FLASH_ERR_FIFO_CNT;
1578 /* Read flash status. */
1580 bfa_flash_status_read(void __iomem *pci_bar)
1582 union bfa_flash_dev_status_reg dev_status;
1587 status = bfa_flash_fifo_flush(pci_bar);
1591 bfa_flash_set_cmd(pci_bar, 0, 4, 0, BFA_FLASH_READ_STATUS);
1593 for (i = 0; i < BFA_FLASH_CHECK_MAX; i++) {
1594 status = bfa_flash_cmd_act_check(pci_bar);
1602 dev_status.i = readl(pci_bar + FLI_DEV_STATUS_REG);
1603 if (!dev_status.r.fifo_cnt)
1604 return BFA_FLASH_BUSY;
1606 ret_status = readl(pci_bar + FLI_RDDATA_REG);
1609 status = bfa_flash_fifo_flush(pci_bar);
1616 /* Start flash read operation. */
1618 bfa_flash_read_start(void __iomem *pci_bar, u32 offset, u32 len,
1623 /* len must be a multiple of 4 and must not exceed the fifo size */
1624 if (len == 0 || len > BFA_FLASH_FIFO_SIZE || (len & 0x03) != 0)
1625 return BFA_FLASH_ERR_LEN;
1628 status = bfa_flash_status_read(pci_bar);
1629 if (status == BFA_FLASH_BUSY)
1630 status = bfa_flash_status_read(pci_bar);
1635 /* check if write-in-progress bit is cleared */
1636 if (status & BFA_FLASH_WIP_MASK)
1637 return BFA_FLASH_ERR_WIP;
1639 bfa_flash_set_addr(pci_bar, offset);
1641 bfa_flash_set_cmd(pci_bar, 0, (u8)len, 4, BFA_FLASH_FAST_READ);
1646 /* Check flash read operation. */
1648 bfa_flash_read_check(void __iomem *pci_bar)
1650 if (bfa_flash_cmd_act_check(pci_bar))
1656 /* End flash read operation. */
1658 bfa_flash_read_end(void __iomem *pci_bar, u32 len, char *buf)
1662 /* read data fifo up to 32 words */
1663 for (i = 0; i < len; i += 4) {
1664 u32 w = readl(pci_bar + FLI_RDDATA_REG);
1665 *((u32 *)(buf + i)) = swab32(w);
1668 bfa_flash_fifo_flush(pci_bar);
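/* A complete FLI read is therefore: flush the fifo, verify no write is
 * in progress, program FLI_ADDR_REG, issue BFA_FLASH_FAST_READ with the
 * word count, poll the command-active bit, then drain FLI_RDDATA_REG
 * word by word as above.
 */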
1671 /* Perform flash raw read. */
1673 #define FLASH_BLOCKING_OP_MAX 500
1674 #define FLASH_SEM_LOCK_REG 0x18820
1677 bfa_raw_sem_get(void __iomem *bar)
1681 locked = readl((bar + FLASH_SEM_LOCK_REG));
1686 static enum bfa_status
1687 bfa_flash_sem_get(void __iomem *bar)
1689 u32 n = FLASH_BLOCKING_OP_MAX;
1691 while (!bfa_raw_sem_get(bar)) {
1693 return BFA_STATUS_BADFLASH;
1696 return BFA_STATUS_OK;
1700 bfa_flash_sem_put(void __iomem *bar)
1702 writel(0, (bar + FLASH_SEM_LOCK_REG));
1705 static enum bfa_status
1706 bfa_flash_raw_read(void __iomem *pci_bar, u32 offset, char *buf,
1710 u32 off, l, s, residue, fifo_sz;
1714 fifo_sz = BFA_FLASH_FIFO_SIZE;
1715 status = bfa_flash_sem_get(pci_bar);
1716 if (status != BFA_STATUS_OK)
1722 l = (n + 1) * fifo_sz - s;
1726 status = bfa_flash_read_start(pci_bar, offset + off, l,
1729 bfa_flash_sem_put(pci_bar);
1730 return BFA_STATUS_FAILED;
1733 n = BFA_FLASH_BLOCKING_OP_MAX;
1734 while (bfa_flash_read_check(pci_bar)) {
1736 bfa_flash_sem_put(pci_bar);
1737 return BFA_STATUS_FAILED;
1741 bfa_flash_read_end(pci_bar, l, &buf[off]);
1746 bfa_flash_sem_put(pci_bar);
1748 return BFA_STATUS_OK;
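/* bfa_flash_raw_read() serializes on FLASH_SEM_LOCK_REG and moves at
 * most BFA_FLASH_FIFO_SIZE bytes per iteration, busy-waiting up to
 * BFA_FLASH_BLOCKING_OP_MAX status checks for each chunk; the
 * semaphore is released once the whole buffer has been read.
 */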
1751 #define BFA_FLASH_PART_FWIMG_ADDR 0x100000 /* fw image address */
1753 static enum bfa_status
1754 bfa_nw_ioc_flash_img_get_chnk(struct bfa_ioc *ioc, u32 off,
1757 return bfa_flash_raw_read(ioc->pcidev.pci_bar_kva,
1758 BFA_FLASH_PART_FWIMG_ADDR + (off * sizeof(u32)),
1759 (char *)fwimg, BFI_FLASH_CHUNK_SZ);
1762 static enum bfi_ioc_img_ver_cmp
1763 bfa_ioc_flash_fwver_cmp(struct bfa_ioc *ioc,
1764 struct bfi_ioc_image_hdr *base_fwhdr)
1766 struct bfi_ioc_image_hdr *flash_fwhdr;
1767 enum bfa_status status;
1768 u32 fwimg[BFI_FLASH_CHUNK_SZ_WORDS];
1770 status = bfa_nw_ioc_flash_img_get_chnk(ioc, 0, fwimg);
1771 if (status != BFA_STATUS_OK)
1772 return BFI_IOC_IMG_VER_INCOMP;
1774 flash_fwhdr = (struct bfi_ioc_image_hdr *)fwimg;
1775 if (bfa_ioc_flash_fwver_valid(flash_fwhdr))
1776 return bfa_ioc_fw_ver_patch_cmp(base_fwhdr, flash_fwhdr);
1778 return BFI_IOC_IMG_VER_INCOMP;
1782 * Returns TRUE if driver is willing to work with current smem f/w version.
1785 bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
1787 struct bfi_ioc_image_hdr *drv_fwhdr;
1788 enum bfi_ioc_img_ver_cmp smem_flash_cmp, drv_smem_cmp;
1790 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1791 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
1793 /* If smem is incompatible or old, driver should not work with it. */
1794 drv_smem_cmp = bfa_ioc_fw_ver_patch_cmp(drv_fwhdr, fwhdr);
1795 if (drv_smem_cmp == BFI_IOC_IMG_VER_INCOMP ||
1796 drv_smem_cmp == BFI_IOC_IMG_VER_OLD) {
1800 /* If flash has a better f/w than smem, do not work with smem.
1801 * If smem f/w == flash f/w, work with smem, since smem f/w is neither old nor incompatible.
1802 * If flash f/w is old or incompatible, work with smem only if smem f/w == drv f/w.
1804 smem_flash_cmp = bfa_ioc_flash_fwver_cmp(ioc, fwhdr);
1806 if (smem_flash_cmp == BFI_IOC_IMG_VER_BETTER)
1808 else if (smem_flash_cmp == BFI_IOC_IMG_VER_SAME)
1811 return (drv_smem_cmp == BFI_IOC_IMG_VER_SAME) ?
1815 /* Return true if current running version is valid. Firmware signature and
1816 * execution context (driver/bios) must match.
1819 bfa_ioc_fwver_valid(struct bfa_ioc *ioc, u32 boot_env)
1821 struct bfi_ioc_image_hdr fwhdr;
1823 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
1824 if (swab32(fwhdr.bootenv) != boot_env)
1827 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
1830 /* Conditionally flush any pending message from firmware at start. */
1832 bfa_ioc_msgflush(struct bfa_ioc *ioc)
1836 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1838 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1842 bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1844 enum bfi_ioc_state ioc_fwstate;
1848 ioc_fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);
1851 ioc_fwstate = BFI_IOC_UNINIT;
1853 boot_env = BFI_FWBOOT_ENV_OS;
1856 * check if firmware is valid
1858 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1859 false : bfa_ioc_fwver_valid(ioc, boot_env);
1862 if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
1864 bfa_ioc_poll_fwinit(ioc);
1870 * If hardware initialization is in progress (initialized by other IOC),
1871 * just wait for an initialization completion interrupt.
1873 if (ioc_fwstate == BFI_IOC_INITING) {
1874 bfa_ioc_poll_fwinit(ioc);
1879 * If IOC function is disabled and firmware version is same,
1880 * just re-enable IOC.
1882 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
1884 * When using MSI-X any pending firmware ready event should
1885 * be flushed. Otherwise MSI-X interrupts are not delivered.
1887 bfa_ioc_msgflush(ioc);
1888 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
1893 * Initialize the h/w for any other states.
1895 if (bfa_ioc_boot(ioc, BFI_FWBOOT_TYPE_NORMAL, boot_env) ==
1897 bfa_ioc_poll_fwinit(ioc);
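/* To summarize the paths above: boot fresh firmware when the running
 * image is invalid, poll for completion when the other function is
 * already initializing, re-enable directly on DISABLED/OP with a valid
 * image (flushing any stale ready event for MSI-X), else reboot.
 */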
1901 bfa_nw_ioc_timeout(void *ioc_arg)
1903 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1905 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1909 bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1911 u32 *msgp = (u32 *) ioc_msg;
1914 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1917 * first write msg to mailbox registers
1919 for (i = 0; i < len / sizeof(u32); i++)
1920 writel(cpu_to_le32(msgp[i]),
1921 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1923 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1924 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1927 * write 1 to mailbox CMD to trigger LPU event
1929 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1930 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
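/* The trailing readl() flushes the posted doorbell write, ensuring the
 * LPU event has actually reached the adapter before this returns.
 */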
1934 bfa_ioc_send_enable(struct bfa_ioc *ioc)
1936 struct bfi_ioc_ctrl_req enable_req;
1939 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1940 bfa_ioc_portid(ioc));
1941 enable_req.clscode = htons(ioc->clscode);
1942 do_gettimeofday(&tv);
1943 enable_req.tv_sec = ntohl(tv.tv_sec);
1944 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1948 bfa_ioc_send_disable(struct bfa_ioc *ioc)
1950 struct bfi_ioc_ctrl_req disable_req;
1952 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1953 bfa_ioc_portid(ioc));
1954 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1958 bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1960 struct bfi_ioc_getattr_req attr_req;
1962 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1963 bfa_ioc_portid(ioc));
1964 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1965 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1969 bfa_nw_ioc_hb_check(void *cbarg)
1971 struct bfa_ioc *ioc = cbarg;
1974 hb_count = readl(ioc->ioc_regs.heartbeat);
1975 if (ioc->hb_count == hb_count) {
1976 bfa_ioc_recover(ioc);
1979 ioc->hb_count = hb_count;
1982 bfa_ioc_mbox_poll(ioc);
1983 mod_timer(&ioc->hb_timer, jiffies +
1984 msecs_to_jiffies(BFA_IOC_HB_TOV));
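/* Heartbeat scheme: firmware increments ioc_regs.heartbeat; if the
 * count has not moved across one BFA_IOC_HB_TOV period the IOC is
 * presumed dead and bfa_ioc_recover() (not shown here) escalates the
 * failure. Otherwise pending mailbox commands are given a kick.
 */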
1988 bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1990 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1991 mod_timer(&ioc->hb_timer, jiffies +
1992 msecs_to_jiffies(BFA_IOC_HB_TOV));
1996 bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1998 del_timer(&ioc->hb_timer);
2001 /* Initiate a full firmware download. */
2002 static enum bfa_status
2003 bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
2013 u32 fwimg_buf[BFI_FLASH_CHUNK_SZ_WORDS];
2014 enum bfa_status status;
2016 if (boot_env == BFI_FWBOOT_ENV_OS &&
2017 boot_type == BFI_FWBOOT_TYPE_FLASH) {
2018 fwimg_size = BFI_FLASH_IMAGE_SZ/sizeof(u32);
2020 status = bfa_nw_ioc_flash_img_get_chnk(ioc,
2021 BFA_IOC_FLASH_CHUNK_ADDR(chunkno), fwimg_buf);
2022 if (status != BFA_STATUS_OK)
2027 fwimg_size = bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc));
2028 fwimg = bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc),
2029 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
2032 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
2034 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2036 for (i = 0; i < fwimg_size; i++) {
2037 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
2038 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
2039 if (boot_env == BFI_FWBOOT_ENV_OS &&
2040 boot_type == BFI_FWBOOT_TYPE_FLASH) {
2041 status = bfa_nw_ioc_flash_img_get_chnk(ioc,
2042 BFA_IOC_FLASH_CHUNK_ADDR(chunkno),
2044 if (status != BFA_STATUS_OK)
2049 fwimg = bfa_cb_image_get_chunk(
2050 bfa_ioc_asic_gen(ioc),
2051 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
2058 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
2059 ((ioc->ioc_regs.smem_page_start) + (loff)));
2061 loff += sizeof(u32);
2064 * handle page offset wrap around
2066 loff = PSS_SMEM_PGOFF(loff);
2070 ioc->ioc_regs.host_page_num_fn);
2074 writel(bfa_ioc_smem_pgnum(ioc, 0),
2075 ioc->ioc_regs.host_page_num_fn);
2078 * Set boot type, env and device mode at the end.
2080 if (boot_env == BFI_FWBOOT_ENV_OS &&
2081 boot_type == BFI_FWBOOT_TYPE_FLASH) {
2082 boot_type = BFI_FWBOOT_TYPE_NORMAL;
2084 asicmode = BFI_FWBOOT_DEVMODE(ioc->asic_gen, ioc->asic_mode,
2085 ioc->port0_mode, ioc->port1_mode);
2086 writel(asicmode, ((ioc->ioc_regs.smem_page_start)
2087 + BFI_FWBOOT_DEVMODE_OFF));
2088 writel(boot_type, ((ioc->ioc_regs.smem_page_start)
2089 + (BFI_FWBOOT_TYPE_OFF)));
2090 writel(boot_env, ((ioc->ioc_regs.smem_page_start)
2091 + (BFI_FWBOOT_ENV_OFF)));
2092 return BFA_STATUS_OK;
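/* The download loop streams the image one 32-bit word at a time through
 * the smem page window: loff wraps at every page boundary and
 * host_page_num_fn is advanced to slide the window across the image.
 */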
2096 bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
2098 bfa_ioc_hwinit(ioc, force);
2101 /* BFA ioc enable reply by firmware */
2103 bfa_ioc_enable_reply(struct bfa_ioc *ioc, enum bfa_mode port_mode,
2106 struct bfa_iocpf *iocpf = &ioc->iocpf;
2108 ioc->port_mode = ioc->port_mode_cfg = port_mode;
2109 ioc->ad_cap_bm = cap_bm;
2110 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
2113 /* Update BFA configuration from firmware configuration. */
2115 bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
2117 struct bfi_ioc_attr *attr = ioc->attr;
2119 attr->adapter_prop = ntohl(attr->adapter_prop);
2120 attr->card_type = ntohl(attr->card_type);
2121 attr->maxfrsize = ntohs(attr->maxfrsize);
2123 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
2126 /* Attach time initialization of mbox logic. */
2128 bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
2130 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2133 INIT_LIST_HEAD(&mod->cmd_q);
2134 for (mc = 0; mc < BFI_MC_MAX; mc++) {
2135 mod->mbhdlr[mc].cbfn = NULL;
2136 mod->mbhdlr[mc].cbarg = ioc->bfa;
2140 /* Mbox poll timer -- restarts any pending mailbox requests. */
2142 bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
2144 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2145 struct bfa_mbox_cmd *cmd;
2146 bfa_mbox_cmd_cbfn_t cbfn;
2151 * If no command pending, do nothing
2153 if (list_empty(&mod->cmd_q))
2157 * If previous command is not yet fetched by firmware, do nothing
2159 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2164 * Enqueue command to firmware.
2166 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
2168 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
2171 * Give a callback to the client, indicating that the command is sent
2181 /* Cleanup any pending requests. */
2183 bfa_ioc_mbox_flush(struct bfa_ioc *ioc)
2185 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2186 struct bfa_mbox_cmd *cmd;
2188 while (!list_empty(&mod->cmd_q)) {
2189 cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);
2195 * bfa_nw_ioc_smem_read - Read data from SMEM to host through PCI memmap
2197 * @ioc: memory for IOC
2198 * @tbuf: app memory to store data from smem
2199 * @soff: smem offset
2200 * @sz: size of smem in bytes
2203 bfa_nw_ioc_smem_read(struct bfa_ioc *ioc, void *tbuf, u32 soff, u32 sz)
2205 u32 pgnum, loff, r32;
2209 pgnum = PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, soff);
2210 loff = PSS_SMEM_PGOFF(soff);
2213 * Hold semaphore to serialize pll init and fwtrc.
2215 if (!bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
2218 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2220 len = sz/sizeof(u32);
2221 for (i = 0; i < len; i++) {
2222 r32 = swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
2223 buf[i] = be32_to_cpu(r32);
2224 loff += sizeof(u32);
2227 * handle page offset wrap around
2229 loff = PSS_SMEM_PGOFF(loff);
2232 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
2236 writel(PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, 0),
2237 ioc->ioc_regs.host_page_num_fn);
2242 readl(ioc->ioc_regs.ioc_init_sem_reg);
2243 writel(1, ioc->ioc_regs.ioc_init_sem_reg);
2247 /* Retrieve saved firmware trace from a prior IOC failure. */
2249 bfa_nw_ioc_debug_fwtrc(struct bfa_ioc *ioc, void *trcdata, int *trclen)
2251 u32 loff = BFI_IOC_TRC_OFF + BNA_DBG_FWTRC_LEN * ioc->port_id;
2252 int tlen, status = 0;
2255 if (tlen > BNA_DBG_FWTRC_LEN)
2256 tlen = BNA_DBG_FWTRC_LEN;
2258 status = bfa_nw_ioc_smem_read(ioc, trcdata, loff, tlen);
2263 /* Save firmware trace if configured. */
2265 bfa_nw_ioc_debug_save_ftrc(struct bfa_ioc *ioc)
2269 if (ioc->dbg_fwsave_once) {
2270 ioc->dbg_fwsave_once = false;
2271 if (ioc->dbg_fwsave_len) {
2272 tlen = ioc->dbg_fwsave_len;
2273 bfa_nw_ioc_debug_fwtrc(ioc, ioc->dbg_fwsave, &tlen);
2278 /* Retrieve saved firmware trace from a prior IOC failure. */
2280 bfa_nw_ioc_debug_fwsave(struct bfa_ioc *ioc, void *trcdata, int *trclen)
2284 if (ioc->dbg_fwsave_len == 0)
2285 return BFA_STATUS_ENOFSAVE;
2288 if (tlen > ioc->dbg_fwsave_len)
2289 tlen = ioc->dbg_fwsave_len;
2291 memcpy(trcdata, ioc->dbg_fwsave, tlen);
2293 return BFA_STATUS_OK;
2297 bfa_ioc_fail_notify(struct bfa_ioc *ioc)
2300 * Notify driver and common modules registered for notification.
2302 ioc->cbfn->hbfail_cbfn(ioc->bfa);
2303 bfa_ioc_event_notify(ioc, BFA_IOC_E_FAILED);
2304 bfa_nw_ioc_debug_save_ftrc(ioc);
2307 /* IOCPF to IOC interface */
2309 bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
2311 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
2315 bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
2317 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
2321 bfa_ioc_pf_failed(struct bfa_ioc *ioc)
2323 bfa_fsm_send_event(ioc, IOC_E_PFFAILED);
2327 bfa_ioc_pf_hwfailed(struct bfa_ioc *ioc)
2329 bfa_fsm_send_event(ioc, IOC_E_HWFAILED);
2333 bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
2336 * Provide enable completion callback and AEN notification.
2338 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
2342 static enum bfa_status
2343 bfa_ioc_pll_init(struct bfa_ioc *ioc)
2346 * Hold semaphore so that nobody can access the chip during init.
2348 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
2350 bfa_ioc_pll_init_asic(ioc);
2352 ioc->pllinit = true;
2354 /* Initialize LMEM */
2355 bfa_ioc_lmem_init(ioc);
2358 * release semaphore.
2360 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
2362 return BFA_STATUS_OK;
2365 /* Interface used by diag module to do firmware boot with memory test
2366 * as the entry vector.
2368 static enum bfa_status
2369 bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
2372 struct bfi_ioc_image_hdr *drv_fwhdr;
2373 enum bfa_status status;
2374 bfa_ioc_stats(ioc, ioc_boots);
2376 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
2377 return BFA_STATUS_FAILED;
2378 if (boot_env == BFI_FWBOOT_ENV_OS &&
2379 boot_type == BFI_FWBOOT_TYPE_NORMAL) {
2380 drv_fwhdr = (struct bfi_ioc_image_hdr *)
2381 bfa_cb_image_get_chunk(bfa_ioc_asic_gen(ioc), 0);
2382 /* Work with flash only if the flash f/w is better than the driver f/w.
2383 * Otherwise push the driver's firmware.
2385 if (bfa_ioc_flash_fwver_cmp(ioc, drv_fwhdr) ==
2386 BFI_IOC_IMG_VER_BETTER)
2387 boot_type = BFI_FWBOOT_TYPE_FLASH;
2391 * Initialize IOC state of all functions on a chip reset.
2393 if (boot_type == BFI_FWBOOT_TYPE_MEMTEST) {
2394 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2395 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_MEMTEST);
2397 bfa_ioc_set_cur_ioc_fwstate(ioc, BFI_IOC_INITING);
2398 bfa_ioc_set_alt_ioc_fwstate(ioc, BFI_IOC_INITING);
2401 bfa_ioc_msgflush(ioc);
2402 status = bfa_ioc_download_fw(ioc, boot_type, boot_env);
2403 if (status == BFA_STATUS_OK)
2404 bfa_ioc_lpu_start(ioc);
2406 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2411 /* Enable/disable IOC failure auto recovery. */
2413 bfa_nw_ioc_auto_recover(bool auto_recover)
2415 bfa_nw_auto_recover = auto_recover;
2419 bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
2425 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
2432 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
2434 r32 = readl(ioc->ioc_regs.lpu_mbox +
2436 msgp[i] = htonl(r32);
2440 * turn off mailbox interrupt by clearing mailbox status
2442 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
2443 readl(ioc->ioc_regs.lpu_mbox_cmd);
2449 bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
2451 union bfi_ioc_i2h_msg_u *msg;
2452 struct bfa_iocpf *iocpf = &ioc->iocpf;
2454 msg = (union bfi_ioc_i2h_msg_u *) m;
2456 bfa_ioc_stats(ioc, ioc_isrs);
2458 switch (msg->mh.msg_id) {
2459 case BFI_IOC_I2H_HBEAT:
2462 case BFI_IOC_I2H_ENABLE_REPLY:
2463 bfa_ioc_enable_reply(ioc,
2464 (enum bfa_mode)msg->fw_event.port_mode,
2465 msg->fw_event.cap_bm);
2468 case BFI_IOC_I2H_DISABLE_REPLY:
2469 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
2472 case BFI_IOC_I2H_GETATTR_REPLY:
2473 bfa_ioc_getattr_reply(ioc);
2482 * bfa_nw_ioc_attach - IOC attach time initialization and setup.
2484 * @ioc: memory for IOC
2485 * @bfa: driver instance structure
2488 bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
2492 ioc->fcmode = false;
2493 ioc->pllinit = false;
2494 ioc->dbg_fwsave_once = true;
2495 ioc->iocpf.ioc = ioc;
2497 bfa_ioc_mbox_attach(ioc);
2498 INIT_LIST_HEAD(&ioc->notify_q);
2500 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
2501 bfa_fsm_send_event(ioc, IOC_E_RESET);
2504 /* Driver detach time IOC cleanup. */
2506 bfa_nw_ioc_detach(struct bfa_ioc *ioc)
2508 bfa_fsm_send_event(ioc, IOC_E_DETACH);
2510 /* Done with detach, empty the notify_q. */
2511 INIT_LIST_HEAD(&ioc->notify_q);
2515 * bfa_nw_ioc_pci_init - Setup IOC PCI properties.
2517 * @pcidev: PCI device information for this IOC
2520 bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
2521 enum bfi_pcifn_class clscode)
2523 ioc->clscode = clscode;
2524 ioc->pcidev = *pcidev;
2527 * Initialize IOC and device personality
2529 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_FC;
2530 ioc->asic_mode = BFI_ASIC_MODE_FC;
2532 switch (pcidev->device_id) {
2533 case PCI_DEVICE_ID_BROCADE_CT:
2534 ioc->asic_gen = BFI_ASIC_GEN_CT;
2535 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2536 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2537 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_CNA;
2538 ioc->ad_cap_bm = BFA_CM_CNA;
2541 case BFA_PCI_DEVICE_ID_CT2:
2542 ioc->asic_gen = BFI_ASIC_GEN_CT2;
2543 if (clscode == BFI_PCIFN_CLASS_FC &&
2544 pcidev->ssid == BFA_PCI_CT2_SSID_FC) {
2545 ioc->asic_mode = BFI_ASIC_MODE_FC16;
2547 ioc->port_mode = ioc->port_mode_cfg = BFA_MODE_HBA;
2548 ioc->ad_cap_bm = BFA_CM_HBA;
2550 ioc->port0_mode = ioc->port1_mode = BFI_PORT_MODE_ETH;
2551 ioc->asic_mode = BFI_ASIC_MODE_ETH;
2552 if (pcidev->ssid == BFA_PCI_CT2_SSID_FCoE) {
2554 ioc->port_mode_cfg = BFA_MODE_CNA;
2555 ioc->ad_cap_bm = BFA_CM_CNA;
2558 ioc->port_mode_cfg = BFA_MODE_NIC;
2559 ioc->ad_cap_bm = BFA_CM_NIC;
2569 * Set asic specific interfaces.
2571 if (ioc->asic_gen == BFI_ASIC_GEN_CT)
2572 bfa_nw_ioc_set_ct_hwif(ioc);
2574 WARN_ON(ioc->asic_gen != BFI_ASIC_GEN_CT2);
2575 bfa_nw_ioc_set_ct2_hwif(ioc);
2576 bfa_nw_ioc_ct2_poweron(ioc);
2579 bfa_ioc_map_port(ioc);
2580 bfa_ioc_reg_init(ioc);
2584 * bfa_nw_ioc_mem_claim - Initialize IOC dma memory
2586 * @dm_kva: kernel virtual address of IOC dma memory
2587 * @dm_pa: physical address of IOC dma memory
2590 bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
2593 * dma memory for firmware attribute
2595 ioc->attr_dma.kva = dm_kva;
2596 ioc->attr_dma.pa = dm_pa;
2597 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
2600 /* Return size of dma memory required. */
2602 bfa_nw_ioc_meminfo(void)
2604 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
2608 bfa_nw_ioc_enable(struct bfa_ioc *ioc)
2610 bfa_ioc_stats(ioc, ioc_enables);
2611 ioc->dbg_fwsave_once = true;
2613 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
2617 bfa_nw_ioc_disable(struct bfa_ioc *ioc)
2619 bfa_ioc_stats(ioc, ioc_disables);
2620 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
2623 /* Initialize memory for saving firmware trace. */
2625 bfa_nw_ioc_debug_memclaim(struct bfa_ioc *ioc, void *dbg_fwsave)
2627 ioc->dbg_fwsave = dbg_fwsave;
2628 ioc->dbg_fwsave_len = ioc->iocpf.auto_recover ? BNA_DBG_FWTRC_LEN : 0;
2632 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
2634 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
2637 /* Register mailbox message handler function, to be called by common modules */
2639 bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
2640 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
2642 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2644 mod->mbhdlr[mc].cbfn = cbfn;
2645 mod->mbhdlr[mc].cbarg = cbarg;
2649 * bfa_nw_ioc_mbox_queue - Queue a mailbox command request to firmware.
2651 * @ioc: IOC instance
2652 * @cmd: Mailbox command
2654 * Waits if mailbox is busy. It is the caller's responsibility to serialize multiple requests.
2657 bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd,
2658 bfa_mbox_cmd_cbfn_t cbfn, void *cbarg)
2660 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
2667 * If a previous command is pending, queue new command
2669 if (!list_empty(&mod->cmd_q)) {
2670 list_add_tail(&cmd->qe, &mod->cmd_q);
2675 * If mailbox is busy, queue command for poll timer
2677 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
2679 list_add_tail(&cmd->qe, &mod->cmd_q);
2684 * mailbox is free -- queue command to firmware
2686 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
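/* Queueing discipline: a command is written to the mailbox directly
 * only when cmd_q is empty and the mailbox is free; otherwise it waits
 * on cmd_q for bfa_ioc_mbox_poll() to drain it from the timer. A client
 * would typically use it roughly like this (sketch; the message class
 * and handler names are illustrative, not from this file):
 *
 *	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_LL, my_rsp_handler, my_arg);
 *	... fill cmd->msg with a bfi request ...
 *	bfa_nw_ioc_mbox_queue(ioc, cmd, NULL, NULL);
 */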
/* Handle mailbox interrupts */
void
bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
{
	struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
	struct bfi_mbmsg m;
	int mc;

	if (bfa_ioc_msgget(ioc, &m)) {
		/*
		 * Treat IOC message class as special.
		 */
		mc = m.mh.msg_class;
		if (mc == BFI_MC_IOC) {
			bfa_ioc_isr(ioc, &m);
			return;
		}

		if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
			return;

		mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
	}

	bfa_ioc_lpu_read_stat(ioc);

	/*
	 * Try to send pending mailbox commands.
	 */
	bfa_ioc_mbox_poll(ioc);
}
void
bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
{
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HWERROR);
}
/* Return true if IOC is disabled. */
bool
bfa_nw_ioc_is_disabled(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabling) ||
		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
}
/* Return true if IOC is operational. */
bool
bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
{
	return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
}
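
/*
 * Usage sketch (guarded out; illustrative only): the standard guard before
 * issuing a request, exactly as the flash APIs below use it.
 */
#if 0
	if (!bfa_nw_ioc_is_operational(ioc))
		return BFA_STATUS_IOC_NON_OP;
#endif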
/* Add to IOC heartbeat failure notification queue. To be used by common
 * modules such as cee, port, diag.
 */
void
bfa_nw_ioc_notify_register(struct bfa_ioc *ioc,
			   struct bfa_ioc_notify *notify)
{
	list_add_tail(&notify->qe, &ioc->notify_q);
}
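
/*
 * Usage sketch (guarded out; illustrative only): how a common module hooks
 * IOC event notification, following the flash module below.
 * bfa_ioc_notify_init() binds the callback and argument;
 * bfa_nw_ioc_notify_register() enqueues the notify element.
 */
#if 0
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	bfa_nw_ioc_notify_register(ioc, &flash->ioc_notify);
#endif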
#define BFA_MFG_NAME "QLogic"
static void
bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
			 struct bfa_adapter_attr *ad_attr)
{
	struct bfi_ioc_attr *ioc_attr;

	ioc_attr = ioc->attr;

	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);

	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
	       sizeof(struct bfa_mfg_vpd));

	ad_attr->nports = bfa_ioc_get_nports(ioc);
	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);

	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
	/* For now, the model description uses the same model string. */
	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);

	ad_attr->card_type = ioc_attr->card_type;
	ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);

	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
		ad_attr->prototype = 1;
	else
		ad_attr->prototype = 0;

	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
	bfa_nw_ioc_get_mac(ioc, ad_attr->mac);

	ad_attr->pcie_gen = ioc_attr->pcie_gen;
	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
	ad_attr->asic_rev = ioc_attr->asic_rev;

	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
}
static enum bfa_ioc_type
bfa_ioc_get_type(struct bfa_ioc *ioc)
{
	if (ioc->clscode == BFI_PCIFN_CLASS_ETH)
		return BFA_IOC_TYPE_LL;

	BUG_ON(!(ioc->clscode == BFI_PCIFN_CLASS_FC));

	return (ioc->attr->port_mode == BFI_PORT_MODE_FC)
		? BFA_IOC_TYPE_FC : BFA_IOC_TYPE_FCoE;
}
static void
bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
{
	memcpy(serial_num,
	       (void *)ioc->attr->brcd_serialnum,
	       BFA_ADAPTER_SERIAL_NUM_LEN);
}
static void
bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
{
	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
}
static void
bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
{
	BUG_ON(!(chip_rev));

	memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);

	chip_rev[0] = 'R';
	chip_rev[1] = 'e';
	chip_rev[2] = 'v';
	chip_rev[3] = '-';
	chip_rev[4] = ioc->attr->asic_rev;
	chip_rev[5] = '\0';
}
static void
bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
{
	memcpy(optrom_ver, ioc->attr->optrom_version,
	       BFA_VERSION_LEN);
}
static void
bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
{
	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
}
static void
bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
{
	struct bfi_ioc_attr *ioc_attr;

	memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);

	ioc_attr = ioc->attr;

	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
		 BFA_MFG_NAME, ioc_attr->card_type);
}
static enum bfa_ioc_state
bfa_ioc_get_state(struct bfa_ioc *ioc)
{
	enum bfa_iocpf_state iocpf_st;
	enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);

	if (ioc_st == BFA_IOC_ENABLING ||
	    ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {

		iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

		switch (iocpf_st) {
		case BFA_IOCPF_SEMWAIT:
			ioc_st = BFA_IOC_SEMWAIT;
			break;

		case BFA_IOCPF_HWINIT:
			ioc_st = BFA_IOC_HWINIT;
			break;

		case BFA_IOCPF_FWMISMATCH:
			ioc_st = BFA_IOC_FWMISMATCH;
			break;

		case BFA_IOCPF_FAIL:
			ioc_st = BFA_IOC_FAIL;
			break;

		case BFA_IOCPF_INITFAIL:
			ioc_st = BFA_IOC_INITFAIL;
			break;

		default:
			break;
		}
	}
	return ioc_st;
}
void
bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
{
	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));

	ioc_attr->state = bfa_ioc_get_state(ioc);
	ioc_attr->port_id = bfa_ioc_portid(ioc);
	ioc_attr->port_mode = ioc->port_mode;

	ioc_attr->port_mode_cfg = ioc->port_mode_cfg;
	ioc_attr->cap_bm = ioc->ad_cap_bm;

	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);

	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);

	ioc_attr->pci_attr.device_id = bfa_ioc_devid(ioc);
	ioc_attr->pci_attr.pcifn = bfa_ioc_pcifn(ioc);
	ioc_attr->def_fn = bfa_ioc_is_default(ioc);
	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
}
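
/*
 * Usage sketch (guarded out; illustrative only): snapshotting IOC
 * attributes, e.g. from an ethtool or debugfs handler. "attr" lives on the
 * caller's stack; bfa_nw_ioc_get_attr() zeroes it before filling it in.
 */
#if 0
	struct bfa_ioc_attr attr;

	bfa_nw_ioc_get_attr(ioc, &attr);
	pr_info("IOC state %d on port %d\n",
		(int)attr.state, (int)attr.port_id);
#endif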
/* WWN public */
static u64
bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
{
	return ioc->attr->pwwn;
}
void
bfa_nw_ioc_get_mac(struct bfa_ioc *ioc, u8 *mac)
{
	ether_addr_copy(mac, ioc->attr->mac);
}
/* Firmware failure detected. Start recovery actions. */
static void
bfa_ioc_recover(struct bfa_ioc *ioc)
{
	pr_crit("Heartbeat of IOC has failed\n");
	bfa_ioc_stats(ioc, ioc_hbfails);
	bfa_ioc_stats_hb_count(ioc, ioc->hb_count);
	bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
}
/* BFA IOC PF private functions */

static void
bfa_iocpf_enable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
}

static void
bfa_iocpf_disable(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
}

static void
bfa_iocpf_fail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
}

static void
bfa_iocpf_initfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
}

static void
bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
}

static void
bfa_iocpf_stop(struct bfa_ioc *ioc)
{
	bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
}
void
bfa_nw_iocpf_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
	enum bfa_iocpf_state iocpf_st;

	iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);

	if (iocpf_st == BFA_IOCPF_HWINIT)
		bfa_ioc_poll_fwinit(ioc);
	else
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
}
void
bfa_nw_iocpf_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;

	bfa_ioc_hw_sem_get(ioc);
}
static void
bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
{
	u32 fwstate = bfa_ioc_get_cur_ioc_fwstate(ioc);

	if (fwstate == BFI_IOC_DISABLED) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
		return;
	}

	if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
	} else {
		ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
		mod_timer(&ioc->iocpf_timer, jiffies +
			  msecs_to_jiffies(BFA_IOC_POLL_TOV));
	}
}
/*
 * Flash module specific
 */

/*
 * The flash DMA buffer must be big enough to hold both the MFG block and
 * the ASIC block (64 KB) at the same time, and must be 2 KB aligned so
 * that a write segment never crosses a sector boundary.
 */
#define BFA_FLASH_SEG_SZ	2048
#define BFA_FLASH_DMA_BUF_SZ	\
	roundup(0x010000 + sizeof(struct bfa_mfg_block), BFA_FLASH_SEG_SZ)
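
/*
 * Worked example (informational; the struct size is a hypothetical value
 * chosen only for illustration): if sizeof(struct bfa_mfg_block) were 256,
 * BFA_FLASH_DMA_BUF_SZ = roundup(0x10000 + 256, 2048)
 *                      = roundup(65792, 2048) = 33 * 2048 = 67584 bytes.
 */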
static void
bfa_flash_cb(struct bfa_flash *flash)
{
	flash->op_busy = 0;
	if (flash->cbfn)
		flash->cbfn(flash->cbarg, flash->status);
}
static void
bfa_flash_notify(void *cbarg, enum bfa_ioc_event event)
{
	struct bfa_flash *flash = cbarg;

	switch (event) {
	case BFA_IOC_E_DISABLED:
	case BFA_IOC_E_FAILED:
		if (flash->op_busy) {
			flash->status = BFA_STATUS_IOC_FAILURE;
			flash->cbfn(flash->cbarg, flash->status);
			flash->op_busy = 0;
		}
		break;
	default:
		break;
	}
}
/*
 * Send flash write request.
 */
static void
bfa_flash_write_send(struct bfa_flash *flash)
{
	struct bfi_flash_write_req *msg =
		(struct bfi_flash_write_req *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	      flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);

	/* indicate if it's the last msg of the whole write operation */
	msg->last = (len == flash->residue) ? 1 : 0;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_WRITE_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	memcpy(flash->dbuf_kva, flash->ubuf + flash->offset, len);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	flash->residue -= len;
	flash->offset += len;
}
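
/*
 * Worked example (informational, reusing the hypothetical 67584-byte
 * BFA_FLASH_DMA_BUF_SZ from above): a 100 KB (102400-byte) update is sent
 * as a first full-buffer chunk of 67584 bytes with msg->last == 0, leaving
 * residue 34816, then a final chunk of 34816 bytes with msg->last == 1.
 * Each subsequent chunk is triggered by the write response in
 * bfa_flash_intr() below.
 */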
/*
 * bfa_flash_read_send - Send flash read request.
 *
 * @cbarg: callback argument
 */
static void
bfa_flash_read_send(void *cbarg)
{
	struct bfa_flash *flash = cbarg;
	struct bfi_flash_read_req *msg =
		(struct bfi_flash_read_req *) flash->mb.msg;
	u32 len;

	msg->type = be32_to_cpu(flash->type);
	msg->instance = flash->instance;
	msg->offset = be32_to_cpu(flash->addr_off + flash->offset);
	len = (flash->residue < BFA_FLASH_DMA_BUF_SZ) ?
	      flash->residue : BFA_FLASH_DMA_BUF_SZ;
	msg->length = be32_to_cpu(len);
	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_READ_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, len, flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);
}
/*
 * bfa_flash_intr - Process flash response messages upon receiving interrupts.
 *
 * @flasharg: flash structure
 * @msg: message structure
 */
static void
bfa_flash_intr(void *flasharg, struct bfi_mbmsg *msg)
{
	struct bfa_flash *flash = flasharg;
	u32 status;

	union {
		struct bfi_flash_query_rsp *query;
		struct bfi_flash_write_rsp *write;
		struct bfi_flash_read_rsp *read;
		struct bfi_mbmsg *msg;
	} m;

	m.msg = msg;

	/* receiving response after ioc failure */
	if (!flash->op_busy && msg->mh.msg_id != BFI_FLASH_I2H_EVENT)
		return;

	switch (msg->mh.msg_id) {
	case BFI_FLASH_I2H_QUERY_RSP:
		status = be32_to_cpu(m.query->status);
		if (status == BFA_STATUS_OK) {
			u32 i;
			struct bfa_flash_attr *attr, *f;

			attr = (struct bfa_flash_attr *) flash->ubuf;
			f = (struct bfa_flash_attr *) flash->dbuf_kva;
			attr->status = be32_to_cpu(f->status);
			attr->npart = be32_to_cpu(f->npart);
			for (i = 0; i < attr->npart; i++) {
				attr->part[i].part_type =
					be32_to_cpu(f->part[i].part_type);
				attr->part[i].part_instance =
					be32_to_cpu(f->part[i].part_instance);
				attr->part[i].part_off =
					be32_to_cpu(f->part[i].part_off);
				attr->part[i].part_size =
					be32_to_cpu(f->part[i].part_size);
				attr->part[i].part_len =
					be32_to_cpu(f->part[i].part_len);
				attr->part[i].part_status =
					be32_to_cpu(f->part[i].part_status);
			}
		}
		flash->status = status;
		bfa_flash_cb(flash);
		break;
	case BFI_FLASH_I2H_WRITE_RSP:
		status = be32_to_cpu(m.write->status);
		if (status != BFA_STATUS_OK || flash->residue == 0) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else
			bfa_flash_write_send(flash);
		break;
	case BFI_FLASH_I2H_READ_RSP:
		status = be32_to_cpu(m.read->status);
		if (status != BFA_STATUS_OK) {
			flash->status = status;
			bfa_flash_cb(flash);
		} else {
			u32 len = be32_to_cpu(m.read->length);

			memcpy(flash->ubuf + flash->offset,
			       flash->dbuf_kva, len);
			flash->residue -= len;
			flash->offset += len;
			if (flash->residue == 0) {
				flash->status = status;
				bfa_flash_cb(flash);
			} else
				bfa_flash_read_send(flash);
		}
		break;
	case BFI_FLASH_I2H_BOOT_VER_RSP:
	case BFI_FLASH_I2H_EVENT:
		break;
	default:
		WARN_ON(1);
	}
}
/*
 * Flash memory info API.
 */
u32
bfa_nw_flash_meminfo(void)
{
	return roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * bfa_nw_flash_attach - Flash attach API.
 *
 * @flash: flash structure
 * @ioc: ioc structure
 * @dev: device structure
 */
void
bfa_nw_flash_attach(struct bfa_flash *flash, struct bfa_ioc *ioc, void *dev)
{
	flash->ioc = ioc;
	flash->cbfn = NULL;
	flash->cbarg = NULL;
	flash->op_busy = 0;

	bfa_nw_ioc_mbox_regisr(flash->ioc, BFI_MC_FLASH, bfa_flash_intr, flash);
	bfa_ioc_notify_init(&flash->ioc_notify, bfa_flash_notify, flash);
	list_add_tail(&flash->ioc_notify.qe, &flash->ioc->notify_q);
}
/*
 * bfa_nw_flash_memclaim - Claim memory for flash
 *
 * @flash: flash structure
 * @dm_kva: pointer to virtual memory address
 * @dm_pa: physical memory address
 */
void
bfa_nw_flash_memclaim(struct bfa_flash *flash, u8 *dm_kva, u64 dm_pa)
{
	flash->dbuf_kva = dm_kva;
	flash->dbuf_pa = dm_pa;
	memset(flash->dbuf_kva, 0, BFA_FLASH_DMA_BUF_SZ);
	dm_kva += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
	dm_pa += roundup(BFA_FLASH_DMA_BUF_SZ, BFA_DMA_ALIGN_SZ);
}
/*
 * bfa_nw_flash_get_attr - Get flash attribute.
 *
 * @flash: flash structure
 * @attr: flash attribute structure
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_get_attr(struct bfa_flash *flash, struct bfa_flash_attr *attr,
		      bfa_cb_flash cbfn, void *cbarg)
{
	struct bfi_flash_query_req *msg =
		(struct bfi_flash_query_req *) flash->mb.msg;

	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->ubuf = (u8 *) attr;

	bfi_h2i_set(msg->mh, BFI_MC_FLASH, BFI_FLASH_H2I_QUERY_REQ,
		    bfa_ioc_portid(flash->ioc));
	bfa_alen_set(&msg->alen, sizeof(struct bfa_flash_attr), flash->dbuf_pa);
	bfa_nw_ioc_mbox_queue(flash->ioc, &flash->mb, NULL, NULL);

	return BFA_STATUS_OK;
}
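
/*
 * Usage sketch (guarded out; illustrative only): querying flash attributes.
 * The completion callback signature mirrors how flash->cbfn is invoked in
 * bfa_flash_cb() above; "my_flash_done" and "my_attr" are hypothetical.
 */
#if 0
static void
my_flash_done(void *cbarg, enum bfa_status status)
{
	/* status is BFA_STATUS_OK on success; my_attr is now filled in */
}

	/* ...in the caller: */
	struct bfa_flash_attr my_attr;

	if (bfa_nw_flash_get_attr(flash, &my_attr, my_flash_done, NULL) !=
	    BFA_STATUS_OK)
		/* IOC not operational, or a flash op is already in flight */;
#endif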
/*
 * bfa_nw_flash_update_part - Update flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: update data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_update_part(struct bfa_flash *flash, u32 type, u8 instance,
			 void *buf, u32 len, u32 offset,
			 bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be non-zero and a multiple of 4 (word aligned).
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (type == BFA_FLASH_PART_MFG)
		return BFA_STATUS_EINVAL;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_write_send(flash);

	return BFA_STATUS_OK;
}
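
/*
 * Usage sketch (guarded out; illustrative only): writing an image into a
 * partition from offset 0. "img"/"img_len" are hypothetical, img_len must
 * be a multiple of 4, and the partition type constant shown
 * (BFA_FLASH_PART_FWIMG) is assumed from the driver's partition-type enum;
 * BFA_FLASH_PART_MFG is refused by design.
 */
#if 0
	enum bfa_status rc;

	rc = bfa_nw_flash_update_part(flash, BFA_FLASH_PART_FWIMG, 0,
				      img, img_len, 0, my_flash_done, NULL);
#endif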
/*
 * bfa_nw_flash_read_part - Read flash partition.
 *
 * @flash: flash structure
 * @type: flash partition type
 * @instance: flash partition instance
 * @buf: read data buffer
 * @len: data buffer length
 * @offset: offset relative to the partition starting address
 * @cbfn: callback function
 * @cbarg: callback argument
 *
 * Return status.
 */
enum bfa_status
bfa_nw_flash_read_part(struct bfa_flash *flash, u32 type, u8 instance,
		       void *buf, u32 len, u32 offset,
		       bfa_cb_flash cbfn, void *cbarg)
{
	if (!bfa_nw_ioc_is_operational(flash->ioc))
		return BFA_STATUS_IOC_NON_OP;

	/*
	 * 'len' must be non-zero and a multiple of 4 (word aligned).
	 */
	if (!len || (len & 0x03))
		return BFA_STATUS_FLASH_BAD_LEN;

	if (flash->op_busy)
		return BFA_STATUS_DEVBUSY;

	flash->op_busy = 1;
	flash->cbfn = cbfn;
	flash->cbarg = cbarg;
	flash->type = type;
	flash->instance = instance;
	flash->residue = len;
	flash->offset = 0;
	flash->addr_off = offset;
	flash->ubuf = buf;

	bfa_flash_read_send(flash);

	return BFA_STATUS_OK;
}
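
/*
 * Usage sketch (guarded out; illustrative only): reading the same
 * hypothetical partition back into a caller buffer; arguments mirror the
 * bfa_nw_flash_update_part() sketch above, and completion is again
 * reported through the bfa_cb_flash callback.
 */
#if 0
	rc = bfa_nw_flash_read_part(flash, BFA_FLASH_PART_FWIMG, 0,
				    buf, buf_len, 0, my_flash_done, NULL);
#endif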