/*
 * Linux network driver for QLogic BR-series Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2014 Brocade Communications Systems, Inc.
 * Copyright (c) 2014-2015 QLogic Corporation
 * All rights reserved
 * www.qlogic.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

#define bfa_ioc_ct_sync_pos(__ioc)	\
		((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH		16
#define bfa_ioc_ct_get_sync_ackd(__val)	(__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val)	(__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
		(bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
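
/*
 * Layout of the ioc_fail_sync scratch register used below: bits [15:0]
 * are the per-PCI-fn "sync acked" bits, bits [31:16] are the per-PCI-fn
 * "sync required" bits; each function's bit is (1 << bfa_ioc_pcifn()).
 */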

/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_cur_ioc_fwstate(
			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc);
static void bfa_ioc_ct_set_alt_ioc_fwstate(
			struct bfa_ioc *ioc, enum bfi_ioc_state fwstate);
static enum bfi_ioc_state bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
				enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

static const struct bfa_ioc_hwif nw_hwif_ct = {
	.ioc_pll_init = bfa_ioc_ct_pll_init,
	.ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init = bfa_ioc_ct_reg_init,
	.ioc_map_port = bfa_ioc_ct_map_port,
	.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
	.ioc_notify_fail = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start = bfa_ioc_ct_sync_start,
	.ioc_sync_join = bfa_ioc_ct_sync_join,
	.ioc_sync_leave = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};

static const struct bfa_ioc_hwif nw_hwif_ct2 = {
	.ioc_pll_init = bfa_ioc_ct2_pll_init,
	.ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
	.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
	.ioc_reg_init = bfa_ioc_ct2_reg_init,
	.ioc_map_port = bfa_ioc_ct2_map_port,
	.ioc_lpu_read_stat = bfa_ioc_ct2_lpu_read_stat,
	.ioc_isr_mode_set = NULL,
	.ioc_notify_fail = bfa_ioc_ct_notify_fail,
	.ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
	.ioc_sync_start = bfa_ioc_ct_sync_start,
	.ioc_sync_join = bfa_ioc_ct_sync_join,
	.ioc_sync_leave = bfa_ioc_ct_sync_leave,
	.ioc_sync_ack = bfa_ioc_ct_sync_ack,
	.ioc_sync_complete = bfa_ioc_ct_sync_complete,
	.ioc_set_fwstate = bfa_ioc_ct_set_cur_ioc_fwstate,
	.ioc_get_fwstate = bfa_ioc_ct_get_cur_ioc_fwstate,
	.ioc_set_alt_fwstate = bfa_ioc_ct_set_alt_ioc_fwstate,
	.ioc_get_alt_fwstate = bfa_ioc_ct_get_alt_ioc_fwstate,
};
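
/*
 * The CT2 ops differ from CT only where the ASICs differ: per-ASIC
 * reg_init/map_port/pll_init, an additional lpu_read_stat hook, and no
 * isr_mode_set callback.
 */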

/* Called from bfa_ioc_attach() to map asic specific calls. */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
	ioc->ioc_hwif = &nw_hwif_ct2;
}
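
/*
 * Firmware usage counting: the usage register (BFA_FW_USE_COUNT),
 * guarded by the usage semaphore, tracks how many PCI functions are
 * attached to the currently running firmware image.
 */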

/* Return true if firmware of current driver matches the running firmware. */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
	enum bfi_ioc_state ioc_fwstate;
	u32 usecnt;
	struct bfi_ioc_image_hdr fwhdr;

	/* If bios boot (flash based) -- do not increment usage count */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return true;

	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

	/* If usage count is 0, always return TRUE. */
	if (usecnt == 0) {
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		return true;
	}

	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

	/* Use count cannot be non-zero and chip in uninitialized state. */
	BUG_ON(!(ioc_fwstate != BFI_IOC_UNINIT));

	/* Check if another driver with a different firmware is active */
	bfa_nw_ioc_fwver_get(ioc, &fwhdr);
	if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
		return false;
	}

	/* Same firmware version. Increment the reference count. */
	usecnt++;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
	return true;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
	u32 usecnt;

	/* If bios boot (flash based) -- do not decrement usage count */
	if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
						BFA_IOC_FWIMG_MINSZ)
		return;

	/* decrement usage count */
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
	BUG_ON(!(usecnt > 0));
	usecnt--;
	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/* Notify other functions on HB failure. */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
	writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
	/* Wait for halt to take effect */
	readl(ioc->ioc_regs.ll_halt);
	readl(ioc->ioc_regs.alt_ll_halt);
}

/* Host to LPU mailbox message addresses */
static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
} ct_fnreg[] = {
	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/* Host <-> LPU mailbox command/status registers - port 0 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p0reg[] = {
	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/* Host <-> LPU mailbox command/status registers - port 1 */
static const struct {
	u32	hfn;
	u32	lpu;
} ct_p1reg[] = {
	{ HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
	{ HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
	{ HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};
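
/* Host <-> LPU mailbox and command/status registers - CT2, per port */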

static const struct {
	u32	hfn_mbox;
	u32	lpu_mbox;
	u32	hfn_pgn;
	u32	hfn;
	u32	lpu;
	u32	lpu_read;
} ct2_reg[] = {
	{ CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU0_READ_STAT},
	{ CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
	  CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
	  CT2_HOSTFN_LPU1_READ_STAT},
};

static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int	pcifn = bfa_ioc_pcifn(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

	if (ioc->port_id == 0) {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
		ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
		ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
	void __iomem *rb;
	int	port = bfa_ioc_portid(ioc);

	rb = bfa_ioc_bar0(ioc);

	ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
	ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
	ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
	ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
	ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
	ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

	if (port == 0) {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
	} else {
		ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
		ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
		ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
		ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
	}

	/*
	 * PSS control registers
	 */
	ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
	ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
	ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
	ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

	/*
	 * IOC semaphore registers and serialization
	 */
	ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
	ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
	ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
	ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
	ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

	/*
	 * sram memory access
	 */
	ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

	/*
	 * err set reg : for notification of hb failure in fcmode
	 */
	ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/* Initialize IOC to port mapping. */

#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	/* For catapult, base port id on personality register and IOC type */
	r32 = readl(rb + FNC_PERS_REG);
	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
	ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/* Set interrupt mode for a function: INTX or MSIX */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32, mode;

	r32 = readl(rb + FNC_PERS_REG);
	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
		__F0_INTX_STATUS;

	/* If already in desired mode, do not change anything */
	if ((!msix && mode) || (msix && !mode))
		return;

	if (msix)
		mode = __F0_INTX_STATUS_MSIX;
	else
		mode = __F0_INTX_STATUS_INTA;

	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
	writel(r32, rb + FNC_PERS_REG);
}
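
/*
 * bfa_ioc_ct2_lpu_read_stat() below reports whether the LPU has posted
 * a read-status event: a non-zero register value is acked by writing 1
 * back and the function returns true.
 */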

static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.lpu_read_stat);

	if (r32) {
		writel(1, ioc->ioc_regs.lpu_read_stat);
		return true;
	}
	return false;
}

/* MSI-X resource allocation for 1860 with no asic block */
#define HOSTFN_MSIX_DEFAULT		64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR	0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT	0x3013c
#define __MSIX_VT_NUMVT__MK		0x003ff800
#define __MSIX_VT_NUMVT__SH		11
#define __MSIX_VT_NUMVT_(_v)		((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_			0x000007ff
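
/*
 * Power-on MSI-X vector table setup for CT2: if the NUMVT field is
 * already programmed, only the mailbox-error vector offset is written
 * back; otherwise 64 vectors per PCI function are set up along with the
 * mailbox-error vector index.
 */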
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
	void __iomem *rb = ioc->pcidev.pci_bar_kva;
	u32	r32;

	r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	if (r32 & __MSIX_VT_NUMVT__MK) {
		writel(r32 & __MSIX_VT_OFST_,
			rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
		return;
	}

	writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
		HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_OFST_NUMVT);
	writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
		rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/* Cleanup hw semaphore and usecnt registers */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
	bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
	writel(0, ioc->ioc_regs.ioc_usage_reg);
	bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

	/*
	 * Read the hw sem reg to make sure that it is locked
	 * before we clear it. If it is not locked, writing 1
	 * will lock it instead of clearing it.
	 */
	readl(ioc->ioc_regs.ioc_sem_reg);
	bfa_nw_ioc_hw_sem_release(ioc);
}
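
/*
 * IOC failure synchronization across PCI functions is tracked in
 * ioc_fail_sync: sync_join() sets this function's "required" bit,
 * sync_ack() sets its "acked" bit, sync_leave() clears both, and
 * sync_complete() treats the failure as handled once every function
 * that joined has acked.
 */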

/* Synchronized IOC failure processing routines */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

	/*
	 * Driver load time. If the sync required bit for this PCI fn
	 * is set, it is due to an unclean exit by the driver for this
	 * PCI fn in the previous incarnation. Whoever comes here first
	 * should clean it up, no matter which PCI fn.
	 */
	if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
		writel(0, ioc->ioc_regs.ioc_fail_sync);
		writel(1, ioc->ioc_regs.ioc_usage_reg);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	return bfa_ioc_ct_sync_complete(ioc);
}
/* Synchronized IOC failure processing routines */
static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

	writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
			bfa_ioc_ct_sync_pos(ioc);

	writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

	writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}

static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
	u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
	u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
	u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
	u32 tmp_ackd;

	if (sync_ackd == 0)
		return true;

	/*
	 * The check below is to see whether any other PCI fn
	 * has reinitialized the ASIC (reset sync_ackd bits)
	 * and failed again while this IOC was waiting for hw
	 * semaphore (in bfa_iocpf_sm_semwait()).
	 */
	tmp_ackd = sync_ackd;
	if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
	    !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
		sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

	if (sync_reqd == sync_ackd) {
		writel(bfa_ioc_ct_clear_sync_ackd(r32),
		       ioc->ioc_regs.ioc_fail_sync);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
		writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
		return true;
	}

	/*
	 * If another PCI fn reinitialized and failed again while
	 * this IOC was waiting for hw sem, the sync_ackd bit for
	 * this IOC needs to be set again to allow reinitialization.
	 */
	if (tmp_ackd != sync_ackd)
		writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

	return false;
}

static void
bfa_ioc_ct_set_cur_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_cur_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.ioc_fwstate);
}

static void
bfa_ioc_ct_set_alt_ioc_fwstate(struct bfa_ioc *ioc,
			       enum bfi_ioc_state fwstate)
{
	writel(fwstate, ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfi_ioc_state
bfa_ioc_ct_get_alt_ioc_fwstate(struct bfa_ioc *ioc)
{
	return (enum bfi_ioc_state)readl(ioc->ioc_regs.alt_ioc_fwstate);
}

static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	u32 pll_sclk, pll_fclk, r32;
	bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

	pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
		__APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
		__APP_PLL_SCLK_JITLMT0_1(3U) |
		__APP_PLL_SCLK_CNTLMT0_1(1U);
	pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
		__APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
		__APP_PLL_LCLK_JITLMT0_1(3U) |
		__APP_PLL_LCLK_CNTLMT0_1(1U);

	if (fcmode) {
		writel(0, (rb + OP_MODE));
		writel(__APP_EMS_CMLCKSEL |
			__APP_EMS_REFCKBUFEN2 |
			__APP_EMS_CHANNEL_SEL,
			(rb + ETH_MAC_SER_REG));
	} else {
		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
		writel(__APP_EMS_REFCKBUFEN1,
			(rb + ETH_MAC_SER_REG));
	}
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		rb + APP_PLL_LCLK_CTL_REG);
	writel(pll_sclk |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);
	readl(rb + HOSTFN0_INT_MSK);
	udelay(2000);
	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
	writel(pll_sclk |
		__APP_PLL_SCLK_ENABLE,
		rb + APP_PLL_SCLK_CTL_REG);
	writel(pll_fclk |
		__APP_PLL_LCLK_ENABLE,
		rb + APP_PLL_LCLK_CTL_REG);

	if (!fcmode) {
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
	}
	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	if (!fcmode) {
		writel(0, (rb + PMM_1T_RESET_REG_P0));
		writel(0, (rb + PMM_1T_RESET_REG_P1));
	}

	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
	udelay(1000);
	r32 = readl((rb + MBIST_STAT_REG));
	writel(0, (rb + MBIST_CTL_REG));
	return BFA_STATUS_OK;
}

static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put s_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
	r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
		__APP_PLL_SCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * Ignore mode and program for the max clock (which is FC16)
	 * Firmware/NFC will do the PLL init appropriately
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
	writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * while doing PLL init don't clock gate ethernet subsystem
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel((r32 | __ETH_CLK_ENABLE_PORT0),
	       (rb + CT2_CHIP_MISC_PRG));

	r32 = readl((rb + CT2_PCIE_MISC_REG));
	writel((r32 | __ETH_CLK_ENABLE_PORT1),
	       (rb + CT2_PCIE_MISC_REG));

	/*
	 * set sclk value
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
		__APP_PLL_SCLK_CLK_DIV2);
	writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * poll for s_clk lock or delay 1ms
	 */
	udelay(1000);

	/*
	 * Don't do clock gating for ethernet subsystem, firmware/NFC will
	 * do this appropriately
	 */
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
	u32 r32;

	/*
	 * put l_clk PLL and PLL FSM in reset
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
	r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
		__APP_PLL_LCLK_LOGIC_SOFT_RESET);
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set LPU speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_CHIP_MISC_PRG));
	writel(r32, (rb + CT2_CHIP_MISC_PRG));

	/*
	 * set LPU half speed (set for FC16 which will work for other modes)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * set lclk for mode (set for FC16)
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
	r32 |= 0x20c1731b;
	writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/*
	 * poll for l_clk lock or delay 1ms
	 */
	udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
	u32 r32;

	r32 = readl((rb + PSS_CTL_REG));
	r32 &= ~__PSS_LMEM_RESET;
	writel(r32, (rb + PSS_CTL_REG));
	udelay(1000);

	writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
	udelay(1000);
	writel(0, (rb + CT2_MBIST_CTL_REG));
}

static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
	u32 r32;

	bfa_ioc_ct2_sclk_init(rb);
	bfa_ioc_ct2_lclk_init(rb);

	/*
	 * release soft reset on s_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_SCLK_CTL_REG));

	/*
	 * release soft reset on l_clk
	 */
	r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
	writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
	       (rb + CT2_APP_PLL_LCLK_CTL_REG));

	/* put port0, port1 MAC & AHB in reset */
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(0)));
	writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
	       (rb + CT2_CSI_MAC_CONTROL_REG(1)));
}

#define CT2_NFC_MAX_DELAY	1000
#define CT2_NFC_VER_VALID	0x143
#define BFA_IOC_PLL_POLL	1000000

static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
	volatile u32 r32;

	r32 = readl(rb + CT2_NFC_CSR_SET_REG);
	if (r32 & __NFC_CONTROLLER_HALTED)
		return true;

	return false;
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
	volatile u32 r32;
	int i;

	writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
	for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
		r32 = readl(rb + CT2_NFC_CSR_SET_REG);
		if (!(r32 & __NFC_CONTROLLER_HALTED))
			return;
		udelay(1000);
	}
	BUG_ON(1);
}
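
/*
 * CT2 PLL bring-up takes one of two paths: if NFC firmware is present
 * and recent enough (nfc_ver >= CT2_NFC_VER_VALID), it is asked to
 * reset and start the SCLK/LCLK PLLs; otherwise the NFC is halted and
 * the MAC reset and clock init are done directly by the host.
 */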

static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
	volatile u32 wgn, r32;
	u32 nfc_ver, i;

	wgn = readl(rb + CT2_WGN_STATUS);

	nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

	if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
	    (nfc_ver >= CT2_NFC_VER_VALID)) {
		if (bfa_ioc_ct2_nfc_halted(rb))
			bfa_ioc_ct2_nfc_resume(rb);
		writel(__RESET_AND_START_SCLK_LCLK_PLLS,
		       rb + CT2_CSI_FW_CTL_SET_REG);

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
				break;
		}
		BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

		for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
			r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
			if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
				break;
		}
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
		udelay(1000);

		r32 = readl(rb + CT2_CSI_FW_CTL_REG);
		BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
	} else {
		writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
		for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
			r32 = readl(rb + CT2_NFC_CSR_SET_REG);
			if (r32 & __NFC_CONTROLLER_HALTED)
				break;
			udelay(1000);
		}

		bfa_ioc_ct2_mac_reset(rb);
		bfa_ioc_ct2_sclk_init(rb);
		bfa_ioc_ct2_lclk_init(rb);

		/* release soft reset on s_clk & l_clk */
		r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
		       rb + CT2_APP_PLL_SCLK_CTL_REG);
		r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
		writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
		       rb + CT2_APP_PLL_LCLK_CTL_REG);
	}

	/* Announce flash device presence, if flash was corrupted. */
	if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
		r32 = readl((rb + PSS_GPIO_OUT_REG));
		writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
		r32 = readl((rb + PSS_GPIO_OE_REG));
		writel(r32 | 1, rb + PSS_GPIO_OE_REG);
	}

	/*
	 * Mask the interrupts and clear any
	 * pending interrupts left by BIOS/EFI
	 */
	writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
	writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

	/* For first time initialization, no need to clear interrupts */
	r32 = readl(rb + HOST_SEM5_REG);
	if (r32 & 0x1) {
		r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
		}
		r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		if (r32 == 1) {
			writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
			readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
		}
	}

	bfa_ioc_ct2_mem_init(rb);

	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
	writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
	return BFA_STATUS_OK;
}