/*
 * Linux network driver for Brocade Converged Network Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 */

#include "bfa_ioc.h"
#include "cna.h"
#include "bfi.h"
#include "bfi_reg.h"
#include "bfa_defs.h"

#define bfa_ioc_ct_sync_pos(__ioc)      \
                ((u32) (1 << bfa_ioc_pcifn(__ioc)))
#define BFA_IOC_SYNC_REQD_SH            16
#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
                (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)

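/*
 * Layout of the ioc_fail_sync scratch register, per the accessors above:
 * the lower 16 bits hold the per-PCI-function sync "acked" bitmap and the
 * upper 16 bits (above BFA_IOC_SYNC_REQD_SH) hold the sync "required"
 * bitmap, with each function's bit selected by its PCI function number.
 * Joining functions set their reqd bit, failed functions ack theirs, and
 * the last acker clears the ack bits (see the sync routines below).
 */
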
/*
 * forward declarations
 */
static bool bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct2_map_port(struct bfa_ioc *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_start(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
                                enum bfi_asic_mode asic_mode);
static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
                                enum bfi_asic_mode asic_mode);
static bool bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc);

static const struct bfa_ioc_hwif nw_hwif_ct = {
        .ioc_pll_init        = bfa_ioc_ct_pll_init,
        .ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init        = bfa_ioc_ct_reg_init,
        .ioc_map_port        = bfa_ioc_ct_map_port,
        .ioc_isr_mode_set    = bfa_ioc_ct_isr_mode_set,
        .ioc_notify_fail     = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start      = bfa_ioc_ct_sync_start,
        .ioc_sync_join       = bfa_ioc_ct_sync_join,
        .ioc_sync_leave      = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack        = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};

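/*
 * CT2 shares most handlers with CT but uses its own register layout,
 * port mapping and PLL init, and adds an LPU read-status hook; the ISR
 * mode hook is not used on CT2.
 */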
static const struct bfa_ioc_hwif nw_hwif_ct2 = {
        .ioc_pll_init        = bfa_ioc_ct2_pll_init,
        .ioc_firmware_lock   = bfa_ioc_ct_firmware_lock,
        .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
        .ioc_reg_init        = bfa_ioc_ct2_reg_init,
        .ioc_map_port        = bfa_ioc_ct2_map_port,
        .ioc_lpu_read_stat   = bfa_ioc_ct2_lpu_read_stat,
        .ioc_isr_mode_set    = NULL,
        .ioc_notify_fail     = bfa_ioc_ct_notify_fail,
        .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
        .ioc_sync_start      = bfa_ioc_ct_sync_start,
        .ioc_sync_join       = bfa_ioc_ct_sync_join,
        .ioc_sync_leave      = bfa_ioc_ct_sync_leave,
        .ioc_sync_ack        = bfa_ioc_ct_sync_ack,
        .ioc_sync_complete   = bfa_ioc_ct_sync_complete,
};

/**
 * Called from bfa_ioc_attach() to map ASIC-specific calls.
 */
void
bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct;
}

void
bfa_nw_ioc_set_ct2_hwif(struct bfa_ioc *ioc)
{
        ioc->ioc_hwif = &nw_hwif_ct2;
}

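/*
 * A minimal sketch of how a caller might pick the hwif during attach,
 * assuming it keys off bfa_ioc_asic_gen() (the same accessor used by
 * the firmware-size checks below):
 *
 *      if (bfa_ioc_asic_gen(ioc) == BFI_ASIC_GEN_CT2)
 *              bfa_nw_ioc_set_ct2_hwif(ioc);
 *      else
 *              bfa_nw_ioc_set_ct_hwif(ioc);
 */
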
/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bool
bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr fwhdr;

        /**
         * If BIOS boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return true;

        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);

        /**
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                return true;
        }

        ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);

        /**
         * The usage count cannot be non-zero while the chip is still
         * in the uninitialized state.
         */
        BUG_ON(ioc_fwstate == BFI_IOC_UNINIT);

        /**
         * Check if another driver with a different firmware is active
         */
        bfa_nw_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_nw_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                return false;
        }

        /**
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        return true;
}

static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
{
        u32 usecnt;

        /**
         * If BIOS boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(bfa_ioc_asic_gen(ioc)) <
                                                BFA_IOC_FWIMG_MINSZ)
                return;

        /**
         * decrement usage count
         */
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
        BUG_ON(usecnt == 0);

        usecnt--;
        writel(usecnt, ioc->ioc_regs.ioc_usage_reg);

        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/**
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
{
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
        writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
        /* Wait for halt to take effect */
        readl(ioc->ioc_regs.ll_halt);
        readl(ioc->ioc_regs.alt_ll_halt);
}

/**
 * Host to LPU mailbox message addresses
 */
static const struct {
        u32     hfn_mbox;
        u32     lpu_mbox;
        u32     hfn_pgn;
} ct_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static const struct {
        u32     hfn;
        u32     lpu;
} ct_p0reg[] = {
        { HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU0_CMD_STAT, LPU0_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU0_CMD_STAT, LPU0_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU0_CMD_STAT, LPU0_HOSTFN3_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static const struct {
        u32     hfn;
        u32     lpu;
} ct_p1reg[] = {
        { HOSTFN0_LPU1_CMD_STAT, LPU1_HOSTFN0_CMD_STAT },
        { HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT },
        { HOSTFN2_LPU1_CMD_STAT, LPU1_HOSTFN2_CMD_STAT },
        { HOSTFN3_LPU1_CMD_STAT, LPU1_HOSTFN3_CMD_STAT }
};

static const struct {
        u32     hfn_mbox;
        u32     lpu_mbox;
        u32     hfn_pgn;
        u32     hfn;
        u32     lpu;
        u32     lpu_read;
} ct2_reg[] = {
        { CT2_HOSTFN_LPU0_MBOX0, CT2_LPU0_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU0_CMD_STAT, CT2_LPU0_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU0_READ_STAT},
        { CT2_HOSTFN_LPU1_MBOX0, CT2_LPU1_HOSTFN_MBOX0, CT2_HOSTFN_PAGE_NUM,
          CT2_HOSTFN_LPU1_CMD_STAT, CT2_LPU1_HOSTFN_CMD_STAT,
          CT2_HOSTFN_LPU1_READ_STAT},
};

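/*
 * Note: the CT tables above are indexed by PCI function number, while
 * ct2_reg is indexed by port id (one entry per LPU).
 */
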
static void
bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int             pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p1reg[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p1reg[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + BFA_IOC_FAIL_SYNC;

        /**
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg: for notification of heartbeat failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

static void
bfa_ioc_ct2_reg_init(struct bfa_ioc *ioc)
{
        void __iomem *rb;
        int             port = bfa_ioc_portid(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + ct2_reg[port].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + ct2_reg[port].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + ct2_reg[port].hfn_pgn;
        ioc->ioc_regs.hfn_mbox_cmd = rb + ct2_reg[port].hfn;
        ioc->ioc_regs.lpu_mbox_cmd = rb + ct2_reg[port].lpu;
        ioc->ioc_regs.lpu_read_stat = rb + ct2_reg[port].lpu_read;

        if (port == 0) {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
        } else {
                ioc->ioc_regs.heartbeat = rb + CT2_BFA_IOC1_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + CT2_BFA_IOC1_STATE_REG;
                ioc->ioc_regs.alt_ioc_fwstate = rb + CT2_BFA_IOC0_STATE_REG;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
                ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = rb + PSS_CTL_REG;
        ioc->ioc_regs.pss_err_status_reg = rb + PSS_ERR_STATUS_REG;
        ioc->ioc_regs.app_pll_fast_ctl_reg = rb + CT2_APP_PLL_LCLK_CTL_REG;
        ioc->ioc_regs.app_pll_slow_ctl_reg = rb + CT2_APP_PLL_SCLK_CTL_REG;

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = rb + CT2_HOST_SEM0_REG;
        ioc->ioc_regs.ioc_usage_sem_reg = rb + CT2_HOST_SEM1_REG;
        ioc->ioc_regs.ioc_init_sem_reg = rb + CT2_HOST_SEM2_REG;
        ioc->ioc_regs.ioc_usage_reg = rb + CT2_BFA_FW_USE_COUNT;
        ioc->ioc_regs.ioc_fail_sync = rb + CT2_BFA_IOC_FAIL_SYNC;

        /**
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = rb + PSS_SMEM_PAGE_START;
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg: for notification of heartbeat failure in fcmode
         */
        ioc->ioc_regs.err_set = rb + ERR_SET_REG;
}

/**
 * Initialize IOC to port mapping.
 */

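/* Each PCI function owns an 8-bit field in the personality register. */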
#define FNC_PERS_FN_SHIFT(__fn) ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        /**
         * For catapult, base port id on personality register and IOC type
         */
        r32 = readl(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
}

static void
bfa_ioc_ct2_map_port(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32;

        r32 = readl(rb + CT2_HOSTFN_PERSONALITY0);
        ioc->port_id = ((r32 & __FC_LL_PORT_MAP__MK) >> __FC_LL_PORT_MAP__SH);
}

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32     r32, mode;

        r32 = readl(rb + FNC_PERS_REG);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /**
         * If already in desired mode, do not change anything
         */
        if ((!msix && mode) || (msix && !mode))
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));

        writel(r32, rb + FNC_PERS_REG);
}

static bool
bfa_ioc_ct2_lpu_read_stat(struct bfa_ioc *ioc)
{
        u32 r32;

        r32 = readl(ioc->ioc_regs.lpu_read_stat);
        if (r32) {
                /* acknowledge the LPU read event by writing the bit back */
                writel(1, ioc->ioc_regs.lpu_read_stat);
                return true;
        }

        return false;
}

/**
 * MSI-X resource allocation for 1860 with no asic block
 */
#define HOSTFN_MSIX_DEFAULT             64
#define HOSTFN_MSIX_VT_INDEX_MBOX_ERR   0x30138
#define HOSTFN_MSIX_VT_OFST_NUMVT       0x3013c
#define __MSIX_VT_NUMVT__MK             0x003ff800
#define __MSIX_VT_NUMVT__SH             11
#define __MSIX_VT_NUMVT_(_v)            ((_v) << __MSIX_VT_NUMVT__SH)
#define __MSIX_VT_OFST_                 0x000007ff
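/*
 * Per the masks above, bits 21:11 of the NUMVT register hold the vector
 * count and the low 11 bits hold the vector offset.
 */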
void
bfa_nw_ioc_ct2_poweron(struct bfa_ioc *ioc)
{
        void __iomem *rb = ioc->pcidev.pci_bar_kva;
        u32 r32;

        r32 = readl(rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        if (r32 & __MSIX_VT_NUMVT__MK) {
                /* vectors already provisioned; just program the
                 * mbox-err index from the existing offset */
                writel(r32 & __MSIX_VT_OFST_,
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
                return;
        }

        writel(__MSIX_VT_NUMVT_(HOSTFN_MSIX_DEFAULT - 1) |
                        HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                        rb + HOSTFN_MSIX_VT_OFST_NUMVT);
        writel(HOSTFN_MSIX_DEFAULT * bfa_ioc_pcifn(ioc),
                        rb + HOSTFN_MSIX_VT_INDEX_MBOX_ERR);
}

/**
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
{
        bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        writel(0, ioc->ioc_regs.ioc_usage_reg);
        bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        readl(ioc->ioc_regs.ioc_sem_reg);
        bfa_nw_ioc_hw_sem_release(ioc);
}

/**
 * Synchronized IOC failure processing routines
 */
static bool
bfa_ioc_ct_sync_start(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);

        /*
         * Driver load time.  If the sync required bit for this PCI fn
         * is set, it is due to an unclean exit by the driver for this
         * PCI fn in the previous incarnation. Whoever comes here first
         * should clean it up, no matter which PCI fn.
         */
        if (sync_reqd & bfa_ioc_ct_sync_pos(ioc)) {
                writel(0, ioc->ioc_regs.ioc_fail_sync);
                writel(1, ioc->ioc_regs.ioc_usage_reg);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_UNINIT, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        return bfa_ioc_ct_sync_complete(ioc);
}

static void
bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);

        writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
                                        bfa_ioc_ct_sync_pos(ioc);

        writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
}

static void
bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);

        writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
}

static bool
bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
{
        u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
        u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
        u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
        u32 tmp_ackd;

        if (sync_ackd == 0)
                return true;

        /**
         * The check below is to see whether any other PCI fn
         * has reinitialized the ASIC (reset sync_ackd bits)
         * and failed again while this IOC was waiting for hw
         * semaphore (in bfa_iocpf_sm_semwait()).
         */
        tmp_ackd = sync_ackd;
        if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
                        !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
                sync_ackd |= bfa_ioc_ct_sync_pos(ioc);

        if (sync_reqd == sync_ackd) {
                writel(bfa_ioc_ct_clear_sync_ackd(r32),
                                ioc->ioc_regs.ioc_fail_sync);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
                writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
                return true;
        }

        /**
         * If another PCI fn reinitialized and failed again while
         * this IOC was waiting for hw sem, the sync_ackd bit for
         * this IOC needs to be set again to allow reinitialization.
         */
        if (tmp_ackd != sync_ackd)
                writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);

        return false;
}

static enum bfa_status
bfa_ioc_ct_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        u32     pll_sclk, pll_fclk, r32;
        bool fcmode = (asic_mode == BFI_ASIC_MODE_FC);

        pll_sclk = __APP_PLL_SCLK_LRESETN | __APP_PLL_SCLK_ENARST |
                __APP_PLL_SCLK_RSEL200500 | __APP_PLL_SCLK_P0_1(3U) |
                __APP_PLL_SCLK_JITLMT0_1(3U) |
                __APP_PLL_SCLK_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_LCLK_LRESETN | __APP_PLL_LCLK_ENARST |
                __APP_PLL_LCLK_RSEL200500 | __APP_PLL_LCLK_P0_1(3U) |
                __APP_PLL_LCLK_JITLMT0_1(3U) |
                __APP_PLL_LCLK_CNTLMT0_1(1U);

        if (fcmode) {
                writel(0, (rb + OP_MODE));
                writel(__APP_EMS_CMLCKSEL |
                                __APP_EMS_REFCKBUFEN2 |
                                __APP_EMS_CHANNEL_SEL,
                                (rb + ETH_MAC_SER_REG));
        } else {
                writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
                writel(__APP_EMS_REFCKBUFEN1,
                                (rb + ETH_MAC_SER_REG));
        }
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
        writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
        writel(pll_sclk |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET,
                rb + APP_PLL_LCLK_CTL_REG);
        writel(pll_sclk |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET | __APP_PLL_SCLK_ENABLE,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET | __APP_PLL_LCLK_ENABLE,
                rb + APP_PLL_LCLK_CTL_REG);
        readl(rb + HOSTFN0_INT_MSK);
        udelay(2000);
        writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
        writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
        writel(pll_sclk |
                __APP_PLL_SCLK_ENABLE,
                rb + APP_PLL_SCLK_CTL_REG);
        writel(pll_fclk |
                __APP_PLL_LCLK_ENABLE,
                rb + APP_PLL_LCLK_CTL_REG);

        if (!fcmode) {
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
                writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
        }
        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);
        if (!fcmode) {
                writel(0, (rb + PMM_1T_RESET_REG_P0));
                writel(0, (rb + PMM_1T_RESET_REG_P1));
        }

        writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
        udelay(1000);
        r32 = readl((rb + MBIST_STAT_REG));
        writel(0, (rb + MBIST_CTL_REG));
        return BFA_STATUS_OK;
}

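/*
 * CT2 PLL bring-up is factored into the s_clk, l_clk and memory init
 * helpers below, used by bfa_ioc_ct2_mac_reset() and
 * bfa_ioc_ct2_pll_init().
 */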
static void
bfa_ioc_ct2_sclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put s_clk PLL and PLL FSM in reset
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_ENABLE | __APP_PLL_SCLK_LRESETN);
        r32 |= (__APP_PLL_SCLK_ENARST | __APP_PLL_SCLK_BYPASS |
                __APP_PLL_SCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * Ignore mode and program for the max clock (which is FC16)
         * Firmware/NFC will do the PLL init appropriately
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= ~(__APP_PLL_SCLK_REFCLK_SEL | __APP_PLL_SCLK_CLK_DIV2);
        writel(r32, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * while doing PLL init don't clock gate ethernet subsystem
         */
        r32 = readl((rb + CT2_CHIP_MISC_PRG));
        writel((r32 | __ETH_CLK_ENABLE_PORT0),
                                (rb + CT2_CHIP_MISC_PRG));

        r32 = readl((rb + CT2_PCIE_MISC_REG));
        writel((r32 | __ETH_CLK_ENABLE_PORT1),
                                (rb + CT2_PCIE_MISC_REG));

        /*
         * set sclk value
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        r32 &= (__P_SCLK_PLL_LOCK | __APP_PLL_SCLK_REFCLK_SEL |
                __APP_PLL_SCLK_CLK_DIV2);
        writel(r32 | 0x1061731b, (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * poll for s_clk lock or delay 1ms
         */
        udelay(1000);

        /*
         * Don't do clock gating for ethernet subsystem, firmware/NFC will
         * do this appropriately
         */
}

static void
bfa_ioc_ct2_lclk_init(void __iomem *rb)
{
        u32 r32;

        /*
         * put l_clk PLL and PLL FSM in reset
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        r32 &= ~(__APP_PLL_LCLK_ENABLE | __APP_PLL_LCLK_LRESETN);
        r32 |= (__APP_PLL_LCLK_ENARST | __APP_PLL_LCLK_BYPASS |
                __APP_PLL_LCLK_LOGIC_SOFT_RESET);
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * set LPU speed (set for FC16 which will work for other modes)
         */
        r32 = readl((rb + CT2_CHIP_MISC_PRG));
        writel(r32, (rb + CT2_CHIP_MISC_PRG));

        /*
         * set LPU half speed (set for FC16 which will work for other modes)
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * set lclk for mode (set for FC16)
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        r32 &= (__P_LCLK_PLL_LOCK | __APP_LPUCLK_HALFSPEED);
        r32 |= 0x20c1731b;
        writel(r32, (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /*
         * poll for l_clk lock or delay 1ms
         */
        udelay(1000);
}

static void
bfa_ioc_ct2_mem_init(void __iomem *rb)
{
        u32 r32;

        r32 = readl((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        writel(r32, (rb + PSS_CTL_REG));
        udelay(1000);

        writel(__EDRAM_BISTR_START, (rb + CT2_MBIST_CTL_REG));
        udelay(1000);
        writel(0, (rb + CT2_MBIST_CTL_REG));
}

static void
bfa_ioc_ct2_mac_reset(void __iomem *rb)
{
        volatile u32 r32;

        bfa_ioc_ct2_sclk_init(rb);
        bfa_ioc_ct2_lclk_init(rb);

        /*
         * release soft reset on s_clk
         */
        r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
        writel((r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET),
                        (rb + CT2_APP_PLL_SCLK_CTL_REG));

        /*
         * release soft reset on l_clk
         */
        r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
        writel((r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET),
                        (rb + CT2_APP_PLL_LCLK_CTL_REG));

        /* put port0, port1 MAC & AHB in reset */
        writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
                        (rb + CT2_CSI_MAC_CONTROL_REG(0)));
        writel((__CSI_MAC_RESET | __CSI_MAC_AHB_RESET),
                        (rb + CT2_CSI_MAC_CONTROL_REG(1)));
}

#define CT2_NFC_MAX_DELAY       1000
#define CT2_NFC_VER_VALID       0x143
#define BFA_IOC_PLL_POLL        1000000

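/*
 * NFC halted/running state is reflected in CT2_NFC_CSR_SET_REG; the
 * resume path below clears the halt and then polls in 1 ms steps for up
 * to CT2_NFC_MAX_DELAY iterations before giving up.
 */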
static bool
bfa_ioc_ct2_nfc_halted(void __iomem *rb)
{
        volatile u32 r32;

        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
        if (r32 & __NFC_CONTROLLER_HALTED)
                return true;

        return false;
}

static void
bfa_ioc_ct2_nfc_resume(void __iomem *rb)
{
        volatile u32 r32;
        int i;

        writel(__HALT_NFC_CONTROLLER, rb + CT2_NFC_CSR_CLR_REG);
        for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                if (!(r32 & __NFC_CONTROLLER_HALTED))
                        return;
                udelay(1000);
        }
        BUG_ON(1);
}

static enum bfa_status
bfa_ioc_ct2_pll_init(void __iomem *rb, enum bfi_asic_mode asic_mode)
{
        volatile u32 wgn, r32;
        u32 nfc_ver, i;

        wgn = readl(rb + CT2_WGN_STATUS);

        nfc_ver = readl(rb + CT2_RSC_GPR15_REG);

        if ((wgn == (__A2T_AHB_LOAD | __WGN_READY)) &&
                (nfc_ver >= CT2_NFC_VER_VALID)) {
                if (bfa_ioc_ct2_nfc_halted(rb))
                        bfa_ioc_ct2_nfc_resume(rb);
                writel(__RESET_AND_START_SCLK_LCLK_PLLS,
                                rb + CT2_CSI_FW_CTL_SET_REG);

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (r32 & __RESET_AND_START_SCLK_LCLK_PLLS)
                                break;
                }
                BUG_ON(!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS));

                for (i = 0; i < BFA_IOC_PLL_POLL; i++) {
                        r32 = readl(rb + CT2_APP_PLL_LCLK_CTL_REG);
                        if (!(r32 & __RESET_AND_START_SCLK_LCLK_PLLS))
                                break;
                }
                BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
                udelay(1000);

                r32 = readl(rb + CT2_CSI_FW_CTL_REG);
                BUG_ON(r32 & __RESET_AND_START_SCLK_LCLK_PLLS);
        } else {
                writel(__HALT_NFC_CONTROLLER, (rb + CT2_NFC_CSR_SET_REG));
                for (i = 0; i < CT2_NFC_MAX_DELAY; i++) {
                        r32 = readl(rb + CT2_NFC_CSR_SET_REG);
                        if (r32 & __NFC_CONTROLLER_HALTED)
                                break;
                        udelay(1000);
                }

                bfa_ioc_ct2_mac_reset(rb);
                bfa_ioc_ct2_sclk_init(rb);
                bfa_ioc_ct2_lclk_init(rb);

                /* release soft reset on s_clk & l_clk */
                r32 = readl((rb + CT2_APP_PLL_SCLK_CTL_REG));
                writel(r32 & ~__APP_PLL_SCLK_LOGIC_SOFT_RESET,
                                rb + CT2_APP_PLL_SCLK_CTL_REG);
                r32 = readl((rb + CT2_APP_PLL_LCLK_CTL_REG));
                writel(r32 & ~__APP_PLL_LCLK_LOGIC_SOFT_RESET,
                                rb + CT2_APP_PLL_LCLK_CTL_REG);
        }

        /* Announce flash device presence, if flash was corrupted. */
        if (wgn == (__WGN_READY | __GLBL_PF_VF_CFG_RDY)) {
                r32 = readl((rb + PSS_GPIO_OUT_REG));
                writel(r32 & ~1, rb + PSS_GPIO_OUT_REG);
                r32 = readl((rb + PSS_GPIO_OE_REG));
                writel(r32 | 1, rb + PSS_GPIO_OE_REG);
        }

        /*
         * Mask the interrupts and clear any
         * pending interrupts left by BIOS/EFI
         */
        writel(1, (rb + CT2_LPU0_HOSTFN_MBOX0_MSK));
        writel(1, (rb + CT2_LPU1_HOSTFN_MBOX0_MSK));

        /* For first time initialization, no need to clear interrupts */
        r32 = readl(rb + HOST_SEM5_REG);
        if (r32 & 0x1) {
                r32 = readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
                if (r32 == 1) {
                        writel(1, (rb + CT2_LPU0_HOSTFN_CMD_STAT));
                        readl((rb + CT2_LPU0_HOSTFN_CMD_STAT));
                }
                r32 = readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
                if (r32 == 1) {
                        writel(1, (rb + CT2_LPU1_HOSTFN_CMD_STAT));
                        readl((rb + CT2_LPU1_HOSTFN_CMD_STAT));
                }
        }

        bfa_ioc_ct2_mem_init(rb);

        writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC0_STATE_REG));
        writel(BFI_IOC_UNINIT, (rb + CT2_BFA_IOC1_STATE_REG));
        return BFA_STATUS_OK;
}