1 /* bnx2x_sp.c: Broadcom Everest network driver.
2  *
3  * Copyright 2011 Broadcom Corporation
4  *
5  * Unless you and Broadcom execute a separate written software license
6  * agreement governing use of this software, this software is licensed to you
7  * under the terms of the GNU General Public License version 2, available
8  * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9  *
10  * Notwithstanding the above, under no circumstances may you combine this
11  * software in any way with any other Broadcom software provided under a
12  * license other than the GPL, without Broadcom's express prior written
13  * consent.
14  *
15  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16  * Written by: Vladislav Zolotarov
17  *
18  */
19 #include <linux/version.h>
20 #include <linux/module.h>
21 #include <linux/crc32.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/crc32c.h>
25 #include "bnx2x.h"
26 #include "bnx2x_cmn.h"
27 #include "bnx2x_sp.h"
28
29 #define BNX2X_MAX_EMUL_MULTI            16
30
31 /**** Exe Queue interfaces ****/
32
33 /**
34  * bnx2x_exe_queue_init - init the Exe Queue object
35  *
36  * @o:          pointer to the object
37  * @exe_len:    length of a single execution chunk
38  * @owner:      pointer to the owner
39  * @validate:   validate function pointer
40  * @optimize:   optimize function pointer
41  * @exec:       execute function pointer
42  * @get:        get function pointer
43  */
44 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
45                                         struct bnx2x_exe_queue_obj *o,
46                                         int exe_len,
47                                         union bnx2x_qable_obj *owner,
48                                         exe_q_validate validate,
49                                         exe_q_optimize optimize,
50                                         exe_q_execute exec,
51                                         exe_q_get get)
52 {
53         memset(o, 0, sizeof(*o));
54
55         INIT_LIST_HEAD(&o->exe_queue);
56         INIT_LIST_HEAD(&o->pending_comp);
57
58         spin_lock_init(&o->lock);
59
60         o->exe_chunk_len = exe_len;
61         o->owner         = owner;
62
63         /* Owner specific callbacks */
64         o->validate      = validate;
65         o->optimize      = optimize;
66         o->execute       = exec;
67         o->get           = get;
68
69         DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
70                          "length of %d\n", exe_len);
71 }
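
/*
 * Illustrative sketch (not part of the driver): a MAC object would typically
 * wire up its execution queue roughly like this, with the VLAN-MAC callbacks
 * defined further down in this file supplying the object-specific logic:
 *
 *	bnx2x_exe_queue_init(bp, &o->exe_queue, 1, (union bnx2x_qable_obj *)o,
 *			     bnx2x_validate_vlan_mac,
 *			     bnx2x_optimize_vlan_mac,
 *			     bnx2x_execute_vlan_mac,
 *			     bnx2x_exeq_get_mac);
 */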
72
73 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
74                                              struct bnx2x_exeq_elem *elem)
75 {
76         DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
77         kfree(elem);
78 }
79
80 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
81 {
82         struct bnx2x_exeq_elem *elem;
83         int cnt = 0;
84
85         spin_lock_bh(&o->lock);
86
87         list_for_each_entry(elem, &o->exe_queue, link)
88                 cnt++;
89
90         spin_unlock_bh(&o->lock);
91
92         return cnt;
93 }
94
95 /**
96  * bnx2x_exe_queue_add - add a new element to the execution queue
97  *
98  * @bp:         driver handle
99  * @o:          queue
100  * @cmd:        new command to add
101  * @restore:    true - do not optimize the command
102  *
103  * If the element is optimized away or is illegal, it is freed.
104  */
105 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
106                                       struct bnx2x_exe_queue_obj *o,
107                                       struct bnx2x_exeq_elem *elem,
108                                       bool restore)
109 {
110         int rc;
111
112         spin_lock_bh(&o->lock);
113
114         if (!restore) {
115                 /* Try to optimize this command away */
116                 rc = o->optimize(bp, o->owner, elem);
117                 if (rc)
118                         goto free_and_exit;
119
120                 /* Check if this request is ok */
121                 rc = o->validate(bp, o->owner, elem);
122                 if (rc) {
123                         BNX2X_ERR("Preamble failed: %d\n", rc);
124                         goto free_and_exit;
125                 }
126         }
127
128         /* If so, add it to the execution queue */
129         list_add_tail(&elem->link, &o->exe_queue);
130
131         spin_unlock_bh(&o->lock);
132
133         return 0;
134
135 free_and_exit:
136         bnx2x_exe_queue_free_elem(bp, elem);
137
138         spin_unlock_bh(&o->lock);
139
140         return rc;
141
142 }
143
144 static inline void __bnx2x_exe_queue_reset_pending(
145         struct bnx2x *bp,
146         struct bnx2x_exe_queue_obj *o)
147 {
148         struct bnx2x_exeq_elem *elem;
149
150         while (!list_empty(&o->pending_comp)) {
151                 elem = list_first_entry(&o->pending_comp,
152                                         struct bnx2x_exeq_elem, link);
153
154                 list_del(&elem->link);
155                 bnx2x_exe_queue_free_elem(bp, elem);
156         }
157 }
158
159 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
160                                                  struct bnx2x_exe_queue_obj *o)
161 {
162
163         spin_lock_bh(&o->lock);
164
165         __bnx2x_exe_queue_reset_pending(bp, o);
166
167         spin_unlock_bh(&o->lock);
168
169 }
170
171 /**
172  * bnx2x_exe_queue_step - execute one execution chunk atomically
173  *
174  * @bp:                 driver handle
175  * @o:                  queue
176  * @ramrod_flags:       flags
177  *
178  * (Atomicity is ensured using the exe_queue->lock).
179  */
180 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
181                                        struct bnx2x_exe_queue_obj *o,
182                                        unsigned long *ramrod_flags)
183 {
184         struct bnx2x_exeq_elem *elem, spacer;
185         int cur_len = 0, rc;
186
187         memset(&spacer, 0, sizeof(spacer));
188
189         spin_lock_bh(&o->lock);
190
191         /*
192          * Next step should not be performed until the current is finished,
193          * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
194          * properly clear object internals without sending any command to the FW
195          * which also implies there won't be any completion to clear the
196          * 'pending' list.
197          */
198         if (!list_empty(&o->pending_comp)) {
199                 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
200                         DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
201                                          "resetting pending_comp\n");
202                         __bnx2x_exe_queue_reset_pending(bp, o);
203                 } else {
204                         spin_unlock_bh(&o->lock);
205                         return 1;
206                 }
207         }
208
209         /*
210          * Run through the pending commands list and create a next
211          * execution chunk.
212          */
213         while (!list_empty(&o->exe_queue)) {
214                 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
215                                         link);
216                 WARN_ON(!elem->cmd_len);
217
218                 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
219                         cur_len += elem->cmd_len;
220                         /*
221                          * Prevent both lists from being empty when moving an
222                          * element. This allows bnx2x_exe_queue_empty() to be
223                          * called without taking the lock.
224                          */
225                         list_add_tail(&spacer.link, &o->pending_comp);
226                         mb();
227                         list_del(&elem->link);
228                         list_add_tail(&elem->link, &o->pending_comp);
229                         list_del(&spacer.link);
230                 } else
231                         break;
232         }
233
234         /* Sanity check */
235         if (!cur_len) {
236                 spin_unlock_bh(&o->lock);
237                 return 0;
238         }
239
240         rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
241         if (rc < 0)
242                 /*
243                  * In case of an error, put the commands back on the queue
244                  * and reset pending_comp.
245                  */
246                 list_splice_init(&o->pending_comp, &o->exe_queue);
247         else if (!rc)
248                 /*
249                  * If zero is returned, it means there are no outstanding pending
250                  * completions and we may dismiss the pending list.
251                  */
252                 __bnx2x_exe_queue_reset_pending(bp, o);
253
254         spin_unlock_bh(&o->lock);
255         return rc;
256 }
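
/*
 * Note on bnx2x_exe_queue_step() return values: a negative value means the
 * execute callback failed and the chunk was returned to the execution queue;
 * zero means the chunk was consumed and no FW completion is expected; a
 * positive value means a completion is still pending, either for a
 * previously submitted chunk or for the one just sent.
 */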
257
258 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
259 {
260         bool empty = list_empty(&o->exe_queue);
261
262         /* Don't reorder!!! */
263         mb();
264
265         return empty && list_empty(&o->pending_comp);
266 }
267
268 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
269         struct bnx2x *bp)
270 {
271         DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
272         return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
273 }
274
275 /************************ raw_obj functions ***********************************/
276 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
277 {
278         return !!test_bit(o->state, o->pstate);
279 }
280
281 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
282 {
283         smp_mb__before_clear_bit();
284         clear_bit(o->state, o->pstate);
285         smp_mb__after_clear_bit();
286 }
287
288 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
289 {
290         smp_mb__before_clear_bit();
291         set_bit(o->state, o->pstate);
292         smp_mb__after_clear_bit();
293 }
294
295 /**
296  * bnx2x_state_wait - wait until the given bit (state) is cleared
297  *
298  * @bp:         device handle
299  * @state:      state which is to be cleared
300  * @pstate:     pointer to the state buffer
301  *
302  */
303 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
304                                    unsigned long *pstate)
305 {
306         /* can take a while if any port is running */
307         int cnt = 5000;
308
309
310         if (CHIP_REV_IS_EMUL(bp))
311                 cnt *= 20;
312
313         DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
314
315         might_sleep();
316         while (cnt--) {
317                 if (!test_bit(state, pstate)) {
318 #ifdef BNX2X_STOP_ON_ERROR
319                         DP(BNX2X_MSG_SP, "exit  (cnt %d)\n", 5000 - cnt);
320 #endif
321                         return 0;
322                 }
323
324                 usleep_range(1000, 1000);
325
326                 if (bp->panic)
327                         return -EIO;
328         }
329
330         /* timeout! */
331         BNX2X_ERR("timeout waiting for state %d\n", state);
332 #ifdef BNX2X_STOP_ON_ERROR
333         bnx2x_panic();
334 #endif
335
336         return -EBUSY;
337 }
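
/*
 * bnx2x_state_wait() above polls the pending bit roughly once a millisecond
 * and gives up after about 5 seconds (20 times longer on emulation),
 * returning -EBUSY on timeout.
 */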
338
339 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
340 {
341         return bnx2x_state_wait(bp, raw->state, raw->pstate);
342 }
343
344 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
345 /* credit handling callbacks */
346 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
347 {
348         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
349
350         WARN_ON(!mp);
351
352         return mp->get_entry(mp, offset);
353 }
354
355 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
356 {
357         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
358
359         WARN_ON(!mp);
360
361         return mp->get(mp, 1);
362 }
363
364 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
365 {
366         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
367
368         WARN_ON(!vp);
369
370         return vp->get_entry(vp, offset);
371 }
372
373 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
374 {
375         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
376
377         WARN_ON(!vp);
378
379         return vp->get(vp, 1);
380 }
381
382 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
383 {
384         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
385         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
386
387         if (!mp->get(mp, 1))
388                 return false;
389
390         if (!vp->get(vp, 1)) {
391                 mp->put(mp, 1);
392                 return false;
393         }
394
395         return true;
396 }
397
398 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
399 {
400         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
401
402         return mp->put_entry(mp, offset);
403 }
404
405 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
406 {
407         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
408
409         return mp->put(mp, 1);
410 }
411
412 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
413 {
414         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
415
416         return vp->put_entry(vp, offset);
417 }
418
419 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
420 {
421         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
422
423         return vp->put(vp, 1);
424 }
425
426 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
427 {
428         struct bnx2x_credit_pool_obj *mp = o->macs_pool;
429         struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
430
431         if (!mp->put(mp, 1))
432                 return false;
433
434         if (!vp->put(vp, 1)) {
435                 mp->get(mp, 1);
436                 return false;
437         }
438
439         return true;
440 }
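
/*
 * Note: a VLAN-MAC pair consumes one credit from the MAC pool and one from
 * the VLAN pool. The pair get/put helpers above roll the first pool's
 * operation back if the second one fails, so the two pools never go out of
 * sync.
 */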
441
442 /* check_add() callbacks */
443 static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
444                                union bnx2x_classification_ramrod_data *data)
445 {
446         struct bnx2x_vlan_mac_registry_elem *pos;
447
448         if (!is_valid_ether_addr(data->mac.mac))
449                 return -EINVAL;
450
451         /* Check if a requested MAC already exists */
452         list_for_each_entry(pos, &o->head, link)
453                 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
454                         return -EEXIST;
455
456         return 0;
457 }
458
459 static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
460                                 union bnx2x_classification_ramrod_data *data)
461 {
462         struct bnx2x_vlan_mac_registry_elem *pos;
463
464         list_for_each_entry(pos, &o->head, link)
465                 if (data->vlan.vlan == pos->u.vlan.vlan)
466                         return -EEXIST;
467
468         return 0;
469 }
470
471 static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
472                                    union bnx2x_classification_ramrod_data *data)
473 {
474         struct bnx2x_vlan_mac_registry_elem *pos;
475
476         list_for_each_entry(pos, &o->head, link)
477                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
478                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
479                              ETH_ALEN)))
480                         return -EEXIST;
481
482         return 0;
483 }
484
485
486 /* check_del() callbacks */
487 static struct bnx2x_vlan_mac_registry_elem *
488         bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
489                             union bnx2x_classification_ramrod_data *data)
490 {
491         struct bnx2x_vlan_mac_registry_elem *pos;
492
493         list_for_each_entry(pos, &o->head, link)
494                 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
495                         return pos;
496
497         return NULL;
498 }
499
500 static struct bnx2x_vlan_mac_registry_elem *
501         bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
502                              union bnx2x_classification_ramrod_data *data)
503 {
504         struct bnx2x_vlan_mac_registry_elem *pos;
505
506         list_for_each_entry(pos, &o->head, link)
507                 if (data->vlan.vlan == pos->u.vlan.vlan)
508                         return pos;
509
510         return NULL;
511 }
512
513 static struct bnx2x_vlan_mac_registry_elem *
514         bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
515                                  union bnx2x_classification_ramrod_data *data)
516 {
517         struct bnx2x_vlan_mac_registry_elem *pos;
518
519         list_for_each_entry(pos, &o->head, link)
520                 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
521                     (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
522                              ETH_ALEN)))
523                         return pos;
524
525         return NULL;
526 }
527
528 /* check_move() callback */
529 static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
530                              struct bnx2x_vlan_mac_obj *dst_o,
531                              union bnx2x_classification_ramrod_data *data)
532 {
533         struct bnx2x_vlan_mac_registry_elem *pos;
534         int rc;
535
536         /* Check if we can delete the requested configuration from the first
537          * object.
538          */
539         pos = src_o->check_del(src_o, data);
540
541         /* Check if the configuration can be added */
542         rc = dst_o->check_add(dst_o, data);
543
544         /* If this classification cannot be added (it is already set)
545          * or cannot be deleted, return an error.
546          */
547         if (rc || !pos)
548                 return false;
549
550         return true;
551 }
552
553 static bool bnx2x_check_move_always_err(
554         struct bnx2x_vlan_mac_obj *src_o,
555         struct bnx2x_vlan_mac_obj *dst_o,
556         union bnx2x_classification_ramrod_data *data)
557 {
558         return false;
559 }
560
561
562 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
563 {
564         struct bnx2x_raw_obj *raw = &o->raw;
565         u8 rx_tx_flag = 0;
566
567         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
568             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
569                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
570
571         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
572             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
573                 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
574
575         return rx_tx_flag;
576 }
577
578 /* LLH CAM line allocations */
579 enum {
580         LLH_CAM_ISCSI_ETH_LINE = 0,
581         LLH_CAM_ETH_LINE,
582         LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
583 };
584
585 static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
586                                  bool add, unsigned char *dev_addr, int index)
587 {
588         u32 wb_data[2];
589         u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
590                          NIG_REG_LLH0_FUNC_MEM;
591
592         if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
593                 return;
594
595         DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
596                          (add ? "ADD" : "DELETE"), index);
597
598         if (add) {
599                 /* LLH_FUNC_MEM is a u64 WB register */
600                 reg_offset += 8*index;
601
602                 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
603                               (dev_addr[4] <<  8) |  dev_addr[5]);
604                 wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);
605
606                 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
607         }
608
609         REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
610                                   NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
611 }
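
/*
 * The LLH_FUNC_MEM entry written above is a 64-bit wide-bus (WB) register:
 * wb_data[0] carries the four low MAC bytes and wb_data[1] the two high
 * ones. The per-entry enable bit is always updated with the 'add' value,
 * while the MAC itself is only (re)written on ADD.
 */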
612
613 /**
614  * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
615  *
616  * @bp:         device handle
617  * @o:          queue for which we want to configure this rule
618  * @add:        if true the command is an ADD command, DEL otherwise
619  * @opcode:     CLASSIFY_RULE_OPCODE_XXX
620  * @hdr:        pointer to a header to setup
621  *
622  */
623 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
624         struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
625         struct eth_classify_cmd_header *hdr)
626 {
627         struct bnx2x_raw_obj *raw = &o->raw;
628
629         hdr->client_id = raw->cl_id;
630         hdr->func_id = raw->func_id;
631
632         /* Rx and/or Tx (internal switching) configuration? */
633         hdr->cmd_general_data |=
634                 bnx2x_vlan_mac_get_rx_tx_flag(o);
635
636         if (add)
637                 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
638
639         hdr->cmd_general_data |=
640                 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
641 }
642
643 /**
644  * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
645  *
646  * @cid:        connection id
647  * @type:       BNX2X_FILTER_XXX_PENDING
648  * @hdr:        pointer to the header to set up
649  * @rule_cnt:   number of rules configured in the ramrod data buffer
650  *
651  * Currently we always configure one rule; the echo field is set to contain
652  * the CID and the opcode type.
653  */
654 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
655                                 struct eth_classify_header *hdr, int rule_cnt)
656 {
657         hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
658         hdr->rule_cnt = (u8)rule_cnt;
659 }
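
/*
 * The 'echo' field built above packs the CID into the low bits
 * (BNX2X_SWCID_MASK) and the BNX2X_FILTER_XXX_PENDING type above
 * BNX2X_SWCID_SHIFT, so a single 32-bit value identifies both the connection
 * and the pending command type when the completion arrives.
 */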
660
661
662 /* hw_config() callbacks */
663 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
664                                  struct bnx2x_vlan_mac_obj *o,
665                                  struct bnx2x_exeq_elem *elem, int rule_idx,
666                                  int cam_offset)
667 {
668         struct bnx2x_raw_obj *raw = &o->raw;
669         struct eth_classify_rules_ramrod_data *data =
670                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
671         int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
672         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
673         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
674         unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
675         u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
676
677         /*
678          * Set LLH CAM entry: currently only iSCSI and ETH macs are
679          * relevant. In addition, current implementation is tuned for a
680          * single ETH MAC.
681          *
682          * When PF configuration of multiple unicast ETH MACs in
683          * switch-independent mode is required (NetQ, multiple netdev
684          * MACs, etc.), consider making better use of the 8 per-function
685          * MAC entries in the LLH register. There are also the
686          * NIG_REG_P[01]_LLH_FUNC_MEM2 registers, which bring the
687          * total number of CAM entries to 16.
688          *
689          * Currently we won't configure NIG for MACs other than a primary ETH
690          * MAC and iSCSI L2 MAC.
691          *
692          * If this MAC is moving from one Queue to another, no need to change
693          * NIG configuration.
694          */
695         if (cmd != BNX2X_VLAN_MAC_MOVE) {
696                 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
697                         bnx2x_set_mac_in_nig(bp, add, mac,
698                                              LLH_CAM_ISCSI_ETH_LINE);
699                 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
700                         bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
701         }
702
703         /* Reset the ramrod data buffer for the first rule */
704         if (rule_idx == 0)
705                 memset(data, 0, sizeof(*data));
706
707         /* Setup a command header */
708         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
709                                       &rule_entry->mac.header);
710
711         DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
712                          "Queue %d\n", (add ? "add" : "delete"),
713                          BNX2X_MAC_PRN_LIST(mac), raw->cl_id);
714
715         /* Set a MAC itself */
716         bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
717                               &rule_entry->mac.mac_mid,
718                               &rule_entry->mac.mac_lsb, mac);
719
720         /* MOVE: Add a rule that will add this MAC to the target Queue */
721         if (cmd == BNX2X_VLAN_MAC_MOVE) {
722                 rule_entry++;
723                 rule_cnt++;
724
725                 /* Setup ramrod data */
726                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
727                                         elem->cmd_data.vlan_mac.target_obj,
728                                               true, CLASSIFY_RULE_OPCODE_MAC,
729                                               &rule_entry->mac.header);
730
731                 /* Set a MAC itself */
732                 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
733                                       &rule_entry->mac.mac_mid,
734                                       &rule_entry->mac.mac_lsb, mac);
735         }
736
737         /* Set the ramrod data header */
738         /* TODO: take this to the higher level in order to prevent multiple
739                  writing */
740         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
741                                         rule_cnt);
742 }
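
/*
 * Note that a MOVE command occupies two consecutive rules in the ramrod
 * data: the first rule, built with the source object's header, removes the
 * classification from the source Queue; the second, built with the target
 * object's header, adds it to the destination Queue. This is why the
 * execution code below advances the rule index by two for MOVE commands.
 */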
743
744 /**
745  * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
746  *
747  * @bp:         device handle
748  * @o:          queue
749  * @type:
750  * @cam_offset: offset in cam memory
751  * @hdr:        pointer to a header to setup
752  *
753  * E1/E1H
754  */
755 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
756         struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
757         struct mac_configuration_hdr *hdr)
758 {
759         struct bnx2x_raw_obj *r = &o->raw;
760
761         hdr->length = 1;
762         hdr->offset = (u8)cam_offset;
763         hdr->client_id = 0xff;
764         hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
765 }
766
767 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
768         struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
769         u16 vlan_id, struct mac_configuration_entry *cfg_entry)
770 {
771         struct bnx2x_raw_obj *r = &o->raw;
772         u32 cl_bit_vec = (1 << r->cl_id);
773
774         cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
775         cfg_entry->pf_id = r->func_id;
776         cfg_entry->vlan_id = cpu_to_le16(vlan_id);
777
778         if (add) {
779                 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
780                          T_ETH_MAC_COMMAND_SET);
781                 SET_FLAG(cfg_entry->flags,
782                          MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
783
784                 /* Set a MAC in a ramrod data */
785                 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
786                                       &cfg_entry->middle_mac_addr,
787                                       &cfg_entry->lsb_mac_addr, mac);
788         } else
789                 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
790                          T_ETH_MAC_COMMAND_INVALIDATE);
791 }
792
793 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
794         struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
795         u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
796 {
797         struct mac_configuration_entry *cfg_entry = &config->config_table[0];
798         struct bnx2x_raw_obj *raw = &o->raw;
799
800         bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
801                                          &config->hdr);
802         bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
803                                          cfg_entry);
804
805         DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
806                          (add ? "setting" : "clearing"),
807                          BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
808 }
809
810 /**
811  * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
812  *
813  * @bp:         device handle
814  * @o:          bnx2x_vlan_mac_obj
815  * @elem:       bnx2x_exeq_elem
816  * @rule_idx:   rule_idx
817  * @cam_offset: cam_offset
818  */
819 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
820                                   struct bnx2x_vlan_mac_obj *o,
821                                   struct bnx2x_exeq_elem *elem, int rule_idx,
822                                   int cam_offset)
823 {
824         struct bnx2x_raw_obj *raw = &o->raw;
825         struct mac_configuration_cmd *config =
826                 (struct mac_configuration_cmd *)(raw->rdata);
827         /*
828          * 57710 and 57711 do not support MOVE command,
829          * so it's either ADD or DEL
830          */
831         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
832                 true : false;
833
834         /* Reset the ramrod data buffer */
835         memset(config, 0, sizeof(*config));
836
837         bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
838                                      cam_offset, add,
839                                      elem->cmd_data.vlan_mac.u.mac.mac, 0,
840                                      ETH_VLAN_FILTER_ANY_VLAN, config);
841 }
842
843 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
844                                   struct bnx2x_vlan_mac_obj *o,
845                                   struct bnx2x_exeq_elem *elem, int rule_idx,
846                                   int cam_offset)
847 {
848         struct bnx2x_raw_obj *raw = &o->raw;
849         struct eth_classify_rules_ramrod_data *data =
850                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
851         int rule_cnt = rule_idx + 1;
852         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
853         int cmd = elem->cmd_data.vlan_mac.cmd;
854         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
855         u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
856
857         /* Reset the ramrod data buffer for the first rule */
858         if (rule_idx == 0)
859                 memset(data, 0, sizeof(*data));
860
861         /* Set a rule header */
862         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
863                                       &rule_entry->vlan.header);
864
865         DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
866                          vlan);
867
868         /* Set a VLAN itself */
869         rule_entry->vlan.vlan = cpu_to_le16(vlan);
870
871         /* MOVE: Add a rule that will add this MAC to the target Queue */
872         if (cmd == BNX2X_VLAN_MAC_MOVE) {
873                 rule_entry++;
874                 rule_cnt++;
875
876                 /* Setup ramrod data */
877                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
878                                         elem->cmd_data.vlan_mac.target_obj,
879                                               true, CLASSIFY_RULE_OPCODE_VLAN,
880                                               &rule_entry->vlan.header);
881
882                 /* Set a VLAN itself */
883                 rule_entry->vlan.vlan = cpu_to_le16(vlan);
884         }
885
886         /* Set the ramrod data header */
887         /* TODO: take this to the higher level in order to prevent multiple
888                  writing */
889         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
890                                         rule_cnt);
891 }
892
893 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
894                                       struct bnx2x_vlan_mac_obj *o,
895                                       struct bnx2x_exeq_elem *elem,
896                                       int rule_idx, int cam_offset)
897 {
898         struct bnx2x_raw_obj *raw = &o->raw;
899         struct eth_classify_rules_ramrod_data *data =
900                 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
901         int rule_cnt = rule_idx + 1;
902         union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
903         int cmd = elem->cmd_data.vlan_mac.cmd;
904         bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
905         u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
906         u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
907
908
909         /* Reset the ramrod data buffer for the first rule */
910         if (rule_idx == 0)
911                 memset(data, 0, sizeof(*data));
912
913         /* Set a rule header */
914         bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
915                                       &rule_entry->pair.header);
916
917         /* Set VLAN and MAC themselves */
918         rule_entry->pair.vlan = cpu_to_le16(vlan);
919         bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
920                               &rule_entry->pair.mac_mid,
921                               &rule_entry->pair.mac_lsb, mac);
922
923         /* MOVE: Add a rule that will add this MAC to the target Queue */
924         if (cmd == BNX2X_VLAN_MAC_MOVE) {
925                 rule_entry++;
926                 rule_cnt++;
927
928                 /* Setup ramrod data */
929                 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
930                                         elem->cmd_data.vlan_mac.target_obj,
931                                               true, CLASSIFY_RULE_OPCODE_PAIR,
932                                               &rule_entry->pair.header);
933
934                 /* Set the VLAN and MAC for the target rule */
935                 rule_entry->pair.vlan = cpu_to_le16(vlan);
936                 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
937                                       &rule_entry->pair.mac_mid,
938                                       &rule_entry->pair.mac_lsb, mac);
939         }
940
941         /* Set the ramrod data header */
942         /* TODO: take this to the higher level in order to prevent multiple
943                  writing */
944         bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
945                                         rule_cnt);
946 }
947
948 /**
949  * bnx2x_set_one_vlan_mac_e1h - fill a single VLAN-MAC rule ramrod data
950  *
951  * @bp:         device handle
952  * @o:          bnx2x_vlan_mac_obj
953  * @elem:       bnx2x_exeq_elem
954  * @rule_idx:   rule_idx
955  * @cam_offset: cam_offset
956  */
957 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
958                                        struct bnx2x_vlan_mac_obj *o,
959                                        struct bnx2x_exeq_elem *elem,
960                                        int rule_idx, int cam_offset)
961 {
962         struct bnx2x_raw_obj *raw = &o->raw;
963         struct mac_configuration_cmd *config =
964                 (struct mac_configuration_cmd *)(raw->rdata);
965         /*
966          * 57710 and 57711 do not support MOVE command,
967          * so it's either ADD or DEL
968          */
969         bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
970                 true : false;
971
972         /* Reset the ramrod data buffer */
973         memset(config, 0, sizeof(*config));
974
975         bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
976                                      cam_offset, add,
977                                      elem->cmd_data.vlan_mac.u.vlan_mac.mac,
978                                      elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
979                                      ETH_VLAN_FILTER_CLASSIFY, config);
980 }
981
982 #define list_next_entry(pos, member) \
983         list_entry((pos)->member.next, typeof(*(pos)), member)
984
985 /**
986  * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
987  *
988  * @bp:         device handle
989  * @p:          command parameters
990  * @ppos:       pointer to the cookie
991  *
992  * reconfigure next MAC/VLAN/VLAN-MAC element from the
993  * previously configured elements list.
994  *
995  * Of the command parameters, only the RAMROD_COMP_WAIT bit in ramrod_flags
996  * is taken into account.
997  *
998  * The cookie should be given back in the next call to make the function
999  * handle the next element. If *ppos is set to NULL it will restart the
1000  * iterator. If the returned *ppos == NULL, the last element has been
1001  * handled.
1002  *
1003  */
1004 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1005                            struct bnx2x_vlan_mac_ramrod_params *p,
1006                            struct bnx2x_vlan_mac_registry_elem **ppos)
1007 {
1008         struct bnx2x_vlan_mac_registry_elem *pos;
1009         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1010
1011         /* If list is empty - there is nothing to do here */
1012         if (list_empty(&o->head)) {
1013                 *ppos = NULL;
1014                 return 0;
1015         }
1016
1017         /* make a step... */
1018         if (*ppos == NULL)
1019                 *ppos = list_first_entry(&o->head,
1020                                          struct bnx2x_vlan_mac_registry_elem,
1021                                          link);
1022         else
1023                 *ppos = list_next_entry(*ppos, link);
1024
1025         pos = *ppos;
1026
1027         /* If it's the last step - return NULL */
1028         if (list_is_last(&pos->link, &o->head))
1029                 *ppos = NULL;
1030
1031         /* Prepare a 'user_req' */
1032         memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1033
1034         /* Set the command */
1035         p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1036
1037         /* Set vlan_mac_flags */
1038         p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1039
1040         /* Set a restore bit */
1041         __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1042
1043         return bnx2x_config_vlan_mac(bp, p);
1044 }
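
/*
 * Illustrative sketch (not part of the driver): replaying the whole registry
 * with the iterator above, assuming 'p' already references the relevant
 * vlan_mac object and carries RAMROD_COMP_WAIT in its ramrod_flags, would
 * look roughly like this:
 *
 *	struct bnx2x_vlan_mac_registry_elem *pos = NULL;
 *	int rc;
 *
 *	do {
 *		rc = bnx2x_vlan_mac_restore(bp, &p, &pos);
 *		if (rc < 0)
 *			break;
 *	} while (pos);
 */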
1045
1046 /*
1047  * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1048  * pointer to an element matching specific criteria, or NULL if no such
1049  * element has been found.
1050  */
1051 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1052         struct bnx2x_exe_queue_obj *o,
1053         struct bnx2x_exeq_elem *elem)
1054 {
1055         struct bnx2x_exeq_elem *pos;
1056         struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1057
1058         /* Check the commands pending for execution */
1059         list_for_each_entry(pos, &o->exe_queue, link)
1060                 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1061                               sizeof(*data)) &&
1062                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1063                         return pos;
1064
1065         return NULL;
1066 }
1067
1068 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1069         struct bnx2x_exe_queue_obj *o,
1070         struct bnx2x_exeq_elem *elem)
1071 {
1072         struct bnx2x_exeq_elem *pos;
1073         struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1074
1075         /* Check the commands pending for execution */
1076         list_for_each_entry(pos, &o->exe_queue, link)
1077                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1078                               sizeof(*data)) &&
1079                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1080                         return pos;
1081
1082         return NULL;
1083 }
1084
1085 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1086         struct bnx2x_exe_queue_obj *o,
1087         struct bnx2x_exeq_elem *elem)
1088 {
1089         struct bnx2x_exeq_elem *pos;
1090         struct bnx2x_vlan_mac_ramrod_data *data =
1091                 &elem->cmd_data.vlan_mac.u.vlan_mac;
1092
1093         /* Check the commands pending for execution */
1094         list_for_each_entry(pos, &o->exe_queue, link)
1095                 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1096                               sizeof(*data)) &&
1097                     (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1098                         return pos;
1099
1100         return NULL;
1101 }
1102
1103 /**
1104  * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1105  *
1106  * @bp:         device handle
1107  * @qo:         bnx2x_qable_obj
1108  * @elem:       bnx2x_exeq_elem
1109  *
1110  * Checks that the requested configuration can be added. If yes and if
1111  * requested, consume CAM credit.
1112  *
1113  * The 'validate' is run after the 'optimize'.
1114  *
1115  */
1116 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1117                                               union bnx2x_qable_obj *qo,
1118                                               struct bnx2x_exeq_elem *elem)
1119 {
1120         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1121         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1122         int rc;
1123
1124         /* Check the registry */
1125         rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
1126         if (rc) {
1127                 DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
1128                                  "current registry state\n");
1129                 return rc;
1130         }
1131
1132         /*
1133          * Check if there is a pending ADD command for this
1134          * MAC/VLAN/VLAN-MAC. Return an error if there is.
1135          */
1136         if (exeq->get(exeq, elem)) {
1137                 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1138                 return -EEXIST;
1139         }
1140
1141         /*
1142          * TODO: Check the pending MOVE from other objects where this
1143          * object is a destination object.
1144          */
1145
1146         /* Consume the credit if not requested not to */
1147         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1148                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1149             o->get_credit(o)))
1150                 return -EINVAL;
1151
1152         return 0;
1153 }
1154
1155 /**
1156  * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1157  *
1158  * @bp:         device handle
1159  * @qo:         qable object to check
1160  * @elem:       element that needs to be deleted
1161  *
1162  * Checks that the requested configuration can be deleted. If yes and if
1163  * requested, returns a CAM credit.
1164  *
1165  * The 'validate' is run after the 'optimize'.
1166  */
1167 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1168                                               union bnx2x_qable_obj *qo,
1169                                               struct bnx2x_exeq_elem *elem)
1170 {
1171         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1172         struct bnx2x_vlan_mac_registry_elem *pos;
1173         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1174         struct bnx2x_exeq_elem query_elem;
1175
1176         /* If this classification cannot be deleted (it doesn't exist),
1177          * return -EEXIST.
1178          */
1179         pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1180         if (!pos) {
1181                 DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
1182                                  "current registry state\n");
1183                 return -EEXIST;
1184         }
1185
1186         /*
1187          * Check if there are pending DEL or MOVE commands for this
1188          * MAC/VLAN/VLAN-MAC. Return an error if so.
1189          */
1190         memcpy(&query_elem, elem, sizeof(query_elem));
1191
1192         /* Check for MOVE commands */
1193         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1194         if (exeq->get(exeq, &query_elem)) {
1195                 BNX2X_ERR("There is a pending MOVE command already\n");
1196                 return -EINVAL;
1197         }
1198
1199         /* Check for DEL commands */
1200         if (exeq->get(exeq, elem)) {
1201                 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1202                 return -EEXIST;
1203         }
1204
1205         /* Return the credit to the credit pool if not requested not to */
1206         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1207                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1208             o->put_credit(o))) {
1209                 BNX2X_ERR("Failed to return a credit\n");
1210                 return -EINVAL;
1211         }
1212
1213         return 0;
1214 }
1215
1216 /**
1217  * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1218  *
1219  * @bp:         device handle
1220  * @qo:         qable object to check (source)
1221  * @elem:       element that needs to be moved
1222  *
1223  * Checks that the requested configuration can be moved. If yes and if
1224  * requested, returns a CAM credit.
1225  *
1226  * The 'validate' is run after the 'optimize'.
1227  */
1228 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1229                                                union bnx2x_qable_obj *qo,
1230                                                struct bnx2x_exeq_elem *elem)
1231 {
1232         struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1233         struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1234         struct bnx2x_exeq_elem query_elem;
1235         struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1236         struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1237
1238         /*
1239          * Check if we can perform this operation based on the current registry
1240          * state.
1241          */
1242         if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
1243                 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
1244                                  "current registry state\n");
1245                 return -EINVAL;
1246         }
1247
1248         /*
1249          * Check if there is an already pending DEL or MOVE command for the
1250          * source object or ADD command for a destination object. Return an
1251          * error if so.
1252          */
1253         memcpy(&query_elem, elem, sizeof(query_elem));
1254
1255         /* Check DEL on source */
1256         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1257         if (src_exeq->get(src_exeq, &query_elem)) {
1258                 BNX2X_ERR("There is a pending DEL command on the source "
1259                           "queue already\n");
1260                 return -EINVAL;
1261         }
1262
1263         /* Check MOVE on source */
1264         if (src_exeq->get(src_exeq, elem)) {
1265                 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1266                 return -EEXIST;
1267         }
1268
1269         /* Check ADD on destination */
1270         query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1271         if (dest_exeq->get(dest_exeq, &query_elem)) {
1272                 BNX2X_ERR("There is a pending ADD command on the "
1273                           "destination queue already\n");
1274                 return -EINVAL;
1275         }
1276
1277         /* Consume the credit if not requested not to */
1278         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1279                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1280             dest_o->get_credit(dest_o)))
1281                 return -EINVAL;
1282
1283         if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1284                        &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1285             src_o->put_credit(src_o))) {
1286                 /* return the credit taken from dest... */
1287                 dest_o->put_credit(dest_o);
1288                 return -EINVAL;
1289         }
1290
1291         return 0;
1292 }
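
/*
 * For MOVE the CAM accounting is split between the two objects: a credit is
 * consumed from the destination object's pool and one is returned to the
 * source object's pool. If returning the source credit fails, the
 * destination credit taken above is rolled back.
 */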
1293
1294 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1295                                    union bnx2x_qable_obj *qo,
1296                                    struct bnx2x_exeq_elem *elem)
1297 {
1298         switch (elem->cmd_data.vlan_mac.cmd) {
1299         case BNX2X_VLAN_MAC_ADD:
1300                 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1301         case BNX2X_VLAN_MAC_DEL:
1302                 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1303         case BNX2X_VLAN_MAC_MOVE:
1304                 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1305         default:
1306                 return -EINVAL;
1307         }
1308 }
1309
1310 /**
1311  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1312  *
1313  * @bp:         device handle
1314  * @o:          bnx2x_vlan_mac_obj
1315  *
1316  */
1317 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1318                                struct bnx2x_vlan_mac_obj *o)
1319 {
1320         int cnt = 5000, rc;
1321         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1322         struct bnx2x_raw_obj *raw = &o->raw;
1323
1324         while (cnt--) {
1325                 /* Wait for the current command to complete */
1326                 rc = raw->wait_comp(bp, raw);
1327                 if (rc)
1328                         return rc;
1329
1330                 /* Wait until there are no pending commands */
1331                 if (!bnx2x_exe_queue_empty(exeq))
1332                         usleep_range(1000, 1000);
1333                 else
1334                         return 0;
1335         }
1336
1337         return -EBUSY;
1338 }
1339
1340 /**
1341  * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1342  *
1343  * @bp:         device handle
1344  * @o:          bnx2x_vlan_mac_obj
1345  * @cqe:        completion element from the event ring
1346  * @ramrod_flags: if RAMROD_CONT is set, schedule the next execution chunk
1347  *
1348  */
1349 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1350                                    struct bnx2x_vlan_mac_obj *o,
1351                                    union event_ring_elem *cqe,
1352                                    unsigned long *ramrod_flags)
1353 {
1354         struct bnx2x_raw_obj *r = &o->raw;
1355         int rc;
1356
1357         /* Reset pending list */
1358         bnx2x_exe_queue_reset_pending(bp, &o->exe_queue);
1359
1360         /* Clear pending */
1361         r->clear_pending(r);
1362
1363         /* If ramrod failed this is most likely a SW bug */
1364         if (cqe->message.error)
1365                 return -EINVAL;
1366
1367         /* Run the next bulk of pending commands if requested */
1368         if (test_bit(RAMROD_CONT, ramrod_flags)) {
1369                 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1370                 if (rc < 0)
1371                         return rc;
1372         }
1373
1374         /* If there is more work to do return PENDING */
1375         if (!bnx2x_exe_queue_empty(&o->exe_queue))
1376                 return 1;
1377
1378         return 0;
1379 }
1380
1381 /**
1382  * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1383  *
1384  * @bp:         device handle
1385  * @qo:         bnx2x_qable_obj
1386  * @elem:       bnx2x_exeq_elem
1387  */
1388 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1389                                    union bnx2x_qable_obj *qo,
1390                                    struct bnx2x_exeq_elem *elem)
1391 {
1392         struct bnx2x_exeq_elem query, *pos;
1393         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1394         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1395
1396         memcpy(&query, elem, sizeof(query));
1397
1398         switch (elem->cmd_data.vlan_mac.cmd) {
1399         case BNX2X_VLAN_MAC_ADD:
1400                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1401                 break;
1402         case BNX2X_VLAN_MAC_DEL:
1403                 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1404                 break;
1405         default:
1406                 /* Don't handle anything other than ADD or DEL */
1407                 return 0;
1408         }
1409
1410         /* If we found the appropriate element - delete it */
1411         pos = exeq->get(exeq, &query);
1412         if (pos) {
1413
1414                 /* Return the credit of the optimized command */
1415                 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1416                               &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1417                         if ((query.cmd_data.vlan_mac.cmd ==
1418                              BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1419                                 BNX2X_ERR("Failed to return the credit for the "
1420                                           "optimized ADD command\n");
1421                                 return -EINVAL;
1422                         } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1423                                 BNX2X_ERR("Failed to recover the credit from "
1424                                           "the optimized DEL command\n");
1425                                 return -EINVAL;
1426                         }
1427                 }
1428
1429                 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1430                            (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1431                            "ADD" : "DEL");
1432
1433                 list_del(&pos->link);
1434                 bnx2x_exe_queue_free_elem(bp, pos);
1435                 return 1;
1436         }
1437
1438         return 0;
1439 }
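
/*
 * The optimization above relies on the fact that a not-yet-executed ADD
 * followed by a DEL of the same entry (or vice versa) is a no-op: the
 * complementary pending command is dropped from the execution queue and the
 * CAM credit accounting is balanced accordingly.
 */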
1440
1441 /**
1442  * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1443  *
1444  * @bp:   device handle
1445  * @o:          vlan_mac object the command is issued for
1446  * @elem:       execution queue element describing the command
1447  * @restore:    true if this is a restore flow
1448  * @re:         output: the prepared registry element
1449  *
1450  * prepare a registry element according to the current command request.
1451  */
1452 static inline int bnx2x_vlan_mac_get_registry_elem(
1453         struct bnx2x *bp,
1454         struct bnx2x_vlan_mac_obj *o,
1455         struct bnx2x_exeq_elem *elem,
1456         bool restore,
1457         struct bnx2x_vlan_mac_registry_elem **re)
1458 {
1459         int cmd = elem->cmd_data.vlan_mac.cmd;
1460         struct bnx2x_vlan_mac_registry_elem *reg_elem;
1461
1462         /* Allocate a new registry element if needed. */
1463         if (!restore &&
1464             ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1465                 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1466                 if (!reg_elem)
1467                         return -ENOMEM;
1468
1469                 /* Get a new CAM offset */
1470                 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1471                         /*
1472                          * This should never happen, because we have checked the
1473                          * CAM availability in the 'validate' step.
1474                          */
1475                         WARN_ON(1);
1476                         kfree(reg_elem);
1477                         return -EINVAL;
1478                 }
1479
1480                 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1481
1482                 /* Set a VLAN-MAC data */
1483                 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1484                           sizeof(reg_elem->u));
1485
1486                 /* Copy the flags (needed for DEL and RESTORE flows) */
1487                 reg_elem->vlan_mac_flags =
1488                         elem->cmd_data.vlan_mac.vlan_mac_flags;
1489         } else /* DEL, RESTORE */
1490                 reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1491
1492         *re = reg_elem;
1493         return 0;
1494 }
1495
1496 /**
1497  * bnx2x_execute_vlan_mac - execute vlan mac command
1498  *
1499  * @bp:                 device handle
1500  * @qo:                 queueable object (the vlan_mac object to configure)
1501  * @exe_chunk:          list of commands to execute in this ramrod
1502  * @ramrod_flags:       execution flags (e.g. RAMROD_RESTORE, RAMROD_DRV_CLR_ONLY)
1503  *
1504  * go and send a ramrod!
1505  */
1506 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1507                                   union bnx2x_qable_obj *qo,
1508                                   struct list_head *exe_chunk,
1509                                   unsigned long *ramrod_flags)
1510 {
1511         struct bnx2x_exeq_elem *elem;
1512         struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1513         struct bnx2x_raw_obj *r = &o->raw;
1514         int rc, idx = 0;
1515         bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1516         bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1517         struct bnx2x_vlan_mac_registry_elem *reg_elem;
1518         int cmd;
1519
1520         /*
1521          * If DRIVER_ONLY execution is requested, cleanup a registry
1522          * and exit. Otherwise send a ramrod to FW.
1523          */
1524         if (!drv_only) {
1525                 WARN_ON(r->check_pending(r));
1526
1527                 /* Set pending */
1528                 r->set_pending(r);
1529
1530                 /* Fill the ramrod data */
1531                 list_for_each_entry(elem, exe_chunk, link) {
1532                         cmd = elem->cmd_data.vlan_mac.cmd;
1533                         /*
1534                          * We will add to the target object in MOVE command, so
1535                          * change the object for a CAM search.
1536                          */
1537                         if (cmd == BNX2X_VLAN_MAC_MOVE)
1538                                 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1539                         else
1540                                 cam_obj = o;
1541
1542                         rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1543                                                               elem, restore,
1544                                                               &reg_elem);
1545                         if (rc)
1546                                 goto error_exit;
1547
1548                         WARN_ON(!reg_elem);
1549
1550                         /* Push a new entry into the registry */
1551                         if (!restore &&
1552                             ((cmd == BNX2X_VLAN_MAC_ADD) ||
1553                             (cmd == BNX2X_VLAN_MAC_MOVE)))
1554                                 list_add(&reg_elem->link, &cam_obj->head);
1555
1556                         /* Configure a single command in a ramrod data buffer */
1557                         o->set_one_rule(bp, o, elem, idx,
1558                                         reg_elem->cam_offset);
1559
1560                         /* MOVE command consumes 2 entries in the ramrod data */
1561                         if (cmd == BNX2X_VLAN_MAC_MOVE)
1562                                 idx += 2;
1563                         else
1564                                 idx++;
1565                 }
1566
1567                 /* Commit the data writes towards the memory */
1568                 mb();
1569
1570                 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1571                                    U64_HI(r->rdata_mapping),
1572                                    U64_LO(r->rdata_mapping),
1573                                    ETH_CONNECTION_TYPE);
1574                 if (rc)
1575                         goto error_exit;
1576         }
1577
1578         /* Now that we are done with the ramrod - clean up the registry */
1579         list_for_each_entry(elem, exe_chunk, link) {
1580                 cmd = elem->cmd_data.vlan_mac.cmd;
1581                 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1582                     (cmd == BNX2X_VLAN_MAC_MOVE)) {
1583                         reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1584
1585                         WARN_ON(!reg_elem);
1586
1587                         o->put_cam_offset(o, reg_elem->cam_offset);
1588                         list_del(&reg_elem->link);
1589                         kfree(reg_elem);
1590                 }
1591         }
1592
1593         if (!drv_only)
1594                 return 1;
1595         else
1596                 return 0;
1597
1598 error_exit:
1599         r->clear_pending(r);
1600
1601         /* Cleanup a registry in case of a failure */
1602         list_for_each_entry(elem, exe_chunk, link) {
1603                 cmd = elem->cmd_data.vlan_mac.cmd;
1604
1605                 if (cmd == BNX2X_VLAN_MAC_MOVE)
1606                         cam_obj = elem->cmd_data.vlan_mac.target_obj;
1607                 else
1608                         cam_obj = o;
1609
1610                 /* Delete all the entries newly added above */
1611                 if (!restore &&
1612                     ((cmd == BNX2X_VLAN_MAC_ADD) ||
1613                     (cmd == BNX2X_VLAN_MAC_MOVE))) {
1614                         reg_elem = o->check_del(cam_obj,
1615                                                 &elem->cmd_data.vlan_mac.u);
1616                         if (reg_elem) {
1617                                 list_del(&reg_elem->link);
1618                                 kfree(reg_elem);
1619                         }
1620                 }
1621         }
1622
1623         return rc;
1624 }
1625
1626 static inline int bnx2x_vlan_mac_push_new_cmd(
1627         struct bnx2x *bp,
1628         struct bnx2x_vlan_mac_ramrod_params *p)
1629 {
1630         struct bnx2x_exeq_elem *elem;
1631         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1632         bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1633
1634         /* Allocate the execution queue element */
1635         elem = bnx2x_exe_queue_alloc_elem(bp);
1636         if (!elem)
1637                 return -ENOMEM;
1638
1639         /* Set the command 'length' */
1640         switch (p->user_req.cmd) {
1641         case BNX2X_VLAN_MAC_MOVE:
1642                 elem->cmd_len = 2;
1643                 break;
1644         default:
1645                 elem->cmd_len = 1;
1646         }
1647
1648         /* Fill the object specific info */
1649         memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1650
1651         /* Try to add a new command to the pending list */
1652         return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1653 }
1654
1655 /**
1656  * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1657  *
1658  * @bp:   device handle
1659  * @p:   command parameters (see struct bnx2x_vlan_mac_ramrod_params)
1660  *
1661  */
1662 int bnx2x_config_vlan_mac(
1663         struct bnx2x *bp,
1664         struct bnx2x_vlan_mac_ramrod_params *p)
1665 {
1666         int rc = 0;
1667         struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1668         unsigned long *ramrod_flags = &p->ramrod_flags;
1669         bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1670         struct bnx2x_raw_obj *raw = &o->raw;
1671
1672         /*
1673          * Add new elements to the execution list for commands that require it.
1674          */
1675         if (!cont) {
1676                 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1677                 if (rc)
1678                         return rc;
1679         }
1680
1681         /*
1682          * If nothing will be executed further in this iteration we want to
1683          * return PENDING if there are pending commands
1684          */
1685         if (!bnx2x_exe_queue_empty(&o->exe_queue))
1686                 rc = 1;
1687
1688         /* Execute commands if required */
1689         if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1690             test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1691                 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1692                 if (rc < 0)
1693                         return rc;
1694         }
1695
1696         /*
1697          * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1698          * then the caller wants to wait until the last command is done.
1699          */
1700         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1701                 /*
1702                  * Wait for at most the current exe_queue length plus one
1703                  * iterations (one extra for the currently pending command).
1704                  */
1705                 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1706
1707                 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1708                        max_iterations--) {
1709
1710                         /* Wait for the current command to complete */
1711                         rc = raw->wait_comp(bp, raw);
1712                         if (rc)
1713                                 return rc;
1714
1715                         /* Make a next step */
1716                         rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1717                                                   ramrod_flags);
1718                         if (rc < 0)
1719                                 return rc;
1720                 }
1721
1722                 return 0;
1723         }
1724
1725         return rc;
1726 }
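
/*
 * Minimal usage sketch (illustration only, not a call site in this file):
 * the caller fills a bnx2x_vlan_mac_ramrod_params, requests a synchronous
 * completion and lets bnx2x_config_vlan_mac() queue and execute the command.
 * 'mac_obj' and 'new_u' below are hypothetical: an initialized vlan_mac
 * object and an already prepared classification data union.
 *
 *	struct bnx2x_vlan_mac_ramrod_params p;
 *
 *	memset(&p, 0, sizeof(p));
 *	p.vlan_mac_obj = mac_obj;
 *	p.user_req.cmd = BNX2X_VLAN_MAC_ADD;
 *	memcpy(&p.user_req.u, &new_u, sizeof(p.user_req.u));
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_vlan_mac(bp, &p);
 */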
1727
1728
1729
1730 /**
1731  * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1732  *
1733  * @bp:                 device handle
1734  * @o:                  vlan_mac object to delete the elements from
1735  * @vlan_mac_flags:     only elements configured with these flags are deleted
1736  * @ramrod_flags:       execution flags to be used for this deletion
1737  *
1738  * Returns 0 if the last operation has completed successfully and there are
1739  * no more elements left, positive value if the last operation has completed
1740  * successfully and there are more previously configured elements, negative
1741  * value if the current operation has failed.
1742  */
1743 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1744                                   struct bnx2x_vlan_mac_obj *o,
1745                                   unsigned long *vlan_mac_flags,
1746                                   unsigned long *ramrod_flags)
1747 {
1748         struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1749         int rc = 0;
1750         struct bnx2x_vlan_mac_ramrod_params p;
1751         struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1752         struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1753
1754         /* Clear pending commands first */
1755
1756         spin_lock_bh(&exeq->lock);
1757
1758         list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1759                 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1760                     *vlan_mac_flags)
1761                         list_del(&exeq_pos->link);
1762         }
1763
1764         spin_unlock_bh(&exeq->lock);
1765
1766         /* Prepare a command request */
1767         memset(&p, 0, sizeof(p));
1768         p.vlan_mac_obj = o;
1769         p.ramrod_flags = *ramrod_flags;
1770         p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1771
1772         /*
1773          * Add all but the last VLAN-MAC to the execution queue without
1774          * actually executing anything.
1775          */
1776         __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1777         __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1778         __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1779
1780         list_for_each_entry(pos, &o->head, link) {
1781                 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1782                         p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1783                         memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1784                         rc = bnx2x_config_vlan_mac(bp, &p);
1785                         if (rc < 0) {
1786                                 BNX2X_ERR("Failed to add a new DEL command\n");
1787                                 return rc;
1788                         }
1789                 }
1790         }
1791
1792         p.ramrod_flags = *ramrod_flags;
1793         __set_bit(RAMROD_CONT, &p.ramrod_flags);
1794
1795         return bnx2x_config_vlan_mac(bp, &p);
1796 }
1797
1798 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1799         u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1800         unsigned long *pstate, bnx2x_obj_type type)
1801 {
1802         raw->func_id = func_id;
1803         raw->cid = cid;
1804         raw->cl_id = cl_id;
1805         raw->rdata = rdata;
1806         raw->rdata_mapping = rdata_mapping;
1807         raw->state = state;
1808         raw->pstate = pstate;
1809         raw->obj_type = type;
1810         raw->check_pending = bnx2x_raw_check_pending;
1811         raw->clear_pending = bnx2x_raw_clear_pending;
1812         raw->set_pending = bnx2x_raw_set_pending;
1813         raw->wait_comp = bnx2x_raw_wait;
1814 }
1815
1816 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1817         u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1818         int state, unsigned long *pstate, bnx2x_obj_type type,
1819         struct bnx2x_credit_pool_obj *macs_pool,
1820         struct bnx2x_credit_pool_obj *vlans_pool)
1821 {
1822         INIT_LIST_HEAD(&o->head);
1823
1824         o->macs_pool = macs_pool;
1825         o->vlans_pool = vlans_pool;
1826
1827         o->delete_all = bnx2x_vlan_mac_del_all;
1828         o->restore = bnx2x_vlan_mac_restore;
1829         o->complete = bnx2x_complete_vlan_mac;
1830         o->wait = bnx2x_wait_vlan_mac;
1831
1832         bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1833                            state, pstate, type);
1834 }
1835
1836
1837 void bnx2x_init_mac_obj(struct bnx2x *bp,
1838                         struct bnx2x_vlan_mac_obj *mac_obj,
1839                         u8 cl_id, u32 cid, u8 func_id, void *rdata,
1840                         dma_addr_t rdata_mapping, int state,
1841                         unsigned long *pstate, bnx2x_obj_type type,
1842                         struct bnx2x_credit_pool_obj *macs_pool)
1843 {
1844         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1845
1846         bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1847                                    rdata_mapping, state, pstate, type,
1848                                    macs_pool, NULL);
1849
1850         /* CAM credit pool handling */
1851         mac_obj->get_credit = bnx2x_get_credit_mac;
1852         mac_obj->put_credit = bnx2x_put_credit_mac;
1853         mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1854         mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1855
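	/*
	 * On E1x a MAC configuration ramrod carries a single CAM command, so
	 * the execution queue chunk length is 1; on newer chips the
	 * classification rules ramrod can carry up to CLASSIFY_RULES_COUNT
	 * commands per chunk.
	 */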
1856         if (CHIP_IS_E1x(bp)) {
1857                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e1x;
1858                 mac_obj->check_del         = bnx2x_check_mac_del;
1859                 mac_obj->check_add         = bnx2x_check_mac_add;
1860                 mac_obj->check_move        = bnx2x_check_move_always_err;
1861                 mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1862
1863                 /* Exe Queue */
1864                 bnx2x_exe_queue_init(bp,
1865                                      &mac_obj->exe_queue, 1, qable_obj,
1866                                      bnx2x_validate_vlan_mac,
1867                                      bnx2x_optimize_vlan_mac,
1868                                      bnx2x_execute_vlan_mac,
1869                                      bnx2x_exeq_get_mac);
1870         } else {
1871                 mac_obj->set_one_rule      = bnx2x_set_one_mac_e2;
1872                 mac_obj->check_del         = bnx2x_check_mac_del;
1873                 mac_obj->check_add         = bnx2x_check_mac_add;
1874                 mac_obj->check_move        = bnx2x_check_move;
1875                 mac_obj->ramrod_cmd        =
1876                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1877
1878                 /* Exe Queue */
1879                 bnx2x_exe_queue_init(bp,
1880                                      &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1881                                      qable_obj, bnx2x_validate_vlan_mac,
1882                                      bnx2x_optimize_vlan_mac,
1883                                      bnx2x_execute_vlan_mac,
1884                                      bnx2x_exeq_get_mac);
1885         }
1886 }
1887
1888 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1889                          struct bnx2x_vlan_mac_obj *vlan_obj,
1890                          u8 cl_id, u32 cid, u8 func_id, void *rdata,
1891                          dma_addr_t rdata_mapping, int state,
1892                          unsigned long *pstate, bnx2x_obj_type type,
1893                          struct bnx2x_credit_pool_obj *vlans_pool)
1894 {
1895         union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1896
1897         bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1898                                    rdata_mapping, state, pstate, type, NULL,
1899                                    vlans_pool);
1900
1901         vlan_obj->get_credit = bnx2x_get_credit_vlan;
1902         vlan_obj->put_credit = bnx2x_put_credit_vlan;
1903         vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1904         vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1905
1906         if (CHIP_IS_E1x(bp)) {
1907                 BNX2X_ERR("Do not support chips other than E2 and newer\n");
1908                 BUG();
1909         } else {
1910                 vlan_obj->set_one_rule      = bnx2x_set_one_vlan_e2;
1911                 vlan_obj->check_del         = bnx2x_check_vlan_del;
1912                 vlan_obj->check_add         = bnx2x_check_vlan_add;
1913                 vlan_obj->check_move        = bnx2x_check_move;
1914                 vlan_obj->ramrod_cmd        =
1915                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1916
1917                 /* Exe Queue */
1918                 bnx2x_exe_queue_init(bp,
1919                                      &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1920                                      qable_obj, bnx2x_validate_vlan_mac,
1921                                      bnx2x_optimize_vlan_mac,
1922                                      bnx2x_execute_vlan_mac,
1923                                      bnx2x_exeq_get_vlan);
1924         }
1925 }
1926
1927 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1928                              struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1929                              u8 cl_id, u32 cid, u8 func_id, void *rdata,
1930                              dma_addr_t rdata_mapping, int state,
1931                              unsigned long *pstate, bnx2x_obj_type type,
1932                              struct bnx2x_credit_pool_obj *macs_pool,
1933                              struct bnx2x_credit_pool_obj *vlans_pool)
1934 {
1935         union bnx2x_qable_obj *qable_obj =
1936                 (union bnx2x_qable_obj *)vlan_mac_obj;
1937
1938         bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1939                                    rdata_mapping, state, pstate, type,
1940                                    macs_pool, vlans_pool);
1941
1942         /* CAM pool handling */
1943         vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1944         vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1945         /*
1946          * CAM offset is relevant for 57710 and 57711 chips only which have a
1947          * single CAM for both MACs and VLAN-MAC pairs. So the offset
1948          * will be taken from MACs' pool object only.
1949          */
1950         vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1951         vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1952
1953         if (CHIP_IS_E1(bp)) {
1954                 BNX2X_ERR("Do not support chips other than E2\n");
1955                 BUG();
1956         } else if (CHIP_IS_E1H(bp)) {
1957                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e1h;
1958                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
1959                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
1960                 vlan_mac_obj->check_move        = bnx2x_check_move_always_err;
1961                 vlan_mac_obj->ramrod_cmd        = RAMROD_CMD_ID_ETH_SET_MAC;
1962
1963                 /* Exe Queue */
1964                 bnx2x_exe_queue_init(bp,
1965                                      &vlan_mac_obj->exe_queue, 1, qable_obj,
1966                                      bnx2x_validate_vlan_mac,
1967                                      bnx2x_optimize_vlan_mac,
1968                                      bnx2x_execute_vlan_mac,
1969                                      bnx2x_exeq_get_vlan_mac);
1970         } else {
1971                 vlan_mac_obj->set_one_rule      = bnx2x_set_one_vlan_mac_e2;
1972                 vlan_mac_obj->check_del         = bnx2x_check_vlan_mac_del;
1973                 vlan_mac_obj->check_add         = bnx2x_check_vlan_mac_add;
1974                 vlan_mac_obj->check_move        = bnx2x_check_move;
1975                 vlan_mac_obj->ramrod_cmd        =
1976                         RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1977
1978                 /* Exe Queue */
1979                 bnx2x_exe_queue_init(bp,
1980                                      &vlan_mac_obj->exe_queue,
1981                                      CLASSIFY_RULES_COUNT,
1982                                      qable_obj, bnx2x_validate_vlan_mac,
1983                                      bnx2x_optimize_vlan_mac,
1984                                      bnx2x_execute_vlan_mac,
1985                                      bnx2x_exeq_get_vlan_mac);
1986         }
1987
1988 }
1989
1990 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
1991 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
1992                         struct tstorm_eth_mac_filter_config *mac_filters,
1993                         u16 pf_id)
1994 {
1995         size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1996
1997         u32 addr = BAR_TSTRORM_INTMEM +
1998                         TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
1999
2000         __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
2001 }
2002
2003 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2004                                  struct bnx2x_rx_mode_ramrod_params *p)
2005 {
2006         /* update the bp MAC filter structure  */
2007         u32 mask = (1 << p->cl_id);
2008
2009         struct tstorm_eth_mac_filter_config *mac_filters =
2010                 (struct tstorm_eth_mac_filter_config *)p->rdata;
2011
2012         /* initial setting is drop-all */
2013         u8 drop_all_ucast = 1, drop_all_mcast = 1;
2014         u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2015         u8 unmatched_unicast = 0;
2016
2017         /* In E1x we only take the Rx accept flags into account since Tx
2018          * switching isn't enabled. */
2019         if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2020                 /* accept matched ucast */
2021                 drop_all_ucast = 0;
2022
2023         if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2024                 /* accept matched mcast */
2025                 drop_all_mcast = 0;
2026
2027         if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2028                 /* accept all ucast */
2029                 drop_all_ucast = 0;
2030                 accp_all_ucast = 1;
2031         }
2032         if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2033                 /* accept all mcast */
2034                 drop_all_mcast = 0;
2035                 accp_all_mcast = 1;
2036         }
2037         if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2038                 /* accept (all) bcast */
2039                 accp_all_bcast = 1;
2040         if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2041                 /* accept unmatched unicasts */
2042                 unmatched_unicast = 1;
2043
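	/*
	 * Each *_drop_all / *_accept_all field is a per-client bitmask: set
	 * or clear this client's bit (1 << cl_id) according to the flags
	 * computed above.
	 */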
2044         mac_filters->ucast_drop_all = drop_all_ucast ?
2045                 mac_filters->ucast_drop_all | mask :
2046                 mac_filters->ucast_drop_all & ~mask;
2047
2048         mac_filters->mcast_drop_all = drop_all_mcast ?
2049                 mac_filters->mcast_drop_all | mask :
2050                 mac_filters->mcast_drop_all & ~mask;
2051
2052         mac_filters->ucast_accept_all = accp_all_ucast ?
2053                 mac_filters->ucast_accept_all | mask :
2054                 mac_filters->ucast_accept_all & ~mask;
2055
2056         mac_filters->mcast_accept_all = accp_all_mcast ?
2057                 mac_filters->mcast_accept_all | mask :
2058                 mac_filters->mcast_accept_all & ~mask;
2059
2060         mac_filters->bcast_accept_all = accp_all_bcast ?
2061                 mac_filters->bcast_accept_all | mask :
2062                 mac_filters->bcast_accept_all & ~mask;
2063
2064         mac_filters->unmatched_unicast = unmatched_unicast ?
2065                 mac_filters->unmatched_unicast | mask :
2066                 mac_filters->unmatched_unicast & ~mask;
2067
2068         DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\naccp_ucast 0x%x\n"
2069                          "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2070                          mac_filters->ucast_drop_all,
2071                          mac_filters->mcast_drop_all,
2072                          mac_filters->ucast_accept_all,
2073                          mac_filters->mcast_accept_all,
2074                          mac_filters->bcast_accept_all);
2075
2076         /* write the MAC filter structure */
2077         __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2078
2079         /* The operation is completed */
2080         clear_bit(p->state, p->pstate);
2081         smp_mb__after_clear_bit();
2082
2083         return 0;
2084 }
2085
2086 /* Setup ramrod data */
2087 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2088                                 struct eth_classify_header *hdr,
2089                                 u8 rule_cnt)
2090 {
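        /* The echo value is returned by the FW in the ramrod completion so
         * the driver can later match the completion to the originating
         * connection (cid).
         */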
2091         hdr->echo = cid;
2092         hdr->rule_cnt = rule_cnt;
2093 }
2094
2095 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2096                                 unsigned long accept_flags,
2097                                 struct eth_filter_rules_cmd *cmd,
2098                                 bool clear_accept_all)
2099 {
2100         u16 state;
2101
2102         /* start with 'drop-all' */
2103         state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2104                 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2105
2106         if (accept_flags) {
2107                 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2108                         state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2109
2110                 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2111                         state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2112
2113                 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2114                         state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2115                         state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2116                 }
2117
2118                 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2119                         state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2120                         state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2121                 }
2122                 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2123                         state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2124
2125                 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2126                         state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2127                         state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2128                 }
2129                 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2130                         state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2131         }
2132
2133         /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2134         if (clear_accept_all) {
2135                 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2136                 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2137                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2138                 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2139         }
2140
2141         cmd->state = cpu_to_le16(state);
2142
2143 }
2144
2145 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2146                                 struct bnx2x_rx_mode_ramrod_params *p)
2147 {
2148         struct eth_filter_rules_ramrod_data *data = p->rdata;
2149         int rc;
2150         u8 rule_idx = 0;
2151
2152         /* Reset the ramrod data buffer */
2153         memset(data, 0, sizeof(*data));
2154
2155         /* Setup ramrod data */
2156
2157         /* Tx (internal switching) */
2158         if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2159                 data->rules[rule_idx].client_id = p->cl_id;
2160                 data->rules[rule_idx].func_id = p->func_id;
2161
2162                 data->rules[rule_idx].cmd_general_data =
2163                         ETH_FILTER_RULES_CMD_TX_CMD;
2164
2165                 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2166                         &(data->rules[rule_idx++]), false);
2167         }
2168
2169         /* Rx */
2170         if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2171                 data->rules[rule_idx].client_id = p->cl_id;
2172                 data->rules[rule_idx].func_id = p->func_id;
2173
2174                 data->rules[rule_idx].cmd_general_data =
2175                         ETH_FILTER_RULES_CMD_RX_CMD;
2176
2177                 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2178                         &(data->rules[rule_idx++]), false);
2179         }
2180
2181
2182         /*
2183          * If FCoE Queue configuration has been requested, configure the Rx
2184          * and internal switching modes for this queue in separate rules.
2185          *
2186          * The FCoE queue should never be set to ACCEPT_ALL packets of any
2187          * sort: MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2188          */
2189         if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2190                 /*  Tx (internal switching) */
2191                 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2192                         data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2193                         data->rules[rule_idx].func_id = p->func_id;
2194
2195                         data->rules[rule_idx].cmd_general_data =
2196                                                 ETH_FILTER_RULES_CMD_TX_CMD;
2197
2198                         bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2199                                                      &(data->rules[rule_idx++]),
2200                                                        true);
2201                 }
2202
2203                 /* Rx */
2204                 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2205                         data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2206                         data->rules[rule_idx].func_id = p->func_id;
2207
2208                         data->rules[rule_idx].cmd_general_data =
2209                                                 ETH_FILTER_RULES_CMD_RX_CMD;
2210
2211                         bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2212                                                      &(data->rules[rule_idx++]),
2213                                                        true);
2214                 }
2215         }
2216
2217         /*
2218          * Set the ramrod header (most importantly - number of rules to
2219          * configure).
2220          */
2221         bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2222
2223         DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2224                          "tx_accept_flags 0x%lx\n",
2225                          data->header.rule_cnt, p->rx_accept_flags,
2226                          p->tx_accept_flags);
2227
2228         /* Commit writes towards the memory before sending a ramrod */
2229         mb();
2230
2231         /* Send a ramrod */
2232         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2233                            U64_HI(p->rdata_mapping),
2234                            U64_LO(p->rdata_mapping),
2235                            ETH_CONNECTION_TYPE);
2236         if (rc)
2237                 return rc;
2238
2239         /* Ramrod completion is pending */
2240         return 1;
2241 }
2242
2243 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2244                                       struct bnx2x_rx_mode_ramrod_params *p)
2245 {
2246         return bnx2x_state_wait(bp, p->state, p->pstate);
2247 }
2248
2249 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2250                                     struct bnx2x_rx_mode_ramrod_params *p)
2251 {
2252         /* Do nothing */
2253         return 0;
2254 }
2255
2256 int bnx2x_config_rx_mode(struct bnx2x *bp,
2257                          struct bnx2x_rx_mode_ramrod_params *p)
2258 {
2259         int rc;
2260
2261         /* Configure the new classification in the chip */
2262         rc = p->rx_mode_obj->config_rx_mode(bp, p);
2263         if (rc < 0)
2264                 return rc;
2265
2266         /* Wait for a ramrod completion if it was requested */
2267         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2268                 rc = p->rx_mode_obj->wait_comp(bp, p);
2269                 if (rc)
2270                         return rc;
2271         }
2272
2273         return rc;
2274 }
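
/*
 * Minimal usage sketch (illustration only): a caller prepares a
 * bnx2x_rx_mode_ramrod_params with accept flags computed elsewhere and waits
 * for the ramrod completion. 'rx_obj', 'rdata' and 'rdata_mapping' are
 * hypothetical names for the rx_mode object and its ramrod data buffer; the
 * state/pstate completion bookkeeping fields are omitted for brevity.
 *
 *	memset(&p, 0, sizeof(p));
 *	p.rx_mode_obj = rx_obj;
 *	p.cl_id = cl_id;
 *	p.cid = cid;
 *	p.func_id = func_id;
 *	p.rdata = rdata;
 *	p.rdata_mapping = rdata_mapping;
 *	__set_bit(BNX2X_ACCEPT_UNICAST, &p.rx_accept_flags);
 *	__set_bit(BNX2X_ACCEPT_BROADCAST, &p.rx_accept_flags);
 *	__set_bit(RAMROD_RX, &p.ramrod_flags);
 *	__set_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
 *	rc = bnx2x_config_rx_mode(bp, &p);
 */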
2275
2276 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2277                             struct bnx2x_rx_mode_obj *o)
2278 {
2279         if (CHIP_IS_E1x(bp)) {
2280                 o->wait_comp      = bnx2x_empty_rx_mode_wait;
2281                 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2282         } else {
2283                 o->wait_comp      = bnx2x_wait_rx_mode_comp_e2;
2284                 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2285         }
2286 }
2287
2288 /********************* Multicast verbs: SET, CLEAR ****************************/
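/*
 * The approximate-match multicast configuration works on 256 "bins": a MAC
 * is mapped to a bin by taking the most significant byte of its little-endian
 * CRC32C, and the registry tracks which bins are currently set.
 */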
2289 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2290 {
2291         return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
2292 }
2293
2294 struct bnx2x_mcast_mac_elem {
2295         struct list_head link;
2296         u8 mac[ETH_ALEN];
2297         u8 pad[2]; /* For a natural alignment of the following buffer */
2298 };
2299
2300 struct bnx2x_pending_mcast_cmd {
2301         struct list_head link;
2302         int type; /* BNX2X_MCAST_CMD_X */
2303         union {
2304                 struct list_head macs_head;
2305                 u32 macs_num; /* Needed for DEL command */
2306                 int next_bin; /* Needed for RESTORE flow with aprox match */
2307         } data;
2308
2309         bool done; /* set to true, when the command has been handled,
2310                     * practically used in 57712 handling only, where one pending
2311                     * command may be handled in a few operations. Since for
2312                     * other chips every command is completed in a single
2313                     * ramrod, there is no need for this field there.
2314                     */
2315 };
2316
2317 static int bnx2x_mcast_wait(struct bnx2x *bp,
2318                             struct bnx2x_mcast_obj *o)
2319 {
2320         if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2321                         o->raw.wait_comp(bp, &o->raw))
2322                 return -EBUSY;
2323
2324         return 0;
2325 }
2326
2327 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2328                                    struct bnx2x_mcast_obj *o,
2329                                    struct bnx2x_mcast_ramrod_params *p,
2330                                    int cmd)
2331 {
2332         int total_sz;
2333         struct bnx2x_pending_mcast_cmd *new_cmd;
2334         struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2335         struct bnx2x_mcast_list_elem *pos;
2336         int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2337                              p->mcast_list_len : 0);
2338
2339         /* If the command is empty ("handle pending commands only"), break */
2340         if (!p->mcast_list_len)
2341                 return 0;
2342
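	/*
	 * A single allocation holds the command descriptor immediately
	 * followed by an array of bnx2x_mcast_mac_elem entries (only needed
	 * for an ADD command).
	 */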
2343         total_sz = sizeof(*new_cmd) +
2344                 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2345
2346         /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2347         new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2348
2349         if (!new_cmd)
2350                 return -ENOMEM;
2351
2352         DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2353                          "macs_list_len=%d\n", cmd, macs_list_len);
2354
2355         INIT_LIST_HEAD(&new_cmd->data.macs_head);
2356
2357         new_cmd->type = cmd;
2358         new_cmd->done = false;
2359
2360         switch (cmd) {
2361         case BNX2X_MCAST_CMD_ADD:
2362                 cur_mac = (struct bnx2x_mcast_mac_elem *)
2363                           ((u8 *)new_cmd + sizeof(*new_cmd));
2364
2365                 /* Push the MACs of the current command into the pending command
2366                  * MACs list: FIFO
2367                  */
2368                 list_for_each_entry(pos, &p->mcast_list, link) {
2369                         memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2370                         list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2371                         cur_mac++;
2372                 }
2373
2374                 break;
2375
2376         case BNX2X_MCAST_CMD_DEL:
2377                 new_cmd->data.macs_num = p->mcast_list_len;
2378                 break;
2379
2380         case BNX2X_MCAST_CMD_RESTORE:
2381                 new_cmd->data.next_bin = 0;
2382                 break;
2383
2384         default:
2385                 BNX2X_ERR("Unknown command: %d\n", cmd);
2386                 return -EINVAL;
2387         }
2388
2389         /* Push the new pending command to the tail of the pending list: FIFO */
2390         list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2391
2392         o->set_sched(o);
2393
2394         return 1;
2395 }
2396
2397 /**
2398  * bnx2x_mcast_get_next_bin - get the next set bin (index)
2399  *
2400  * @o:          multicast object
2401  * @last:       index to start looking from (inclusive)
2402  *
2403  * returns the next found (set) bin or a negative value if none is found.
2404  */
2405 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2406 {
2407         int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2408
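	/*
	 * Resume the scan inside the 64-bit word that contains 'last' on the
	 * first pass; every following word is scanned from bit 0.
	 */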
2409         for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2410                 if (o->registry.aprox_match.vec[i])
2411                         for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2412                                 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2413                                 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2414                                                        vec, cur_bit)) {
2415                                         return cur_bit;
2416                                 }
2417                         }
2418                 inner_start = 0;
2419         }
2420
2421         /* None found */
2422         return -1;
2423 }
2424
2425 /**
2426  * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2427  *
2428  * @o:          multicast object
2429  *
2430  * returns the index of the found bin or -1 if none is found
2431  */
2432 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2433 {
2434         int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2435
2436         if (cur_bit >= 0)
2437                 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
2438
2439         return cur_bit;
2440 }
2441
2442 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2443 {
2444         struct bnx2x_raw_obj *raw = &o->raw;
2445         u8 rx_tx_flag = 0;
2446
2447         if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2448             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2449                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2450
2451         if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2452             (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2453                 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
2454
2455         return rx_tx_flag;
2456 }
2457
2458 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2459                                         struct bnx2x_mcast_obj *o, int idx,
2460                                         union bnx2x_mcast_config_data *cfg_data,
2461                                         int cmd)
2462 {
2463         struct bnx2x_raw_obj *r = &o->raw;
2464         struct eth_multicast_rules_ramrod_data *data =
2465                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2466         u8 func_id = r->func_id;
2467         u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2468         int bin;
2469
2470         if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2471                 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2472
2473         data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2474
2475         /* Get a bin and update the bins' vector */
2476         switch (cmd) {
2477         case BNX2X_MCAST_CMD_ADD:
2478                 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2479                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2480                 break;
2481
2482         case BNX2X_MCAST_CMD_DEL:
2483                 /* If there were no more bins to clear
2484                  * (bnx2x_mcast_clear_first_bin() returns -1) then the rule
2485                  * will simply clear the dummy (0xff) bin.
2486                  * See bnx2x_mcast_validate_e2() for explanation when it may
2487                  * happen.
2488                  */
2489                 bin = bnx2x_mcast_clear_first_bin(o);
2490                 break;
2491
2492         case BNX2X_MCAST_CMD_RESTORE:
2493                 bin = cfg_data->bin;
2494                 break;
2495
2496         default:
2497                 BNX2X_ERR("Unknown command: %d\n", cmd);
2498                 return;
2499         }
2500
2501         DP(BNX2X_MSG_SP, "%s bin %d\n",
2502                          ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2503                          "Setting"  : "Clearing"), bin);
2504
2505         data->rules[idx].bin_id    = (u8)bin;
2506         data->rules[idx].func_id   = func_id;
2507         data->rules[idx].engine_id = o->engine_id;
2508 }
2509
2510 /**
2511  * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2512  *
2513  * @bp:         device handle
2514  * @o:          multicast object
2515  * @start_bin:  index in the registry to start from (inclusive)
2516  * @rdata_idx:  index in the ramrod data to start from
2517  *
2518  * returns last handled bin index or -1 if all bins have been handled
2519  */
2520 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2521         struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2522         int *rdata_idx)
2523 {
2524         int cur_bin, cnt = *rdata_idx;
2525         union bnx2x_mcast_config_data cfg_data = {0};
2526
2527         /* go through the registry and configure the bins from it */
2528         for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2529             cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2530
2531                 cfg_data.bin = (u8)cur_bin;
2532                 o->set_one_rule(bp, o, cnt, &cfg_data,
2533                                 BNX2X_MCAST_CMD_RESTORE);
2534
2535                 cnt++;
2536
2537                 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2538
2539                 /* Break if we reached the maximum number
2540                  * of rules.
2541                  */
2542                 if (cnt >= o->max_cmd_len)
2543                         break;
2544         }
2545
2546         *rdata_idx = cnt;
2547
2548         return cur_bin;
2549 }
2550
2551 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2552         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2553         int *line_idx)
2554 {
2555         struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2556         int cnt = *line_idx;
2557         union bnx2x_mcast_config_data cfg_data = {0};
2558
2559         list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2560                                  link) {
2561
2562                 cfg_data.mac = &pmac_pos->mac[0];
2563                 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2564
2565                 cnt++;
2566
2567                 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2568                                  " mcast MAC\n",
2569                                  BNX2X_MAC_PRN_LIST(pmac_pos->mac));
2570
2571                 list_del(&pmac_pos->link);
2572
2573                 /* Break if we reached the maximum number
2574                  * of rules.
2575                  */
2576                 if (cnt >= o->max_cmd_len)
2577                         break;
2578         }
2579
2580         *line_idx = cnt;
2581
2582         /* if no more MACs to configure - we are done */
2583         if (list_empty(&cmd_pos->data.macs_head))
2584                 cmd_pos->done = true;
2585 }
2586
2587 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2588         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2589         int *line_idx)
2590 {
2591         int cnt = *line_idx;
2592
2593         while (cmd_pos->data.macs_num) {
2594                 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2595
2596                 cnt++;
2597
2598                 cmd_pos->data.macs_num--;
2599
2600                 DP(BNX2X_MSG_SP, "Deleting MAC. %d left, cnt is %d\n",
2601                                  cmd_pos->data.macs_num, cnt);
2602
2603                 /* Break if we reached the maximum
2604                  * number of rules.
2605                  */
2606                 if (cnt >= o->max_cmd_len)
2607                         break;
2608         }
2609
2610         *line_idx = cnt;
2611
2612         /* If we cleared all bins - we are done */
2613         if (!cmd_pos->data.macs_num)
2614                 cmd_pos->done = true;
2615 }
2616
2617 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2618         struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2619         int *line_idx)
2620 {
2621         cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2622                                                 line_idx);
2623
2624         if (cmd_pos->data.next_bin < 0)
2625                 /* If o->set_restore returned -1 we are done */
2626                 cmd_pos->done = true;
2627         else
2628                 /* Start from the next bin next time */
2629                 cmd_pos->data.next_bin++;
2630 }
2631
2632 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2633                                 struct bnx2x_mcast_ramrod_params *p)
2634 {
2635         struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2636         int cnt = 0;
2637         struct bnx2x_mcast_obj *o = p->mcast_obj;
2638
2639         list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2640                                  link) {
2641                 switch (cmd_pos->type) {
2642                 case BNX2X_MCAST_CMD_ADD:
2643                         bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2644                         break;
2645
2646                 case BNX2X_MCAST_CMD_DEL:
2647                         bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2648                         break;
2649
2650                 case BNX2X_MCAST_CMD_RESTORE:
2651                         bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2652                                                            &cnt);
2653                         break;
2654
2655                 default:
2656                         BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2657                         return -EINVAL;
2658                 }
2659
2660                 /* If the command has been completed - remove it from the list
2661                  * and free the memory
2662                  */
2663                 if (cmd_pos->done) {
2664                         list_del(&cmd_pos->link);
2665                         kfree(cmd_pos);
2666                 }
2667
2668                 /* Break if we reached the maximum number of rules */
2669                 if (cnt >= o->max_cmd_len)
2670                         break;
2671         }
2672
2673         return cnt;
2674 }
2675
2676 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2677         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2678         int *line_idx)
2679 {
2680         struct bnx2x_mcast_list_elem *mlist_pos;
2681         union bnx2x_mcast_config_data cfg_data = {0};
2682         int cnt = *line_idx;
2683
2684         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2685                 cfg_data.mac = mlist_pos->mac;
2686                 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2687
2688                 cnt++;
2689
2690                 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2691                                  " mcast MAC\n",
2692                                  BNX2X_MAC_PRN_LIST(mlist_pos->mac));
2693         }
2694
2695         *line_idx = cnt;
2696 }
2697
2698 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2699         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2700         int *line_idx)
2701 {
2702         int cnt = *line_idx, i;
2703
2704         for (i = 0; i < p->mcast_list_len; i++) {
2705                 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2706
2707                 cnt++;
2708
2709                 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2710                                  p->mcast_list_len - i - 1);
2711         }
2712
2713         *line_idx = cnt;
2714 }
2715
2716 /**
2717  * bnx2x_mcast_handle_current_cmd - handle the current (non-pending) command
2718  *
2719  * @bp:         device handle
2720  * @p:          multicast ramrod parameters
2721  * @cmd:        command to handle (BNX2X_MCAST_CMD_X)
2722  * @start_cnt:  first line in the ramrod data that may be used
2723  *
2724  * This function is called iff there is enough room for the current command in
2725  * the ramrod data.
2726  * Returns number of lines filled in the ramrod data in total.
2727  */
2728 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2729                         struct bnx2x_mcast_ramrod_params *p, int cmd,
2730                         int start_cnt)
2731 {
2732         struct bnx2x_mcast_obj *o = p->mcast_obj;
2733         int cnt = start_cnt;
2734
2735         DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2736
2737         switch (cmd) {
2738         case BNX2X_MCAST_CMD_ADD:
2739                 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2740                 break;
2741
2742         case BNX2X_MCAST_CMD_DEL:
2743                 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2744                 break;
2745
2746         case BNX2X_MCAST_CMD_RESTORE:
2747                 o->hdl_restore(bp, o, 0, &cnt);
2748                 break;
2749
2750         default:
2751                 BNX2X_ERR("Unknown command: %d\n", cmd);
2752                 return -EINVAL;
2753         }
2754
2755         /* The current command has been handled */
2756         p->mcast_list_len = 0;
2757
2758         return cnt;
2759 }
2760
2761 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2762                                    struct bnx2x_mcast_ramrod_params *p,
2763                                    int cmd)
2764 {
2765         struct bnx2x_mcast_obj *o = p->mcast_obj;
2766         int reg_sz = o->get_registry_size(o);
2767
2768         switch (cmd) {
2769         /* DEL command deletes all currently configured MACs */
2770         case BNX2X_MCAST_CMD_DEL:
2771                 o->set_registry_size(o, 0);
2772                 /* Don't break */
2773
2774         /* RESTORE command will restore the entire multicast configuration */
2775         case BNX2X_MCAST_CMD_RESTORE:
2776                 /* Here we set the approximate amount of work to do, which in
2777                  * fact may be less, as some MACs in postponed ADD
2778                  * command(s) scheduled before this command may fall into
2779                  * the same bin and the actual number of bins set in the
2780                  * registry would be less than we estimated here. See
2781                  * bnx2x_mcast_set_one_rule_e2() for further details.
2782                  */
2783                 p->mcast_list_len = reg_sz;
2784                 break;
2785
2786         case BNX2X_MCAST_CMD_ADD:
2787         case BNX2X_MCAST_CMD_CONT:
2788                 /* Here we assume that all new MACs will fall into new bins.
2789                  * However we will correct the real registry size after we
2790                  * handle all pending commands.
2791                  */
2792                 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2793                 break;
2794
2795         default:
2796                 BNX2X_ERR("Unknown command: %d\n", cmd);
2797                 return -EINVAL;
2798
2799         }
2800
2801         /* Increase the total number of MACs pending to be configured */
2802         o->total_pending_num += p->mcast_list_len;
2803
2804         return 0;
2805 }
2806
2807 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2808                                       struct bnx2x_mcast_ramrod_params *p,
2809                                       int old_num_bins)
2810 {
2811         struct bnx2x_mcast_obj *o = p->mcast_obj;
2812
2813         o->set_registry_size(o, old_num_bins);
2814         o->total_pending_num -= p->mcast_list_len;
2815 }
2816
2817 /**
2818  * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2819  *
2820  * @bp:         device handle
2821  * @p:          multicast ramrod parameters
2822  * @len:        number of rules to handle
2823  */
2824 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2825                                         struct bnx2x_mcast_ramrod_params *p,
2826                                         u8 len)
2827 {
2828         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2829         struct eth_multicast_rules_ramrod_data *data =
2830                 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2831
2832         data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2833                           (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2834         data->header.rule_cnt = len;
2835 }
2836
2837 /**
2838  * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2839  *
2840  * @bp:         device handle
2841  * @o:          multicast object
2842  *
2843  * Recalculate the actual number of set bins in the registry using Brian
2844  * Kernighan's algorithm: its execution complexity scales with the number of set bins.
2845  *
2846  * returns 0 to match the bnx2x_mcast_refresh_registry_e1() return convention.
2847  */
2848 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2849                                                   struct bnx2x_mcast_obj *o)
2850 {
2851         int i, cnt = 0;
2852         u64 elem;
2853
2854         for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2855                 elem = o->registry.aprox_match.vec[i];
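                /*
                 * Kernighan's bit-count: 'elem &= elem - 1' clears the lowest
                 * set bit, so the inner loop runs once per set bit.
                 */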
2856                 for (; elem; cnt++)
2857                         elem &= elem - 1;
2858         }
2859
2860         o->set_registry_size(o, cnt);
2861
2862         return 0;
2863 }
2864
2865 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2866                                 struct bnx2x_mcast_ramrod_params *p,
2867                                 int cmd)
2868 {
2869         struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2870         struct bnx2x_mcast_obj *o = p->mcast_obj;
2871         struct eth_multicast_rules_ramrod_data *data =
2872                 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2873         int cnt = 0, rc;
2874
2875         /* Reset the ramrod data buffer */
2876         memset(data, 0, sizeof(*data));
2877
2878         cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2879
2880         /* If there are no more pending commands - clear SCHEDULED state */
2881         if (list_empty(&o->pending_cmds_head))
2882                 o->clear_sched(o);
2883
2884         /* The below may be true iff there was enough room in ramrod
2885          * data for all pending commands and for the current
2886          * command. Otherwise the current command would have been added
2887          * to the pending commands and p->mcast_list_len would have been
2888          * zeroed.
2889          */
2890         if (p->mcast_list_len > 0)
2891                 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2892
2893         /* We've pulled out some MACs - update the total number of
2894          * outstanding.
2895          */
2896         o->total_pending_num -= cnt;
2897
2898         /* send a ramrod */
2899         WARN_ON(o->total_pending_num < 0);
2900         WARN_ON(cnt > o->max_cmd_len);
2901
2902         bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2903
2904         /* Update the registry size if there are no more pending operations.
2905          *
2906          * We don't want to change the value of the registry size if there are
2907          * pending operations because we want it to always be equal to the
2908          * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2909          * set bins after the last requested operation in order to properly
2910          * evaluate the size of the next DEL/RESTORE operation.
2911          *
2912          * Note that we update the registry itself during command(s) handling
2913          * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2914          * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2915          * with a limited amount of update commands (per MAC/bin) and we don't
2916          * know in this scope what the actual state of bins configuration is
2917          * going to be after this ramrod.
2918          */
2919         if (!o->total_pending_num)
2920                 bnx2x_mcast_refresh_registry_e2(bp, o);
2921
2922         /* Commit writes towards the memory before sending a ramrod */
2923         mb();
2924
2925         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2926          * RAMROD_PENDING status immediately.
2927          */
2928         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2929                 raw->clear_pending(raw);
2930                 return 0;
2931         } else {
2932                 /* Send a ramrod */
2933                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2934                                    raw->cid, U64_HI(raw->rdata_mapping),
2935                                    U64_LO(raw->rdata_mapping),
2936                                    ETH_CONNECTION_TYPE);
2937                 if (rc)
2938                         return rc;
2939
2940                 /* Ramrod completion is pending */
2941                 return 1;
2942         }
2943 }
2944
2945 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
2946                                     struct bnx2x_mcast_ramrod_params *p,
2947                                     int cmd)
2948 {
2949         /* Mark that there is work to do */
2950         if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2951                 p->mcast_list_len = 1;
2952
2953         return 0;
2954 }
2955
2956 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
2957                                        struct bnx2x_mcast_ramrod_params *p,
2958                                        int old_num_bins)
2959 {
2960         /* Do nothing */
2961 }
2962
2963 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
2964 do { \
2965         (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
2966 } while (0)
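
/* Worked example, not part of the driver: the macro above treats the filter
 * as an array of 32-bit words, where (bit >> 5) selects the word and
 * (bit & 0x1f) the position inside it. For bit = 70 it expands to
 * mc_filter[2] |= (1 << 6), since 70 >> 5 == 2 and 70 & 0x1f == 6.
 */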
2967
2968 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
2969                                            struct bnx2x_mcast_obj *o,
2970                                            struct bnx2x_mcast_ramrod_params *p,
2971                                            u32 *mc_filter)
2972 {
2973         struct bnx2x_mcast_list_elem *mlist_pos;
2974         int bit;
2975
2976         list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2977                 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
2978                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
2979
2980                 DP(BNX2X_MSG_SP, "About to configure "
2981                                  BNX2X_MAC_FMT" mcast MAC, bin %d\n",
2982                                  BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
2983
2984                 /* bookkeeping... */
2985                 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
2986                                   bit);
2987         }
2988 }
2989
2990 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
2991         struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2992         u32 *mc_filter)
2993 {
2994         int bit;
2995
2996         for (bit = bnx2x_mcast_get_next_bin(o, 0);
2997              bit >= 0;
2998              bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
2999                 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3000                 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3001         }
3002 }
3003
3004 /* On 57711 we write the multicast MACs' approximate match
3005  * table directly into the TSTORM's internal RAM, so we don't
3006  * need any special tricks to make it work.
3007  */
3008 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3009                                  struct bnx2x_mcast_ramrod_params *p,
3010                                  int cmd)
3011 {
3012         int i;
3013         struct bnx2x_mcast_obj *o = p->mcast_obj;
3014         struct bnx2x_raw_obj *r = &o->raw;
3015
3016         /* If CLEAR_ONLY has been requested - just clear the registry
3017          * and the pending bit; otherwise configure the new filter first.
3018          */
3019         if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3020                 u32 mc_filter[MC_HASH_SIZE] = {0};
3021
3022                 /* Set the multicast filter bits before writing it into
3023                  * the internal memory.
3024                  */
3025                 switch (cmd) {
3026                 case BNX2X_MCAST_CMD_ADD:
3027                         bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3028                         break;
3029
3030                 case BNX2X_MCAST_CMD_DEL:
3031                         DP(BNX2X_MSG_SP, "Invalidating multicast "
3032                                          "MACs configuration\n");
3033
3034                         /* clear the registry */
3035                         memset(o->registry.aprox_match.vec, 0,
3036                                sizeof(o->registry.aprox_match.vec));
3037                         break;
3038
3039                 case BNX2X_MCAST_CMD_RESTORE:
3040                         bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3041                         break;
3042
3043                 default:
3044                         BNX2X_ERR("Unknown command: %d\n", cmd);
3045                         return -EINVAL;
3046                 }
3047
3048                 /* Set the mcast filter in the internal memory */
3049                 for (i = 0; i < MC_HASH_SIZE; i++)
3050                         REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3051         } else
3052                 /* clear the registry */
3053                 memset(o->registry.aprox_match.vec, 0,
3054                        sizeof(o->registry.aprox_match.vec));
3055
3056         /* We are done */
3057         r->clear_pending(r);
3058
3059         return 0;
3060 }
3061
3062 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3063                                    struct bnx2x_mcast_ramrod_params *p,
3064                                    int cmd)
3065 {
3066         struct bnx2x_mcast_obj *o = p->mcast_obj;
3067         int reg_sz = o->get_registry_size(o);
3068
3069         switch (cmd) {
3070         /* DEL command deletes all currently configured MACs */
3071         case BNX2X_MCAST_CMD_DEL:
3072                 o->set_registry_size(o, 0);
3073                 /* Don't break */
3074
3075         /* RESTORE command will restore the entire multicast configuration */
3076         case BNX2X_MCAST_CMD_RESTORE:
3077                 p->mcast_list_len = reg_sz;
3078                 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3079                                  cmd, p->mcast_list_len);
3080                 break;
3081
3082         case BNX2X_MCAST_CMD_ADD:
3083         case BNX2X_MCAST_CMD_CONT:
3084                 /* Multicast MACs on 57710 are configured as unicast MACs and
3085                  * there is only a limited number of CAM entries for that
3086                  * matter.
3087                  */
3088                 if (p->mcast_list_len > o->max_cmd_len) {
3089                         BNX2X_ERR("Can't configure more than %d multicast MACs"
3090                                    "on 57710\n", o->max_cmd_len);
3091                         return -EINVAL;
3092                 }
3093                 /* Every configured MAC should be cleared if DEL command is
3094                  * called. Only the last ADD command is relevant as long as
3095                  * every ADD command overrides the previous configuration.
3096                  */
3097                 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3098                 if (p->mcast_list_len > 0)
3099                         o->set_registry_size(o, p->mcast_list_len);
3100
3101                 break;
3102
3103         default:
3104                 BNX2X_ERR("Unknown command: %d\n", cmd);
3105                 return -EINVAL;
3106
3107         }
3108
3109         /* We want to ensure that commands are executed one by one for 57710.
3110          * Therefore each non-empty command will consume o->max_cmd_len.
3111          */
3112         if (p->mcast_list_len)
3113                 o->total_pending_num += o->max_cmd_len;
3114
3115         return 0;
3116 }
3117
3118 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3119                                       struct bnx2x_mcast_ramrod_params *p,
3120                                       int old_num_macs)
3121 {
3122         struct bnx2x_mcast_obj *o = p->mcast_obj;
3123
3124         o->set_registry_size(o, old_num_macs);
3125
3126         /* If the current command hasn't been handled yet and we are
3127          * here, it means that it's meant to be dropped and we have to
3128          * update the number of outstanding MACs accordingly.
3129          */
3130         if (p->mcast_list_len)
3131                 o->total_pending_num -= o->max_cmd_len;
3132 }
3133
3134 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3135                                         struct bnx2x_mcast_obj *o, int idx,
3136                                         union bnx2x_mcast_config_data *cfg_data,
3137                                         int cmd)
3138 {
3139         struct bnx2x_raw_obj *r = &o->raw;
3140         struct mac_configuration_cmd *data =
3141                 (struct mac_configuration_cmd *)(r->rdata);
3142
3143         /* copy mac */
3144         if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3145                 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3146                                       &data->config_table[idx].middle_mac_addr,
3147                                       &data->config_table[idx].lsb_mac_addr,
3148                                       cfg_data->mac);
3149
3150                 data->config_table[idx].vlan_id = 0;
3151                 data->config_table[idx].pf_id = r->func_id;
3152                 data->config_table[idx].clients_bit_vector =
3153                         cpu_to_le32(1 << r->cl_id);
3154
3155                 SET_FLAG(data->config_table[idx].flags,
3156                          MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3157                          T_ETH_MAC_COMMAND_SET);
3158         }
3159 }
3160
3161 /**
3162  * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3163  *
3164  * @bp:         device handle
3165  * @p:          multicast ramrod parameters
3166  * @len:        number of rules to handle
3167  */
3168 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3169                                         struct bnx2x_mcast_ramrod_params *p,
3170                                         u8 len)
3171 {
3172         struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3173         struct mac_configuration_cmd *data =
3174                 (struct mac_configuration_cmd *)(r->rdata);
3175
3176         u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3177                      BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3178                      BNX2X_MAX_MULTICAST*(1 + r->func_id));
3179
3180         data->hdr.offset = offset;
3181         data->hdr.client_id = 0xff;
3182         data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3183                           (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3184         data->hdr.length = len;
3185 }
3186
3187 /**
3188  * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3189  *
3190  * @bp:         device handle
3191  * @o:          multicast object
3192  * @start_idx:  index in the registry to start from
3193  * @rdata_idx:  index in the ramrod data to start from
3194  *
3195  * The restore command for 57710 is, like all other commands, always a
3196  * standalone command, so start_idx and rdata_idx will always be 0. This
3197  * function always succeeds.
3198  * Returns -1 to comply with the 57712 variant.
3199  */
3200 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3201         struct bnx2x *bp, struct bnx2x_mcast_obj *o, int start_idx,
3202         int *rdata_idx)
3203 {
3204         struct bnx2x_mcast_mac_elem *elem;
3205         int i = 0;
3206         union bnx2x_mcast_config_data cfg_data = {0};
3207
3208         /* go through the registry and configure the MACs from it. */
3209         list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3210                 cfg_data.mac = &elem->mac[0];
3211                 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3212
3213                 i++;
3214
3215                 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3216                                  " mcast MAC\n",
3217                                  BNX2X_MAC_PRN_LIST(cfg_data.mac));
3218         }
3219
3220         *rdata_idx = i;
3221
3222         return -1;
3223 }
3224
3225
3226 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3227         struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3228 {
3229         struct bnx2x_pending_mcast_cmd *cmd_pos;
3230         struct bnx2x_mcast_mac_elem *pmac_pos;
3231         struct bnx2x_mcast_obj *o = p->mcast_obj;
3232         union bnx2x_mcast_config_data cfg_data = {0};
3233         int cnt = 0;
3234
3235
3236         /* If nothing to be done - return */
3237         if (list_empty(&o->pending_cmds_head))
3238                 return 0;
3239
3240         /* Handle the first command */
3241         cmd_pos = list_first_entry(&o->pending_cmds_head,
3242                                    struct bnx2x_pending_mcast_cmd, link);
3243
3244         switch (cmd_pos->type) {
3245         case BNX2X_MCAST_CMD_ADD:
3246                 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3247                         cfg_data.mac = &pmac_pos->mac[0];
3248                         o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3249
3250                         cnt++;
3251
3252                         DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3253                                          " mcast MAC\n",
3254                                          BNX2X_MAC_PRN_LIST(pmac_pos->mac));
3255                 }
3256                 break;
3257
3258         case BNX2X_MCAST_CMD_DEL:
3259                 cnt = cmd_pos->data.macs_num;
3260                 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3261                 break;
3262
3263         case BNX2X_MCAST_CMD_RESTORE:
3264                 o->hdl_restore(bp, o, 0, &cnt);
3265                 break;
3266
3267         default:
3268                 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3269                 return -EINVAL;
3270         }
3271
3272         list_del(&cmd_pos->link);
3273         kfree(cmd_pos);
3274
3275         return cnt;
3276 }
3277
3278 /**
3279  * bnx2x_get_fw_mac_addr - revert bnx2x_set_fw_mac_addr().
3280  *
3281  * @fw_hi:      most significant 16 bits of the MAC, FW format
3282  * @fw_mid:     middle 16 bits of the MAC, FW format
3283  * @fw_lo:      least significant 16 bits of the MAC, FW format
3284  * @mac:        buffer to store the MAC address in
3285  */
3286 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3287                                          __le16 *fw_lo, u8 *mac)
3288 {
3289         mac[1] = ((u8 *)fw_hi)[0];
3290         mac[0] = ((u8 *)fw_hi)[1];
3291         mac[3] = ((u8 *)fw_mid)[0];
3292         mac[2] = ((u8 *)fw_mid)[1];
3293         mac[5] = ((u8 *)fw_lo)[0];
3294         mac[4] = ((u8 *)fw_lo)[1];
3295 }
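
/* Worked example, not part of the driver, derived only from the byte copies
 * above: if the firmware halves hold the bytes fw_hi = {0x11, 0x00},
 * fw_mid = {0x33, 0x22} and fw_lo = {0x55, 0x44}, then mac[] comes out as
 * {0x00, 0x11, 0x22, 0x33, 0x44, 0x55}, i.e. 00:11:22:33:44:55.
 */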
3296
3297 /**
3298  * bnx2x_mcast_refresh_registry_e1 - update the exact match registry
3299  *
3300  * @bp:         device handle
3301  * @o:          multicast object
3302  *
3303  * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3304  * and update the registry correspondingly: if ADD - allocate memory and add
3305  * the entries to the registry (list); if DELETE - clear the registry and free
3306  * the memory.
3307  */
3308 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3309                                                   struct bnx2x_mcast_obj *o)
3310 {
3311         struct bnx2x_raw_obj *raw = &o->raw;
3312         struct bnx2x_mcast_mac_elem *elem;
3313         struct mac_configuration_cmd *data =
3314                         (struct mac_configuration_cmd *)(raw->rdata);
3315
3316         /* If first entry contains a SET bit - the command was ADD,
3317          * otherwise - DEL_ALL
3318          */
3319         if (GET_FLAG(data->config_table[0].flags,
3320                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3321                 int i, len = data->hdr.length;
3322
3323                 /* Break if it was a RESTORE command */
3324                 if (!list_empty(&o->registry.exact_match.macs))
3325                         return 0;
3326
3327                 elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
3328                 if (!elem) {
3329                         BNX2X_ERR("Failed to allocate registry memory\n");
3330                         return -ENOMEM;
3331                 }
3332
3333                 for (i = 0; i < len; i++, elem++) {
3334                         bnx2x_get_fw_mac_addr(
3335                                 &data->config_table[i].msb_mac_addr,
3336                                 &data->config_table[i].middle_mac_addr,
3337                                 &data->config_table[i].lsb_mac_addr,
3338                                 elem->mac);
3339                         DP(BNX2X_MSG_SP, "Adding registry entry for ["
3340                                          BNX2X_MAC_FMT"]\n",
3341                                    BNX2X_MAC_PRN_LIST(elem->mac));
3342                         list_add_tail(&elem->link,
3343                                       &o->registry.exact_match.macs);
3344                 }
3345         } else {
3346                 elem = list_first_entry(&o->registry.exact_match.macs,
3347                                         struct bnx2x_mcast_mac_elem, link);
3348                 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3349                 kfree(elem);
3350                 INIT_LIST_HEAD(&o->registry.exact_match.macs);
3351         }
3352
3353         return 0;
3354 }
3355
3356 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3357                                 struct bnx2x_mcast_ramrod_params *p,
3358                                 int cmd)
3359 {
3360         struct bnx2x_mcast_obj *o = p->mcast_obj;
3361         struct bnx2x_raw_obj *raw = &o->raw;
3362         struct mac_configuration_cmd *data =
3363                 (struct mac_configuration_cmd *)(raw->rdata);
3364         int cnt = 0, i, rc;
3365
3366         /* Reset the ramrod data buffer */
3367         memset(data, 0, sizeof(*data));
3368
3369         /* First set all entries as invalid */
3370         for (i = 0; i < o->max_cmd_len ; i++)
3371                 SET_FLAG(data->config_table[i].flags,
3372                          MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3373                          T_ETH_MAC_COMMAND_INVALIDATE);
3374
3375         /* Handle pending commands first */
3376         cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3377
3378         /* If there are no more pending commands - clear SCHEDULED state */
3379         if (list_empty(&o->pending_cmds_head))
3380                 o->clear_sched(o);
3381
3382         /* The below may be true iff there were no pending commands */
3383         if (!cnt)
3384                 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3385
3386         /* For 57710 every command has o->max_cmd_len length to ensure that
3387          * commands are done one at a time.
3388          */
3389         o->total_pending_num -= o->max_cmd_len;
3390
3391         /* send a ramrod */
3392
3393         WARN_ON(cnt > o->max_cmd_len);
3394
3395         /* Set ramrod header (in particular, a number of entries to update) */
3396         bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3397
3398         /* Update the registry: the registry contents must always be up
3399          * to date in order to be able to execute a RESTORE opcode. Here
3400          * we use the fact that for 57710 we send one command at a time,
3401          * hence we may take the registry update out of the command handling
3402          * and do it in a simpler way here.
3403          */
3404         rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3405         if (rc)
3406                 return rc;
3407
3408         /* Commit writes towards the memory before sending a ramrod */
3409         mb();
3410
3411         /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3412          * RAMROD_PENDING status immediately.
3413          */
3414         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3415                 raw->clear_pending(raw);
3416                 return 0;
3417         } else {
3418                 /* Send a ramrod */
3419                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3420                                    U64_HI(raw->rdata_mapping),
3421                                    U64_LO(raw->rdata_mapping),
3422                                    ETH_CONNECTION_TYPE);
3423                 if (rc)
3424                         return rc;
3425
3426                 /* Ramrod completion is pending */
3427                 return 1;
3428         }
3429
3430 }
3431
3432 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3433 {
3434         return o->registry.exact_match.num_macs_set;
3435 }
3436
3437 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3438 {
3439         return o->registry.aprox_match.num_bins_set;
3440 }
3441
3442 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3443                                                 int n)
3444 {
3445         o->registry.exact_match.num_macs_set = n;
3446 }
3447
3448 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3449                                                 int n)
3450 {
3451         o->registry.aprox_match.num_bins_set = n;
3452 }
3453
3454 int bnx2x_config_mcast(struct bnx2x *bp,
3455                        struct bnx2x_mcast_ramrod_params *p,
3456                        int cmd)
3457 {
3458         struct bnx2x_mcast_obj *o = p->mcast_obj;
3459         struct bnx2x_raw_obj *r = &o->raw;
3460         int rc = 0, old_reg_size;
3461
3462         /* This is needed to recover the number of currently configured mcast MACs
3463          * in case of failure.
3464          */
3465         old_reg_size = o->get_registry_size(o);
3466
3467         /* Do some calculations and checks */
3468         rc = o->validate(bp, p, cmd);
3469         if (rc)
3470                 return rc;
3471
3472         /* Return if there is no work to do */
3473         if ((!p->mcast_list_len) && (!o->check_sched(o)))
3474                 return 0;
3475
3476         DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3477                          "o->max_cmd_len=%d\n", o->total_pending_num,
3478                          p->mcast_list_len, o->max_cmd_len);
3479
3480         /* Enqueue the current command to the pending list if we can't complete
3481          * it in the current iteration
3482          */
3483         if (r->check_pending(r) ||
3484             ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3485                 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3486                 if (rc < 0)
3487                         goto error_exit1;
3488
3489                 /* As long as the current command is in a command list we
3490                  * don't need to handle it separately.
3491                  */
3492                 p->mcast_list_len = 0;
3493         }
3494
3495         if (!r->check_pending(r)) {
3496
3497                 /* Set 'pending' state */
3498                 r->set_pending(r);
3499
3500                 /* Configure the new classification in the chip */
3501                 rc = o->config_mcast(bp, p, cmd);
3502                 if (rc < 0)
3503                         goto error_exit2;
3504
3505                 /* Wait for a ramrod completion if was requested */
3506                 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3507                         rc = o->wait_comp(bp, o);
3508         }
3509
3510         return rc;
3511
3512 error_exit2:
3513         r->clear_pending(r);
3514
3515 error_exit1:
3516         o->revert(bp, p, old_reg_size);
3517
3518         return rc;
3519 }
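
/* A minimal usage sketch, not part of the driver: it assumes the caller owns
 * an initialized multicast object (here taken to live in bp->mcast_obj) and
 * wants to drop the whole multicast configuration synchronously. The helper
 * name is hypothetical.
 */
static int bnx2x_example_mcast_del_all(struct bnx2x *bp)
{
	struct bnx2x_mcast_ramrod_params rparam = {0};
	int rc;

	rparam.mcast_obj = &bp->mcast_obj;

	/* Block until the ramrod completion arrives */
	__set_bit(RAMROD_COMP_WAIT, &rparam.ramrod_flags);

	rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
	if (rc < 0)
		BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);

	return rc;
}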
3520
3521 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3522 {
3523         smp_mb__before_clear_bit();
3524         clear_bit(o->sched_state, o->raw.pstate);
3525         smp_mb__after_clear_bit();
3526 }
3527
3528 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3529 {
3530         smp_mb__before_clear_bit();
3531         set_bit(o->sched_state, o->raw.pstate);
3532         smp_mb__after_clear_bit();
3533 }
3534
3535 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3536 {
3537         return !!test_bit(o->sched_state, o->raw.pstate);
3538 }
3539
3540 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3541 {
3542         return o->raw.check_pending(&o->raw) || o->check_sched(o);
3543 }
3544
3545 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3546                           struct bnx2x_mcast_obj *mcast_obj,
3547                           u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3548                           u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3549                           int state, unsigned long *pstate, bnx2x_obj_type type)
3550 {
3551         memset(mcast_obj, 0, sizeof(*mcast_obj));
3552
3553         bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3554                            rdata, rdata_mapping, state, pstate, type);
3555
3556         mcast_obj->engine_id = engine_id;
3557
3558         INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3559
3560         mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3561         mcast_obj->check_sched = bnx2x_mcast_check_sched;
3562         mcast_obj->set_sched = bnx2x_mcast_set_sched;
3563         mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3564
3565         if (CHIP_IS_E1(bp)) {
3566                 mcast_obj->config_mcast      = bnx2x_mcast_setup_e1;
3567                 mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3568                 mcast_obj->hdl_restore       =
3569                         bnx2x_mcast_handle_restore_cmd_e1;
3570                 mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3571
3572                 if (CHIP_REV_IS_SLOW(bp))
3573                         mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3574                 else
3575                         mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3576
3577                 mcast_obj->wait_comp         = bnx2x_mcast_wait;
3578                 mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e1;
3579                 mcast_obj->validate          = bnx2x_mcast_validate_e1;
3580                 mcast_obj->revert            = bnx2x_mcast_revert_e1;
3581                 mcast_obj->get_registry_size =
3582                         bnx2x_mcast_get_registry_size_exact;
3583                 mcast_obj->set_registry_size =
3584                         bnx2x_mcast_set_registry_size_exact;
3585
3586                 /* 57710 is the only chip that uses the exact match for mcast
3587                  * at the moment.
3588                  */
3589                 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3590
3591         } else if (CHIP_IS_E1H(bp)) {
3592                 mcast_obj->config_mcast  = bnx2x_mcast_setup_e1h;
3593                 mcast_obj->enqueue_cmd   = NULL;
3594                 mcast_obj->hdl_restore   = NULL;
3595                 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3596
3597                 /* 57711 doesn't send a ramrod, so it has unlimited credit
3598                  * for one command.
3599                  */
3600                 mcast_obj->max_cmd_len       = -1;
3601                 mcast_obj->wait_comp         = bnx2x_mcast_wait;
3602                 mcast_obj->set_one_rule      = NULL;
3603                 mcast_obj->validate          = bnx2x_mcast_validate_e1h;
3604                 mcast_obj->revert            = bnx2x_mcast_revert_e1h;
3605                 mcast_obj->get_registry_size =
3606                         bnx2x_mcast_get_registry_size_aprox;
3607                 mcast_obj->set_registry_size =
3608                         bnx2x_mcast_set_registry_size_aprox;
3609         } else {
3610                 mcast_obj->config_mcast      = bnx2x_mcast_setup_e2;
3611                 mcast_obj->enqueue_cmd       = bnx2x_mcast_enqueue_cmd;
3612                 mcast_obj->hdl_restore       =
3613                         bnx2x_mcast_handle_restore_cmd_e2;
3614                 mcast_obj->check_pending     = bnx2x_mcast_check_pending;
3615                 /* TODO: There should be a proper HSI define for this number!!!
3616                  */
3617                 mcast_obj->max_cmd_len       = 16;
3618                 mcast_obj->wait_comp         = bnx2x_mcast_wait;
3619                 mcast_obj->set_one_rule      = bnx2x_mcast_set_one_rule_e2;
3620                 mcast_obj->validate          = bnx2x_mcast_validate_e2;
3621                 mcast_obj->revert            = bnx2x_mcast_revert_e2;
3622                 mcast_obj->get_registry_size =
3623                         bnx2x_mcast_get_registry_size_aprox;
3624                 mcast_obj->set_registry_size =
3625                         bnx2x_mcast_set_registry_size_aprox;
3626         }
3627 }
3628
3629 /*************************** Credit handling **********************************/
3630
3631 /**
3632  * atomic_add_ifless - add if the result is less than a given value.
3633  *
3634  * @v:  pointer of type atomic_t
3635  * @a:  the amount to add to v...
3636  * @u:  ...if (v + a) is less than u.
3637  *
3638  * returns true if (v + a) was less than u, and false otherwise.
3639  *
3640  */
3641 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3642 {
3643         int c, old;
3644
3645         c = atomic_read(v);
3646         for (;;) {
3647                 if (unlikely(c + a >= u))
3648                         return false;
3649
3650                 old = atomic_cmpxchg((v), c, c + a);
3651                 if (likely(old == c))
3652                         break;
3653                 c = old;
3654         }
3655
3656         return true;
3657 }
3658
3659 /**
3660  * atomic_dec_ifmoe - dec if the result is greater than or equal to a given value.
3661  *
3662  * @v:  pointer of type atomic_t
3663  * @a:  the amount to dec from v...
3664  * @u:  ...if (v - a) is greater than or equal to u.
3665  *
3666  * returns true if (v - a) was greater than or equal to u, and false
3667  * otherwise.
3668  */
3669 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3670 {
3671         int c, old;
3672
3673         c = atomic_read(v);
3674         for (;;) {
3675                 if (unlikely(c - a < u))
3676                         return false;
3677
3678                 old = atomic_cmpxchg((v), c, c - a);
3679                 if (likely(old == c))
3680                         break;
3681                 c = old;
3682         }
3683
3684         return true;
3685 }
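
/* Worked example, not part of the driver, showing how the two helpers above
 * pair up in the credit pool code below (pool_sz == 5 assumed):
 *
 *	__atomic_dec_ifmoe(&credit, 2, 0)	credit 5 -> 3, returns true
 *	__atomic_dec_ifmoe(&credit, 4, 0)	credit stays 3, returns false
 *	__atomic_add_ifless(&credit, 2, 5 + 1)	credit 3 -> 5, returns true
 *	__atomic_add_ifless(&credit, 1, 5 + 1)	credit stays 5, returns false
 *
 * Taking N credits succeeds only while at least N remain, and returning
 * credits never pushes the counter above the pool size.
 */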
3686
3687 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3688 {
3689         bool rc;
3690
3691         smp_mb();
3692         rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
3693         smp_mb();
3694
3695         return rc;
3696 }
3697
3698 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3699 {
3700         bool rc;
3701
3702         smp_mb();
3703
3704         /* Don't allow refilling if credit + cnt > pool_sz */
3705         rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
3706
3707         smp_mb();
3708
3709         return rc;
3710 }
3711
3712 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3713 {
3714         int cur_credit;
3715
3716         smp_mb();
3717         cur_credit = atomic_read(&o->credit);
3718
3719         return cur_credit;
3720 }
3721
3722 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3723                                           int cnt)
3724 {
3725         return true;
3726 }
3727
3728
3729 static bool bnx2x_credit_pool_get_entry(
3730         struct bnx2x_credit_pool_obj *o,
3731         int *offset)
3732 {
3733         int idx, vec, i;
3734
3735         *offset = -1;
3736
3737         /* Find "internal cam-offset" then add to base for this object... */
3738         for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3739
3740                 /* Skip the current vector if there are no free entries in it */
3741                 if (!o->pool_mirror[vec])
3742                         continue;
3743
3744                 /* If we've got here we are going to find a free entry */
3745                 for (idx = vec * BIT_VEC64_ELEM_SZ, i = 0;
3746                       i < BIT_VEC64_ELEM_SZ; idx++, i++)
3747
3748                         if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3749                                 /* Got one!! */
3750                                 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3751                                 *offset = o->base_pool_offset + idx;
3752                                 return true;
3753                         }
3754         }
3755
3756         return false;
3757 }
3758
3759 static bool bnx2x_credit_pool_put_entry(
3760         struct bnx2x_credit_pool_obj *o,
3761         int offset)
3762 {
3763         if (offset < o->base_pool_offset)
3764                 return false;
3765
3766         offset -= o->base_pool_offset;
3767
3768         if (offset >= o->pool_sz)
3769                 return false;
3770
3771         /* Return the entry to the pool */
3772         BIT_VEC64_SET_BIT(o->pool_mirror, offset);
3773
3774         return true;
3775 }
3776
3777 static bool bnx2x_credit_pool_put_entry_always_true(
3778         struct bnx2x_credit_pool_obj *o,
3779         int offset)
3780 {
3781         return true;
3782 }
3783
3784 static bool bnx2x_credit_pool_get_entry_always_true(
3785         struct bnx2x_credit_pool_obj *o,
3786         int *offset)
3787 {
3788         *offset = -1;
3789         return true;
3790 }
3791 /**
3792  * bnx2x_init_credit_pool - initialize credit pool internals.
3793  *
3794  * @p:          credit pool object
3795  * @base:       Base entry in the CAM to use.
3796  * @credit:     pool size.
3797  *
3798  * If base is negative no CAM entries handling will be performed.
3799  * If credit is negative pool operations will always succeed (unlimited pool).
3800  *
3801  */
3802 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3803                                           int base, int credit)
3804 {
3805         /* Zero the object first */
3806         memset(p, 0, sizeof(*p));
3807
3808         /* Set the table to all 1s */
3809         memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3810
3811         /* Init a pool as full */
3812         atomic_set(&p->credit, credit);
3813
3814         /* The total pool size */
3815         p->pool_sz = credit;
3816
3817         p->base_pool_offset = base;
3818
3819         /* Commit the change */
3820         smp_mb();
3821
3822         p->check = bnx2x_credit_pool_check;
3823
3824         /* if pool credit is negative - disable the checks */
3825         if (credit >= 0) {
3826                 p->put      = bnx2x_credit_pool_put;
3827                 p->get      = bnx2x_credit_pool_get;
3828                 p->put_entry = bnx2x_credit_pool_put_entry;
3829                 p->get_entry = bnx2x_credit_pool_get_entry;
3830         } else {
3831                 p->put      = bnx2x_credit_pool_always_true;
3832                 p->get      = bnx2x_credit_pool_always_true;
3833                 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3834                 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3835         }
3836
3837         /* If base is negative - disable entries handling */
3838         if (base < 0) {
3839                 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3840                 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3841         }
3842 }
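
/* Worked example, not part of the driver: after bnx2x_init_credit_pool(p, 32, 8)
 * the pool accounts for 8 credits and p->get_entry() hands out CAM offsets
 * starting at 32, each at most once until p->put_entry() returns it. A
 * negative credit (e.g. bnx2x_init_credit_pool(p, 0, -1)) makes every get/put
 * succeed without accounting, and a negative base keeps the credit counting
 * but disables the per-entry offset handling.
 */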
3843
3844 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3845                                 struct bnx2x_credit_pool_obj *p, u8 func_id,
3846                                 u8 func_num)
3847 {
3848 /* TODO: this will be defined in consts as well... */
3849 #define BNX2X_CAM_SIZE_EMUL 5
3850
3851         int cam_sz;
3852
3853         if (CHIP_IS_E1(bp)) {
3854                 /* In E1, multicast MACs are stored in the CAM... */
3855                 if (!CHIP_REV_IS_SLOW(bp))
3856                         cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3857                 else
3858                         cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3859
3860                 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3861
3862         } else if (CHIP_IS_E1H(bp)) {
3863                 /* CAM credit is equally divided between all active functions
3864                  * on the PORT.
3865                  */
3866                 if ((func_num > 0)) {
3867                         if (!CHIP_REV_IS_SLOW(bp))
3868                                 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3869                         else
3870                                 cam_sz = BNX2X_CAM_SIZE_EMUL;
3871                         bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3872                 } else {
3873                         /* this should never happen! Block MAC operations. */
3874                         bnx2x_init_credit_pool(p, 0, 0);
3875                 }
3876
3877         } else {
3878
3879                 /*
3880                  * CAM credit is equally divided between all active functions
3881                  * on the PATH.
3882                  */
3883                 if ((func_num > 0)) {
3884                         if (!CHIP_REV_IS_SLOW(bp))
3885                                 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3886                         else
3887                                 cam_sz = BNX2X_CAM_SIZE_EMUL;
3888
3889                         /*
3890                          * No need for CAM entries handling for 57712 and
3891                          * newer.
3892                          */
3893                         bnx2x_init_credit_pool(p, -1, cam_sz);
3894                 } else {
3895                         /* this should never happen! Block MAC operations. */
3896                         bnx2x_init_credit_pool(p, 0, 0);
3897                 }
3898
3899         }
3900 }
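
/* Illustrative arithmetic, not part of the driver: on a non-emulation 57711
 * (E1H) board with func_num == 4 each function gets
 * MAX_MAC_CREDIT_E1H / (2 * 4) CAM entries and its slice starts at
 * func_id * cam_sz; on 57712 and newer each function gets
 * MAX_MAC_CREDIT_E2 / func_num credits and no per-entry CAM bookkeeping is
 * done (the base is passed as -1).
 */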
3901
3902 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3903                                  struct bnx2x_credit_pool_obj *p,
3904                                  u8 func_id,
3905                                  u8 func_num)
3906 {
3907         if (CHIP_IS_E1x(bp)) {
3908                 /*
3909                  * There is no VLAN credit in HW on 57710 and 57711; only
3910                  * MAC / MAC-VLAN can be set
3911                  */
3912                 bnx2x_init_credit_pool(p, 0, -1);
3913         } else {
3914                 /*
3915                  * CAM credit is equally divided between all active functions
3916                  * on the PATH.
3917                  */
3918                 if (func_num > 0) {
3919                         int credit = MAX_VLAN_CREDIT_E2 / func_num;
3920                         bnx2x_init_credit_pool(p, func_id * credit, credit);
3921                 } else
3922                         /* this should never happen! Block VLAN operations. */
3923                         bnx2x_init_credit_pool(p, 0, 0);
3924         }
3925 }
3926
3927 /****************** RSS Configuration ******************/
3928 /**
3929  * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3930  *
3931  * @bp:         driver handle
3932  * @p:          pointer to rss configuration
3933  *
3934  * Prints it when NETIF_MSG_IFUP debug level is configured.
3935  */
3936 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3937                                         struct bnx2x_config_rss_params *p)
3938 {
3939         int i;
3940
3941         DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3942         DP(BNX2X_MSG_SP, "0x0000: ");
3943         for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3944                 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3945
3946                 /* Print 4 bytes in a line */
3947                 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3948                     (((i + 1) & 0x3) == 0)) {
3949                         DP_CONT(BNX2X_MSG_SP, "\n");
3950                         DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
3951                 }
3952         }
3953
3954         DP_CONT(BNX2X_MSG_SP, "\n");
3955 }
3956
3957 /**
3958  * bnx2x_setup_rss - configure RSS
3959  *
3960  * @bp:         device handle
3961  * @p:          rss configuration
3962  *
3963  * Sends an RSS UPDATE ramrod.
3964  */
3965 static int bnx2x_setup_rss(struct bnx2x *bp,
3966                            struct bnx2x_config_rss_params *p)
3967 {
3968         struct bnx2x_rss_config_obj *o = p->rss_obj;
3969         struct bnx2x_raw_obj *r = &o->raw;
3970         struct eth_rss_update_ramrod_data *data =
3971                 (struct eth_rss_update_ramrod_data *)(r->rdata);
3972         u8 rss_mode = 0;
3973         int rc;
3974
3975         memset(data, 0, sizeof(*data));
3976
3977         DP(BNX2X_MSG_SP, "Configuring RSS\n");
3978
3979         /* Set an echo field */
3980         data->echo = (r->cid & BNX2X_SWCID_MASK) |
3981                      (r->state << BNX2X_SWCID_SHIFT);
3982
3983         /* RSS mode */
3984         if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
3985                 rss_mode = ETH_RSS_MODE_DISABLED;
3986         else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
3987                 rss_mode = ETH_RSS_MODE_REGULAR;
3988         else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
3989                 rss_mode = ETH_RSS_MODE_VLAN_PRI;
3990         else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
3991                 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
3992         else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
3993                 rss_mode = ETH_RSS_MODE_IP_DSCP;
3994
3995         data->rss_mode = rss_mode;
3996
3997         DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
3998
3999         /* RSS capabilities */
4000         if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4001                 data->capabilities |=
4002                         ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4003
4004         if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4005                 data->capabilities |=
4006                         ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4007
4008         if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4009                 data->capabilities |=
4010                         ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4011
4012         if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4013                 data->capabilities |=
4014                         ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4015
4016         /* Hashing mask */
4017         data->rss_result_mask = p->rss_result_mask;
4018
4019         /* RSS engine ID */
4020         data->rss_engine_id = o->engine_id;
4021
4022         DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4023
4024         /* Indirection table */
4025         memcpy(data->indirection_table, p->ind_table,
4026                   T_ETH_INDIRECTION_TABLE_SIZE);
4027
4028         /* Remember the last configuration */
4029         memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4030
4031         /* Print the indirection table */
4032         if (netif_msg_ifup(bp))
4033                 bnx2x_debug_print_ind_table(bp, p);
4034
4035         /* RSS keys */
4036         if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4037                 memcpy(&data->rss_key[0], &p->rss_key[0],
4038                        sizeof(data->rss_key));
4039                 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4040         }
4041
4042         /* Commit writes towards the memory before sending a ramrod */
4043         mb();
4044
4045         /* Send a ramrod */
4046         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4047                            U64_HI(r->rdata_mapping),
4048                            U64_LO(r->rdata_mapping),
4049                            ETH_CONNECTION_TYPE);
4050
4051         if (rc < 0)
4052                 return rc;
4053
4054         return 1;
4055 }
4056
4057 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4058                              u8 *ind_table)
4059 {
4060         memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4061 }
4062
4063 int bnx2x_config_rss(struct bnx2x *bp,
4064                      struct bnx2x_config_rss_params *p)
4065 {
4066         int rc;
4067         struct bnx2x_rss_config_obj *o = p->rss_obj;
4068         struct bnx2x_raw_obj *r = &o->raw;
4069
4070         /* Do nothing if only driver cleanup was requested */
4071         if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4072                 return 0;
4073
4074         r->set_pending(r);
4075
4076         rc = o->config_rss(bp, p);
4077         if (rc < 0) {
4078                 r->clear_pending(r);
4079                 return rc;
4080         }
4081
4082         if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4083                 rc = r->wait_comp(bp, r);
4084
4085         return rc;
4086 }
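
/* A minimal usage sketch, not part of the driver: it assumes an initialized
 * RSS object (here taken to live in bp->rss_conf_obj) and an indirection
 * table of T_ETH_INDIRECTION_TABLE_SIZE bytes prepared by the caller. The
 * helper name and the 0x7f result mask are chosen for illustration only.
 */
static int bnx2x_example_config_rss(struct bnx2x *bp, u8 *ind_table)
{
	struct bnx2x_config_rss_params params = {0};

	params.rss_obj = &bp->rss_conf_obj;

	/* Regular RSS hashing on IPv4/TCP; wait for the ramrod to complete */
	__set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
	__set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
	__set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);

	params.rss_result_mask = 0x7f;	/* 7 bits of hash result */
	memcpy(params.ind_table, ind_table, T_ETH_INDIRECTION_TABLE_SIZE);

	return bnx2x_config_rss(bp, &params);
}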
4087
4088
4089 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4090                                struct bnx2x_rss_config_obj *rss_obj,
4091                                u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4092                                void *rdata, dma_addr_t rdata_mapping,
4093                                int state, unsigned long *pstate,
4094                                bnx2x_obj_type type)
4095 {
4096         bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4097                            rdata_mapping, state, pstate, type);
4098
4099         rss_obj->engine_id  = engine_id;
4100         rss_obj->config_rss = bnx2x_setup_rss;
4101 }
4102
4103 /********************** Queue state object ***********************************/
4104
4105 /**
4106  * bnx2x_queue_state_change - perform Queue state change transition
4107  *
4108  * @bp:         device handle
4109  * @params:     parameters to perform the transition
4110  *
4111  * Returns 0 in case of a successfully completed transition, a negative error
4112  * code in case of failure, or a positive (EBUSY) value if there is a completion
4113  * that is still pending (possible only if RAMROD_COMP_WAIT is
4114  * not set in params->ramrod_flags for asynchronous commands).
4115  *
4116  */
4117 int bnx2x_queue_state_change(struct bnx2x *bp,
4118                              struct bnx2x_queue_state_params *params)
4119 {
4120         struct bnx2x_queue_sp_obj *o = params->q_obj;
4121         int rc, pending_bit;
4122         unsigned long *pending = &o->pending;
4123
4124         /* Check that the requested transition is legal */
4125         if (o->check_transition(bp, o, params))
4126                 return -EINVAL;
4127
4128         /* Set "pending" bit */
4129         pending_bit = o->set_pending(o, params);
4130
4131         /* Don't send a command if only driver cleanup was requested */
4132         if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags))
4133                 o->complete_cmd(bp, o, pending_bit);
4134         else {
4135                 /* Send a ramrod */
4136                 rc = o->send_cmd(bp, params);
4137                 if (rc) {
4138                         o->next_state = BNX2X_Q_STATE_MAX;
4139                         clear_bit(pending_bit, pending);
4140                         smp_mb__after_clear_bit();
4141                         return rc;
4142                 }
4143
4144                 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
4145                         rc = o->wait_comp(bp, o, pending_bit);
4146                         if (rc)
4147                                 return rc;
4148
4149                         return 0;
4150                 }
4151         }
4152
4153         return !!test_bit(pending_bit, pending);
4154 }
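
/* A minimal usage sketch, not part of the driver: it assumes q_obj points at
 * an already initialized queue state object and the caller wants the
 * transition carried out synchronously. The helper name is hypothetical.
 */
static int bnx2x_example_queue_deactivate(struct bnx2x *bp,
					  struct bnx2x_queue_sp_obj *q_obj)
{
	struct bnx2x_queue_state_params q_params = {0};

	q_params.q_obj = q_obj;
	q_params.cmd = BNX2X_Q_CMD_DEACTIVATE;

	/* Return only once the ramrod completion has arrived */
	__set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);

	return bnx2x_queue_state_change(bp, &q_params);
}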
4155
4156
4157 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4158                                    struct bnx2x_queue_state_params *params)
4159 {
4160         enum bnx2x_queue_cmd cmd = params->cmd, bit;
4161
4162         /* ACTIVATE and DEACTIVATE commands are implemented on top of
4163          * UPDATE command.
4164          */
4165         if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4166             (cmd == BNX2X_Q_CMD_DEACTIVATE))
4167                 bit = BNX2X_Q_CMD_UPDATE;
4168         else
4169                 bit = cmd;
4170
4171         set_bit(bit, &obj->pending);
4172         return bit;
4173 }
4174
4175 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4176                                  struct bnx2x_queue_sp_obj *o,
4177                                  enum bnx2x_queue_cmd cmd)
4178 {
4179         return bnx2x_state_wait(bp, cmd, &o->pending);
4180 }
4181
4182 /**
4183  * bnx2x_queue_comp_cmd - complete the state change command.
4184  *
4185  * @bp:         device handle
4186  * @o:          queue state object
4187  * @cmd:        command that has completed
4188  *
4189  * Checks that the arrived completion is expected.
4190  */
4191 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4192                                 struct bnx2x_queue_sp_obj *o,
4193                                 enum bnx2x_queue_cmd cmd)
4194 {
4195         unsigned long cur_pending = o->pending;
4196
4197         if (!test_and_clear_bit(cmd, &cur_pending)) {
4198                 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4199                           "pending 0x%lx, next_state %d\n", cmd, o->cid,
4200                           o->state, cur_pending, o->next_state);
4201                 return -EINVAL;
4202         }
4203
4204         DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4205                          "setting state to %d\n", cmd, o->cid, o->next_state);
4206
4207         o->state = o->next_state;
4208         o->next_state = BNX2X_Q_STATE_MAX;
4209
4210         /* It's important that o->state and o->next_state are
4211          * updated before o->pending.
4212          */
4213         wmb();
4214
4215         clear_bit(cmd, &o->pending);
4216         smp_mb__after_clear_bit();
4217
4218         return 0;
4219 }
4220
4221 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4222                                 struct bnx2x_queue_state_params *cmd_params,
4223                                 struct client_init_ramrod_data *data)
4224 {
4225         struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4226
4227         /* Rx data */
4228
4229         /* IPv6 TPA supported for E2 and above only */
4230         data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA, &params->flags) *
4231                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4232 }
4233
4234 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4235                                 struct bnx2x_queue_state_params *cmd_params,
4236                                 struct client_init_ramrod_data *data)
4237 {
4238         struct bnx2x_queue_sp_obj *o = cmd_params->q_obj;
4239         struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4240
4241
4242         /* general */
4243         data->general.client_id = o->cl_id;
4244
4245         if (test_bit(BNX2X_Q_FLG_STATS, &params->flags)) {
4246                 data->general.statistics_counter_id =
4247                                         params->gen_params.stat_id;
4248                 data->general.statistics_en_flg = 1;
4249                 data->general.statistics_zero_flg =
4250                         test_bit(BNX2X_Q_FLG_ZERO_STATS, &params->flags);
4251         } else
4252                 data->general.statistics_counter_id =
4253                                         DISABLE_STATISTIC_COUNTER_ID_VALUE;
4254
4255         data->general.is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, &params->flags);
4256         data->general.activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE,
4257                                               &params->flags);
4258         data->general.sp_client_id = params->gen_params.spcl_id;
4259         data->general.mtu = cpu_to_le16(params->gen_params.mtu);
4260         data->general.func_id = o->func_id;
4261
4262
4263         data->general.cos = params->txq_params.cos;
4264
4265         data->general.traffic_type =
4266                 test_bit(BNX2X_Q_FLG_FCOE, &params->flags) ?
4267                 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4268
4269         /* Rx data */
4270         data->rx.tpa_en = test_bit(BNX2X_Q_FLG_TPA, &params->flags) *
4271                                 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4272         data->rx.vmqueue_mode_en_flg = 0;
4273
4274         data->rx.cache_line_alignment_log_size =
4275                 params->rxq_params.cache_line_log;
4276         data->rx.enable_dynamic_hc =
4277                 test_bit(BNX2X_Q_FLG_DHC, &params->flags);
4278         data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
4279         data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
4280         data->rx.max_agg_size = cpu_to_le16(params->rxq_params.tpa_agg_sz);
4281
4282         /* Always start in DROP_ALL mode */
4283         data->rx.state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4284                                      CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4285
4286         /* We don't set drop flags */
4287         data->rx.drop_ip_cs_err_flg = 0;
4288         data->rx.drop_tcp_cs_err_flg = 0;
4289         data->rx.drop_ttl0_flg = 0;
4290         data->rx.drop_udp_cs_err_flg = 0;
4291         data->rx.inner_vlan_removal_enable_flg =
4292                 test_bit(BNX2X_Q_FLG_VLAN, &params->flags);
4293         data->rx.outer_vlan_removal_enable_flg =
4294                 test_bit(BNX2X_Q_FLG_OV, &params->flags);
4295         data->rx.status_block_id = params->rxq_params.fw_sb_id;
4296         data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
4297         data->rx.max_tpa_queues = params->rxq_params.max_tpa_queues;
4298         data->rx.max_bytes_on_bd = cpu_to_le16(params->rxq_params.buf_sz);
4299         data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
4300         data->rx.bd_page_base.lo =
4301                 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
4302         data->rx.bd_page_base.hi =
4303                 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
4304         data->rx.sge_page_base.lo =
4305                 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
4306         data->rx.sge_page_base.hi =
4307                 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
4308         data->rx.cqe_page_base.lo =
4309                 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
4310         data->rx.cqe_page_base.hi =
4311                 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
4312         data->rx.is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS,
4313                                            &params->flags);
4314
4315         if (test_bit(BNX2X_Q_FLG_MCAST, &params->flags)) {
4316                 data->rx.approx_mcast_engine_id = o->func_id;
4317                 data->rx.is_approx_mcast = 1;
4318         }
4319
4320         data->rx.rss_engine_id = params->rxq_params.rss_engine_id;
4321
4322         /* flow control data */
4323         data->rx.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
4324         data->rx.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
4325         data->rx.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
4326         data->rx.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
4327         data->rx.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
4328         data->rx.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
4329         data->rx.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
4330
4331         /* silent vlan removal */
4332         data->rx.silent_vlan_removal_flg =
4333                 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &params->flags);
4334         data->rx.silent_vlan_value =
4335                 cpu_to_le16(params->rxq_params.silent_removal_value);
4336         data->rx.silent_vlan_mask =
4337                 cpu_to_le16(params->rxq_params.silent_removal_mask);
4338
4339         /* Tx data */
4340         data->tx.enforce_security_flg =
4341                 test_bit(BNX2X_Q_FLG_TX_SEC, &params->flags);
4342         data->tx.default_vlan =
4343                 cpu_to_le16(params->txq_params.default_vlan);
4344         data->tx.default_vlan_flg =
4345                 test_bit(BNX2X_Q_FLG_DEF_VLAN, &params->flags);
4346         data->tx.tx_switching_flg =
4347                 test_bit(BNX2X_Q_FLG_TX_SWITCH, &params->flags);
4348         data->tx.anti_spoofing_flg =
4349                 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, &params->flags);
4350         data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
4351         data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
4352         data->tx.tss_leading_client_id = params->txq_params.tss_leading_cl_id;
4353
4354         data->tx.tx_bd_page_base.lo =
4355                 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
4356         data->tx.tx_bd_page_base.hi =
4357                 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
4358
4359         /* Don't configure any Tx switching mode during queue SETUP */
4360         data->tx.state = 0;
4361 }
4362
4364 /**
4365  * bnx2x_q_init - init HW/FW queue
4366  *
4367  * @bp:         device handle
4368  * @params:     queue state parameters
4369  *
4370  * HW/FW initial Queue configuration:
4371  *      - HC: Rx and Tx
4372  *      - CDU context validation
4373  *
4374  */
4375 static inline int bnx2x_q_init(struct bnx2x *bp,
4376                                struct bnx2x_queue_state_params *params)
4377 {
4378         struct bnx2x_queue_sp_obj *o = params->q_obj;
4379         struct bnx2x_queue_init_params *init = &params->params.init;
4380         u16 hc_usec;
4381
4382         /* Tx HC configuration */
4383         if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4384             test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
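                     /* hc_rate is given in interrupts per second; turn it
                      * into a host coalescing timeout in microseconds
                      * (a zero rate yields a zero timeout).
                      */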
4385                 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4386
4387                 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4388                         init->tx.sb_cq_index,
4389                         !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4390                         hc_usec);
4391         }
4392
4393         /* Rx HC configuration */
4394         if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4395             test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4396                 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4397
4398                 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4399                         init->rx.sb_cq_index,
4400                         !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4401                         hc_usec);
4402         }
4403
4404         /* Set CDU context validation values */
4405         bnx2x_set_ctx_validation(bp, init->cxt, o->cid);
4406
4407         /* As no ramrod is sent, complete the command immediately  */
4408         o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4409
4410         mmiowb();
4411         smp_mb();
4412
4413         return 0;
4414 }
4415
4416 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4417                                         struct bnx2x_queue_state_params *params)
4418 {
4419         struct bnx2x_queue_sp_obj *o = params->q_obj;
4420         struct client_init_ramrod_data *rdata =
4421                 (struct client_init_ramrod_data *)o->rdata;
4422         dma_addr_t data_mapping = o->rdata_mapping;
4423         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4424
4425         /* Clear the ramrod data */
4426         memset(rdata, 0, sizeof(*rdata));
4427
4428         /* Fill the ramrod data */
4429         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4430
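             /* Make sure the ramrod data is fully written to memory before
              * the SP post below hands it over to the FW.
              */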
4431         mb();
4432
4433         return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping),
4434                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4435 }
4436
4437 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4438                                         struct bnx2x_queue_state_params *params)
4439 {
4440         struct bnx2x_queue_sp_obj *o = params->q_obj;
4441         struct client_init_ramrod_data *rdata =
4442                 (struct client_init_ramrod_data *)o->rdata;
4443         dma_addr_t data_mapping = o->rdata_mapping;
4444         int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4445
4446         /* Clear the ramrod data */
4447         memset(rdata, 0, sizeof(*rdata));
4448
4449         /* Fill the ramrod data */
4450         bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4451         bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4452
4453         mb();
4454
4455         return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping),
4456                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4457 }
4458
4459 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4460                                      struct bnx2x_queue_sp_obj *obj,
4461                                      struct bnx2x_queue_update_params *params,
4462                                      struct client_update_ramrod_data *data)
4463 {
4464         /* Client ID of the client to update */
4465         data->client_id = obj->cl_id;
4466
4467         /* Function ID of the client to update */
4468         data->func_id = obj->func_id;
4469
4470         /* Default VLAN value */
4471         data->default_vlan = cpu_to_le16(params->def_vlan);
4472
4473         /* Inner VLAN stripping */
4474         data->inner_vlan_removal_enable_flg =
4475                 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, &params->update_flags);
4476         data->inner_vlan_removal_change_flg =
4477                 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4478                          &params->update_flags);
4479
4480         /* Outer VLAN stripping */
4481         data->outer_vlan_removal_enable_flg =
4482                 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, &params->update_flags);
4483         data->outer_vlan_removal_change_flg =
4484                 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4485                          &params->update_flags);
4486
4487         /* Drop packets that have source MAC that doesn't belong to this
4488          * Queue.
4489          */
4490         data->anti_spoofing_enable_flg =
4491                 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, &params->update_flags);
4492         data->anti_spoofing_change_flg =
4493                 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, &params->update_flags);
4494
4495         /* Activate/Deactivate */
4496         data->activate_flg =
4497                 test_bit(BNX2X_Q_UPDATE_ACTIVATE, &params->update_flags);
4498         data->activate_change_flg =
4499                 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &params->update_flags);
4500
4501         /* Enable default VLAN */
4502         data->default_vlan_enable_flg =
4503                 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, &params->update_flags);
4504         data->default_vlan_change_flg =
4505                 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4506                          &params->update_flags);
4507
4508         /* silent vlan removal */
4509         data->silent_vlan_change_flg =
4510                 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4511                          &params->update_flags);
4512         data->silent_vlan_removal_flg =
4513                 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, &params->update_flags);
4514         data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4515         data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4516 }
4517
4518 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4519                                       struct bnx2x_queue_state_params *params)
4520 {
4521         struct bnx2x_queue_sp_obj *o = params->q_obj;
4522         struct client_update_ramrod_data *rdata =
4523                 (struct client_update_ramrod_data *)o->rdata;
4524         dma_addr_t data_mapping = o->rdata_mapping;
4525
4526         /* Clear the ramrod data */
4527         memset(rdata, 0, sizeof(*rdata));
4528
4529         /* Fill the ramrod data */
4530         bnx2x_q_fill_update_data(bp, o, &params->params.update, rdata);
4531
4532         mb();
4533
4534         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cid,
4535                              U64_HI(data_mapping),
4536                              U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4537 }
4538
4539 /**
4540  * bnx2x_q_send_deactivate - send DEACTIVATE command
4541  *
4542  * @bp:         device handle
4543  * @params:     queue state parameters
4544  *
4545  * Implemented using the UPDATE command.
4546  */
4547 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4548                                         struct bnx2x_queue_state_params *params)
4549 {
4550         struct bnx2x_queue_update_params *update = &params->params.update;
4551
4552         memset(update, 0, sizeof(*update));
4553
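             /* ACTIVATE_CHNG without ACTIVATE requests a transition to the
              * INACTIVE state (see bnx2x_queue_chk_transition()).
              */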
4554         __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4555
4556         return bnx2x_q_send_update(bp, params);
4557 }
4558
4559 /**
4560  * bnx2x_q_send_activate - send ACTIVATE command
4561  *
4562  * @bp:         device handle
4563  * @params:     queue state parameters
4564  *
4565  * Implemented using the UPDATE command.
4566  */
4567 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4568                                         struct bnx2x_queue_state_params *params)
4569 {
4570         struct bnx2x_queue_update_params *update = &params->params.update;
4571
4572         memset(update, 0, sizeof(*update));
4573
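             /* ACTIVATE together with ACTIVATE_CHNG requests a transition
              * to the ACTIVE state.
              */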
4574         __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4575         __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4576
4577         return bnx2x_q_send_update(bp, params);
4578 }
4579
4580 static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
4581                                         struct bnx2x_queue_state_params *params)
4582 {
4583         /* TODO: Not implemented yet. */
4584         return -1;
4585 }
4586
4587 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4588                                     struct bnx2x_queue_state_params *params)
4589 {
4590         struct bnx2x_queue_sp_obj *o = params->q_obj;
4591
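             /* HALT uses no ramrod data buffer; the client ID is passed
              * directly in the SP post.
              */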
4592         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, o->cid, 0, o->cl_id,
4593                              ETH_CONNECTION_TYPE);
4594 }
4595
4596 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4597                                        struct bnx2x_queue_state_params *params)
4598 {
4599         struct bnx2x_queue_sp_obj *o = params->q_obj;
4600
4601         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, o->cid, 0, 0,
4602                              NONE_CONNECTION_TYPE);
4603 }
4604
4605 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4606                                         struct bnx2x_queue_state_params *params)
4607 {
4608         struct bnx2x_queue_sp_obj *o = params->q_obj;
4609
4610         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, o->cid, 0, 0,
4611                              ETH_CONNECTION_TYPE);
4612 }
4613
4614 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4615                                      struct bnx2x_queue_state_params *params)
4616 {
4617         struct bnx2x_queue_sp_obj *o = params->q_obj;
4618
4619         return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, o->cid, 0, 0,
4620                              ETH_CONNECTION_TYPE);
4621 }
4622
4623 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4624                                         struct bnx2x_queue_state_params *params)
4625 {
4626         switch (params->cmd) {
4627         case BNX2X_Q_CMD_INIT:
4628                 return bnx2x_q_init(bp, params);
4629         case BNX2X_Q_CMD_DEACTIVATE:
4630                 return bnx2x_q_send_deactivate(bp, params);
4631         case BNX2X_Q_CMD_ACTIVATE:
4632                 return bnx2x_q_send_activate(bp, params);
4633         case BNX2X_Q_CMD_UPDATE:
4634                 return bnx2x_q_send_update(bp, params);
4635         case BNX2X_Q_CMD_UPDATE_TPA:
4636                 return bnx2x_q_send_update_tpa(bp, params);
4637         case BNX2X_Q_CMD_HALT:
4638                 return bnx2x_q_send_halt(bp, params);
4639         case BNX2X_Q_CMD_CFC_DEL:
4640                 return bnx2x_q_send_cfc_del(bp, params);
4641         case BNX2X_Q_CMD_TERMINATE:
4642                 return bnx2x_q_send_terminate(bp, params);
4643         case BNX2X_Q_CMD_EMPTY:
4644                 return bnx2x_q_send_empty(bp, params);
4645         default:
4646                 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4647                 return -EINVAL;
4648         }
4649 }
4650
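     /* E1x and E2 differ only in how the CLIENT_SETUP ramrod data is built
      * (E2 additionally fills its chip-specific part); all other commands
      * go through bnx2x_queue_send_cmd_cmn().
      */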
4651 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4652                                     struct bnx2x_queue_state_params *params)
4653 {
4654         switch (params->cmd) {
4655         case BNX2X_Q_CMD_SETUP:
4656                 return bnx2x_q_send_setup_e1x(bp, params);
4657         case BNX2X_Q_CMD_INIT:
4658         case BNX2X_Q_CMD_DEACTIVATE:
4659         case BNX2X_Q_CMD_ACTIVATE:
4660         case BNX2X_Q_CMD_UPDATE:
4661         case BNX2X_Q_CMD_UPDATE_TPA:
4662         case BNX2X_Q_CMD_HALT:
4663         case BNX2X_Q_CMD_CFC_DEL:
4664         case BNX2X_Q_CMD_TERMINATE:
4665         case BNX2X_Q_CMD_EMPTY:
4666                 return bnx2x_queue_send_cmd_cmn(bp, params);
4667         default:
4668                 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4669                 return -EINVAL;
4670         }
4671 }
4672
4673 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4674                                    struct bnx2x_queue_state_params *params)
4675 {
4676         switch (params->cmd) {
4677         case BNX2X_Q_CMD_SETUP:
4678                 return bnx2x_q_send_setup_e2(bp, params);
4679         case BNX2X_Q_CMD_INIT:
4680         case BNX2X_Q_CMD_DEACTIVATE:
4681         case BNX2X_Q_CMD_ACTIVATE:
4682         case BNX2X_Q_CMD_UPDATE:
4683         case BNX2X_Q_CMD_UPDATE_TPA:
4684         case BNX2X_Q_CMD_HALT:
4685         case BNX2X_Q_CMD_CFC_DEL:
4686         case BNX2X_Q_CMD_TERMINATE:
4687         case BNX2X_Q_CMD_EMPTY:
4688                 return bnx2x_queue_send_cmd_cmn(bp, params);
4689         default:
4690                 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4691                 return -EINVAL;
4692         }
4693 }
4694
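     /* Regular (non-FWD) queue state machine, as enforced by
      * bnx2x_queue_chk_transition() below:
      *
      *   RESET --INIT--> INITIALIZED --SETUP--> ACTIVE or INACTIVE
      *   ACTIVE <--ACTIVATE/DEACTIVATE (or UPDATE)--> INACTIVE
      *   ACTIVE/INACTIVE --HALT--> STOPPED --TERMINATE--> TERMINATED
      *   TERMINATED --CFC_DEL--> RESET
      */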
4695 /**
4696  * bnx2x_queue_chk_transition - check state machine of a regular Queue
4697  *
4698  * @bp:         device handle
4699  * @o:          queue state object
4700  * @params:     queue state parameters
4701  *
4702  * Handles a regular (i.e. not Forwarding) Queue.
4703  * It both checks if the requested command is legal in the current
4704  * state and, if it's legal, sets a `next_state' in the object
4705  * that will be used in the completion flow to set the `state'
4706  * of the object.
4707  *
4708  * returns 0 if a requested command is a legal transition,
4709  *         -EINVAL otherwise.
4710  */
4711 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4712                                       struct bnx2x_queue_sp_obj *o,
4713                                       struct bnx2x_queue_state_params *params)
4714 {
4715         enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4716         enum bnx2x_queue_cmd cmd = params->cmd;
4717
4718         switch (state) {
4719         case BNX2X_Q_STATE_RESET:
4720                 if (cmd == BNX2X_Q_CMD_INIT)
4721                         next_state = BNX2X_Q_STATE_INITIALIZED;
4722
4723                 break;
4724         case BNX2X_Q_STATE_INITIALIZED:
4725                 if (cmd == BNX2X_Q_CMD_SETUP) {
4726                         if (test_bit(BNX2X_Q_FLG_ACTIVE,
4727                                      &params->params.setup.flags))
4728                                 next_state = BNX2X_Q_STATE_ACTIVE;
4729                         else
4730                                 next_state = BNX2X_Q_STATE_INACTIVE;
4731                 }
4732
4733                 break;
4734         case BNX2X_Q_STATE_ACTIVE:
4735                 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4736                         next_state = BNX2X_Q_STATE_INACTIVE;
4737
4738                 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4739                          (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4740                         next_state = BNX2X_Q_STATE_ACTIVE;
4741
4742                 else if (cmd == BNX2X_Q_CMD_HALT)
4743                         next_state = BNX2X_Q_STATE_STOPPED;
4744
4745                 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4746                         struct bnx2x_queue_update_params *update_params =
4747                                 &params->params.update;
4748
4749                         /* If "active" state change is requested, update the
4750                          *  state accordingly.
4751                          */
4752                         if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4753                                      &update_params->update_flags) &&
4754                             !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4755                                       &update_params->update_flags))
4756                                 next_state = BNX2X_Q_STATE_INACTIVE;
4757                         else
4758                                 next_state = BNX2X_Q_STATE_ACTIVE;
4759                 }
4760
4761                 break;
4762         case BNX2X_Q_STATE_INACTIVE:
4763                 if (cmd == BNX2X_Q_CMD_ACTIVATE)
4764                         next_state = BNX2X_Q_STATE_ACTIVE;
4765
4766                 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4767                          (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4768                         next_state = BNX2X_Q_STATE_INACTIVE;
4769
4770                 else if (cmd == BNX2X_Q_CMD_HALT)
4771                         next_state = BNX2X_Q_STATE_STOPPED;
4772
4773                 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4774                         struct bnx2x_queue_update_params *update_params =
4775                                 &params->params.update;
4776
4777                         /* If "active" state change is requested, update the
4778                          * state accordingly.
4779                          */
4780                         if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4781                                      &update_params->update_flags) &&
4782                             test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4783                                      &update_params->update_flags))
4784                                 next_state = BNX2X_Q_STATE_ACTIVE;
4785                         else
4786                                 next_state = BNX2X_Q_STATE_INACTIVE;
4787                 }
4788
4789                 break;
4790         case BNX2X_Q_STATE_STOPPED:
4791                 if (cmd == BNX2X_Q_CMD_TERMINATE)
4792                         next_state = BNX2X_Q_STATE_TERMINATED;
4793
4794                 break;
4795         case BNX2X_Q_STATE_TERMINATED:
4796                 if (cmd == BNX2X_Q_CMD_CFC_DEL)
4797                         next_state = BNX2X_Q_STATE_RESET;
4798
4799                 break;
4800         default:
4801                 BNX2X_ERR("Illegal state: %d\n", state);
4802         }
4803
4804         /* Transition is allowed */
4805         if (next_state != BNX2X_Q_STATE_MAX) {
4806                 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
4807                                  state, cmd, next_state);
4808                 o->next_state = next_state;
4809                 return 0;
4810         }
4811
4812         DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
4813
4814         return -EINVAL;
4815 }
4816
4817 void bnx2x_init_queue_obj(struct bnx2x *bp,
4818                           struct bnx2x_queue_sp_obj *obj,
4819                           u8 cl_id, u32 cid, u8 func_id, void *rdata,
4820                           dma_addr_t rdata_mapping, unsigned long type)
4821 {
4822         memset(obj, 0, sizeof(*obj));
4823
4824         obj->cid = cid;
4825         obj->cl_id = cl_id;
4826         obj->func_id = func_id;
4827         obj->rdata = rdata;
4828         obj->rdata_mapping = rdata_mapping;
4829         obj->type = type;
4830         obj->next_state = BNX2X_Q_STATE_MAX;
4831
4832         if (CHIP_IS_E1x(bp))
4833                 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
4834         else
4835                 obj->send_cmd = bnx2x_queue_send_cmd_e2;
4836
4837         obj->check_transition = bnx2x_queue_chk_transition;
4838
4839         obj->complete_cmd = bnx2x_queue_comp_cmd;
4840         obj->wait_comp = bnx2x_queue_wait_comp;
4841         obj->set_pending = bnx2x_queue_set_pending;
4842 }
4843
4844 /********************** Function state object *********************************/
4845
4846 static int bnx2x_func_wait_comp(struct bnx2x *bp,
4847                                 struct bnx2x_func_sp_obj *o,
4848                                 enum bnx2x_func_cmd cmd)
4849 {
4850         return bnx2x_state_wait(bp, cmd, &o->pending);
4851 }
4852
4853 /**
4854  * bnx2x_func_state_change_comp - complete the state machine transition
4855  *
4856  * @bp:         device handle
4857  * @o:          function state object
4858  * @cmd:        command that has completed
4859  *
4860  * Called on state change transition. Completes the state
4861  * machine transition only - no HW interaction.
4862  */
4863 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
4864                                                struct bnx2x_func_sp_obj *o,
4865                                                enum bnx2x_func_cmd cmd)
4866 {
4867         unsigned long cur_pending = o->pending;
4868
4869         if (!test_and_clear_bit(cmd, &cur_pending)) {
4870                 BNX2X_ERR("Bad MC reply %d for func %d in state %d "
4871                           "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
4872                           o->state, cur_pending, o->next_state);
4873                 return -EINVAL;
4874         }
4875
4876         DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
4877                          "%d\n", cmd, BP_FUNC(bp), o->next_state);
4878
4879         o->state = o->next_state;
4880         o->next_state = BNX2X_F_STATE_MAX;
4881
4882         /* It's important that o->state and o->next_state are
4883          * updated before o->pending.
4884          */
4885         wmb();
4886
4887         clear_bit(cmd, &o->pending);
4888         smp_mb__after_clear_bit();
4889
4890         return 0;
4891 }
4892
4893 /**
4894  * bnx2x_func_comp_cmd - complete the state change command
4895  *
4896  * @bp:         device handle
4897  * @o:          function state object
4898  * @cmd:        command to complete
4899  *
4900  * Checks that the arrived completion is expected.
4901  */
4902 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
4903                                struct bnx2x_func_sp_obj *o,
4904                                enum bnx2x_func_cmd cmd)
4905 {
4906         /* Complete the state machine part first, check if it's a
4907          * legal completion.
4908          */
4909         int rc = bnx2x_func_state_change_comp(bp, o, cmd);
4910         return rc;
4911 }
4912
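     /* Function state machine, as enforced by bnx2x_func_chk_transition()
      * below:
      *
      *   RESET --HW_INIT--> INITIALIZED --START--> STARTED
      *   STARTED --STOP--> INITIALIZED --HW_RESET--> RESET
      */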
4913 /**
4914  * bnx2x_func_chk_transition - check function state machine transition
4915  *
4916  * @bp:         device handle
4917  * @o:          function state object
4918  * @params:     function state parameters
4919  *
4920  * It both checks if the requested command is legal in the current
4921  * state and, if it's legal, sets a `next_state' in the object
4922  * that will be used in the completion flow to set the `state'
4923  * of the object.
4924  *
4925  * returns 0 if a requested command is a legal transition,
4926  *         -EINVAL otherwise.
4927  */
4928 static int bnx2x_func_chk_transition(struct bnx2x *bp,
4929                                      struct bnx2x_func_sp_obj *o,
4930                                      struct bnx2x_func_state_params *params)
4931 {
4932         enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
4933         enum bnx2x_func_cmd cmd = params->cmd;
4934
4935         switch (state) {
4936         case BNX2X_F_STATE_RESET:
4937                 if (cmd == BNX2X_F_CMD_HW_INIT)
4938                         next_state = BNX2X_F_STATE_INITIALIZED;
4939
4940                 break;
4941         case BNX2X_F_STATE_INITIALIZED:
4942                 if (cmd == BNX2X_F_CMD_START)
4943                         next_state = BNX2X_F_STATE_STARTED;
4944
4945                 else if (cmd == BNX2X_F_CMD_HW_RESET)
4946                         next_state = BNX2X_F_STATE_RESET;
4947
4948                 break;
4949         case BNX2X_F_STATE_STARTED:
4950                 if (cmd == BNX2X_F_CMD_STOP)
4951                         next_state = BNX2X_F_STATE_INITIALIZED;
4952
4953                 break;
4954         default:
4955                 BNX2X_ERR("Unknown state: %d\n", state);
4956         }
4957
4958         /* Transition is allowed */
4959         if (next_state != BNX2X_F_STATE_MAX) {
4960                 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
4961                                  state, cmd, next_state);
4962                 o->next_state = next_state;
4963                 return 0;
4964         }
4965
4966         DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
4967                          state, cmd);
4968
4969         return -EINVAL;
4970 }
4971
4972 /**
4973  * bnx2x_func_init_func - performs HW init at function stage
4974  *
4975  * @bp:         device handle
4976  * @drv:        driver-specific HW callbacks
4977  *
4978  * Init HW when the current phase is
4979  * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4980  * HW blocks.
4981  */
4982 static inline int bnx2x_func_init_func(struct bnx2x *bp,
4983                                        const struct bnx2x_func_sp_drv_ops *drv)
4984 {
4985         return drv->init_hw_func(bp);
4986 }
4987
4988 /**
4989  * bnx2x_func_init_port - performs HW init at port stage
4990  *
4991  * @bp:         device handle
4992  * @drv:        driver-specific HW callbacks
4993  *
4994  * Init HW when the current phase is
4995  * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4996  * FUNCTION-only HW blocks.
4997  *
4998  */
4999 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5000                                        const struct bnx2x_func_sp_drv_ops *drv)
5001 {
5002         int rc = drv->init_hw_port(bp);
5003         if (rc)
5004                 return rc;
5005
5006         return bnx2x_func_init_func(bp, drv);
5007 }
5008
5009 /**
5010  * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5011  *
5012  * @bp:         device handle
5013  * @drv:        driver-specific HW callbacks
5014  *
5015  * Init HW when the current phase is
5016  * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5017  * PORT-only and FUNCTION-only HW blocks.
5018  */
5019 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5020                                         const struct bnx2x_func_sp_drv_ops *drv)
5021 {
5022         int rc = drv->init_hw_cmn_chip(bp);
5023         if (rc)
5024                 return rc;
5025
5026         return bnx2x_func_init_port(bp, drv);
5027 }
5028
5029 /**
5030  * bnx2x_func_init_cmn - performs HW init at common stage
5031  *
5032  * @bp:         device handle
5033  * @drv:        driver-specific HW callbacks
5034  *
5035  * Init HW when the current phase is
5036  * FW_MSG_CODE_DRV_LOAD_COMMON: initialize COMMON,
5037  * PORT-only and FUNCTION-only HW blocks.
5038  */
5039 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5040                                       const struct bnx2x_func_sp_drv_ops *drv)
5041 {
5042         int rc = drv->init_hw_cmn(bp);
5043         if (rc)
5044                 return rc;
5045
5046         return bnx2x_func_init_port(bp, drv);
5047 }
5048
5049 static int bnx2x_func_hw_init(struct bnx2x *bp,
5050                               struct bnx2x_func_state_params *params)
5051 {
5052         u32 load_code = params->params.hw_init.load_phase;
5053         struct bnx2x_func_sp_obj *o = params->f_obj;
5054         const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5055         int rc = 0;
5056
5057         DP(BNX2X_MSG_SP, "function %d  load_code %x\n",
5058                          BP_ABS_FUNC(bp), load_code);
5059
5060         /* Prepare buffers for unzipping the FW */
5061         rc = drv->gunzip_init(bp);
5062         if (rc)
5063                 return rc;
5064
5065         /* Prepare FW */
5066         rc = drv->init_fw(bp);
5067         if (rc) {
5068                 BNX2X_ERR("Error loading firmware\n");
5069                 goto fw_init_err;
5070         }
5071
5072         /* Handle the beginning of COMMON_XXX phases separately... */
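             /* The COMMON, COMMON_CHIP and PORT handlers also run the lower
              * init stages (chip-common -> port -> function), so only the
              * topmost stage for this load_code is selected here.
              */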
5073         switch (load_code) {
5074         case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5075                 rc = bnx2x_func_init_cmn_chip(bp, drv);
5076                 if (rc)
5077                         goto init_hw_err;
5078
5079                 break;
5080         case FW_MSG_CODE_DRV_LOAD_COMMON:
5081                 rc = bnx2x_func_init_cmn(bp, drv);
5082                 if (rc)
5083                         goto init_hw_err;
5084
5085                 break;
5086         case FW_MSG_CODE_DRV_LOAD_PORT:
5087                 rc = bnx2x_func_init_port(bp, drv);
5088                 if (rc)
5089                         goto init_hw_err;
5090
5091                 break;
5092         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5093                 rc = bnx2x_func_init_func(bp, drv);
5094                 if (rc)
5095                         goto init_hw_err;
5096
5097                 break;
5098         default:
5099                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5100                 rc = -EINVAL;
5101         }
5102
5103 init_hw_err:
5104         drv->release_fw(bp);
5105
5106 fw_init_err:
5107         drv->gunzip_end(bp);
5108
5109         /* In case of success, complete the command immediately: no ramrods
5110          * have been sent.
5111          */
5112         if (!rc)
5113                 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5114
5115         return rc;
5116 }
5117
5118 /**
5119  * bnx2x_func_reset_func - reset HW at function stage
5120  *
5121  * @bp:         device handle
5122  * @drv:        driver-specific HW callbacks
5123  *
5124  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5125  * FUNCTION-only HW blocks.
5126  */
5127 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5128                                         const struct bnx2x_func_sp_drv_ops *drv)
5129 {
5130         drv->reset_hw_func(bp);
5131 }
5132
5133 /**
5134  * bnx2x_func_reset_port - reset HW at port stage
5135  *
5136  * @bp:         device handle
5137  * @drv:        driver-specific HW callbacks
5138  *
5139  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5140  * FUNCTION-only and PORT-only HW blocks.
5141  *
5142  *                 !!!IMPORTANT!!!
5143  *
5144  * It's important to call reset_port before reset_func() as the last thing
5145  * reset_func does is pf_disable() thus disabling PGLUE_B, which
5146  * makes impossible any DMAE transactions.
5147  */
5148 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5149                                         const struct bnx2x_func_sp_drv_ops *drv)
5150 {
5151         drv->reset_hw_port(bp);
5152         bnx2x_func_reset_func(bp, drv);
5153 }
5154
5155 /**
5156  * bnx2x_func_reset_cmn - reset HW at common stage
5157  *
5158  * @bp:         device handle
5159  * @drv:        driver-specific HW callbacks
5160  *
5161  * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5162  * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5163  * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5164  */
5165 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5166                                         const struct bnx2x_func_sp_drv_ops *drv)
5167 {
5168         bnx2x_func_reset_port(bp, drv);
5169         drv->reset_hw_cmn(bp);
5170 }
5171
5172
5173 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5174                                       struct bnx2x_func_state_params *params)
5175 {
5176         u32 reset_phase = params->params.hw_reset.reset_phase;
5177         struct bnx2x_func_sp_obj *o = params->f_obj;
5178         const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5179
5180         DP(BNX2X_MSG_SP, "function %d  reset_phase %x\n", BP_ABS_FUNC(bp),
5181                          reset_phase);
5182
5183         switch (reset_phase) {
5184         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5185                 bnx2x_func_reset_cmn(bp, drv);
5186                 break;
5187         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5188                 bnx2x_func_reset_port(bp, drv);
5189                 break;
5190         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5191                 bnx2x_func_reset_func(bp, drv);
5192                 break;
5193         default:
5194                 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5195                            reset_phase);
5196                 break;
5197         }
5198
5199         /* Complete the command immediately: no ramrods have been sent. */
5200         o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5201
5202         return 0;
5203 }
5204
5205 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5206                                         struct bnx2x_func_state_params *params)
5207 {
5208         struct bnx2x_func_sp_obj *o = params->f_obj;
5209         struct function_start_data *rdata =
5210                 (struct function_start_data *)o->rdata;
5211         dma_addr_t data_mapping = o->rdata_mapping;
5212         struct bnx2x_func_start_params *start_params = &params->params.start;
5213
5214         memset(rdata, 0, sizeof(*rdata));
5215
5216         /* Fill the ramrod data with provided parameters */
5217         rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5218         rdata->sd_vlan_tag   = start_params->sd_vlan_tag;
5219         rdata->path_id       = BP_PATH(bp);
5220         rdata->network_cos_mode = start_params->network_cos_mode;
5221
5222         mb();
5223
5224         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5225                              U64_HI(data_mapping),
5226                              U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5227 }
5228
5229 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5230                                        struct bnx2x_func_state_params *params)
5231 {
5232         return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5233                              NONE_CONNECTION_TYPE);
5234 }
5235
5236 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5237                                struct bnx2x_func_state_params *params)
5238 {
5239         switch (params->cmd) {
5240         case BNX2X_F_CMD_HW_INIT:
5241                 return bnx2x_func_hw_init(bp, params);
5242         case BNX2X_F_CMD_START:
5243                 return bnx2x_func_send_start(bp, params);
5244         case BNX2X_F_CMD_STOP:
5245                 return bnx2x_func_send_stop(bp, params);
5246         case BNX2X_F_CMD_HW_RESET:
5247                 return bnx2x_func_hw_reset(bp, params);
5248         default:
5249                 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5250                 return -EINVAL;
5251         }
5252 }
5253
5254 void bnx2x_init_func_obj(struct bnx2x *bp,
5255                          struct bnx2x_func_sp_obj *obj,
5256                          void *rdata, dma_addr_t rdata_mapping,
5257                          struct bnx2x_func_sp_drv_ops *drv_iface)
5258 {
5259         memset(obj, 0, sizeof(*obj));
5260
5261         mutex_init(&obj->one_pending_mutex);
5262
5263         obj->rdata = rdata;
5264         obj->rdata_mapping = rdata_mapping;
5265
5266         obj->send_cmd = bnx2x_func_send_cmd;
5267         obj->check_transition = bnx2x_func_chk_transition;
5268         obj->complete_cmd = bnx2x_func_comp_cmd;
5269         obj->wait_comp = bnx2x_func_wait_comp;
5270
5271         obj->drv = drv_iface;
5272 }
5273
5274 /**
5275  * bnx2x_func_state_change - perform Function state change transition
5276  *
5277  * @bp:         device handle
5278  * @params:     parameters to perform the transaction
5279  *
5280  * returns 0 in case of successfully completed transition,
5281  *         negative error code in case of failure, positive
5282  *         (EBUSY) value if there is a completion that is
5283  *         still pending (possible only if RAMROD_COMP_WAIT is
5284  *         not set in params->ramrod_flags for asynchronous
5285  *         commands).
5286  */
5287 int bnx2x_func_state_change(struct bnx2x *bp,
5288                             struct bnx2x_func_state_params *params)
5289 {
5290         struct bnx2x_func_sp_obj *o = params->f_obj;
5291         int rc;
5292         enum bnx2x_func_cmd cmd = params->cmd;
5293         unsigned long *pending = &o->pending;
5294
5295         mutex_lock(&o->one_pending_mutex);
5296
5297         /* Check that the requested transition is legal */
5298         if (o->check_transition(bp, o, params)) {
5299                 mutex_unlock(&o->one_pending_mutex);
5300                 return -EINVAL;
5301         }
5302
5303         /* Set "pending" bit */
5304         set_bit(cmd, pending);
5305
5306         /* Don't send a command if only driver cleanup was requested */
5307         if (test_bit(RAMROD_DRV_CLR_ONLY, &params->ramrod_flags)) {
5308                 bnx2x_func_state_change_comp(bp, o, cmd);
5309                 mutex_unlock(&o->one_pending_mutex);
5310         } else {
5311                 /* Send a ramrod */
5312                 rc = o->send_cmd(bp, params);
5313
5314                 mutex_unlock(&o->one_pending_mutex);
5315
5316                 if (rc) {
5317                         o->next_state = BNX2X_F_STATE_MAX;
5318                         clear_bit(cmd, pending);
5319                         smp_mb__after_clear_bit();
5320                         return rc;
5321                 }
5322
5323                 if (test_bit(RAMROD_COMP_WAIT, &params->ramrod_flags)) {
5324                         rc = o->wait_comp(bp, o, cmd);
5325                         if (rc)
5326                                 return rc;
5327
5328                         return 0;
5329                 }
5330         }
5331
5332         return !!test_bit(cmd, pending);
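             /* Non-zero means the ramrod completion is still pending
              * (asynchronous flow, RAMROD_COMP_WAIT not set).
              */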
5333 }