1 /* bnx2x_sp.c: Broadcom Everest network driver.
3 * Copyright 2011 Broadcom Corporation
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Vladislav Zolotarov
19 #include <linux/version.h>
20 #include <linux/module.h>
21 #include <linux/crc32.h>
22 #include <linux/netdevice.h>
23 #include <linux/etherdevice.h>
24 #include <linux/crc32c.h>
26 #include "bnx2x_cmn.h"
29 #define BNX2X_MAX_EMUL_MULTI 16
31 /**** Exe Queue interfaces ****/
34 * bnx2x_exe_queue_init - init the Exe Queue object
 * @o: pointer to the object
 * @owner: pointer to the owner
39 * @validate: validate function pointer
40 * @optimize: optimize function pointer
41 * @exec: execute function pointer
42 * @get: get function pointer
44 static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
45 struct bnx2x_exe_queue_obj *o,
47 union bnx2x_qable_obj *owner,
48 exe_q_validate validate,
49 exe_q_optimize optimize,
53 memset(o, 0, sizeof(*o));
55 INIT_LIST_HEAD(&o->exe_queue);
56 INIT_LIST_HEAD(&o->pending_comp);
58 spin_lock_init(&o->lock);
60 o->exe_chunk_len = exe_len;
63 /* Owner specific callbacks */
64 o->validate = validate;
65 o->optimize = optimize;
69 DP(BNX2X_MSG_SP, "Setup the execution queue with the chunk "
70 "length of %d\n", exe_len);
73 static inline void bnx2x_exe_queue_free_elem(struct bnx2x *bp,
74 struct bnx2x_exeq_elem *elem)
76 DP(BNX2X_MSG_SP, "Deleting an exe_queue element\n");
80 static inline int bnx2x_exe_queue_length(struct bnx2x_exe_queue_obj *o)
82 struct bnx2x_exeq_elem *elem;
85 spin_lock_bh(&o->lock);
87 list_for_each_entry(elem, &o->exe_queue, link)
90 spin_unlock_bh(&o->lock);
96 * bnx2x_exe_queue_add - add a new element to the execution queue
100 * @cmd: new command to add
101 * @restore: true - do not optimize the command
103 * If the element is optimized or is illegal, frees it.
105 static inline int bnx2x_exe_queue_add(struct bnx2x *bp,
106 struct bnx2x_exe_queue_obj *o,
107 struct bnx2x_exeq_elem *elem,
112 spin_lock_bh(&o->lock);
115 /* Try to cancel this element queue */
116 rc = o->optimize(bp, o->owner, elem);
120 /* Check if this request is ok */
121 rc = o->validate(bp, o->owner, elem);
123 BNX2X_ERR("Preamble failed: %d\n", rc);
128 /* If so, add it to the execution queue */
129 list_add_tail(&elem->link, &o->exe_queue);
131 spin_unlock_bh(&o->lock);
136 bnx2x_exe_queue_free_elem(bp, elem);
138 spin_unlock_bh(&o->lock);
144 static inline void __bnx2x_exe_queue_reset_pending(
146 struct bnx2x_exe_queue_obj *o)
148 struct bnx2x_exeq_elem *elem;
150 while (!list_empty(&o->pending_comp)) {
151 elem = list_first_entry(&o->pending_comp,
152 struct bnx2x_exeq_elem, link);
154 list_del(&elem->link);
155 bnx2x_exe_queue_free_elem(bp, elem);
159 static inline void bnx2x_exe_queue_reset_pending(struct bnx2x *bp,
160 struct bnx2x_exe_queue_obj *o)
163 spin_lock_bh(&o->lock);
165 __bnx2x_exe_queue_reset_pending(bp, o);
167 spin_unlock_bh(&o->lock);
172 * bnx2x_exe_queue_step - execute one execution chunk atomically
176 * @ramrod_flags: flags
 * (Atomicity is ensured using the exe_queue->lock).
180 static inline int bnx2x_exe_queue_step(struct bnx2x *bp,
181 struct bnx2x_exe_queue_obj *o,
182 unsigned long *ramrod_flags)
184 struct bnx2x_exeq_elem *elem, spacer;
187 memset(&spacer, 0, sizeof(spacer));
189 spin_lock_bh(&o->lock);
192 * Next step should not be performed until the current is finished,
193 * unless a DRV_CLEAR_ONLY bit is set. In this case we just want to
194 * properly clear object internals without sending any command to the FW
195 * which also implies there won't be any completion to clear the
198 if (!list_empty(&o->pending_comp)) {
199 if (test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags)) {
200 DP(BNX2X_MSG_SP, "RAMROD_DRV_CLR_ONLY requested: "
201 "resetting pending_comp\n");
202 __bnx2x_exe_queue_reset_pending(bp, o);
204 spin_unlock_bh(&o->lock);
210 * Run through the pending commands list and create a next
213 while (!list_empty(&o->exe_queue)) {
214 elem = list_first_entry(&o->exe_queue, struct bnx2x_exeq_elem,
216 WARN_ON(!elem->cmd_len);
218 if (cur_len + elem->cmd_len <= o->exe_chunk_len) {
219 cur_len += elem->cmd_len;
221 * Prevent from both lists being empty when moving an
222 * element. This will allow the call of
223 * bnx2x_exe_queue_empty() without locking.
225 list_add_tail(&spacer.link, &o->pending_comp);
227 list_del(&elem->link);
228 list_add_tail(&elem->link, &o->pending_comp);
229 list_del(&spacer.link);
236 spin_unlock_bh(&o->lock);
240 rc = o->execute(bp, o->owner, &o->pending_comp, ramrod_flags);
243 * In case of an error return the commands back to the queue
244 * and reset the pending_comp.
246 list_splice_init(&o->pending_comp, &o->exe_queue);
249 * If zero is returned, means there are no outstanding pending
250 * completions and we may dismiss the pending list.
252 __bnx2x_exe_queue_reset_pending(bp, o);
254 spin_unlock_bh(&o->lock);
258 static inline bool bnx2x_exe_queue_empty(struct bnx2x_exe_queue_obj *o)
260 bool empty = list_empty(&o->exe_queue);
262 /* Don't reorder!!! */
265 return empty && list_empty(&o->pending_comp);
268 static inline struct bnx2x_exeq_elem *bnx2x_exe_queue_alloc_elem(
271 DP(BNX2X_MSG_SP, "Allocating a new exe_queue element\n");
272 return kzalloc(sizeof(struct bnx2x_exeq_elem), GFP_ATOMIC);
275 /************************ raw_obj functions ***********************************/
276 static bool bnx2x_raw_check_pending(struct bnx2x_raw_obj *o)
278 return !!test_bit(o->state, o->pstate);
281 static void bnx2x_raw_clear_pending(struct bnx2x_raw_obj *o)
283 smp_mb__before_clear_bit();
284 clear_bit(o->state, o->pstate);
285 smp_mb__after_clear_bit();
288 static void bnx2x_raw_set_pending(struct bnx2x_raw_obj *o)
290 smp_mb__before_clear_bit();
291 set_bit(o->state, o->pstate);
292 smp_mb__after_clear_bit();
296 * bnx2x_state_wait - wait until the given bit(state) is cleared
299 * @state: state which is to be cleared
300 * @state_p: state buffer
303 static inline int bnx2x_state_wait(struct bnx2x *bp, int state,
304 unsigned long *pstate)
306 /* can take a while if any port is running */
310 if (CHIP_REV_IS_EMUL(bp))
313 DP(BNX2X_MSG_SP, "waiting for state to become %d\n", state);
317 if (!test_bit(state, pstate)) {
318 #ifdef BNX2X_STOP_ON_ERROR
319 DP(BNX2X_MSG_SP, "exit (cnt %d)\n", 5000 - cnt);
324 usleep_range(1000, 1000);
331 BNX2X_ERR("timeout waiting for state %d\n", state);
332 #ifdef BNX2X_STOP_ON_ERROR
339 static int bnx2x_raw_wait(struct bnx2x *bp, struct bnx2x_raw_obj *raw)
341 return bnx2x_state_wait(bp, raw->state, raw->pstate);
344 /***************** Classification verbs: Set/Del MAC/VLAN/VLAN-MAC ************/
345 /* credit handling callbacks */
346 static bool bnx2x_get_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int *offset)
348 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
352 return mp->get_entry(mp, offset);
355 static bool bnx2x_get_credit_mac(struct bnx2x_vlan_mac_obj *o)
357 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
361 return mp->get(mp, 1);
364 static bool bnx2x_get_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int *offset)
366 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
370 return vp->get_entry(vp, offset);
373 static bool bnx2x_get_credit_vlan(struct bnx2x_vlan_mac_obj *o)
375 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
379 return vp->get(vp, 1);
382 static bool bnx2x_get_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
384 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
385 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
390 if (!vp->get(vp, 1)) {
398 static bool bnx2x_put_cam_offset_mac(struct bnx2x_vlan_mac_obj *o, int offset)
400 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
402 return mp->put_entry(mp, offset);
405 static bool bnx2x_put_credit_mac(struct bnx2x_vlan_mac_obj *o)
407 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
409 return mp->put(mp, 1);
412 static bool bnx2x_put_cam_offset_vlan(struct bnx2x_vlan_mac_obj *o, int offset)
414 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
416 return vp->put_entry(vp, offset);
419 static bool bnx2x_put_credit_vlan(struct bnx2x_vlan_mac_obj *o)
421 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
423 return vp->put(vp, 1);
426 static bool bnx2x_put_credit_vlan_mac(struct bnx2x_vlan_mac_obj *o)
428 struct bnx2x_credit_pool_obj *mp = o->macs_pool;
429 struct bnx2x_credit_pool_obj *vp = o->vlans_pool;
434 if (!vp->put(vp, 1)) {
442 /* check_add() callbacks */
443 static int bnx2x_check_mac_add(struct bnx2x_vlan_mac_obj *o,
444 union bnx2x_classification_ramrod_data *data)
446 struct bnx2x_vlan_mac_registry_elem *pos;
448 if (!is_valid_ether_addr(data->mac.mac))
451 /* Check if a requested MAC already exists */
452 list_for_each_entry(pos, &o->head, link)
453 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
459 static int bnx2x_check_vlan_add(struct bnx2x_vlan_mac_obj *o,
460 union bnx2x_classification_ramrod_data *data)
462 struct bnx2x_vlan_mac_registry_elem *pos;
464 list_for_each_entry(pos, &o->head, link)
465 if (data->vlan.vlan == pos->u.vlan.vlan)
471 static int bnx2x_check_vlan_mac_add(struct bnx2x_vlan_mac_obj *o,
472 union bnx2x_classification_ramrod_data *data)
474 struct bnx2x_vlan_mac_registry_elem *pos;
476 list_for_each_entry(pos, &o->head, link)
477 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
478 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
486 /* check_del() callbacks */
487 static struct bnx2x_vlan_mac_registry_elem *
488 bnx2x_check_mac_del(struct bnx2x_vlan_mac_obj *o,
489 union bnx2x_classification_ramrod_data *data)
491 struct bnx2x_vlan_mac_registry_elem *pos;
493 list_for_each_entry(pos, &o->head, link)
494 if (!memcmp(data->mac.mac, pos->u.mac.mac, ETH_ALEN))
500 static struct bnx2x_vlan_mac_registry_elem *
501 bnx2x_check_vlan_del(struct bnx2x_vlan_mac_obj *o,
502 union bnx2x_classification_ramrod_data *data)
504 struct bnx2x_vlan_mac_registry_elem *pos;
506 list_for_each_entry(pos, &o->head, link)
507 if (data->vlan.vlan == pos->u.vlan.vlan)
513 static struct bnx2x_vlan_mac_registry_elem *
514 bnx2x_check_vlan_mac_del(struct bnx2x_vlan_mac_obj *o,
515 union bnx2x_classification_ramrod_data *data)
517 struct bnx2x_vlan_mac_registry_elem *pos;
519 list_for_each_entry(pos, &o->head, link)
520 if ((data->vlan_mac.vlan == pos->u.vlan_mac.vlan) &&
521 (!memcmp(data->vlan_mac.mac, pos->u.vlan_mac.mac,
528 /* check_move() callback */
529 static bool bnx2x_check_move(struct bnx2x_vlan_mac_obj *src_o,
530 struct bnx2x_vlan_mac_obj *dst_o,
531 union bnx2x_classification_ramrod_data *data)
533 struct bnx2x_vlan_mac_registry_elem *pos;
536 /* Check if we can delete the requested configuration from the first
539 pos = src_o->check_del(src_o, data);
541 /* check if configuration can be added */
542 rc = dst_o->check_add(dst_o, data);
544 /* If this classification can not be added (is already set)
545 * or can't be deleted - return an error.
553 static bool bnx2x_check_move_always_err(
554 struct bnx2x_vlan_mac_obj *src_o,
555 struct bnx2x_vlan_mac_obj *dst_o,
556 union bnx2x_classification_ramrod_data *data)
562 static inline u8 bnx2x_vlan_mac_get_rx_tx_flag(struct bnx2x_vlan_mac_obj *o)
564 struct bnx2x_raw_obj *raw = &o->raw;
567 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
568 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
569 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_TX_CMD;
571 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
572 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
573 rx_tx_flag |= ETH_CLASSIFY_CMD_HEADER_RX_CMD;
578 /* LLH CAM line allocations */
580 LLH_CAM_ISCSI_ETH_LINE = 0,
582 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE / 2
585 static inline void bnx2x_set_mac_in_nig(struct bnx2x *bp,
586 bool add, unsigned char *dev_addr, int index)
589 u32 reg_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
590 NIG_REG_LLH0_FUNC_MEM;
592 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
595 DP(BNX2X_MSG_SP, "Going to %s LLH configuration at entry %d\n",
596 (add ? "ADD" : "DELETE"), index);
599 /* LLH_FUNC_MEM is a u64 WB register */
600 reg_offset += 8*index;
602 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
603 (dev_addr[4] << 8) | dev_addr[5]);
604 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
606 REG_WR_DMAE(bp, reg_offset, wb_data, 2);
609 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
610 NIG_REG_LLH0_FUNC_MEM_ENABLE) + 4*index, add);
614 * bnx2x_vlan_mac_set_cmd_hdr_e2 - set a header in a single classify ramrod
617 * @o: queue for which we want to configure this rule
618 * @add: if true the command is an ADD command, DEL otherwise
619 * @opcode: CLASSIFY_RULE_OPCODE_XXX
620 * @hdr: pointer to a header to setup
623 static inline void bnx2x_vlan_mac_set_cmd_hdr_e2(struct bnx2x *bp,
624 struct bnx2x_vlan_mac_obj *o, bool add, int opcode,
625 struct eth_classify_cmd_header *hdr)
627 struct bnx2x_raw_obj *raw = &o->raw;
629 hdr->client_id = raw->cl_id;
630 hdr->func_id = raw->func_id;
632 /* Rx or/and Tx (internal switching) configuration ? */
633 hdr->cmd_general_data |=
634 bnx2x_vlan_mac_get_rx_tx_flag(o);
637 hdr->cmd_general_data |= ETH_CLASSIFY_CMD_HEADER_IS_ADD;
639 hdr->cmd_general_data |=
640 (opcode << ETH_CLASSIFY_CMD_HEADER_OPCODE_SHIFT);
644 * bnx2x_vlan_mac_set_rdata_hdr_e2 - set the classify ramrod data header
646 * @cid: connection id
647 * @type: BNX2X_FILTER_XXX_PENDING
 * @hdr: pointer to header to setup
651 * currently we always configure one rule and echo field to contain a CID and an
654 static inline void bnx2x_vlan_mac_set_rdata_hdr_e2(u32 cid, int type,
655 struct eth_classify_header *hdr, int rule_cnt)
657 hdr->echo = (cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT);
658 hdr->rule_cnt = (u8)rule_cnt;
662 /* hw_config() callbacks */
663 static void bnx2x_set_one_mac_e2(struct bnx2x *bp,
664 struct bnx2x_vlan_mac_obj *o,
665 struct bnx2x_exeq_elem *elem, int rule_idx,
668 struct bnx2x_raw_obj *raw = &o->raw;
669 struct eth_classify_rules_ramrod_data *data =
670 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
671 int rule_cnt = rule_idx + 1, cmd = elem->cmd_data.vlan_mac.cmd;
672 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
673 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
674 unsigned long *vlan_mac_flags = &elem->cmd_data.vlan_mac.vlan_mac_flags;
675 u8 *mac = elem->cmd_data.vlan_mac.u.mac.mac;
678 * Set LLH CAM entry: currently only iSCSI and ETH macs are
679 * relevant. In addition, current implementation is tuned for a
682 * When multiple unicast ETH MACs PF configuration in switch
683 * independent mode is required (NetQ, multiple netdev MACs,
684 * etc.), consider better utilisation of 8 per function MAC
685 * entries in the LLH register. There is also
686 * NIG_REG_P[01]_LLH_FUNC_MEM2 registers that complete the
687 * total number of CAM entries to 16.
689 * Currently we won't configure NIG for MACs other than a primary ETH
690 * MAC and iSCSI L2 MAC.
692 * If this MAC is moving from one Queue to another, no need to change
695 if (cmd != BNX2X_VLAN_MAC_MOVE) {
696 if (test_bit(BNX2X_ISCSI_ETH_MAC, vlan_mac_flags))
697 bnx2x_set_mac_in_nig(bp, add, mac,
698 LLH_CAM_ISCSI_ETH_LINE);
699 else if (test_bit(BNX2X_ETH_MAC, vlan_mac_flags))
700 bnx2x_set_mac_in_nig(bp, add, mac, LLH_CAM_ETH_LINE);
703 /* Reset the ramrod data buffer for the first rule */
705 memset(data, 0, sizeof(*data));
707 /* Setup a command header */
708 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_MAC,
709 &rule_entry->mac.header);
711 DP(BNX2X_MSG_SP, "About to %s MAC "BNX2X_MAC_FMT" for "
712 "Queue %d\n", (add ? "add" : "delete"),
713 BNX2X_MAC_PRN_LIST(mac), raw->cl_id);
715 /* Set a MAC itself */
716 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
717 &rule_entry->mac.mac_mid,
718 &rule_entry->mac.mac_lsb, mac);
720 /* MOVE: Add a rule that will add this MAC to the target Queue */
721 if (cmd == BNX2X_VLAN_MAC_MOVE) {
725 /* Setup ramrod data */
726 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
727 elem->cmd_data.vlan_mac.target_obj,
728 true, CLASSIFY_RULE_OPCODE_MAC,
729 &rule_entry->mac.header);
731 /* Set a MAC itself */
732 bnx2x_set_fw_mac_addr(&rule_entry->mac.mac_msb,
733 &rule_entry->mac.mac_mid,
734 &rule_entry->mac.mac_lsb, mac);
737 /* Set the ramrod data header */
738 /* TODO: take this to the higher level in order to prevent multiple
740 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
745 * bnx2x_vlan_mac_set_rdata_hdr_e1x - set a header in a single classify ramrod
750 * @cam_offset: offset in cam memory
751 * @hdr: pointer to a header to setup
755 static inline void bnx2x_vlan_mac_set_rdata_hdr_e1x(struct bnx2x *bp,
756 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset,
757 struct mac_configuration_hdr *hdr)
759 struct bnx2x_raw_obj *r = &o->raw;
762 hdr->offset = (u8)cam_offset;
763 hdr->client_id = 0xff;
764 hdr->echo = ((r->cid & BNX2X_SWCID_MASK) | (type << BNX2X_SWCID_SHIFT));
767 static inline void bnx2x_vlan_mac_set_cfg_entry_e1x(struct bnx2x *bp,
768 struct bnx2x_vlan_mac_obj *o, bool add, int opcode, u8 *mac,
769 u16 vlan_id, struct mac_configuration_entry *cfg_entry)
771 struct bnx2x_raw_obj *r = &o->raw;
772 u32 cl_bit_vec = (1 << r->cl_id);
774 cfg_entry->clients_bit_vector = cpu_to_le32(cl_bit_vec);
775 cfg_entry->pf_id = r->func_id;
776 cfg_entry->vlan_id = cpu_to_le16(vlan_id);
779 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
780 T_ETH_MAC_COMMAND_SET);
781 SET_FLAG(cfg_entry->flags,
782 MAC_CONFIGURATION_ENTRY_VLAN_FILTERING_MODE, opcode);
784 /* Set a MAC in a ramrod data */
785 bnx2x_set_fw_mac_addr(&cfg_entry->msb_mac_addr,
786 &cfg_entry->middle_mac_addr,
787 &cfg_entry->lsb_mac_addr, mac);
789 SET_FLAG(cfg_entry->flags, MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
790 T_ETH_MAC_COMMAND_INVALIDATE);
793 static inline void bnx2x_vlan_mac_set_rdata_e1x(struct bnx2x *bp,
794 struct bnx2x_vlan_mac_obj *o, int type, int cam_offset, bool add,
795 u8 *mac, u16 vlan_id, int opcode, struct mac_configuration_cmd *config)
797 struct mac_configuration_entry *cfg_entry = &config->config_table[0];
798 struct bnx2x_raw_obj *raw = &o->raw;
800 bnx2x_vlan_mac_set_rdata_hdr_e1x(bp, o, type, cam_offset,
802 bnx2x_vlan_mac_set_cfg_entry_e1x(bp, o, add, opcode, mac, vlan_id,
805 DP(BNX2X_MSG_SP, "%s MAC "BNX2X_MAC_FMT" CLID %d CAM offset %d\n",
806 (add ? "setting" : "clearing"),
807 BNX2X_MAC_PRN_LIST(mac), raw->cl_id, cam_offset);
811 * bnx2x_set_one_mac_e1x - fill a single MAC rule ramrod data
814 * @o: bnx2x_vlan_mac_obj
815 * @elem: bnx2x_exeq_elem
816 * @rule_idx: rule_idx
817 * @cam_offset: cam_offset
819 static void bnx2x_set_one_mac_e1x(struct bnx2x *bp,
820 struct bnx2x_vlan_mac_obj *o,
821 struct bnx2x_exeq_elem *elem, int rule_idx,
824 struct bnx2x_raw_obj *raw = &o->raw;
825 struct mac_configuration_cmd *config =
826 (struct mac_configuration_cmd *)(raw->rdata);
828 * 57710 and 57711 do not support MOVE command,
829 * so it's either ADD or DEL
831 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
834 /* Reset the ramrod data buffer */
835 memset(config, 0, sizeof(*config));
837 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_MAC_PENDING,
839 elem->cmd_data.vlan_mac.u.mac.mac, 0,
840 ETH_VLAN_FILTER_ANY_VLAN, config);
843 static void bnx2x_set_one_vlan_e2(struct bnx2x *bp,
844 struct bnx2x_vlan_mac_obj *o,
845 struct bnx2x_exeq_elem *elem, int rule_idx,
848 struct bnx2x_raw_obj *raw = &o->raw;
849 struct eth_classify_rules_ramrod_data *data =
850 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
851 int rule_cnt = rule_idx + 1;
852 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
853 int cmd = elem->cmd_data.vlan_mac.cmd;
854 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
855 u16 vlan = elem->cmd_data.vlan_mac.u.vlan.vlan;
857 /* Reset the ramrod data buffer for the first rule */
859 memset(data, 0, sizeof(*data));
861 /* Set a rule header */
862 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_VLAN,
863 &rule_entry->vlan.header);
865 DP(BNX2X_MSG_SP, "About to %s VLAN %d\n", (add ? "add" : "delete"),
868 /* Set a VLAN itself */
869 rule_entry->vlan.vlan = cpu_to_le16(vlan);
871 /* MOVE: Add a rule that will add this MAC to the target Queue */
872 if (cmd == BNX2X_VLAN_MAC_MOVE) {
876 /* Setup ramrod data */
877 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
878 elem->cmd_data.vlan_mac.target_obj,
879 true, CLASSIFY_RULE_OPCODE_VLAN,
880 &rule_entry->vlan.header);
882 /* Set a VLAN itself */
883 rule_entry->vlan.vlan = cpu_to_le16(vlan);
886 /* Set the ramrod data header */
887 /* TODO: take this to the higher level in order to prevent multiple
889 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
893 static void bnx2x_set_one_vlan_mac_e2(struct bnx2x *bp,
894 struct bnx2x_vlan_mac_obj *o,
895 struct bnx2x_exeq_elem *elem,
896 int rule_idx, int cam_offset)
898 struct bnx2x_raw_obj *raw = &o->raw;
899 struct eth_classify_rules_ramrod_data *data =
900 (struct eth_classify_rules_ramrod_data *)(raw->rdata);
901 int rule_cnt = rule_idx + 1;
902 union eth_classify_rule_cmd *rule_entry = &data->rules[rule_idx];
903 int cmd = elem->cmd_data.vlan_mac.cmd;
904 bool add = (cmd == BNX2X_VLAN_MAC_ADD) ? true : false;
905 u16 vlan = elem->cmd_data.vlan_mac.u.vlan_mac.vlan;
906 u8 *mac = elem->cmd_data.vlan_mac.u.vlan_mac.mac;
909 /* Reset the ramrod data buffer for the first rule */
911 memset(data, 0, sizeof(*data));
913 /* Set a rule header */
914 bnx2x_vlan_mac_set_cmd_hdr_e2(bp, o, add, CLASSIFY_RULE_OPCODE_PAIR,
915 &rule_entry->pair.header);
917 /* Set VLAN and MAC themselvs */
918 rule_entry->pair.vlan = cpu_to_le16(vlan);
919 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
920 &rule_entry->pair.mac_mid,
921 &rule_entry->pair.mac_lsb, mac);
923 /* MOVE: Add a rule that will add this MAC to the target Queue */
924 if (cmd == BNX2X_VLAN_MAC_MOVE) {
928 /* Setup ramrod data */
929 bnx2x_vlan_mac_set_cmd_hdr_e2(bp,
930 elem->cmd_data.vlan_mac.target_obj,
931 true, CLASSIFY_RULE_OPCODE_PAIR,
932 &rule_entry->pair.header);
934 /* Set a VLAN itself */
935 rule_entry->pair.vlan = cpu_to_le16(vlan);
936 bnx2x_set_fw_mac_addr(&rule_entry->pair.mac_msb,
937 &rule_entry->pair.mac_mid,
938 &rule_entry->pair.mac_lsb, mac);
941 /* Set the ramrod data header */
942 /* TODO: take this to the higher level in order to prevent multiple
944 bnx2x_vlan_mac_set_rdata_hdr_e2(raw->cid, raw->state, &data->header,
949 * bnx2x_set_one_vlan_mac_e1h -
952 * @o: bnx2x_vlan_mac_obj
953 * @elem: bnx2x_exeq_elem
954 * @rule_idx: rule_idx
955 * @cam_offset: cam_offset
957 static void bnx2x_set_one_vlan_mac_e1h(struct bnx2x *bp,
958 struct bnx2x_vlan_mac_obj *o,
959 struct bnx2x_exeq_elem *elem,
960 int rule_idx, int cam_offset)
962 struct bnx2x_raw_obj *raw = &o->raw;
963 struct mac_configuration_cmd *config =
964 (struct mac_configuration_cmd *)(raw->rdata);
966 * 57710 and 57711 do not support MOVE command,
967 * so it's either ADD or DEL
969 bool add = (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
972 /* Reset the ramrod data buffer */
973 memset(config, 0, sizeof(*config));
975 bnx2x_vlan_mac_set_rdata_e1x(bp, o, BNX2X_FILTER_VLAN_MAC_PENDING,
977 elem->cmd_data.vlan_mac.u.vlan_mac.mac,
978 elem->cmd_data.vlan_mac.u.vlan_mac.vlan,
979 ETH_VLAN_FILTER_CLASSIFY, config);
/* Return the list entry following @pos; caller must ensure one exists. */
#define list_next_entry(pos, member) \
	list_entry((pos)->member.next, typeof(*(pos)), member)
986 * bnx2x_vlan_mac_restore - reconfigure next MAC/VLAN/VLAN-MAC element
989 * @p: command parameters
 * @ppos: pointer to the cookie
992 * reconfigure next MAC/VLAN/VLAN-MAC element from the
993 * previously configured elements list.
995 * from command parameters only RAMROD_COMP_WAIT bit in ramrod_flags is taken
 * pointer to the cookie - that should be given back in the next call to make
999 * function handle the next element. If *ppos is set to NULL it will restart the
1000 * iterator. If returned *ppos == NULL this means that the last element has been
1004 static int bnx2x_vlan_mac_restore(struct bnx2x *bp,
1005 struct bnx2x_vlan_mac_ramrod_params *p,
1006 struct bnx2x_vlan_mac_registry_elem **ppos)
1008 struct bnx2x_vlan_mac_registry_elem *pos;
1009 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1011 /* If list is empty - there is nothing to do here */
1012 if (list_empty(&o->head)) {
1017 /* make a step... */
1019 *ppos = list_first_entry(&o->head,
1020 struct bnx2x_vlan_mac_registry_elem,
1023 *ppos = list_next_entry(*ppos, link);
1027 /* If it's the last step - return NULL */
1028 if (list_is_last(&pos->link, &o->head))
1031 /* Prepare a 'user_req' */
1032 memcpy(&p->user_req.u, &pos->u, sizeof(pos->u));
1034 /* Set the command */
1035 p->user_req.cmd = BNX2X_VLAN_MAC_ADD;
1037 /* Set vlan_mac_flags */
1038 p->user_req.vlan_mac_flags = pos->vlan_mac_flags;
1040 /* Set a restore bit */
1041 __set_bit(RAMROD_RESTORE, &p->ramrod_flags);
1043 return bnx2x_config_vlan_mac(bp, p);
1047 * bnx2x_exeq_get_mac/bnx2x_exeq_get_vlan/bnx2x_exeq_get_vlan_mac return a
1048 * pointer to an element with a specific criteria and NULL if such an element
1049 * hasn't been found.
1051 static struct bnx2x_exeq_elem *bnx2x_exeq_get_mac(
1052 struct bnx2x_exe_queue_obj *o,
1053 struct bnx2x_exeq_elem *elem)
1055 struct bnx2x_exeq_elem *pos;
1056 struct bnx2x_mac_ramrod_data *data = &elem->cmd_data.vlan_mac.u.mac;
1058 /* Check pending for execution commands */
1059 list_for_each_entry(pos, &o->exe_queue, link)
1060 if (!memcmp(&pos->cmd_data.vlan_mac.u.mac, data,
1062 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1068 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan(
1069 struct bnx2x_exe_queue_obj *o,
1070 struct bnx2x_exeq_elem *elem)
1072 struct bnx2x_exeq_elem *pos;
1073 struct bnx2x_vlan_ramrod_data *data = &elem->cmd_data.vlan_mac.u.vlan;
1075 /* Check pending for execution commands */
1076 list_for_each_entry(pos, &o->exe_queue, link)
1077 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan, data,
1079 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1085 static struct bnx2x_exeq_elem *bnx2x_exeq_get_vlan_mac(
1086 struct bnx2x_exe_queue_obj *o,
1087 struct bnx2x_exeq_elem *elem)
1089 struct bnx2x_exeq_elem *pos;
1090 struct bnx2x_vlan_mac_ramrod_data *data =
1091 &elem->cmd_data.vlan_mac.u.vlan_mac;
1093 /* Check pending for execution commands */
1094 list_for_each_entry(pos, &o->exe_queue, link)
1095 if (!memcmp(&pos->cmd_data.vlan_mac.u.vlan_mac, data,
1097 (pos->cmd_data.vlan_mac.cmd == elem->cmd_data.vlan_mac.cmd))
1104 * bnx2x_validate_vlan_mac_add - check if an ADD command can be executed
1106 * @bp: device handle
1107 * @qo: bnx2x_qable_obj
1108 * @elem: bnx2x_exeq_elem
1110 * Checks that the requested configuration can be added. If yes and if
1111 * requested, consume CAM credit.
1113 * The 'validate' is run after the 'optimize'.
1116 static inline int bnx2x_validate_vlan_mac_add(struct bnx2x *bp,
1117 union bnx2x_qable_obj *qo,
1118 struct bnx2x_exeq_elem *elem)
1120 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1121 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1124 /* Check the registry */
1125 rc = o->check_add(o, &elem->cmd_data.vlan_mac.u);
1127 DP(BNX2X_MSG_SP, "ADD command is not allowed considering "
1128 "current registry state\n");
1133 * Check if there is a pending ADD command for this
1134 * MAC/VLAN/VLAN-MAC. Return an error if there is.
1136 if (exeq->get(exeq, elem)) {
1137 DP(BNX2X_MSG_SP, "There is a pending ADD command already\n");
1142 * TODO: Check the pending MOVE from other objects where this
1143 * object is a destination object.
1146 /* Consume the credit if not requested not to */
1147 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1148 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1156 * bnx2x_validate_vlan_mac_del - check if the DEL command can be executed
1158 * @bp: device handle
1159 * @qo: quable object to check
1160 * @elem: element that needs to be deleted
1162 * Checks that the requested configuration can be deleted. If yes and if
1163 * requested, returns a CAM credit.
1165 * The 'validate' is run after the 'optimize'.
1167 static inline int bnx2x_validate_vlan_mac_del(struct bnx2x *bp,
1168 union bnx2x_qable_obj *qo,
1169 struct bnx2x_exeq_elem *elem)
1171 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1172 struct bnx2x_vlan_mac_registry_elem *pos;
1173 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1174 struct bnx2x_exeq_elem query_elem;
1176 /* If this classification can not be deleted (doesn't exist)
1177 * - return a BNX2X_EXIST.
1179 pos = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1181 DP(BNX2X_MSG_SP, "DEL command is not allowed considering "
1182 "current registry state\n");
1187 * Check if there are pending DEL or MOVE commands for this
1188 * MAC/VLAN/VLAN-MAC. Return an error if so.
1190 memcpy(&query_elem, elem, sizeof(query_elem));
1192 /* Check for MOVE commands */
1193 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_MOVE;
1194 if (exeq->get(exeq, &query_elem)) {
1195 BNX2X_ERR("There is a pending MOVE command already\n");
1199 /* Check for DEL commands */
1200 if (exeq->get(exeq, elem)) {
1201 DP(BNX2X_MSG_SP, "There is a pending DEL command already\n");
1205 /* Return the credit to the credit pool if not requested not to */
1206 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1207 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1208 o->put_credit(o))) {
1209 BNX2X_ERR("Failed to return a credit\n");
1217 * bnx2x_validate_vlan_mac_move - check if the MOVE command can be executed
1219 * @bp: device handle
1220 * @qo: quable object to check (source)
1221 * @elem: element that needs to be moved
1223 * Checks that the requested configuration can be moved. If yes and if
1224 * requested, returns a CAM credit.
1226 * The 'validate' is run after the 'optimize'.
/* Validate a MOVE command: checks the source registry via check_move(),
 * scans both source and destination exe queues for conflicting pending
 * commands (DEL/MOVE on source, ADD on destination), then takes a CAM
 * credit on the destination and returns one on the source unless the
 * DONT_CONSUME_CAM_CREDIT* flags say otherwise.
 * NOTE(review): this extraction elides error-return lines and closing
 * braces inside this function. */
1228 static inline int bnx2x_validate_vlan_mac_move(struct bnx2x *bp,
1229 union bnx2x_qable_obj *qo,
1230 struct bnx2x_exeq_elem *elem)
1232 struct bnx2x_vlan_mac_obj *src_o = &qo->vlan_mac;
1233 struct bnx2x_vlan_mac_obj *dest_o = elem->cmd_data.vlan_mac.target_obj;
1234 struct bnx2x_exeq_elem query_elem;
1235 struct bnx2x_exe_queue_obj *src_exeq = &src_o->exe_queue;
1236 struct bnx2x_exe_queue_obj *dest_exeq = &dest_o->exe_queue;
1239 * Check if we can perform this operation based on the current registry
1242 if (!src_o->check_move(src_o, dest_o, &elem->cmd_data.vlan_mac.u)) {
1243 DP(BNX2X_MSG_SP, "MOVE command is not allowed considering "
1244 "current registry state\n");
1249 * Check if there is an already pending DEL or MOVE command for the
1250 * source object or ADD command for a destination object. Return an
1253 memcpy(&query_elem, elem, sizeof(query_elem));
1255 /* Check DEL on source */
1256 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1257 if (src_exeq->get(src_exeq, &query_elem)) {
1258 BNX2X_ERR("There is a pending DEL command on the source "
1263 /* Check MOVE on source */
1264 if (src_exeq->get(src_exeq, elem)) {
1265 DP(BNX2X_MSG_SP, "There is a pending MOVE command already\n");
1269 /* Check ADD on destination */
1270 query_elem.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1271 if (dest_exeq->get(dest_exeq, &query_elem)) {
1272 BNX2X_ERR("There is a pending ADD command on the "
1273 "destination queue already\n");
1277 /* Consume the credit if not requested not to */
1278 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
1279 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1280 dest_o->get_credit(dest_o)))
1283 if (!(test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1284 &elem->cmd_data.vlan_mac.vlan_mac_flags) ||
1285 src_o->put_credit(src_o))) {
1286 /* return the credit taken from dest... */
1287 dest_o->put_credit(dest_o);
/* Exe-queue 'validate' callback: dispatch to the per-command validator
 * (ADD/DEL/MOVE). NOTE(review): default case elided in this extraction. */
1294 static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
1295 union bnx2x_qable_obj *qo,
1296 struct bnx2x_exeq_elem *elem)
1298 switch (elem->cmd_data.vlan_mac.cmd) {
1299 case BNX2X_VLAN_MAC_ADD:
1300 return bnx2x_validate_vlan_mac_add(bp, qo, elem);
1301 case BNX2X_VLAN_MAC_DEL:
1302 return bnx2x_validate_vlan_mac_del(bp, qo, elem);
1303 case BNX2X_VLAN_MAC_MOVE:
1304 return bnx2x_validate_vlan_mac_move(bp, qo, elem);
1311 * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
1313 * @bp: device handle
1314 * @o: bnx2x_vlan_mac_obj
1317 static int bnx2x_wait_vlan_mac(struct bnx2x *bp,
1318 struct bnx2x_vlan_mac_obj *o)
1321 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1322 struct bnx2x_raw_obj *raw = &o->raw;
1325 /* Wait for the current command to complete */
1326 rc = raw->wait_comp(bp, raw);
1330 /* Wait until there are no pending commands; sleep ~1ms between polls */
1331 if (!bnx2x_exe_queue_empty(exeq))
1332 usleep_range(1000, 1000);
1341 * bnx2x_complete_vlan_mac - complete one VLAN-MAC ramrod
1343 * @bp: device handle
1344 * @o: bnx2x_vlan_mac_obj
1346 * @cont: if true schedule next execution chunk
1349 static int bnx2x_complete_vlan_mac(struct bnx2x *bp,
1350 struct bnx2x_vlan_mac_obj *o,
1351 union event_ring_elem *cqe,
1352 unsigned long *ramrod_flags)
1354 struct bnx2x_raw_obj *r = &o->raw;
1357 /* Reset pending list */
1358 bnx2x_exe_queue_reset_pending(bp, &o->exe_queue)
1361 r->clear_pending(r);
1363 /* If ramrod failed this is most likely a SW bug */
1364 if (cqe->message.error)
1367 /* Run the next bulk of pending commands if requested */
1368 if (test_bit(RAMROD_CONT, ramrod_flags)) {
1369 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1374 /* If there is more work to do return PENDING */
1375 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1382 * bnx2x_optimize_vlan_mac - optimize ADD and DEL commands.
1384 * @bp: device handle
1385 * @o: bnx2x_qable_obj
1386 * @elem: bnx2x_exeq_elem
 *
 * If the exe queue already holds the opposite command (ADD vs. DEL) for
 * the same VLAN/MAC, both cancel out: the queued element is removed and
 * freed, and the CAM credit it held is returned/recovered.
1388 static int bnx2x_optimize_vlan_mac(struct bnx2x *bp,
1389 union bnx2x_qable_obj *qo,
1390 struct bnx2x_exeq_elem *elem)
1392 struct bnx2x_exeq_elem query, *pos;
1393 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac;
1394 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1396 memcpy(&query, elem, sizeof(query));
1398 switch (elem->cmd_data.vlan_mac.cmd) {
1399 case BNX2X_VLAN_MAC_ADD:
1400 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_DEL;
1402 case BNX2X_VLAN_MAC_DEL:
1403 query.cmd_data.vlan_mac.cmd = BNX2X_VLAN_MAC_ADD;
1406 /* Don't handle anything other than ADD or DEL */
1410 /* If we found the appropriate element - delete it */
1411 pos = exeq->get(exeq, &query);
1414 /* Return the credit of the optimized command */
1415 if (!test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
1416 &pos->cmd_data.vlan_mac.vlan_mac_flags)) {
1417 if ((query.cmd_data.vlan_mac.cmd ==
1418 BNX2X_VLAN_MAC_ADD) && !o->put_credit(o)) {
1419 BNX2X_ERR("Failed to return the credit for the "
1420 "optimized ADD command\n");
1422 } else if (!o->get_credit(o)) { /* VLAN_MAC_DEL */
1423 BNX2X_ERR("Failed to recover the credit from "
1424 "the optimized DEL command\n");
1429 DP(BNX2X_MSG_SP, "Optimizing %s command\n",
1430 (elem->cmd_data.vlan_mac.cmd == BNX2X_VLAN_MAC_ADD) ?
1433 list_del(&pos->link);
1434 bnx2x_exe_queue_free_elem(bp, pos);
1442 * bnx2x_vlan_mac_get_registry_elem - prepare a registry element
1444 * @bp: device handle
1450 * prepare a registry element according to the current command request.
 *
 * For ADD/MOVE (non-restore) a new element is allocated with GFP_ATOMIC,
 * a CAM offset is taken and the VLAN-MAC data + flags are copied in; for
 * DEL/RESTORE the existing element is looked up via check_del().
1452 static inline int bnx2x_vlan_mac_get_registry_elem(
1454 struct bnx2x_vlan_mac_obj *o,
1455 struct bnx2x_exeq_elem *elem,
1457 struct bnx2x_vlan_mac_registry_elem **re)
1459 int cmd = elem->cmd_data.vlan_mac.cmd;
1460 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1462 /* Allocate a new registry element if needed. */
1464 ((cmd == BNX2X_VLAN_MAC_ADD) || (cmd == BNX2X_VLAN_MAC_MOVE))) {
1465 reg_elem = kzalloc(sizeof(*reg_elem), GFP_ATOMIC);
1469 /* Get a new CAM offset */
1470 if (!o->get_cam_offset(o, &reg_elem->cam_offset)) {
1472 * This should never happen, because we have checked the
1473 * CAM availability in the 'validate'.
1480 DP(BNX2X_MSG_SP, "Got cam offset %d\n", reg_elem->cam_offset);
1482 /* Set a VLAN-MAC data */
1483 memcpy(&reg_elem->u, &elem->cmd_data.vlan_mac.u,
1484 sizeof(reg_elem->u));
1486 /* Copy the flags (needed for DEL and RESTORE flows) */
1487 reg_elem->vlan_mac_flags =
1488 elem->cmd_data.vlan_mac.vlan_mac_flags;
1489 } else /* DEL, RESTORE */
1490 reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1497 * bnx2x_execute_vlan_mac - execute vlan mac command
1499 * @bp: device handle
1504 * go and send a ramrod!
 *
 * Fills the ramrod data buffer from the execution chunk, pushes new
 * registry entries for ADD/MOVE, and posts the ramrod (unless
 * RAMROD_DRV_CLR_ONLY is set, in which case only the registry is
 * updated). On failure, entries added above are rolled back.
1506 static int bnx2x_execute_vlan_mac(struct bnx2x *bp,
1507 union bnx2x_qable_obj *qo,
1508 struct list_head *exe_chunk,
1509 unsigned long *ramrod_flags)
1511 struct bnx2x_exeq_elem *elem;
1512 struct bnx2x_vlan_mac_obj *o = &qo->vlan_mac, *cam_obj;
1513 struct bnx2x_raw_obj *r = &o->raw;
1515 bool restore = test_bit(RAMROD_RESTORE, ramrod_flags);
1516 bool drv_only = test_bit(RAMROD_DRV_CLR_ONLY, ramrod_flags);
1517 struct bnx2x_vlan_mac_registry_elem *reg_elem;
1521 * If DRIVER_ONLY execution is requested, cleanup a registry
1522 * and exit. Otherwise send a ramrod to FW.
1525 WARN_ON(r->check_pending(r));
1530 /* Fill the ramrod data */
1531 list_for_each_entry(elem, exe_chunk, link) {
1532 cmd = elem->cmd_data.vlan_mac.cmd;
1534 * We will add to the target object in MOVE command, so
1535 * change the object for a CAM search.
1537 if (cmd == BNX2X_VLAN_MAC_MOVE)
1538 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1542 rc = bnx2x_vlan_mac_get_registry_elem(bp, cam_obj,
1550 /* Push a new entry into the registry */
1552 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1553 (cmd == BNX2X_VLAN_MAC_MOVE)))
1554 list_add(&reg_elem->link, &cam_obj->head);
1556 /* Configure a single command in a ramrod data buffer */
1557 o->set_one_rule(bp, o, elem, idx,
1558 reg_elem->cam_offset);
1560 /* MOVE command consumes 2 entries in the ramrod data */
1561 if (cmd == BNX2X_VLAN_MAC_MOVE)
1567 /* Commit the data writes towards the memory */
1570 rc = bnx2x_sp_post(bp, o->ramrod_cmd, r->cid,
1571 U64_HI(r->rdata_mapping),
1572 U64_LO(r->rdata_mapping),
1573 ETH_CONNECTION_TYPE);
1578 /* Now, when we are done with the ramrod - clean up the registry */
1579 list_for_each_entry(elem, exe_chunk, link) {
1580 cmd = elem->cmd_data.vlan_mac.cmd;
1581 if ((cmd == BNX2X_VLAN_MAC_DEL) ||
1582 (cmd == BNX2X_VLAN_MAC_MOVE)) {
1583 reg_elem = o->check_del(o, &elem->cmd_data.vlan_mac.u);
1587 o->put_cam_offset(o, reg_elem->cam_offset);
1588 list_del(&reg_elem->link);
1599 r->clear_pending(r);
1601 /* Cleanup a registry in case of a failure */
1602 list_for_each_entry(elem, exe_chunk, link) {
1603 cmd = elem->cmd_data.vlan_mac.cmd;
1605 if (cmd == BNX2X_VLAN_MAC_MOVE)
1606 cam_obj = elem->cmd_data.vlan_mac.target_obj;
1610 /* Delete all newly added above entries */
1612 ((cmd == BNX2X_VLAN_MAC_ADD) ||
1613 (cmd == BNX2X_VLAN_MAC_MOVE))) {
1614 reg_elem = o->check_del(cam_obj,
1615 &elem->cmd_data.vlan_mac.u);
1617 list_del(&reg_elem->link);
/* Allocate an exe-queue element, copy the user request into it and add
 * it to the object's pending (exe) queue. */
1626 static inline int bnx2x_vlan_mac_push_new_cmd(
1628 struct bnx2x_vlan_mac_ramrod_params *p)
1630 struct bnx2x_exeq_elem *elem;
1631 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1632 bool restore = test_bit(RAMROD_RESTORE, &p->ramrod_flags);
1634 /* Allocate the execution queue element */
1635 elem = bnx2x_exe_queue_alloc_elem(bp);
1639 /* Set the command 'length' (MOVE takes a larger chunk) */
1640 switch (p->user_req.cmd) {
1641 case BNX2X_VLAN_MAC_MOVE:
1648 /* Fill the object specific info */
1649 memcpy(&elem->cmd_data.vlan_mac, &p->user_req, sizeof(p->user_req));
1651 /* Try to add a new command to the pending list */
1652 return bnx2x_exe_queue_add(bp, &o->exe_queue, elem, restore);
1656 * bnx2x_config_vlan_mac - configure VLAN/MAC/VLAN_MAC filtering rules.
1658 * @bp: device handle
 *
 * Pushes a new command to the exe queue (when one is requested), steps
 * the queue if RAMROD_CONT/RAMROD_EXEC/RAMROD_COMP_WAIT is set, and with
 * RAMROD_COMP_WAIT keeps stepping until the queue drains.
1662 int bnx2x_config_vlan_mac(
1664 struct bnx2x_vlan_mac_ramrod_params *p)
1667 struct bnx2x_vlan_mac_obj *o = p->vlan_mac_obj;
1668 unsigned long *ramrod_flags = &p->ramrod_flags;
1669 bool cont = test_bit(RAMROD_CONT, ramrod_flags);
1670 struct bnx2x_raw_obj *raw = &o->raw;
1673 * Add new elements to the execution list for commands that require it.
1676 rc = bnx2x_vlan_mac_push_new_cmd(bp, p);
1682 * If nothing will be executed further in this iteration we want to
1683 * return PENDING if there are pending commands
1685 if (!bnx2x_exe_queue_empty(&o->exe_queue))
1688 /* Execute commands if required */
1689 if (cont || test_bit(RAMROD_EXEC, ramrod_flags) ||
1690 test_bit(RAMROD_COMP_WAIT, ramrod_flags)) {
1691 rc = bnx2x_exe_queue_step(bp, &o->exe_queue, ramrod_flags);
1697 * RAMROD_COMP_WAIT is a superset of RAMROD_EXEC. If it was set
1698 * then user want to wait until the last command is done.
1700 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
1702 * Wait maximum for the current exe_queue length iterations plus
1703 * one (for the current pending command).
1705 int max_iterations = bnx2x_exe_queue_length(&o->exe_queue) + 1;
1707 while (!bnx2x_exe_queue_empty(&o->exe_queue) &&
1710 /* Wait for the current command to complete */
1711 rc = raw->wait_comp(bp, raw);
1715 /* Make a next step */
1716 rc = bnx2x_exe_queue_step(bp, &o->exe_queue,
1731 * bnx2x_vlan_mac_del_all - delete elements with given vlan_mac_flags spec
1733 * @bp: device handle
1736 * @ramrod_flags: execution flags to be used for this deletion
1738 * if the last operation has completed successfully and there are no
1739 * more elements left, positive value if the last operation has completed
1740 * successfully and there are more previously configured elements, negative
1741 * value if current operation has failed.
1743 static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
1744 struct bnx2x_vlan_mac_obj *o,
1745 unsigned long *vlan_mac_flags,
1746 unsigned long *ramrod_flags)
1748 struct bnx2x_vlan_mac_registry_elem *pos = NULL;
1750 struct bnx2x_vlan_mac_ramrod_params p;
1751 struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
1752 struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
1754 /* Clear pending commands first (under the exe-queue lock) */
1756 spin_lock_bh(&exeq->lock);
1758 list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
1759 if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
1761 list_del(&exeq_pos->link);
1764 spin_unlock_bh(&exeq->lock);
1766 /* Prepare a command request */
1767 memset(&p, 0, sizeof(p));
1769 p.ramrod_flags = *ramrod_flags;
1770 p.user_req.cmd = BNX2X_VLAN_MAC_DEL;
1773 * Add all but the last VLAN-MAC to the execution queue without actually
1774 * executing anything.
1776 __clear_bit(RAMROD_COMP_WAIT, &p.ramrod_flags);
1777 __clear_bit(RAMROD_EXEC, &p.ramrod_flags);
1778 __clear_bit(RAMROD_CONT, &p.ramrod_flags);
1780 list_for_each_entry(pos, &o->head, link) {
1781 if (pos->vlan_mac_flags == *vlan_mac_flags) {
1782 p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
1783 memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
1784 rc = bnx2x_config_vlan_mac(bp, &p);
1786 BNX2X_ERR("Failed to add a new DEL command\n");
1792 p.ramrod_flags = *ramrod_flags;
1793 __set_bit(RAMROD_CONT, &p.ramrod_flags);
1795 return bnx2x_config_vlan_mac(bp, &p);
/* Initialize the common "raw" object fields and its pending-state
 * callbacks (check/clear/set pending, completion wait). */
1798 static inline void bnx2x_init_raw_obj(struct bnx2x_raw_obj *raw, u8 cl_id,
1799 u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping, int state,
1800 unsigned long *pstate, bnx2x_obj_type type)
1802 raw->func_id = func_id;
1806 raw->rdata_mapping = rdata_mapping;
1808 raw->pstate = pstate;
1809 raw->obj_type = type;
1810 raw->check_pending = bnx2x_raw_check_pending;
1811 raw->clear_pending = bnx2x_raw_clear_pending;
1812 raw->set_pending = bnx2x_raw_set_pending;
1813 raw->wait_comp = bnx2x_raw_wait;
/* Common init for MAC / VLAN / VLAN-MAC objects: registry head, credit
 * pools, generic callbacks, and the embedded raw object. */
1816 static inline void bnx2x_init_vlan_mac_common(struct bnx2x_vlan_mac_obj *o,
1817 u8 cl_id, u32 cid, u8 func_id, void *rdata, dma_addr_t rdata_mapping,
1818 int state, unsigned long *pstate, bnx2x_obj_type type,
1819 struct bnx2x_credit_pool_obj *macs_pool,
1820 struct bnx2x_credit_pool_obj *vlans_pool)
1822 INIT_LIST_HEAD(&o->head);
1824 o->macs_pool = macs_pool;
1825 o->vlans_pool = vlans_pool;
1827 o->delete_all = bnx2x_vlan_mac_del_all;
1828 o->restore = bnx2x_vlan_mac_restore;
1829 o->complete = bnx2x_complete_vlan_mac;
1830 o->wait = bnx2x_wait_vlan_mac;
1832 bnx2x_init_raw_obj(&o->raw, cl_id, cid, func_id, rdata, rdata_mapping,
1833 state, pstate, type);
/* Initialize a MAC-only classification object. Chip-specific callbacks:
 * E1x uses the single-rule SET_MAC ramrod (MOVE not supported), newer
 * chips use the multi-rule CLASSIFICATION_RULES ramrod. */
1837 void bnx2x_init_mac_obj(struct bnx2x *bp,
1838 struct bnx2x_vlan_mac_obj *mac_obj,
1839 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1840 dma_addr_t rdata_mapping, int state,
1841 unsigned long *pstate, bnx2x_obj_type type,
1842 struct bnx2x_credit_pool_obj *macs_pool)
1844 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)mac_obj;
1846 bnx2x_init_vlan_mac_common(mac_obj, cl_id, cid, func_id, rdata,
1847 rdata_mapping, state, pstate, type,
1850 /* CAM credit pool handling */
1851 mac_obj->get_credit = bnx2x_get_credit_mac;
1852 mac_obj->put_credit = bnx2x_put_credit_mac;
1853 mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1854 mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1856 if (CHIP_IS_E1x(bp)) {
1857 mac_obj->set_one_rule = bnx2x_set_one_mac_e1x;
1858 mac_obj->check_del = bnx2x_check_mac_del;
1859 mac_obj->check_add = bnx2x_check_mac_add;
1860 mac_obj->check_move = bnx2x_check_move_always_err;
1861 mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1864 bnx2x_exe_queue_init(bp,
1865 &mac_obj->exe_queue, 1, qable_obj,
1866 bnx2x_validate_vlan_mac,
1867 bnx2x_optimize_vlan_mac,
1868 bnx2x_execute_vlan_mac,
1869 bnx2x_exeq_get_mac);
1871 mac_obj->set_one_rule = bnx2x_set_one_mac_e2;
1872 mac_obj->check_del = bnx2x_check_mac_del;
1873 mac_obj->check_add = bnx2x_check_mac_add;
1874 mac_obj->check_move = bnx2x_check_move;
1875 mac_obj->ramrod_cmd =
1876 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1879 bnx2x_exe_queue_init(bp,
1880 &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
1881 qable_obj, bnx2x_validate_vlan_mac,
1882 bnx2x_optimize_vlan_mac,
1883 bnx2x_execute_vlan_mac,
1884 bnx2x_exeq_get_mac);
/* Initialize a VLAN-only classification object. Only E2-and-newer chips
 * support VLAN classification rules; E1x is rejected with an error. */
1888 void bnx2x_init_vlan_obj(struct bnx2x *bp,
1889 struct bnx2x_vlan_mac_obj *vlan_obj,
1890 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1891 dma_addr_t rdata_mapping, int state,
1892 unsigned long *pstate, bnx2x_obj_type type,
1893 struct bnx2x_credit_pool_obj *vlans_pool)
1895 union bnx2x_qable_obj *qable_obj = (union bnx2x_qable_obj *)vlan_obj;
1897 bnx2x_init_vlan_mac_common(vlan_obj, cl_id, cid, func_id, rdata,
1898 rdata_mapping, state, pstate, type, NULL,
1901 vlan_obj->get_credit = bnx2x_get_credit_vlan;
1902 vlan_obj->put_credit = bnx2x_put_credit_vlan;
1903 vlan_obj->get_cam_offset = bnx2x_get_cam_offset_vlan;
1904 vlan_obj->put_cam_offset = bnx2x_put_cam_offset_vlan;
1906 if (CHIP_IS_E1x(bp)) {
1907 BNX2X_ERR("Do not support chips others than E2 and newer\n");
1910 vlan_obj->set_one_rule = bnx2x_set_one_vlan_e2;
1911 vlan_obj->check_del = bnx2x_check_vlan_del;
1912 vlan_obj->check_add = bnx2x_check_vlan_add;
1913 vlan_obj->check_move = bnx2x_check_move;
1914 vlan_obj->ramrod_cmd =
1915 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1918 bnx2x_exe_queue_init(bp,
1919 &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
1920 qable_obj, bnx2x_validate_vlan_mac,
1921 bnx2x_optimize_vlan_mac,
1922 bnx2x_execute_vlan_mac,
1923 bnx2x_exeq_get_vlan);
/* Initialize a VLAN-MAC pair classification object. E1 is unsupported;
 * E1H uses the single-rule SET_MAC ramrod, newer chips use
 * CLASSIFICATION_RULES. CAM offsets always come from the MACs pool. */
1927 void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
1928 struct bnx2x_vlan_mac_obj *vlan_mac_obj,
1929 u8 cl_id, u32 cid, u8 func_id, void *rdata,
1930 dma_addr_t rdata_mapping, int state,
1931 unsigned long *pstate, bnx2x_obj_type type,
1932 struct bnx2x_credit_pool_obj *macs_pool,
1933 struct bnx2x_credit_pool_obj *vlans_pool)
1935 union bnx2x_qable_obj *qable_obj =
1936 (union bnx2x_qable_obj *)vlan_mac_obj;
1938 bnx2x_init_vlan_mac_common(vlan_mac_obj, cl_id, cid, func_id, rdata,
1939 rdata_mapping, state, pstate, type,
1940 macs_pool, vlans_pool);
1942 /* CAM pool handling */
1943 vlan_mac_obj->get_credit = bnx2x_get_credit_vlan_mac;
1944 vlan_mac_obj->put_credit = bnx2x_put_credit_vlan_mac;
1946 * CAM offset is relevant for 57710 and 57711 chips only which have a
1947 * single CAM for both MACs and VLAN-MAC pairs. So the offset
1948 * will be taken from MACs' pool object only.
1950 vlan_mac_obj->get_cam_offset = bnx2x_get_cam_offset_mac;
1951 vlan_mac_obj->put_cam_offset = bnx2x_put_cam_offset_mac;
1953 if (CHIP_IS_E1(bp)) {
1954 BNX2X_ERR("Do not support chips others than E2\n");
1956 } else if (CHIP_IS_E1H(bp)) {
1957 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e1h;
1958 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1959 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1960 vlan_mac_obj->check_move = bnx2x_check_move_always_err;
1961 vlan_mac_obj->ramrod_cmd = RAMROD_CMD_ID_ETH_SET_MAC;
1964 bnx2x_exe_queue_init(bp,
1965 &vlan_mac_obj->exe_queue, 1, qable_obj,
1966 bnx2x_validate_vlan_mac,
1967 bnx2x_optimize_vlan_mac,
1968 bnx2x_execute_vlan_mac,
1969 bnx2x_exeq_get_vlan_mac);
1971 vlan_mac_obj->set_one_rule = bnx2x_set_one_vlan_mac_e2;
1972 vlan_mac_obj->check_del = bnx2x_check_vlan_mac_del;
1973 vlan_mac_obj->check_add = bnx2x_check_vlan_mac_add;
1974 vlan_mac_obj->check_move = bnx2x_check_move;
1975 vlan_mac_obj->ramrod_cmd =
1976 RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES;
1979 bnx2x_exe_queue_init(bp,
1980 &vlan_mac_obj->exe_queue,
1981 CLASSIFY_RULES_COUNT,
1982 qable_obj, bnx2x_validate_vlan_mac,
1983 bnx2x_optimize_vlan_mac,
1984 bnx2x_execute_vlan_mac,
1985 bnx2x_exeq_get_vlan_mac);
1990 /* RX_MODE verbs: DROP_ALL/ACCEPT_ALL/ACCEPT_ALL_MULTI/ACCEPT_ALL_VLAN/NORMAL */
/* Write the per-PF MAC filter config struct into TSTORM internal memory. */
1991 static inline void __storm_memset_mac_filters(struct bnx2x *bp,
1992 struct tstorm_eth_mac_filter_config *mac_filters,
1995 size_t size = sizeof(struct tstorm_eth_mac_filter_config);
1997 u32 addr = BAR_TSTRORM_INTMEM +
1998 TSTORM_MAC_FILTER_CONFIG_OFFSET(pf_id);
2000 __storm_memset_struct(bp, addr, size, (u32 *)mac_filters);
/* E1x rx-mode: translate rx_accept_flags into per-client drop/accept bit
 * masks in the TSTORM MAC filter config and write it to internal memory.
 * Completes synchronously (no ramrod) - clears the pending state bit. */
2003 static int bnx2x_set_rx_mode_e1x(struct bnx2x *bp,
2004 struct bnx2x_rx_mode_ramrod_params *p)
2006 /* update the bp MAC filter structure */
2007 u32 mask = (1 << p->cl_id);
2009 struct tstorm_eth_mac_filter_config *mac_filters =
2010 (struct tstorm_eth_mac_filter_config *)p->rdata;
2012 /* initial setting is drop-all */
2013 u8 drop_all_ucast = 1, drop_all_mcast = 1;
2014 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2015 u8 unmatched_unicast = 0;
2017 /* In e1x there we only take into account rx accept flag since tx switching
2019 if (test_bit(BNX2X_ACCEPT_UNICAST, &p->rx_accept_flags))
2020 /* accept matched ucast */
2023 if (test_bit(BNX2X_ACCEPT_MULTICAST, &p->rx_accept_flags))
2024 /* accept matched mcast */
2027 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &p->rx_accept_flags)) {
2028 /* accept all ucast */
2032 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &p->rx_accept_flags)) {
2033 /* accept all mcast */
2037 if (test_bit(BNX2X_ACCEPT_BROADCAST, &p->rx_accept_flags))
2038 /* accept (all) bcast */
2040 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &p->rx_accept_flags))
2041 /* accept unmatched unicasts */
2042 unmatched_unicast = 1;
2044 mac_filters->ucast_drop_all = drop_all_ucast ?
2045 mac_filters->ucast_drop_all | mask :
2046 mac_filters->ucast_drop_all & ~mask;
2048 mac_filters->mcast_drop_all = drop_all_mcast ?
2049 mac_filters->mcast_drop_all | mask :
2050 mac_filters->mcast_drop_all & ~mask;
2052 mac_filters->ucast_accept_all = accp_all_ucast ?
2053 mac_filters->ucast_accept_all | mask :
2054 mac_filters->ucast_accept_all & ~mask;
2056 mac_filters->mcast_accept_all = accp_all_mcast ?
2057 mac_filters->mcast_accept_all | mask :
2058 mac_filters->mcast_accept_all & ~mask;
2060 mac_filters->bcast_accept_all = accp_all_bcast ?
2061 mac_filters->bcast_accept_all | mask :
2062 mac_filters->bcast_accept_all & ~mask;
2064 mac_filters->unmatched_unicast = unmatched_unicast ?
2065 mac_filters->unmatched_unicast | mask :
2066 mac_filters->unmatched_unicast & ~mask;
2068 DP(BNX2X_MSG_SP, "drop_ucast 0x%x\ndrop_mcast 0x%x\n accp_ucast 0x%x\n"
2069 "accp_mcast 0x%x\naccp_bcast 0x%x\n",
2070 mac_filters->ucast_drop_all,
2071 mac_filters->mcast_drop_all,
2072 mac_filters->ucast_accept_all,
2073 mac_filters->mcast_accept_all,
2074 mac_filters->bcast_accept_all);
2076 /* write the MAC filter structure*/
2077 __storm_memset_mac_filters(bp, mac_filters, p->func_id);
2079 /* The operation is completed */
2080 clear_bit(p->state, p->pstate);
2081 smp_mb__after_clear_bit();
2086 /* Setup ramrod data */
/* Fill the classification header with the number of rules configured. */
2087 static inline void bnx2x_rx_mode_set_rdata_hdr_e2(u32 cid,
2088 struct eth_classify_header *hdr,
2092 hdr->rule_cnt = rule_cnt;
/* Translate accept_flags into a filter-rules command state word,
 * starting from drop-all and clearing/setting bits per flag. With
 * @clear_accept_all set (FCoE L2 queue) all ACCEPT_ALL bits are
 * stripped at the end. */
2095 static inline void bnx2x_rx_mode_set_cmd_state_e2(struct bnx2x *bp,
2096 unsigned long accept_flags,
2097 struct eth_filter_rules_cmd *cmd,
2098 bool clear_accept_all)
2102 /* start with 'drop-all' */
2103 state = ETH_FILTER_RULES_CMD_UCAST_DROP_ALL |
2104 ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2107 if (test_bit(BNX2X_ACCEPT_UNICAST, &accept_flags))
2108 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2110 if (test_bit(BNX2X_ACCEPT_MULTICAST, &accept_flags))
2111 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2113 if (test_bit(BNX2X_ACCEPT_ALL_UNICAST, &accept_flags)) {
2114 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2115 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2118 if (test_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags)) {
2119 state |= ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2120 state &= ~ETH_FILTER_RULES_CMD_MCAST_DROP_ALL;
2122 if (test_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags))
2123 state |= ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2125 if (test_bit(BNX2X_ACCEPT_UNMATCHED, &accept_flags)) {
2126 state &= ~ETH_FILTER_RULES_CMD_UCAST_DROP_ALL;
2127 state |= ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2129 if (test_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags))
2130 state |= ETH_FILTER_RULES_CMD_ACCEPT_ANY_VLAN;
2133 /* Clear ACCEPT_ALL_XXX flags for FCoE L2 Queue */
2134 if (clear_accept_all) {
2135 state &= ~ETH_FILTER_RULES_CMD_MCAST_ACCEPT_ALL;
2136 state &= ~ETH_FILTER_RULES_CMD_BCAST_ACCEPT_ALL;
2137 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_ALL;
2138 state &= ~ETH_FILTER_RULES_CMD_UCAST_ACCEPT_UNMATCHED;
2141 cmd->state = cpu_to_le16(state);
/* E2 rx-mode: build FILTER_RULES ramrod data - up to one Tx and one Rx
 * rule for the main client plus (optionally) a restricted pair for the
 * FCoE L2 queue - then post the ramrod. Returns with completion pending. */
2145 static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
2146 struct bnx2x_rx_mode_ramrod_params *p)
2148 struct eth_filter_rules_ramrod_data *data = p->rdata;
2152 /* Reset the ramrod data buffer */
2153 memset(data, 0, sizeof(*data));
2155 /* Setup ramrod data */
2157 /* Tx (internal switching) */
2158 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2159 data->rules[rule_idx].client_id = p->cl_id;
2160 data->rules[rule_idx].func_id = p->func_id;
2162 data->rules[rule_idx].cmd_general_data =
2163 ETH_FILTER_RULES_CMD_TX_CMD;
2165 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2166 &(data->rules[rule_idx++]), false);
2170 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2171 data->rules[rule_idx].client_id = p->cl_id;
2172 data->rules[rule_idx].func_id = p->func_id;
2174 data->rules[rule_idx].cmd_general_data =
2175 ETH_FILTER_RULES_CMD_RX_CMD;
2177 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2178 &(data->rules[rule_idx++]), false);
2183 * If FCoE Queue configuration has been requested configure the Rx and
2184 * internal switching modes for this queue in separate rules.
2186 * FCoE queue should never be set to ACCEPT_ALL packets of any sort:
2187 * MCAST_ALL, UCAST_ALL, BCAST_ALL and UNMATCHED.
2189 if (test_bit(BNX2X_RX_MODE_FCOE_ETH, &p->rx_mode_flags)) {
2190 /* Tx (internal switching) */
2191 if (test_bit(RAMROD_TX, &p->ramrod_flags)) {
2192 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2193 data->rules[rule_idx].func_id = p->func_id;
2195 data->rules[rule_idx].cmd_general_data =
2196 ETH_FILTER_RULES_CMD_TX_CMD;
2198 bnx2x_rx_mode_set_cmd_state_e2(bp, p->tx_accept_flags,
2199 &(data->rules[rule_idx++]),
2204 if (test_bit(RAMROD_RX, &p->ramrod_flags)) {
2205 data->rules[rule_idx].client_id = bnx2x_fcoe(bp, cl_id);
2206 data->rules[rule_idx].func_id = p->func_id;
2208 data->rules[rule_idx].cmd_general_data =
2209 ETH_FILTER_RULES_CMD_RX_CMD;
2211 bnx2x_rx_mode_set_cmd_state_e2(bp, p->rx_accept_flags,
2212 &(data->rules[rule_idx++]),
2218 * Set the ramrod header (most importantly - number of rules to
2221 bnx2x_rx_mode_set_rdata_hdr_e2(p->cid, &data->header, rule_idx);
2223 DP(BNX2X_MSG_SP, "About to configure %d rules, rx_accept_flags 0x%lx, "
2224 "tx_accept_flags 0x%lx\n",
2225 data->header.rule_cnt, p->rx_accept_flags,
2226 p->tx_accept_flags);
2228 /* Commit writes towards the memory before sending a ramrod */
2232 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_FILTER_RULES, p->cid,
2233 U64_HI(p->rdata_mapping),
2234 U64_LO(p->rdata_mapping),
2235 ETH_CONNECTION_TYPE);
2239 /* Ramrod completion is pending */
/* Wait for the rx-mode ramrod completion (E2 path). */
2243 static int bnx2x_wait_rx_mode_comp_e2(struct bnx2x *bp,
2244 struct bnx2x_rx_mode_ramrod_params *p)
2246 return bnx2x_state_wait(bp, p->state, p->pstate);
/* E1x rx-mode completes synchronously, so there is nothing to wait for. */
2249 static int bnx2x_empty_rx_mode_wait(struct bnx2x *bp,
2250 struct bnx2x_rx_mode_ramrod_params *p)
/* Apply the rx-mode via the chip-specific config_rx_mode callback and,
 * if RAMROD_COMP_WAIT is set, wait for the completion. */
2256 int bnx2x_config_rx_mode(struct bnx2x *bp,
2257 struct bnx2x_rx_mode_ramrod_params *p)
2261 /* Configure the new classification in the chip */
2262 rc = p->rx_mode_obj->config_rx_mode(bp, p);
2266 /* Wait for a ramrod completion if was requested */
2267 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags)) {
2268 rc = p->rx_mode_obj->wait_comp(bp, p);
/* Select the chip-specific rx-mode callbacks: E1x (synchronous storm
 * write) vs. E2+ (FILTER_RULES ramrod with completion wait). */
2276 void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
2277 struct bnx2x_rx_mode_obj *o)
2279 if (CHIP_IS_E1x(bp)) {
2280 o->wait_comp = bnx2x_empty_rx_mode_wait;
2281 o->config_rx_mode = bnx2x_set_rx_mode_e1x;
2283 o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
2284 o->config_rx_mode = bnx2x_set_rx_mode_e2;
2288 /********************* Multicast verbs: SET, CLEAR ****************************/
/* Map a MAC address to a multicast approximate-match bin: top byte of
 * its CRC32c. */
2289 static inline u8 bnx2x_mcast_bin_from_mac(u8 *mac)
2291 return (crc32c_le(0, mac, ETH_ALEN) >> 24) & 0xff;
/* A single MAC address queued inside a pending multicast command. */
2294 struct bnx2x_mcast_mac_elem {
2295 struct list_head link;
2297 u8 pad[2]; /* For a natural alignment of the following buffer */
/* One queued multicast command (ADD/DEL/RESTORE) awaiting execution. */
2300 struct bnx2x_pending_mcast_cmd {
2301 struct list_head link;
2302 int type; /* BNX2X_MCAST_CMD_X */
2304 struct list_head macs_head;
2305 u32 macs_num; /* Needed for DEL command */
2306 int next_bin; /* Needed for RESTORE flow with aprox match */
2309 bool done; /* set to true, when the command has been handled,
2310 * practically used in 57712 handling only, where one pending
2311 * command may be handled in a few operations. As long as for
2312 * other chips every operation handling is completed in a
2313 * single ramrod, there is no need to utilize this field.
/* Wait for both the mcast scheduler state bit and the raw-object
 * completion to clear. */
2317 static int bnx2x_mcast_wait(struct bnx2x *bp,
2318 struct bnx2x_mcast_obj *o)
2320 if (bnx2x_state_wait(bp, o->sched_state, o->raw.pstate) ||
2321 o->raw.wait_comp(bp, &o->raw))
/* Enqueue a multicast command onto the object's pending list (FIFO).
 * For ADD, the MAC list is copied into the same allocation, right after
 * the command struct; DEL stores only the count, RESTORE starts at bin 0. */
2327 static int bnx2x_mcast_enqueue_cmd(struct bnx2x *bp,
2328 struct bnx2x_mcast_obj *o,
2329 struct bnx2x_mcast_ramrod_params *p,
2333 struct bnx2x_pending_mcast_cmd *new_cmd;
2334 struct bnx2x_mcast_mac_elem *cur_mac = NULL;
2335 struct bnx2x_mcast_list_elem *pos;
2336 int macs_list_len = ((cmd == BNX2X_MCAST_CMD_ADD) ?
2337 p->mcast_list_len : 0);
2339 /* If the command is empty ("handle pending commands only"), break */
2340 if (!p->mcast_list_len)
2343 total_sz = sizeof(*new_cmd) +
2344 macs_list_len * sizeof(struct bnx2x_mcast_mac_elem);
2346 /* Add mcast is called under spin_lock, thus calling with GFP_ATOMIC */
2347 new_cmd = kzalloc(total_sz, GFP_ATOMIC);
2352 DP(BNX2X_MSG_SP, "About to enqueue a new %d command. "
2353 "macs_list_len=%d\n", cmd, macs_list_len);
2355 INIT_LIST_HEAD(&new_cmd->data.macs_head);
2357 new_cmd->type = cmd;
2358 new_cmd->done = false;
2361 case BNX2X_MCAST_CMD_ADD:
2362 cur_mac = (struct bnx2x_mcast_mac_elem *)
2363 ((u8 *)new_cmd + sizeof(*new_cmd));
2365 /* Push the MACs of the current command into the pending command
2368 list_for_each_entry(pos, &p->mcast_list, link) {
2369 memcpy(cur_mac->mac, pos->mac, ETH_ALEN);
2370 list_add_tail(&cur_mac->link, &new_cmd->data.macs_head);
2376 case BNX2X_MCAST_CMD_DEL:
2377 new_cmd->data.macs_num = p->mcast_list_len;
2380 case BNX2X_MCAST_CMD_RESTORE:
2381 new_cmd->data.next_bin = 0;
2385 BNX2X_ERR("Unknown command: %d\n", cmd);
2389 /* Push the new pending command to the tail of the pending list: FIFO */
2390 list_add_tail(&new_cmd->link, &o->pending_cmds_head);
2398 * bnx2x_mcast_get_next_bin - get the next set bin (index)
2401 * @last: index to start looking from (including)
2403 * returns the next found (set) bin or a negative value if none is found.
 *
 * Scans the approximate-match bit vector one 64-bit element at a time.
2405 static inline int bnx2x_mcast_get_next_bin(struct bnx2x_mcast_obj *o, int last)
2407 int i, j, inner_start = last % BIT_VEC64_ELEM_SZ;
2409 for (i = last / BIT_VEC64_ELEM_SZ; i < BNX2X_MCAST_VEC_SZ; i++) {
2410 if (o->registry.aprox_match.vec[i])
2411 for (j = inner_start; j < BIT_VEC64_ELEM_SZ; j++) {
2412 int cur_bit = j + BIT_VEC64_ELEM_SZ * i;
2413 if (BIT_VEC64_TEST_BIT(o->registry.aprox_match.
2426 * bnx2x_mcast_clear_first_bin - find the first set bin and clear it
2430 * returns the index of the found bin or -1 if none is found
2432 static inline int bnx2x_mcast_clear_first_bin(struct bnx2x_mcast_obj *o)
2434 int cur_bit = bnx2x_mcast_get_next_bin(o, 0);
2437 BIT_VEC64_CLEAR_BIT(o->registry.aprox_match.vec, cur_bit);
/* Build the RX/TX command flags for a multicast rule based on the raw
 * object's type (RX, TX, or both). */
2442 static inline u8 bnx2x_mcast_get_rx_tx_flag(struct bnx2x_mcast_obj *o)
2444 struct bnx2x_raw_obj *raw = &o->raw;
2447 if ((raw->obj_type == BNX2X_OBJ_TYPE_TX) ||
2448 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2449 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_TX_CMD;
2451 if ((raw->obj_type == BNX2X_OBJ_TYPE_RX) ||
2452 (raw->obj_type == BNX2X_OBJ_TYPE_RX_TX))
2453 rx_tx_flag |= ETH_MULTICAST_RULES_CMD_RX_CMD;
/* Fill one MULTICAST_RULES entry at @idx: pick the bin per command type
 * (hash of MAC for ADD, first set bin for DEL, explicit bin for RESTORE)
 * and update the approximate-match registry for ADD. */
2458 static void bnx2x_mcast_set_one_rule_e2(struct bnx2x *bp,
2459 struct bnx2x_mcast_obj *o, int idx,
2460 union bnx2x_mcast_config_data *cfg_data,
2463 struct bnx2x_raw_obj *r = &o->raw;
2464 struct eth_multicast_rules_ramrod_data *data =
2465 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2466 u8 func_id = r->func_id;
2467 u8 rx_tx_add_flag = bnx2x_mcast_get_rx_tx_flag(o);
2470 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2471 rx_tx_add_flag |= ETH_MULTICAST_RULES_CMD_IS_ADD;
2473 data->rules[idx].cmd_general_data |= rx_tx_add_flag;
2475 /* Get a bin and update a bins' vector */
2477 case BNX2X_MCAST_CMD_ADD:
2478 bin = bnx2x_mcast_bin_from_mac(cfg_data->mac);
2479 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec, bin);
2482 case BNX2X_MCAST_CMD_DEL:
2483 /* If there were no more bins to clear
2484 * (bnx2x_mcast_clear_first_bin() returns -1) then we would
2485 * clear any (0xff) bin.
2486 * See bnx2x_mcast_validate_e2() for explanation when it may
2489 bin = bnx2x_mcast_clear_first_bin(o);
2492 case BNX2X_MCAST_CMD_RESTORE:
2493 bin = cfg_data->bin;
2497 BNX2X_ERR("Unknown command: %d\n", cmd);
2501 DP(BNX2X_MSG_SP, "%s bin %d\n",
2502 ((rx_tx_add_flag & ETH_MULTICAST_RULES_CMD_IS_ADD) ?
2503 "Setting" : "Clearing"), bin);
2505 data->rules[idx].bin_id = (u8)bin;
2506 data->rules[idx].func_id = func_id;
2507 data->rules[idx].engine_id = o->engine_id;
2511 * bnx2x_mcast_handle_restore_cmd_e2 - restore configuration from the registry
2513 * @bp: device handle
2515 * @start_bin: index in the registry to start from (including)
2516 * @rdata_idx: index in the ramrod data to start from
2518 * returns last handled bin index or -1 if all bins have been handled
2520 static inline int bnx2x_mcast_handle_restore_cmd_e2(
2521 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_bin,
2524 int cur_bin, cnt = *rdata_idx;
2525 union bnx2x_mcast_config_data cfg_data = {0};
2527 /* go through the registry and configure the bins from it */
2528 for (cur_bin = bnx2x_mcast_get_next_bin(o, start_bin); cur_bin >= 0;
2529 cur_bin = bnx2x_mcast_get_next_bin(o, cur_bin + 1)) {
2531 cfg_data.bin = (u8)cur_bin;
2532 o->set_one_rule(bp, o, cnt, &cfg_data,
2533 BNX2X_MCAST_CMD_RESTORE);
2537 DP(BNX2X_MSG_SP, "About to configure a bin %d\n", cur_bin);
2539 /* Break if we reached the maximum number
2542 if (cnt >= o->max_cmd_len)
/* Drain MACs of a pending ADD command into the ramrod data, up to
 * max_cmd_len rules per ramrod; marks the command done when its MAC
 * list is exhausted. */
2551 static inline void bnx2x_mcast_hdl_pending_add_e2(struct bnx2x *bp,
2552 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2555 struct bnx2x_mcast_mac_elem *pmac_pos, *pmac_pos_n;
2556 int cnt = *line_idx;
2557 union bnx2x_mcast_config_data cfg_data = {0};
2559 list_for_each_entry_safe(pmac_pos, pmac_pos_n, &cmd_pos->data.macs_head,
2562 cfg_data.mac = &pmac_pos->mac[0];
2563 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
2567 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2569 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
2571 list_del(&pmac_pos->link);
2573 /* Break if we reached the maximum number
2576 if (cnt >= o->max_cmd_len)
2582 /* if no more MACs to configure - we are done */
2583 if (list_empty(&cmd_pos->data.macs_head))
2584 cmd_pos->done = true;
/* Drains a pending DEL command: emits one DEL rule (NULL cfg_data) per
 * remaining MAC, decrementing macs_num; marks the command done when the
 * count reaches zero, or stops early when the ramrod data chunk is full.
 */
2587 static inline void bnx2x_mcast_hdl_pending_del_e2(struct bnx2x *bp,
2588 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2591 int cnt = *line_idx;
2593 while (cmd_pos->data.macs_num) {
2594 o->set_one_rule(bp, o, cnt, NULL, cmd_pos->type);
2598 cmd_pos->data.macs_num--;
2600 DP(BNX2X_MSG_SP, "Deleting MAC. %d left,cnt is %d\n",
2601 cmd_pos->data.macs_num, cnt);
2603 /* Break if we reached the maximum
2606 if (cnt >= o->max_cmd_len)
2612 /* If we cleared all bins - we are done */
2613 if (!cmd_pos->data.macs_num)
2614 cmd_pos->done = true;
/* Continues a pending RESTORE command from data.next_bin; hdl_restore()
 * returns the last handled bin, or a negative value when the whole
 * registry has been walked, in which case the command is marked done.
 */
2617 static inline void bnx2x_mcast_hdl_pending_restore_e2(struct bnx2x *bp,
2618 struct bnx2x_mcast_obj *o, struct bnx2x_pending_mcast_cmd *cmd_pos,
2621 cmd_pos->data.next_bin = o->hdl_restore(bp, o, cmd_pos->data.next_bin,
2624 if (cmd_pos->data.next_bin < 0)
2625 /* If o->set_restore returned -1 we are done */
2626 cmd_pos->done = true;
2628 /* Start from the next bin next time */
2629 cmd_pos->data.next_bin++;
/* Processes queued ADD/DEL/RESTORE commands into the current ramrod data.
 * Completed commands are unlinked and freed; processing stops when the
 * chunk is full.  Returns the number of rules filled (cnt).
 */
2632 static inline int bnx2x_mcast_handle_pending_cmds_e2(struct bnx2x *bp,
2633 struct bnx2x_mcast_ramrod_params *p)
2635 struct bnx2x_pending_mcast_cmd *cmd_pos, *cmd_pos_n;
2637 struct bnx2x_mcast_obj *o = p->mcast_obj;
/* _safe: completed commands are removed from the list inside the loop */
2639 list_for_each_entry_safe(cmd_pos, cmd_pos_n, &o->pending_cmds_head,
2641 switch (cmd_pos->type) {
2642 case BNX2X_MCAST_CMD_ADD:
2643 bnx2x_mcast_hdl_pending_add_e2(bp, o, cmd_pos, &cnt);
2646 case BNX2X_MCAST_CMD_DEL:
2647 bnx2x_mcast_hdl_pending_del_e2(bp, o, cmd_pos, &cnt);
2650 case BNX2X_MCAST_CMD_RESTORE:
2651 bnx2x_mcast_hdl_pending_restore_e2(bp, o, cmd_pos,
2656 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
2660 /* If the command has been completed - remove it from the list
2661 * and free the memory
2663 if (cmd_pos->done) {
2664 list_del(&cmd_pos->link);
2668 /* Break if we reached the maximum number of rules */
2669 if (cnt >= o->max_cmd_len)
/* Emits one ADD rule for every MAC in the caller-supplied p->mcast_list,
 * advancing *line_idx (cnt) per rule.
 */
2676 static inline void bnx2x_mcast_hdl_add(struct bnx2x *bp,
2677 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2680 struct bnx2x_mcast_list_elem *mlist_pos;
2681 union bnx2x_mcast_config_data cfg_data = {0};
2682 int cnt = *line_idx;
2684 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2685 cfg_data.mac = mlist_pos->mac;
2686 o->set_one_rule(bp, o, cnt, &cfg_data, BNX2X_MCAST_CMD_ADD);
2690 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
2692 BNX2X_MAC_PRN_LIST(mlist_pos->mac));
/* Emits p->mcast_list_len DEL rules (NULL cfg_data), advancing *line_idx. */
2698 static inline void bnx2x_mcast_hdl_del(struct bnx2x *bp,
2699 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2702 int cnt = *line_idx, i;
2704 for (i = 0; i < p->mcast_list_len; i++) {
2705 o->set_one_rule(bp, o, cnt, NULL, BNX2X_MCAST_CMD_DEL);
2709 DP(BNX2X_MSG_SP, "Deleting MAC. %d left\n",
2710 p->mcast_list_len - i - 1);
2717 * bnx2x_mcast_handle_current_cmd -
2719 * @bp: device handle
2722 * @start_cnt: first line in the ramrod data that may be used
2724 * This function is called iff there is enough place for the current command in
2726 * Returns number of lines filled in the ramrod data in total.
/* Dispatches the current (non-pending) command to the matching handler and
 * zeroes p->mcast_list_len to mark it consumed.
 */
2728 static inline int bnx2x_mcast_handle_current_cmd(struct bnx2x *bp,
2729 struct bnx2x_mcast_ramrod_params *p, int cmd,
2732 struct bnx2x_mcast_obj *o = p->mcast_obj;
2733 int cnt = start_cnt;
2735 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
2738 case BNX2X_MCAST_CMD_ADD:
2739 bnx2x_mcast_hdl_add(bp, o, p, &cnt);
2742 case BNX2X_MCAST_CMD_DEL:
2743 bnx2x_mcast_hdl_del(bp, o, p, &cnt);
2746 case BNX2X_MCAST_CMD_RESTORE:
2747 o->hdl_restore(bp, o, 0, &cnt);
2751 BNX2X_ERR("Unknown command: %d\n", cmd);
2755 /* The current command has been handled */
2756 p->mcast_list_len = 0;
/* 57712 validation: estimates the work a command implies, updates the
 * registry-size bookkeeping accordingly and accumulates the total number
 * of pending MAC/bin updates in o->total_pending_num.
 */
2761 static int bnx2x_mcast_validate_e2(struct bnx2x *bp,
2762 struct bnx2x_mcast_ramrod_params *p,
2765 struct bnx2x_mcast_obj *o = p->mcast_obj;
2766 int reg_sz = o->get_registry_size(o);
2769 /* DEL command deletes all currently configured MACs */
2770 case BNX2X_MCAST_CMD_DEL:
2771 o->set_registry_size(o, 0);
2774 /* RESTORE command will restore the entire multicast configuration */
2775 case BNX2X_MCAST_CMD_RESTORE:
2776 /* Here we set the approximate amount of work to do, which in
2777 * fact may be only less as some MACs in postponed ADD
2778 * command(s) scheduled before this command may fall into
2779 * the same bin and the actual number of bins set in the
2780 * registry would be less than we estimated here. See
2781 * bnx2x_mcast_set_one_rule_e2() for further details.
2783 p->mcast_list_len = reg_sz;
2786 case BNX2X_MCAST_CMD_ADD:
2787 case BNX2X_MCAST_CMD_CONT:
2788 /* Here we assume that all new MACs will fall into new bins.
2789 * However we will correct the real registry size after we
2790 * handle all pending commands.
2792 o->set_registry_size(o, reg_sz + p->mcast_list_len);
2796 BNX2X_ERR("Unknown command: %d\n", cmd);
2801 /* Increase the total number of MACs pending to be configured */
2802 o->total_pending_num += p->mcast_list_len;
/* Rolls back the bookkeeping done by bnx2x_mcast_validate_e2() after a
 * failed configuration attempt.
 */
2807 static void bnx2x_mcast_revert_e2(struct bnx2x *bp,
2808 struct bnx2x_mcast_ramrod_params *p,
2811 struct bnx2x_mcast_obj *o = p->mcast_obj;
2813 o->set_registry_size(o, old_num_bins);
2814 o->total_pending_num -= p->mcast_list_len;
2818 * bnx2x_mcast_set_rdata_hdr_e2 - sets a header values
2820 * @bp: device handle
2822 * @len: number of rules to handle
/* Fills the ramrod header: echo encodes the SW CID plus the pending-state
 * marker so the completion can be matched back to this object.
 */
2824 static inline void bnx2x_mcast_set_rdata_hdr_e2(struct bnx2x *bp,
2825 struct bnx2x_mcast_ramrod_params *p,
2828 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
2829 struct eth_multicast_rules_ramrod_data *data =
2830 (struct eth_multicast_rules_ramrod_data *)(r->rdata);
2832 data->header.echo = ((r->cid & BNX2X_SWCID_MASK) |
2833 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
2834 data->header.rule_cnt = len;
2838 * bnx2x_mcast_refresh_registry_e2 - recalculate the actual number of set bins
2840 * @bp: device handle
2843 * Recalculate the actual number of set bins in the registry using Brian
2844 * Kernighan's algorithm: it's execution complexity is as a number of set bins.
2846 * returns 0 for the compliance with bnx2x_mcast_refresh_registry_e1().
/* Popcounts the aprox_match bit vector element by element and stores the
 * result via set_registry_size().
 */
2848 static inline int bnx2x_mcast_refresh_registry_e2(struct bnx2x *bp,
2849 struct bnx2x_mcast_obj *o)
2854 for (i = 0; i < BNX2X_MCAST_VEC_SZ; i++) {
2855 elem = o->registry.aprox_match.vec[i];
2860 o->set_registry_size(o, cnt);
/* 57712 configuration step: packs pending commands (and, if room remains,
 * the current command) into one MULTICAST_RULES ramrod and posts it, unless
 * RAMROD_DRV_CLR_ONLY requests a registry-only update.
 */
2865 static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
2866 struct bnx2x_mcast_ramrod_params *p,
2869 struct bnx2x_raw_obj *raw = &p->mcast_obj->raw;
2870 struct bnx2x_mcast_obj *o = p->mcast_obj;
2871 struct eth_multicast_rules_ramrod_data *data =
2872 (struct eth_multicast_rules_ramrod_data *)(raw->rdata);
2875 /* Reset the ramrod data buffer */
2876 memset(data, 0, sizeof(*data));
2878 cnt = bnx2x_mcast_handle_pending_cmds_e2(bp, p);
2880 /* If there are no more pending commands - clear SCHEDULED state */
2881 if (list_empty(&o->pending_cmds_head))
2884 /* The below may be true iff there was enough room in ramrod
2885 * data for all pending commands and for the current
2886 * command. Otherwise the current command would have been added
2887 * to the pending commands and p->mcast_list_len would have been
2890 if (p->mcast_list_len > 0)
2891 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, cnt);
2893 /* We've pulled out some MACs - update the total number of
2896 o->total_pending_num -= cnt;
/* Sanity: bookkeeping must never go negative or exceed the chunk size */
2899 WARN_ON(o->total_pending_num < 0);
2900 WARN_ON(cnt > o->max_cmd_len);
2902 bnx2x_mcast_set_rdata_hdr_e2(bp, p, (u8)cnt);
2904 /* Update a registry size if there are no more pending operations.
2906 * We don't want to change the value of the registry size if there are
2907 * pending operations because we want it to always be equal to the
2908 * exact or the approximate number (see bnx2x_mcast_validate_e2()) of
2909 * set bins after the last requested operation in order to properly
2910 * evaluate the size of the next DEL/RESTORE operation.
2912 * Note that we update the registry itself during command(s) handling
2913 * - see bnx2x_mcast_set_one_rule_e2(). That's because for 57712 we
2914 * aggregate multiple commands (ADD/DEL/RESTORE) into one ramrod but
2915 * with a limited amount of update commands (per MAC/bin) and we don't
2916 * know in this scope what the actual state of bins configuration is
2917 * going to be after this ramrod.
2919 if (!o->total_pending_num)
2920 bnx2x_mcast_refresh_registry_e2(bp, o);
2922 /* Commit writes towards the memory before sending a ramrod */
2925 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
2926 * RAMROD_PENDING status immediately.
2928 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
2929 raw->clear_pending(raw);
2933 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_MULTICAST_RULES,
2934 raw->cid, U64_HI(raw->rdata_mapping),
2935 U64_LO(raw->rdata_mapping),
2936 ETH_CONNECTION_TYPE);
2940 /* Ramrod completion is pending */
/* 57711 validation: DEL/RESTORE carry no MAC list, so force a non-zero
 * mcast_list_len to signal that there is work to do.
 */
2945 static int bnx2x_mcast_validate_e1h(struct bnx2x *bp,
2946 struct bnx2x_mcast_ramrod_params *p,
2949 /* Mark, that there is a work to do */
2950 if ((cmd == BNX2X_MCAST_CMD_DEL) || (cmd == BNX2X_MCAST_CMD_RESTORE))
2951 p->mcast_list_len = 1;
/* 57711 revert hook.  NOTE(review): body not visible in this chunk -
 * presumably nothing to undo since validate_e1h changes no object state;
 * confirm against the full source.
 */
2956 static void bnx2x_mcast_revert_e1h(struct bnx2x *bp,
2957 struct bnx2x_mcast_ramrod_params *p,
/* Sets bit 'bit' in the u32-array MC hash 'filter':
 * word index = bit >> 5, bit within the word = bit & 0x1f.
 */
2963 #define BNX2X_57711_SET_MC_FILTER(filter, bit) \
2965 (filter)[(bit) >> 5] |= (1 << ((bit) & 0x1f)); \
/* 57711 ADD: hashes each MAC to a bin, sets that bin both in the ramrod
 * mc_filter and in the object's approximate-match registry.
 */
2968 static inline void bnx2x_mcast_hdl_add_e1h(struct bnx2x *bp,
2969 struct bnx2x_mcast_obj *o,
2970 struct bnx2x_mcast_ramrod_params *p,
2973 struct bnx2x_mcast_list_elem *mlist_pos;
2976 list_for_each_entry(mlist_pos, &p->mcast_list, link) {
2977 bit = bnx2x_mcast_bin_from_mac(mlist_pos->mac);
2978 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
2980 DP(BNX2X_MSG_SP, "About to configure "
2981 BNX2X_MAC_FMT" mcast MAC, bin %d\n",
2982 BNX2X_MAC_PRN_LIST(mlist_pos->mac), bit);
2984 /* bookkeeping... */
2985 BIT_VEC64_SET_BIT(o->registry.aprox_match.vec,
/* 57711 RESTORE: replays every set bin from the registry into mc_filter. */
2990 static inline void bnx2x_mcast_hdl_restore_e1h(struct bnx2x *bp,
2991 struct bnx2x_mcast_obj *o, struct bnx2x_mcast_ramrod_params *p,
2996 for (bit = bnx2x_mcast_get_next_bin(o, 0);
2998 bit = bnx2x_mcast_get_next_bin(o, bit + 1)) {
2999 BNX2X_57711_SET_MC_FILTER(mc_filter, bit);
3000 DP(BNX2X_MSG_SP, "About to set bin %d\n", bit);
3004 /* On 57711 we write the multicast MACs' aproximate match
3005 * table by directly into the TSTORM's internal RAM. So we don't
3006 * really need to handle any tricks to make it work.
/* No ramrod is posted: the MC hash is written synchronously via REG_WR,
 * so the pending bit is cleared before returning.
 */
3008 static int bnx2x_mcast_setup_e1h(struct bnx2x *bp,
3009 struct bnx2x_mcast_ramrod_params *p,
3013 struct bnx2x_mcast_obj *o = p->mcast_obj;
3014 struct bnx2x_raw_obj *r = &o->raw;
3016 /* If CLEAR_ONLY has been requested - clear the registry
3017 * and clear a pending bit.
3019 if (!test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3020 u32 mc_filter[MC_HASH_SIZE] = {0};
3022 /* Set the multicast filter bits before writing it into
3023 * the internal memory.
3026 case BNX2X_MCAST_CMD_ADD:
3027 bnx2x_mcast_hdl_add_e1h(bp, o, p, mc_filter);
3030 case BNX2X_MCAST_CMD_DEL:
3031 DP(BNX2X_MSG_SP, "Invalidating multicast "
3032 "MACs configuration\n");
3034 /* clear the registry */
3035 memset(o->registry.aprox_match.vec, 0,
3036 sizeof(o->registry.aprox_match.vec));
3039 case BNX2X_MCAST_CMD_RESTORE:
3040 bnx2x_mcast_hdl_restore_e1h(bp, o, p, mc_filter);
3044 BNX2X_ERR("Unknown command: %d\n", cmd);
3048 /* Set the mcast filter in the internal memory */
3049 for (i = 0; i < MC_HASH_SIZE; i++)
3050 REG_WR(bp, MC_HASH_OFFSET(bp, i), mc_filter[i]);
3052 /* clear the registry */
3053 memset(o->registry.aprox_match.vec, 0,
3054 sizeof(o->registry.aprox_match.vec));
3057 r->clear_pending(r);
3062 static int bnx2x_mcast_validate_e1(struct bnx2x *bp,
3063 struct bnx2x_mcast_ramrod_params *p,
3066 struct bnx2x_mcast_obj *o = p->mcast_obj;
3067 int reg_sz = o->get_registry_size(o);
3070 /* DEL command deletes all currently configured MACs */
3071 case BNX2X_MCAST_CMD_DEL:
3072 o->set_registry_size(o, 0);
3075 /* RESTORE command will restore the entire multicast configuration */
3076 case BNX2X_MCAST_CMD_RESTORE:
3077 p->mcast_list_len = reg_sz;
3078 DP(BNX2X_MSG_SP, "Command %d, p->mcast_list_len=%d\n",
3079 cmd, p->mcast_list_len);
3082 case BNX2X_MCAST_CMD_ADD:
3083 case BNX2X_MCAST_CMD_CONT:
3084 /* Multicast MACs on 57710 are configured as unicast MACs and
3085 * there is only a limited number of CAM entries for that
3088 if (p->mcast_list_len > o->max_cmd_len) {
3089 BNX2X_ERR("Can't configure more than %d multicast MACs"
3090 "on 57710\n", o->max_cmd_len);
3093 /* Every configured MAC should be cleared if DEL command is
3094 * called. Only the last ADD command is relevant as long as
3095 * every ADD commands overrides the previous configuration.
3097 DP(BNX2X_MSG_SP, "p->mcast_list_len=%d\n", p->mcast_list_len);
3098 if (p->mcast_list_len > 0)
3099 o->set_registry_size(o, p->mcast_list_len);
3104 BNX2X_ERR("Unknown command: %d\n", cmd);
3109 /* We want to ensure that commands are executed one by one for 57710.
3110 * Therefore each none-empty command will consume o->max_cmd_len.
3112 if (p->mcast_list_len)
3113 o->total_pending_num += o->max_cmd_len;
/* Rolls back bnx2x_mcast_validate_e1() bookkeeping: restores the registry
 * size and releases the full command slot if the current command was
 * never handled.
 */
3118 static void bnx2x_mcast_revert_e1(struct bnx2x *bp,
3119 struct bnx2x_mcast_ramrod_params *p,
3122 struct bnx2x_mcast_obj *o = p->mcast_obj;
3124 o->set_registry_size(o, old_num_macs);
3126 /* If current command hasn't been handled yet and we are
3127 * here means that it's meant to be dropped and we have to
3128 * update the number of outstandling MACs accordingly.
3130 if (p->mcast_list_len)
3131 o->total_pending_num -= o->max_cmd_len;
/* 57710 rule writer: fills one exact-match CAM entry in the
 * mac_configuration_cmd ramrod data for ADD/RESTORE (DEL entries are left
 * invalidated by bnx2x_mcast_setup_e1()).
 */
3134 static void bnx2x_mcast_set_one_rule_e1(struct bnx2x *bp,
3135 struct bnx2x_mcast_obj *o, int idx,
3136 union bnx2x_mcast_config_data *cfg_data,
3139 struct bnx2x_raw_obj *r = &o->raw;
3140 struct mac_configuration_cmd *data =
3141 (struct mac_configuration_cmd *)(r->rdata);
3144 if ((cmd == BNX2X_MCAST_CMD_ADD) || (cmd == BNX2X_MCAST_CMD_RESTORE)) {
3145 bnx2x_set_fw_mac_addr(&data->config_table[idx].msb_mac_addr,
3146 &data->config_table[idx].middle_mac_addr,
3147 &data->config_table[idx].lsb_mac_addr,
3150 data->config_table[idx].vlan_id = 0;
3151 data->config_table[idx].pf_id = r->func_id;
3152 data->config_table[idx].clients_bit_vector =
3153 cpu_to_le32(1 << r->cl_id);
3155 SET_FLAG(data->config_table[idx].flags,
3156 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3157 T_ETH_MAC_COMMAND_SET);
3162 * bnx2x_mcast_set_rdata_hdr_e1 - set header values in mac_configuration_cmd
3164 * @bp: device handle
3166 * @len: number of rules to handle
/* Mcast CAM entries live past the per-function unicast region, hence the
 * (1 + func_id) * region-size offset; echo encodes SW CID + pending marker.
 */
3168 static inline void bnx2x_mcast_set_rdata_hdr_e1(struct bnx2x *bp,
3169 struct bnx2x_mcast_ramrod_params *p,
3172 struct bnx2x_raw_obj *r = &p->mcast_obj->raw;
3173 struct mac_configuration_cmd *data =
3174 (struct mac_configuration_cmd *)(r->rdata);
3176 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
3177 BNX2X_MAX_EMUL_MULTI*(1 + r->func_id) :
3178 BNX2X_MAX_MULTICAST*(1 + r->func_id));
3180 data->hdr.offset = offset;
3181 data->hdr.client_id = 0xff;
3182 data->hdr.echo = ((r->cid & BNX2X_SWCID_MASK) |
3183 (BNX2X_FILTER_MCAST_PENDING << BNX2X_SWCID_SHIFT));
3184 data->hdr.length = len;
3188 * bnx2x_mcast_handle_restore_cmd_e1 - restore command for 57710
3190 * @bp: device handle
3192 * @start_idx: index in the registry to start from
3193 * @rdata_idx: index in the ramrod data to start from
3195 * restore command for 57710 is like all other commands - always a stand alone
3196 * command - start_idx and rdata_idx will always be 0. This function will always
3198 * returns -1 to comply with 57712 variant.
/* Replays every MAC from the exact-match registry into the ramrod data. */
3200 static inline int bnx2x_mcast_handle_restore_cmd_e1(
3201 struct bnx2x *bp, struct bnx2x_mcast_obj *o , int start_idx,
3204 struct bnx2x_mcast_mac_elem *elem;
3206 union bnx2x_mcast_config_data cfg_data = {0};
3208 /* go through the registry and configure the MACs from it. */
3209 list_for_each_entry(elem, &o->registry.exact_match.macs, link) {
3210 cfg_data.mac = &elem->mac[0];
3211 o->set_one_rule(bp, o, i, &cfg_data, BNX2X_MCAST_CMD_RESTORE);
3215 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3217 BNX2X_MAC_PRN_LIST(cfg_data.mac));
/* 57710 handles exactly one pending command per ramrod (commands always
 * fit since each reserves a full o->max_cmd_len slot).  Configures the
 * command's rules, unlinks and frees it, returns the rule count.
 */
3226 static inline int bnx2x_mcast_handle_pending_cmds_e1(
3227 struct bnx2x *bp, struct bnx2x_mcast_ramrod_params *p)
3229 struct bnx2x_pending_mcast_cmd *cmd_pos;
3230 struct bnx2x_mcast_mac_elem *pmac_pos;
3231 struct bnx2x_mcast_obj *o = p->mcast_obj;
3232 union bnx2x_mcast_config_data cfg_data = {0};
3236 /* If nothing to be done - return */
3237 if (list_empty(&o->pending_cmds_head))
3240 /* Handle the first command */
3241 cmd_pos = list_first_entry(&o->pending_cmds_head,
3242 struct bnx2x_pending_mcast_cmd, link);
3244 switch (cmd_pos->type) {
3245 case BNX2X_MCAST_CMD_ADD:
3246 list_for_each_entry(pmac_pos, &cmd_pos->data.macs_head, link) {
3247 cfg_data.mac = &pmac_pos->mac[0];
3248 o->set_one_rule(bp, o, cnt, &cfg_data, cmd_pos->type);
3252 DP(BNX2X_MSG_SP, "About to configure "BNX2X_MAC_FMT
3254 BNX2X_MAC_PRN_LIST(pmac_pos->mac));
3258 case BNX2X_MCAST_CMD_DEL:
/* DEL needs no per-rule writes: entries were pre-invalidated in setup_e1 */
3259 cnt = cmd_pos->data.macs_num;
3260 DP(BNX2X_MSG_SP, "About to delete %d multicast MACs\n", cnt);
3263 case BNX2X_MCAST_CMD_RESTORE:
3264 o->hdl_restore(bp, o, 0, &cnt);
3268 BNX2X_ERR("Unknown command: %d\n", cmd_pos->type);
3272 list_del(&cmd_pos->link);
3279 * bnx2x_get_fw_mac_addr - revert the bnx2x_set_fw_mac_addr().
3286 static inline void bnx2x_get_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
3287 __le16 *fw_lo, u8 *mac)
3289 mac[1] = ((u8 *)fw_hi)[0];
3290 mac[0] = ((u8 *)fw_hi)[1];
3291 mac[3] = ((u8 *)fw_mid)[0];
3292 mac[2] = ((u8 *)fw_mid)[1];
3293 mac[5] = ((u8 *)fw_lo)[0];
3294 mac[4] = ((u8 *)fw_lo)[1];
3298 * bnx2x_mcast_refresh_registry_e1 -
3300 * @bp: device handle
3303 * Check the ramrod data first entry flag to see if it's a DELETE or ADD command
3304 * and update the registry correspondingly: if ADD - allocate a memory and add
3305 * the entries to the registry (list), if DELETE - clear the registry and free
/* The ADD path allocates all elements as ONE kzalloc'ed array, so the DEL
 * path can free them with a single kfree of the first list entry.
 */
3308 static inline int bnx2x_mcast_refresh_registry_e1(struct bnx2x *bp,
3309 struct bnx2x_mcast_obj *o)
3311 struct bnx2x_raw_obj *raw = &o->raw;
3312 struct bnx2x_mcast_mac_elem *elem;
3313 struct mac_configuration_cmd *data =
3314 (struct mac_configuration_cmd *)(raw->rdata);
3316 /* If first entry contains a SET bit - the command was ADD,
3317 * otherwise - DEL_ALL
3319 if (GET_FLAG(data->config_table[0].flags,
3320 MAC_CONFIGURATION_ENTRY_ACTION_TYPE)) {
3321 int i, len = data->hdr.length;
3323 /* Break if it was a RESTORE command */
3324 if (!list_empty(&o->registry.exact_match.macs))
/* single array allocation backing all 'len' registry elements */
3327 elem = kzalloc(sizeof(*elem)*len, GFP_ATOMIC);
3329 BNX2X_ERR("Failed to allocate registry memory\n");
3333 for (i = 0; i < len; i++, elem++) {
3334 bnx2x_get_fw_mac_addr(
3335 &data->config_table[i].msb_mac_addr,
3336 &data->config_table[i].middle_mac_addr,
3337 &data->config_table[i].lsb_mac_addr,
3339 DP(BNX2X_MSG_SP, "Adding registry entry for ["
3341 BNX2X_MAC_PRN_LIST(elem->mac));
3342 list_add_tail(&elem->link,
3343 &o->registry.exact_match.macs);
3346 elem = list_first_entry(&o->registry.exact_match.macs,
3347 struct bnx2x_mcast_mac_elem, link);
3348 DP(BNX2X_MSG_SP, "Deleting a registry\n");
3350 INIT_LIST_HEAD(&o->registry.exact_match.macs);
/* 57710 configuration step: invalidates all CAM entries, fills them from
 * the (single) pending or current command, refreshes the exact-match
 * registry, then posts a SET_MAC ramrod unless CLEAR_ONLY was requested.
 */
3356 static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
3357 struct bnx2x_mcast_ramrod_params *p,
3360 struct bnx2x_mcast_obj *o = p->mcast_obj;
3361 struct bnx2x_raw_obj *raw = &o->raw;
3362 struct mac_configuration_cmd *data =
3363 (struct mac_configuration_cmd *)(raw->rdata);
3366 /* Reset the ramrod data buffer */
3367 memset(data, 0, sizeof(*data));
3369 /* First set all entries as invalid */
3370 for (i = 0; i < o->max_cmd_len ; i++)
3371 SET_FLAG(data->config_table[i].flags,
3372 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
3373 T_ETH_MAC_COMMAND_INVALIDATE);
3375 /* Handle pending commands first */
3376 cnt = bnx2x_mcast_handle_pending_cmds_e1(bp, p);
3378 /* If there are no more pending commands - clear SCHEDULED state */
3379 if (list_empty(&o->pending_cmds_head))
3382 /* The below may be true iff there were no pending commands */
3384 cnt = bnx2x_mcast_handle_current_cmd(bp, p, cmd, 0);
3386 /* For 57710 every command has o->max_cmd_len length to ensure that
3387 * commands are done one at a time.
3389 o->total_pending_num -= o->max_cmd_len;
3393 WARN_ON(cnt > o->max_cmd_len);
3395 /* Set ramrod header (in particular, a number of entries to update) */
3396 bnx2x_mcast_set_rdata_hdr_e1(bp, p, (u8)cnt);
3398 /* update a registry: we need the registry contents to be always up
3399 * to date in order to be able to execute a RESTORE opcode. Here
3400 * we use the fact that for 57710 we sent one command at a time
3401 * hence we may take the registry update out of the command handling
3402 * and do it in a simpler way here.
3404 rc = bnx2x_mcast_refresh_registry_e1(bp, o);
3408 /* Commit writes towards the memory before sending a ramrod */
3411 /* If CLEAR_ONLY was requested - don't send a ramrod and clear
3412 * RAMROD_PENDING status immediately.
3414 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
3415 raw->clear_pending(raw);
3419 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, raw->cid,
3420 U64_HI(raw->rdata_mapping),
3421 U64_LO(raw->rdata_mapping),
3422 ETH_CONNECTION_TYPE);
3426 /* Ramrod completion is pending */
/* Registry size getter for exact-match (57710) objects. */
3432 static int bnx2x_mcast_get_registry_size_exact(struct bnx2x_mcast_obj *o)
3434 return o->registry.exact_match.num_macs_set;
/* Registry size getter for approximate-match (57711/57712) objects. */
3437 static int bnx2x_mcast_get_registry_size_aprox(struct bnx2x_mcast_obj *o)
3439 return o->registry.aprox_match.num_bins_set;
/* Registry size setter for exact-match (57710) objects. */
3442 static void bnx2x_mcast_set_registry_size_exact(struct bnx2x_mcast_obj *o,
3445 o->registry.exact_match.num_macs_set = n;
/* Registry size setter for approximate-match (57711/57712) objects. */
3448 static void bnx2x_mcast_set_registry_size_aprox(struct bnx2x_mcast_obj *o,
3451 o->registry.aprox_match.num_bins_set = n;
/* Public entry point: validates the command, enqueues it if a ramrod is
 * already pending (or too much work is outstanding), otherwise configures
 * immediately via o->config_mcast(); reverts bookkeeping on error.
 */
3454 int bnx2x_config_mcast(struct bnx2x *bp,
3455 struct bnx2x_mcast_ramrod_params *p,
3458 struct bnx2x_mcast_obj *o = p->mcast_obj;
3459 struct bnx2x_raw_obj *r = &o->raw;
3460 int rc = 0, old_reg_size;
3462 /* This is needed to recover number of currently configured mcast macs
3463 * in case of failure.
3465 old_reg_size = o->get_registry_size(o);
3467 /* Do some calculations and checks */
3468 rc = o->validate(bp, p, cmd);
3472 /* Return if there is no work to do */
3473 if ((!p->mcast_list_len) && (!o->check_sched(o)))
3476 DP(BNX2X_MSG_SP, "o->total_pending_num=%d p->mcast_list_len=%d "
3477 "o->max_cmd_len=%d\n", o->total_pending_num,
3478 p->mcast_list_len, o->max_cmd_len);
3480 /* Enqueue the current command to the pending list if we can't complete
3481 * it in the current iteration
3483 if (r->check_pending(r) ||
3484 ((o->max_cmd_len > 0) && (o->total_pending_num > o->max_cmd_len))) {
3485 rc = o->enqueue_cmd(bp, p->mcast_obj, p, cmd);
3489 /* As long as the current command is in a command list we
3490 * don't need to handle it separately.
3492 p->mcast_list_len = 0;
3495 if (!r->check_pending(r)) {
3497 /* Set 'pending' state */
3500 /* Configure the new classification in the chip */
3501 rc = o->config_mcast(bp, p, cmd);
3505 /* Wait for a ramrod completion if was requested */
3506 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
3507 rc = o->wait_comp(bp, o);
/* error path: drop the pending bit and undo validate()'s bookkeeping */
3513 r->clear_pending(r);
3516 o->revert(bp, p, old_reg_size);
/* Clears the SCHEDULED bit with full barriers on both sides so the state
 * change is visible to other CPUs in the right order.
 */
3521 static void bnx2x_mcast_clear_sched(struct bnx2x_mcast_obj *o)
3523 smp_mb__before_clear_bit();
3524 clear_bit(o->sched_state, o->raw.pstate);
3525 smp_mb__after_clear_bit();
/* Sets the SCHEDULED bit surrounded by barriers.
 * NOTE(review): the *_clear_bit barrier flavors are used around set_bit();
 * on this kernel they are the same full smp_mb(), so ordering holds -
 * confirm if the barrier API is ever changed to per-op variants.
 */
3528 static void bnx2x_mcast_set_sched(struct bnx2x_mcast_obj *o)
3530 smp_mb__before_clear_bit();
3531 set_bit(o->sched_state, o->raw.pstate);
3532 smp_mb__after_clear_bit();
/* True iff commands are queued for this object (SCHEDULED bit set). */
3535 static bool bnx2x_mcast_check_sched(struct bnx2x_mcast_obj *o)
3537 return !!test_bit(o->sched_state, o->raw.pstate);
/* True iff a ramrod is in flight or commands are scheduled. */
3540 static bool bnx2x_mcast_check_pending(struct bnx2x_mcast_obj *o)
3542 return o->raw.check_pending(&o->raw) || o->check_sched(o);
/* Initializes a multicast object, wiring the chip-specific callback set:
 * 57710 (E1) uses exact-match CAM entries, 57711 (E1H) a direct-write
 * approximate-match hash, 57712+ (E2) aggregated MULTICAST_RULES ramrods.
 */
3545 void bnx2x_init_mcast_obj(struct bnx2x *bp,
3546 struct bnx2x_mcast_obj *mcast_obj,
3547 u8 mcast_cl_id, u32 mcast_cid, u8 func_id,
3548 u8 engine_id, void *rdata, dma_addr_t rdata_mapping,
3549 int state, unsigned long *pstate, bnx2x_obj_type type)
3551 memset(mcast_obj, 0, sizeof(*mcast_obj));
3553 bnx2x_init_raw_obj(&mcast_obj->raw, mcast_cl_id, mcast_cid, func_id,
3554 rdata, rdata_mapping, state, pstate, type);
3556 mcast_obj->engine_id = engine_id;
3558 INIT_LIST_HEAD(&mcast_obj->pending_cmds_head);
3560 mcast_obj->sched_state = BNX2X_FILTER_MCAST_SCHED;
3561 mcast_obj->check_sched = bnx2x_mcast_check_sched;
3562 mcast_obj->set_sched = bnx2x_mcast_set_sched;
3563 mcast_obj->clear_sched = bnx2x_mcast_clear_sched;
3565 if (CHIP_IS_E1(bp)) {
3566 mcast_obj->config_mcast = bnx2x_mcast_setup_e1;
3567 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3568 mcast_obj->hdl_restore =
3569 bnx2x_mcast_handle_restore_cmd_e1;
3570 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3572 if (CHIP_REV_IS_SLOW(bp))
3573 mcast_obj->max_cmd_len = BNX2X_MAX_EMUL_MULTI;
3575 mcast_obj->max_cmd_len = BNX2X_MAX_MULTICAST;
3577 mcast_obj->wait_comp = bnx2x_mcast_wait;
3578 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e1;
3579 mcast_obj->validate = bnx2x_mcast_validate_e1;
3580 mcast_obj->revert = bnx2x_mcast_revert_e1;
3581 mcast_obj->get_registry_size =
3582 bnx2x_mcast_get_registry_size_exact;
3583 mcast_obj->set_registry_size =
3584 bnx2x_mcast_set_registry_size_exact;
3586 /* 57710 is the only chip that uses the exact match for mcast
3589 INIT_LIST_HEAD(&mcast_obj->registry.exact_match.macs);
3591 } else if (CHIP_IS_E1H(bp)) {
3592 mcast_obj->config_mcast = bnx2x_mcast_setup_e1h;
/* 57711 never defers work, so no enqueue/restore handlers */
3593 mcast_obj->enqueue_cmd = NULL;
3594 mcast_obj->hdl_restore = NULL;
3595 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3597 /* 57711 doesn't send a ramrod, so it has unlimited credit
3600 mcast_obj->max_cmd_len = -1;
3601 mcast_obj->wait_comp = bnx2x_mcast_wait;
3602 mcast_obj->set_one_rule = NULL;
3603 mcast_obj->validate = bnx2x_mcast_validate_e1h;
3604 mcast_obj->revert = bnx2x_mcast_revert_e1h;
3605 mcast_obj->get_registry_size =
3606 bnx2x_mcast_get_registry_size_aprox;
3607 mcast_obj->set_registry_size =
3608 bnx2x_mcast_set_registry_size_aprox;
3610 mcast_obj->config_mcast = bnx2x_mcast_setup_e2;
3611 mcast_obj->enqueue_cmd = bnx2x_mcast_enqueue_cmd;
3612 mcast_obj->hdl_restore =
3613 bnx2x_mcast_handle_restore_cmd_e2;
3614 mcast_obj->check_pending = bnx2x_mcast_check_pending;
3615 /* TODO: There should be a proper HSI define for this number!!!
3617 mcast_obj->max_cmd_len = 16;
3618 mcast_obj->wait_comp = bnx2x_mcast_wait;
3619 mcast_obj->set_one_rule = bnx2x_mcast_set_one_rule_e2;
3620 mcast_obj->validate = bnx2x_mcast_validate_e2;
3621 mcast_obj->revert = bnx2x_mcast_revert_e2;
3622 mcast_obj->get_registry_size =
3623 bnx2x_mcast_get_registry_size_aprox;
3624 mcast_obj->set_registry_size =
3625 bnx2x_mcast_set_registry_size_aprox;
3629 /*************************** Credit handling **********************************/
3632 * atomic_add_ifless - add if the result is less than a given value.
3634 * @v: pointer of type atomic_t
3635 * @a: the amount to add to v...
3636 * @u: ...if (v + a) is less than u.
3638 * returns true if (v + a) was less than u, and false otherwise.
/* Lock-free read/compare/cmpxchg retry loop; succeeds only if the bound
 * still holds at commit time.
 */
3641 static inline bool __atomic_add_ifless(atomic_t *v, int a, int u)
3647 if (unlikely(c + a >= u))
3650 old = atomic_cmpxchg((v), c, c + a);
3651 if (likely(old == c))
3660 * atomic_dec_ifmoe - dec if the result is more or equal than a given value.
3662 * @v: pointer of type atomic_t
3663 * @a: the amount to dec from v...
3664 * @u: ...if (v - a) is more or equal than u.
3666 * returns true if (v - a) was more or equal than u, and false
/* Mirror of __atomic_add_ifless: cmpxchg retry loop enforcing the lower
 * bound at commit time.
 */
3669 static inline bool __atomic_dec_ifmoe(atomic_t *v, int a, int u)
3675 if (unlikely(c - a < u))
3678 old = atomic_cmpxchg((v), c, c - a);
3679 if (likely(old == c))
/* Takes cnt credits; fails (false) if that would drop the credit below 0. */
3687 static bool bnx2x_credit_pool_get(struct bnx2x_credit_pool_obj *o, int cnt)
3692 rc = __atomic_dec_ifmoe(&o->credit, cnt, 0);
/* Returns cnt credits; fails (false) if that would exceed pool_sz. */
3698 static bool bnx2x_credit_pool_put(struct bnx2x_credit_pool_obj *o, int cnt)
3704 /* Don't let to refill if credit + cnt > pool_sz */
3705 rc = __atomic_add_ifless(&o->credit, cnt, o->pool_sz + 1);
/* Reads the current credit level of the pool. */
3712 static int bnx2x_credit_pool_check(struct bnx2x_credit_pool_obj *o)
3717 cur_credit = atomic_read(&o->credit);
/* get/put stub used when credit checking is disabled (unlimited pool). */
3722 static bool bnx2x_credit_pool_always_true(struct bnx2x_credit_pool_obj *o,
3729 static bool bnx2x_credit_pool_get_entry(
3730 struct bnx2x_credit_pool_obj *o,
3737 /* Find "internal cam-offset" then add to base for this object... */
3738 for (vec = 0; vec < BNX2X_POOL_VEC_SIZE; vec++) {
3740 /* Skip the current vector if there are no free entries in it */
3741 if (!o->pool_mirror[vec])
3744 /* If we've got here we are going to find a free entry */
3745 for (idx = vec * BNX2X_POOL_VEC_SIZE, i = 0;
3746 i < BIT_VEC64_ELEM_SZ; idx++, i++)
3748 if (BIT_VEC64_TEST_BIT(o->pool_mirror, idx)) {
3750 BIT_VEC64_CLEAR_BIT(o->pool_mirror, idx);
3751 *offset = o->base_pool_offset + idx;
/* Frees a CAM entry: validates that 'offset' lies inside this pool's
 * [base, base + pool_sz) window, then re-sets its mirror bit.
 */
3759 static bool bnx2x_credit_pool_put_entry(
3760 struct bnx2x_credit_pool_obj *o,
3763 if (offset < o->base_pool_offset)
3766 offset -= o->base_pool_offset;
3768 if (offset >= o->pool_sz)
3771 /* Return the entry to the pool */
3772 BIT_VEC64_SET_BIT(o->pool_mirror, offset);
/* put_entry stub used when CAM-offset handling is disabled. */
3777 static bool bnx2x_credit_pool_put_entry_always_true(
3778 struct bnx2x_credit_pool_obj *o,
/* get_entry stub used when CAM-offset handling is disabled. */
3784 static bool bnx2x_credit_pool_get_entry_always_true(
3785 struct bnx2x_credit_pool_obj *o,
3792 * bnx2x_init_credit_pool - initialize credit pool internals.
3795 * @base: Base entry in the CAM to use.
3796 * @credit: pool size.
3798 * If base is negative no CAM entries handling will be performed.
3799 * If credit is negative pool operations will always succeed (unlimited pool).
/* Fills in the op table: real get/put/entry handlers for a bounded pool,
 * always-true stubs when credit or base is negative.
 */
3802 static inline void bnx2x_init_credit_pool(struct bnx2x_credit_pool_obj *p,
3803 int base, int credit)
3805 /* Zero the object first */
3806 memset(p, 0, sizeof(*p));
3808 /* Set the table to all 1s */
3809 memset(&p->pool_mirror, 0xff, sizeof(p->pool_mirror));
3811 /* Init a pool as full */
3812 atomic_set(&p->credit, credit);
3814 /* The total poll size */
3815 p->pool_sz = credit;
3817 p->base_pool_offset = base;
3819 /* Commit the change */
3822 p->check = bnx2x_credit_pool_check;
3824 /* if pool credit is negative - disable the checks */
3826 p->put = bnx2x_credit_pool_put;
3827 p->get = bnx2x_credit_pool_get;
3828 p->put_entry = bnx2x_credit_pool_put_entry;
3829 p->get_entry = bnx2x_credit_pool_get_entry;
3831 p->put = bnx2x_credit_pool_always_true;
3832 p->get = bnx2x_credit_pool_always_true;
3833 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3834 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
3837 /* If base is negative - disable entries handling */
3839 p->put_entry = bnx2x_credit_pool_put_entry_always_true;
3840 p->get_entry = bnx2x_credit_pool_get_entry_always_true;
/* Sizes the per-function MAC credit pool by chip generation: E1 reserves
 * multicast CAM space, E1H splits the CAM equally among active functions,
 * E2 does the same but disables per-entry offset handling (base = -1).
 */
3844 void bnx2x_init_mac_credit_pool(struct bnx2x *bp,
3845 struct bnx2x_credit_pool_obj *p, u8 func_id,
3848 /* TODO: this will be defined in consts as well... */
3849 #define BNX2X_CAM_SIZE_EMUL 5
3853 if (CHIP_IS_E1(bp)) {
3854 /* In E1, Multicast is saved in cam... */
3855 if (!CHIP_REV_IS_SLOW(bp))
3856 cam_sz = (MAX_MAC_CREDIT_E1 / 2) - BNX2X_MAX_MULTICAST;
3858 cam_sz = BNX2X_CAM_SIZE_EMUL - BNX2X_MAX_EMUL_MULTI;
3860 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3862 } else if (CHIP_IS_E1H(bp)) {
3863 /* CAM credit is equaly divided between all active functions
3866 if ((func_num > 0)) {
3867 if (!CHIP_REV_IS_SLOW(bp))
3868 cam_sz = (MAX_MAC_CREDIT_E1H / (2*func_num));
3870 cam_sz = BNX2X_CAM_SIZE_EMUL;
3871 bnx2x_init_credit_pool(p, func_id * cam_sz, cam_sz);
3873 /* this should never happen! Block MAC operations. */
3874 bnx2x_init_credit_pool(p, 0, 0);
3880 * CAM credit is equaly divided between all active functions
3883 if ((func_num > 0)) {
3884 if (!CHIP_REV_IS_SLOW(bp))
3885 cam_sz = (MAX_MAC_CREDIT_E2 / func_num);
3887 cam_sz = BNX2X_CAM_SIZE_EMUL;
3890 * No need for CAM entries handling for 57712 and
3893 bnx2x_init_credit_pool(p, -1, cam_sz);
3895 /* this should never happen! Block MAC operations. */
3896 bnx2x_init_credit_pool(p, 0, 0);
3902 void bnx2x_init_vlan_credit_pool(struct bnx2x *bp,
3903 struct bnx2x_credit_pool_obj *p,
3907 if (CHIP_IS_E1x(bp)) {
3909 * There is no VLAN credit in HW on 57710 and 57711 only
3910 * MAC / MAC-VLAN can be set
3912 bnx2x_init_credit_pool(p, 0, -1);
3915 * CAM credit is equaly divided between all active functions
3919 int credit = MAX_VLAN_CREDIT_E2 / func_num;
3920 bnx2x_init_credit_pool(p, func_id * credit, credit);
3922 /* this should never happen! Block VLAN operations. */
3923 bnx2x_init_credit_pool(p, 0, 0);
3927 /****************** RSS Configuration ******************/
3929 * bnx2x_debug_print_ind_table - prints the indirection table configuration.
3931 * @bp: driver hanlde
3932 * @p: pointer to rss configuration
3934 * Prints it when NETIF_MSG_IFUP debug level is configured.
3936 static inline void bnx2x_debug_print_ind_table(struct bnx2x *bp,
3937 struct bnx2x_config_rss_params *p)
3941 DP(BNX2X_MSG_SP, "Setting indirection table to:\n");
3942 DP(BNX2X_MSG_SP, "0x0000: ");
3943 for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
3944 DP_CONT(BNX2X_MSG_SP, "0x%02x ", p->ind_table[i]);
3946 /* Print 4 bytes in a line */
3947 if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) &&
3948 (((i + 1) & 0x3) == 0)) {
3949 DP_CONT(BNX2X_MSG_SP, "\n");
3950 DP(BNX2X_MSG_SP, "0x%04x: ", i + 1);
3954 DP_CONT(BNX2X_MSG_SP, "\n");
3958 * bnx2x_setup_rss - configure RSS
3960 * @bp: device handle
3961 * @p: rss configuration
3963 * sends on UPDATE ramrod for that matter.
3965 static int bnx2x_setup_rss(struct bnx2x *bp,
3966 struct bnx2x_config_rss_params *p)
3968 struct bnx2x_rss_config_obj *o = p->rss_obj;
3969 struct bnx2x_raw_obj *r = &o->raw;
3970 struct eth_rss_update_ramrod_data *data =
3971 (struct eth_rss_update_ramrod_data *)(r->rdata);
3975 memset(data, 0, sizeof(*data));
3977 DP(BNX2X_MSG_SP, "Configuring RSS\n");
3979 /* Set an echo field */
3980 data->echo = (r->cid & BNX2X_SWCID_MASK) |
3981 (r->state << BNX2X_SWCID_SHIFT);
3984 if (test_bit(BNX2X_RSS_MODE_DISABLED, &p->rss_flags))
3985 rss_mode = ETH_RSS_MODE_DISABLED;
3986 else if (test_bit(BNX2X_RSS_MODE_REGULAR, &p->rss_flags))
3987 rss_mode = ETH_RSS_MODE_REGULAR;
3988 else if (test_bit(BNX2X_RSS_MODE_VLAN_PRI, &p->rss_flags))
3989 rss_mode = ETH_RSS_MODE_VLAN_PRI;
3990 else if (test_bit(BNX2X_RSS_MODE_E1HOV_PRI, &p->rss_flags))
3991 rss_mode = ETH_RSS_MODE_E1HOV_PRI;
3992 else if (test_bit(BNX2X_RSS_MODE_IP_DSCP, &p->rss_flags))
3993 rss_mode = ETH_RSS_MODE_IP_DSCP;
3995 data->rss_mode = rss_mode;
3997 DP(BNX2X_MSG_SP, "rss_mode=%d\n", rss_mode);
3999 /* RSS capabilities */
4000 if (test_bit(BNX2X_RSS_IPV4, &p->rss_flags))
4001 data->capabilities |=
4002 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_CAPABILITY;
4004 if (test_bit(BNX2X_RSS_IPV4_TCP, &p->rss_flags))
4005 data->capabilities |=
4006 ETH_RSS_UPDATE_RAMROD_DATA_IPV4_TCP_CAPABILITY;
4008 if (test_bit(BNX2X_RSS_IPV6, &p->rss_flags))
4009 data->capabilities |=
4010 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_CAPABILITY;
4012 if (test_bit(BNX2X_RSS_IPV6_TCP, &p->rss_flags))
4013 data->capabilities |=
4014 ETH_RSS_UPDATE_RAMROD_DATA_IPV6_TCP_CAPABILITY;
4017 data->rss_result_mask = p->rss_result_mask;
4020 data->rss_engine_id = o->engine_id;
4022 DP(BNX2X_MSG_SP, "rss_engine_id=%d\n", data->rss_engine_id);
4024 /* Indirection table */
4025 memcpy(data->indirection_table, p->ind_table,
4026 T_ETH_INDIRECTION_TABLE_SIZE);
4028 /* Remember the last configuration */
4029 memcpy(o->ind_table, p->ind_table, T_ETH_INDIRECTION_TABLE_SIZE);
4031 /* Print the indirection table */
4032 if (netif_msg_ifup(bp))
4033 bnx2x_debug_print_ind_table(bp, p);
4036 if (test_bit(BNX2X_RSS_SET_SRCH, &p->rss_flags)) {
4037 memcpy(&data->rss_key[0], &p->rss_key[0],
4038 sizeof(data->rss_key));
4039 data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
4042 /* Commit writes towards the memory before sending a ramrod */
4046 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_RSS_UPDATE, r->cid,
4047 U64_HI(r->rdata_mapping),
4048 U64_LO(r->rdata_mapping),
4049 ETH_CONNECTION_TYPE);
4057 void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
4060 memcpy(ind_table, rss_obj->ind_table, sizeof(rss_obj->ind_table));
4063 int bnx2x_config_rss(struct bnx2x *bp,
4064 struct bnx2x_config_rss_params *p)
4067 struct bnx2x_rss_config_obj *o = p->rss_obj;
4068 struct bnx2x_raw_obj *r = &o->raw;
4070 /* Do nothing if only driver cleanup was requested */
4071 if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
4076 rc = o->config_rss(bp, p);
4078 r->clear_pending(r);
4082 if (test_bit(RAMROD_COMP_WAIT, &p->ramrod_flags))
4083 rc = r->wait_comp(bp, r);
4089 void bnx2x_init_rss_config_obj(struct bnx2x *bp,
4090 struct bnx2x_rss_config_obj *rss_obj,
4091 u8 cl_id, u32 cid, u8 func_id, u8 engine_id,
4092 void *rdata, dma_addr_t rdata_mapping,
4093 int state, unsigned long *pstate,
4094 bnx2x_obj_type type)
4096 bnx2x_init_raw_obj(&rss_obj->raw, cl_id, cid, func_id, rdata,
4097 rdata_mapping, state, pstate, type);
4099 rss_obj->engine_id = engine_id;
4100 rss_obj->config_rss = bnx2x_setup_rss;
4103 /********************** Queue state object ***********************************/
4106 * bnx2x_queue_state_change - perform Queue state change transition
4108 * @bp: device handle
4109 * @params: parameters to perform the transition
4111 * returns 0 in case of successfully completed transition, negative error
4112 * code in case of failure, positive (EBUSY) value if there is a completion
4113 * to that is still pending (possible only if RAMROD_COMP_WAIT is
4114 * not set in params->ramrod_flags for asynchronous commands).
4117 int bnx2x_queue_state_change(struct bnx2x *bp,
4118 struct bnx2x_queue_state_params *params)
4120 struct bnx2x_queue_sp_obj *o = params->q_obj;
4121 int rc, pending_bit;
4122 unsigned long *pending = &o->pending;
4124 /* Check that the requested transition is legal */
4125 if (o->check_transition(bp, o, params))
4128 /* Set "pending" bit */
4129 pending_bit = o->set_pending(o, params);
4131 /* Don't send a command if only driver cleanup was requested */
4132 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags))
4133 o->complete_cmd(bp, o, pending_bit);
4136 rc = o->send_cmd(bp, params);
4138 o->next_state = BNX2X_Q_STATE_MAX;
4139 clear_bit(pending_bit, pending);
4140 smp_mb__after_clear_bit();
4144 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
4145 rc = o->wait_comp(bp, o, pending_bit);
4153 return !!test_bit(pending_bit, pending);
4157 static int bnx2x_queue_set_pending(struct bnx2x_queue_sp_obj *obj,
4158 struct bnx2x_queue_state_params *params)
4160 enum bnx2x_queue_cmd cmd = params->cmd, bit;
4162 /* ACTIVATE and DEACTIVATE commands are implemented on top of
4165 if ((cmd == BNX2X_Q_CMD_ACTIVATE) ||
4166 (cmd == BNX2X_Q_CMD_DEACTIVATE))
4167 bit = BNX2X_Q_CMD_UPDATE;
4171 set_bit(bit, &obj->pending);
4175 static int bnx2x_queue_wait_comp(struct bnx2x *bp,
4176 struct bnx2x_queue_sp_obj *o,
4177 enum bnx2x_queue_cmd cmd)
4179 return bnx2x_state_wait(bp, cmd, &o->pending);
4183 * bnx2x_queue_comp_cmd - complete the state change command.
4185 * @bp: device handle
4189 * Checks that the arrived completion is expected.
4191 static int bnx2x_queue_comp_cmd(struct bnx2x *bp,
4192 struct bnx2x_queue_sp_obj *o,
4193 enum bnx2x_queue_cmd cmd)
4195 unsigned long cur_pending = o->pending;
4197 if (!test_and_clear_bit(cmd, &cur_pending)) {
4198 BNX2X_ERR("Bad MC reply %d for queue %d in state %d "
4199 "pending 0x%lx, next_state %d\n", cmd, o->cid,
4200 o->state, cur_pending, o->next_state);
4204 DP(BNX2X_MSG_SP, "Completing command %d for queue %d, "
4205 "setting state to %d\n", cmd, o->cid, o->next_state);
4207 o->state = o->next_state;
4208 o->next_state = BNX2X_Q_STATE_MAX;
4210 /* It's important that o->state and o->next_state are
4211 * updated before o->pending.
4215 clear_bit(cmd, &o->pending);
4216 smp_mb__after_clear_bit();
4221 static void bnx2x_q_fill_setup_data_e2(struct bnx2x *bp,
4222 struct bnx2x_queue_state_params *cmd_params,
4223 struct client_init_ramrod_data *data)
4225 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4229 /* IPv6 TPA supported for E2 and above only */
4230 data->rx.tpa_en |= test_bit(BNX2X_Q_FLG_TPA, ¶ms->flags) *
4231 CLIENT_INIT_RX_DATA_TPA_EN_IPV6;
4234 static void bnx2x_q_fill_setup_data_cmn(struct bnx2x *bp,
4235 struct bnx2x_queue_state_params *cmd_params,
4236 struct client_init_ramrod_data *data)
4238 struct bnx2x_queue_sp_obj *o = cmd_params->q_obj;
4239 struct bnx2x_queue_setup_params *params = &cmd_params->params.setup;
4243 data->general.client_id = o->cl_id;
4245 if (test_bit(BNX2X_Q_FLG_STATS, ¶ms->flags)) {
4246 data->general.statistics_counter_id =
4247 params->gen_params.stat_id;
4248 data->general.statistics_en_flg = 1;
4249 data->general.statistics_zero_flg =
4250 test_bit(BNX2X_Q_FLG_ZERO_STATS, ¶ms->flags);
4252 data->general.statistics_counter_id =
4253 DISABLE_STATISTIC_COUNTER_ID_VALUE;
4255 data->general.is_fcoe_flg = test_bit(BNX2X_Q_FLG_FCOE, ¶ms->flags);
4256 data->general.activate_flg = test_bit(BNX2X_Q_FLG_ACTIVE,
4258 data->general.sp_client_id = params->gen_params.spcl_id;
4259 data->general.mtu = cpu_to_le16(params->gen_params.mtu);
4260 data->general.func_id = o->func_id;
4263 data->general.cos = params->txq_params.cos;
4265 data->general.traffic_type =
4266 test_bit(BNX2X_Q_FLG_FCOE, ¶ms->flags) ?
4267 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
4270 data->rx.tpa_en = test_bit(BNX2X_Q_FLG_TPA, ¶ms->flags) *
4271 CLIENT_INIT_RX_DATA_TPA_EN_IPV4;
4272 data->rx.vmqueue_mode_en_flg = 0;
4274 data->rx.cache_line_alignment_log_size =
4275 params->rxq_params.cache_line_log;
4276 data->rx.enable_dynamic_hc =
4277 test_bit(BNX2X_Q_FLG_DHC, ¶ms->flags);
4278 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
4279 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
4280 data->rx.max_agg_size = cpu_to_le16(params->rxq_params.tpa_agg_sz);
4282 /* Always start in DROP_ALL mode */
4283 data->rx.state = cpu_to_le16(CLIENT_INIT_RX_DATA_UCAST_DROP_ALL |
4284 CLIENT_INIT_RX_DATA_MCAST_DROP_ALL);
4286 /* We don't set drop flags */
4287 data->rx.drop_ip_cs_err_flg = 0;
4288 data->rx.drop_tcp_cs_err_flg = 0;
4289 data->rx.drop_ttl0_flg = 0;
4290 data->rx.drop_udp_cs_err_flg = 0;
4291 data->rx.inner_vlan_removal_enable_flg =
4292 test_bit(BNX2X_Q_FLG_VLAN, ¶ms->flags);
4293 data->rx.outer_vlan_removal_enable_flg =
4294 test_bit(BNX2X_Q_FLG_OV, ¶ms->flags);
4295 data->rx.status_block_id = params->rxq_params.fw_sb_id;
4296 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
4297 data->rx.max_tpa_queues = params->rxq_params.max_tpa_queues;
4298 data->rx.max_bytes_on_bd = cpu_to_le16(params->rxq_params.buf_sz);
4299 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
4300 data->rx.bd_page_base.lo =
4301 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
4302 data->rx.bd_page_base.hi =
4303 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
4304 data->rx.sge_page_base.lo =
4305 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
4306 data->rx.sge_page_base.hi =
4307 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
4308 data->rx.cqe_page_base.lo =
4309 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
4310 data->rx.cqe_page_base.hi =
4311 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
4312 data->rx.is_leading_rss = test_bit(BNX2X_Q_FLG_LEADING_RSS,
4315 if (test_bit(BNX2X_Q_FLG_MCAST, ¶ms->flags)) {
4316 data->rx.approx_mcast_engine_id = o->func_id;
4317 data->rx.is_approx_mcast = 1;
4320 data->rx.rss_engine_id = params->rxq_params.rss_engine_id;
4322 /* flow control data */
4323 data->rx.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
4324 data->rx.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
4325 data->rx.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
4326 data->rx.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
4327 data->rx.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
4328 data->rx.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
4329 data->rx.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
4331 /* silent vlan removal */
4332 data->rx.silent_vlan_removal_flg =
4333 test_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, ¶ms->flags);
4334 data->rx.silent_vlan_value =
4335 cpu_to_le16(params->rxq_params.silent_removal_value);
4336 data->rx.silent_vlan_mask =
4337 cpu_to_le16(params->rxq_params.silent_removal_mask);
4340 data->tx.enforce_security_flg =
4341 test_bit(BNX2X_Q_FLG_TX_SEC, ¶ms->flags);
4342 data->tx.default_vlan =
4343 cpu_to_le16(params->txq_params.default_vlan);
4344 data->tx.default_vlan_flg =
4345 test_bit(BNX2X_Q_FLG_DEF_VLAN, ¶ms->flags);
4346 data->tx.tx_switching_flg =
4347 test_bit(BNX2X_Q_FLG_TX_SWITCH, ¶ms->flags);
4348 data->tx.anti_spoofing_flg =
4349 test_bit(BNX2X_Q_FLG_ANTI_SPOOF, ¶ms->flags);
4350 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
4351 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
4352 data->tx.tss_leading_client_id = params->txq_params.tss_leading_cl_id;
4354 data->tx.tx_bd_page_base.lo =
4355 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
4356 data->tx.tx_bd_page_base.hi =
4357 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
4359 /* Don't configure any Tx switching mode during queue SETUP */
4365 * bnx2x_q_init - init HW/FW queue
4367 * @bp: device handle
4370 * HW/FW initial Queue configuration:
4372 * - CDU context validation
4375 static inline int bnx2x_q_init(struct bnx2x *bp,
4376 struct bnx2x_queue_state_params *params)
4378 struct bnx2x_queue_sp_obj *o = params->q_obj;
4379 struct bnx2x_queue_init_params *init = ¶ms->params.init;
4382 /* Tx HC configuration */
4383 if (test_bit(BNX2X_Q_TYPE_HAS_TX, &o->type) &&
4384 test_bit(BNX2X_Q_FLG_HC, &init->tx.flags)) {
4385 hc_usec = init->tx.hc_rate ? 1000000 / init->tx.hc_rate : 0;
4387 bnx2x_update_coalesce_sb_index(bp, init->tx.fw_sb_id,
4388 init->tx.sb_cq_index,
4389 !test_bit(BNX2X_Q_FLG_HC_EN, &init->tx.flags),
4393 /* Rx HC configuration */
4394 if (test_bit(BNX2X_Q_TYPE_HAS_RX, &o->type) &&
4395 test_bit(BNX2X_Q_FLG_HC, &init->rx.flags)) {
4396 hc_usec = init->rx.hc_rate ? 1000000 / init->rx.hc_rate : 0;
4398 bnx2x_update_coalesce_sb_index(bp, init->rx.fw_sb_id,
4399 init->rx.sb_cq_index,
4400 !test_bit(BNX2X_Q_FLG_HC_EN, &init->rx.flags),
4404 /* Set CDU context validation values */
4405 bnx2x_set_ctx_validation(bp, init->cxt, o->cid);
4407 /* As no ramrod is sent, complete the command immediately */
4408 o->complete_cmd(bp, o, BNX2X_Q_CMD_INIT);
4416 static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
4417 struct bnx2x_queue_state_params *params)
4419 struct bnx2x_queue_sp_obj *o = params->q_obj;
4420 struct client_init_ramrod_data *rdata =
4421 (struct client_init_ramrod_data *)o->rdata;
4422 dma_addr_t data_mapping = o->rdata_mapping;
4423 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4425 /* Clear the ramrod data */
4426 memset(rdata, 0, sizeof(*rdata));
4428 /* Fill the ramrod data */
4429 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4433 return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping),
4434 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4437 static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
4438 struct bnx2x_queue_state_params *params)
4440 struct bnx2x_queue_sp_obj *o = params->q_obj;
4441 struct client_init_ramrod_data *rdata =
4442 (struct client_init_ramrod_data *)o->rdata;
4443 dma_addr_t data_mapping = o->rdata_mapping;
4444 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
4446 /* Clear the ramrod data */
4447 memset(rdata, 0, sizeof(*rdata));
4449 /* Fill the ramrod data */
4450 bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
4451 bnx2x_q_fill_setup_data_e2(bp, params, rdata);
4455 return bnx2x_sp_post(bp, ramrod, o->cid, U64_HI(data_mapping),
4456 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4459 static void bnx2x_q_fill_update_data(struct bnx2x *bp,
4460 struct bnx2x_queue_sp_obj *obj,
4461 struct bnx2x_queue_update_params *params,
4462 struct client_update_ramrod_data *data)
4464 /* Client ID of the client to update */
4465 data->client_id = obj->cl_id;
4467 /* Function ID of the client to update */
4468 data->func_id = obj->func_id;
4470 /* Default VLAN value */
4471 data->default_vlan = cpu_to_le16(params->def_vlan);
4473 /* Inner VLAN stripping */
4474 data->inner_vlan_removal_enable_flg =
4475 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM, ¶ms->update_flags);
4476 data->inner_vlan_removal_change_flg =
4477 test_bit(BNX2X_Q_UPDATE_IN_VLAN_REM_CHNG,
4478 ¶ms->update_flags);
4480 /* Outer VLAN sripping */
4481 data->outer_vlan_removal_enable_flg =
4482 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM, ¶ms->update_flags);
4483 data->outer_vlan_removal_change_flg =
4484 test_bit(BNX2X_Q_UPDATE_OUT_VLAN_REM_CHNG,
4485 ¶ms->update_flags);
4487 /* Drop packets that have source MAC that doesn't belong to this
4490 data->anti_spoofing_enable_flg =
4491 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF, ¶ms->update_flags);
4492 data->anti_spoofing_change_flg =
4493 test_bit(BNX2X_Q_UPDATE_ANTI_SPOOF_CHNG, ¶ms->update_flags);
4495 /* Activate/Deactivate */
4496 data->activate_flg =
4497 test_bit(BNX2X_Q_UPDATE_ACTIVATE, ¶ms->update_flags);
4498 data->activate_change_flg =
4499 test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, ¶ms->update_flags);
4501 /* Enable default VLAN */
4502 data->default_vlan_enable_flg =
4503 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN, ¶ms->update_flags);
4504 data->default_vlan_change_flg =
4505 test_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
4506 ¶ms->update_flags);
4508 /* silent vlan removal */
4509 data->silent_vlan_change_flg =
4510 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4511 ¶ms->update_flags);
4512 data->silent_vlan_removal_flg =
4513 test_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM, ¶ms->update_flags);
4514 data->silent_vlan_value = cpu_to_le16(params->silent_removal_value);
4515 data->silent_vlan_mask = cpu_to_le16(params->silent_removal_mask);
4518 static inline int bnx2x_q_send_update(struct bnx2x *bp,
4519 struct bnx2x_queue_state_params *params)
4521 struct bnx2x_queue_sp_obj *o = params->q_obj;
4522 struct client_update_ramrod_data *rdata =
4523 (struct client_update_ramrod_data *)o->rdata;
4524 dma_addr_t data_mapping = o->rdata_mapping;
4526 /* Clear the ramrod data */
4527 memset(rdata, 0, sizeof(*rdata));
4529 /* Fill the ramrod data */
4530 bnx2x_q_fill_update_data(bp, o, ¶ms->params.update, rdata);
4534 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE, o->cid,
4535 U64_HI(data_mapping),
4536 U64_LO(data_mapping), ETH_CONNECTION_TYPE);
4540 * bnx2x_q_send_deactivate - send DEACTIVATE command
4542 * @bp: device handle
4545 * implemented using the UPDATE command.
4547 static inline int bnx2x_q_send_deactivate(struct bnx2x *bp,
4548 struct bnx2x_queue_state_params *params)
4550 struct bnx2x_queue_update_params *update = ¶ms->params.update;
4552 memset(update, 0, sizeof(*update));
4554 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4556 return bnx2x_q_send_update(bp, params);
4560 * bnx2x_q_send_activate - send ACTIVATE command
4562 * @bp: device handle
4565 * implemented using the UPDATE command.
4567 static inline int bnx2x_q_send_activate(struct bnx2x *bp,
4568 struct bnx2x_queue_state_params *params)
4570 struct bnx2x_queue_update_params *update = ¶ms->params.update;
4572 memset(update, 0, sizeof(*update));
4574 __set_bit(BNX2X_Q_UPDATE_ACTIVATE, &update->update_flags);
4575 __set_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG, &update->update_flags);
4577 return bnx2x_q_send_update(bp, params);
/* UPDATE_TPA command handler. */
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
					struct bnx2x_queue_state_params *params)
{
	/* TODO: Not implemented yet. */
	return -1;
}
4587 static inline int bnx2x_q_send_halt(struct bnx2x *bp,
4588 struct bnx2x_queue_state_params *params)
4590 struct bnx2x_queue_sp_obj *o = params->q_obj;
4592 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, o->cid, 0, o->cl_id,
4593 ETH_CONNECTION_TYPE);
4596 static inline int bnx2x_q_send_cfc_del(struct bnx2x *bp,
4597 struct bnx2x_queue_state_params *params)
4599 struct bnx2x_queue_sp_obj *o = params->q_obj;
4601 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, o->cid, 0, 0,
4602 NONE_CONNECTION_TYPE);
4605 static inline int bnx2x_q_send_terminate(struct bnx2x *bp,
4606 struct bnx2x_queue_state_params *params)
4608 struct bnx2x_queue_sp_obj *o = params->q_obj;
4610 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, o->cid, 0, 0,
4611 ETH_CONNECTION_TYPE);
4614 static inline int bnx2x_q_send_empty(struct bnx2x *bp,
4615 struct bnx2x_queue_state_params *params)
4617 struct bnx2x_queue_sp_obj *o = params->q_obj;
4619 return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_EMPTY, o->cid, 0, 0,
4620 ETH_CONNECTION_TYPE);
4623 static inline int bnx2x_queue_send_cmd_cmn(struct bnx2x *bp,
4624 struct bnx2x_queue_state_params *params)
4626 switch (params->cmd) {
4627 case BNX2X_Q_CMD_INIT:
4628 return bnx2x_q_init(bp, params);
4629 case BNX2X_Q_CMD_DEACTIVATE:
4630 return bnx2x_q_send_deactivate(bp, params);
4631 case BNX2X_Q_CMD_ACTIVATE:
4632 return bnx2x_q_send_activate(bp, params);
4633 case BNX2X_Q_CMD_UPDATE:
4634 return bnx2x_q_send_update(bp, params);
4635 case BNX2X_Q_CMD_UPDATE_TPA:
4636 return bnx2x_q_send_update_tpa(bp, params);
4637 case BNX2X_Q_CMD_HALT:
4638 return bnx2x_q_send_halt(bp, params);
4639 case BNX2X_Q_CMD_CFC_DEL:
4640 return bnx2x_q_send_cfc_del(bp, params);
4641 case BNX2X_Q_CMD_TERMINATE:
4642 return bnx2x_q_send_terminate(bp, params);
4643 case BNX2X_Q_CMD_EMPTY:
4644 return bnx2x_q_send_empty(bp, params);
4646 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4651 static int bnx2x_queue_send_cmd_e1x(struct bnx2x *bp,
4652 struct bnx2x_queue_state_params *params)
4654 switch (params->cmd) {
4655 case BNX2X_Q_CMD_SETUP:
4656 return bnx2x_q_send_setup_e1x(bp, params);
4657 case BNX2X_Q_CMD_INIT:
4658 case BNX2X_Q_CMD_DEACTIVATE:
4659 case BNX2X_Q_CMD_ACTIVATE:
4660 case BNX2X_Q_CMD_UPDATE:
4661 case BNX2X_Q_CMD_UPDATE_TPA:
4662 case BNX2X_Q_CMD_HALT:
4663 case BNX2X_Q_CMD_CFC_DEL:
4664 case BNX2X_Q_CMD_TERMINATE:
4665 case BNX2X_Q_CMD_EMPTY:
4666 return bnx2x_queue_send_cmd_cmn(bp, params);
4668 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4673 static int bnx2x_queue_send_cmd_e2(struct bnx2x *bp,
4674 struct bnx2x_queue_state_params *params)
4676 switch (params->cmd) {
4677 case BNX2X_Q_CMD_SETUP:
4678 return bnx2x_q_send_setup_e2(bp, params);
4679 case BNX2X_Q_CMD_INIT:
4680 case BNX2X_Q_CMD_DEACTIVATE:
4681 case BNX2X_Q_CMD_ACTIVATE:
4682 case BNX2X_Q_CMD_UPDATE:
4683 case BNX2X_Q_CMD_UPDATE_TPA:
4684 case BNX2X_Q_CMD_HALT:
4685 case BNX2X_Q_CMD_CFC_DEL:
4686 case BNX2X_Q_CMD_TERMINATE:
4687 case BNX2X_Q_CMD_EMPTY:
4688 return bnx2x_queue_send_cmd_cmn(bp, params);
4690 BNX2X_ERR("Unknown command: %d\n", params->cmd);
4696 * bnx2x_queue_chk_transition - check state machine of a regular Queue
4698 * @bp: device handle
4703 * It both checks if the requested command is legal in a current
4704 * state and, if it's legal, sets a `next_state' in the object
4705 * that will be used in the completion flow to set the `state'
4708 * returns 0 if a requested command is a legal transition,
4709 * -EINVAL otherwise.
4711 static int bnx2x_queue_chk_transition(struct bnx2x *bp,
4712 struct bnx2x_queue_sp_obj *o,
4713 struct bnx2x_queue_state_params *params)
4715 enum bnx2x_q_state state = o->state, next_state = BNX2X_Q_STATE_MAX;
4716 enum bnx2x_queue_cmd cmd = params->cmd;
4719 case BNX2X_Q_STATE_RESET:
4720 if (cmd == BNX2X_Q_CMD_INIT)
4721 next_state = BNX2X_Q_STATE_INITIALIZED;
4724 case BNX2X_Q_STATE_INITIALIZED:
4725 if (cmd == BNX2X_Q_CMD_SETUP) {
4726 if (test_bit(BNX2X_Q_FLG_ACTIVE,
4727 ¶ms->params.setup.flags))
4728 next_state = BNX2X_Q_STATE_ACTIVE;
4730 next_state = BNX2X_Q_STATE_INACTIVE;
4734 case BNX2X_Q_STATE_ACTIVE:
4735 if (cmd == BNX2X_Q_CMD_DEACTIVATE)
4736 next_state = BNX2X_Q_STATE_INACTIVE;
4738 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4739 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4740 next_state = BNX2X_Q_STATE_ACTIVE;
4742 else if (cmd == BNX2X_Q_CMD_HALT)
4743 next_state = BNX2X_Q_STATE_STOPPED;
4745 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4746 struct bnx2x_queue_update_params *update_params =
4747 ¶ms->params.update;
4749 /* If "active" state change is requested, update the
4750 * state accordingly.
4752 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4753 &update_params->update_flags) &&
4754 !test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4755 &update_params->update_flags))
4756 next_state = BNX2X_Q_STATE_INACTIVE;
4758 next_state = BNX2X_Q_STATE_ACTIVE;
4762 case BNX2X_Q_STATE_INACTIVE:
4763 if (cmd == BNX2X_Q_CMD_ACTIVATE)
4764 next_state = BNX2X_Q_STATE_ACTIVE;
4766 else if ((cmd == BNX2X_Q_CMD_EMPTY) ||
4767 (cmd == BNX2X_Q_CMD_UPDATE_TPA))
4768 next_state = BNX2X_Q_STATE_INACTIVE;
4770 else if (cmd == BNX2X_Q_CMD_HALT)
4771 next_state = BNX2X_Q_STATE_STOPPED;
4773 else if (cmd == BNX2X_Q_CMD_UPDATE) {
4774 struct bnx2x_queue_update_params *update_params =
4775 ¶ms->params.update;
4777 /* If "active" state change is requested, update the
4778 * state accordingly.
4780 if (test_bit(BNX2X_Q_UPDATE_ACTIVATE_CHNG,
4781 &update_params->update_flags) &&
4782 test_bit(BNX2X_Q_UPDATE_ACTIVATE,
4783 &update_params->update_flags))
4784 next_state = BNX2X_Q_STATE_ACTIVE;
4786 next_state = BNX2X_Q_STATE_INACTIVE;
4790 case BNX2X_Q_STATE_STOPPED:
4791 if (cmd == BNX2X_Q_CMD_TERMINATE)
4792 next_state = BNX2X_Q_STATE_TERMINATED;
4795 case BNX2X_Q_STATE_TERMINATED:
4796 if (cmd == BNX2X_Q_CMD_CFC_DEL)
4797 next_state = BNX2X_Q_STATE_RESET;
4801 BNX2X_ERR("Illegal state: %d\n", state);
4804 /* Transition is assured */
4805 if (next_state != BNX2X_Q_STATE_MAX) {
4806 DP(BNX2X_MSG_SP, "Good state transition: %d(%d)->%d\n",
4807 state, cmd, next_state);
4808 o->next_state = next_state;
4812 DP(BNX2X_MSG_SP, "Bad state transition request: %d %d\n", state, cmd);
4817 void bnx2x_init_queue_obj(struct bnx2x *bp,
4818 struct bnx2x_queue_sp_obj *obj,
4819 u8 cl_id, u32 cid, u8 func_id, void *rdata,
4820 dma_addr_t rdata_mapping, unsigned long type)
4822 memset(obj, 0, sizeof(*obj));
4826 obj->func_id = func_id;
4828 obj->rdata_mapping = rdata_mapping;
4830 obj->next_state = BNX2X_Q_STATE_MAX;
4832 if (CHIP_IS_E1x(bp))
4833 obj->send_cmd = bnx2x_queue_send_cmd_e1x;
4835 obj->send_cmd = bnx2x_queue_send_cmd_e2;
4837 obj->check_transition = bnx2x_queue_chk_transition;
4839 obj->complete_cmd = bnx2x_queue_comp_cmd;
4840 obj->wait_comp = bnx2x_queue_wait_comp;
4841 obj->set_pending = bnx2x_queue_set_pending;
4844 /********************** Function state object *********************************/
4846 static int bnx2x_func_wait_comp(struct bnx2x *bp,
4847 struct bnx2x_func_sp_obj *o,
4848 enum bnx2x_func_cmd cmd)
4850 return bnx2x_state_wait(bp, cmd, &o->pending);
4854 * bnx2x_func_state_change_comp - complete the state machine transition
4856 * @bp: device handle
4860 * Called on state change transition. Completes the state
4861 * machine transition only - no HW interaction.
4863 static inline int bnx2x_func_state_change_comp(struct bnx2x *bp,
4864 struct bnx2x_func_sp_obj *o,
4865 enum bnx2x_func_cmd cmd)
4867 unsigned long cur_pending = o->pending;
4869 if (!test_and_clear_bit(cmd, &cur_pending)) {
4870 BNX2X_ERR("Bad MC reply %d for func %d in state %d "
4871 "pending 0x%lx, next_state %d\n", cmd, BP_FUNC(bp),
4872 o->state, cur_pending, o->next_state);
4876 DP(BNX2X_MSG_SP, "Completing command %d for func %d, setting state to "
4877 "%d\n", cmd, BP_FUNC(bp), o->next_state);
4879 o->state = o->next_state;
4880 o->next_state = BNX2X_F_STATE_MAX;
4882 /* It's important that o->state and o->next_state are
4883 * updated before o->pending.
4887 clear_bit(cmd, &o->pending);
4888 smp_mb__after_clear_bit();
4894 * bnx2x_func_comp_cmd - complete the state change command
4896 * @bp: device handle
4900 * Checks that the arrived completion is expected.
4902 static int bnx2x_func_comp_cmd(struct bnx2x *bp,
4903 struct bnx2x_func_sp_obj *o,
4904 enum bnx2x_func_cmd cmd)
4906 /* Complete the state machine part first, check if it's a
4909 int rc = bnx2x_func_state_change_comp(bp, o, cmd);
4914 * bnx2x_func_chk_transition - perform function state machine transition
4916 * @bp: device handle
4920 * It both checks if the requested command is legal in a current
4921 * state and, if it's legal, sets a `next_state' in the object
4922 * that will be used in the completion flow to set the `state'
4925 * returns 0 if a requested command is a legal transition,
4926 * -EINVAL otherwise.
4928 static int bnx2x_func_chk_transition(struct bnx2x *bp,
4929 struct bnx2x_func_sp_obj *o,
4930 struct bnx2x_func_state_params *params)
4932 enum bnx2x_func_state state = o->state, next_state = BNX2X_F_STATE_MAX;
4933 enum bnx2x_func_cmd cmd = params->cmd;
4936 case BNX2X_F_STATE_RESET:
4937 if (cmd == BNX2X_F_CMD_HW_INIT)
4938 next_state = BNX2X_F_STATE_INITIALIZED;
4941 case BNX2X_F_STATE_INITIALIZED:
4942 if (cmd == BNX2X_F_CMD_START)
4943 next_state = BNX2X_F_STATE_STARTED;
4945 else if (cmd == BNX2X_F_CMD_HW_RESET)
4946 next_state = BNX2X_F_STATE_RESET;
4949 case BNX2X_F_STATE_STARTED:
4950 if (cmd == BNX2X_F_CMD_STOP)
4951 next_state = BNX2X_F_STATE_INITIALIZED;
4955 BNX2X_ERR("Unknown state: %d\n", state);
4958 /* Transition is assured */
4959 if (next_state != BNX2X_F_STATE_MAX) {
4960 DP(BNX2X_MSG_SP, "Good function state transition: %d(%d)->%d\n",
4961 state, cmd, next_state);
4962 o->next_state = next_state;
4966 DP(BNX2X_MSG_SP, "Bad function state transition request: %d %d\n",
4973 * bnx2x_func_init_func - performs HW init at function stage
4975 * @bp: device handle
4978 * Init HW when the current phase is
4979 * FW_MSG_CODE_DRV_LOAD_FUNCTION: initialize only FUNCTION-only
4982 static inline int bnx2x_func_init_func(struct bnx2x *bp,
4983 const struct bnx2x_func_sp_drv_ops *drv)
4985 return drv->init_hw_func(bp);
4989 * bnx2x_func_init_port - performs HW init at port stage
4991 * @bp: device handle
4994 * Init HW when the current phase is
4995 * FW_MSG_CODE_DRV_LOAD_PORT: initialize PORT-only and
4996 * FUNCTION-only HW blocks.
4999 static inline int bnx2x_func_init_port(struct bnx2x *bp,
5000 const struct bnx2x_func_sp_drv_ops *drv)
5002 int rc = drv->init_hw_port(bp);
5006 return bnx2x_func_init_func(bp, drv);
5010 * bnx2x_func_init_cmn_chip - performs HW init at chip-common stage
5012 * @bp: device handle
5015 * Init HW when the current phase is
5016 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON_CHIP,
5017 * PORT-only and FUNCTION-only HW blocks.
5019 static inline int bnx2x_func_init_cmn_chip(struct bnx2x *bp,
5020 const struct bnx2x_func_sp_drv_ops *drv)
5022 int rc = drv->init_hw_cmn_chip(bp);
5026 return bnx2x_func_init_port(bp, drv);
5030 * bnx2x_func_init_cmn - performs HW init at common stage
5032 * @bp: device handle
5035 * Init HW when the current phase is
5036 * FW_MSG_CODE_DRV_LOAD_COMMON_CHIP: initialize COMMON,
5037 * PORT-only and FUNCTION-only HW blocks.
5039 static inline int bnx2x_func_init_cmn(struct bnx2x *bp,
5040 const struct bnx2x_func_sp_drv_ops *drv)
5042 int rc = drv->init_hw_cmn(bp);
5046 return bnx2x_func_init_port(bp, drv);
5049 static int bnx2x_func_hw_init(struct bnx2x *bp,
5050 struct bnx2x_func_state_params *params)
5052 u32 load_code = params->params.hw_init.load_phase;
5053 struct bnx2x_func_sp_obj *o = params->f_obj;
5054 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5057 DP(BNX2X_MSG_SP, "function %d load_code %x\n",
5058 BP_ABS_FUNC(bp), load_code);
5060 /* Prepare buffers for unzipping the FW */
5061 rc = drv->gunzip_init(bp);
5066 rc = drv->init_fw(bp);
5068 BNX2X_ERR("Error loading firmware\n");
5072 /* Handle the beginning of COMMON_XXX pases separatelly... */
5073 switch (load_code) {
5074 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
5075 rc = bnx2x_func_init_cmn_chip(bp, drv);
5080 case FW_MSG_CODE_DRV_LOAD_COMMON:
5081 rc = bnx2x_func_init_cmn(bp, drv);
5086 case FW_MSG_CODE_DRV_LOAD_PORT:
5087 rc = bnx2x_func_init_port(bp, drv);
5092 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5093 rc = bnx2x_func_init_func(bp, drv);
5099 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5104 drv->release_fw(bp);
5107 drv->gunzip_end(bp);
5109 /* In case of success, complete the comand immediatelly: no ramrods
5113 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_INIT);
5119 * bnx2x_func_reset_func - reset HW at function stage
5121 * @bp: device handle
5124 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_FUNCTION stage: reset only
5125 * FUNCTION-only HW blocks.
5127 static inline void bnx2x_func_reset_func(struct bnx2x *bp,
5128 const struct bnx2x_func_sp_drv_ops *drv)
5130 drv->reset_hw_func(bp);
5134 * bnx2x_func_reset_port - reser HW at port stage
5136 * @bp: device handle
5139 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_PORT stage: reset
5140 * FUNCTION-only and PORT-only HW blocks.
5144 * It's important to call reset_port before reset_func() as the last thing
5145 * reset_func does is pf_disable() thus disabling PGLUE_B, which
5146 * makes impossible any DMAE transactions.
5148 static inline void bnx2x_func_reset_port(struct bnx2x *bp,
5149 const struct bnx2x_func_sp_drv_ops *drv)
5151 drv->reset_hw_port(bp);
5152 bnx2x_func_reset_func(bp, drv);
5156 * bnx2x_func_reset_cmn - reser HW at common stage
5158 * @bp: device handle
5161 * Reset HW at FW_MSG_CODE_DRV_UNLOAD_COMMON and
5162 * FW_MSG_CODE_DRV_UNLOAD_COMMON_CHIP stages: reset COMMON,
5163 * COMMON_CHIP, FUNCTION-only and PORT-only HW blocks.
5165 static inline void bnx2x_func_reset_cmn(struct bnx2x *bp,
5166 const struct bnx2x_func_sp_drv_ops *drv)
5168 bnx2x_func_reset_port(bp, drv);
5169 drv->reset_hw_cmn(bp);
5173 static inline int bnx2x_func_hw_reset(struct bnx2x *bp,
5174 struct bnx2x_func_state_params *params)
5176 u32 reset_phase = params->params.hw_reset.reset_phase;
5177 struct bnx2x_func_sp_obj *o = params->f_obj;
5178 const struct bnx2x_func_sp_drv_ops *drv = o->drv;
5180 DP(BNX2X_MSG_SP, "function %d reset_phase %x\n", BP_ABS_FUNC(bp),
5183 switch (reset_phase) {
5184 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5185 bnx2x_func_reset_cmn(bp, drv);
5187 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5188 bnx2x_func_reset_port(bp, drv);
5190 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5191 bnx2x_func_reset_func(bp, drv);
5194 BNX2X_ERR("Unknown reset_phase (0x%x) from MCP\n",
5199 /* Complete the comand immediatelly: no ramrods have been sent. */
5200 o->complete_cmd(bp, o, BNX2X_F_CMD_HW_RESET);
5205 static inline int bnx2x_func_send_start(struct bnx2x *bp,
5206 struct bnx2x_func_state_params *params)
5208 struct bnx2x_func_sp_obj *o = params->f_obj;
5209 struct function_start_data *rdata =
5210 (struct function_start_data *)o->rdata;
5211 dma_addr_t data_mapping = o->rdata_mapping;
5212 struct bnx2x_func_start_params *start_params = ¶ms->params.start;
5214 memset(rdata, 0, sizeof(*rdata));
5216 /* Fill the ramrod data with provided parameters */
5217 rdata->function_mode = cpu_to_le16(start_params->mf_mode);
5218 rdata->sd_vlan_tag = start_params->sd_vlan_tag;
5219 rdata->path_id = BP_PATH(bp);
5220 rdata->network_cos_mode = start_params->network_cos_mode;
5224 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0,
5225 U64_HI(data_mapping),
5226 U64_LO(data_mapping), NONE_CONNECTION_TYPE);
5229 static inline int bnx2x_func_send_stop(struct bnx2x *bp,
5230 struct bnx2x_func_state_params *params)
5232 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0,
5233 NONE_CONNECTION_TYPE);
5236 static int bnx2x_func_send_cmd(struct bnx2x *bp,
5237 struct bnx2x_func_state_params *params)
5239 switch (params->cmd) {
5240 case BNX2X_F_CMD_HW_INIT:
5241 return bnx2x_func_hw_init(bp, params);
5242 case BNX2X_F_CMD_START:
5243 return bnx2x_func_send_start(bp, params);
5244 case BNX2X_F_CMD_STOP:
5245 return bnx2x_func_send_stop(bp, params);
5246 case BNX2X_F_CMD_HW_RESET:
5247 return bnx2x_func_hw_reset(bp, params);
5249 BNX2X_ERR("Unknown command: %d\n", params->cmd);
5254 void bnx2x_init_func_obj(struct bnx2x *bp,
5255 struct bnx2x_func_sp_obj *obj,
5256 void *rdata, dma_addr_t rdata_mapping,
5257 struct bnx2x_func_sp_drv_ops *drv_iface)
5259 memset(obj, 0, sizeof(*obj));
5261 mutex_init(&obj->one_pending_mutex);
5264 obj->rdata_mapping = rdata_mapping;
5266 obj->send_cmd = bnx2x_func_send_cmd;
5267 obj->check_transition = bnx2x_func_chk_transition;
5268 obj->complete_cmd = bnx2x_func_comp_cmd;
5269 obj->wait_comp = bnx2x_func_wait_comp;
5271 obj->drv = drv_iface;
5275 * bnx2x_func_state_change - perform Function state change transition
5277 * @bp: device handle
5278 * @params: parameters to perform the transaction
5280 * returns 0 in case of successfully completed transition,
5281 * negative error code in case of failure, positive
5282 * (EBUSY) value if there is a completion to that is
5283 * still pending (possible only if RAMROD_COMP_WAIT is
5284 * not set in params->ramrod_flags for asynchronous
5287 int bnx2x_func_state_change(struct bnx2x *bp,
5288 struct bnx2x_func_state_params *params)
5290 struct bnx2x_func_sp_obj *o = params->f_obj;
5292 enum bnx2x_func_cmd cmd = params->cmd;
5293 unsigned long *pending = &o->pending;
5295 mutex_lock(&o->one_pending_mutex);
5297 /* Check that the requested transition is legal */
5298 if (o->check_transition(bp, o, params)) {
5299 mutex_unlock(&o->one_pending_mutex);
5303 /* Set "pending" bit */
5304 set_bit(cmd, pending);
5306 /* Don't send a command if only driver cleanup was requested */
5307 if (test_bit(RAMROD_DRV_CLR_ONLY, ¶ms->ramrod_flags)) {
5308 bnx2x_func_state_change_comp(bp, o, cmd);
5309 mutex_unlock(&o->one_pending_mutex);
5312 rc = o->send_cmd(bp, params);
5314 mutex_unlock(&o->one_pending_mutex);
5317 o->next_state = BNX2X_F_STATE_MAX;
5318 clear_bit(cmd, pending);
5319 smp_mb__after_clear_bit();
5323 if (test_bit(RAMROD_COMP_WAIT, ¶ms->ramrod_flags)) {
5324 rc = o->wait_comp(bp, o, cmd);
5332 return !!test_bit(cmd, pending);