/* bnx2x_cmn.h: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#ifndef BNX2X_CMN_H
#define BNX2X_CMN_H

#include <linux/types.h>
#include <linux/netdevice.h>

#include "bnx2x.h"

/*********************** Interfaces ****************************
 *  Functions that need to be implemented by each driver version
 */

/**
 * Initialize link parameters structure variables.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode);

/**
 * Configure HW according to link parameters structure.
 */
void bnx2x_link_set(struct bnx2x *bp);

/**
 * Query link status.
 *
 * @return 0 - link is UP
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes);

/**
 * Handles link status change.
 */
void bnx2x__link_status_update(struct bnx2x *bp);

/**
 * MSI-X slowpath interrupt handler.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance);

/**
 * Non-MSI-X interrupt handler.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance);

/**
 * Send a command to the cnic driver.
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);

/**
 * Provides cnic information for proper interrupt handling.
 */
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);

/**
 * Enable HW interrupts.
 */
void bnx2x_int_enable(struct bnx2x *bp);

/**
 * Disable interrupts. This function ensures that no ISRs or
 * SP DPCs (sp_task) are running after it returns.
 *
 * @param disable_hw	if true, disable HW interrupts
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw);

/**
 * Loads device firmware.
 */
int bnx2x_init_firmware(struct bnx2x *bp);

/**
 * Init HW blocks according to current initialization stage:
 * COMMON, PORT or FUNCTION.
 *
 * @param load_code	COMMON, PORT or FUNCTION
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code);

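/*
 * Example (illustrative only, not lifted from the driver): in the load
 * path, load_code is the MCP's response to the driver's load request,
 * e.g. one of FW_MSG_CODE_DRV_LOAD_COMMON/_PORT/_FUNCTION, so the first
 * instance to load is also the one that initializes the shared COMMON
 * blocks:
 *
 *	rc = bnx2x_init_hw(bp, load_code);
 *	if (rc)
 *		goto load_error;
 */
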
/**
 * Init driver internals.
 *
 * @param load_code	COMMON, PORT or FUNCTION
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code);

/**
 * Allocate driver's memory.
 */
int bnx2x_alloc_mem(struct bnx2x *bp);

/**
 * Release driver's memory.
 */
void bnx2x_free_mem(struct bnx2x *bp);

/**
 * Bring up the leading (first) eth Client.
 */
int bnx2x_setup_leading(struct bnx2x *bp);

/**
 * Set up a non-leading eth Client.
 */
int bnx2x_setup_multi(struct bnx2x *bp, int index);

/**
 * Set the number of queues according to mode and the number of
 * available MSI-X vectors.
 */
void bnx2x_set_num_queues_msix(struct bnx2x *bp);

/**
 * Cleanup chip internals:
 * - Cleanup MAC configuration.
 */
void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode);

/**
 * Acquire HW lock.
 *
 * @param resource	resource bit which was locked
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource);

/**
 * Release HW lock.
 *
 * @param bp		driver handle
 * @param resource	resource bit which was locked
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);

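/*
 * Example (illustrative sketch, not taken from the driver): the two
 * calls are meant to bracket access to a HW resource shared between
 * driver instances; HW_LOCK_RESOURCE_SPIO is one of the resource IDs
 * defined in bnx2x.h.
 *
 *	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 *	if (rc)
 *		return rc;
 *	... access the shared SPIO resource ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
 */
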
/**
 * Configure eth MAC address in the HW according to the value in
 * netdev->dev_addr for 57711.
 *
 * @param bp	driver handle
 * @param set	set or clear the CAM entry
 */
void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);

/**
 * Configure eth MAC address in the HW according to the value in
 * netdev->dev_addr for 57710.
 *
 * @param bp	driver handle
 * @param set	set or clear the CAM entry
 */
void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set);

/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). The function will wait until the ramrod completion
 * returns.
 *
 * @param bp	driver handle
 * @param set	set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set);

/**
 * Initialize status block in FW and HW.
 *
 * @param bp		driver handle
 * @param sb		host_status_block
 * @param mapping	DMA address of the block
 * @param sb_id		status block ID
 */
void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
		   dma_addr_t mapping, int sb_id);

/**
 * Reconfigure FW/HW according to dev->flags rx mode.
 *
 * @param dev	net_device
 */
void bnx2x_set_rx_mode(struct net_device *dev);

/**
 * Configure MAC filtering rules in the FW.
 *
 * @param bp	driver handle
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp);

/* Parity errors related */
void bnx2x_inc_load_cnt(struct bnx2x *bp);
u32 bnx2x_dec_load_cnt(struct bnx2x *bp);
bool bnx2x_chk_parity_attn(struct bnx2x *bp);
bool bnx2x_reset_is_done(struct bnx2x *bp);
void bnx2x_disable_close_the_gate(struct bnx2x *bp);

/**
 * Perform statistics handling according to event.
 *
 * @param bp	driver handle
 * @param event	bnx2x_stats_event
 */
void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

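/*
 * Example (illustrative only): the statistics state machine is driven
 * by feeding it events from enum bnx2x_stats_event, e.g. a periodic
 * timer requesting a counters refresh:
 *
 *	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 */
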
/**
 * Configures FW with client parameters (like HW VLAN removal)
 * for each active client.
 *
 * @param bp	driver handle
 */
void bnx2x_set_client_config(struct bnx2x *bp);

/**
 * Handle sp events.
 *
 * @param fp		fastpath handle for the event
 * @param rr_cqe	eth_rx_cqe
 */
void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe);

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

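/*
 * Example (illustrative sketch): callers refill the rings first and only
 * then publish the new producer values, relying on the barriers above to
 * keep the FW from seeing producers ahead of buffers:
 *
 *	if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)
 *		bd_prod = NEXT_RX_IDX(bd_prod);
 *	bnx2x_update_rx_prod(bp, fp, bd_prod, comp_prod, fp->rx_sge_prod);
 *
 * (bd_prod/comp_prod here stand for the caller's local producer copies.)
 */
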
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

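/*
 * Example (illustrative only): at the end of a NAPI poll the driver
 * re-enables the IGU interrupt line while reporting the last seen
 * status block index, along the lines of:
 *
 *	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
 *		     le16_to_cpu(fp->fp_u_idx), IGU_INT_ENABLE, 1);
 */
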
static inline u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return fp->tx_pkt_prod != fp->tx_pkt_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries.
	   It will be used as a threshold. */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

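/*
 * Worked example (illustrative): with 16-bit wrap-around, prod = 5 and
 * cons = 65531 give SUB_S16(5, 65531) = 10, i.e. ten BDs in flight;
 * NUM_TX_RINGS is then added on top so the "next-page" BDs, which can
 * never carry packets, are always accounted as used.
 */
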
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* Note that we are not allocating a new skb,
 * we are just moving one from cons to prod.
 * We are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static inline void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
				      struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

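/*
 * Illustrative note (reasoning, not from the original source): only
 * RX_COPY_THRESH bytes are synced back to the device because the reuse
 * path is taken for small packets that were copied out of the buffer,
 * so that is the most the CPU can have pulled into its cache.
 */
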
static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

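/*
 * Worked example (illustrative; assumes 4K pages, i.e. RX_SGE_CNT of
 * 512): the bits cleared for page i are RX_SGE_CNT*i - 1 and
 * RX_SGE_CNT*i - 2, i.e. 510/511 for the first page, 1022/1023 for the
 * second, and so on: exactly the "next page" descriptor pair sitting at
 * the tail of every SGE page.
 */
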
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static inline void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return fp->rx_comp_cons != rx_cons_sb;
}

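/*
 * Illustrative note: MAX_RCQ_DESC_CNT is the last index inside one RCQ
 * page, which holds the "next page" pointer rather than a completion.
 * E.g. if the status block reports rx_cons_sb == MAX_RCQ_DESC_CNT, the
 * real next completion lives at the first index of the following page,
 * hence the increment before comparing against rx_comp_cons.
 */
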
/* HW Lock for shared dual port PHYs */
void bnx2x_acquire_phy_lock(struct bnx2x *bp);
void bnx2x_release_phy_lock(struct bnx2x *bp);

void bnx2x_link_report(struct bnx2x *bp);
int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget);
int bnx2x_tx_int(struct bnx2x_fastpath *fp);
void bnx2x_init_rx_rings(struct bnx2x *bp);
netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);

int bnx2x_change_mac_addr(struct net_device *dev, void *p);
void bnx2x_tx_timeout(struct net_device *dev);
void bnx2x_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp);
void bnx2x_netif_start(struct bnx2x *bp);
void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw);
void bnx2x_free_irq(struct bnx2x *bp, bool disable_only);
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state);
int bnx2x_resume(struct pci_dev *pdev);
void bnx2x_free_skbs(struct bnx2x *bp);
int bnx2x_change_mtu(struct net_device *dev, int new_mtu);
int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

#endif /* BNX2X_CMN_H */