1 /******************************************************************************
2 * This software may be used and distributed according to the terms of
3 * the GNU General Public License (GPL), incorporated herein by reference.
4 * Drivers based on or derived from this code fall under the GPL and must
5 * retain the authorship, copyright and license notice. This file is not
6 * a complete program and may only be used when the entire operating
7 * system is licensed under the GPL.
8 * See the file COPYING in this distribution for more information.
10 * vxge-traffic.c: Driver for Neterion Inc's X3100 Series 10GbE PCIe I/O
11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2009 Neterion Inc.
13 ******************************************************************************/
14 #include <linux/etherdevice.h>
16 #include "vxge-traffic.h"
17 #include "vxge-config.h"
18 #include "vxge-main.h"
21 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
22 * @vp: Virtual Path handle.
24 * Enable vpath interrupts. The function is to be executed last in the
25 * vpath initialization sequence.
27 * See also: vxge_hw_vpath_intr_disable()
29 enum vxge_hw_status vxge_hw_vpath_intr_enable(struct __vxge_hw_vpath_handle *vp)
33 struct __vxge_hw_virtualpath *vpath;
34 struct vxge_hw_vpath_reg __iomem *vp_reg;
35 enum vxge_hw_status status = VXGE_HW_OK;
37 status = VXGE_HW_ERR_INVALID_HANDLE;
43 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
44 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
48 vp_reg = vpath->vp_reg;
50 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_reg);
52 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
53 &vp_reg->general_errors_reg);
55 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
56 &vp_reg->pci_config_errors_reg);
58 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
59 &vp_reg->mrpcim_to_vpath_alarm_reg);
61 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
62 &vp_reg->srpcim_to_vpath_alarm_reg);
64 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
65 &vp_reg->vpath_ppif_int_status);
67 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
68 &vp_reg->srpcim_msg_to_vpath_reg);
70 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
71 &vp_reg->vpath_pcipif_int_status);
73 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
74 &vp_reg->prc_alarm_reg);
76 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
77 &vp_reg->wrdma_alarm_status);
79 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
80 &vp_reg->asic_ntwk_vp_err_reg);
82 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
83 &vp_reg->xgmac_vp_int_status);
85 val64 = readq(&vp_reg->vpath_general_int_status);
87 /* Mask unwanted interrupts */
89 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
90 &vp_reg->vpath_pcipif_int_mask);
92 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
93 &vp_reg->srpcim_msg_to_vpath_mask);
95 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
96 &vp_reg->srpcim_to_vpath_alarm_mask);
98 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
99 &vp_reg->mrpcim_to_vpath_alarm_mask);
101 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
102 &vp_reg->pci_config_errors_mask);
104 /* Unmask the individual interrupts */
106 writeq((u32)vxge_bVALn((VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO1_OVRFLOW|
107 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO2_OVRFLOW|
108 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ|
109 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR), 0, 32),
110 &vp_reg->general_errors_mask);
112 __vxge_hw_pio_mem_write32_upper(
113 (u32)vxge_bVALn((VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_OVRWR|
114 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_OVRWR|
115 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_POISON|
116 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_POISON|
117 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO1_DMA_ERR|
118 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO2_DMA_ERR), 0, 32),
119 &vp_reg->kdfcctl_errors_mask);
121 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->vpath_ppif_int_mask);
123 __vxge_hw_pio_mem_write32_upper(
124 (u32)vxge_bVALn(VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP, 0, 32),
125 &vp_reg->prc_alarm_mask);
127 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->wrdma_alarm_mask);
128 __vxge_hw_pio_mem_write32_upper(0, &vp_reg->xgmac_vp_int_mask);
130 if (vpath->hldev->first_vp_id != vpath->vp_id)
131 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
132 &vp_reg->asic_ntwk_vp_err_mask);
134 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn((
135 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_FAULT |
136 VXGE_HW_ASIC_NTWK_VP_ERR_REG_XMACJ_NTWK_REAFFIRMED_OK), 0, 32),
137 &vp_reg->asic_ntwk_vp_err_mask);
139 __vxge_hw_pio_mem_write32_upper(0,
140 &vp_reg->vpath_general_int_mask);
147 * vxge_hw_vpath_intr_disable - Disable vpath interrupts.
148 * @vp: Virtual Path handle.
150 * Disable vpath interrupts. The function is to be executed during the
151 * vpath teardown sequence.
153 * See also: vxge_hw_vpath_intr_enable()
155 enum vxge_hw_status vxge_hw_vpath_intr_disable(
156 struct __vxge_hw_vpath_handle *vp)
160 struct __vxge_hw_virtualpath *vpath;
161 enum vxge_hw_status status = VXGE_HW_OK;
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
164 status = VXGE_HW_ERR_INVALID_HANDLE;
170 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
171 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
174 vp_reg = vpath->vp_reg;
176 __vxge_hw_pio_mem_write32_upper(
177 (u32)VXGE_HW_INTR_MASK_ALL,
178 &vp_reg->vpath_general_int_mask);
180 val64 = VXGE_HW_TIM_CLR_INT_EN_VP(1 << (16 - vpath->vp_id));
182 writeq(VXGE_HW_INTR_MASK_ALL, &vp_reg->kdfcctl_errors_mask);
184 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
185 &vp_reg->general_errors_mask);
187 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
188 &vp_reg->pci_config_errors_mask);
190 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
191 &vp_reg->mrpcim_to_vpath_alarm_mask);
193 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
194 &vp_reg->srpcim_to_vpath_alarm_mask);
196 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
197 &vp_reg->vpath_ppif_int_mask);
199 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
200 &vp_reg->srpcim_msg_to_vpath_mask);
202 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
203 &vp_reg->vpath_pcipif_int_mask);
205 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
206 &vp_reg->wrdma_alarm_mask);
208 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
209 &vp_reg->prc_alarm_mask);
211 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
212 &vp_reg->xgmac_vp_int_mask);
214 __vxge_hw_pio_mem_write32_upper((u32)VXGE_HW_INTR_MASK_ALL,
215 &vp_reg->asic_ntwk_vp_err_mask);
222 * vxge_hw_channel_msix_mask - Mask MSIX Vector.
223 * @channel: Channel for rx or tx handle
226 * The function masks the msix interrupt for the given msix_id
230 void vxge_hw_channel_msix_mask(struct __vxge_hw_channel *channel, int msix_id)
233 __vxge_hw_pio_mem_write32_upper(
234 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
235 &channel->common_reg->set_msix_mask_vect[msix_id%4]);
241 * vxge_hw_channel_msix_unmask - Unmask the MSIX Vector.
242 * @channel: Channel for rx or tx handle
245 * The function unmasks the msix interrupt for the given msix_id
250 vxge_hw_channel_msix_unmask(struct __vxge_hw_channel *channel, int msix_id)
253 __vxge_hw_pio_mem_write32_upper(
254 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
255 &channel->common_reg->clear_msix_mask_vect[msix_id%4]);
261 * vxge_hw_device_set_intr_type - Updates the configuration
262 * with new interrupt type.
263 * @hldev: HW device handle.
264 * @intr_mode: New interrupt type
266 u32 vxge_hw_device_set_intr_type(struct __vxge_hw_device *hldev, u32 intr_mode)
269 if ((intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
270 (intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
271 (intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
272 (intr_mode != VXGE_HW_INTR_MODE_DEF))
273 intr_mode = VXGE_HW_INTR_MODE_IRQLINE;
275 hldev->config.intr_mode = intr_mode;
280 * vxge_hw_device_intr_enable - Enable interrupts.
281 * @hldev: HW device handle.
282 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
283 * the type(s) of interrupts to enable.
285 * Enable Titan interrupts. The function is to be executed last in the
286 * Titan initialization sequence.
288 * See also: vxge_hw_device_intr_disable()
290 void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
296 vxge_hw_device_mask_all(hldev);
298 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
300 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
303 vxge_hw_vpath_intr_enable(
304 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
307 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE) {
308 val64 = hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
309 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX];
312 writeq(val64, &hldev->common_reg->tim_int_status0);
314 writeq(~val64, &hldev->common_reg->tim_int_mask0);
317 val32 = hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
318 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX];
321 __vxge_hw_pio_mem_write32_upper(val32,
322 &hldev->common_reg->tim_int_status1);
324 __vxge_hw_pio_mem_write32_upper(~val32,
325 &hldev->common_reg->tim_int_mask1);
329 val64 = readq(&hldev->common_reg->titan_general_int_status);
331 vxge_hw_device_unmask_all(hldev);
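/*
 * Editor's usage sketch (illustrative, not part of the driver): the
 * documented ordering is to select the interrupt mode first and to enable
 * device interrupts only after the vpaths have been opened, e.g.:
 *
 *	vxge_hw_device_set_intr_type(hldev, VXGE_HW_INTR_MODE_MSIX);
 *	// ... open vpaths, register handlers ...
 *	vxge_hw_device_intr_enable(hldev);
 */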
337 * vxge_hw_device_intr_disable - Disable Titan interrupts.
338 * @hldev: HW device handle.
339 * @op: One of the enum vxge_hw_device_intr enumerated values specifying
340 * the type(s) of interrupts to disable.
342 * Disable Titan interrupts.
344 * See also: vxge_hw_device_intr_enable()
346 void vxge_hw_device_intr_disable(struct __vxge_hw_device *hldev)
350 vxge_hw_device_mask_all(hldev);
352 /* mask all the tim interrupts */
353 writeq(VXGE_HW_INTR_MASK_ALL, &hldev->common_reg->tim_int_mask0);
354 __vxge_hw_pio_mem_write32_upper(VXGE_HW_DEFAULT_32,
355 &hldev->common_reg->tim_int_mask1);
357 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
359 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
362 vxge_hw_vpath_intr_disable(
363 VXGE_HW_VIRTUAL_PATH_HANDLE(&hldev->virtual_paths[i]));
370 * vxge_hw_device_mask_all - Mask all device interrupts.
371 * @hldev: HW device handle.
373 * Mask all device interrupts.
375 * See also: vxge_hw_device_unmask_all()
377 void vxge_hw_device_mask_all(struct __vxge_hw_device *hldev)
381 val64 = VXGE_HW_TITAN_MASK_ALL_INT_ALARM |
382 VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
384 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
385 &hldev->common_reg->titan_mask_all_int);
391 * vxge_hw_device_unmask_all - Unmask all device interrupts.
392 * @hldev: HW device handle.
394 * Unmask all device interrupts.
396 * See also: vxge_hw_device_mask_all()
398 void vxge_hw_device_unmask_all(struct __vxge_hw_device *hldev)
402 if (hldev->config.intr_mode == VXGE_HW_INTR_MODE_IRQLINE)
403 val64 = VXGE_HW_TITAN_MASK_ALL_INT_TRAFFIC;
405 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
406 &hldev->common_reg->titan_mask_all_int);
412 * vxge_hw_device_flush_io - Flush io writes.
413 * @hldev: HW device handle.
415 * The function performs a read operation to flush io writes.
419 void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
423 val32 = readl(&hldev->common_reg->titan_general_int_status);
427 * vxge_hw_device_begin_irq - Begin IRQ processing.
428 * @hldev: HW device handle.
429 * @skip_alarms: Do not clear the alarms
430 * @reason: "Reason" for the interrupt, the value of Titan's
431 * general_int_status register.
433 * The function performs two actions. It first checks whether (for a shared
434 * IRQ) the interrupt was raised by the device. Next, it masks the device interrupts.
437 * vxge_hw_device_begin_irq() does not flush MMIO writes through the
438 * bridge. Therefore, two back-to-back interrupts are potentially possible.
440 * Returns: 0, if the interrupt is not "ours" (note that in this case the
441 * device remains enabled).
442 * Otherwise, vxge_hw_device_begin_irq() returns the 64-bit general adapter
445 enum vxge_hw_status vxge_hw_device_begin_irq(struct __vxge_hw_device *hldev,
446 u32 skip_alarms, u64 *reason)
452 enum vxge_hw_status ret = VXGE_HW_OK;
454 val64 = readq(&hldev->common_reg->titan_general_int_status);
456 if (unlikely(!val64)) {
457 /* not Titan interrupt */
459 ret = VXGE_HW_ERR_WRONG_IRQ;
463 if (unlikely(val64 == VXGE_HW_ALL_FOXES)) {
465 adapter_status = readq(&hldev->common_reg->adapter_status);
467 if (adapter_status == VXGE_HW_ALL_FOXES) {
469 __vxge_hw_device_handle_error(hldev,
470 NULL_VPID, VXGE_HW_EVENT_SLOT_FREEZE);
472 ret = VXGE_HW_ERR_SLOT_FREEZE;
477 hldev->stats.sw_dev_info_stats.total_intr_cnt++;
481 vpath_mask = hldev->vpaths_deployed >>
482 (64 - VXGE_HW_MAX_VIRTUAL_PATHS);
485 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_TRAFFIC_INT(vpath_mask)) {
486 hldev->stats.sw_dev_info_stats.traffic_intr_cnt++;
491 hldev->stats.sw_dev_info_stats.not_traffic_intr_cnt++;
494 VXGE_HW_TITAN_GENERAL_INT_STATUS_VPATH_ALARM_INT)) {
496 enum vxge_hw_status error_level = VXGE_HW_OK;
498 hldev->stats.sw_dev_err_stats.vpath_alarms++;
500 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
502 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
505 ret = __vxge_hw_vpath_alarm_process(
506 &hldev->virtual_paths[i], skip_alarms);
508 error_level = VXGE_HW_SET_LEVEL(ret, error_level);
510 if (unlikely((ret == VXGE_HW_ERR_CRITICAL) ||
511 (ret == VXGE_HW_ERR_SLOT_FREEZE)))
522 * __vxge_hw_device_handle_link_up_ind
523 * @hldev: HW device handle.
525 * Link up indication handler. The function is invoked by HW when
526 * Titan indicates that the link is up for a programmable amount of time.
529 __vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
532 * If the link is already up, return.
534 if (hldev->link_state == VXGE_HW_LINK_UP)
537 hldev->link_state = VXGE_HW_LINK_UP;
540 if (hldev->uld_callbacks.link_up)
541 hldev->uld_callbacks.link_up(hldev);
547 * __vxge_hw_device_handle_link_down_ind
548 * @hldev: HW device handle.
550 * Link down indication handler. The function is invoked by HW when
551 * Titan indicates that the link is down.
554 __vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
557 * If the link is already down, return.
559 if (hldev->link_state == VXGE_HW_LINK_DOWN)
562 hldev->link_state = VXGE_HW_LINK_DOWN;
565 if (hldev->uld_callbacks.link_down)
566 hldev->uld_callbacks.link_down(hldev);
572 * __vxge_hw_device_handle_error - Handle error
575 * @type: Error type. Please see enum vxge_hw_event{}
580 __vxge_hw_device_handle_error(
581 struct __vxge_hw_device *hldev,
583 enum vxge_hw_event type)
586 case VXGE_HW_EVENT_UNKNOWN:
588 case VXGE_HW_EVENT_RESET_START:
589 case VXGE_HW_EVENT_RESET_COMPLETE:
590 case VXGE_HW_EVENT_LINK_DOWN:
591 case VXGE_HW_EVENT_LINK_UP:
593 case VXGE_HW_EVENT_ALARM_CLEARED:
595 case VXGE_HW_EVENT_ECCERR:
596 case VXGE_HW_EVENT_MRPCIM_ECCERR:
598 case VXGE_HW_EVENT_FIFO_ERR:
599 case VXGE_HW_EVENT_VPATH_ERR:
600 case VXGE_HW_EVENT_CRITICAL_ERR:
601 case VXGE_HW_EVENT_SERR:
603 case VXGE_HW_EVENT_SRPCIM_SERR:
604 case VXGE_HW_EVENT_MRPCIM_SERR:
606 case VXGE_HW_EVENT_SLOT_FREEZE:
614 if (hldev->uld_callbacks.crit_err)
615 hldev->uld_callbacks.crit_err(
616 (struct __vxge_hw_device *)hldev,
624 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
625 * condition that has caused the Tx and Rx interrupt.
628 * Acknowledge (that is, clear) the condition that has caused
629 * the Tx and Rx interrupt.
630 * See also: vxge_hw_device_begin_irq(),
631 * vxge_hw_device_mask_tx_rx(), vxge_hw_device_unmask_tx_rx().
633 void vxge_hw_device_clear_tx_rx(struct __vxge_hw_device *hldev)
636 if ((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
637 (hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
638 writeq((hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
639 hldev->tim_int_mask0[VXGE_HW_VPATH_INTR_RX]),
640 &hldev->common_reg->tim_int_status0);
643 if ((hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
644 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
645 __vxge_hw_pio_mem_write32_upper(
646 (hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
647 hldev->tim_int_mask1[VXGE_HW_VPATH_INTR_RX]),
648 &hldev->common_reg->tim_int_status1);
655 * vxge_hw_channel_dtr_alloc - Allocate a dtr from the channel
657 * @dtrh: Buffer to return the DTR pointer
659 * Allocates a dtr from the reserve array. If the reserve array is empty,
660 * it swaps the reserve and free arrays.
664 vxge_hw_channel_dtr_alloc(struct __vxge_hw_channel *channel, void **dtrh)
668 if (channel->reserve_ptr - channel->reserve_top > 0) {
670 *dtrh = channel->reserve_arr[--channel->reserve_ptr];
675 /* switch between empty and full arrays */
677 /* the idea behind this design is that keeping the free and reserve
678 * arrays separate also separates the irq and non-irq parts,
679 * i.e. no additional locking is needed when we free a resource */
681 if (channel->length - channel->free_ptr > 0) {
683 tmp_arr = channel->reserve_arr;
684 channel->reserve_arr = channel->free_arr;
685 channel->free_arr = tmp_arr;
686 channel->reserve_ptr = channel->length;
687 channel->reserve_top = channel->free_ptr;
688 channel->free_ptr = channel->length;
690 channel->stats->reserve_free_swaps_cnt++;
692 goto _alloc_after_swap;
695 channel->stats->full_cnt++;
698 return VXGE_HW_INF_OUT_OF_DESCRIPTORS;
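/*
 * Editor's sketch (illustrative, not part of the driver): descriptors are
 * taken from reserve_arr in the posting path and returned to free_arr in
 * the completion path; the two arrays only meet at the swap above, which
 * is why no per-descriptor lock is required:
 *
 *	void *dtrh;
 *
 *	if (vxge_hw_channel_dtr_alloc(channel, &dtrh) == VXGE_HW_OK)
 *		vxge_hw_channel_dtr_post(channel, dtrh);
 *	// ...later, on the completion path...
 *	vxge_hw_channel_dtr_free(channel, dtrh);
 */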
702 * vxge_hw_channel_dtr_post - Post a dtr to the channel
706 * Posts a dtr to work array.
709 void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
711 vxge_assert(channel->work_arr[channel->post_index] == NULL);
713 channel->work_arr[channel->post_index++] = dtrh;
716 if (channel->post_index == channel->length)
717 channel->post_index = 0;
721 * vxge_hw_channel_dtr_try_complete - Returns next completed dtr
723 * @dtrh: Buffer to return the next completed DTR pointer
725 * Returns the next completed dtr without removing it from the work array
729 vxge_hw_channel_dtr_try_complete(struct __vxge_hw_channel *channel, void **dtrh)
731 vxge_assert(channel->compl_index < channel->length);
733 *dtrh = channel->work_arr[channel->compl_index];
738 * vxge_hw_channel_dtr_complete - Removes next completed dtr from the work array
739 * @channel: Channel handle
741 * Removes the next completed dtr from work array
744 void vxge_hw_channel_dtr_complete(struct __vxge_hw_channel *channel)
746 channel->work_arr[channel->compl_index] = NULL;
749 if (++channel->compl_index == channel->length)
750 channel->compl_index = 0;
752 channel->stats->total_compl_cnt++;
756 * vxge_hw_channel_dtr_free - Frees a dtr
757 * @channel: Channel handle
760 * Returns the dtr to free array
763 void vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh)
765 channel->free_arr[--channel->free_ptr] = dtrh;
769 * vxge_hw_channel_dtr_count
770 * @channel: Channel handle. Obtained via vxge_hw_channel_open().
772 * Retrieve the number of DTRs available. This function cannot be called
773 * from the data path. ring_initial_replenish() is the only user.
775 int vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel)
777 return (channel->reserve_ptr - channel->reserve_top) +
778 (channel->length - channel->free_ptr);
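/*
 * Editor's worked example: with length = 128, reserve_ptr = 10,
 * reserve_top = 2 and free_ptr = 100, the expression above yields
 * (10 - 2) + (128 - 100) = 36 DTRs still available.
 */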
782 * vxge_hw_ring_rxd_reserve - Reserve ring descriptor.
783 * @ring: Handle to the ring object used for receive
784 * @rxdh: Reserved descriptor. On success HW fills this "out" parameter
785 * with a valid handle.
787 * Reserve an Rx descriptor for subsequent filling-in by the driver
788 * and posting on the corresponding channel (@channelh)
789 * via vxge_hw_ring_rxd_post().
791 * Returns: VXGE_HW_OK - success.
792 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available.
795 enum vxge_hw_status vxge_hw_ring_rxd_reserve(struct __vxge_hw_ring *ring,
798 enum vxge_hw_status status;
799 struct __vxge_hw_channel *channel;
801 channel = &ring->channel;
803 status = vxge_hw_channel_dtr_alloc(channel, rxdh);
805 if (status == VXGE_HW_OK) {
806 struct vxge_hw_ring_rxd_1 *rxdp =
807 (struct vxge_hw_ring_rxd_1 *)*rxdh;
809 rxdp->control_0 = rxdp->control_1 = 0;
816 * vxge_hw_ring_rxd_free - Free descriptor.
817 * @ring: Handle to the ring object used for receive
818 * @rxdh: Descriptor handle.
820 * Free the reserved descriptor. This operation is "symmetrical" to
821 * vxge_hw_ring_rxd_reserve. The "free-ing" completes the descriptor's
824 * After free-ing (see vxge_hw_ring_rxd_free()) the descriptor again can
827 * - reserved (vxge_hw_ring_rxd_reserve);
829 * - posted (vxge_hw_ring_rxd_post);
831 * - completed (vxge_hw_ring_rxd_next_completed);
833 * - and recycled again (vxge_hw_ring_rxd_free).
835 * For alternative state transitions and more details please refer to
839 void vxge_hw_ring_rxd_free(struct __vxge_hw_ring *ring, void *rxdh)
841 struct __vxge_hw_channel *channel;
843 channel = &ring->channel;
845 vxge_hw_channel_dtr_free(channel, rxdh);
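/*
 * Editor's sketch of the Rx descriptor cycle described above (illustrative
 * only; the buffer-mapping step is driver-specific and shown as a comment):
 *
 *	void *rxdh;
 *
 *	if (vxge_hw_ring_rxd_reserve(ring, &rxdh) == VXGE_HW_OK) {
 *		// map an skb and program its DMA address into rxdh here
 *		vxge_hw_ring_rxd_post(ring, rxdh);
 *	}
 *	// on error before posting, return it with
 *	// vxge_hw_ring_rxd_free(ring, rxdh)
 */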
850 * vxge_hw_ring_rxd_pre_post - Prepare rxd and post
851 * @ring: Handle to the ring object used for receive
852 * @rxdh: Descriptor handle.
854 * This routine prepares an rxd and posts it.
856 void vxge_hw_ring_rxd_pre_post(struct __vxge_hw_ring *ring, void *rxdh)
858 struct __vxge_hw_channel *channel;
860 channel = &ring->channel;
862 vxge_hw_channel_dtr_post(channel, rxdh);
866 * vxge_hw_ring_rxd_post_post - Process rxd after post.
867 * @ring: Handle to the ring object used for receive
868 * @rxdh: Descriptor handle.
870 * Processes rxd after post
872 void vxge_hw_ring_rxd_post_post(struct __vxge_hw_ring *ring, void *rxdh)
874 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
875 struct __vxge_hw_channel *channel;
877 channel = &ring->channel;
879 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
881 if (ring->stats->common_stats.usage_cnt > 0)
882 ring->stats->common_stats.usage_cnt--;
886 * vxge_hw_ring_rxd_post - Post descriptor on the ring.
887 * @ring: Handle to the ring object used for receive
888 * @rxdh: Descriptor obtained via vxge_hw_ring_rxd_reserve().
890 * Post descriptor on the ring.
891 * Prior to posting, the descriptor should be filled in accordance with the
892 * Host/Titan interface specification for a given service (LL, etc.).
895 void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
897 struct vxge_hw_ring_rxd_1 *rxdp = (struct vxge_hw_ring_rxd_1 *)rxdh;
898 struct __vxge_hw_channel *channel;
900 channel = &ring->channel;
903 rxdp->control_0 = VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
905 vxge_hw_channel_dtr_post(channel, rxdh);
907 if (ring->stats->common_stats.usage_cnt > 0)
908 ring->stats->common_stats.usage_cnt--;
912 * vxge_hw_ring_rxd_post_post_wmb - Process rxd after post with memory barrier.
913 * @ring: Handle to the ring object used for receive
914 * @rxdh: Descriptor handle.
916 * Processes rxd after post with memory barrier.
918 void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
920 struct __vxge_hw_channel *channel;
922 channel = &ring->channel;
925 vxge_hw_ring_rxd_post_post(ring, rxdh);
929 * vxge_hw_ring_rxd_next_completed - Get the _next_ completed descriptor.
930 * @ring: Handle to the ring object used for receive
931 * @rxdh: Descriptor handle. Returned by HW.
932 * @t_code: Transfer code, as per Titan User Guide,
933 * Receive Descriptor Format. Returned by HW.
935 * Retrieve the _next_ completed descriptor.
936 * HW uses the ring callback (*vxge_hw_ring_callback_f) to notify the
937 * driver of new completed descriptors. After that
938 * the driver can use vxge_hw_ring_rxd_next_completed to retrieve the rest
939 * completions (the very first completion is passed by HW via
940 * vxge_hw_ring_callback_f).
942 * Implementation-wise, the driver is free to call
943 * vxge_hw_ring_rxd_next_completed either immediately from inside the
944 * ring callback, or in a deferred fashion and separate (from HW)
947 * Non-zero @t_code means failure to fill-in receive buffer(s)
949 * For instance, parity error detected during the data transfer.
950 * In this case Titan will complete the descriptor and indicate
951 * to the host that the received data is not to be used.
952 * For details please refer to Titan User Guide.
954 * Returns: VXGE_HW_OK - success.
955 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
956 * are currently available for processing.
958 * See also: vxge_hw_ring_callback_f{},
959 * vxge_hw_fifo_txdl_next_completed(), enum vxge_hw_status{}.
961 enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
962 struct __vxge_hw_ring *ring, void **rxdh, u8 *t_code)
964 struct __vxge_hw_channel *channel;
965 struct vxge_hw_ring_rxd_1 *rxdp;
966 enum vxge_hw_status status = VXGE_HW_OK;
969 channel = &ring->channel;
971 vxge_hw_channel_dtr_try_complete(channel, rxdh);
973 rxdp = (struct vxge_hw_ring_rxd_1 *)*rxdh;
975 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
979 control_0 = rxdp->control_0;
980 own = control_0 & VXGE_HW_RING_RXD_LIST_OWN_ADAPTER;
981 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
983 /* check whether it is not the end */
984 if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) {
986 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
990 vxge_hw_channel_dtr_complete(channel);
992 vxge_assert(*t_code != VXGE_HW_RING_RXD_T_CODE_UNUSED);
994 ring->stats->common_stats.usage_cnt++;
995 if (ring->stats->common_stats.usage_max <
996 ring->stats->common_stats.usage_cnt)
997 ring->stats->common_stats.usage_max =
998 ring->stats->common_stats.usage_cnt;
1000 status = VXGE_HW_OK;
1004 /* reset it. since we don't want to return
1005 * garbage to the driver */
1007 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
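/*
 * Editor's completion-poll sketch (illustrative, not part of the driver):
 *
 *	void *rxdh;
 *	u8 t_code;
 *
 *	while (vxge_hw_ring_rxd_next_completed(ring, &rxdh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_RING_T_CODE_OK)
 *			vxge_hw_ring_handle_tcode(ring, rxdh, t_code);
 *		// pass the buffer up, then recycle the descriptor
 *	}
 */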
1013 * vxge_hw_ring_handle_tcode - Handle transfer code.
1014 * @ring: Handle to the ring object used for receive
1015 * @rxdh: Descriptor handle.
1016 * @t_code: One of the enumerated (and documented in the Titan user guide)
1019 * Handle descriptor's transfer code. The latter comes with each completed
1022 * Returns: one of the enum vxge_hw_status{} enumerated types.
1023 * VXGE_HW_OK - for success.
1024 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
1026 enum vxge_hw_status vxge_hw_ring_handle_tcode(
1027 struct __vxge_hw_ring *ring, void *rxdh, u8 t_code)
1029 struct __vxge_hw_channel *channel;
1030 enum vxge_hw_status status = VXGE_HW_OK;
1032 channel = &ring->channel;
1034 /* If the t_code is not supported and if the
1035 * t_code is other than 0x5 (unparseable packet
1036 * such as an unknown IPv6 header), drop it.
1039 if (t_code == VXGE_HW_RING_T_CODE_OK ||
1040 t_code == VXGE_HW_RING_T_CODE_L3_PKT_ERR) {
1041 status = VXGE_HW_OK;
1045 if (t_code > VXGE_HW_RING_T_CODE_MULTI_ERR) {
1046 status = VXGE_HW_ERR_INVALID_TCODE;
1050 ring->stats->rxd_t_code_err_cnt[t_code]++;
1056 * __vxge_hw_non_offload_db_post - Post non offload doorbell
1059 * @txdl_ptr: The starting location of the TxDL in host memory
1060 * @num_txds: The highest TxD in this TxDL (0 to 255 means 1 to 256)
1061 * @no_snoop: No snoop flags
1063 * This function posts a non-offload doorbell to doorbell FIFO
1066 static void __vxge_hw_non_offload_db_post(struct __vxge_hw_fifo *fifo,
1067 u64 txdl_ptr, u32 num_txds, u32 no_snoop)
1069 struct __vxge_hw_channel *channel;
1071 channel = &fifo->channel;
1073 writeq(VXGE_HW_NODBW_TYPE(VXGE_HW_NODBW_TYPE_NODBW) |
1074 VXGE_HW_NODBW_LAST_TXD_NUMBER(num_txds) |
1075 VXGE_HW_NODBW_GET_NO_SNOOP(no_snoop),
1076 &fifo->nofl_db->control_0);
1080 writeq(txdl_ptr, &fifo->nofl_db->txdl_ptr);
1086 * vxge_hw_fifo_free_txdl_count_get - returns the number of txdls available in
1088 * @fifoh: Handle to the fifo object used for non offload send
1090 u32 vxge_hw_fifo_free_txdl_count_get(struct __vxge_hw_fifo *fifoh)
1092 return vxge_hw_channel_dtr_count(&fifoh->channel);
1096 * vxge_hw_fifo_txdl_reserve - Reserve fifo descriptor.
1097 * @fifoh: Handle to the fifo object used for non offload send
1098 * @txdlh: Reserved descriptor. On success HW fills this "out" parameter
1099 * with a valid handle.
1100 * @txdl_priv: Buffer to return the pointer to per txdl space
1102 * Reserve a single TxDL (that is, fifo descriptor)
1103 * for the subsequent filling-in by the driver
1104 * and posting on the corresponding channel (@channelh)
1105 * via vxge_hw_fifo_txdl_post().
1107 * Note: it is the responsibility of the driver to reserve multiple descriptors
1108 * for lengthy (e.g., LSO) transmit operation. A single fifo descriptor
1109 * carries up to configured number (fifo.max_frags) of contiguous buffers.
1111 * Returns: VXGE_HW_OK - success;
1112 * VXGE_HW_INF_OUT_OF_DESCRIPTORS - Currently no descriptors available
1115 enum vxge_hw_status vxge_hw_fifo_txdl_reserve(
1116 struct __vxge_hw_fifo *fifo,
1117 void **txdlh, void **txdl_priv)
1119 struct __vxge_hw_channel *channel;
1120 enum vxge_hw_status status;
1123 channel = &fifo->channel;
1125 status = vxge_hw_channel_dtr_alloc(channel, txdlh);
1127 if (status == VXGE_HW_OK) {
1128 struct vxge_hw_fifo_txd *txdp =
1129 (struct vxge_hw_fifo_txd *)*txdlh;
1130 struct __vxge_hw_fifo_txdl_priv *priv;
1132 priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
1134 /* reset the TxDL's private */
1135 priv->align_dma_offset = 0;
1136 priv->align_vaddr_start = priv->align_vaddr;
1137 priv->align_used_frags = 0;
1139 priv->alloc_frags = fifo->config->max_frags;
1140 priv->next_txdl_priv = NULL;
1142 *txdl_priv = (void *)(size_t)txdp->host_control;
1144 for (i = 0; i < fifo->config->max_frags; i++) {
1145 txdp = ((struct vxge_hw_fifo_txd *)*txdlh) + i;
1146 txdp->control_0 = txdp->control_1 = 0;
1154 * vxge_hw_fifo_txdl_buffer_set - Set transmit buffer pointer in the
1156 * @fifo: Handle to the fifo object used for non offload send
1157 * @txdlh: Descriptor handle.
1158 * @frag_idx: Index of the data buffer in the caller's scatter-gather list
1160 * @dma_pointer: DMA address of the data buffer referenced by @frag_idx.
1161 * @size: Size of the data buffer (in bytes).
1163 * This API is part of the preparation of the transmit descriptor for posting
1164 * (via vxge_hw_fifo_txdl_post()). The related "preparation" APIs include
1165 * vxge_hw_fifo_txdl_mss_set() and vxge_hw_fifo_txdl_cksum_set_bits().
1166 * All three APIs fill in the fields of the fifo descriptor,
1167 * in accordance with the Titan specification.
1170 void vxge_hw_fifo_txdl_buffer_set(struct __vxge_hw_fifo *fifo,
1171 void *txdlh, u32 frag_idx,
1172 dma_addr_t dma_pointer, u32 size)
1174 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1175 struct vxge_hw_fifo_txd *txdp, *txdp_last;
1176 struct __vxge_hw_channel *channel;
1178 channel = &fifo->channel;
1180 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1181 txdp = (struct vxge_hw_fifo_txd *)txdlh + txdl_priv->frags;
1184 txdp->control_0 = txdp->control_1 = 0;
1186 txdp->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1187 VXGE_HW_FIFO_TXD_GATHER_CODE_FIRST);
1188 txdp->control_1 |= fifo->interrupt_type;
1189 txdp->control_1 |= VXGE_HW_FIFO_TXD_INT_NUMBER(
1191 if (txdl_priv->frags) {
1192 txdp_last = (struct vxge_hw_fifo_txd *)txdlh +
1193 (txdl_priv->frags - 1);
1194 txdp_last->control_0 |= VXGE_HW_FIFO_TXD_GATHER_CODE(
1195 VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1199 vxge_assert(frag_idx < txdl_priv->alloc_frags);
1201 txdp->buffer_pointer = (u64)dma_pointer;
1202 txdp->control_0 |= VXGE_HW_FIFO_TXD_BUFFER_SIZE(size);
1203 fifo->stats->total_buffers++;
1208 * vxge_hw_fifo_txdl_post - Post descriptor on the fifo channel.
1209 * @fifo: Handle to the fifo object used for non offload send
1210 * @txdlh: Descriptor obtained via vxge_hw_fifo_txdl_reserve()
1211 * @frags: Number of contiguous buffers that are part of a single
1212 * transmit operation.
1214 * Post descriptor on the 'fifo' type channel for transmission.
1215 * Prior to posting, the descriptor should be filled in accordance with the
1216 * Host/Titan interface specification for a given service (LL, etc.).
1219 void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1221 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1222 struct vxge_hw_fifo_txd *txdp_last;
1223 struct vxge_hw_fifo_txd *txdp_first;
1224 struct __vxge_hw_channel *channel;
1226 channel = &fifo->channel;
1228 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdlh);
1229 txdp_first = (struct vxge_hw_fifo_txd *)txdlh;
1231 txdp_last = (struct vxge_hw_fifo_txd *)txdlh + (txdl_priv->frags - 1);
1232 txdp_last->control_0 |=
1233 VXGE_HW_FIFO_TXD_GATHER_CODE(VXGE_HW_FIFO_TXD_GATHER_CODE_LAST);
1234 txdp_first->control_0 |= VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER;
1236 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1238 __vxge_hw_non_offload_db_post(fifo,
1239 (u64)txdl_priv->dma_addr,
1240 txdl_priv->frags - 1,
1241 fifo->no_snoop_bits);
1243 fifo->stats->total_posts++;
1244 fifo->stats->common_stats.usage_cnt++;
1245 if (fifo->stats->common_stats.usage_max <
1246 fifo->stats->common_stats.usage_cnt)
1247 fifo->stats->common_stats.usage_max =
1248 fifo->stats->common_stats.usage_cnt;
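/*
 * Editor's Tx posting sketch (illustrative; dma_addr and len stand in for
 * whatever the driver has DMA-mapped for each fragment):
 *
 *	void *txdlh, *txdl_priv;
 *
 *	if (vxge_hw_fifo_txdl_reserve(fifo, &txdlh, &txdl_priv) ==
 *	    VXGE_HW_OK) {
 *		vxge_hw_fifo_txdl_buffer_set(fifo, txdlh, 0, dma_addr, len);
 *		// one buffer_set() call per additional fragment
 *		vxge_hw_fifo_txdl_post(fifo, txdlh);
 *	}
 */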
1252 * vxge_hw_fifo_txdl_next_completed - Retrieve next completed descriptor.
1253 * @fifo: Handle to the fifo object used for non offload send
1254 * @txdlh: Descriptor handle. Returned by HW.
1255 * @t_code: Transfer code, as per Titan User Guide,
1256 * Transmit Descriptor Format.
1259 * Retrieve the _next_ completed descriptor.
1260 * HW uses the channel callback (*vxge_hw_channel_callback_f) to notify the
1261 * driver of new completed descriptors. After that
1262 * the driver can use vxge_hw_fifo_txdl_next_completed to retrieve the rest
1263 * completions (the very first completion is passed by HW via
1264 * vxge_hw_channel_callback_f).
1266 * Implementation-wise, the driver is free to call
1267 * vxge_hw_fifo_txdl_next_completed either immediately from inside the
1268 * channel callback, or in a deferred fashion and separate (from HW)
1271 * Non-zero @t_code means failure to process the descriptor.
1272 * The failure could happen, for instance, when the link is
1273 * down, in which case Titan completes the descriptor because it
1274 * is not able to send the data out.
1276 * For details please refer to Titan User Guide.
1278 * Returns: VXGE_HW_OK - success.
1279 * VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS - No completed descriptors
1280 * are currently available for processing.
1283 enum vxge_hw_status vxge_hw_fifo_txdl_next_completed(
1284 struct __vxge_hw_fifo *fifo, void **txdlh,
1285 enum vxge_hw_fifo_tcode *t_code)
1287 struct __vxge_hw_channel *channel;
1288 struct vxge_hw_fifo_txd *txdp;
1289 enum vxge_hw_status status = VXGE_HW_OK;
1291 channel = &fifo->channel;
1293 vxge_hw_channel_dtr_try_complete(channel, txdlh);
1295 txdp = (struct vxge_hw_fifo_txd *)*txdlh;
1297 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1301 /* check whether host owns it */
1302 if (!(txdp->control_0 & VXGE_HW_FIFO_TXD_LIST_OWN_ADAPTER)) {
1304 vxge_assert(txdp->host_control != 0);
1306 vxge_hw_channel_dtr_complete(channel);
1308 *t_code = (u8)VXGE_HW_FIFO_TXD_T_CODE_GET(txdp->control_0);
1310 if (fifo->stats->common_stats.usage_cnt > 0)
1311 fifo->stats->common_stats.usage_cnt--;
1313 status = VXGE_HW_OK;
1317 /* no more completions */
1319 status = VXGE_HW_INF_NO_MORE_COMPLETED_DESCRIPTORS;
1325 * vxge_hw_fifo_handle_tcode - Handle transfer code.
1326 * @fifo: Handle to the fifo object used for non offload send
1327 * @txdlh: Descriptor handle.
1328 * @t_code: One of the enumerated (and documented in the Titan user guide)
1331 * Handle descriptor's transfer code. The latter comes with each completed
1334 * Returns: one of the enum vxge_hw_status{} enumerated types.
1335 * VXGE_HW_OK - for success.
1336 * VXGE_HW_ERR_CRITICAL - when a critical error is encountered.
1338 enum vxge_hw_status vxge_hw_fifo_handle_tcode(struct __vxge_hw_fifo *fifo,
1340 enum vxge_hw_fifo_tcode t_code)
1342 struct __vxge_hw_channel *channel;
1344 enum vxge_hw_status status = VXGE_HW_OK;
1345 channel = &fifo->channel;
1347 if (((t_code & 0x7) < 0) || ((t_code & 0x7) > 0x4)) {
1348 status = VXGE_HW_ERR_INVALID_TCODE;
1352 fifo->stats->txd_t_code_err_cnt[t_code]++;
1358 * vxge_hw_fifo_txdl_free - Free descriptor.
1359 * @fifo: Handle to the fifo object used for non offload send
1360 * @txdlh: Descriptor handle.
1362 * Free the reserved descriptor. This operation is "symmetrical" to
1363 * vxge_hw_fifo_txdl_reserve. The "free-ing" completes the descriptor's
1366 * After free-ing (see vxge_hw_fifo_txdl_free()) the descriptor again can
1369 * - reserved (vxge_hw_fifo_txdl_reserve);
1371 * - posted (vxge_hw_fifo_txdl_post);
1373 * - completed (vxge_hw_fifo_txdl_next_completed);
1375 * - and recycled again (vxge_hw_fifo_txdl_free).
1377 * For alternative state transitions and more details please refer to
1381 void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh)
1383 struct __vxge_hw_fifo_txdl_priv *txdl_priv;
1385 struct __vxge_hw_channel *channel;
1387 channel = &fifo->channel;
1389 txdl_priv = __vxge_hw_fifo_txdl_priv(fifo,
1390 (struct vxge_hw_fifo_txd *)txdlh);
1392 max_frags = fifo->config->max_frags;
1394 vxge_hw_channel_dtr_free(channel, txdlh);
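/*
 * Editor's Tx completion sketch (illustrative; VXGE_HW_FIFO_T_CODE_OK is
 * assumed from vxge-traffic.h):
 *
 *	void *txdlh;
 *	enum vxge_hw_fifo_tcode t_code;
 *
 *	while (vxge_hw_fifo_txdl_next_completed(fifo, &txdlh, &t_code) ==
 *	       VXGE_HW_OK) {
 *		if (t_code != VXGE_HW_FIFO_T_CODE_OK)
 *			vxge_hw_fifo_handle_tcode(fifo, txdlh, t_code);
 *		vxge_hw_fifo_txdl_free(fifo, txdlh);
 *	}
 */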
1398 * vxge_hw_vpath_mac_addr_add - Add the mac address entry for this vpath
1399 * to MAC address table.
1400 * @vp: Vpath handle.
1401 * @macaddr: MAC address to be added for this vpath into the list
1402 * @macaddr_mask: MAC address mask for macaddr
1403 * @duplicate_mode: Duplicate MAC address add mode. Please see
1404 * enum vxge_hw_vpath_mac_addr_add_mode{}
1406 * Adds the given mac address and mac address mask into the list for this
1408 * see also: vxge_hw_vpath_mac_addr_delete, vxge_hw_vpath_mac_addr_get and
1409 * vxge_hw_vpath_mac_addr_get_next
1413 vxge_hw_vpath_mac_addr_add(
1414 struct __vxge_hw_vpath_handle *vp,
1415 u8 (macaddr)[ETH_ALEN],
1416 u8 (macaddr_mask)[ETH_ALEN],
1417 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode)
1422 enum vxge_hw_status status = VXGE_HW_OK;
1425 status = VXGE_HW_ERR_INVALID_HANDLE;
1429 for (i = 0; i < ETH_ALEN; i++) {
1431 data1 |= (u8)macaddr[i];
1434 data2 |= (u8)macaddr_mask[i];
1437 switch (duplicate_mode) {
1438 case VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE:
1441 case VXGE_HW_VPATH_MAC_ADDR_DISCARD_DUPLICATE:
1444 case VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE:
1452 status = __vxge_hw_vpath_rts_table_set(vp,
1453 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1454 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1456 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1457 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2)|
1458 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MODE(i));
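/*
 * Editor's usage sketch (illustrative): the loop above packs the six
 * address and mask bytes into the data1/data2 steering words, so adding
 * an entry reduces to:
 *
 *	u8 mac[ETH_ALEN]  = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
 *	u8 mask[ETH_ALEN] = { 0 };	// mask bytes per steering-table rules
 *
 *	vxge_hw_vpath_mac_addr_add(vp, mac, mask,
 *			VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
 */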
1464 * vxge_hw_vpath_mac_addr_get - Get the first mac address entry for this vpath
1465 * from MAC address table.
1466 * @vp: Vpath handle.
1467 * @macaddr: First MAC address entry for this vpath in the list
1468 * @macaddr_mask: MAC address mask for macaddr
1470 * Returns the first mac address and mac address mask in the list for this
1472 * see also: vxge_hw_vpath_mac_addr_get_next
1476 vxge_hw_vpath_mac_addr_get(
1477 struct __vxge_hw_vpath_handle *vp,
1478 u8 (macaddr)[ETH_ALEN],
1479 u8 (macaddr_mask)[ETH_ALEN])
1484 enum vxge_hw_status status = VXGE_HW_OK;
1487 status = VXGE_HW_ERR_INVALID_HANDLE;
1491 status = __vxge_hw_vpath_rts_table_get(vp,
1492 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1493 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1496 if (status != VXGE_HW_OK)
1499 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1501 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1503 for (i = ETH_ALEN; i > 0; i--) {
1504 macaddr[i-1] = (u8)(data1 & 0xFF);
1507 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1515 * vxge_hw_vpath_mac_addr_get_next - Get the next mac address entry for this
1517 * from MAC address table.
1518 * @vp: Vpath handle.
1519 * @macaddr: Next MAC address entry for this vpath in the list
1520 * @macaddr_mask: MAC address mask for macaddr
1522 * Returns the next mac address and mac address mask in the list for this
1524 * see also: vxge_hw_vpath_mac_addr_get
1528 vxge_hw_vpath_mac_addr_get_next(
1529 struct __vxge_hw_vpath_handle *vp,
1530 u8 (macaddr)[ETH_ALEN],
1531 u8 (macaddr_mask)[ETH_ALEN])
1536 enum vxge_hw_status status = VXGE_HW_OK;
1539 status = VXGE_HW_ERR_INVALID_HANDLE;
1543 status = __vxge_hw_vpath_rts_table_get(vp,
1544 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1545 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1548 if (status != VXGE_HW_OK)
1551 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
1553 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data2);
1555 for (i = ETH_ALEN; i > 0; i--) {
1556 macaddr[i-1] = (u8)(data1 & 0xFF);
1559 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
1568 * vxge_hw_vpath_mac_addr_delete - Delete the mac address entry for this vpath
1569 * from the MAC address table.
1570 * @vp: Vpath handle.
1571 * @macaddr: MAC address to be deleted from the list for this vpath
1572 * @macaddr_mask: MAC address mask for macaddr
1574 * Deletes the given mac address and mac address mask from the list for this
1576 * see also: vxge_hw_vpath_mac_addr_add, vxge_hw_vpath_mac_addr_get and
1577 * vxge_hw_vpath_mac_addr_get_next
1581 vxge_hw_vpath_mac_addr_delete(
1582 struct __vxge_hw_vpath_handle *vp,
1583 u8 (macaddr)[ETH_ALEN],
1584 u8 (macaddr_mask)[ETH_ALEN])
1589 enum vxge_hw_status status = VXGE_HW_OK;
1592 status = VXGE_HW_ERR_INVALID_HANDLE;
1596 for (i = 0; i < ETH_ALEN; i++) {
1598 data1 |= (u8)macaddr[i];
1601 data2 |= (u8)macaddr_mask[i];
1604 status = __vxge_hw_vpath_rts_table_set(vp,
1605 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1606 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
1608 VXGE_HW_RTS_ACCESS_STEER_DATA0_DA_MAC_ADDR(data1),
1609 VXGE_HW_RTS_ACCESS_STEER_DATA1_DA_MAC_ADDR_MASK(data2));
1615 * vxge_hw_vpath_vid_add - Add the vlan id entry for this vpath
1617 * @vp: Vpath handle.
1618 * @vid: vlan id to be added for this vpath into the list
1620 * Adds the given vlan id into the list for this vpath.
1621 * see also: vxge_hw_vpath_vid_delete, vxge_hw_vpath_vid_get and
1622 * vxge_hw_vpath_vid_get_next
1626 vxge_hw_vpath_vid_add(struct __vxge_hw_vpath_handle *vp, u64 vid)
1628 enum vxge_hw_status status = VXGE_HW_OK;
1631 status = VXGE_HW_ERR_INVALID_HANDLE;
1635 status = __vxge_hw_vpath_rts_table_set(vp,
1636 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_ADD_ENTRY,
1637 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1638 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1644 * vxge_hw_vpath_vid_get - Get the first vid entry for this vpath
1645 * from vlan id table.
1646 * @vp: Vpath handle.
1647 * @vid: Buffer to return vlan id
1649 * Returns the first vlan id in the list for this vpath.
1650 * see also: vxge_hw_vpath_vid_get_next
1654 vxge_hw_vpath_vid_get(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1657 enum vxge_hw_status status = VXGE_HW_OK;
1660 status = VXGE_HW_ERR_INVALID_HANDLE;
1664 status = __vxge_hw_vpath_rts_table_get(vp,
1665 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
1666 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1669 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1675 * vxge_hw_vpath_vid_get_next - Get the next vid entry for this vpath
1676 * from vlan id table.
1677 * @vp: Vpath handle.
1678 * @vid: Buffer to return vlan id
1680 * Returns the next vlan id in the list for this vpath.
1681 * see also: vxge_hw_vpath_vid_get
1685 vxge_hw_vpath_vid_get_next(struct __vxge_hw_vpath_handle *vp, u64 *vid)
1688 enum vxge_hw_status status = VXGE_HW_OK;
1691 status = VXGE_HW_ERR_INVALID_HANDLE;
1695 status = __vxge_hw_vpath_rts_table_get(vp,
1696 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY,
1697 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1700 *vid = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_VLAN_ID(*vid);
1706 * vxge_hw_vpath_vid_delete - Delete the vlan id entry for this vpath
1708 * @vp: Vpath handle.
1709 * @vid: vlan id to be deleted from the list for this vpath
1711 * Deletes the given vlan id from the list for this vpath.
1712 * see also: vxge_hw_vpath_vid_add, vxge_hw_vpath_vid_get and
1713 * vxge_hw_vpath_vid_get_next
1717 vxge_hw_vpath_vid_delete(struct __vxge_hw_vpath_handle *vp, u64 vid)
1719 enum vxge_hw_status status = VXGE_HW_OK;
1722 status = VXGE_HW_ERR_INVALID_HANDLE;
1726 status = __vxge_hw_vpath_rts_table_set(vp,
1727 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_DELETE_ENTRY,
1728 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_VID,
1729 0, VXGE_HW_RTS_ACCESS_STEER_DATA0_VLAN_ID(vid), 0);
1735 * vxge_hw_vpath_promisc_enable - Enable promiscuous mode.
1736 * @vp: Vpath handle.
1738 * Enable promiscuous mode of Titan-e operation.
1740 * See also: vxge_hw_vpath_promisc_disable().
1742 enum vxge_hw_status vxge_hw_vpath_promisc_enable(
1743 struct __vxge_hw_vpath_handle *vp)
1746 struct __vxge_hw_virtualpath *vpath;
1747 enum vxge_hw_status status = VXGE_HW_OK;
1749 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1750 status = VXGE_HW_ERR_INVALID_HANDLE;
1756 /* Enable promiscuous mode for function 0 only */
1757 if (!(vpath->hldev->access_rights &
1758 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM))
1761 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1763 if (!(val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN)) {
1765 val64 |= VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1766 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1767 VXGE_HW_RXMAC_VCFG0_BCAST_EN |
1768 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN;
1770 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1777 * vxge_hw_vpath_promisc_disable - Disable promiscuous mode.
1778 * @vp: Vpath handle.
1780 * Disable promiscuous mode of Titan-e operation.
1782 * See also: vxge_hw_vpath_promisc_enable().
1784 enum vxge_hw_status vxge_hw_vpath_promisc_disable(
1785 struct __vxge_hw_vpath_handle *vp)
1788 struct __vxge_hw_virtualpath *vpath;
1789 enum vxge_hw_status status = VXGE_HW_OK;
1791 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1792 status = VXGE_HW_ERR_INVALID_HANDLE;
1798 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1800 if (val64 & VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN) {
1802 val64 &= ~(VXGE_HW_RXMAC_VCFG0_UCAST_ALL_ADDR_EN |
1803 VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN |
1804 VXGE_HW_RXMAC_VCFG0_ALL_VID_EN);
1806 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1813 * vxge_hw_vpath_bcast_enable - Enable broadcast
1814 * @vp: Vpath handle.
1816 * Enable receiving broadcasts.
1818 enum vxge_hw_status vxge_hw_vpath_bcast_enable(
1819 struct __vxge_hw_vpath_handle *vp)
1822 struct __vxge_hw_virtualpath *vpath;
1823 enum vxge_hw_status status = VXGE_HW_OK;
1825 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1826 status = VXGE_HW_ERR_INVALID_HANDLE;
1832 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1834 if (!(val64 & VXGE_HW_RXMAC_VCFG0_BCAST_EN)) {
1835 val64 |= VXGE_HW_RXMAC_VCFG0_BCAST_EN;
1836 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1843 * vxge_hw_vpath_mcast_enable - Enable multicast addresses.
1844 * @vp: Vpath handle.
1846 * Enable Titan-e multicast addresses.
1847 * Returns: VXGE_HW_OK on success.
1850 enum vxge_hw_status vxge_hw_vpath_mcast_enable(
1851 struct __vxge_hw_vpath_handle *vp)
1854 struct __vxge_hw_virtualpath *vpath;
1855 enum vxge_hw_status status = VXGE_HW_OK;
1857 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1858 status = VXGE_HW_ERR_INVALID_HANDLE;
1864 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1866 if (!(val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN)) {
1867 val64 |= VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1868 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1875 * vxge_hw_vpath_mcast_disable - Disable multicast addresses.
1876 * @vp: Vpath handle.
1878 * Disable Titan-e multicast addresses.
1879 * Returns: VXGE_HW_OK - success.
1880 * VXGE_HW_ERR_INVALID_HANDLE - Invalid handle
1884 vxge_hw_vpath_mcast_disable(struct __vxge_hw_vpath_handle *vp)
1887 struct __vxge_hw_virtualpath *vpath;
1888 enum vxge_hw_status status = VXGE_HW_OK;
1890 if ((vp == NULL) || (vp->vpath->ringh == NULL)) {
1891 status = VXGE_HW_ERR_INVALID_HANDLE;
1897 val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
1899 if (val64 & VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN) {
1900 val64 &= ~VXGE_HW_RXMAC_VCFG0_MCAST_ALL_ADDR_EN;
1901 writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
1908 * __vxge_hw_vpath_alarm_process - Process Alarms.
1909 * @vpath: Virtual Path.
1910 * @skip_alarms: Do not clear the alarms
1912 * Process vpath alarms.
1915 enum vxge_hw_status __vxge_hw_vpath_alarm_process(
1916 struct __vxge_hw_virtualpath *vpath,
1922 struct __vxge_hw_device *hldev = NULL;
1923 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1925 struct vxge_hw_vpath_stats_sw_info *sw_stats;
1926 struct vxge_hw_vpath_reg __iomem *vp_reg;
1928 if (vpath == NULL) {
1929 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1934 hldev = vpath->hldev;
1935 vp_reg = vpath->vp_reg;
1936 alarm_status = readq(&vp_reg->vpath_general_int_status);
1938 if (alarm_status == VXGE_HW_ALL_FOXES) {
1939 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1944 sw_stats = vpath->sw_stats;
1946 if (alarm_status & ~(
1947 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1948 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1949 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1950 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1951 sw_stats->error_stats.unknown_alarms++;
1953 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1958 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1960 val64 = readq(&vp_reg->xgmac_vp_int_status);
1963 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1965 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1968 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1970 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1972 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
1974 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1976 sw_stats->error_stats.network_sustained_fault++;
1979 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1980 &vp_reg->asic_ntwk_vp_err_mask);
1982 __vxge_hw_device_handle_link_down_ind(hldev);
1983 alarm_event = VXGE_HW_SET_LEVEL(
1984 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1988 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1990 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1992 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
1994 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1997 sw_stats->error_stats.network_sustained_ok++;
2000 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
2001 &vp_reg->asic_ntwk_vp_err_mask);
2003 __vxge_hw_device_handle_link_up_ind(hldev);
2004 alarm_event = VXGE_HW_SET_LEVEL(
2005 VXGE_HW_EVENT_LINK_UP, alarm_event);
2008 writeq(VXGE_HW_INTR_MASK_ALL,
2009 &vp_reg->asic_ntwk_vp_err_reg);
2011 alarm_event = VXGE_HW_SET_LEVEL(
2012 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
2019 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
2021 pic_status = readq(&vp_reg->vpath_ppif_int_status);
2024 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
2026 val64 = readq(&vp_reg->general_errors_reg);
2027 mask64 = readq(&vp_reg->general_errors_mask);
2030 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
2032 sw_stats->error_stats.ini_serr_det++;
2034 alarm_event = VXGE_HW_SET_LEVEL(
2035 VXGE_HW_EVENT_SERR, alarm_event);
2039 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2041 sw_stats->error_stats.dblgen_fifo0_overflow++;
2043 alarm_event = VXGE_HW_SET_LEVEL(
2044 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2048 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2050 sw_stats->error_stats.statsb_pif_chain_error++;
2053 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2055 sw_stats->error_stats.statsb_drop_timeout++;
2058 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2060 sw_stats->error_stats.target_illegal_access++;
2063 writeq(VXGE_HW_INTR_MASK_ALL,
2064 &vp_reg->general_errors_reg);
2065 alarm_event = VXGE_HW_SET_LEVEL(
2066 VXGE_HW_EVENT_ALARM_CLEARED,
2072 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2074 val64 = readq(&vp_reg->kdfcctl_errors_reg);
2075 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2078 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2080 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2082 alarm_event = VXGE_HW_SET_LEVEL(
2083 VXGE_HW_EVENT_FIFO_ERR,
2088 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2090 sw_stats->error_stats.kdfcctl_fifo0_poison++;
2092 alarm_event = VXGE_HW_SET_LEVEL(
2093 VXGE_HW_EVENT_FIFO_ERR,
2098 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2100 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2102 alarm_event = VXGE_HW_SET_LEVEL(
2103 VXGE_HW_EVENT_FIFO_ERR,
2108 writeq(VXGE_HW_INTR_MASK_ALL,
2109 &vp_reg->kdfcctl_errors_reg);
2110 alarm_event = VXGE_HW_SET_LEVEL(
2111 VXGE_HW_EVENT_ALARM_CLEARED,
2118 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2120 val64 = readq(&vp_reg->wrdma_alarm_status);
2122 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2124 val64 = readq(&vp_reg->prc_alarm_reg);
2125 mask64 = readq(&vp_reg->prc_alarm_mask);
2127 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
2129 sw_stats->error_stats.prc_ring_bumps++;
2131 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2133 sw_stats->error_stats.prc_rxdcm_sc_err++;
2135 alarm_event = VXGE_HW_SET_LEVEL(
2136 VXGE_HW_EVENT_VPATH_ERR,
2140 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2142 sw_stats->error_stats.prc_rxdcm_sc_abort++;
2144 alarm_event = VXGE_HW_SET_LEVEL(
2145 VXGE_HW_EVENT_VPATH_ERR,
2149 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2151 sw_stats->error_stats.prc_quanta_size_err++;
2153 alarm_event = VXGE_HW_SET_LEVEL(
2154 VXGE_HW_EVENT_VPATH_ERR,
2159 writeq(VXGE_HW_INTR_MASK_ALL,
2160 &vp_reg->prc_alarm_reg);
2161 alarm_event = VXGE_HW_SET_LEVEL(
2162 VXGE_HW_EVENT_ALARM_CLEARED,
2168 hldev->stats.sw_dev_err_stats.vpath_alarms++;
2170 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2171 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2174 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2176 if (alarm_event == VXGE_HW_EVENT_SERR)
2177 return VXGE_HW_ERR_CRITICAL;
2179 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2180 VXGE_HW_ERR_SLOT_FREEZE :
2181 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2186 * vxge_hw_vpath_alarm_process - Process Alarms.
2187 * @vp: Virtual Path handle.
2188 * @skip_alarms: Do not clear the alarms
2190 * Process vpath alarms.
2193 enum vxge_hw_status vxge_hw_vpath_alarm_process(
2194 struct __vxge_hw_vpath_handle *vp,
2197 enum vxge_hw_status status = VXGE_HW_OK;
2200 status = VXGE_HW_ERR_INVALID_HANDLE;
2204 status = __vxge_hw_vpath_alarm_process(vp->vpath, skip_alarms);
2210 * vxge_hw_vpath_msix_set - Associate MSIX vectors with TIM interrupts and
2212 * @vp: Virtual Path handle.
2213 * @tim_msix_id: MSIX vectors associated with VXGE_HW_MAX_INTR_PER_VP number of
2214 * interrupts (can be repeated). If the fifo or ring is not enabled,
2215 * the MSIX vector for it should be set to 0
2216 * @alarm_msix_id: MSIX vector for alarm.
2218 * This API associates the given MSIX vector numbers with the four TIM
2219 * interrupts and the alarm interrupt.
2222 vxge_hw_vpath_msix_set(struct __vxge_hw_vpath_handle *vp, int *tim_msix_id,
2226 struct __vxge_hw_virtualpath *vpath = vp->vpath;
2227 struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2228 u32 vp_id = vp->vpath->vp_id;
2230 val64 = VXGE_HW_INTERRUPT_CFG0_GROUP0_MSIX_FOR_TXTI(
2231 (vp_id * 4) + tim_msix_id[0]) |
2232 VXGE_HW_INTERRUPT_CFG0_GROUP1_MSIX_FOR_TXTI(
2233 (vp_id * 4) + tim_msix_id[1]);
2235 writeq(val64, &vp_reg->interrupt_cfg0);
2237 writeq(VXGE_HW_INTERRUPT_CFG2_ALARM_MAP_TO_MSG(
2238 (vpath->hldev->first_vp_id * 4) + alarm_msix_id),
2239 &vp_reg->interrupt_cfg2);
2241 if (vpath->hldev->config.intr_mode ==
2242 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2243 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2244 VXGE_HW_ONE_SHOT_VECT1_EN_ONE_SHOT_VECT1_EN,
2245 0, 32), &vp_reg->one_shot_vect1_en);
2248 if (vpath->hldev->config.intr_mode ==
2249 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2250 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2251 VXGE_HW_ONE_SHOT_VECT2_EN_ONE_SHOT_VECT2_EN,
2252 0, 32), &vp_reg->one_shot_vect2_en);
2254 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(
2255 VXGE_HW_ONE_SHOT_VECT3_EN_ONE_SHOT_VECT3_EN,
2256 0, 32), &vp_reg->one_shot_vect3_en);
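/*
 * Editor's worked example: with vp_id = 1 and tim_msix_id[] = { 0, 1 },
 * the writes above map the two TIM groups to MSIX vectors
 * 1 * 4 + 0 = 4 and 1 * 4 + 1 = 5, i.e. each vpath owns a block of four
 * consecutive vectors starting at vp_id * 4.
 */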
2263 * vxge_hw_vpath_msix_mask - Mask MSIX Vector.
2264 * @vp: Virtual Path handle.
2267 * The function masks the msix interrupt for the given msix_id
2270 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2275 vxge_hw_vpath_msix_mask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2277 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2278 __vxge_hw_pio_mem_write32_upper(
2279 (u32) vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2280 &hldev->common_reg->set_msix_mask_vect[msix_id % 4]);
2286 * vxge_hw_vpath_msix_clear - Clear MSIX Vector.
2287 * @vp: Virtual Path handle.
2290 * The function clears the msix interrupt for the given msix_id
2293 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2298 vxge_hw_vpath_msix_clear(struct __vxge_hw_vpath_handle *vp, int msix_id)
2300 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2301 if (hldev->config.intr_mode ==
2302 VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) {
2303 __vxge_hw_pio_mem_write32_upper(
2304 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2305 &hldev->common_reg->
2306 clr_msix_one_shot_vec[msix_id%4]);
2308 __vxge_hw_pio_mem_write32_upper(
2309 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2310 &hldev->common_reg->
2311 clear_msix_mask_vect[msix_id%4]);
2318 * vxge_hw_vpath_msix_unmask - Unmask the MSIX Vector.
2319 * @vp: Virtual Path handle.
2322 * The function unmasks the msix interrupt for the given msix_id
2325 * Otherwise, VXGE_HW_ERR_WRONG_IRQ if the msix index is out of range
2330 vxge_hw_vpath_msix_unmask(struct __vxge_hw_vpath_handle *vp, int msix_id)
2332 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2333 __vxge_hw_pio_mem_write32_upper(
2334 (u32)vxge_bVALn(vxge_mBIT(msix_id >> 2), 0, 32),
2335 &hldev->common_reg->clear_msix_mask_vect[msix_id%4]);
2341 * vxge_hw_vpath_msix_mask_all - Mask all MSIX vectors for the vpath.
2342 * @vp: Virtual Path handle.
2344 * The function masks all msix interrupts for the given vpath
2348 vxge_hw_vpath_msix_mask_all(struct __vxge_hw_vpath_handle *vp)
2351 __vxge_hw_pio_mem_write32_upper(
2352 (u32)vxge_bVALn(vxge_mBIT(vp->vpath->vp_id), 0, 32),
2353 &vp->vpath->hldev->common_reg->set_msix_mask_all_vect);
2359 * vxge_hw_vpath_inta_mask_tx_rx - Mask Tx and Rx interrupts.
2360 * @vp: Virtual Path handle.
2362 * Mask Tx and Rx vpath interrupts.
2364 * See also: vxge_hw_vpath_inta_unmask_tx_rx()
2366 void vxge_hw_vpath_inta_mask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2368 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2369 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2371 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2373 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2374 tim_int_mask1, vp->vpath->vp_id);
2376 val64 = readq(&hldev->common_reg->tim_int_mask0);
2378 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2379 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2380 writeq((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2381 tim_int_mask0[VXGE_HW_VPATH_INTR_RX] | val64),
2382 &hldev->common_reg->tim_int_mask0);
2385 val64 = readl(&hldev->common_reg->tim_int_mask1);
2387 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2388 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2389 __vxge_hw_pio_mem_write32_upper(
2390 (tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2391 tim_int_mask1[VXGE_HW_VPATH_INTR_RX] | val64),
2392 &hldev->common_reg->tim_int_mask1);
2399 * vxge_hw_vpath_inta_unmask_tx_rx - Unmask Tx and Rx interrupts.
2400 * @vp: Virtual Path handle.
2402 * Unmask Tx and Rx vpath interrupts.
2404 * See also: vxge_hw_vpath_inta_mask_tx_rx()
2406 void vxge_hw_vpath_inta_unmask_tx_rx(struct __vxge_hw_vpath_handle *vp)
2408 u64 tim_int_mask0[4] = {[0 ...3] = 0};
2409 u32 tim_int_mask1[4] = {[0 ...3] = 0};
2411 struct __vxge_hw_device *hldev = vp->vpath->hldev;
2413 VXGE_HW_DEVICE_TIM_INT_MASK_SET(tim_int_mask0,
2414 tim_int_mask1, vp->vpath->vp_id);
2416 val64 = readq(&hldev->common_reg->tim_int_mask0);
2418 if ((tim_int_mask0[VXGE_HW_VPATH_INTR_TX] != 0) ||
2419 (tim_int_mask0[VXGE_HW_VPATH_INTR_RX] != 0)) {
2420 writeq((~(tim_int_mask0[VXGE_HW_VPATH_INTR_TX] |
2421 tim_int_mask0[VXGE_HW_VPATH_INTR_RX])) & val64,
2422 &hldev->common_reg->tim_int_mask0);
2425 if ((tim_int_mask1[VXGE_HW_VPATH_INTR_TX] != 0) ||
2426 (tim_int_mask1[VXGE_HW_VPATH_INTR_RX] != 0)) {
2427 __vxge_hw_pio_mem_write32_upper(
2428 (~(tim_int_mask1[VXGE_HW_VPATH_INTR_TX] |
2429 tim_int_mask1[VXGE_HW_VPATH_INTR_RX])) & val64,
2430 &hldev->common_reg->tim_int_mask1);
2437 * vxge_hw_vpath_poll_rx - Poll Rx Virtual Path for completed
2438 * descriptors and process the same.
2439 * @ring: Handle to the ring object used for receive
2441 * The function polls the Rx for the completed descriptors and calls
2442 * the driver via the supplied completion callback.
2444 * Returns: VXGE_HW_OK, if the polling is completed successfully.
2445 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2446 * descriptors available which are yet to be processed.
2448 * See also: vxge_hw_vpath_poll_tx()
2450 enum vxge_hw_status vxge_hw_vpath_poll_rx(struct __vxge_hw_ring *ring)
2453 enum vxge_hw_status status = VXGE_HW_OK;
2460 status = vxge_hw_ring_rxd_next_completed(ring, &first_rxdh, &t_code);
2461 if (status == VXGE_HW_OK)
2462 ring->callback(ring, first_rxdh,
2463 t_code, ring->channel.userdata);
2465 if (ring->cmpl_cnt != 0) {
2466 ring->doorbell_cnt += ring->cmpl_cnt;
2467 if (ring->doorbell_cnt >= ring->rxds_limit) {
2469 * Each RxD is 4 qwords; update the number of
2470 * qwords replenished
2472 new_count = (ring->doorbell_cnt * 4);
2474 /* For each block add 4 more qwords */
2475 ring->total_db_cnt += ring->doorbell_cnt;
2476 if (ring->total_db_cnt >= ring->rxds_per_block) {
2478 /* Reset total count */
2479 ring->total_db_cnt %= ring->rxds_per_block;
2481 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(new_count),
2482 &ring->vp_reg->prc_rxd_doorbell);
2484 readl(&ring->common_reg->titan_general_int_status);
2485 ring->doorbell_cnt = 0;
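/*
 * Editor's worked example: with rxds_limit = 16 and 16 freshly posted
 * RxDs, new_count above starts at 16 * 4 = 64 qwords, and whenever
 * total_db_cnt crosses rxds_per_block the per-block extra qwords noted
 * above are added before the doorbell write.
 */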
2493 * vxge_hw_vpath_poll_tx - Poll Tx for completed descriptors and process
2495 * @fifo: Handle to the fifo object used for non offload send
2497 * The function polls the Tx for the completed descriptors and calls
2498 * the driver via the supplied completion callback.
2500 * Returns: VXGE_HW_OK, if the polling is completed successfully.
2501 * VXGE_HW_COMPLETIONS_REMAIN: There are still more completed
2502 * descriptors available which are yet to be processed.
2504 * See also: vxge_hw_vpath_poll_rx().
2506 enum vxge_hw_status vxge_hw_vpath_poll_tx(struct __vxge_hw_fifo *fifo,
2507 struct sk_buff ***skb_ptr, int nr_skb,
2510 enum vxge_hw_fifo_tcode t_code;
2512 enum vxge_hw_status status = VXGE_HW_OK;
2513 struct __vxge_hw_channel *channel;
2515 channel = &fifo->channel;
2517 status = vxge_hw_fifo_txdl_next_completed(fifo,
2518 &first_txdlh, &t_code);
2519 if (status == VXGE_HW_OK)
2520 if (fifo->callback(fifo, first_txdlh, t_code,
2521 channel->userdata, skb_ptr, nr_skb, more) != VXGE_HW_OK)
2522 status = VXGE_HW_COMPLETIONS_REMAIN;