vxge: serialize access to steering control register
[linux-2.6.git] drivers/net/vxge/vxge-config.c
1 /******************************************************************************
2  * This software may be used and distributed according to the terms of
3  * the GNU General Public License (GPL), incorporated herein by reference.
4  * Drivers based on or derived from this code fall under the GPL and must
5  * retain the authorship, copyright and license notice.  This file is not
6  * a complete program and may only be used when the entire operating
7  * system is licensed under the GPL.
8  * See the file COPYING in this distribution for more information.
9  *
10  * vxge-config.c: Driver for Exar Corp's X3100 Series 10GbE PCIe I/O
11  *                Virtualized Server Adapter.
12  * Copyright(c) 2002-2010 Exar Corp.
13  ******************************************************************************/
14 #include <linux/vmalloc.h>
15 #include <linux/etherdevice.h>
16 #include <linux/pci.h>
17 #include <linux/pci_hotplug.h>
18 #include <linux/slab.h>
19
20 #include "vxge-traffic.h"
21 #include "vxge-config.h"
22 #include "vxge-main.h"
23
24 static enum vxge_hw_status
25 __vxge_hw_fifo_create(
26         struct __vxge_hw_vpath_handle *vpath_handle,
27         struct vxge_hw_fifo_attr *attr);
28
29 static enum vxge_hw_status
30 __vxge_hw_fifo_abort(
31         struct __vxge_hw_fifo *fifoh);
32
33 static enum vxge_hw_status
34 __vxge_hw_fifo_reset(
35         struct __vxge_hw_fifo *ringh);
36
37 static enum vxge_hw_status
38 __vxge_hw_fifo_delete(
39         struct __vxge_hw_vpath_handle *vpath_handle);
40
41 static struct __vxge_hw_blockpool_entry *
42 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
43                         u32 size);
44
45 static void
46 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev,
47                         struct __vxge_hw_blockpool_entry *entry);
48
49 static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
50                                         void *block_addr,
51                                         u32 length,
52                                         struct pci_dev *dma_h,
53                                         struct pci_dev *acc_handle);
54
55 static enum vxge_hw_status
56 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
57                         struct __vxge_hw_blockpool  *blockpool,
58                         u32 pool_size,
59                         u32 pool_max);
60
61 static void
62 __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool  *blockpool);
63
64 static void *
65 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev,
66                         u32 size,
67                         struct vxge_hw_mempool_dma *dma_object);
68
69 static void
70 __vxge_hw_blockpool_free(struct __vxge_hw_device *hldev,
71                         void *memblock,
72                         u32 size,
73                         struct vxge_hw_mempool_dma *dma_object);
74
75
76 static struct __vxge_hw_channel*
77 __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
78                         enum __vxge_hw_channel_type type, u32 length,
79                         u32 per_dtr_space, void *userdata);
80
81 static void
82 __vxge_hw_channel_free(
83         struct __vxge_hw_channel *channel);
84
85 static enum vxge_hw_status
86 __vxge_hw_channel_initialize(
87         struct __vxge_hw_channel *channel);
88
89 static enum vxge_hw_status
90 __vxge_hw_channel_reset(
91         struct __vxge_hw_channel *channel);
92
93 static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp);
94
95 static enum vxge_hw_status
96 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config);
97
98 static enum vxge_hw_status
99 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config);
100
101 static void
102 __vxge_hw_device_id_get(struct __vxge_hw_device *hldev);
103
104 static void
105 __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev);
106
107 static enum vxge_hw_status
108 __vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
109
110 static void
111 __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev);
112
113 static enum vxge_hw_status
114 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev);
115
116 static enum vxge_hw_status
117 __vxge_hw_device_register_poll(
118         void __iomem    *reg,
119         u64 mask, u32 max_millis);
120
121 static inline enum vxge_hw_status
122 __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
123                           u64 mask, u32 max_millis)
124 {
125         __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
126         wmb();
127
128         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
129         wmb();
130
131         return  __vxge_hw_device_register_poll(addr, mask, max_millis);
132 }
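/* Note: the 64-bit command write above is issued as two 32-bit PIO writes
 * with wmb() between them, and the helper then polls the same register
 * until the hardware clears the bits in @mask (typically a strobe bit),
 * bounded by @max_millis.  An illustrative call, mirroring the
 * steering-control usage later in this file, looks like:
 *
 *     status = __vxge_hw_pio_mem_write64(cmd,
 *                     &vp_reg->rts_access_steer_ctrl,
 *                     VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
 *                     VXGE_HW_DEF_DEVICE_POLL_MILLIS);
 */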
133
134 static struct vxge_hw_mempool*
135 __vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
136                          u32 item_size, u32 private_size, u32 items_initial,
137                          u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
138                          void *userdata);
139 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
140
141 static enum vxge_hw_status
142 __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
143                           struct vxge_hw_vpath_stats_hw_info *hw_stats);
144
145 static enum vxge_hw_status
146 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
147
148 static enum vxge_hw_status
149 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg);
150
151 static enum vxge_hw_status
152 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
153
154
155 static enum vxge_hw_status
156 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id);
157
158
159 static enum vxge_hw_status
160 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
161
162 static void
163 __vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id);
164
165 static enum vxge_hw_status
166 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
167                              u32 operation, u32 offset, u64 *stat);
168
169 static enum vxge_hw_status
170 __vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath  *vpath,
171                                   struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
172
173 static enum vxge_hw_status
174 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath  *vpath,
175                                   struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats);
176
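/*
 * vxge_hw_vpath_set_zero_rx_frm_len - Set the max Rx frame length to zero
 * Clearing the RTS_MAX_FRM_LEN field of RXMAC_VCFG0 effectively stops the
 * vpath from accepting new frames, so no fresh traffic enters the ring
 * while the caller waits for it to drain.  The read back of the register
 * is presumably there to flush the posted write before polling starts.
 */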
177 static void
178 vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
179 {
180         u64 val64;
181
182         val64 = readq(&vp_reg->rxmac_vcfg0);
183         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
184         writeq(val64, &vp_reg->rxmac_vcfg0);
185         val64 = readq(&vp_reg->rxmac_vcfg0);
186
187         return;
188 }
189
190 /*
191  * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
192  */
193 int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
194 {
195         struct vxge_hw_vpath_reg __iomem *vp_reg;
196         struct __vxge_hw_virtualpath *vpath;
197         u64 val64, rxd_count, rxd_spat;
198         int count = 0, total_count = 0;
199
200         vpath = &hldev->virtual_paths[vp_id];
201         vp_reg = vpath->vp_reg;
202
203         vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
204
205         /* Check that the ring controller for this vpath has enough free RxDs
206          * to send frames to the host.  This is done by reading the
207          * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
208          * RXD_SPAT value for the vpath.
209          */
210         val64 = readq(&vp_reg->prc_cfg6);
211         rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
212         /* Use a factor of 2 when comparing rxd_count against rxd_spat to
213          * allow some headroom.
214          */
215         rxd_spat *= 2;
216
217         do {
218                 mdelay(1);
219
220                 rxd_count = readq(&vp_reg->prc_rxd_doorbell);
221
222                 /* Check that the ring controller for this vpath does
223                  * not have any frame in its pipeline.
224                  */
225                 val64 = readq(&vp_reg->frm_in_progress_cnt);
226                 if ((rxd_count <= rxd_spat) || (val64 > 0))
227                         count = 0;
228                 else
229                         count++;
230                 total_count++;
231         } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
232                         (total_count < VXGE_HW_MAX_POLLING_COUNT));
233
234         if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
235                 printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
236                         __func__);
237
238         return total_count;
239 }
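/* The return value is the number of 1 ms polls spent in the loop above;
 * vxge_hw_device_wait_receive_idle() accumulates it so that the total
 * wait across all vpaths stays bounded by VXGE_HW_MAX_POLLING_COUNT.
 */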
240
241 /* vxge_hw_device_wait_receive_idle - This function waits until all frames
242  * stored in the frame buffer for each vpath assigned to the given
243  * function (hldev) have been sent to the host.
244  */
245 void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
246 {
247         int i, total_count = 0;
248
249         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
250                 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
251                         continue;
252
253                 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
254                 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
255                         break;
256         }
257 }
258
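/*
 * vxge_hw_vpath_fw_api - Perform a firmware request through the RTS
 * access steering registers.
 * All firmware requests funnel through this helper: data0/data1 carry the
 * request on entry and the reply on return, and steer_ctrl is OR-ed into
 * the command written to rts_access_steer_ctrl.  When the vpath is open,
 * vpath->lock is taken so that concurrent callers cannot interleave their
 * accesses to the shared steering control register; that is also why
 * fewer retries are allowed and no sleeping is done while the lock is held.
 */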
259 static enum vxge_hw_status
260 vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
261                      u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
262                      u64 *steer_ctrl)
263 {
264         struct vxge_hw_vpath_reg __iomem *vp_reg;
265         enum vxge_hw_status status;
266         u64 val64;
267         u32 retry = 0, max_retry = 100;
268
269         vp_reg = vpath->vp_reg;
270
271         if (vpath->vp_open) {
272                 max_retry = 3;
273                 spin_lock(&vpath->lock);
274         }
275
276         writeq(*data0, &vp_reg->rts_access_steer_data0);
277         writeq(*data1, &vp_reg->rts_access_steer_data1);
278         wmb();
279
280         val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
281                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
282                 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
283                 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
284                 *steer_ctrl;
285
286         status = __vxge_hw_pio_mem_write64(val64,
287                                            &vp_reg->rts_access_steer_ctrl,
288                                            VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
289                                            VXGE_HW_DEF_DEVICE_POLL_MILLIS);
290
291         /* The __vxge_hw_device_register_poll can udelay for a significant
292          * amount of time, blocking other processes from the CPU.  If it
293          * delays for ~5 seconds, an NMI error can occur.  A way around this
294          * is to give up the processor via msleep, but that is not allowed
295          * while the vpath lock is held.  So, when the vpath is open, limit
296          * the number of retries instead of sleeping; otherwise sleep between
297          * polls until the firmware operation has completed or timed out.
298          */
299         while ((status != VXGE_HW_OK) && retry++ < max_retry) {
300                 if (!vpath->vp_open)
301                         msleep(20);
302                 status = __vxge_hw_device_register_poll(
303                                         &vp_reg->rts_access_steer_ctrl,
304                                         VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
305                                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
306         }
307
308         if (status != VXGE_HW_OK)
309                 goto out;
310
311         val64 = readq(&vp_reg->rts_access_steer_ctrl);
312         if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
313                 *data0 = readq(&vp_reg->rts_access_steer_data0);
314                 *data1 = readq(&vp_reg->rts_access_steer_data1);
315                 *steer_ctrl = val64;
316         } else
317                 status = VXGE_HW_FAIL;
318
319 out:
320         if (vpath->vp_open)
321                 spin_unlock(&vpath->lock);
322         return status;
323 }
324
325 /*
326  * __vxge_hw_channel_allocate - Allocate memory for channel
327  * This function allocates the required memory for the channel and the
328  * various arrays in the channel
329  */
330 struct __vxge_hw_channel*
331 __vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
332                            enum __vxge_hw_channel_type type,
333         u32 length, u32 per_dtr_space, void *userdata)
334 {
335         struct __vxge_hw_channel *channel;
336         struct __vxge_hw_device *hldev;
337         int size = 0;
338         u32 vp_id;
339
340         hldev = vph->vpath->hldev;
341         vp_id = vph->vpath->vp_id;
342
343         switch (type) {
344         case VXGE_HW_CHANNEL_TYPE_FIFO:
345                 size = sizeof(struct __vxge_hw_fifo);
346                 break;
347         case VXGE_HW_CHANNEL_TYPE_RING:
348                 size = sizeof(struct __vxge_hw_ring);
349                 break;
350         default:
351                 break;
352         }
353
354         channel = kzalloc(size, GFP_KERNEL);
355         if (channel == NULL)
356                 goto exit0;
357         INIT_LIST_HEAD(&channel->item);
358
359         channel->common_reg = hldev->common_reg;
360         channel->first_vp_id = hldev->first_vp_id;
361         channel->type = type;
362         channel->devh = hldev;
363         channel->vph = vph;
364         channel->userdata = userdata;
365         channel->per_dtr_space = per_dtr_space;
366         channel->length = length;
367         channel->vp_id = vp_id;
368
369         channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
370         if (channel->work_arr == NULL)
371                 goto exit1;
372
373         channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
374         if (channel->free_arr == NULL)
375                 goto exit1;
376         channel->free_ptr = length;
377
378         channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
379         if (channel->reserve_arr == NULL)
380                 goto exit1;
381         channel->reserve_ptr = length;
382         channel->reserve_top = 0;
383
384         channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
385         if (channel->orig_arr == NULL)
386                 goto exit1;
387
388         return channel;
389 exit1:
390         __vxge_hw_channel_free(channel);
391
392 exit0:
393         return NULL;
394 }
395
396 /*
397  * __vxge_hw_channel_free - Free memory allocated for channel
398  * This function deallocates memory from the channel and various arrays
399  * in the channel
400  */
401 void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
402 {
403         kfree(channel->work_arr);
404         kfree(channel->free_arr);
405         kfree(channel->reserve_arr);
406         kfree(channel->orig_arr);
407         kfree(channel);
408 }
409
410 /*
411  * __vxge_hw_channel_initialize - Initialize a channel
412  * This function initializes a channel by properly setting the
413  * various references
414  */
415 enum vxge_hw_status
416 __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
417 {
418         u32 i;
419         struct __vxge_hw_virtualpath *vpath;
420
421         vpath = channel->vph->vpath;
422
423         if ((channel->reserve_arr != NULL) && (channel->orig_arr != NULL)) {
424                 for (i = 0; i < channel->length; i++)
425                         channel->orig_arr[i] = channel->reserve_arr[i];
426         }
427
428         switch (channel->type) {
429         case VXGE_HW_CHANNEL_TYPE_FIFO:
430                 vpath->fifoh = (struct __vxge_hw_fifo *)channel;
431                 channel->stats = &((struct __vxge_hw_fifo *)
432                                 channel)->stats->common_stats;
433                 break;
434         case VXGE_HW_CHANNEL_TYPE_RING:
435                 vpath->ringh = (struct __vxge_hw_ring *)channel;
436                 channel->stats = &((struct __vxge_hw_ring *)
437                                 channel)->stats->common_stats;
438                 break;
439         default:
440                 break;
441         }
442
443         return VXGE_HW_OK;
444 }
445
446 /*
447  * __vxge_hw_channel_reset - Resets a channel
448  * This function resets a channel by properly setting the various references
449  */
450 enum vxge_hw_status
451 __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
452 {
453         u32 i;
454
455         for (i = 0; i < channel->length; i++) {
456                 if (channel->reserve_arr != NULL)
457                         channel->reserve_arr[i] = channel->orig_arr[i];
458                 if (channel->free_arr != NULL)
459                         channel->free_arr[i] = NULL;
460                 if (channel->work_arr != NULL)
461                         channel->work_arr[i] = NULL;
462         }
463         channel->free_ptr = channel->length;
464         channel->reserve_ptr = channel->length;
465         channel->reserve_top = 0;
466         channel->post_index = 0;
467         channel->compl_index = 0;
468
469         return VXGE_HW_OK;
470 }
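/* Roughly, a channel tracks its descriptors through three arrays:
 * reserve_arr holds descriptors available for the driver to reserve,
 * work_arr those currently posted to the hardware, and free_arr those
 * completed and waiting to be recycled.  orig_arr preserves the original
 * descriptor order so that a reset (above) can restore reserve_arr to a
 * known state.
 */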
471
472 /*
473  * __vxge_hw_device_pci_e_init
474  * Initialize certain PCI/PCI-X configuration registers
475  * with recommended values. Save config space for future hw resets.
476  */
477 void
478 __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
479 {
480         u16 cmd = 0;
481
482         /* Set the PErr Response bit and SERR in the PCI command register. */
483         pci_read_config_word(hldev->pdev, PCI_COMMAND, &cmd);
484         cmd |= 0x140;   /* PCI_COMMAND_PARITY | PCI_COMMAND_SERR */
485         pci_write_config_word(hldev->pdev, PCI_COMMAND, cmd);
486
487         pci_save_state(hldev->pdev);
488 }
489
490 /*
491  * __vxge_hw_device_register_poll
492  * Poll the given register for up to the specified amount of time,
493  * returning success once the masked bit(s) are cleared.
494  */
495 static enum vxge_hw_status
496 __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
497 {
498         u64 val64;
499         u32 i = 0;
500         enum vxge_hw_status ret = VXGE_HW_FAIL;
501
502         udelay(10);
503
504         do {
505                 val64 = readq(reg);
506                 if (!(val64 & mask))
507                         return VXGE_HW_OK;
508                 udelay(100);
509         } while (++i <= 9);
510
511         i = 0;
512         do {
513                 val64 = readq(reg);
514                 if (!(val64 & mask))
515                         return VXGE_HW_OK;
516                 mdelay(1);
517         } while (++i <= max_millis);
518
519         return ret;
520 }
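/* The poll above is two-phased: roughly 1 ms of short udelay(100) probes
 * for commands that complete quickly, then one mdelay(1) per iteration up
 * to max_millis for slower operations.  Both phases busy-wait, which is
 * why callers that are allowed to sleep (see vxge_hw_vpath_fw_api) prefer
 * msleep between polls when no lock is held.
 */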
521
522 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
523  * in progress
524  * This routine polls the vpath reset-in-progress register until it reads zero
525  */
526 static enum vxge_hw_status
527 __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
528 {
529         enum vxge_hw_status status;
530         status = __vxge_hw_device_register_poll(vpath_rst_in_prog,
531                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(0x1ffff),
532                         VXGE_HW_DEF_DEVICE_POLL_MILLIS);
533         return status;
534 }
535
536 /*
537  * __vxge_hw_device_toc_get
538  * This routine sets the swapper and reads the toc pointer and returns the
539  * memory mapped address of the toc
540  */
541 static struct vxge_hw_toc_reg __iomem *
542 __vxge_hw_device_toc_get(void __iomem *bar0)
543 {
544         u64 val64;
545         struct vxge_hw_toc_reg __iomem *toc = NULL;
546         enum vxge_hw_status status;
547
548         struct vxge_hw_legacy_reg __iomem *legacy_reg =
549                 (struct vxge_hw_legacy_reg __iomem *)bar0;
550
551         status = __vxge_hw_legacy_swapper_set(legacy_reg);
552         if (status != VXGE_HW_OK)
553                 goto exit;
554
555         val64 = readq(&legacy_reg->toc_first_pointer);
556         toc = (struct vxge_hw_toc_reg __iomem *)(bar0+val64);
557 exit:
558         return toc;
559 }
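/* The legacy register block sits at the start of BAR0; once the swapper
 * is configured, toc_first_pointer gives the byte offset of the TOC
 * (table of contents) within BAR0, and the TOC in turn holds the offsets
 * of every other register space (see __vxge_hw_device_reg_addr_get below).
 */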
560
561 /*
562  * __vxge_hw_device_reg_addr_get
563  * This routine sets the swapper and reads the toc pointer and initializes the
564  * register location pointers in the device object. It also waits until the
565  * ric has completed initializing the registers.
566  */
567 enum vxge_hw_status
568 __vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
569 {
570         u64 val64;
571         u32 i;
572         enum vxge_hw_status status = VXGE_HW_OK;
573
574         hldev->legacy_reg = (struct vxge_hw_legacy_reg __iomem *)hldev->bar0;
575
576         hldev->toc_reg = __vxge_hw_device_toc_get(hldev->bar0);
577         if (hldev->toc_reg  == NULL) {
578                 status = VXGE_HW_FAIL;
579                 goto exit;
580         }
581
582         val64 = readq(&hldev->toc_reg->toc_common_pointer);
583         hldev->common_reg =
584         (struct vxge_hw_common_reg __iomem *)(hldev->bar0 + val64);
585
586         val64 = readq(&hldev->toc_reg->toc_mrpcim_pointer);
587         hldev->mrpcim_reg =
588                 (struct vxge_hw_mrpcim_reg __iomem *)(hldev->bar0 + val64);
589
590         for (i = 0; i < VXGE_HW_TITAN_SRPCIM_REG_SPACES; i++) {
591                 val64 = readq(&hldev->toc_reg->toc_srpcim_pointer[i]);
592                 hldev->srpcim_reg[i] =
593                         (struct vxge_hw_srpcim_reg __iomem *)
594                                 (hldev->bar0 + val64);
595         }
596
597         for (i = 0; i < VXGE_HW_TITAN_VPMGMT_REG_SPACES; i++) {
598                 val64 = readq(&hldev->toc_reg->toc_vpmgmt_pointer[i]);
599                 hldev->vpmgmt_reg[i] =
600                 (struct vxge_hw_vpmgmt_reg __iomem *)(hldev->bar0 + val64);
601         }
602
603         for (i = 0; i < VXGE_HW_TITAN_VPATH_REG_SPACES; i++) {
604                 val64 = readq(&hldev->toc_reg->toc_vpath_pointer[i]);
605                 hldev->vpath_reg[i] =
606                         (struct vxge_hw_vpath_reg __iomem *)
607                                 (hldev->bar0 + val64);
608         }
609
610         val64 = readq(&hldev->toc_reg->toc_kdfc);
611
612         switch (VXGE_HW_TOC_GET_KDFC_INITIAL_BIR(val64)) {
613         case 0:
614                 hldev->kdfc = (u8 __iomem *)(hldev->bar0 +
615                         VXGE_HW_TOC_GET_KDFC_INITIAL_OFFSET(val64));
616                 break;
617         default:
618                 break;
619         }
620
621         status = __vxge_hw_device_vpath_reset_in_prog_check(
622                         (u64 __iomem *)&hldev->common_reg->vpath_rst_in_prog);
623 exit:
624         return status;
625 }
626
627 /*
628  * __vxge_hw_device_id_get
629  * This routine reads the device id and revision numbers and stores them in
630  * the device structure
631  */
632 void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
633 {
634         u64 val64;
635
636         val64 = readq(&hldev->common_reg->titan_asic_id);
637         hldev->device_id =
638                 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
639
640         hldev->major_revision =
641                 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
642
643         hldev->minor_revision =
644                 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
645 }
646
647 /*
648  * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
649  * This routine returns the Access Rights of the driver
650  */
651 static u32
652 __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
653 {
654         u32 access_rights = VXGE_HW_DEVICE_ACCESS_RIGHT_VPATH;
655
656         switch (host_type) {
657         case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
658                 if (func_id == 0) {
659                         access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
660                                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
661                 }
662                 break;
663         case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
664                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
665                                 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
666                 break;
667         case VXGE_HW_NO_MR_SR_VH0_FUNCTION0:
668                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
669                                 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
670                 break;
671         case VXGE_HW_NO_MR_SR_VH0_VIRTUAL_FUNCTION:
672         case VXGE_HW_SR_VH_VIRTUAL_FUNCTION:
673         case VXGE_HW_MR_SR_VH0_INVALID_CONFIG:
674                 break;
675         case VXGE_HW_SR_VH_FUNCTION0:
676         case VXGE_HW_VH_NORMAL_FUNCTION:
677                 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
678                 break;
679         }
680
681         return access_rights;
682 }
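/* In short: every function may touch its own vpath registers; only the
 * privileged function (e.g. function 0 of a normal host type) additionally
 * gains MRPCIM and SRPCIM access, which gates the device-wide operations
 * further down in this file.
 */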
683 /*
684  * __vxge_hw_device_is_privilaged
685  * This routine checks whether the device function is privileged or not
686  */
687
688 enum vxge_hw_status
689 __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
690 {
691         if (__vxge_hw_device_access_rights_get(host_type,
692                 func_id) &
693                 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
694                 return VXGE_HW_OK;
695         else
696                 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
697 }
698
699 /*
700  * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
701  * Returns the function number of the vpath.
702  */
703 static u32
704 __vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
705 {
706         u64 val64;
707
708         val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
709
710         return
711          (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
712 }
713
714 /*
715  * __vxge_hw_device_host_info_get
716  * This routine returns the host type assignments
717  */
718 static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
719 {
720         u64 val64;
721         u32 i;
722
723         val64 = readq(&hldev->common_reg->host_type_assignments);
724
725         hldev->host_type =
726            (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
727
728         hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
729
730         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
731                 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
732                         continue;
733
734                 hldev->func_id =
735                         __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
736
737                 hldev->access_rights = __vxge_hw_device_access_rights_get(
738                         hldev->host_type, hldev->func_id);
739
740                 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
741                 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
742
743                 hldev->first_vp_id = i;
744                 break;
745         }
746 }
747
748 /*
749  * __vxge_hw_verify_pci_e_info - Validate the pci-e link parameters such as
750  * link width and signalling rate.
751  */
752 static enum vxge_hw_status
753 __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
754 {
755         int exp_cap;
756         u16 lnk;
757
758         /* Get the negotiated link width and speed from PCI config space */
759         exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
760         pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
761
762         if ((lnk & PCI_EXP_LNKSTA_CLS) != 1)
763                 return VXGE_HW_ERR_INVALID_PCI_INFO;
764
765         switch ((lnk & PCI_EXP_LNKSTA_NLW) >> 4) {
766         case PCIE_LNK_WIDTH_RESRV:
767         case PCIE_LNK_X1:
768         case PCIE_LNK_X2:
769         case PCIE_LNK_X4:
770         case PCIE_LNK_X8:
771                 break;
772         default:
773                 return VXGE_HW_ERR_INVALID_PCI_INFO;
774         }
775
776         return VXGE_HW_OK;
777 }
778
779 /*
780  * __vxge_hw_device_initialize
781  * Initialize Titan-V hardware.
782  */
783 enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
784 {
785         enum vxge_hw_status status = VXGE_HW_OK;
786
787         if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
788                                 hldev->func_id)) {
789                 /* Validate the pci-e link width and speed */
790                 status = __vxge_hw_verify_pci_e_info(hldev);
791                 if (status != VXGE_HW_OK)
792                         goto exit;
793         }
794
795 exit:
796         return status;
797 }
798
799 /*
800  * __vxge_hw_vpath_fw_ver_get - Get the fw version
801  * Returns FW Version
802  */
803 static enum vxge_hw_status
804 __vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
805                            struct vxge_hw_device_hw_info *hw_info)
806 {
807         struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
808         struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
809         struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
810         struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
811         u64 data0, data1 = 0, steer_ctrl = 0;
812         enum vxge_hw_status status;
813
814         status = vxge_hw_vpath_fw_api(vpath,
815                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
816                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
817                         0, &data0, &data1, &steer_ctrl);
818         if (status != VXGE_HW_OK)
819                 goto exit;
820
821         fw_date->day =
822             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
823         fw_date->month =
824             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
825         fw_date->year =
826             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
827
828         snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
829                  fw_date->month, fw_date->day, fw_date->year);
830
831         fw_version->major =
832             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
833         fw_version->minor =
834             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
835         fw_version->build =
836             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
837
838         snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
839                  fw_version->major, fw_version->minor, fw_version->build);
840
841         flash_date->day =
842             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
843         flash_date->month =
844             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
845         flash_date->year =
846             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
847
848         snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
849                  flash_date->month, flash_date->day, flash_date->year);
850
851         flash_version->major =
852             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
853         flash_version->minor =
854             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
855         flash_version->build =
856             (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
857
858         snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
859                  flash_version->major, flash_version->minor,
860                  flash_version->build);
861
862 exit:
863         return status;
864 }
865
866 /*
867  * __vxge_hw_vpath_card_info_get - Get the serial numbers,
868  * part number and product description.
869  */
870 static enum vxge_hw_status
871 __vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
872                               struct vxge_hw_device_hw_info *hw_info)
873 {
874         enum vxge_hw_status status;
875         u64 data0, data1 = 0, steer_ctrl = 0;
876         u8 *serial_number = hw_info->serial_number;
877         u8 *part_number = hw_info->part_number;
878         u8 *product_desc = hw_info->product_desc;
879         u32 i, j = 0;
880
881         data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
882
883         status = vxge_hw_vpath_fw_api(vpath,
884                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
885                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
886                         0, &data0, &data1, &steer_ctrl);
887         if (status != VXGE_HW_OK)
888                 return status;
889
890         ((u64 *)serial_number)[0] = be64_to_cpu(data0);
891         ((u64 *)serial_number)[1] = be64_to_cpu(data1);
892
893         data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
894         data1 = steer_ctrl = 0;
895
896         status = vxge_hw_vpath_fw_api(vpath,
897                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
898                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
899                         0, &data0, &data1, &steer_ctrl);
900         if (status != VXGE_HW_OK)
901                 return status;
902
903         ((u64 *)part_number)[0] = be64_to_cpu(data0);
904         ((u64 *)part_number)[1] = be64_to_cpu(data1);
905
906         for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
907              i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
908                 data0 = i;
909                 data1 = steer_ctrl = 0;
910
911                 status = vxge_hw_vpath_fw_api(vpath,
912                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
913                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
914                         0, &data0, &data1, &steer_ctrl);
915                 if (status != VXGE_HW_OK)
916                         return status;
917
918                 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
919                 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
920         }
921
922         return status;
923 }
924
925 /*
926  * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
927  * Returns pci function mode
928  */
929 static u64
930 __vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath)
931 {
932         u64 data0, data1 = 0, steer_ctrl = 0;
933         enum vxge_hw_status status;
934
935         data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE;
936
937         status = vxge_hw_vpath_fw_api(vpath,
938                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
939                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
940                         0, &data0, &data1, &steer_ctrl);
941
942         return data0;
943 }
944
945 /*
946  * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
947  *               from MAC address table.
948  */
949 static enum vxge_hw_status
950 __vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
951                          u8 *macaddr, u8 *macaddr_mask)
952 {
953         u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
954             data0 = 0, data1 = 0, steer_ctrl = 0;
955         enum vxge_hw_status status;
956         int i;
957
958         do {
959                 status = vxge_hw_vpath_fw_api(vpath, action,
960                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
961                         0, &data0, &data1, &steer_ctrl);
962                 if (status != VXGE_HW_OK)
963                         goto exit;
964
965                 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
966                 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
967                                                                         data1);
968
969                 for (i = ETH_ALEN; i > 0; i--) {
970                         macaddr[i - 1] = (u8) (data0 & 0xFF);
971                         data0 >>= 8;
972
973                         macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
974                         data1 >>= 8;
975                 }
976
977                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
978                 data0 = 0, data1 = 0, steer_ctrl = 0;
979
980         } while (!is_valid_ether_addr(macaddr));
981 exit:
982         return status;
983 }
984
985 /**
986  * vxge_hw_device_hw_info_get - Get the hw information
987  * Returns the vpath mask that has the bits set for each vpath allocated
988  * for the driver, FW version information and the first mac address of
989  * each vpath
990  */
991 enum vxge_hw_status __devinit
992 vxge_hw_device_hw_info_get(void __iomem *bar0,
993                            struct vxge_hw_device_hw_info *hw_info)
994 {
995         u32 i;
996         u64 val64;
997         struct vxge_hw_toc_reg __iomem *toc;
998         struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
999         struct vxge_hw_common_reg __iomem *common_reg;
1000         struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
1001         enum vxge_hw_status status;
1002         struct __vxge_hw_virtualpath vpath;
1003
1004         memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
1005
1006         toc = __vxge_hw_device_toc_get(bar0);
1007         if (toc == NULL) {
1008                 status = VXGE_HW_ERR_CRITICAL;
1009                 goto exit;
1010         }
1011
1012         val64 = readq(&toc->toc_common_pointer);
1013         common_reg = (struct vxge_hw_common_reg __iomem *)(bar0 + val64);
1014
1015         status = __vxge_hw_device_vpath_reset_in_prog_check(
1016                 (u64 __iomem *)&common_reg->vpath_rst_in_prog);
1017         if (status != VXGE_HW_OK)
1018                 goto exit;
1019
1020         hw_info->vpath_mask = readq(&common_reg->vpath_assignments);
1021
1022         val64 = readq(&common_reg->host_type_assignments);
1023
1024         hw_info->host_type =
1025            (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
1026
1027         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1028
1029                 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1030                         continue;
1031
1032                 val64 = readq(&toc->toc_vpmgmt_pointer[i]);
1033
1034                 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
1035                                 (bar0 + val64);
1036
1037                 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
1038                 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
1039                         hw_info->func_id) &
1040                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
1041
1042                         val64 = readq(&toc->toc_mrpcim_pointer);
1043
1044                         mrpcim_reg = (struct vxge_hw_mrpcim_reg __iomem *)
1045                                         (bar0 + val64);
1046
1047                         writeq(0, &mrpcim_reg->xgmac_gen_fw_memo_mask);
1048                         wmb();
1049                 }
1050
1051                 val64 = readq(&toc->toc_vpath_pointer[i]);
1052
1053                 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1054                                (bar0 + val64);
1055                 vpath.vp_open = 0;
1056
1057                 hw_info->function_mode =
1058                         __vxge_hw_vpath_pci_func_mode_get(&vpath);
1059
1060                 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
1061                 if (status != VXGE_HW_OK)
1062                         goto exit;
1063
1064                 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
1065                 if (status != VXGE_HW_OK)
1066                         goto exit;
1067
1068                 break;
1069         }
1070
1071         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1072                 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
1073                         continue;
1074
1075                 val64 = readq(&toc->toc_vpath_pointer[i]);
1076                 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1077                                (bar0 + val64);
1078                 vpath.vp_open = 0;
1079
1080                 status =  __vxge_hw_vpath_addr_get(&vpath,
1081                                 hw_info->mac_addrs[i],
1082                                 hw_info->mac_addr_masks[i]);
1083                 if (status != VXGE_HW_OK)
1084                         goto exit;
1085         }
1086 exit:
1087         return status;
1088 }
1089
1090 /*
1091  * vxge_hw_device_initialize - Initialize Titan device.
1092  * Note that all the arguments of this public API are 'IN' except @devh,
1093  * which is written on success. The driver cooperates with the OS to find
1094  * the new Titan device and to locate its PCI and memory spaces.
1095  *
1096  * When done, this routine has allocated sizeof(struct __vxge_hw_device) bytes
1097  * for the HW object that is used for the Titan hardware initialization.
1098  */
1099 enum vxge_hw_status __devinit
1100 vxge_hw_device_initialize(
1101         struct __vxge_hw_device **devh,
1102         struct vxge_hw_device_attr *attr,
1103         struct vxge_hw_device_config *device_config)
1104 {
1105         u32 i;
1106         u32 nblocks = 0;
1107         struct __vxge_hw_device *hldev = NULL;
1108         enum vxge_hw_status status = VXGE_HW_OK;
1109
1110         status = __vxge_hw_device_config_check(device_config);
1111         if (status != VXGE_HW_OK)
1112                 goto exit;
1113
1114         hldev = (struct __vxge_hw_device *)
1115                         vmalloc(sizeof(struct __vxge_hw_device));
1116         if (hldev == NULL) {
1117                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1118                 goto exit;
1119         }
1120
1121         memset(hldev, 0, sizeof(struct __vxge_hw_device));
1122         hldev->magic = VXGE_HW_DEVICE_MAGIC;
1123
1124         vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
1125
1126         /* apply config */
1127         memcpy(&hldev->config, device_config,
1128                 sizeof(struct vxge_hw_device_config));
1129
1130         hldev->bar0 = attr->bar0;
1131         hldev->pdev = attr->pdev;
1132
1133         hldev->uld_callbacks.link_up = attr->uld_callbacks.link_up;
1134         hldev->uld_callbacks.link_down = attr->uld_callbacks.link_down;
1135         hldev->uld_callbacks.crit_err = attr->uld_callbacks.crit_err;
1136
1137         __vxge_hw_device_pci_e_init(hldev);
1138
1139         status = __vxge_hw_device_reg_addr_get(hldev);
1140         if (status != VXGE_HW_OK) {
1141                 vfree(hldev);
1142                 goto exit;
1143         }
1144         __vxge_hw_device_id_get(hldev);
1145
1146         __vxge_hw_device_host_info_get(hldev);
1147
1148         /* Incrementing for stats blocks */
1149         nblocks++;
1150
1151         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1152                 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
1153                         continue;
1154
1155                 if (device_config->vp_config[i].ring.enable ==
1156                         VXGE_HW_RING_ENABLE)
1157                         nblocks += device_config->vp_config[i].ring.ring_blocks;
1158
1159                 if (device_config->vp_config[i].fifo.enable ==
1160                         VXGE_HW_FIFO_ENABLE)
1161                         nblocks += device_config->vp_config[i].fifo.fifo_blocks;
1162                 nblocks++;
1163         }
1164
1165         if (__vxge_hw_blockpool_create(hldev,
1166                 &hldev->block_pool,
1167                 device_config->dma_blockpool_initial + nblocks,
1168                 device_config->dma_blockpool_max + nblocks) != VXGE_HW_OK) {
1169
1170                 vxge_hw_device_terminate(hldev);
1171                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1172                 goto exit;
1173         }
1174
1175         status = __vxge_hw_device_initialize(hldev);
1176         if (status != VXGE_HW_OK) {
1177                 vxge_hw_device_terminate(hldev);
1178                 goto exit;
1179         }
1180
1181         *devh = hldev;
1182 exit:
1183         return status;
1184 }
1185
1186 /*
1187  * vxge_hw_device_terminate - Terminate Titan device.
1188  * Terminate HW device.
1189  */
1190 void
1191 vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
1192 {
1193         vxge_assert(hldev->magic == VXGE_HW_DEVICE_MAGIC);
1194
1195         hldev->magic = VXGE_HW_DEVICE_DEAD;
1196         __vxge_hw_blockpool_destroy(&hldev->block_pool);
1197         vfree(hldev);
1198 }
1199
1200 /*
1201  * vxge_hw_device_stats_get - Get the device hw statistics.
1202  * Returns the vpath h/w stats for the device.
1203  */
1204 enum vxge_hw_status
1205 vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
1206                         struct vxge_hw_device_stats_hw_info *hw_stats)
1207 {
1208         u32 i;
1209         enum vxge_hw_status status = VXGE_HW_OK;
1210
1211         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1212                 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
1213                         (hldev->virtual_paths[i].vp_open ==
1214                                 VXGE_HW_VP_NOT_OPEN))
1215                         continue;
1216
1217                 memcpy(hldev->virtual_paths[i].hw_stats_sav,
1218                                 hldev->virtual_paths[i].hw_stats,
1219                                 sizeof(struct vxge_hw_vpath_stats_hw_info));
1220
1221                 status = __vxge_hw_vpath_stats_get(
1222                         &hldev->virtual_paths[i],
1223                         hldev->virtual_paths[i].hw_stats);
1224         }
1225
1226         memcpy(hw_stats, &hldev->stats.hw_dev_info_stats,
1227                         sizeof(struct vxge_hw_device_stats_hw_info));
1228
1229         return status;
1230 }
1231
1232 /*
1233  * vxge_hw_driver_stats_get - Get the device sw statistics.
1234  * Returns the vpath s/w stats for the device.
1235  */
1236 enum vxge_hw_status vxge_hw_driver_stats_get(
1237                         struct __vxge_hw_device *hldev,
1238                         struct vxge_hw_device_stats_sw_info *sw_stats)
1239 {
1240         enum vxge_hw_status status = VXGE_HW_OK;
1241
1242         memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
1243                 sizeof(struct vxge_hw_device_stats_sw_info));
1244
1245         return status;
1246 }
1247
1248 /*
1249  * vxge_hw_mrpcim_stats_access - Access the statistics from the given location
1250  *                           and offset and perform an operation
1251  * Get the statistics from the given location and offset.
1252  */
1253 enum vxge_hw_status
1254 vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
1255                             u32 operation, u32 location, u32 offset, u64 *stat)
1256 {
1257         u64 val64;
1258         enum vxge_hw_status status = VXGE_HW_OK;
1259
1260         status = __vxge_hw_device_is_privilaged(hldev->host_type,
1261                         hldev->func_id);
1262         if (status != VXGE_HW_OK)
1263                 goto exit;
1264
1265         val64 = VXGE_HW_XMAC_STATS_SYS_CMD_OP(operation) |
1266                 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE |
1267                 VXGE_HW_XMAC_STATS_SYS_CMD_LOC_SEL(location) |
1268                 VXGE_HW_XMAC_STATS_SYS_CMD_OFFSET_SEL(offset);
1269
1270         status = __vxge_hw_pio_mem_write64(val64,
1271                                 &hldev->mrpcim_reg->xmac_stats_sys_cmd,
1272                                 VXGE_HW_XMAC_STATS_SYS_CMD_STROBE,
1273                                 hldev->config.device_poll_millis);
1274
1275         if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1276                 *stat = readq(&hldev->mrpcim_reg->xmac_stats_sys_data);
1277         else
1278                 *stat = 0;
1279 exit:
1280         return status;
1281 }
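/* The statistics engine follows the same command/strobe pattern as the
 * steering interface: a command encoding the operation, location and
 * offset is written to xmac_stats_sys_cmd with the STROBE bit set, the
 * strobe is polled until the hardware clears it, and for read operations
 * the result is then fetched from xmac_stats_sys_data.
 */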
1282
1283 /*
1284  * vxge_hw_device_xmac_aggr_stats_get - Get the Statistics on aggregate port
1285  * Get the Statistics on aggregate port
1286  */
1287 static enum vxge_hw_status
1288 vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
1289                                    struct vxge_hw_xmac_aggr_stats *aggr_stats)
1290 {
1291         u64 *val64;
1292         int i;
1293         u32 offset = VXGE_HW_STATS_AGGRn_OFFSET;
1294         enum vxge_hw_status status = VXGE_HW_OK;
1295
1296         val64 = (u64 *)aggr_stats;
1297
1298         status = __vxge_hw_device_is_privilaged(hldev->host_type,
1299                         hldev->func_id);
1300         if (status != VXGE_HW_OK)
1301                 goto exit;
1302
1303         for (i = 0; i < sizeof(struct vxge_hw_xmac_aggr_stats) / 8; i++) {
1304                 status = vxge_hw_mrpcim_stats_access(hldev,
1305                                         VXGE_HW_STATS_OP_READ,
1306                                         VXGE_HW_STATS_LOC_AGGR,
1307                                         ((offset + (104 * port)) >> 3), val64);
1308                 if (status != VXGE_HW_OK)
1309                         goto exit;
1310
1311                 offset += 8;
1312                 val64++;
1313         }
1314 exit:
1315         return status;
1316 }
1317
1318 /*
1319  * vxge_hw_device_xmac_port_stats_get - Get the Statistics on a port
1320  * Get the Statistics on port
1321  */
1322 static enum vxge_hw_status
1323 vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1324                                    struct vxge_hw_xmac_port_stats *port_stats)
1325 {
1326         u64 *val64;
1327         enum vxge_hw_status status = VXGE_HW_OK;
1328         int i;
1329         u32 offset = 0x0;
1330         val64 = (u64 *) port_stats;
1331
1332         status = __vxge_hw_device_is_privilaged(hldev->host_type,
1333                         hldev->func_id);
1334         if (status != VXGE_HW_OK)
1335                 goto exit;
1336
1337         for (i = 0; i < sizeof(struct vxge_hw_xmac_port_stats) / 8; i++) {
1338                 status = vxge_hw_mrpcim_stats_access(hldev,
1339                                         VXGE_HW_STATS_OP_READ,
1340                                         VXGE_HW_STATS_LOC_AGGR,
1341                                         ((offset + (608 * port)) >> 3), val64);
1342                 if (status != VXGE_HW_OK)
1343                         goto exit;
1344
1345                 offset += 8;
1346                 val64++;
1347         }
1348
1349 exit:
1350         return status;
1351 }
1352
1353 /*
1354  * vxge_hw_device_xmac_stats_get - Get the XMAC Statistics
1355  * Get the XMAC Statistics
1356  */
1357 enum vxge_hw_status
1358 vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1359                               struct vxge_hw_xmac_stats *xmac_stats)
1360 {
1361         enum vxge_hw_status status = VXGE_HW_OK;
1362         u32 i;
1363
1364         status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1365                                         0, &xmac_stats->aggr_stats[0]);
1366
1367         if (status != VXGE_HW_OK)
1368                 goto exit;
1369
1370         status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1371                                 1, &xmac_stats->aggr_stats[1]);
1372         if (status != VXGE_HW_OK)
1373                 goto exit;
1374
1375         for (i = 0; i <= VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
1376
1377                 status = vxge_hw_device_xmac_port_stats_get(hldev,
1378                                         i, &xmac_stats->port_stats[i]);
1379                 if (status != VXGE_HW_OK)
1380                         goto exit;
1381         }
1382
1383         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1384
1385                 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
1386                         continue;
1387
1388                 status = __vxge_hw_vpath_xmac_tx_stats_get(
1389                                         &hldev->virtual_paths[i],
1390                                         &xmac_stats->vpath_tx_stats[i]);
1391                 if (status != VXGE_HW_OK)
1392                         goto exit;
1393
1394                 status = __vxge_hw_vpath_xmac_rx_stats_get(
1395                                         &hldev->virtual_paths[i],
1396                                         &xmac_stats->vpath_rx_stats[i]);
1397                 if (status != VXGE_HW_OK)
1398                         goto exit;
1399         }
1400 exit:
1401         return status;
1402 }
1403
1404 /*
1405  * vxge_hw_device_debug_set - Set the debug module, level and timestamp
1406  * This routine is used to dynamically change the debug output
1407  */
1408 void vxge_hw_device_debug_set(struct __vxge_hw_device *hldev,
1409                               enum vxge_debug_level level, u32 mask)
1410 {
1411         if (hldev == NULL)
1412                 return;
1413
1414 #if defined(VXGE_DEBUG_TRACE_MASK) || \
1415         defined(VXGE_DEBUG_ERR_MASK)
1416         hldev->debug_module_mask = mask;
1417         hldev->debug_level = level;
1418 #endif
1419
1420 #if defined(VXGE_DEBUG_ERR_MASK)
1421         hldev->level_err = level & VXGE_ERR;
1422 #endif
1423
1424 #if defined(VXGE_DEBUG_TRACE_MASK)
1425         hldev->level_trace = level & VXGE_TRACE;
1426 #endif
1427 }
1428
1429 /*
1430  * vxge_hw_device_error_level_get - Get the error level
1431  * This routine returns the current error level set
1432  */
1433 u32 vxge_hw_device_error_level_get(struct __vxge_hw_device *hldev)
1434 {
1435 #if defined(VXGE_DEBUG_ERR_MASK)
1436         if (hldev == NULL)
1437                 return VXGE_ERR;
1438         else
1439                 return hldev->level_err;
1440 #else
1441         return 0;
1442 #endif
1443 }
1444
1445 /*
1446  * vxge_hw_device_trace_level_get - Get the trace level
1447  * This routine returns the current trace level set
1448  */
1449 u32 vxge_hw_device_trace_level_get(struct __vxge_hw_device *hldev)
1450 {
1451 #if defined(VXGE_DEBUG_TRACE_MASK)
1452         if (hldev == NULL)
1453                 return VXGE_TRACE;
1454         else
1455                 return hldev->level_trace;
1456 #else
1457         return 0;
1458 #endif
1459 }
1460
1461 /*
1462  * vxge_hw_device_getpause_data - Pause frame generation and reception.
1463  * Returns the Pause frame generation and reception capability of the NIC.
1464  */
1465 enum vxge_hw_status vxge_hw_device_getpause_data(struct __vxge_hw_device *hldev,
1466                                                  u32 port, u32 *tx, u32 *rx)
1467 {
1468         u64 val64;
1469         enum vxge_hw_status status = VXGE_HW_OK;
1470
1471         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1472                 status = VXGE_HW_ERR_INVALID_DEVICE;
1473                 goto exit;
1474         }
1475
1476         if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1477                 status = VXGE_HW_ERR_INVALID_PORT;
1478                 goto exit;
1479         }
1480
1481         if (!(hldev->access_rights & VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
1482                 status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
1483                 goto exit;
1484         }
1485
1486         val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1487         if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN)
1488                 *tx = 1;
1489         if (val64 & VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN)
1490                 *rx = 1;
1491 exit:
1492         return status;
1493 }
1494
1495 /*
1496  * vxge_hw_device_setpause_data -  set/reset pause frame generation.
1497  * It can be used to set or reset Pause frame generation or reception
1498  * support of the NIC.
1499  */
1500 enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1501                                                  u32 port, u32 tx, u32 rx)
1502 {
1503         u64 val64;
1504         enum vxge_hw_status status = VXGE_HW_OK;
1505
1506         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
1507                 status = VXGE_HW_ERR_INVALID_DEVICE;
1508                 goto exit;
1509         }
1510
1511         if (port > VXGE_HW_MAC_MAX_MAC_PORT_ID) {
1512                 status = VXGE_HW_ERR_INVALID_PORT;
1513                 goto exit;
1514         }
1515
1516         status = __vxge_hw_device_is_privilaged(hldev->host_type,
1517                         hldev->func_id);
1518         if (status != VXGE_HW_OK)
1519                 goto exit;
1520
1521         val64 = readq(&hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1522         if (tx)
1523                 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1524         else
1525                 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_GEN_EN;
1526         if (rx)
1527                 val64 |= VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1528         else
1529                 val64 &= ~VXGE_HW_RXMAC_PAUSE_CFG_PORT_RCV_EN;
1530
1531         writeq(val64, &hldev->mrpcim_reg->rxmac_pause_cfg_port[port]);
1532 exit:
1533         return status;
1534 }
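
/*
 * Example (illustrative sketch only): query the current pause settings on
 * port 0 and then enable pause frame reception while leaving the
 * generation setting as it was.
 *
 *	u32 tx = 0, rx = 0;
 *
 *	if (vxge_hw_device_getpause_data(hldev, 0, &tx, &rx) == VXGE_HW_OK)
 *		(void)vxge_hw_device_setpause_data(hldev, 0, tx, 1);
 */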
1535
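/*
 * vxge_hw_device_link_width_get - Return the negotiated PCIe link width
 * Reads the PCI Express Link Status register of the adapter and extracts
 * the negotiated link width (number of lanes).
 */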
1536 u16 vxge_hw_device_link_width_get(struct __vxge_hw_device *hldev)
1537 {
1538         int link_width, exp_cap;
1539         u16 lnk;
1540
1541         exp_cap = pci_find_capability(hldev->pdev, PCI_CAP_ID_EXP);
1542         pci_read_config_word(hldev->pdev, exp_cap + PCI_EXP_LNKSTA, &lnk);
1543         link_width = (lnk & VXGE_HW_PCI_EXP_LNKCAP_LNK_WIDTH) >> 4;
1544         return link_width;
1545 }
1546
1547 /*
1548  * __vxge_hw_ring_block_memblock_idx - Return the memblock index
1549  * This function returns the index of the memory block
1550  */
1551 static inline u32
1552 __vxge_hw_ring_block_memblock_idx(u8 *block)
1553 {
1554         return (u32)*((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET));
1555 }
1556
1557 /*
1558  * __vxge_hw_ring_block_memblock_idx_set - Sets the memblock index
1559  * This function stores the memblock index in a memory block
1560  */
1561 static inline void
1562 __vxge_hw_ring_block_memblock_idx_set(u8 *block, u32 memblock_idx)
1563 {
1564         *((u64 *)(block + VXGE_HW_RING_MEMBLOCK_IDX_OFFSET)) = memblock_idx;
1565 }
1566
1567 /*
1568  * __vxge_hw_ring_block_next_pointer_set - Sets the next block pointer
1569  * in RxD block
1570  * Sets the next block pointer in RxD block
1571  */
1572 static inline void
1573 __vxge_hw_ring_block_next_pointer_set(u8 *block, dma_addr_t dma_next)
1574 {
1575         *((u64 *)(block + VXGE_HW_RING_NEXT_BLOCK_POINTER_OFFSET)) = dma_next;
1576 }
1577
1578 /*
1579  * __vxge_hw_ring_first_block_address_get - Returns the dma address of the
1580  *             first block
1581  * Returns the dma address of the first RxD block
1582  */
1583 static u64 __vxge_hw_ring_first_block_address_get(struct __vxge_hw_ring *ring)
1584 {
1585         struct vxge_hw_mempool_dma *dma_object;
1586
1587         dma_object = ring->mempool->memblocks_dma_arr;
1588         vxge_assert(dma_object != NULL);
1589
1590         return dma_object->addr;
1591 }
1592
1593 /*
1594  * __vxge_hw_ring_item_dma_addr - Return the dma address of an item
1595  * This function returns the dma address of a given item
1596  */
1597 static dma_addr_t __vxge_hw_ring_item_dma_addr(struct vxge_hw_mempool *mempoolh,
1598                                                void *item)
1599 {
1600         u32 memblock_idx;
1601         void *memblock;
1602         struct vxge_hw_mempool_dma *memblock_dma_object;
1603         ptrdiff_t dma_item_offset;
1604
1605         /* get owner memblock index */
1606         memblock_idx = __vxge_hw_ring_block_memblock_idx(item);
1607
1608         /* get owner memblock by memblock index */
1609         memblock = mempoolh->memblocks_arr[memblock_idx];
1610
1611         /* get memblock DMA object by memblock index */
1612         memblock_dma_object = mempoolh->memblocks_dma_arr + memblock_idx;
1613
1614         /* calculate offset in the memblock of this item */
1615         dma_item_offset = (u8 *)item - (u8 *)memblock;
1616
1617         return memblock_dma_object->addr + dma_item_offset;
1618 }
1619
1620 /*
1621  * __vxge_hw_ring_rxdblock_link - Link the RxD blocks
1622  * This function links two RxD blocks in the ring
1623  */
1624 static void __vxge_hw_ring_rxdblock_link(struct vxge_hw_mempool *mempoolh,
1625                                          struct __vxge_hw_ring *ring, u32 from,
1626                                          u32 to)
1627 {
1628         u8 *to_item, *from_item;
1629         dma_addr_t to_dma;
1630
1631         /* get "from" RxD block */
1632         from_item = mempoolh->items_arr[from];
1633         vxge_assert(from_item);
1634
1635         /* get "to" RxD block */
1636         to_item = mempoolh->items_arr[to];
1637         vxge_assert(to_item);
1638
1639         /* get the DMA address of the beginning of the "to" RxD block */
1640         to_dma = __vxge_hw_ring_item_dma_addr(mempoolh, to_item);
1641
1642         /* set the next-block pointer of the "from" RxD block to point to
1643          * the "to" block's DMA start address */
1644         __vxge_hw_ring_block_next_pointer_set(from_item, to_dma);
1645 }
1646
1647 /*
1648  * __vxge_hw_ring_mempool_item_alloc - Allocate List blocks for RxD
1649  * block callback
1650  * This function is the callback passed to __vxge_hw_mempool_create to create
1651  * the memory pool for RxD blocks
1652  */
1653 static void
1654 __vxge_hw_ring_mempool_item_alloc(struct vxge_hw_mempool *mempoolh,
1655                                   u32 memblock_index,
1656                                   struct vxge_hw_mempool_dma *dma_object,
1657                                   u32 index, u32 is_last)
1658 {
1659         u32 i;
1660         void *item = mempoolh->items_arr[index];
1661         struct __vxge_hw_ring *ring =
1662                 (struct __vxge_hw_ring *)mempoolh->userdata;
1663
1664         /* format rxds array */
1665         for (i = 0; i < ring->rxds_per_block; i++) {
1666                 void *rxdblock_priv;
1667                 void *uld_priv;
1668                 struct vxge_hw_ring_rxd_1 *rxdp;
1669
1670                 u32 reserve_index = ring->channel.reserve_ptr -
1671                                 (index * ring->rxds_per_block + i + 1);
1672                 u32 memblock_item_idx;
1673
1674                 ring->channel.reserve_arr[reserve_index] = ((u8 *)item) +
1675                                                 i * ring->rxd_size;
1676
1677                 /* Note: memblock_item_idx is index of the item within
1678                  *       the memblock. For instance, in case of three RxD-blocks
1679                  *       per memblock this value can be 0, 1 or 2. */
1680                 rxdblock_priv = __vxge_hw_mempool_item_priv(mempoolh,
1681                                         memblock_index, item,
1682                                         &memblock_item_idx);
1683
1684                 rxdp = (struct vxge_hw_ring_rxd_1 *)
1685                                 ring->channel.reserve_arr[reserve_index];
1686
1687                 uld_priv = ((u8 *)rxdblock_priv + ring->rxd_priv_size * i);
1688
1689                 /* pre-format Host_Control */
1690                 rxdp->host_control = (u64)(size_t)uld_priv;
1691         }
1692
1693         __vxge_hw_ring_block_memblock_idx_set(item, memblock_index);
1694
1695         if (is_last) {
1696                 /* link last one with first one */
1697                 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index, 0);
1698         }
1699
1700         if (index > 0) {
1701                 /* link this RxD block with previous one */
1702                 __vxge_hw_ring_rxdblock_link(mempoolh, ring, index - 1, index);
1703         }
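
	/* Illustrative note: once every block has been formatted, the RxD
	 * blocks form a circular list, block[0] -> block[1] -> ... ->
	 * block[last] -> block[0], chained through the next-block pointers
	 * written above. */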
1704 }
1705
1706 /*
1707  * vxge_hw_ring_replenish - Initial replenish of RxDs
1708  * This function replenishes the RxDs from the reserve array to the work array
1709  */
1710 enum vxge_hw_status
1711 vxge_hw_ring_replenish(struct __vxge_hw_ring *ring)
1712 {
1713         void *rxd;
1714         struct __vxge_hw_channel *channel;
1715         enum vxge_hw_status status = VXGE_HW_OK;
1716
1717         channel = &ring->channel;
1718
1719         while (vxge_hw_channel_dtr_count(channel) > 0) {
1720
1721                 status = vxge_hw_ring_rxd_reserve(ring, &rxd);
1722
1723                 vxge_assert(status == VXGE_HW_OK);
1724
1725                 if (ring->rxd_init) {
1726                         status = ring->rxd_init(rxd, channel->userdata);
1727                         if (status != VXGE_HW_OK) {
1728                                 vxge_hw_ring_rxd_free(ring, rxd);
1729                                 goto exit;
1730                         }
1731                 }
1732
1733                 vxge_hw_ring_rxd_post(ring, rxd);
1734         }
1735         status = VXGE_HW_OK;
1736 exit:
1737         return status;
1738 }
1739
1740 /*
1741  * __vxge_hw_ring_create - Create a Ring
1742  * This function creates a ring and initializes it.
1743  */
1744 static enum vxge_hw_status
1745 __vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
1746                       struct vxge_hw_ring_attr *attr)
1747 {
1748         enum vxge_hw_status status = VXGE_HW_OK;
1749         struct __vxge_hw_ring *ring;
1750         u32 ring_length;
1751         struct vxge_hw_ring_config *config;
1752         struct __vxge_hw_device *hldev;
1753         u32 vp_id;
1754         struct vxge_hw_mempool_cbs ring_mp_callback;
1755
1756         if ((vp == NULL) || (attr == NULL)) {
1757                 status = VXGE_HW_FAIL;
1758                 goto exit;
1759         }
1760
1761         hldev = vp->vpath->hldev;
1762         vp_id = vp->vpath->vp_id;
1763
1764         config = &hldev->config.vp_config[vp_id].ring;
1765
1766         ring_length = config->ring_blocks *
1767                         vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1768
1769         ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1770                                                 VXGE_HW_CHANNEL_TYPE_RING,
1771                                                 ring_length,
1772                                                 attr->per_rxd_space,
1773                                                 attr->userdata);
1774
1775         if (ring == NULL) {
1776                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1777                 goto exit;
1778         }
1779
1780         vp->vpath->ringh = ring;
1781         ring->vp_id = vp_id;
1782         ring->vp_reg = vp->vpath->vp_reg;
1783         ring->common_reg = hldev->common_reg;
1784         ring->stats = &vp->vpath->sw_stats->ring_stats;
1785         ring->config = config;
1786         ring->callback = attr->callback;
1787         ring->rxd_init = attr->rxd_init;
1788         ring->rxd_term = attr->rxd_term;
1789         ring->buffer_mode = config->buffer_mode;
1790         ring->rxds_limit = config->rxds_limit;
1791
1792         ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1793         ring->rxd_priv_size =
1794                 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1795         ring->per_rxd_space = attr->per_rxd_space;
1796
1797         ring->rxd_priv_size =
1798                 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1799                 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
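
	/* Worked example (illustrative numbers only): with a 128-byte
	 * VXGE_CACHE_LINE_SIZE and a combined private size of 120 bytes, the
	 * expression above rounds the size up to 128, so each RxD's private
	 * area starts on a cache-line boundary. */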
1800
1801         /* how many RxDs can fit into one block. Depends on configured
1802          * buffer_mode. */
1803         ring->rxds_per_block =
1804                 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1805
1806         /* calculate actual RxD block private size */
1807         ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1808         ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1809         ring->mempool = __vxge_hw_mempool_create(hldev,
1810                                 VXGE_HW_BLOCK_SIZE,
1811                                 VXGE_HW_BLOCK_SIZE,
1812                                 ring->rxdblock_priv_size,
1813                                 ring->config->ring_blocks,
1814                                 ring->config->ring_blocks,
1815                                 &ring_mp_callback,
1816                                 ring);
1817
1818         if (ring->mempool == NULL) {
1819                 __vxge_hw_ring_delete(vp);
1820                 return VXGE_HW_ERR_OUT_OF_MEMORY;
1821         }
1822
1823         status = __vxge_hw_channel_initialize(&ring->channel);
1824         if (status != VXGE_HW_OK) {
1825                 __vxge_hw_ring_delete(vp);
1826                 goto exit;
1827         }
1828
1829         /* Note:
1830          * Specifying rxd_init callback means two things:
1831          * 1) rxds need to be initialized by driver at channel-open time;
1832          * 2) rxds need to be posted at channel-open time
1833          *    (that's what the initial_replenish() below does)
1834          * Currently we don't have a case when the 1) is done without the 2).
1835          */
1836         if (ring->rxd_init) {
1837                 status = vxge_hw_ring_replenish(ring);
1838                 if (status != VXGE_HW_OK) {
1839                         __vxge_hw_ring_delete(vp);
1840                         goto exit;
1841                 }
1842         }
1843
1844         /* initial replenish will increment the counter in its post() routine,
1845          * we have to reset it */
1846         ring->stats->common_stats.usage_cnt = 0;
1847 exit:
1848         return status;
1849 }
1850
1851 /*
1852  * __vxge_hw_ring_abort - Returns outstanding RxDs
1853  * This function terminates the RxDs of the ring
1854  */
1855 static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1856 {
1857         void *rxdh;
1858         struct __vxge_hw_channel *channel;
1859
1860         channel = &ring->channel;
1861
1862         for (;;) {
1863                 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1864
1865                 if (rxdh == NULL)
1866                         break;
1867
1868                 vxge_hw_channel_dtr_complete(channel);
1869
1870                 if (ring->rxd_term)
1871                         ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
1872                                 channel->userdata);
1873
1874                 vxge_hw_channel_dtr_free(channel, rxdh);
1875         }
1876
1877         return VXGE_HW_OK;
1878 }
1879
1880 /*
1881  * __vxge_hw_ring_reset - Resets the ring
1882  * This function resets the ring during vpath reset operation
1883  */
1884 static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
1885 {
1886         enum vxge_hw_status status = VXGE_HW_OK;
1887         struct __vxge_hw_channel *channel;
1888
1889         channel = &ring->channel;
1890
1891         __vxge_hw_ring_abort(ring);
1892
1893         status = __vxge_hw_channel_reset(channel);
1894
1895         if (status != VXGE_HW_OK)
1896                 goto exit;
1897
1898         if (ring->rxd_init) {
1899                 status = vxge_hw_ring_replenish(ring);
1900                 if (status != VXGE_HW_OK)
1901                         goto exit;
1902         }
1903 exit:
1904         return status;
1905 }
1906
1907 /*
1908  * __vxge_hw_ring_delete - Removes the ring
1909  * This function frees up the memory pool and removes the ring
1910  */
1911 static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1912 {
1913         struct __vxge_hw_ring *ring = vp->vpath->ringh;
1914
1915         __vxge_hw_ring_abort(ring);
1916
1917         if (ring->mempool)
1918                 __vxge_hw_mempool_destroy(ring->mempool);
1919
1920         vp->vpath->ringh = NULL;
1921         __vxge_hw_channel_free(&ring->channel);
1922
1923         return VXGE_HW_OK;
1924 }
1925
1926 /*
1927  * __vxge_hw_mempool_grow
1928  * Grows the mempool by up to %num_allocate memory blocks.
1929  */
1930 static enum vxge_hw_status
1931 __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1932                        u32 *num_allocated)
1933 {
1934         u32 i, first_time = mempool->memblocks_allocated == 0 ? 1 : 0;
1935         u32 n_items = mempool->items_per_memblock;
1936         u32 start_block_idx = mempool->memblocks_allocated;
1937         u32 end_block_idx = mempool->memblocks_allocated + num_allocate;
1938         enum vxge_hw_status status = VXGE_HW_OK;
1939
1940         *num_allocated = 0;
1941
1942         if (end_block_idx > mempool->memblocks_max) {
1943                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1944                 goto exit;
1945         }
1946
1947         for (i = start_block_idx; i < end_block_idx; i++) {
1948                 u32 j;
1949                 u32 is_last = ((end_block_idx - 1) == i);
1950                 struct vxge_hw_mempool_dma *dma_object =
1951                         mempool->memblocks_dma_arr + i;
1952                 void *the_memblock;
1953
1954                 /* allocate the memblock's private part. Each DMA memblock
1955                  * has space set aside for the items' private usage, as
1956                  * requested by the mempool user. Each time the mempool grows,
1957                  * it allocates a new memblock and its private part together,
1958                  * which helps to keep memory usage low. */
1959                 mempool->memblocks_priv_arr[i] =
1960                                 vmalloc(mempool->items_priv_size * n_items);
1961                 if (mempool->memblocks_priv_arr[i] == NULL) {
1962                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1963                         goto exit;
1964                 }
1965
1966                 memset(mempool->memblocks_priv_arr[i], 0,
1967                              mempool->items_priv_size * n_items);
1968
1969                 /* allocate DMA-capable memblock */
1970                 mempool->memblocks_arr[i] =
1971                         __vxge_hw_blockpool_malloc(mempool->devh,
1972                                 mempool->memblock_size, dma_object);
1973                 if (mempool->memblocks_arr[i] == NULL) {
1974                         vfree(mempool->memblocks_priv_arr[i]);
1975                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
1976                         goto exit;
1977                 }
1978
1979                 (*num_allocated)++;
1980                 mempool->memblocks_allocated++;
1981
1982                 memset(mempool->memblocks_arr[i], 0, mempool->memblock_size);
1983
1984                 the_memblock = mempool->memblocks_arr[i];
1985
1986                 /* fill the items hash array */
1987                 for (j = 0; j < n_items; j++) {
1988                         u32 index = i * n_items + j;
1989
1990                         if (first_time && index >= mempool->items_initial)
1991                                 break;
1992
1993                         mempool->items_arr[index] =
1994                                 ((char *)the_memblock + j*mempool->item_size);
1995
1996                         /* let the caller do more work on each item */
1997                         if (mempool->item_func_alloc != NULL)
1998                                 mempool->item_func_alloc(mempool, i,
1999                                         dma_object, index, is_last);
2000
2001                         mempool->items_current = index + 1;
2002                 }
2003
2004                 if (first_time && mempool->items_current ==
2005                                         mempool->items_initial)
2006                         break;
2007         }
2008 exit:
2009         return status;
2010 }
2011
2012 /*
2013  * __vxge_hw_mempool_create
2014  * This function creates a memory pool object. The pool may grow but will
2015  * never shrink. It consists of a number of dynamically allocated blocks
2016  * with enough space to hold %items_initial items. Memory is DMA-able,
2017  * but the client must map/unmap it before interoperating with the device.
2018  */
2019 static struct vxge_hw_mempool*
2020 __vxge_hw_mempool_create(
2021         struct __vxge_hw_device *devh,
2022         u32 memblock_size,
2023         u32 item_size,
2024         u32 items_priv_size,
2025         u32 items_initial,
2026         u32 items_max,
2027         struct vxge_hw_mempool_cbs *mp_callback,
2028         void *userdata)
2029 {
2030         enum vxge_hw_status status = VXGE_HW_OK;
2031         u32 memblocks_to_allocate;
2032         struct vxge_hw_mempool *mempool = NULL;
2033         u32 allocated;
2034
2035         if (memblock_size < item_size) {
2036                 status = VXGE_HW_FAIL;
2037                 goto exit;
2038         }
2039
2040         mempool = (struct vxge_hw_mempool *)
2041                         vmalloc(sizeof(struct vxge_hw_mempool));
2042         if (mempool == NULL) {
2043                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2044                 goto exit;
2045         }
2046         memset(mempool, 0, sizeof(struct vxge_hw_mempool));
2047
2048         mempool->devh                   = devh;
2049         mempool->memblock_size          = memblock_size;
2050         mempool->items_max              = items_max;
2051         mempool->items_initial          = items_initial;
2052         mempool->item_size              = item_size;
2053         mempool->items_priv_size        = items_priv_size;
2054         mempool->item_func_alloc        = mp_callback->item_func_alloc;
2055         mempool->userdata               = userdata;
2056
2057         mempool->memblocks_allocated = 0;
2058
2059         mempool->items_per_memblock = memblock_size / item_size;
2060
2061         mempool->memblocks_max = (items_max + mempool->items_per_memblock - 1) /
2062                                         mempool->items_per_memblock;
2063
2064         /* allocate array of memblocks */
2065         mempool->memblocks_arr =
2066                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
2067         if (mempool->memblocks_arr == NULL) {
2068                 __vxge_hw_mempool_destroy(mempool);
2069                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2070                 mempool = NULL;
2071                 goto exit;
2072         }
2073         memset(mempool->memblocks_arr, 0,
2074                 sizeof(void *) * mempool->memblocks_max);
2075
2076         /* allocate array of private parts of items per memblocks */
2077         mempool->memblocks_priv_arr =
2078                 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max);
2079         if (mempool->memblocks_priv_arr == NULL) {
2080                 __vxge_hw_mempool_destroy(mempool);
2081                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2082                 mempool = NULL;
2083                 goto exit;
2084         }
2085         memset(mempool->memblocks_priv_arr, 0,
2086                     sizeof(void *) * mempool->memblocks_max);
2087
2088         /* allocate array of memblocks DMA objects */
2089         mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *)
2090                 vmalloc(sizeof(struct vxge_hw_mempool_dma) *
2091                         mempool->memblocks_max);
2092
2093         if (mempool->memblocks_dma_arr == NULL) {
2094                 __vxge_hw_mempool_destroy(mempool);
2095                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2096                 mempool = NULL;
2097                 goto exit;
2098         }
2099         memset(mempool->memblocks_dma_arr, 0,
2100                         sizeof(struct vxge_hw_mempool_dma) *
2101                         mempool->memblocks_max);
2102
2103         /* allocate hash array of items */
2104         mempool->items_arr =
2105                 (void **) vmalloc(sizeof(void *) * mempool->items_max);
2106         if (mempool->items_arr == NULL) {
2107                 __vxge_hw_mempool_destroy(mempool);
2108                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2109                 mempool = NULL;
2110                 goto exit;
2111         }
2112         memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
2113
2114         /* calculate initial number of memblocks */
2115         memblocks_to_allocate = (mempool->items_initial +
2116                                  mempool->items_per_memblock - 1) /
2117                                                 mempool->items_per_memblock;
2118
2119         /* pre-allocate the mempool */
2120         status = __vxge_hw_mempool_grow(mempool, memblocks_to_allocate,
2121                                         &allocated);
2122         if (status != VXGE_HW_OK) {
2123                 __vxge_hw_mempool_destroy(mempool);
2124                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2125                 mempool = NULL;
2126                 goto exit;
2127         }
2128
2129 exit:
2130         return mempool;
2131 }
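
/*
 * Usage note (observational, not from the original comments): the ring and
 * fifo creation paths in this file call __vxge_hw_mempool_create() with the
 * same value for the initial and maximum block counts, so those pools are
 * fully pre-allocated up front and never grow at run time.
 */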
2132
2133 /*
2134  * __vxge_hw_mempool_destroy
2135  */
2136 static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
2137 {
2138         u32 i, j;
2139         struct __vxge_hw_device *devh = mempool->devh;
2140
2141         for (i = 0; i < mempool->memblocks_allocated; i++) {
2142                 struct vxge_hw_mempool_dma *dma_object;
2143
2144                 vxge_assert(mempool->memblocks_arr[i]);
2145                 vxge_assert(mempool->memblocks_dma_arr + i);
2146
2147                 dma_object = mempool->memblocks_dma_arr + i;
2148
2149                 for (j = 0; j < mempool->items_per_memblock; j++) {
2150                         u32 index = i * mempool->items_per_memblock + j;
2151
2152                         /* skip the last, partially filled (if any) memblock */
2153                         if (index >= mempool->items_current)
2154                                 break;
2155                 }
2156
2157                 vfree(mempool->memblocks_priv_arr[i]);
2158
2159                 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2160                                 mempool->memblock_size, dma_object);
2161         }
2162
2163         vfree(mempool->items_arr);
2164
2165         vfree(mempool->memblocks_dma_arr);
2166
2167         vfree(mempool->memblocks_priv_arr);
2168
2169         vfree(mempool->memblocks_arr);
2170
2171         vfree(mempool);
2172 }
2173
2174 /*
2175  * __vxge_hw_device_fifo_config_check - Check fifo configuration.
2176  * Check the fifo configuration
2177  */
2178 enum vxge_hw_status
2179 __vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
2180 {
2181         if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
2182              (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
2183                 return VXGE_HW_BADCFG_FIFO_BLOCKS;
2184
2185         return VXGE_HW_OK;
2186 }
2187
2188 /*
2189  * __vxge_hw_device_vpath_config_check - Check vpath configuration.
2190  * Check the vpath configuration
2191  */
2192 static enum vxge_hw_status
2193 __vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
2194 {
2195         enum vxge_hw_status status;
2196
2197         if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
2198                 (vp_config->min_bandwidth >
2199                                         VXGE_HW_VPATH_BANDWIDTH_MAX))
2200                 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
2201
2202         status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
2203         if (status != VXGE_HW_OK)
2204                 return status;
2205
2206         if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
2207                 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
2208                 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
2209                 return VXGE_HW_BADCFG_VPATH_MTU;
2210
2211         if ((vp_config->rpa_strip_vlan_tag !=
2212                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
2213                 (vp_config->rpa_strip_vlan_tag !=
2214                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
2215                 (vp_config->rpa_strip_vlan_tag !=
2216                 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
2217                 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
2218
2219         return VXGE_HW_OK;
2220 }
2221
2222 /*
2223  * __vxge_hw_device_config_check - Check device configuration.
2224  * Check the device configuration
2225  */
2226 enum vxge_hw_status
2227 __vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
2228 {
2229         u32 i;
2230         enum vxge_hw_status status;
2231
2232         if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
2233            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
2234            (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
2235            (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
2236                 return VXGE_HW_BADCFG_INTR_MODE;
2237
2238         if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
2239            (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
2240                 return VXGE_HW_BADCFG_RTS_MAC_EN;
2241
2242         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2243                 status = __vxge_hw_device_vpath_config_check(
2244                                 &new_config->vp_config[i]);
2245                 if (status != VXGE_HW_OK)
2246                         return status;
2247         }
2248
2249         return VXGE_HW_OK;
2250 }
2251
2252 /*
2253  * vxge_hw_device_config_default_get - Initialize device config with defaults.
2254  * Initialize Titan device config with default values.
2255  */
2256 enum vxge_hw_status __devinit
2257 vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2258 {
2259         u32 i;
2260
2261         device_config->dma_blockpool_initial =
2262                                         VXGE_HW_INITIAL_DMA_BLOCK_POOL_SIZE;
2263         device_config->dma_blockpool_max = VXGE_HW_MAX_DMA_BLOCK_POOL_SIZE;
2264         device_config->intr_mode = VXGE_HW_INTR_MODE_DEF;
2265         device_config->rth_en = VXGE_HW_RTH_DEFAULT;
2266         device_config->rth_it_type = VXGE_HW_RTH_IT_TYPE_DEFAULT;
2267         device_config->device_poll_millis =  VXGE_HW_DEF_DEVICE_POLL_MILLIS;
2268         device_config->rts_mac_en =  VXGE_HW_RTS_MAC_DEFAULT;
2269
2270         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2271
2272                 device_config->vp_config[i].vp_id = i;
2273
2274                 device_config->vp_config[i].min_bandwidth =
2275                                 VXGE_HW_VPATH_BANDWIDTH_DEFAULT;
2276
2277                 device_config->vp_config[i].ring.enable = VXGE_HW_RING_DEFAULT;
2278
2279                 device_config->vp_config[i].ring.ring_blocks =
2280                                 VXGE_HW_DEF_RING_BLOCKS;
2281
2282                 device_config->vp_config[i].ring.buffer_mode =
2283                                 VXGE_HW_RING_RXD_BUFFER_MODE_DEFAULT;
2284
2285                 device_config->vp_config[i].ring.scatter_mode =
2286                                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT;
2287
2288                 device_config->vp_config[i].ring.rxds_limit =
2289                                 VXGE_HW_DEF_RING_RXDS_LIMIT;
2290
2291                 device_config->vp_config[i].fifo.enable = VXGE_HW_FIFO_ENABLE;
2292
2293                 device_config->vp_config[i].fifo.fifo_blocks =
2294                                 VXGE_HW_MIN_FIFO_BLOCKS;
2295
2296                 device_config->vp_config[i].fifo.max_frags =
2297                                 VXGE_HW_MAX_FIFO_FRAGS;
2298
2299                 device_config->vp_config[i].fifo.memblock_size =
2300                                 VXGE_HW_DEF_FIFO_MEMBLOCK_SIZE;
2301
2302                 device_config->vp_config[i].fifo.alignment_size =
2303                                 VXGE_HW_DEF_FIFO_ALIGNMENT_SIZE;
2304
2305                 device_config->vp_config[i].fifo.intr =
2306                                 VXGE_HW_FIFO_QUEUE_INTR_DEFAULT;
2307
2308                 device_config->vp_config[i].fifo.no_snoop_bits =
2309                                 VXGE_HW_FIFO_NO_SNOOP_DEFAULT;
2310                 device_config->vp_config[i].tti.intr_enable =
2311                                 VXGE_HW_TIM_INTR_DEFAULT;
2312
2313                 device_config->vp_config[i].tti.btimer_val =
2314                                 VXGE_HW_USE_FLASH_DEFAULT;
2315
2316                 device_config->vp_config[i].tti.timer_ac_en =
2317                                 VXGE_HW_USE_FLASH_DEFAULT;
2318
2319                 device_config->vp_config[i].tti.timer_ci_en =
2320                                 VXGE_HW_USE_FLASH_DEFAULT;
2321
2322                 device_config->vp_config[i].tti.timer_ri_en =
2323                                 VXGE_HW_USE_FLASH_DEFAULT;
2324
2325                 device_config->vp_config[i].tti.rtimer_val =
2326                                 VXGE_HW_USE_FLASH_DEFAULT;
2327
2328                 device_config->vp_config[i].tti.util_sel =
2329                                 VXGE_HW_USE_FLASH_DEFAULT;
2330
2331                 device_config->vp_config[i].tti.ltimer_val =
2332                                 VXGE_HW_USE_FLASH_DEFAULT;
2333
2334                 device_config->vp_config[i].tti.urange_a =
2335                                 VXGE_HW_USE_FLASH_DEFAULT;
2336
2337                 device_config->vp_config[i].tti.uec_a =
2338                                 VXGE_HW_USE_FLASH_DEFAULT;
2339
2340                 device_config->vp_config[i].tti.urange_b =
2341                                 VXGE_HW_USE_FLASH_DEFAULT;
2342
2343                 device_config->vp_config[i].tti.uec_b =
2344                                 VXGE_HW_USE_FLASH_DEFAULT;
2345
2346                 device_config->vp_config[i].tti.urange_c =
2347                                 VXGE_HW_USE_FLASH_DEFAULT;
2348
2349                 device_config->vp_config[i].tti.uec_c =
2350                                 VXGE_HW_USE_FLASH_DEFAULT;
2351
2352                 device_config->vp_config[i].tti.uec_d =
2353                                 VXGE_HW_USE_FLASH_DEFAULT;
2354
2355                 device_config->vp_config[i].rti.intr_enable =
2356                                 VXGE_HW_TIM_INTR_DEFAULT;
2357
2358                 device_config->vp_config[i].rti.btimer_val =
2359                                 VXGE_HW_USE_FLASH_DEFAULT;
2360
2361                 device_config->vp_config[i].rti.timer_ac_en =
2362                                 VXGE_HW_USE_FLASH_DEFAULT;
2363
2364                 device_config->vp_config[i].rti.timer_ci_en =
2365                                 VXGE_HW_USE_FLASH_DEFAULT;
2366
2367                 device_config->vp_config[i].rti.timer_ri_en =
2368                                 VXGE_HW_USE_FLASH_DEFAULT;
2369
2370                 device_config->vp_config[i].rti.rtimer_val =
2371                                 VXGE_HW_USE_FLASH_DEFAULT;
2372
2373                 device_config->vp_config[i].rti.util_sel =
2374                                 VXGE_HW_USE_FLASH_DEFAULT;
2375
2376                 device_config->vp_config[i].rti.ltimer_val =
2377                                 VXGE_HW_USE_FLASH_DEFAULT;
2378
2379                 device_config->vp_config[i].rti.urange_a =
2380                                 VXGE_HW_USE_FLASH_DEFAULT;
2381
2382                 device_config->vp_config[i].rti.uec_a =
2383                                 VXGE_HW_USE_FLASH_DEFAULT;
2384
2385                 device_config->vp_config[i].rti.urange_b =
2386                                 VXGE_HW_USE_FLASH_DEFAULT;
2387
2388                 device_config->vp_config[i].rti.uec_b =
2389                                 VXGE_HW_USE_FLASH_DEFAULT;
2390
2391                 device_config->vp_config[i].rti.urange_c =
2392                                 VXGE_HW_USE_FLASH_DEFAULT;
2393
2394                 device_config->vp_config[i].rti.uec_c =
2395                                 VXGE_HW_USE_FLASH_DEFAULT;
2396
2397                 device_config->vp_config[i].rti.uec_d =
2398                                 VXGE_HW_USE_FLASH_DEFAULT;
2399
2400                 device_config->vp_config[i].mtu =
2401                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU;
2402
2403                 device_config->vp_config[i].rpa_strip_vlan_tag =
2404                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT;
2405         }
2406
2407         return VXGE_HW_OK;
2408 }
2409
2410 /*
2411  * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2412  * Set the swapper bits appropriately for the legacy section.
2413  */
2414 static enum vxge_hw_status
2415 __vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2416 {
2417         u64 val64;
2418         enum vxge_hw_status status = VXGE_HW_OK;
2419
2420         val64 = readq(&legacy_reg->toc_swapper_fb);
2421
2422         wmb();
2423
2424         switch (val64) {
2425
2426         case VXGE_HW_SWAPPER_INITIAL_VALUE:
2427                 return status;
2428
2429         case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2430                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2431                         &legacy_reg->pifm_rd_swap_en);
2432                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2433                         &legacy_reg->pifm_rd_flip_en);
2434                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2435                         &legacy_reg->pifm_wr_swap_en);
2436                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2437                         &legacy_reg->pifm_wr_flip_en);
2438                 break;
2439
2440         case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2441                 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2442                         &legacy_reg->pifm_rd_swap_en);
2443                 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2444                         &legacy_reg->pifm_wr_swap_en);
2445                 break;
2446
2447         case VXGE_HW_SWAPPER_BIT_FLIPPED:
2448                 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2449                         &legacy_reg->pifm_rd_flip_en);
2450                 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2451                         &legacy_reg->pifm_wr_flip_en);
2452                 break;
2453         }
2454
2455         wmb();
2456
2457         val64 = readq(&legacy_reg->toc_swapper_fb);
2458
2459         if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2460                 status = VXGE_HW_ERR_SWAPPER_CTRL;
2461
2462         return status;
2463 }
2464
2465 /*
2466  * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2467  * Set the swapper bits appropriately for the vpath.
2468  */
2469 static enum vxge_hw_status
2470 __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2471 {
2472 #ifndef __BIG_ENDIAN
2473         u64 val64;
2474
2475         val64 = readq(&vpath_reg->vpath_general_cfg1);
2476         wmb();
2477         val64 |= VXGE_HW_VPATH_GENERAL_CFG1_CTL_BYTE_SWAPEN;
2478         writeq(val64, &vpath_reg->vpath_general_cfg1);
2479         wmb();
2480 #endif
2481         return VXGE_HW_OK;
2482 }
2483
2484 /*
2485  * __vxge_hw_kdfc_swapper_set - Set the swapper bits for the kdfc.
2486  * Set the swapper bits appropriately for the KDFC.
2487  */
2488 static enum vxge_hw_status
2489 __vxge_hw_kdfc_swapper_set(
2490         struct vxge_hw_legacy_reg __iomem *legacy_reg,
2491         struct vxge_hw_vpath_reg __iomem *vpath_reg)
2492 {
2493         u64 val64;
2494
2495         val64 = readq(&legacy_reg->pifm_wr_swap_en);
2496
2497         if (val64 == VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE) {
2498                 val64 = readq(&vpath_reg->kdfcctl_cfg0);
2499                 wmb();
2500
2501                 val64 |= VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO0 |
2502                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO1  |
2503                         VXGE_HW_KDFCCTL_CFG0_BYTE_SWAPEN_FIFO2;
2504
2505                 writeq(val64, &vpath_reg->kdfcctl_cfg0);
2506                 wmb();
2507         }
2508
2509         return VXGE_HW_OK;
2510 }
2511
2512 /*
2513  * vxge_hw_mgmt_reg_read - Read Titan register.
2514  */
2515 enum vxge_hw_status
2516 vxge_hw_mgmt_reg_read(struct __vxge_hw_device *hldev,
2517                       enum vxge_hw_mgmt_reg_type type,
2518                       u32 index, u32 offset, u64 *value)
2519 {
2520         enum vxge_hw_status status = VXGE_HW_OK;
2521
2522         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2523                 status = VXGE_HW_ERR_INVALID_DEVICE;
2524                 goto exit;
2525         }
2526
2527         switch (type) {
2528         case vxge_hw_mgmt_reg_type_legacy:
2529                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2530                         status = VXGE_HW_ERR_INVALID_OFFSET;
2531                         break;
2532                 }
2533                 *value = readq((void __iomem *)hldev->legacy_reg + offset);
2534                 break;
2535         case vxge_hw_mgmt_reg_type_toc:
2536                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2537                         status = VXGE_HW_ERR_INVALID_OFFSET;
2538                         break;
2539                 }
2540                 *value = readq((void __iomem *)hldev->toc_reg + offset);
2541                 break;
2542         case vxge_hw_mgmt_reg_type_common:
2543                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2544                         status = VXGE_HW_ERR_INVALID_OFFSET;
2545                         break;
2546                 }
2547                 *value = readq((void __iomem *)hldev->common_reg + offset);
2548                 break;
2549         case vxge_hw_mgmt_reg_type_mrpcim:
2550                 if (!(hldev->access_rights &
2551                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2552                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2553                         break;
2554                 }
2555                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2556                         status = VXGE_HW_ERR_INVALID_OFFSET;
2557                         break;
2558                 }
2559                 *value = readq((void __iomem *)hldev->mrpcim_reg + offset);
2560                 break;
2561         case vxge_hw_mgmt_reg_type_srpcim:
2562                 if (!(hldev->access_rights &
2563                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2564                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2565                         break;
2566                 }
2567                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2568                         status = VXGE_HW_ERR_INVALID_INDEX;
2569                         break;
2570                 }
2571                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2572                         status = VXGE_HW_ERR_INVALID_OFFSET;
2573                         break;
2574                 }
2575                 *value = readq((void __iomem *)hldev->srpcim_reg[index] +
2576                                 offset);
2577                 break;
2578         case vxge_hw_mgmt_reg_type_vpmgmt:
2579                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2580                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2581                         status = VXGE_HW_ERR_INVALID_INDEX;
2582                         break;
2583                 }
2584                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2585                         status = VXGE_HW_ERR_INVALID_OFFSET;
2586                         break;
2587                 }
2588                 *value = readq((void __iomem *)hldev->vpmgmt_reg[index] +
2589                                 offset);
2590                 break;
2591         case vxge_hw_mgmt_reg_type_vpath:
2592                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES - 1) ||
2593                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2594                         status = VXGE_HW_ERR_INVALID_INDEX;
2595                         break;
2596                 }
2601                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2602                         status = VXGE_HW_ERR_INVALID_OFFSET;
2603                         break;
2604                 }
2605                 *value = readq((void __iomem *)hldev->vpath_reg[index] +
2606                                 offset);
2607                 break;
2608         default:
2609                 status = VXGE_HW_ERR_INVALID_TYPE;
2610                 break;
2611         }
2612
2613 exit:
2614         return status;
2615 }
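
/*
 * Example (illustrative sketch): read the first 64-bit word of the common
 * register space.  The offset and the destination variable are arbitrary
 * choices made for the example.
 *
 *	u64 val = 0;
 *	enum vxge_hw_status status;
 *
 *	status = vxge_hw_mgmt_reg_read(hldev, vxge_hw_mgmt_reg_type_common,
 *				       0, 0, &val);
 */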
2616
2617 /*
2618  * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2619  */
2620 enum vxge_hw_status
2621 vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2622 {
2623         struct vxge_hw_vpmgmt_reg       __iomem *vpmgmt_reg;
2624         enum vxge_hw_status status = VXGE_HW_OK;
2625         int i = 0, j = 0;
2626
2627         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2628                 if (!((vpath_mask) & vxge_mBIT(i)))
2629                         continue;
2630                 vpmgmt_reg = hldev->vpmgmt_reg[i];
2631                 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2632                         if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2633                         & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2634                                 return VXGE_HW_FAIL;
2635                 }
2636         }
2637         return status;
2638 }
2639 /*
2640  * vxge_hw_mgmt_reg_write - Write Titan register.
2641  */
2642 enum vxge_hw_status
2643 vxge_hw_mgmt_reg_write(struct __vxge_hw_device *hldev,
2644                       enum vxge_hw_mgmt_reg_type type,
2645                       u32 index, u32 offset, u64 value)
2646 {
2647         enum vxge_hw_status status = VXGE_HW_OK;
2648
2649         if ((hldev == NULL) || (hldev->magic != VXGE_HW_DEVICE_MAGIC)) {
2650                 status = VXGE_HW_ERR_INVALID_DEVICE;
2651                 goto exit;
2652         }
2653
2654         switch (type) {
2655         case vxge_hw_mgmt_reg_type_legacy:
2656                 if (offset > sizeof(struct vxge_hw_legacy_reg) - 8) {
2657                         status = VXGE_HW_ERR_INVALID_OFFSET;
2658                         break;
2659                 }
2660                 writeq(value, (void __iomem *)hldev->legacy_reg + offset);
2661                 break;
2662         case vxge_hw_mgmt_reg_type_toc:
2663                 if (offset > sizeof(struct vxge_hw_toc_reg) - 8) {
2664                         status = VXGE_HW_ERR_INVALID_OFFSET;
2665                         break;
2666                 }
2667                 writeq(value, (void __iomem *)hldev->toc_reg + offset);
2668                 break;
2669         case vxge_hw_mgmt_reg_type_common:
2670                 if (offset > sizeof(struct vxge_hw_common_reg) - 8) {
2671                         status = VXGE_HW_ERR_INVALID_OFFSET;
2672                         break;
2673                 }
2674                 writeq(value, (void __iomem *)hldev->common_reg + offset);
2675                 break;
2676         case vxge_hw_mgmt_reg_type_mrpcim:
2677                 if (!(hldev->access_rights &
2678                         VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)) {
2679                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2680                         break;
2681                 }
2682                 if (offset > sizeof(struct vxge_hw_mrpcim_reg) - 8) {
2683                         status = VXGE_HW_ERR_INVALID_OFFSET;
2684                         break;
2685                 }
2686                 writeq(value, (void __iomem *)hldev->mrpcim_reg + offset);
2687                 break;
2688         case vxge_hw_mgmt_reg_type_srpcim:
2689                 if (!(hldev->access_rights &
2690                         VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM)) {
2691                         status = VXGE_HW_ERR_PRIVILAGED_OPEARATION;
2692                         break;
2693                 }
2694                 if (index > VXGE_HW_TITAN_SRPCIM_REG_SPACES - 1) {
2695                         status = VXGE_HW_ERR_INVALID_INDEX;
2696                         break;
2697                 }
2698                 if (offset > sizeof(struct vxge_hw_srpcim_reg) - 8) {
2699                         status = VXGE_HW_ERR_INVALID_OFFSET;
2700                         break;
2701                 }
2702                 writeq(value, (void __iomem *)hldev->srpcim_reg[index] +
2703                         offset);
2704
2705                 break;
2706         case vxge_hw_mgmt_reg_type_vpmgmt:
2707                 if ((index > VXGE_HW_TITAN_VPMGMT_REG_SPACES - 1) ||
2708                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2709                         status = VXGE_HW_ERR_INVALID_INDEX;
2710                         break;
2711                 }
2712                 if (offset > sizeof(struct vxge_hw_vpmgmt_reg) - 8) {
2713                         status = VXGE_HW_ERR_INVALID_OFFSET;
2714                         break;
2715                 }
2716                 writeq(value, (void __iomem *)hldev->vpmgmt_reg[index] +
2717                         offset);
2718                 break;
2719         case vxge_hw_mgmt_reg_type_vpath:
2720                 if ((index > VXGE_HW_TITAN_VPATH_REG_SPACES-1) ||
2721                         (!(hldev->vpath_assignments & vxge_mBIT(index)))) {
2722                         status = VXGE_HW_ERR_INVALID_INDEX;
2723                         break;
2724                 }
2725                 if (offset > sizeof(struct vxge_hw_vpath_reg) - 8) {
2726                         status = VXGE_HW_ERR_INVALID_OFFSET;
2727                         break;
2728                 }
2729                 writeq(value, (void __iomem *)hldev->vpath_reg[index] +
2730                         offset);
2731                 break;
2732         default:
2733                 status = VXGE_HW_ERR_INVALID_TYPE;
2734                 break;
2735         }
2736 exit:
2737         return status;
2738 }
2739
2740 /*
2741  * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2742  * list callback
2743  * This function is the callback passed to __vxge_hw_mempool_create to create
2744  * the memory pool for TxD lists
2745  */
2746 static void
2747 __vxge_hw_fifo_mempool_item_alloc(
2748         struct vxge_hw_mempool *mempoolh,
2749         u32 memblock_index, struct vxge_hw_mempool_dma *dma_object,
2750         u32 index, u32 is_last)
2751 {
2752         u32 memblock_item_idx;
2753         struct __vxge_hw_fifo_txdl_priv *txdl_priv;
2754         struct vxge_hw_fifo_txd *txdp =
2755                 (struct vxge_hw_fifo_txd *)mempoolh->items_arr[index];
2756         struct __vxge_hw_fifo *fifo =
2757                         (struct __vxge_hw_fifo *)mempoolh->userdata;
2758         void *memblock = mempoolh->memblocks_arr[memblock_index];
2759
2760         vxge_assert(txdp);
2761
2762         txdp->host_control = (u64) (size_t)
2763         __vxge_hw_mempool_item_priv(mempoolh, memblock_index, txdp,
2764                                         &memblock_item_idx);
2765
2766         txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, txdp);
2767
2768         vxge_assert(txdl_priv);
2769
2770         fifo->channel.reserve_arr[fifo->channel.reserve_ptr - 1 - index] = txdp;
2771
2772         /* pre-format HW's TxDL's private */
2773         txdl_priv->dma_offset = (char *)txdp - (char *)memblock;
2774         txdl_priv->dma_addr = dma_object->addr + txdl_priv->dma_offset;
2775         txdl_priv->dma_handle = dma_object->handle;
2776         txdl_priv->memblock   = memblock;
2777         txdl_priv->first_txdp = txdp;
2778         txdl_priv->next_txdl_priv = NULL;
2779         txdl_priv->alloc_frags = 0;
2780 }
2781
2782 /*
2783  * __vxge_hw_fifo_create - Create a FIFO
2784  * This function creates a FIFO and initializes it.
2785  */
2786 enum vxge_hw_status
2787 __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2788                       struct vxge_hw_fifo_attr *attr)
2789 {
2790         enum vxge_hw_status status = VXGE_HW_OK;
2791         struct __vxge_hw_fifo *fifo;
2792         struct vxge_hw_fifo_config *config;
2793         u32 txdl_size, txdl_per_memblock;
2794         struct vxge_hw_mempool_cbs fifo_mp_callback;
2795         struct __vxge_hw_virtualpath *vpath;
2796
2797         if ((vp == NULL) || (attr == NULL)) {
2798                 status = VXGE_HW_ERR_INVALID_HANDLE;
2799                 goto exit;
2800         }
2801         vpath = vp->vpath;
2802         config = &vpath->hldev->config.vp_config[vpath->vp_id].fifo;
2803
2804         txdl_size = config->max_frags * sizeof(struct vxge_hw_fifo_txd);
2805
2806         txdl_per_memblock = config->memblock_size / txdl_size;
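
	/* Worked example (illustrative numbers only): with max_frags = 7 and
	 * a 32-byte TxD, txdl_size is 224 bytes, so a 4096-byte memory block
	 * holds 4096 / 224 = 18 TxD lists.  The real values come from the
	 * fifo configuration and the TxD layout in the headers. */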
2807
2808         fifo = (struct __vxge_hw_fifo *)__vxge_hw_channel_allocate(vp,
2809                                         VXGE_HW_CHANNEL_TYPE_FIFO,
2810                                         config->fifo_blocks * txdl_per_memblock,
2811                                         attr->per_txdl_space, attr->userdata);
2812
2813         if (fifo == NULL) {
2814                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2815                 goto exit;
2816         }
2817
2818         vpath->fifoh = fifo;
2819         fifo->nofl_db = vpath->nofl_db;
2820
2821         fifo->vp_id = vpath->vp_id;
2822         fifo->vp_reg = vpath->vp_reg;
2823         fifo->stats = &vpath->sw_stats->fifo_stats;
2824
2825         fifo->config = config;
2826
2827         /* apply "interrupts per txdl" attribute */
2828         fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_UTILZ;
2829
2830         if (fifo->config->intr)
2831                 fifo->interrupt_type = VXGE_HW_FIFO_TXD_INT_TYPE_PER_LIST;
2832
2833         fifo->no_snoop_bits = config->no_snoop_bits;
2834
2835         /*
2836          * FIFO memory management strategy:
2837          *
2838          * TxDL split into three independent parts:
2839          *      - set of TxD's
2840          *      - TxD HW private part
2841          *      - driver private part
2842          *
2843          * Adaptive memory allocation is used, i.e. memory is allocated on
2844          * demand in a size that fits into one memory block.
2845          * One memory block may contain more than one TxDL.
2846          *
2847          * During "reserve" operations more memory can be allocated on demand
2848          * for example due to FIFO full condition.
2849          *
2850          * The pool of memblocks never shrinks except in the __vxge_hw_fifo_close
2851          * routine, which essentially stops the channel and frees its resources.
2852          */
2853
2854         /* TxDL common private size == TxDL private  +  driver private */
2855         fifo->priv_size =
2856                 sizeof(struct __vxge_hw_fifo_txdl_priv) + attr->per_txdl_space;
2857         fifo->priv_size = ((fifo->priv_size  +  VXGE_CACHE_LINE_SIZE - 1) /
2858                         VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
2859
2860         fifo->per_txdl_space = attr->per_txdl_space;
2861
2862         /* recompute txdl size to be cacheline aligned */
2863         fifo->txdl_size = txdl_size;
2864         fifo->txdl_per_memblock = txdl_per_memblock;
2865
2866         fifo->txdl_term = attr->txdl_term;
2867         fifo->callback = attr->callback;
2868
2869         if (fifo->txdl_per_memblock == 0) {
2870                 __vxge_hw_fifo_delete(vp);
2871                 status = VXGE_HW_ERR_INVALID_BLOCK_SIZE;
2872                 goto exit;
2873         }
2874
2875         fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
2876
2877         fifo->mempool =
2878                 __vxge_hw_mempool_create(vpath->hldev,
2879                         fifo->config->memblock_size,
2880                         fifo->txdl_size,
2881                         fifo->priv_size,
2882                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2883                         (fifo->config->fifo_blocks * fifo->txdl_per_memblock),
2884                         &fifo_mp_callback,
2885                         fifo);
2886
2887         if (fifo->mempool == NULL) {
2888                 __vxge_hw_fifo_delete(vp);
2889                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2890                 goto exit;
2891         }
2892
2893         status = __vxge_hw_channel_initialize(&fifo->channel);
2894         if (status != VXGE_HW_OK) {
2895                 __vxge_hw_fifo_delete(vp);
2896                 goto exit;
2897         }
2898
2899         vxge_assert(fifo->channel.reserve_ptr);
2900 exit:
2901         return status;
2902 }
2903
2904 /*
2905  * __vxge_hw_fifo_abort - Return outstanding TxDs to the free pool
2906  * This function terminates (aborts) all outstanding TxDs of the fifo
2907  */
2908 static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2909 {
2910         void *txdlh;
2911
2912         for (;;) {
2913                 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2914
2915                 if (txdlh == NULL)
2916                         break;
2917
2918                 vxge_hw_channel_dtr_complete(&fifo->channel);
2919
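                /*
                 * Let the driver's termination callback release any per-TxDL
                 * resources for descriptors that were posted but will never
                 * be completed by the hardware.
                 */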
2920                 if (fifo->txdl_term) {
2921                         fifo->txdl_term(txdlh,
2922                         VXGE_HW_TXDL_STATE_POSTED,
2923                         fifo->channel.userdata);
2924                 }
2925
2926                 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2927         }
2928
2929         return VXGE_HW_OK;
2930 }
2931
2932 /*
2933  * __vxge_hw_fifo_reset - Resets the fifo
2934  * This function resets the fifo during vpath reset operation
2935  */
2936 static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2937 {
2938         enum vxge_hw_status status = VXGE_HW_OK;
2939
2940         __vxge_hw_fifo_abort(fifo);
2941         status = __vxge_hw_channel_reset(&fifo->channel);
2942
2943         return status;
2944 }
2945
2946 /*
2947  * __vxge_hw_fifo_delete - Removes the FIFO
2948  * This function frees up the memory pool and removes the FIFO
2949  */
2950 enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2951 {
2952         struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2953
2954         __vxge_hw_fifo_abort(fifo);
2955
2956         if (fifo->mempool)
2957                 __vxge_hw_mempool_destroy(fifo->mempool);
2958
2959         vp->vpath->fifoh = NULL;
2960
2961         __vxge_hw_channel_free(&fifo->channel);
2962
2963         return VXGE_HW_OK;
2964 }
2965
2966 /*
2967  * __vxge_hw_vpath_pci_read - Read the content of a given address
2968  *                            in PCI config space.
2969  * The read is performed through the vpath's pci_config_access window.
2970  */
2971 static enum vxge_hw_status
2972 __vxge_hw_vpath_pci_read(struct __vxge_hw_virtualpath *vpath,
2973                          u32 phy_func_0, u32 offset, u32 *val)
2974 {
2975         u64 val64;
2976         enum vxge_hw_status status = VXGE_HW_OK;
2977         struct vxge_hw_vpath_reg __iomem *vp_reg = vpath->vp_reg;
2978
2979         val64 = VXGE_HW_PCI_CONFIG_ACCESS_CFG1_ADDRESS(offset);
2980
2981         if (phy_func_0)
2982                 val64 |= VXGE_HW_PCI_CONFIG_ACCESS_CFG1_SEL_FUNC0;
2983
2984         writeq(val64, &vp_reg->pci_config_access_cfg1);
2985         wmb();
2986         writeq(VXGE_HW_PCI_CONFIG_ACCESS_CFG2_REQ,
2987                         &vp_reg->pci_config_access_cfg2);
2988         wmb();
2989
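        /*
         * The request bit is expected to clear once the adapter has
         * completed the config-space access; poll for that before reading
         * the result from the status register below.
         */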
2990         status = __vxge_hw_device_register_poll(
2991                         &vp_reg->pci_config_access_cfg2,
2992                         VXGE_HW_INTR_MASK_ALL, VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2993
2994         if (status != VXGE_HW_OK)
2995                 goto exit;
2996
2997         val64 = readq(&vp_reg->pci_config_access_status);
2998
2999         if (val64 & VXGE_HW_PCI_CONFIG_ACCESS_STATUS_ACCESS_ERR) {
3000                 status = VXGE_HW_FAIL;
3001                 *val = 0;
3002         } else
3003                 *val = (u32)vxge_bVALn(val64, 32, 32);
3004 exit:
3005         return status;
3006 }
3007
3008 /**
3009  * vxge_hw_device_flick_link_led - Flick (blink) link LED.
3010  * @hldev: HW device.
3011  * @on_off: TRUE if flickering to be on, FALSE to be off
3012  *
3013  * Flicker the link LED.
3014  */
3015 enum vxge_hw_status
3016 vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
3017 {
3018         struct __vxge_hw_virtualpath *vpath;
3019         u64 data0, data1 = 0, steer_ctrl = 0;
3020         enum vxge_hw_status status;
3021
3022         if (hldev == NULL) {
3023                 status = VXGE_HW_ERR_INVALID_DEVICE;
3024                 goto exit;
3025         }
3026
3027         vpath = &hldev->virtual_paths[hldev->first_vp_id];
3028
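        /*
         * data0 carries the on/off request to the LED_CONTROL firmware
         * action; data1 and steer_ctrl are passed in as zero and are not
         * otherwise used here.
         */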
3029         data0 = on_off;
3030         status = vxge_hw_vpath_fw_api(vpath,
3031                         VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
3032                         VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
3033                         0, &data0, &data1, &steer_ctrl);
3034 exit:
3035         return status;
3036 }
3037
3038 /*
3039  * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3040  */
3041 enum vxge_hw_status
3042 __vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3043                               u32 action, u32 rts_table, u32 offset,
3044                               u64 *data0, u64 *data1)
3045 {
3046         enum vxge_hw_status status;
3047         u64 steer_ctrl = 0;
3048
3049         if (vp == NULL) {
3050                 status = VXGE_HW_ERR_INVALID_HANDLE;
3051                 goto exit;
3052         }
3053
3054         if ((rts_table ==
3055              VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3056             (rts_table ==
3057              VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3058             (rts_table ==
3059              VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3060             (rts_table ==
3061              VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3062                 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3063         }
3064
3065         status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3066                                       data0, data1, &steer_ctrl);
3067         if (status != VXGE_HW_OK)
3068                 goto exit;
3069
3070         if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3071             (rts_table !=
3072              VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3073                 *data1 = 0;
3074 exit:
3075         return status;
3076 }
3077
3078 /*
3079  * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3080  */
3081 enum vxge_hw_status
3082 __vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3083                               u32 rts_table, u32 offset, u64 steer_data0,
3084                               u64 steer_data1)
3085 {
3086         u64 data0, data1 = 0, steer_ctrl = 0;
3087         enum vxge_hw_status status;
3088
3089         if (vp == NULL) {
3090                 status = VXGE_HW_ERR_INVALID_HANDLE;
3091                 goto exit;
3092         }
3093
3094         data0 = steer_data0;
3095
3096         if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3097             (rts_table ==
3098              VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3099                 data1 = steer_data1;
3100
3101         status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3102                                       &data0, &data1, &steer_ctrl);
3103 exit:
3104         return status;
3105 }
3106
3107 /*
3108  * vxge_hw_vpath_rts_rth_set - Set/configure RTH (receive traffic hashing).
3109  */
3110 enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3111                         struct __vxge_hw_vpath_handle *vp,
3112                         enum vxge_hw_rth_algoritms algorithm,
3113                         struct vxge_hw_rth_hash_types *hash_type,
3114                         u16 bucket_size)
3115 {
3116         u64 data0, data1;
3117         enum vxge_hw_status status = VXGE_HW_OK;
3118
3119         if (vp == NULL) {
3120                 status = VXGE_HW_ERR_INVALID_HANDLE;
3121                 goto exit;
3122         }
3123
3124         status = __vxge_hw_vpath_rts_table_get(vp,
3125                      VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3126                      VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3127                         0, &data0, &data1);
3128         if (status != VXGE_HW_OK)
3129                 goto exit;
3130
3131         data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3132                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
3133
3134         data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_EN |
3135         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(bucket_size) |
3136         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(algorithm);
3137
3138         if (hash_type->hash_type_tcpipv4_en)
3139                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV4_EN;
3140
3141         if (hash_type->hash_type_ipv4_en)
3142                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV4_EN;
3143
3144         if (hash_type->hash_type_tcpipv6_en)
3145                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EN;
3146
3147         if (hash_type->hash_type_ipv6_en)
3148                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EN;
3149
3150         if (hash_type->hash_type_tcpipv6ex_en)
3151                 data0 |=
3152                 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_TCP_IPV6_EX_EN;
3153
3154         if (hash_type->hash_type_ipv6ex_en)
3155                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_RTH_IPV6_EX_EN;
3156
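        /*
         * Toggle which of the two RTH indirection tables is marked active,
         * presumably so that the updated configuration is picked up on the
         * next table switch.
         */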
3157         if (VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_RTH_GEN_ACTIVE_TABLE(data0))
3158                 data0 &= ~VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3159         else
3160                 data0 |= VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ACTIVE_TABLE;
3161
3162         status = __vxge_hw_vpath_rts_table_set(vp,
3163                 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY,
3164                 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3165                 0, data0, 0);
3166 exit:
3167         return status;
3168 }
3169
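/*
 * vxge_hw_rts_rth_data0_data1_get - Pack one indirection table entry
 * The flag argument selects which of the four item slots (ITEM0/ITEM1 of
 * data0 and data1) receives bucket number j and its steering data
 * itable[j]; the caller accumulates up to four entries per register write.
 */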
3170 static void
3171 vxge_hw_rts_rth_data0_data1_get(u32 j, u64 *data0, u64 *data1,
3172                                 u16 flag, u8 *itable)
3173 {
3174         switch (flag) {
3175         case 1:
3176                 *data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_NUM(j)|
3177                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_ENTRY_EN |
3178                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM0_BUCKET_DATA(
3179                         itable[j]);
                     break;
3180         case 2:
3181                 *data0 |=
3182                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_NUM(j)|
3183                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_ENTRY_EN |
3184                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_ITEM1_BUCKET_DATA(
3185                         itable[j]);
                     break;
3186         case 3:
3187                 *data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_NUM(j)|
3188                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_ENTRY_EN |
3189                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM0_BUCKET_DATA(
3190                         itable[j]);
                     break;
3191         case 4:
3192                 *data1 |=
3193                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_NUM(j)|
3194                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_ENTRY_EN |
3195                         VXGE_HW_RTS_ACCESS_STEER_DATA1_RTH_ITEM1_BUCKET_DATA(
3196                         itable[j]);
                     break;
3197         default:
3198                 return;
3199         }
3200 }
3201 /*
3202  * vxge_hw_vpath_rts_rth_itable_set - Set/configure indirection table (IT).
3203  */
3204 enum vxge_hw_status vxge_hw_vpath_rts_rth_itable_set(
3205                         struct __vxge_hw_vpath_handle **vpath_handles,
3206                         u32 vpath_count,
3207                         u8 *mtable,
3208                         u8 *itable,
3209                         u32 itable_size)
3210 {
3211         u32 i, j, action, rts_table;
3212         u64 data0;
3213         u64 data1;
3214         u32 max_entries;
3215         enum vxge_hw_status status = VXGE_HW_OK;
3216         struct __vxge_hw_vpath_handle *vp = vpath_handles[0];
3217
3218         if (vp == NULL) {
3219                 status = VXGE_HW_ERR_INVALID_HANDLE;
3220                 goto exit;
3221         }
3222
3223         max_entries = (((u32)1) << itable_size);
3224
3225         if (vp->vpath->hldev->config.rth_it_type
3226                                 == VXGE_HW_RTH_IT_TYPE_SOLO_IT) {
3227                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3228                 rts_table =
3229                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT;
3230
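                /*
                 * First pass: seed every bucket through vpath 0 with the
                 * entry disabled; the second pass below re-writes each
                 * bucket through its owning vpath with ENTRY_EN set.
                 */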
3231                 for (j = 0; j < max_entries; j++) {
3232
3233                         data1 = 0;
3234
3235                         data0 =
3236                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3237                                 itable[j]);
3238
3239                         status = __vxge_hw_vpath_rts_table_set(vpath_handles[0],
3240                                 action, rts_table, j, data0, data1);
3241
3242                         if (status != VXGE_HW_OK)
3243                                 goto exit;
3244                 }
3245
3246                 for (j = 0; j < max_entries; j++) {
3247
3248                         data1 = 0;
3249
3250                         data0 =
3251                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_ENTRY_EN |
3252                         VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_SOLO_IT_BUCKET_DATA(
3253                                 itable[j]);
3254
3255                         status = __vxge_hw_vpath_rts_table_set(
3256                                 vpath_handles[mtable[itable[j]]], action,
3257                                 rts_table, j, data0, data1);
3258
3259                         if (status != VXGE_HW_OK)
3260                                 goto exit;
3261                 }
3262         } else {
3263                 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_WRITE_ENTRY;
3264                 rts_table =
3265                         VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT;
3266                 for (i = 0; i < vpath_count; i++) {
3267
3268                         for (j = 0; j < max_entries;) {
3269
3270                                 data0 = 0;
3271                                 data1 = 0;
3272
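                                /*
                                 * Collect up to four buckets owned by this
                                 * vpath (two items in data0, two in data1)
                                 * and program them with a single RTS write.
                                 */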
3273                                 while (j < max_entries) {
3274                                         if (mtable[itable[j]] != i) {
3275                                                 j++;
3276                                                 continue;
3277                                         }
3278                                         vxge_hw_rts_rth_data0_data1_get(j,
3279                                                 &data0, &data1, 1, itable);
3280                                         j++;
3281                                         break;
3282                                 }
3283
3284                                 while (j < max_entries) {
3285                                         if (mtable[itable[j]] != i) {
3286                                                 j++;
3287                                                 continue;
3288                                         }
3289                                         vxge_hw_rts_rth_data0_data1_get(j,
3290                                                 &data0, &data1, 2, itable);
3291                                         j++;
3292                                         break;
3293                                 }
3294
3295                                 while (j < max_entries) {
3296                                         if (mtable[itable[j]] != i) {
3297                                                 j++;
3298                                                 continue;
3299                                         }
3300                                         vxge_hw_rts_rth_data0_data1_get(j,
3301                                                 &data0, &data1, 3, itable);
3302                                         j++;
3303                                         break;
3304                                 }
3305
3306                                 while (j < max_entries) {
3307                                         if (mtable[itable[j]] != i) {
3308                                                 j++;
3309                                                 continue;
3310                                         }
3311                                         vxge_hw_rts_rth_data0_data1_get(j,
3312                                                 &data0, &data1, 4, itable);
3313                                         j++;
3314                                         break;
3315                                 }
3316
3317                                 if (data0 != 0) {
3318                                         status = __vxge_hw_vpath_rts_table_set(
3319                                                         vpath_handles[i],
3320                                                         action, rts_table,
3321                                                         0, data0, data1);
3322
3323                                         if (status != VXGE_HW_OK)
3324                                                 goto exit;
3325                                 }
3326                         }
3327                 }
3328         }
3329 exit:
3330         return status;
3331 }
3332
3333 /**
3334  * vxge_hw_vpath_check_leak - Check for memory leak
3335  * @ring: Handle to the ring object used for receive
3336  *
3337  * If PRC_RXD_DOORBELL_VPn.NEW_QW_CNT is larger or equal to
3338  * PRC_CFG6_VPn.RXD_SPAT then a leak has occurred.
3339  * Returns: VXGE_HW_FAIL, if leak has occurred.
3340  *
3341  */
3342 enum vxge_hw_status
3343 vxge_hw_vpath_check_leak(struct __vxge_hw_ring *ring)
3344 {
3345         enum vxge_hw_status status = VXGE_HW_OK;
3346         u64 rxd_new_count, rxd_spat;
3347
3348         if (ring == NULL)
3349                 return status;
3350
3351         rxd_new_count = readl(&ring->vp_reg->prc_rxd_doorbell);
3352         rxd_spat = readq(&ring->vp_reg->prc_cfg6);
3353         rxd_spat = VXGE_HW_PRC_CFG6_RXD_SPAT(rxd_spat);
3354
3355         if (rxd_new_count >= rxd_spat)
3356                 status = VXGE_HW_FAIL;
3357
3358         return status;
3359 }
3360
3361 /*
3362  * __vxge_hw_vpath_mgmt_read
3363  * This routine reads the vpath_mgmt registers
3364  */
3365 static enum vxge_hw_status
3366 __vxge_hw_vpath_mgmt_read(
3367         struct __vxge_hw_device *hldev,
3368         struct __vxge_hw_virtualpath *vpath)
3369 {
3370         u32 i, mtu = 0, max_pyld = 0;
3371         u64 val64;
3372         enum vxge_hw_status status = VXGE_HW_OK;
3373
3374         for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
3375
3376                 val64 = readq(&vpath->vpmgmt_reg->
3377                                 rxmac_cfg0_port_vpmgmt_clone[i]);
3378                 max_pyld =
3379                         (u32)
3380                         VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_GET_MAX_PYLD_LEN
3381                         (val64);
3382                 if (mtu < max_pyld)
3383                         mtu = max_pyld;
3384         }
3385
3386         vpath->max_mtu = mtu + VXGE_HW_MAC_HEADER_MAX_SIZE;
3387
3388         val64 = readq(&vpath->vpmgmt_reg->xmac_vsport_choices_vp);
3389
3390         for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3391                 if (val64 & vxge_mBIT(i))
3392                         vpath->vsport_number = i;
3393         }
3394
3395         val64 = readq(&vpath->vpmgmt_reg->xgmac_gen_status_vpmgmt_clone);
3396
3397         if (val64 & VXGE_HW_XGMAC_GEN_STATUS_VPMGMT_CLONE_XMACJ_NTWK_OK)
3398                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_UP);
3399         else
3400                 VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
3401
3402         return status;
3403 }
3404
3405 /*
3406  * __vxge_hw_vpath_reset_check - Check if resetting the vpath completed
3407  * This routine checks the vpath_rst_in_prog register to see if
3408  * the adapter has completed the reset process for the vpath
3409  */
3410 static enum vxge_hw_status
3411 __vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath)
3412 {
3413         enum vxge_hw_status status;
3414
3415         status = __vxge_hw_device_register_poll(
3416                         &vpath->hldev->common_reg->vpath_rst_in_prog,
3417                         VXGE_HW_VPATH_RST_IN_PROG_VPATH_RST_IN_PROG(
3418                                 1 << (16 - vpath->vp_id)),
3419                         vpath->hldev->config.device_poll_millis);
3420
3421         return status;
3422 }
3423
3424 /*
3425  * __vxge_hw_vpath_reset
3426  * This routine resets the vpath on the device
3427  */
3428 static enum vxge_hw_status
3429 __vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3430 {
3431         u64 val64;
3432         enum vxge_hw_status status = VXGE_HW_OK;
3433
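        /*
         * Request a software reset of this vpath.  Only the upper 32 bits
         * of cmn_rsthdlr_cfg0 are written; completion is checked separately
         * via __vxge_hw_vpath_reset_check().
         */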
3434         val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
3435
3436         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
3437                                 &hldev->common_reg->cmn_rsthdlr_cfg0);
3438
3439         return status;
3440 }
3441
3442 /*
3443  * __vxge_hw_vpath_sw_reset
3444  * This routine resets the vpath structures
3445  */
3446 static enum vxge_hw_status
3447 __vxge_hw_vpath_sw_reset(struct __vxge_hw_device *hldev, u32 vp_id)
3448 {
3449         enum vxge_hw_status status = VXGE_HW_OK;
3450         struct __vxge_hw_virtualpath *vpath;
3451
3452         vpath = (struct __vxge_hw_virtualpath *)&hldev->virtual_paths[vp_id];
3453
3454         if (vpath->ringh) {
3455                 status = __vxge_hw_ring_reset(vpath->ringh);
3456                 if (status != VXGE_HW_OK)
3457                         goto exit;
3458         }
3459
3460         if (vpath->fifoh)
3461                 status = __vxge_hw_fifo_reset(vpath->fifoh);
3462 exit:
3463         return status;
3464 }
3465
3466 /*
3467  * __vxge_hw_vpath_prc_configure
3468  * This routine configures the PRC registers of the virtual path using the config
3469  * passed
3470  */
3471 static void
3472 __vxge_hw_vpath_prc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3473 {
3474         u64 val64;
3475         struct __vxge_hw_virtualpath *vpath;
3476         struct vxge_hw_vp_config *vp_config;
3477         struct vxge_hw_vpath_reg __iomem *vp_reg;
3478
3479         vpath = &hldev->virtual_paths[vp_id];
3480         vp_reg = vpath->vp_reg;
3481         vp_config = vpath->vp_config;
3482
3483         if (vp_config->ring.enable == VXGE_HW_RING_DISABLE)
3484                 return;
3485
3486         val64 = readq(&vp_reg->prc_cfg1);
3487         val64 |= VXGE_HW_PRC_CFG1_RTI_TINT_DISABLE;
3488         writeq(val64, &vp_reg->prc_cfg1);
3489
3490         val64 = readq(&vpath->vp_reg->prc_cfg6);
3491         val64 |= VXGE_HW_PRC_CFG6_DOORBELL_MODE_EN;
3492         writeq(val64, &vpath->vp_reg->prc_cfg6);
3493
3494         val64 = readq(&vp_reg->prc_cfg7);
3495
3496         if (vpath->vp_config->ring.scatter_mode !=
3497                 VXGE_HW_RING_SCATTER_MODE_USE_FLASH_DEFAULT) {
3498
3499                 val64 &= ~VXGE_HW_PRC_CFG7_SCATTER_MODE(0x3);
3500
3501                 switch (vpath->vp_config->ring.scatter_mode) {
3502                 case VXGE_HW_RING_SCATTER_MODE_A:
3503                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3504                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_A);
3505                         break;
3506                 case VXGE_HW_RING_SCATTER_MODE_B:
3507                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3508                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_B);
3509                         break;
3510                 case VXGE_HW_RING_SCATTER_MODE_C:
3511                         val64 |= VXGE_HW_PRC_CFG7_SCATTER_MODE(
3512                                         VXGE_HW_PRC_CFG7_SCATTER_MODE_C);
3513                         break;
3514                 }
3515         }
3516
3517         writeq(val64, &vp_reg->prc_cfg7);
3518
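        /*
         * Program the DMA address of the first RxD block; prc_cfg5 stores
         * the address shifted right by 3, i.e. in units of 8 bytes.
         */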
3519         writeq(VXGE_HW_PRC_CFG5_RXD0_ADD(
3520                                 __vxge_hw_ring_first_block_address_get(
3521                                         vpath->ringh) >> 3), &vp_reg->prc_cfg5);
3522
3523         val64 = readq(&vp_reg->prc_cfg4);
3524         val64 |= VXGE_HW_PRC_CFG4_IN_SVC;
3525         val64 &= ~VXGE_HW_PRC_CFG4_RING_MODE(0x3);
3526
3527         val64 |= VXGE_HW_PRC_CFG4_RING_MODE(
3528                         VXGE_HW_PRC_CFG4_RING_MODE_ONE_BUFFER);
3529
3530         if (hldev->config.rth_en == VXGE_HW_RTH_DISABLE)
3531                 val64 |= VXGE_HW_PRC_CFG4_RTH_DISABLE;
3532         else
3533                 val64 &= ~VXGE_HW_PRC_CFG4_RTH_DISABLE;
3534
3535         writeq(val64, &vp_reg->prc_cfg4);
3536 }
3537
3538 /*
3539  * __vxge_hw_vpath_kdfc_configure
3540  * This routine configures the KDFC registers of the virtual path using the
3541  * config passed
3542  */
3543 static enum vxge_hw_status
3544 __vxge_hw_vpath_kdfc_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3545 {
3546         u64 val64;
3547         u64 vpath_stride;
3548         enum vxge_hw_status status = VXGE_HW_OK;
3549         struct __vxge_hw_virtualpath *vpath;
3550         struct vxge_hw_vpath_reg __iomem *vp_reg;
3551
3552         vpath = &hldev->virtual_paths[vp_id];
3553         vp_reg = vpath->vp_reg;
3554         status = __vxge_hw_kdfc_swapper_set(hldev->legacy_reg, vp_reg);
3555
3556         if (status != VXGE_HW_OK)
3557                 goto exit;
3558
3559         val64 = readq(&vp_reg->kdfc_drbl_triplet_total);
3560
3561         vpath->max_kdfc_db =
3562                 (u32)VXGE_HW_KDFC_DRBL_TRIPLET_TOTAL_GET_KDFC_MAX_SIZE(
3563                         val64+1)/2;
3564
3565         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3566
3567                 vpath->max_nofl_db = vpath->max_kdfc_db;
3568
3569                 if (vpath->max_nofl_db <
3570                         ((vpath->vp_config->fifo.memblock_size /
3571                         (vpath->vp_config->fifo.max_frags *
3572                         sizeof(struct vxge_hw_fifo_txd))) *
3573                         vpath->vp_config->fifo.fifo_blocks)) {
3574
3575                         return VXGE_HW_BADCFG_FIFO_BLOCKS;
3576                 }
3577                 val64 = VXGE_HW_KDFC_FIFO_TRPL_PARTITION_LENGTH_0(
3578                                 (vpath->max_nofl_db*2)-1);
3579         }
3580
3581         writeq(val64, &vp_reg->kdfc_fifo_trpl_partition);
3582
3583         writeq(VXGE_HW_KDFC_FIFO_TRPL_CTRL_TRIPLET_ENABLE,
3584                 &vp_reg->kdfc_fifo_trpl_ctrl);
3585
3586         val64 = readq(&vp_reg->kdfc_trpl_fifo_0_ctrl);
3587
3588         val64 &= ~(VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(0x3) |
3589                    VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0xFF));
3590
3591         val64 |= VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE(
3592                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_MODE_NON_OFFLOAD_ONLY) |
3593 #ifndef __BIG_ENDIAN
3594                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SWAP_EN |
3595 #endif
3596                  VXGE_HW_KDFC_TRPL_FIFO_0_CTRL_SELECT(0);
3597
3598         writeq(val64, &vp_reg->kdfc_trpl_fifo_0_ctrl);
3599         writeq((u64)0, &vp_reg->kdfc_trpl_fifo_0_wb_address);
3600         wmb();
3601         vpath_stride = readq(&hldev->toc_reg->toc_kdfc_vpath_stride);
3602
3603         vpath->nofl_db =
3604                 (struct __vxge_hw_non_offload_db_wrapper __iomem *)
3605                 (hldev->kdfc + (vp_id *
3606                 VXGE_HW_TOC_KDFC_VPATH_STRIDE_GET_TOC_KDFC_VPATH_STRIDE(
3607                                         vpath_stride)));
3608 exit:
3609         return status;
3610 }
3611
3612 /*
3613  * __vxge_hw_vpath_mac_configure
3614  * This routine configures the MAC of the virtual path using the config passed
3615  */
3616 static enum vxge_hw_status
3617 __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3618 {
3619         u64 val64;
3620         enum vxge_hw_status status = VXGE_HW_OK;
3621         struct __vxge_hw_virtualpath *vpath;
3622         struct vxge_hw_vp_config *vp_config;
3623         struct vxge_hw_vpath_reg __iomem *vp_reg;
3624
3625         vpath = &hldev->virtual_paths[vp_id];
3626         vp_reg = vpath->vp_reg;
3627         vp_config = vpath->vp_config;
3628
3629         writeq(VXGE_HW_XMAC_VSPORT_CHOICE_VSPORT_NUMBER(
3630                         vpath->vsport_number), &vp_reg->xmac_vsport_choice);
3631
3632         if (vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
3633
3634                 val64 = readq(&vp_reg->xmac_rpa_vcfg);
3635
3636                 if (vp_config->rpa_strip_vlan_tag !=
3637                         VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) {
3638                         if (vp_config->rpa_strip_vlan_tag)
3639                                 val64 |= VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3640                         else
3641                                 val64 &= ~VXGE_HW_XMAC_RPA_VCFG_STRIP_VLAN_TAG;
3642                 }
3643
3644                 writeq(val64, &vp_reg->xmac_rpa_vcfg);
3645                 val64 = readq(&vp_reg->rxmac_vcfg0);
3646
3647                 if (vp_config->mtu !=
3648                                 VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) {
3649                         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
3650                         if ((vp_config->mtu  +
3651                                 VXGE_HW_MAC_HEADER_MAX_SIZE) < vpath->max_mtu)
3652                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3653                                         vp_config->mtu  +
3654                                         VXGE_HW_MAC_HEADER_MAX_SIZE);
3655                         else
3656                                 val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(
3657                                         vpath->max_mtu);
3658                 }
3659
3660                 writeq(val64, &vp_reg->rxmac_vcfg0);
3661
3662                 val64 = readq(&vp_reg->rxmac_vcfg1);
3663
3664                 val64 &= ~(VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(0x3) |
3665                         VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE);
3666
3667                 if (hldev->config.rth_it_type ==
3668                                 VXGE_HW_RTH_IT_TYPE_MULTI_IT) {
3669                         val64 |= VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_BD_MODE(
3670                                 0x2) |
3671                                 VXGE_HW_RXMAC_VCFG1_RTS_RTH_MULTI_IT_EN_MODE;
3672                 }
3673
3674                 writeq(val64, &vp_reg->rxmac_vcfg1);
3675         }
3676         return status;
3677 }
3678
3679 /*
3680  * __vxge_hw_vpath_tim_configure
3681  * This routine configures the TIM registers of the virtual path using the config
3682  * passed
3683  */
3684 static enum vxge_hw_status
3685 __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3686 {
3687         u64 val64;
3688         enum vxge_hw_status status = VXGE_HW_OK;
3689         struct __vxge_hw_virtualpath *vpath;
3690         struct vxge_hw_vpath_reg __iomem *vp_reg;
3691         struct vxge_hw_vp_config *config;
3692
3693         vpath = &hldev->virtual_paths[vp_id];
3694         vp_reg = vpath->vp_reg;
3695         config = vpath->vp_config;
3696
3697         writeq((u64)0, &vp_reg->tim_dest_addr);
3698         writeq((u64)0, &vp_reg->tim_vpath_map);
3699         writeq((u64)0, &vp_reg->tim_bitmap);
3700         writeq((u64)0, &vp_reg->tim_remap);
3701
3702         if (config->ring.enable == VXGE_HW_RING_ENABLE)
3703                 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
3704                         (vp_id * VXGE_HW_MAX_INTR_PER_VP) +
3705                         VXGE_HW_VPATH_INTR_RX), &vp_reg->tim_ring_assn);
3706
3707         val64 = readq(&vp_reg->tim_pci_cfg);
3708         val64 |= VXGE_HW_TIM_PCI_CFG_ADD_PAD;
3709         writeq(val64, &vp_reg->tim_pci_cfg);
3710
3711         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3712
3713                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3714
3715                 if (config->tti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3716                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3717                                 0x3ffffff);
3718                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3719                                         config->tti.btimer_val);
3720                 }
3721
3722                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3723
3724                 if (config->tti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3725                         if (config->tti.timer_ac_en)
3726                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3727                         else
3728                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3729                 }
3730
3731                 if (config->tti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3732                         if (config->tti.timer_ci_en)
3733                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3734                         else
3735                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3736                 }
3737
3738                 if (config->tti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3739                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3740                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3741                                         config->tti.urange_a);
3742                 }
3743
3744                 if (config->tti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3745                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3746                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3747                                         config->tti.urange_b);
3748                 }
3749
3750                 if (config->tti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3751                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3752                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3753                                         config->tti.urange_c);
3754                 }
3755
3756                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3757                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3758
3759                 if (config->tti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3760                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3761                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3762                                                 config->tti.uec_a);
3763                 }
3764
3765                 if (config->tti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3766                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3767                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3768                                                 config->tti.uec_b);
3769                 }
3770
3771                 if (config->tti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3772                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3773                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3774                                                 config->tti.uec_c);
3775                 }
3776
3777                 if (config->tti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3778                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3779                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3780                                                 config->tti.uec_d);
3781                 }
3782
3783                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_TX]);
3784                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3785
3786                 if (config->tti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3787                         if (config->tti.timer_ri_en)
3788                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3789                         else
3790                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3791                 }
3792
3793                 if (config->tti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3794                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3795                                         0x3ffffff);
3796                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3797                                         config->tti.rtimer_val);
3798                 }
3799
3800                 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3801                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3802                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3803                                         config->tti.util_sel);
3804                 }
3805
3806                 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3807                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3808                                         0x3ffffff);
3809                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3810                                         config->tti.ltimer_val);
3811                 }
3812
3813                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_TX]);
3814         }
3815
3816         if (config->ring.enable == VXGE_HW_RING_ENABLE) {
3817
3818                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3819
3820                 if (config->rti.btimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3821                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3822                                         0x3ffffff);
3823                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_BTIMER_VAL(
3824                                         config->rti.btimer_val);
3825                 }
3826
3827                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_BITMP_EN;
3828
3829                 if (config->rti.timer_ac_en != VXGE_HW_USE_FLASH_DEFAULT) {
3830                         if (config->rti.timer_ac_en)
3831                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3832                         else
3833                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_AC;
3834                 }
3835
3836                 if (config->rti.timer_ci_en != VXGE_HW_USE_FLASH_DEFAULT) {
3837                         if (config->rti.timer_ci_en)
3838                                 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3839                         else
3840                                 val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3841                 }
3842
3843                 if (config->rti.urange_a != VXGE_HW_USE_FLASH_DEFAULT) {
3844                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(0x3f);
3845                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_A(
3846                                         config->rti.urange_a);
3847                 }
3848
3849                 if (config->rti.urange_b != VXGE_HW_USE_FLASH_DEFAULT) {
3850                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(0x3f);
3851                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_B(
3852                                         config->rti.urange_b);
3853                 }
3854
3855                 if (config->rti.urange_c != VXGE_HW_USE_FLASH_DEFAULT) {
3856                         val64 &= ~VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(0x3f);
3857                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_URNG_C(
3858                                         config->rti.urange_c);
3859                 }
3860
3861                 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_RX]);
3862                 val64 = readq(&vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3863
3864                 if (config->rti.uec_a != VXGE_HW_USE_FLASH_DEFAULT) {
3865                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(0xffff);
3866                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_A(
3867                                                 config->rti.uec_a);
3868                 }
3869
3870                 if (config->rti.uec_b != VXGE_HW_USE_FLASH_DEFAULT) {
3871                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(0xffff);
3872                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_B(
3873                                                 config->rti.uec_b);
3874                 }
3875
3876                 if (config->rti.uec_c != VXGE_HW_USE_FLASH_DEFAULT) {
3877                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(0xffff);
3878                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_C(
3879                                                 config->rti.uec_c);
3880                 }
3881
3882                 if (config->rti.uec_d != VXGE_HW_USE_FLASH_DEFAULT) {
3883                         val64 &= ~VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(0xffff);
3884                         val64 |= VXGE_HW_TIM_CFG2_INT_NUM_UEC_D(
3885                                                 config->rti.uec_d);
3886                 }
3887
3888                 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_RX]);
3889                 val64 = readq(&vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3890
3891                 if (config->rti.timer_ri_en != VXGE_HW_USE_FLASH_DEFAULT) {
3892                         if (config->rti.timer_ri_en)
3893                                 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3894                         else
3895                                 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_TIMER_RI;
3896                 }
3897
3898                 if (config->rti.rtimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3899                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3900                                         0x3ffffff);
3901                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_RTIMER_VAL(
3902                                         config->rti.rtimer_val);
3903                 }
3904
3905                 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3906                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3907                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(
3908                                         config->rti.util_sel);
3909                 }
3910
3911                 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
3912                         val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3913                                         0x3ffffff);
3914                         val64 |= VXGE_HW_TIM_CFG3_INT_NUM_LTIMER_VAL(
3915                                         config->rti.ltimer_val);
3916                 }
3917
3918                 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_RX]);
3919         }
3920
3921         val64 = 0;
3922         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3923         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3924         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_EINTA]);
3925         writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3926         writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3927         writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
3928
3929         return status;
3930 }
3931
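/*
 * vxge_hw_vpath_tti_ci_set - Enable the TX timer "continuous interrupt" bit
 * If the TTI timer_ci flag is not already enabled for this vpath, set it in
 * both the cached configuration and the TIM_CFG1 register for the TX
 * interrupt.
 */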
3932 void
3933 vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3934 {
3935         struct __vxge_hw_virtualpath *vpath;
3936         struct vxge_hw_vpath_reg __iomem *vp_reg;
3937         struct vxge_hw_vp_config *config;
3938         u64 val64;
3939
3940         vpath = &hldev->virtual_paths[vp_id];
3941         vp_reg = vpath->vp_reg;
3942         config = vpath->vp_config;
3943
3944         if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3945                 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3946
3947                 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
3948                         config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
3949                         val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3950                         writeq(val64,
3951                         &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3952                 }
3953         }
3954 }
3955 /*
3956  * __vxge_hw_vpath_initialize
3957  * This routine is the final phase of init which initializes the
3958  * registers of the vpath using the configuration passed.
3959  */
3960 static enum vxge_hw_status
3961 __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
3962 {
3963         u64 val64;
3964         u32 val32;
3965         enum vxge_hw_status status = VXGE_HW_OK;
3966         struct __vxge_hw_virtualpath *vpath;
3967         struct vxge_hw_vpath_reg __iomem *vp_reg;
3968
3969         vpath = &hldev->virtual_paths[vp_id];
3970
3971         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
3972                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
3973                 goto exit;
3974         }
3975         vp_reg = vpath->vp_reg;
3976
3977         status =  __vxge_hw_vpath_swapper_set(vpath->vp_reg);
3978
3979         if (status != VXGE_HW_OK)
3980                 goto exit;
3981
3982         status =  __vxge_hw_vpath_mac_configure(hldev, vp_id);
3983
3984         if (status != VXGE_HW_OK)
3985                 goto exit;
3986
3987         status =  __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
3988
3989         if (status != VXGE_HW_OK)
3990                 goto exit;
3991
3992         status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
3993
3994         if (status != VXGE_HW_OK)
3995                 goto exit;
3996
3997         val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
3998
3999         /* Get MRRS value from device control */
4000         status  = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4001
4002         if (status == VXGE_HW_OK) {
4003                 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
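                /*
                 * Bits 14:12 of the PCI Express Device Control register
                 * encode the Max_Read_Request_Size; use the encoded value
                 * as the read DMA fill threshold.
                 */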
4004                 val64 &=
4005                     ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(7));
4006                 val64 |=
4007                     VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_FILL_THRESH(val32);
4008
4009                 val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_WAIT_FOR_SPACE;
4010         }
4011
4012         val64 &= ~(VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(7));
4013         val64 |=
4014             VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY(
4015                     VXGE_HW_MAX_PAYLOAD_SIZE_512);
4016
4017         val64 |= VXGE_HW_RTDMA_RD_OPTIMIZATION_CTRL_FB_ADDR_BDRY_EN;
4018         writeq(val64, &vp_reg->rtdma_rd_optimization_ctrl);
4019
4020 exit:
4021         return status;
4022 }
4023
4024 /*
4025  * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4026  * This routine is the initial phase of init which resets the vpath and
4027  * initializes the software support structures.
4028  */
4029 static enum vxge_hw_status
4030 __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4031                         struct vxge_hw_vp_config *config)
4032 {
4033         struct __vxge_hw_virtualpath *vpath;
4034         enum vxge_hw_status status = VXGE_HW_OK;
4035
4036         if (!(hldev->vpath_assignments & vxge_mBIT(vp_id))) {
4037                 status = VXGE_HW_ERR_VPATH_NOT_AVAILABLE;
4038                 goto exit;
4039         }
4040
4041         vpath = &hldev->virtual_paths[vp_id];
4042
4043         spin_lock_init(&hldev->virtual_paths[vp_id].lock);
4044         vpath->vp_id = vp_id;
4045         vpath->vp_open = VXGE_HW_VP_OPEN;
4046         vpath->hldev = hldev;
4047         vpath->vp_config = config;
4048         vpath->vp_reg = hldev->vpath_reg[vp_id];
4049         vpath->vpmgmt_reg = hldev->vpmgmt_reg[vp_id];
4050
4051         __vxge_hw_vpath_reset(hldev, vp_id);
4052
4053         status = __vxge_hw_vpath_reset_check(vpath);
4054         if (status != VXGE_HW_OK) {
4055                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4056                 goto exit;
4057         }
4058
4059         status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4060         if (status != VXGE_HW_OK) {
4061                 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4062                 goto exit;
4063         }
4064
4065         INIT_LIST_HEAD(&vpath->vpath_handles);
4066
4067         vpath->sw_stats = &hldev->stats.sw_dev_info_stats.vpath_info[vp_id];
4068
4069         VXGE_HW_DEVICE_TIM_INT_MASK_SET(hldev->tim_int_mask0,
4070                 hldev->tim_int_mask1, vp_id);
4071
4072         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4073         if (status != VXGE_HW_OK)
4074                 __vxge_hw_vp_terminate(hldev, vp_id);
4075 exit:
4076         return status;
4077 }
4078
4079 /*
4080  * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4081  * This routine closes all the channels it opened and frees up memory
4082  */
4083 static void
4084 __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4085 {
4086         struct __vxge_hw_virtualpath *vpath;
4087
4088         vpath = &hldev->virtual_paths[vp_id];
4089
4090         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4091                 goto exit;
4092
4093         VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4094                 vpath->hldev->tim_int_mask1, vpath->vp_id);
4095         hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4096
4097         memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4098 exit:
4099         return;
4100 }
4101
4102 /*
4103  * vxge_hw_vpath_mtu_set - Set MTU.
4104  * Set new MTU value. Example, to use jumbo frames:
4105  * vxge_hw_vpath_mtu_set(my_device, 9600);
4106  */
4107 enum vxge_hw_status
4108 vxge_hw_vpath_mtu_set(struct __vxge_hw_vpath_handle *vp, u32 new_mtu)
4109 {
4110         u64 val64;
4111         enum vxge_hw_status status = VXGE_HW_OK;
4112         struct __vxge_hw_virtualpath *vpath;
4113
4114         if (vp == NULL) {
4115                 status = VXGE_HW_ERR_INVALID_HANDLE;
4116                 goto exit;
4117         }
4118         vpath = vp->vpath;
4119
4120         new_mtu += VXGE_HW_MAC_HEADER_MAX_SIZE;
4121
4122         if ((new_mtu < VXGE_HW_MIN_MTU) || (new_mtu > vpath->max_mtu)) {
4123                 status = VXGE_HW_ERR_INVALID_MTU_SIZE;
                     goto exit;
             }
4124
4125         val64 = readq(&vpath->vp_reg->rxmac_vcfg0);
4126
4127         val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
4128         val64 |= VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(new_mtu);
4129
4130         writeq(val64, &vpath->vp_reg->rxmac_vcfg0);
4131
4132         vpath->vp_config->mtu = new_mtu - VXGE_HW_MAC_HEADER_MAX_SIZE;
4133
4134 exit:
4135         return status;
4136 }
4137
4138 /*
4139  * vxge_hw_vpath_open - Open a virtual path on a given adapter
4140  * This function is used to open access to a virtual path of an
4141  * adapter for offload and GRO operations. This function returns
4142  * synchronously.
4143  */
4144 enum vxge_hw_status
4145 vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4146                    struct vxge_hw_vpath_attr *attr,
4147                    struct __vxge_hw_vpath_handle **vpath_handle)
4148 {
4149         struct __vxge_hw_virtualpath *vpath;
4150         struct __vxge_hw_vpath_handle *vp;
4151         enum vxge_hw_status status;
4152
4153         vpath = &hldev->virtual_paths[attr->vp_id];
4154
4155         if (vpath->vp_open == VXGE_HW_VP_OPEN) {
4156                 status = VXGE_HW_ERR_INVALID_STATE;
4157                 goto vpath_open_exit1;
4158         }
4159
4160         status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4161                         &hldev->config.vp_config[attr->vp_id]);
4162
4163         if (status != VXGE_HW_OK)
4164                 goto vpath_open_exit1;
4165
4166         vp = (struct __vxge_hw_vpath_handle *)
4167                 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4168         if (vp == NULL) {
4169                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4170                 goto vpath_open_exit2;
4171         }
4172
4173         memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4174
4175         vp->vpath = vpath;
4176
4177         if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
4178                 status = __vxge_hw_fifo_create(vp, &attr->fifo_attr);
4179                 if (status != VXGE_HW_OK)
4180                         goto vpath_open_exit6;
4181         }
4182
4183         if (vpath->vp_config->ring.enable == VXGE_HW_RING_ENABLE) {
4184                 status = __vxge_hw_ring_create(vp, &attr->ring_attr);
4185                 if (status != VXGE_HW_OK)
4186                         goto vpath_open_exit7;
4187
4188                 __vxge_hw_vpath_prc_configure(hldev, attr->vp_id);
4189         }
4190
4191         vpath->fifoh->tx_intr_num =
4192                 (attr->vp_id * VXGE_HW_MAX_INTR_PER_VP)  +
4193                         VXGE_HW_VPATH_INTR_TX;
4194
4195         vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4196                                 VXGE_HW_BLOCK_SIZE);
4197
4198         if (vpath->stats_block == NULL) {
4199                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4200                 goto vpath_open_exit8;
4201         }
4202
4203         vpath->hw_stats = (struct vxge_hw_vpath_stats_hw_info *)vpath->
4204                         stats_block->memblock;
4205         memset(vpath->hw_stats, 0,
4206                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4207
4208         hldev->stats.hw_dev_info_stats.vpath_info[attr->vp_id] =
4209                                                 vpath->hw_stats;
4210
4211         vpath->hw_stats_sav =
4212                 &hldev->stats.hw_dev_info_stats.vpath_info_sav[attr->vp_id];
4213         memset(vpath->hw_stats_sav, 0,
4214                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4215
4216         writeq(vpath->stats_block->dma_addr, &vpath->vp_reg->stats_cfg);
4217
4218         status = vxge_hw_vpath_stats_enable(vp);
4219         if (status != VXGE_HW_OK)
4220                 goto vpath_open_exit8;
4221
4222         list_add(&vp->item, &vpath->vpath_handles);
4223
4224         hldev->vpaths_deployed |= vxge_mBIT(vpath->vp_id);
4225
4226         *vpath_handle = vp;
4227
4228         attr->fifo_attr.userdata = vpath->fifoh;
4229         attr->ring_attr.userdata = vpath->ringh;
4230
4231         return VXGE_HW_OK;
4232
4233 vpath_open_exit8:
4234         if (vpath->ringh != NULL)
4235                 __vxge_hw_ring_delete(vp);
4236 vpath_open_exit7:
4237         if (vpath->fifoh != NULL)
4238                 __vxge_hw_fifo_delete(vp);
4239 vpath_open_exit6:
4240         vfree(vp);
4241 vpath_open_exit2:
4242         __vxge_hw_vp_terminate(hldev, attr->vp_id);
4243 vpath_open_exit1:
4244
4245         return status;
4246 }
4247
4248 /**
4249  * vxge_hw_vpath_rx_doorbell_init - Post the initial RxD doorbell count
4250  * @vp: Handle got from previous vpath open
4251  *
4252  * This function posts the count of RxDs currently available in RxD
4253  * memory to the doorbell register and adjusts the ring's rxds_limit
4254  * accordingly.
4255  */
4256 void
4257 vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4258 {
4259         struct __vxge_hw_virtualpath *vpath = NULL;
4260         u64 new_count, val64, val164;
4261         struct __vxge_hw_ring *ring;
4262
4263         vpath = vp->vpath;
4264         ring = vpath->ringh;
4265
4266         new_count = readq(&vpath->vp_reg->rxdmem_size);
4267         new_count &= 0x1fff;
4268         val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4269
4270         writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4271                 &vpath->vp_reg->prc_rxd_doorbell);
4272         readl(&vpath->vp_reg->prc_rxd_doorbell);
4273
4274         val164 /= 2;
4275         val64 = readq(&vpath->vp_reg->prc_cfg6);
4276         val64 = VXGE_HW_PRC_CFG6_RXD_SPAT(val64);
4277         val64 &= 0x1ff;
4278
4279         /*
4280          * Each RxD is of 4 qwords
4281          */
4282         new_count -= (val64 + 1);
4283         val64 = min(val164, new_count) / 4;
4284
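        /*
         * Clamp the ring's rxds_limit to the computed RxD budget, keeping a
         * floor of four descriptors.
         */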
4285         ring->rxds_limit = min(ring->rxds_limit, val64);
4286         if (ring->rxds_limit < 4)
4287                 ring->rxds_limit = 4;
4288 }
4289
4290 /*
4291  * vxge_hw_vpath_close - Close the handle got from previous vpath (vpath) open
4292  * This function is used to close access to the virtual path opened
4293  * earlier.
4294  */
4295 enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4296 {
4297         struct __vxge_hw_virtualpath *vpath = NULL;
4298         struct __vxge_hw_device *devh = NULL;
4299         u32 vp_id = vp->vpath->vp_id;
4300         u32 is_empty = TRUE;
4301         enum vxge_hw_status status = VXGE_HW_OK;
4302
4303         vpath = vp->vpath;
4304         devh = vpath->hldev;
4305
4306         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4307                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4308                 goto vpath_close_exit;
4309         }
4310
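        /*
         * Only the last handle on this vpath may actually close it: drop the
         * handle from the list and, if other handles remain, restore it and
         * fail the close.
         */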
4311         list_del(&vp->item);
4312
4313         if (!list_empty(&vpath->vpath_handles)) {
4314                 list_add(&vp->item, &vpath->vpath_handles);
4315                 is_empty = FALSE;
4316         }
4317
4318         if (!is_empty) {
4319                 status = VXGE_HW_FAIL;
4320                 goto vpath_close_exit;
4321         }
4322
4323         devh->vpaths_deployed &= ~vxge_mBIT(vp_id);
4324
4325         if (vpath->ringh != NULL)
4326                 __vxge_hw_ring_delete(vp);
4327
4328         if (vpath->fifoh != NULL)
4329                 __vxge_hw_fifo_delete(vp);
4330
4331         if (vpath->stats_block != NULL)
4332                 __vxge_hw_blockpool_block_free(devh, vpath->stats_block);
4333
4334         vfree(vp);
4335
4336         __vxge_hw_vp_terminate(devh, vp_id);
4337
4338         spin_lock(&vpath->lock);
4339         vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
4340         spin_unlock(&vpath->lock);
4341
4342 vpath_close_exit:
4343         return status;
4344 }
4345
4346 /*
4347  * vxge_hw_vpath_reset - Resets vpath
4348  * This function is used to request a reset of the vpath
4349  */
4350 enum vxge_hw_status vxge_hw_vpath_reset(struct __vxge_hw_vpath_handle *vp)
4351 {
4352         enum vxge_hw_status status;
4353         u32 vp_id;
4354         struct __vxge_hw_virtualpath *vpath = vp->vpath;
4355
4356         vp_id = vpath->vp_id;
4357
4358         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4359                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4360                 goto exit;
4361         }
4362
4363         status = __vxge_hw_vpath_reset(vpath->hldev, vp_id);
4364         if (status == VXGE_HW_OK)
4365                 vpath->sw_stats->soft_reset_cnt++;
4366 exit:
4367         return status;
4368 }
4369
4370 /*
4371  * vxge_hw_vpath_recover_from_reset - Poll for reset complete and re-initialize.
4372  * This function polls for the vpath reset completion and re-initializes
4373  * the vpath.
4374  */
4375 enum vxge_hw_status
4376 vxge_hw_vpath_recover_from_reset(struct __vxge_hw_vpath_handle *vp)
4377 {
4378         struct __vxge_hw_virtualpath *vpath = NULL;
4379         enum vxge_hw_status status;
4380         struct __vxge_hw_device *hldev;
4381         u32 vp_id;
4382
4383         vp_id = vp->vpath->vp_id;
4384         vpath = vp->vpath;
4385         hldev = vpath->hldev;
4386
4387         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4388                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4389                 goto exit;
4390         }
4391
4392         status = __vxge_hw_vpath_reset_check(vpath);
4393         if (status != VXGE_HW_OK)
4394                 goto exit;
4395
4396         status = __vxge_hw_vpath_sw_reset(hldev, vp_id);
4397         if (status != VXGE_HW_OK)
4398                 goto exit;
4399
4400         status = __vxge_hw_vpath_initialize(hldev, vp_id);
4401         if (status != VXGE_HW_OK)
4402                 goto exit;
4403
4404         if (vpath->ringh != NULL)
4405                 __vxge_hw_vpath_prc_configure(hldev, vp_id);
4406
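        /*
         * Re-initialize statistics after the reset: clear the host-side
         * mirrors, re-program the stats block DMA address and re-enable
         * statistics collection.
         */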
4407         memset(vpath->hw_stats, 0,
4408                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4409
4410         memset(vpath->hw_stats_sav, 0,
4411                 sizeof(struct vxge_hw_vpath_stats_hw_info));
4412
4413         writeq(vpath->stats_block->dma_addr,
4414                 &vpath->vp_reg->stats_cfg);
4415
4416         status = vxge_hw_vpath_stats_enable(vp);
4417
4418 exit:
4419         return status;
4420 }
4421
4422 /*
4423  * vxge_hw_vpath_enable - Enable vpath.
4424  * This routine clears the vpath reset thereby enabling a vpath
4425  * to start forwarding frames and generating interrupts.
4426  */
4427 void
4428 vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4429 {
4430         struct __vxge_hw_device *hldev;
4431         u64 val64;
4432
4433         hldev = vp->vpath->hldev;
4434
4435         val64 = VXGE_HW_CMN_RSTHDLR_CFG1_CLR_VPATH_RESET(
4436                 1 << (16 - vp->vpath->vp_id));
4437
4438         __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4439                 &hldev->common_reg->cmn_rsthdlr_cfg1);
4440 }
4441
4442 /*
4443  * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4444  * Enable the DMA vpath statistics. The function is to be called to re-enable
4445  * the adapter to update stats into the host memory.
4446  */
4447 static enum vxge_hw_status
4448 vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4449 {
4450         enum vxge_hw_status status = VXGE_HW_OK;
4451         struct __vxge_hw_virtualpath *vpath;
4452
4453         vpath = vp->vpath;
4454
4455         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4456                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4457                 goto exit;
4458         }
4459
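        /*
         * Keep a copy of the previous counters in hw_stats_sav before
         * pulling a fresh snapshot from the adapter.
         */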
4460         memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4461                         sizeof(struct vxge_hw_vpath_stats_hw_info));
4462
4463         status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4464 exit:
4465         return status;
4466 }
4467
4468 /*
4469  * __vxge_hw_vpath_stats_access - Perform the requested operation on the
4470  *                           statistic at the given location and offset
4471  */
4472 static enum vxge_hw_status
4473 __vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4474                              u32 operation, u32 offset, u64 *stat)
4475 {
4476         u64 val64;
4477         enum vxge_hw_status status = VXGE_HW_OK;
4478         struct vxge_hw_vpath_reg __iomem *vp_reg;
4479
4480         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4481                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4482                 goto vpath_stats_access_exit;
4483         }
4484
4485         vp_reg = vpath->vp_reg;
4486
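        /*
         * Issue the XMAC stats access command and wait for the strobe to
         * complete; a successful read leaves the counter value in
         * xmac_stats_access_data.
         */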
4487         val64 =  VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4488                  VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4489                  VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4490
4491         status = __vxge_hw_pio_mem_write64(val64,
4492                                 &vp_reg->xmac_stats_access_cmd,
4493                                 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4494                                 vpath->hldev->config.device_poll_millis);
4495
4496         if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4497                 *stat = readq(&vp_reg->xmac_stats_access_data);
4498         else
4499                 *stat = 0;
4500
4501 vpath_stats_access_exit:
4502         return status;
4503 }
4504
4505 /*
4506  * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4507  */
4508 static enum vxge_hw_status
4509 __vxge_hw_vpath_xmac_tx_stats_get(
4510         struct __vxge_hw_virtualpath *vpath,
4511         struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4512 {
4513         u64 *val64;
4514         int i;
4515         u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4516         enum vxge_hw_status status = VXGE_HW_OK;
4517
4518         val64 = (u64 *) vpath_tx_stats;
4519
4520         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4521                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4522                 goto exit;
4523         }
4524
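        /* Fetch the TX statistics one 64-bit counter at a time, advancing
         * the stats offset by one per counter. */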
4525         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4526                 status = __vxge_hw_vpath_stats_access(vpath,
4527                                         VXGE_HW_STATS_OP_READ,
4528                                         offset, val64);
4529                 if (status != VXGE_HW_OK)
4530                         goto exit;
4531                 offset++;
4532                 val64++;
4533         }
4534 exit:
4535         return status;
4536 }
4537
4538 /*
4539  * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4540  */
4541 static enum vxge_hw_status
4542 __vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4543                                   struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4544 {
4545         u64 *val64;
4546         enum vxge_hw_status status = VXGE_HW_OK;
4547         int i;
4548         u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4549         val64 = (u64 *) vpath_rx_stats;
4550
4551         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4552                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4553                 goto exit;
4554         }
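        /* The RX statistics offsets are byte based, so each one is converted
         * to a qword index (offset >> 3) before the read is issued. */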
4555         for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4556                 status = __vxge_hw_vpath_stats_access(vpath,
4557                                         VXGE_HW_STATS_OP_READ,
4558                                         offset >> 3, val64);
4559                 if (status != VXGE_HW_OK)
4560                         goto exit;
4561
4562                 offset += 8;
4563                 val64++;
4564         }
4565 exit:
4566         return status;
4567 }
4568
4569 /*
4570  * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4571  */
4572 static enum vxge_hw_status
4573 __vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4574                           struct vxge_hw_vpath_stats_hw_info *hw_stats)
4575 {
4576         u64 val64;
4577         enum vxge_hw_status status = VXGE_HW_OK;
4578         struct vxge_hw_vpath_reg __iomem *vp_reg;
4579
4580         if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4581                 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4582                 goto exit;
4583         }
4584         vp_reg = vpath->vp_reg;
4585
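        /*
         * Assemble the snapshot: PCI transaction debug counters, generic
         * event counters, XMAC TX/RX statistics and the per-vpath frame
         * counters.
         */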
4586         val64 = readq(&vp_reg->vpath_debug_stats0);
4587         hw_stats->ini_num_mwr_sent =
4588                 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4589
4590         val64 = readq(&vp_reg->vpath_debug_stats1);
4591         hw_stats->ini_num_mrd_sent =
4592                 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4593
4594         val64 = readq(&vp_reg->vpath_debug_stats2);
4595         hw_stats->ini_num_cpl_rcvd =
4596                 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4597
4598         val64 = readq(&vp_reg->vpath_debug_stats3);
4599         hw_stats->ini_num_mwr_byte_sent =
4600                 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4601
4602         val64 = readq(&vp_reg->vpath_debug_stats4);
4603         hw_stats->ini_num_cpl_byte_rcvd =
4604                 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4605
4606         val64 = readq(&vp_reg->vpath_debug_stats5);
4607         hw_stats->wrcrdtarb_xoff =
4608                 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4609
4610         val64 = readq(&vp_reg->vpath_debug_stats6);
4611         hw_stats->rdcrdtarb_xoff =
4612                 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4613
4614         val64 = readq(&vp_reg->vpath_genstats_count01);
4615         hw_stats->vpath_genstats_count0 =
4616         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4617                 val64);
4618
4619         val64 = readq(&vp_reg->vpath_genstats_count01);
4620         hw_stats->vpath_genstats_count1 =
4621         (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4622                 val64);
4623
4624         val64 = readq(&vp_reg->vpath_genstats_count23);
4625         hw_stats->vpath_genstats_count2 =
4626         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4627                 val64);
4628
4629         val64 = readq(&vp_reg->vpath_genstats_count23);
4630         hw_stats->vpath_genstats_count3 =
4631         (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4632                 val64);
4633
4634         val64 = readq(&vp_reg->vpath_genstats_count4);
4635         hw_stats->vpath_genstats_count4 =
4636         (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4637                 val64);
4638
4639         val64 = readq(&vp_reg->vpath_genstats_count5);
4640         hw_stats->vpath_genstats_count5 =
4641         (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4642                 val64);
4643
4644         status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4645         if (status != VXGE_HW_OK)
4646                 goto exit;
4647
4648         status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4649         if (status != VXGE_HW_OK)
4650                 goto exit;
4651
4652         VXGE_HW_VPATH_STATS_PIO_READ(
4653                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4654
4655         hw_stats->prog_event_vnum0 =
4656                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4657
4658         hw_stats->prog_event_vnum1 =
4659                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4660
4661         VXGE_HW_VPATH_STATS_PIO_READ(
4662                 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4663
4664         hw_stats->prog_event_vnum2 =
4665                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4666
4667         hw_stats->prog_event_vnum3 =
4668                         (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4669
4670         val64 = readq(&vp_reg->rx_multi_cast_stats);
4671         hw_stats->rx_multi_cast_frame_discard =
4672                 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4673
4674         val64 = readq(&vp_reg->rx_frm_transferred);
4675         hw_stats->rx_frm_transferred =
4676                 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4677
4678         val64 = readq(&vp_reg->rxd_returned);
4679         hw_stats->rxd_returned =
4680                 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4681
4682         val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4683         hw_stats->rx_mpa_len_fail_frms =
4684                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4685         hw_stats->rx_mpa_mrk_fail_frms =
4686                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4687         hw_stats->rx_mpa_crc_fail_frms =
4688                 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4689
4690         val64 = readq(&vp_reg->dbg_stats_rx_fau);
4691         hw_stats->rx_permitted_frms =
4692                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4693         hw_stats->rx_vp_reset_discarded_frms =
4694         (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4695         hw_stats->rx_wol_frms =
4696                 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4697
4698         val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4699         hw_stats->tx_vp_reset_discarded_frms =
4700         (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4701                 val64);
4702 exit:
4703         return status;
4704 }
4705
4706
4707 static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4708                                         unsigned long size)
4709 {
4710         gfp_t flags;
4711         void *vaddr;
4712
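        /* Fall back to an atomic allocation when called from interrupt
         * context; a NULL block is handled by the block-add callback. */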
4713         if (in_interrupt())
4714                 flags = GFP_ATOMIC | GFP_DMA;
4715         else
4716                 flags = GFP_KERNEL | GFP_DMA;
4717
4718         vaddr = kmalloc(size, flags);
4719
4720         vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4721 }
4722
4723 static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4724                              struct pci_dev **p_dma_acch)
4725 {
4726         unsigned long misaligned = *(unsigned long *)p_dma_acch;
4727         u8 *tmp = (u8 *)vaddr;
4728         tmp -= misaligned;
4729         kfree((void *)tmp);
4730 }
4731
4732 /*
4733  * __vxge_hw_blockpool_create - Create block pool
4734  */
4735
4736 static enum vxge_hw_status
4737 __vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4738                            struct __vxge_hw_blockpool *blockpool,
4739                            u32 pool_size,
4740                            u32 pool_max)
4741 {
4742         u32 i;
4743         struct __vxge_hw_blockpool_entry *entry = NULL;
4744         void *memblock;
4745         dma_addr_t dma_addr;
4746         struct pci_dev *dma_handle;
4747         struct pci_dev *acc_handle;
4748         enum vxge_hw_status status = VXGE_HW_OK;
4749
4750         if (blockpool == NULL) {
4751                 status = VXGE_HW_FAIL;
4752                 goto blockpool_create_exit;
4753         }
4754
4755         blockpool->hldev = hldev;
4756         blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4757         blockpool->pool_size = 0;
4758         blockpool->pool_max = pool_max;
4759         blockpool->req_out = 0;
4760
4761         INIT_LIST_HEAD(&blockpool->free_block_list);
4762         INIT_LIST_HEAD(&blockpool->free_entry_list);
4763
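        /*
         * Pre-allocate entry descriptors for the largest possible pool, then
         * allocate and DMA-map the initial pool_size blocks.
         */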
4764         for (i = 0; i < pool_size + pool_max; i++) {
4765                 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4766                                 GFP_KERNEL);
4767                 if (entry == NULL) {
4768                         __vxge_hw_blockpool_destroy(blockpool);
4769                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4770                         goto blockpool_create_exit;
4771                 }
4772                 list_add(&entry->item, &blockpool->free_entry_list);
4773         }
4774
4775         for (i = 0; i < pool_size; i++) {
4776
4777                 memblock = vxge_os_dma_malloc(
4778                                 hldev->pdev,
4779                                 VXGE_HW_BLOCK_SIZE,
4780                                 &dma_handle,
4781                                 &acc_handle);
4782
4783                 if (memblock == NULL) {
4784                         __vxge_hw_blockpool_destroy(blockpool);
4785                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4786                         goto blockpool_create_exit;
4787                 }
4788
4789                 dma_addr = pci_map_single(hldev->pdev, memblock,
4790                                 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4791
4792                 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4793                                 dma_addr))) {
4794
4795                         vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4796                         __vxge_hw_blockpool_destroy(blockpool);
4797                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4798                         goto blockpool_create_exit;
4799                 }
4800
4801                 if (!list_empty(&blockpool->free_entry_list))
4802                         entry = (struct __vxge_hw_blockpool_entry *)
4803                                 list_first_entry(&blockpool->free_entry_list,
4804                                         struct __vxge_hw_blockpool_entry,
4805                                         item);
4806
4807                 if (entry == NULL)
4808                         entry =
4809                             kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4810                                         GFP_KERNEL);
4811                 if (entry != NULL) {
4812                         list_del(&entry->item);
4813                         entry->length = VXGE_HW_BLOCK_SIZE;
4814                         entry->memblock = memblock;
4815                         entry->dma_addr = dma_addr;
4816                         entry->acc_handle = acc_handle;
4817                         entry->dma_handle = dma_handle;
4818                         list_add(&entry->item,
4819                                           &blockpool->free_block_list);
4820                         blockpool->pool_size++;
4821                 } else {
4822                         __vxge_hw_blockpool_destroy(blockpool);
4823                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
4824                         goto blockpool_create_exit;
4825                 }
4826         }
4827
4828 blockpool_create_exit:
4829         return status;
4830 }
4831
4832 /*
4833  * __vxge_hw_blockpool_destroy - Deallocates the block pool
4834  */
4835
4836 static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4837 {
4838
4839         struct __vxge_hw_device *hldev;
4840         struct list_head *p, *n;
4842
4843         if (blockpool == NULL)
4844                 goto exit;
4847
4848         hldev = blockpool->hldev;
4849
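        /* Unmap and free every pooled block, then release the spare entry
         * descriptors. */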
4850         list_for_each_safe(p, n, &blockpool->free_block_list) {
4851
4852                 pci_unmap_single(hldev->pdev,
4853                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4854                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4855                         PCI_DMA_BIDIRECTIONAL);
4856
4857                 vxge_os_dma_free(hldev->pdev,
4858                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4859                         &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4860
4861                 list_del(
4862                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4863                 kfree(p);
4864                 blockpool->pool_size--;
4865         }
4866
4867         list_for_each_safe(p, n, &blockpool->free_entry_list) {
4868                 list_del(
4869                         &((struct __vxge_hw_blockpool_entry *)p)->item);
4870                 kfree((void *)p);
4871         }
4873 exit:
4874         return;
4875 }
4876
4877 /*
4878  * __vxge_hw_blockpool_blocks_add - Request additional blocks
4879  */
4880 static
4881 void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4882 {
4883         u32 nreq = 0, i;
4884
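        /*
         * Top the pool back up when the blocks on hand plus the requests
         * already outstanding drop below the minimum pool size.
         */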
4885         if ((blockpool->pool_size  +  blockpool->req_out) <
4886                 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4887                 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4888                 blockpool->req_out += nreq;
4889         }
4890
4891         for (i = 0; i < nreq; i++)
4892                 vxge_os_dma_malloc_async(
4893                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4894                         blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4895 }
4896
4897 /*
4898  * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4899  */
4900 static
4901 void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4902 {
4903         struct list_head *p, *n;
4904
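        /* Unmap and free surplus blocks until the pool is back down to
         * pool_max. */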
4905         list_for_each_safe(p, n, &blockpool->free_block_list) {
4906
4907                 if (blockpool->pool_size < blockpool->pool_max)
4908                         break;
4909
4910                 pci_unmap_single(
4911                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4912                         ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4913                         ((struct __vxge_hw_blockpool_entry *)p)->length,
4914                         PCI_DMA_BIDIRECTIONAL);
4915
4916                 vxge_os_dma_free(
4917                         ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4918                         ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4919                         &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4920
4921                 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4922
4923                 list_add(p, &blockpool->free_entry_list);
4924
4925                 blockpool->pool_size--;
4926
4927         }
4928 }
4929
4930 /*
4931  * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
4932  * Adds a block to block pool
4933  */
4934 static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
4935                                         void *block_addr,
4936                                         u32 length,
4937                                         struct pci_dev *dma_h,
4938                                         struct pci_dev *acc_handle)
4939 {
4940         struct __vxge_hw_blockpool  *blockpool;
4941         struct __vxge_hw_blockpool_entry  *entry = NULL;
4942         dma_addr_t dma_addr;
4943         enum vxge_hw_status status = VXGE_HW_OK;
4944         u32 req_out;
4945
4946         blockpool = &devh->block_pool;
4947
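        /*
         * Completion of an asynchronous block request: map the new block for
         * DMA and add it to the free list.  req_out is decremented on every
         * path, including the failure cases.
         */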
4948         if (block_addr == NULL) {
4949                 blockpool->req_out--;
4950                 status = VXGE_HW_FAIL;
4951                 goto exit;
4952         }
4953
4954         dma_addr = pci_map_single(devh->pdev, block_addr, length,
4955                                 PCI_DMA_BIDIRECTIONAL);
4956
4957         if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
4958
4959                 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
4960                 blockpool->req_out--;
4961                 status = VXGE_HW_FAIL;
4962                 goto exit;
4963         }
4964
4965
4966         if (!list_empty(&blockpool->free_entry_list))
4967                 entry = (struct __vxge_hw_blockpool_entry *)
4968                         list_first_entry(&blockpool->free_entry_list,
4969                                 struct __vxge_hw_blockpool_entry,
4970                                 item);
4971
4972         if (entry == NULL)
4973                 entry = (struct __vxge_hw_blockpool_entry *)
4974                         vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
4975         else
4976                 list_del(&entry->item);
4977
4978         if (entry != NULL) {
4979                 entry->length = length;
4980                 entry->memblock = block_addr;
4981                 entry->dma_addr = dma_addr;
4982                 entry->acc_handle = acc_handle;
4983                 entry->dma_handle = dma_h;
4984                 list_add(&entry->item, &blockpool->free_block_list);
4985                 blockpool->pool_size++;
4986                 status = VXGE_HW_OK;
4987         } else
4988                 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4989
4990         blockpool->req_out--;
4991
4992         req_out = blockpool->req_out;
4993 exit:
4994         return;
4995 }
4996
4997 /*
4998  * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
4999  * Allocates a block of memory of given size, either from block pool
5000  * or by calling vxge_os_dma_malloc()
5001  */
5002 static void *
5003 __vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5004                                 struct vxge_hw_mempool_dma *dma_object)
5005 {
5006         struct __vxge_hw_blockpool_entry *entry = NULL;
5007         struct __vxge_hw_blockpool  *blockpool;
5008         void *memblock = NULL;
5009         enum vxge_hw_status status = VXGE_HW_OK;
5010
5011         blockpool = &devh->block_pool;
5012
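        /*
         * Only block_size requests are served from the pool; other sizes are
         * allocated and DMA-mapped directly.
         */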
5013         if (size != blockpool->block_size) {
5014
5015                 memblock = vxge_os_dma_malloc(devh->pdev, size,
5016                                                 &dma_object->handle,
5017                                                 &dma_object->acc_handle);
5018
5019                 if (memblock == NULL) {
5020                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5021                         goto exit;
5022                 }
5023
5024                 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5025                                         PCI_DMA_BIDIRECTIONAL);
5026
5027                 if (unlikely(pci_dma_mapping_error(devh->pdev,
5028                                 dma_object->addr))) {
5029                         vxge_os_dma_free(devh->pdev, memblock,
5030                                 &dma_object->acc_handle);
5031                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5032                         goto exit;
5033                 }
5034
5035         } else {
5036
5037                 if (!list_empty(&blockpool->free_block_list))
5038                         entry = (struct __vxge_hw_blockpool_entry *)
5039                                 list_first_entry(&blockpool->free_block_list,
5040                                         struct __vxge_hw_blockpool_entry,
5041                                         item);
5042
5043                 if (entry != NULL) {
5044                         list_del(&entry->item);
5045                         dma_object->addr = entry->dma_addr;
5046                         dma_object->handle = entry->dma_handle;
5047                         dma_object->acc_handle = entry->acc_handle;
5048                         memblock = entry->memblock;
5049
5050                         list_add(&entry->item,
5051                                 &blockpool->free_entry_list);
5052                         blockpool->pool_size--;
5053                 }
5054
5055                 if (memblock != NULL)
5056                         __vxge_hw_blockpool_blocks_add(blockpool);
5057         }
5058 exit:
5059         return memblock;
5060 }
5061
5062 /*
5063  * __vxge_hw_blockpool_free - Frees the memory allocated with
5064  *                              __vxge_hw_blockpool_malloc
5065  */
5066 static void
5067 __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5068                         void *memblock, u32 size,
5069                         struct vxge_hw_mempool_dma *dma_object)
5070 {
5071         struct __vxge_hw_blockpool_entry *entry = NULL;
5072         struct __vxge_hw_blockpool  *blockpool;
5073         enum vxge_hw_status status = VXGE_HW_OK;
5074
5075         blockpool = &devh->block_pool;
5076
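        /*
         * Directly allocated buffers are unmapped and freed here; pool-sized
         * blocks go back on the free list and the pool is trimmed if it has
         * grown past pool_max.
         */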
5077         if (size != blockpool->block_size) {
5078                 pci_unmap_single(devh->pdev, dma_object->addr, size,
5079                         PCI_DMA_BIDIRECTIONAL);
5080                 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5081         } else {
5082
5083                 if (!list_empty(&blockpool->free_entry_list))
5084                         entry = (struct __vxge_hw_blockpool_entry *)
5085                                 list_first_entry(&blockpool->free_entry_list,
5086                                         struct __vxge_hw_blockpool_entry,
5087                                         item);
5088
5089                 if (entry == NULL)
5090                         entry = (struct __vxge_hw_blockpool_entry *)
5091                                 vmalloc(sizeof(
5092                                         struct __vxge_hw_blockpool_entry));
5093                 else
5094                         list_del(&entry->item);
5095
5096                 if (entry != NULL) {
5097                         entry->length = size;
5098                         entry->memblock = memblock;
5099                         entry->dma_addr = dma_object->addr;
5100                         entry->acc_handle = dma_object->acc_handle;
5101                         entry->dma_handle = dma_object->handle;
5102                         list_add(&entry->item,
5103                                         &blockpool->free_block_list);
5104                         blockpool->pool_size++;
5105                         status = VXGE_HW_OK;
5106                 } else
5107                         status = VXGE_HW_ERR_OUT_OF_MEMORY;
5108
5109                 if (status == VXGE_HW_OK)
5110                         __vxge_hw_blockpool_blocks_remove(blockpool);
5111         }
5112 }
5113
5114 /*
5115  * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5116  * This function allocates a block from block pool or from the system
5117  */
5118 static struct __vxge_hw_blockpool_entry *
5119 __vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5120 {
5121         struct __vxge_hw_blockpool_entry *entry = NULL;
5122         struct __vxge_hw_blockpool  *blockpool;
5123
5124         blockpool = &devh->block_pool;
5125
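        /*
         * Only pool-sized requests can be satisfied here; after handing out
         * a block, request replacements so the pool stays topped up.
         */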
5126         if (size == blockpool->block_size) {
5127
5128                 if (!list_empty(&blockpool->free_block_list))
5129                         entry = (struct __vxge_hw_blockpool_entry *)
5130                                 list_first_entry(&blockpool->free_block_list,
5131                                         struct __vxge_hw_blockpool_entry,
5132                                         item);
5133
5134                 if (entry != NULL) {
5135                         list_del(&entry->item);
5136                         blockpool->pool_size--;
5137                 }
5138         }
5139
5140         if (entry != NULL)
5141                 __vxge_hw_blockpool_blocks_add(blockpool);
5142
5143         return entry;
5144 }
5145
5146 /*
5147  * __vxge_hw_blockpool_block_free - Frees a block from block pool
5148  * @devh: Hal device
5149  * @entry: Entry of block to be freed
5150  *
5151  * This function frees a block from block pool
5152  */
5153 static void
5154 __vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5155                         struct __vxge_hw_blockpool_entry *entry)
5156 {
5157         struct __vxge_hw_blockpool  *blockpool;
5158
5159         blockpool = &devh->block_pool;
5160
5161         if (entry->length == blockpool->block_size) {
5162                 list_add(&entry->item, &blockpool->free_block_list);
5163                 blockpool->pool_size++;
5164         }
5165
5166         __vxge_hw_blockpool_blocks_remove(blockpool);
5167 }