28f36b88de380f78a9fb2fbbdb7d10376a2c02ca
[linux-2.6.git] / drivers / net / mlx4 / main.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
4  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
5  * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/errno.h>
39 #include <linux/pci.h>
40 #include <linux/dma-mapping.h>
41
42 #include <linux/mlx4/device.h>
43 #include <linux/mlx4/doorbell.h>
44
45 #include "mlx4.h"
46 #include "fw.h"
47 #include "icm.h"
48
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

#ifdef CONFIG_MLX4_DEBUG

/* Debug trace verbosity; runtime-writable via sysfs (mode 0644). */
int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");

#endif /* CONFIG_MLX4_DEBUG */

#ifdef CONFIG_PCI_MSI

/* Load-time parameter (0444): set to 0 to force legacy INTx interrupts. */
static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");

#else /* CONFIG_PCI_MSI */

/* Without PCI MSI support, MSI-X can never be used. */
#define msi_x (0)

#endif /* CONFIG_PCI_MSI */

static char mlx4_version[] __devinitdata =
	DRV_NAME ": Mellanox ConnectX core driver v"
	DRV_VERSION " (" DRV_RELDATE ")\n";

/* Default resource counts handed to mlx4_make_profile() for ICM sizing. */
static struct mlx4_profile default_profile = {
	.num_qp		= 1 << 17,
	.num_srq	= 1 << 16,
	.rdmarc_per_qp	= 1 << 4,
	.num_cq		= 1 << 16,
	.num_mcg	= 1 << 13,
	.num_mpt	= 1 << 17,
	.num_mtt	= 1 << 20,
};

/* Log2 of MACs per ETH port; clamped per port in mlx4_dev_cap(). */
static int log_num_mac = 2;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");

/* Log2 of VLANs per ETH port (default 0); clamped per port in mlx4_dev_cap(). */
static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");

/* When set, log_num_prios becomes 3 (see mlx4_dev_cap()), multiplying
 * the reserved ETH/FC address QP range by 8. */
static int use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
		  "(0/1, default 0)");
100
101 static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
102 {
103         int err;
104         int i;
105
106         err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
107         if (err) {
108                 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
109                 return err;
110         }
111
112         if (dev_cap->min_page_sz > PAGE_SIZE) {
113                 mlx4_err(dev, "HCA minimum page size of %d bigger than "
114                          "kernel PAGE_SIZE of %ld, aborting.\n",
115                          dev_cap->min_page_sz, PAGE_SIZE);
116                 return -ENODEV;
117         }
118         if (dev_cap->num_ports > MLX4_MAX_PORTS) {
119                 mlx4_err(dev, "HCA has %d ports, but we only support %d, "
120                          "aborting.\n",
121                          dev_cap->num_ports, MLX4_MAX_PORTS);
122                 return -ENODEV;
123         }
124
125         if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
126                 mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
127                          "PCI resource 2 size of 0x%llx, aborting.\n",
128                          dev_cap->uar_size,
129                          (unsigned long long) pci_resource_len(dev->pdev, 2));
130                 return -ENODEV;
131         }
132
133         dev->caps.num_ports          = dev_cap->num_ports;
134         for (i = 1; i <= dev->caps.num_ports; ++i) {
135                 dev->caps.vl_cap[i]         = dev_cap->max_vl[i];
136                 dev->caps.ib_mtu_cap[i]     = dev_cap->ib_mtu[i];
137                 dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
138                 dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
139                 dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
140                 dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
141                 dev->caps.def_mac[i]        = dev_cap->def_mac[i];
142         }
143
144         dev->caps.num_uars           = dev_cap->uar_size / PAGE_SIZE;
145         dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
146         dev->caps.bf_reg_size        = dev_cap->bf_reg_size;
147         dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
148         dev->caps.max_sq_sg          = dev_cap->max_sq_sg;
149         dev->caps.max_rq_sg          = dev_cap->max_rq_sg;
150         dev->caps.max_wqes           = dev_cap->max_qp_sz;
151         dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
152         dev->caps.max_srq_wqes       = dev_cap->max_srq_sz;
153         dev->caps.max_srq_sge        = dev_cap->max_rq_sg - 1;
154         dev->caps.reserved_srqs      = dev_cap->reserved_srqs;
155         dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
156         dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
157         dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
158         /*
159          * Subtract 1 from the limit because we need to allocate a
160          * spare CQE so the HCA HW can tell the difference between an
161          * empty CQ and a full CQ.
162          */
163         dev->caps.max_cqes           = dev_cap->max_cq_sz - 1;
164         dev->caps.reserved_cqs       = dev_cap->reserved_cqs;
165         dev->caps.reserved_eqs       = dev_cap->reserved_eqs;
166         dev->caps.reserved_mtts      = DIV_ROUND_UP(dev_cap->reserved_mtts,
167                                                     MLX4_MTT_ENTRY_PER_SEG);
168         dev->caps.reserved_mrws      = dev_cap->reserved_mrws;
169         dev->caps.reserved_uars      = dev_cap->reserved_uars;
170         dev->caps.reserved_pds       = dev_cap->reserved_pds;
171         dev->caps.mtt_entry_sz       = MLX4_MTT_ENTRY_PER_SEG * dev_cap->mtt_entry_sz;
172         dev->caps.max_msg_sz         = dev_cap->max_msg_sz;
173         dev->caps.page_size_cap      = ~(u32) (dev_cap->min_page_sz - 1);
174         dev->caps.flags              = dev_cap->flags;
175         dev->caps.bmme_flags         = dev_cap->bmme_flags;
176         dev->caps.reserved_lkey      = dev_cap->reserved_lkey;
177         dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
178         dev->caps.max_gso_sz         = dev_cap->max_gso_sz;
179
180         dev->caps.log_num_macs  = log_num_mac;
181         dev->caps.log_num_vlans = log_num_vlan;
182         dev->caps.log_num_prios = use_prio ? 3 : 0;
183
184         for (i = 1; i <= dev->caps.num_ports; ++i) {
185                 if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
186                         dev->caps.log_num_macs = dev_cap->log_max_macs[i];
187                         mlx4_warn(dev, "Requested number of MACs is too much "
188                                   "for port %d, reducing to %d.\n",
189                                   i, 1 << dev->caps.log_num_macs);
190                 }
191                 if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
192                         dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
193                         mlx4_warn(dev, "Requested number of VLANs is too much "
194                                   "for port %d, reducing to %d.\n",
195                                   i, 1 << dev->caps.log_num_vlans);
196                 }
197         }
198
199         dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
200         dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
201                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
202                 (1 << dev->caps.log_num_macs) *
203                 (1 << dev->caps.log_num_vlans) *
204                 (1 << dev->caps.log_num_prios) *
205                 dev->caps.num_ports;
206         dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
207
208         dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
209                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
210                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
211                 dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];
212
213         return 0;
214 }
215
216 static int mlx4_load_fw(struct mlx4_dev *dev)
217 {
218         struct mlx4_priv *priv = mlx4_priv(dev);
219         int err;
220
221         priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
222                                          GFP_HIGHUSER | __GFP_NOWARN, 0);
223         if (!priv->fw.fw_icm) {
224                 mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
225                 return -ENOMEM;
226         }
227
228         err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
229         if (err) {
230                 mlx4_err(dev, "MAP_FA command failed, aborting.\n");
231                 goto err_free;
232         }
233
234         err = mlx4_RUN_FW(dev);
235         if (err) {
236                 mlx4_err(dev, "RUN_FW command failed, aborting.\n");
237                 goto err_unmap_fa;
238         }
239
240         return 0;
241
242 err_unmap_fa:
243         mlx4_UNMAP_FA(dev);
244
245 err_free:
246         mlx4_free_icm(dev, priv->fw.fw_icm, 0);
247         return err;
248 }
249
250 static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
251                                 int cmpt_entry_sz)
252 {
253         struct mlx4_priv *priv = mlx4_priv(dev);
254         int err;
255
256         err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
257                                   cmpt_base +
258                                   ((u64) (MLX4_CMPT_TYPE_QP *
259                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
260                                   cmpt_entry_sz, dev->caps.num_qps,
261                                   dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
262                                   0, 0);
263         if (err)
264                 goto err;
265
266         err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
267                                   cmpt_base +
268                                   ((u64) (MLX4_CMPT_TYPE_SRQ *
269                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
270                                   cmpt_entry_sz, dev->caps.num_srqs,
271                                   dev->caps.reserved_srqs, 0, 0);
272         if (err)
273                 goto err_qp;
274
275         err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
276                                   cmpt_base +
277                                   ((u64) (MLX4_CMPT_TYPE_CQ *
278                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
279                                   cmpt_entry_sz, dev->caps.num_cqs,
280                                   dev->caps.reserved_cqs, 0, 0);
281         if (err)
282                 goto err_srq;
283
284         err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
285                                   cmpt_base +
286                                   ((u64) (MLX4_CMPT_TYPE_EQ *
287                                           cmpt_entry_sz) << MLX4_CMPT_SHIFT),
288                                   cmpt_entry_sz,
289                                   roundup_pow_of_two(MLX4_NUM_EQ +
290                                                      dev->caps.reserved_eqs),
291                                   MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0);
292         if (err)
293                 goto err_cq;
294
295         return 0;
296
297 err_cq:
298         mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
299
300 err_srq:
301         mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
302
303 err_qp:
304         mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
305
306 err:
307         return err;
308 }
309
/*
 * Map all ICM (InfiniHost Context Memory) the firmware needs: first
 * size the auxiliary area with SET_ICM_SIZE and map it, then create
 * each context table (cMPT, EQ, MTT, dMPT, QP/AUXC/ALTC/RDMARC, CQ,
 * SRQ, MCG) at the base addresses chosen in init_hca.  On failure,
 * everything mapped so far is torn down in reverse order via the
 * fall-through label ladder at the bottom.
 *
 * Returns 0 on success, a negative errno otherwise.
 */
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int err;

	/* Ask firmware how many pages of aux ICM this icm_size requires. */
	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	/* Each QP's RDMARC slot holds (1 << rdmarc_shift) entries. */
	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

	/* Unwind ladder: each label frees everything mapped after the
	 * step whose failure jumps there; keep in exact reverse order. */
err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_unmap_eq_icm(dev);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
505
506 static void mlx4_free_icms(struct mlx4_dev *dev)
507 {
508         struct mlx4_priv *priv = mlx4_priv(dev);
509
510         mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
511         mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
512         mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
513         mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
514         mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
515         mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
516         mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
517         mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
518         mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
519         mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
520         mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
521         mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
522         mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
523         mlx4_unmap_eq_icm(dev);
524
525         mlx4_UNMAP_ICM_AUX(dev);
526         mlx4_free_icm(dev, priv->fw.aux_icm, 0);
527 }
528
529 static void mlx4_close_hca(struct mlx4_dev *dev)
530 {
531         mlx4_CLOSE_HCA(dev, 0);
532         mlx4_free_icms(dev);
533         mlx4_UNMAP_FA(dev);
534         mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
535 }
536
537 static int mlx4_init_hca(struct mlx4_dev *dev)
538 {
539         struct mlx4_priv          *priv = mlx4_priv(dev);
540         struct mlx4_adapter        adapter;
541         struct mlx4_dev_cap        dev_cap;
542         struct mlx4_mod_stat_cfg   mlx4_cfg;
543         struct mlx4_profile        profile;
544         struct mlx4_init_hca_param init_hca;
545         u64 icm_size;
546         int err;
547
548         err = mlx4_QUERY_FW(dev);
549         if (err) {
550                 mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
551                 return err;
552         }
553
554         err = mlx4_load_fw(dev);
555         if (err) {
556                 mlx4_err(dev, "Failed to start FW, aborting.\n");
557                 return err;
558         }
559
560         mlx4_cfg.log_pg_sz_m = 1;
561         mlx4_cfg.log_pg_sz = 0;
562         err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
563         if (err)
564                 mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");
565
566         err = mlx4_dev_cap(dev, &dev_cap);
567         if (err) {
568                 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
569                 goto err_stop_fw;
570         }
571
572         profile = default_profile;
573
574         icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
575         if ((long long) icm_size < 0) {
576                 err = icm_size;
577                 goto err_stop_fw;
578         }
579
580         init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
581
582         err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
583         if (err)
584                 goto err_stop_fw;
585
586         err = mlx4_INIT_HCA(dev, &init_hca);
587         if (err) {
588                 mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
589                 goto err_free_icm;
590         }
591
592         err = mlx4_QUERY_ADAPTER(dev, &adapter);
593         if (err) {
594                 mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
595                 goto err_close;
596         }
597
598         priv->eq_table.inta_pin = adapter.inta_pin;
599         memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);
600
601         return 0;
602
603 err_close:
604         mlx4_close_hca(dev);
605
606 err_free_icm:
607         mlx4_free_icms(dev);
608
609 err_stop_fw:
610         mlx4_UNMAP_FA(dev);
611         mlx4_free_icm(dev, priv->fw.fw_icm, 0);
612
613         return err;
614 }
615
/*
 * Software setup after INIT_HCA: allocate the driver UAR and kernel
 * doorbell mapping, then initialize every resource table (UAR, PD,
 * MR, EQ, CQ, SRQ, QP, MCG) in dependency order.  A NOP command is
 * used to verify interrupt delivery before committing to event-driven
 * firmware commands.  On failure, setup is undone in reverse order
 * via the fall-through error ladder.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	/* Map the driver's UAR page so the kernel can ring doorbells. */
	priv->kar = ioremap(priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	/* From here on, firmware commands complete via EQ events. */
	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	/* NOP should raise an interrupt: proves MSI-X/INTx routing works. */
	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt IRQ %d).\n",
				  priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	return 0;

	/* Unwind ladder: strict reverse of the setup order above. */
err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
751
752 static void mlx4_enable_msi_x(struct mlx4_dev *dev)
753 {
754         struct mlx4_priv *priv = mlx4_priv(dev);
755         struct msix_entry entries[MLX4_NUM_EQ];
756         int err;
757         int i;
758
759         if (msi_x) {
760                 for (i = 0; i < MLX4_NUM_EQ; ++i)
761                         entries[i].entry = i;
762
763                 err = pci_enable_msix(dev->pdev, entries, ARRAY_SIZE(entries));
764                 if (err) {
765                         if (err > 0)
766                                 mlx4_info(dev, "Only %d MSI-X vectors available, "
767                                           "not using MSI-X\n", err);
768                         goto no_msi;
769                 }
770
771                 for (i = 0; i < MLX4_NUM_EQ; ++i)
772                         priv->eq_table.eq[i].irq = entries[i].vector;
773
774                 dev->flags |= MLX4_FLAG_MSI_X;
775                 return;
776         }
777
778 no_msi:
779         for (i = 0; i < MLX4_NUM_EQ; ++i)
780                 priv->eq_table.eq[i].irq = dev->pdev->irq;
781 }
782
783 static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
784 {
785         struct mlx4_priv *priv;
786         struct mlx4_dev *dev;
787         int err;
788
789         printk(KERN_INFO PFX "Initializing %s\n",
790                pci_name(pdev));
791
792         err = pci_enable_device(pdev);
793         if (err) {
794                 dev_err(&pdev->dev, "Cannot enable PCI device, "
795                         "aborting.\n");
796                 return err;
797         }
798
799         /*
800          * Check for BARs.  We expect 0: 1MB
801          */
802         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
803             pci_resource_len(pdev, 0) != 1 << 20) {
804                 dev_err(&pdev->dev, "Missing DCS, aborting.\n");
805                 err = -ENODEV;
806                 goto err_disable_pdev;
807         }
808         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
809                 dev_err(&pdev->dev, "Missing UAR, aborting.\n");
810                 err = -ENODEV;
811                 goto err_disable_pdev;
812         }
813
814         err = pci_request_region(pdev, 0, DRV_NAME);
815         if (err) {
816                 dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
817                 goto err_disable_pdev;
818         }
819
820         err = pci_request_region(pdev, 2, DRV_NAME);
821         if (err) {
822                 dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
823                 goto err_release_bar0;
824         }
825
826         pci_set_master(pdev);
827
828         err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
829         if (err) {
830                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
831                 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
832                 if (err) {
833                         dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
834                         goto err_release_bar2;
835                 }
836         }
837         err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
838         if (err) {
839                 dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
840                          "consistent PCI DMA mask.\n");
841                 err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
842                 if (err) {
843                         dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
844                                 "aborting.\n");
845                         goto err_release_bar2;
846                 }
847         }
848
849         priv = kzalloc(sizeof *priv, GFP_KERNEL);
850         if (!priv) {
851                 dev_err(&pdev->dev, "Device struct alloc failed, "
852                         "aborting.\n");
853                 err = -ENOMEM;
854                 goto err_release_bar2;
855         }
856
857         dev       = &priv->dev;
858         dev->pdev = pdev;
859         INIT_LIST_HEAD(&priv->ctx_list);
860         spin_lock_init(&priv->ctx_lock);
861
862         INIT_LIST_HEAD(&priv->pgdir_list);
863         mutex_init(&priv->pgdir_mutex);
864
865         /*
866          * Now reset the HCA before we touch the PCI capabilities or
867          * attempt a firmware command, since a boot ROM may have left
868          * the HCA in an undefined state.
869          */
870         err = mlx4_reset(dev);
871         if (err) {
872                 mlx4_err(dev, "Failed to reset HCA, aborting.\n");
873                 goto err_free_dev;
874         }
875
876         if (mlx4_cmd_init(dev)) {
877                 mlx4_err(dev, "Failed to init command interface, aborting.\n");
878                 goto err_free_dev;
879         }
880
881         err = mlx4_init_hca(dev);
882         if (err)
883                 goto err_cmd;
884
885         mlx4_enable_msi_x(dev);
886
887         err = mlx4_setup_hca(dev);
888         if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
889                 dev->flags &= ~MLX4_FLAG_MSI_X;
890                 pci_disable_msix(pdev);
891                 err = mlx4_setup_hca(dev);
892         }
893
894         if (err)
895                 goto err_close;
896
897         err = mlx4_register_device(dev);
898         if (err)
899                 goto err_cleanup;
900
901         pci_set_drvdata(pdev, dev);
902
903         return 0;
904
905 err_cleanup:
906         mlx4_cleanup_mcg_table(dev);
907         mlx4_cleanup_qp_table(dev);
908         mlx4_cleanup_srq_table(dev);
909         mlx4_cleanup_cq_table(dev);
910         mlx4_cmd_use_polling(dev);
911         mlx4_cleanup_eq_table(dev);
912         mlx4_cleanup_mr_table(dev);
913         mlx4_cleanup_pd_table(dev);
914         mlx4_cleanup_uar_table(dev);
915
916 err_close:
917         if (dev->flags & MLX4_FLAG_MSI_X)
918                 pci_disable_msix(pdev);
919
920         mlx4_close_hca(dev);
921
922 err_cmd:
923         mlx4_cmd_cleanup(dev);
924
925 err_free_dev:
926         kfree(priv);
927
928 err_release_bar2:
929         pci_release_region(pdev, 2);
930
931 err_release_bar0:
932         pci_release_region(pdev, 0);
933
934 err_disable_pdev:
935         pci_disable_device(pdev);
936         pci_set_drvdata(pdev, NULL);
937         return err;
938 }
939
940 static int __devinit mlx4_init_one(struct pci_dev *pdev,
941                                    const struct pci_device_id *id)
942 {
943         static int mlx4_version_printed;
944
945         if (!mlx4_version_printed) {
946                 printk(KERN_INFO "%s", mlx4_version);
947                 ++mlx4_version_printed;
948         }
949
950         return __mlx4_init_one(pdev, id);
951 }
952
953 static void mlx4_remove_one(struct pci_dev *pdev)
954 {
955         struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
956         struct mlx4_priv *priv = mlx4_priv(dev);
957         int p;
958
959         if (dev) {
960                 mlx4_unregister_device(dev);
961
962                 for (p = 1; p <= dev->caps.num_ports; ++p)
963                         mlx4_CLOSE_PORT(dev, p);
964
965                 mlx4_cleanup_mcg_table(dev);
966                 mlx4_cleanup_qp_table(dev);
967                 mlx4_cleanup_srq_table(dev);
968                 mlx4_cleanup_cq_table(dev);
969                 mlx4_cmd_use_polling(dev);
970                 mlx4_cleanup_eq_table(dev);
971                 mlx4_cleanup_mr_table(dev);
972                 mlx4_cleanup_pd_table(dev);
973
974                 iounmap(priv->kar);
975                 mlx4_uar_free(dev, &priv->driver_uar);
976                 mlx4_cleanup_uar_table(dev);
977                 mlx4_close_hca(dev);
978                 mlx4_cmd_cleanup(dev);
979
980                 if (dev->flags & MLX4_FLAG_MSI_X)
981                         pci_disable_msix(pdev);
982
983                 kfree(priv);
984                 pci_release_region(pdev, 2);
985                 pci_release_region(pdev, 0);
986                 pci_disable_device(pdev);
987                 pci_set_drvdata(pdev, NULL);
988         }
989 }
990
991 int mlx4_restart_one(struct pci_dev *pdev)
992 {
993         mlx4_remove_one(pdev);
994         return __mlx4_init_one(pdev, NULL);
995 }
996
/*
 * PCI IDs of the ConnectX ("Hermon") HCA variants this driver binds to.
 * The table is zero-terminated and exported to the module loader below.
 */
static struct pci_device_id mlx4_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
1007
/* PCI driver descriptor tying the ID table to the probe/remove hooks. */
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};
1014
1015 static int __init mlx4_init(void)
1016 {
1017         int ret;
1018
1019         ret = mlx4_catas_init();
1020         if (ret)
1021                 return ret;
1022
1023         ret = pci_register_driver(&mlx4_driver);
1024         return ret < 0 ? ret : 0;
1025 }
1026
/*
 * Module exit: unregister the PCI driver first (which removes every
 * bound device), then tear down the catastrophic-error machinery that
 * those devices may have used.  The order matters.
 */
static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	mlx4_catas_cleanup();
}
1032
/* Register the module load/unload entry points. */
module_init(mlx4_init);
module_exit(mlx4_cleanup);