2955f368de0c3478204011aeed298b314e2494cb
[linux-3.10.git] / drivers / infiniband / hw / ipath / ipath_stats.c
1 /*
2  * Copyright (c) 2006 QLogic, Inc. All rights reserved.
3  * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
4  *
5  * This software is available to you under a choice of one of two
6  * licenses.  You may choose to be licensed under the terms of the GNU
7  * General Public License (GPL) Version 2, available from the file
8  * COPYING in the main directory of this source tree, or the
9  * OpenIB.org BSD license below:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      - Redistributions of source code must retain the above
16  *        copyright notice, this list of conditions and the following
17  *        disclaimer.
18  *
19  *      - Redistributions in binary form must reproduce the above
20  *        copyright notice, this list of conditions and the following
21  *        disclaimer in the documentation and/or other materials
22  *        provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include "ipath_kernel.h"
35
/*
 * Driver-wide statistics block; fields (e.g. sps_hdrqfull, sps_etidfull,
 * sps_port0pkts) are updated/read elsewhere in the driver and consumed by
 * the reporting code in this file.
 */
struct infinipath_stats ipath_stats;
37
38 /**
39  * ipath_snap_cntr - snapshot a chip counter
40  * @dd: the infinipath device
41  * @creg: the counter to snapshot
42  *
43  * called from add_timer and user counter read calls, to deal with
44  * counters that wrap in "human time".  The words sent and received, and
45  * the packets sent and received are all that we worry about.  For now,
46  * at least, we don't worry about error counters, because if they wrap
47  * that quickly, we probably don't care.  We may eventually just make this
48  * handle all the counters.  word counters can wrap in about 20 seconds
49  * of full bandwidth traffic, packet counters in a few hours.
50  */
51
u64 ipath_snap_cntr(struct ipath_devdata *dd, ipath_creg creg)
{
	u32 val, reg64 = 0;	/* reg64 flags that a full 64-bit read was done */
	u64 val64;
	unsigned long t0, t1;	/* jiffies before/after the chip read, for the
				 * broken-hardware timeout check below */
	u64 ret;
	unsigned long flags;

	t0 = jiffies;
	/* If fast increment counters are only 32 bits, snapshot them,
	 * and maintain them as 64bit values in the driver */
	if (!(dd->ipath_flags & IPATH_32BITCOUNTERS) &&
	    (creg == dd->ipath_cregs->cr_wordsendcnt ||
	     creg == dd->ipath_cregs->cr_wordrcvcnt ||
	     creg == dd->ipath_cregs->cr_pktsendcnt ||
	     creg == dd->ipath_cregs->cr_pktrcvcnt)) {
		val64 = ipath_read_creg(dd, creg);
		/*
		 * Fold a 64-bit all-ones (failed-read) result into the
		 * 32-bit sentinel the timeout check below looks for; any
		 * other value clears it so the check cannot fire.
		 */
		val = val64 == ~0ULL ? ~0U : 0;
		reg64 = 1;
	} else			/* val64 just to keep gcc quiet... */
		val64 = val = ipath_read_creg32(dd, creg);
	/*
	 * See if a second has passed.  This is just a way to detect things
	 * that are quite broken.  Normally this should take just a few
	 * cycles (the check is for long enough that we don't care if we get
	 * pre-empted.)  An Opteron HT O read timeout is 4 seconds with
	 * normal NB values
	 */
	t1 = jiffies;
	/* val == -1 means the read returned all ones, i.e. it failed */
	if (time_before(t0 + HZ, t1) && val == -1) {
		ipath_dev_err(dd, "Error!  Read counter 0x%x timed out\n",
			      creg);
		ret = 0ULL;
		goto bail;
	}
	/* full 64-bit counters don't wrap in "human time"; return as-is */
	if (reg64) {
		ret = val64;
		goto bail;
	}

	/*
	 * 32-bit fast counters: accumulate the delta since the previous
	 * snapshot into the driver's 64-bit shadow.  The unsigned 32-bit
	 * subtraction yields the correct delta even if the chip counter
	 * wrapped since the last read.
	 */
	if (creg == dd->ipath_cregs->cr_wordsendcnt) {
		if (val != dd->ipath_lastsword) {
			dd->ipath_sword += val - dd->ipath_lastsword;
			/* ipath_traffic_wds is shared with the faststats
			 * timer; it's protected by ipath_eep_st_lock */
			spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
			dd->ipath_traffic_wds += val - dd->ipath_lastsword;
			spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
			dd->ipath_lastsword = val;
		}
		val64 = dd->ipath_sword;
	} else if (creg == dd->ipath_cregs->cr_wordrcvcnt) {
		if (val != dd->ipath_lastrword) {
			dd->ipath_rword += val - dd->ipath_lastrword;
			spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
			dd->ipath_traffic_wds += val - dd->ipath_lastrword;
			spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);
			dd->ipath_lastrword = val;
		}
		val64 = dd->ipath_rword;
	} else if (creg == dd->ipath_cregs->cr_pktsendcnt) {
		if (val != dd->ipath_lastspkts) {
			dd->ipath_spkts += val - dd->ipath_lastspkts;
			dd->ipath_lastspkts = val;
		}
		val64 = dd->ipath_spkts;
	} else if (creg == dd->ipath_cregs->cr_pktrcvcnt) {
		if (val != dd->ipath_lastrpkts) {
			dd->ipath_rpkts += val - dd->ipath_lastrpkts;
			dd->ipath_lastrpkts = val;
		}
		val64 = dd->ipath_rpkts;
	} else
		/* not one of the shadowed counters; return raw 32-bit value */
		val64 = (u64) val;

	ret = val64;

bail:
	return ret;
}
130
131 /**
132  * ipath_qcheck - print delta of egrfull/hdrqfull errors for kernel ports
133  * @dd: the infinipath device
134  *
135  * print the delta of egrfull/hdrqfull errors for kernel ports no more than
136  * every 5 seconds.  User processes are printed at close, but kernel doesn't
137  * close, so...  Separate routine so may call from other places someday, and
138  * so function name when printed by _IPATH_INFO is meaningful
139  */
140 static void ipath_qcheck(struct ipath_devdata *dd)
141 {
142         static u64 last_tot_hdrqfull;
143         size_t blen = 0;
144         char buf[128];
145
146         *buf = 0;
147         if (dd->ipath_pd[0]->port_hdrqfull != dd->ipath_p0_hdrqfull) {
148                 blen = snprintf(buf, sizeof buf, "port 0 hdrqfull %u",
149                                 dd->ipath_pd[0]->port_hdrqfull -
150                                 dd->ipath_p0_hdrqfull);
151                 dd->ipath_p0_hdrqfull = dd->ipath_pd[0]->port_hdrqfull;
152         }
153         if (ipath_stats.sps_etidfull != dd->ipath_last_tidfull) {
154                 blen += snprintf(buf + blen, sizeof buf - blen,
155                                  "%srcvegrfull %llu",
156                                  blen ? ", " : "",
157                                  (unsigned long long)
158                                  (ipath_stats.sps_etidfull -
159                                   dd->ipath_last_tidfull));
160                 dd->ipath_last_tidfull = ipath_stats.sps_etidfull;
161         }
162
163         /*
164          * this is actually the number of hdrq full interrupts, not actual
165          * events, but at the moment that's mostly what I'm interested in.
166          * Actual count, etc. is in the counters, if needed.  For production
167          * users this won't ordinarily be printed.
168          */
169
170         if ((ipath_debug & (__IPATH_PKTDBG | __IPATH_DBG)) &&
171             ipath_stats.sps_hdrqfull != last_tot_hdrqfull) {
172                 blen += snprintf(buf + blen, sizeof buf - blen,
173                                  "%shdrqfull %llu (all ports)",
174                                  blen ? ", " : "",
175                                  (unsigned long long)
176                                  (ipath_stats.sps_hdrqfull -
177                                   last_tot_hdrqfull));
178                 last_tot_hdrqfull = ipath_stats.sps_hdrqfull;
179         }
180         if (blen)
181                 ipath_dbg("%s\n", buf);
182
183         if (dd->ipath_port0head != (u32)
184             le64_to_cpu(*dd->ipath_hdrqtailptr)) {
185                 if (dd->ipath_lastport0rcv_cnt ==
186                     ipath_stats.sps_port0pkts) {
187                         ipath_cdbg(PKT, "missing rcv interrupts? "
188                                    "port0 hd=%llx tl=%x; port0pkts %llx\n",
189                                    (unsigned long long)
190                                    le64_to_cpu(*dd->ipath_hdrqtailptr),
191                                    dd->ipath_port0head,
192                                    (unsigned long long)
193                                    ipath_stats.sps_port0pkts);
194                 }
195                 dd->ipath_lastport0rcv_cnt = ipath_stats.sps_port0pkts;
196         }
197 }
198
199 /**
200  * ipath_get_faststats - get word counters from chip before they overflow
201  * @opaque: holds a pointer to the infinipath device ipath_devdata
202  *
203  * called from add_timer
204  */
void ipath_get_faststats(unsigned long opaque)
{
	/* timer callbacks get an unsigned long; it carries our devdata */
	struct ipath_devdata *dd = (struct ipath_devdata *) opaque;
	u32 val;
	static unsigned cnt;	/* tick count, used only for rate limiting */
	unsigned long flags;

	/*
	 * don't access the chip while running diags, or memory diags can
	 * fail
	 */
	if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
	    ipath_diag_inuse)
		/* but re-arm the timer, for diags case; won't hurt other */
		goto done;

	/*
	 * We now try to maintain a "active timer", based on traffic
	 * exceeding a threshold, so we need to check the word-counts
	 * even if they are 64-bit.
	 */
	ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordsendcnt);
	ipath_snap_cntr(dd, dd->ipath_cregs->cr_wordrcvcnt);
	/* ipath_traffic_wds is accumulated in ipath_snap_cntr() under the
	 * same lock; check against the threshold then reset for next tick */
	spin_lock_irqsave(&dd->ipath_eep_st_lock, flags);
	if (dd->ipath_traffic_wds  >= IPATH_TRAFFIC_ACTIVE_THRESHOLD)
		atomic_add(5, &dd->ipath_active_time); /* S/B #define */
	dd->ipath_traffic_wds = 0;
	spin_unlock_irqrestore(&dd->ipath_eep_st_lock, flags);

	/* 32-bit packet counters wrap quickly; snapshot them every tick so
	 * the 64-bit shadows in the driver stay correct */
	if (dd->ipath_flags & IPATH_32BITCOUNTERS) {
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktsendcnt);
		ipath_snap_cntr(dd, dd->ipath_cregs->cr_pktrcvcnt);
	}

	ipath_qcheck(dd);

	/*
	 * deal with repeat error suppression.  Doesn't really matter if
	 * last error was almost a full interval ago, or just a few usecs
	 * ago; still won't get more than 2 per interval.  We may want
	 * longer intervals for this eventually, could do with mod, counter
	 * or separate timer.  Also see code in ipath_handle_errors() and
	 * ipath_handle_hwerrors().
	 */

	if (dd->ipath_lasterror)
		dd->ipath_lasterror = 0;
	if (dd->ipath_lasthwerror)
		dd->ipath_lasthwerror = 0;
	/* once the unmask time has passed, report and re-enable any errors
	 * that were masked for rate-limiting */
	if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
	    && time_after(jiffies, dd->ipath_unmasktime)) {
		char ebuf[256];
		int iserr;
		iserr = ipath_decode_err(ebuf, sizeof ebuf,
				 (dd->ipath_maskederrs & ~dd->
				  ipath_ignorederrs));
		if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
				~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
				INFINIPATH_E_PKTERRS ))
			ipath_dev_err(dd, "Re-enabling masked errors "
				      "(%s)\n", ebuf);
		else {
			/*
			 * rcvegrfull and rcvhdrqfull are "normal", for some
			 * types of processes (mostly benchmarks) that send
			 * huge numbers of messages, while not processing
			 * them.  So only complain about these at debug
			 * level.
			 */
			if (iserr)
					ipath_dbg("Re-enabling queue full errors (%s)\n",
							ebuf);
			else
				ipath_cdbg(ERRPKT, "Re-enabling packet"
						" problem interrupt (%s)\n", ebuf);
		}
		/* restore the mask to its baseline and write it to the chip
		 * (register takes the inverted sense) */
		dd->ipath_maskederrs = dd->ipath_ignorederrs;
		ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
				 ~dd->ipath_maskederrs);
	}

	/* limit qfull messages to ~one per minute per port */
	if ((++cnt & 0x10)) {
		/* val is reused here as a port index; resetting the
		 * last-seen heads/tails to -1 presumably re-arms the qfull
		 * reporting done elsewhere in the driver — see rcv path */
		for (val = dd->ipath_cfgports - 1; ((int)val) >= 0;
		     val--) {
			if (dd->ipath_lastegrheads[val] != -1)
				dd->ipath_lastegrheads[val] = -1;
			if (dd->ipath_lastrcvhdrqtails[val] != -1)
				dd->ipath_lastrcvhdrqtails[val] = -1;
		}
	}

done:
	/* always re-arm: this routine runs every 5 seconds */
	mod_timer(&dd->ipath_stats_timer, jiffies + HZ * 5);
}