blob: 4e0444c0aca6958f91d15e18908acb364c27fd9f [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Nathan Scott7b718762005-11-02 14:58:39 +11002 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
Nathan Scott7b718762005-11-02 14:58:39 +11005 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * published by the Free Software Foundation.
8 *
Nathan Scott7b718762005-11-02 14:58:39 +11009 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
Linus Torvalds1da177e2005-04-16 15:20:36 -070013 *
Nathan Scott7b718762005-11-02 14:58:39 +110014 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Linus Torvalds1da177e2005-04-16 15:20:36 -070017 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070018#include <xfs.h>
19
static kmem_zone_t *ktrace_hdr_zone;	/* zone for ktrace_t headers */
static kmem_zone_t *ktrace_ent_zone;	/* zone for default-sized entry buffers */
static int ktrace_zentries;		/* entry count served by ktrace_ent_zone */
23
Lachlan McIlroyde2eeea2008-02-06 13:37:56 +110024void __init
Linus Torvalds1da177e2005-04-16 15:20:36 -070025ktrace_init(int zentries)
26{
27 ktrace_zentries = zentries;
28
29 ktrace_hdr_zone = kmem_zone_init(sizeof(ktrace_t),
30 "ktrace_hdr");
31 ASSERT(ktrace_hdr_zone);
32
33 ktrace_ent_zone = kmem_zone_init(ktrace_zentries
34 * sizeof(ktrace_entry_t),
35 "ktrace_ent");
36 ASSERT(ktrace_ent_zone);
37}
38
Lachlan McIlroyde2eeea2008-02-06 13:37:56 +110039void __exit
Linus Torvalds1da177e2005-04-16 15:20:36 -070040ktrace_uninit(void)
41{
Nathan Scott3758dee2006-03-22 12:47:28 +110042 kmem_zone_destroy(ktrace_hdr_zone);
43 kmem_zone_destroy(ktrace_ent_zone);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044}
45
46/*
47 * ktrace_alloc()
48 *
49 * Allocate a ktrace header and enough buffering for the given
50 * number of entries.
51 */
52ktrace_t *
Christoph Hellwig4750ddb2005-11-02 15:07:23 +110053ktrace_alloc(int nentries, unsigned int __nocast sleep)
Linus Torvalds1da177e2005-04-16 15:20:36 -070054{
55 ktrace_t *ktp;
56 ktrace_entry_t *ktep;
57
58 ktp = (ktrace_t*)kmem_zone_alloc(ktrace_hdr_zone, sleep);
59
60 if (ktp == (ktrace_t*)NULL) {
61 /*
62 * KM_SLEEP callers don't expect failure.
63 */
64 if (sleep & KM_SLEEP)
65 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
66
67 return NULL;
68 }
69
70 /*
71 * Special treatment for buffers with the ktrace_zentries entries
72 */
73 if (nentries == ktrace_zentries) {
74 ktep = (ktrace_entry_t*)kmem_zone_zalloc(ktrace_ent_zone,
75 sleep);
76 } else {
77 ktep = (ktrace_entry_t*)kmem_zalloc((nentries * sizeof(*ktep)),
Nathan Scottefb8ad72006-09-28 11:03:05 +100078 sleep | KM_LARGE);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079 }
80
81 if (ktep == NULL) {
82 /*
83 * KM_SLEEP callers don't expect failure.
84 */
85 if (sleep & KM_SLEEP)
86 panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
87
88 kmem_free(ktp, sizeof(*ktp));
89
90 return NULL;
91 }
92
Linus Torvalds1da177e2005-04-16 15:20:36 -070093 ktp->kt_entries = ktep;
94 ktp->kt_nentries = nentries;
David Chinner6ee47522008-03-06 13:45:35 +110095 atomic_set(&ktp->kt_index, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -070096 ktp->kt_rollover = 0;
97 return ktp;
98}
99
100
101/*
102 * ktrace_free()
103 *
104 * Free up the ktrace header and buffer. It is up to the caller
105 * to ensure that no-one is referencing it.
106 */
107void
108ktrace_free(ktrace_t *ktp)
109{
110 int entries_size;
111
112 if (ktp == (ktrace_t *)NULL)
113 return;
114
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115 /*
116 * Special treatment for the Vnode trace buffer.
117 */
118 if (ktp->kt_nentries == ktrace_zentries) {
119 kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
120 } else {
121 entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));
122
123 kmem_free(ktp->kt_entries, entries_size);
124 }
125
126 kmem_zone_free(ktrace_hdr_zone, ktp);
127}
128
129
130/*
131 * Enter the given values into the "next" entry in the trace buffer.
132 * kt_index is always the index of the next entry to be filled.
133 */
134void
135ktrace_enter(
136 ktrace_t *ktp,
137 void *val0,
138 void *val1,
139 void *val2,
140 void *val3,
141 void *val4,
142 void *val5,
143 void *val6,
144 void *val7,
145 void *val8,
146 void *val9,
147 void *val10,
148 void *val11,
149 void *val12,
150 void *val13,
151 void *val14,
152 void *val15)
153{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700154 int index;
155 ktrace_entry_t *ktep;
156
157 ASSERT(ktp != NULL);
158
159 /*
160 * Grab an entry by pushing the index up to the next one.
161 */
David Chinner6ee47522008-03-06 13:45:35 +1100162 index = atomic_add_return(1, &ktp->kt_index);
163 index = (index - 1) % ktp->kt_nentries;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164 if (!ktp->kt_rollover && index == ktp->kt_nentries - 1)
165 ktp->kt_rollover = 1;
166
167 ASSERT((index >= 0) && (index < ktp->kt_nentries));
168
169 ktep = &(ktp->kt_entries[index]);
170
171 ktep->val[0] = val0;
172 ktep->val[1] = val1;
173 ktep->val[2] = val2;
174 ktep->val[3] = val3;
175 ktep->val[4] = val4;
176 ktep->val[5] = val5;
177 ktep->val[6] = val6;
178 ktep->val[7] = val7;
179 ktep->val[8] = val8;
180 ktep->val[9] = val9;
181 ktep->val[10] = val10;
182 ktep->val[11] = val11;
183 ktep->val[12] = val12;
184 ktep->val[13] = val13;
185 ktep->val[14] = val14;
186 ktep->val[15] = val15;
187}
188
189/*
190 * Return the number of entries in the trace buffer.
191 */
192int
193ktrace_nentries(
194 ktrace_t *ktp)
195{
David Chinner6ee47522008-03-06 13:45:35 +1100196 int index;
197 if (ktp == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
David Chinner6ee47522008-03-06 13:45:35 +1100200 index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
201 return (ktp->kt_rollover ? ktp->kt_nentries : index);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202}
203
204/*
205 * ktrace_first()
206 *
207 * This is used to find the start of the trace buffer.
208 * In conjunction with ktrace_next() it can be used to
209 * iterate through the entire trace buffer. This code does
210 * not do any locking because it is assumed that it is called
211 * from the debugger.
212 *
213 * The caller must pass in a pointer to a ktrace_snap
214 * structure in which we will keep some state used to
215 * iterate through the buffer. This state must not touched
216 * by any code outside of this module.
217 */
218ktrace_entry_t *
219ktrace_first(ktrace_t *ktp, ktrace_snap_t *ktsp)
220{
221 ktrace_entry_t *ktep;
222 int index;
223 int nentries;
224
225 if (ktp->kt_rollover)
David Chinner6ee47522008-03-06 13:45:35 +1100226 index = atomic_read(&ktp->kt_index) % ktp->kt_nentries;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227 else
228 index = 0;
229
230 ktsp->ks_start = index;
231 ktep = &(ktp->kt_entries[index]);
232
233 nentries = ktrace_nentries(ktp);
234 index++;
235 if (index < nentries) {
236 ktsp->ks_index = index;
237 } else {
238 ktsp->ks_index = 0;
239 if (index > nentries)
240 ktep = NULL;
241 }
242 return ktep;
243}
244
245/*
246 * ktrace_next()
247 *
248 * This is used to iterate through the entries of the given
249 * trace buffer. The caller must pass in the ktrace_snap_t
250 * structure initialized by ktrace_first(). The return value
251 * will be either a pointer to the next ktrace_entry or NULL
252 * if all of the entries have been traversed.
253 */
254ktrace_entry_t *
255ktrace_next(
256 ktrace_t *ktp,
257 ktrace_snap_t *ktsp)
258{
259 int index;
260 ktrace_entry_t *ktep;
261
262 index = ktsp->ks_index;
263 if (index == ktsp->ks_start) {
264 ktep = NULL;
265 } else {
266 ktep = &ktp->kt_entries[index];
267 }
268
269 index++;
270 if (index == ktrace_nentries(ktp)) {
271 ktsp->ks_index = 0;
272 } else {
273 ktsp->ks_index = index;
274 }
275
276 return ktep;
277}
278
/*
 * ktrace_skip()
 *
 * Skip the next "count" entries and return the entry after that.
 * Return NULL if this causes us to iterate past the beginning again.
 */
ktrace_entry_t *
ktrace_skip(
	ktrace_t	*ktp,
	int		count,
	ktrace_snap_t	*ktsp)
{
	int		index;		/* current snapshot position */
	int		new_index;	/* position after skipping count entries */
	ktrace_entry_t	*ktep;
	int		nentries = ktrace_nentries(ktp);

	index = ktsp->ks_index;
	new_index = index + count;
	/* Reduce new_index modulo nentries (count may exceed the buffer). */
	while (new_index >= nentries) {
		new_index -= nentries;
	}
	if (index == ktsp->ks_start) {
		/*
		 * We've iterated around to the start, so we're done.
		 */
		ktep = NULL;
	} else if ((new_index < index) && (index < ktsp->ks_index)) {
		/*
		 * We've skipped past the start again, so we're done.
		 *
		 * NOTE(review): the second comparison looks wrong —
		 * index was just loaded from ktsp->ks_index above and is
		 * never modified, so "index < ktsp->ks_index" is always
		 * false and this branch is dead.  It was presumably meant
		 * to reference ktsp->ks_start; confirm intent before
		 * changing debugger-path behavior.
		 */
		ktep = NULL;
		ktsp->ks_index = ktsp->ks_start;
	} else {
		ktep = &(ktp->kt_entries[new_index]);
		new_index++;
		/* Wrap the saved iterator position at the end of the buffer. */
		if (new_index == nentries) {
			ktsp->ks_index = 0;
		} else {
			ktsp->ks_index = new_index;
		}
	}
	return ktep;
}