misc: tegra-profiler: support eh_frame sections
/*
 * drivers/misc/tegra-profiler/comm.c
 *
 * Copyright (c) 2015, NVIDIA CORPORATION.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/miscdevice.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/mm.h>

#include <asm/uaccess.h>

#include <linux/tegra_profiler.h>

#include "comm.h"
#include "version.h"

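/*
 * Per-CPU ring buffer state. rb_hdr and buf point into an area that is
 * mmap()ed by the user-space daemon (see init_mmap_hdr() below), so the
 * header fields are shared with the reader in user space.
 */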
struct quadd_ring_buffer {
	struct quadd_ring_buffer_hdr *rb_hdr;
	char *buf;

	size_t max_fill_count;
	size_t nr_skipped_samples;

	struct quadd_mmap_area *mmap;

	spinlock_t lock;
};

struct quadd_comm_ctx {
	struct quadd_comm_control_interface *control;

	atomic_t active;

	struct mutex io_mutex;
	int nr_users;

	int params_ok;
	pid_t process_pid;
	uid_t debug_app_uid;

	wait_queue_head_t read_wait;

	struct miscdevice *misc_dev;

	struct list_head mmap_areas;
	spinlock_t mmaps_lock;
};

struct comm_cpu_context {
	struct quadd_ring_buffer rb;
};

static struct quadd_comm_ctx comm_ctx;
static DEFINE_PER_CPU(struct comm_cpu_context, cpu_ctx);

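/*
 * Ring-buffer helpers. One slot is always kept free (note the "- 1" in
 * rb_get_free_space()), so pos_read == pos_write means "empty" and
 * (pos_write + 1) % size == pos_read means "full".
 */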
static int __maybe_unused
rb_is_full(struct quadd_ring_buffer *rb)
{
	struct quadd_ring_buffer_hdr *rb_hdr = rb->rb_hdr;
	return (rb_hdr->pos_write + 1) % rb_hdr->size == rb_hdr->pos_read;
}

static int __maybe_unused
rb_is_empty(struct quadd_ring_buffer *rb)
{
	struct quadd_ring_buffer_hdr *rb_hdr = rb->rb_hdr;
	return rb_hdr->pos_read == rb_hdr->pos_write;
}

static size_t
rb_get_filled_space(struct quadd_ring_buffer_hdr *rb_hdr)
{
	return (rb_hdr->pos_write >= rb_hdr->pos_read) ?
		rb_hdr->pos_write - rb_hdr->pos_read :
		rb_hdr->pos_write + rb_hdr->size - rb_hdr->pos_read;
}

static size_t
rb_get_free_space(struct quadd_ring_buffer_hdr *rb_hdr)
{
	return rb_hdr->size - rb_get_filled_space(rb_hdr) - 1;
}

static ssize_t
rb_write(struct quadd_ring_buffer_hdr *rb_hdr,
	 char *mmap_buf, void *data, size_t length)
{
	size_t new_pos_write, chunk1;

	new_pos_write = (rb_hdr->pos_write + length) % rb_hdr->size;

	if (new_pos_write < rb_hdr->pos_write) {
		chunk1 = rb_hdr->size - rb_hdr->pos_write;

		memcpy(mmap_buf + rb_hdr->pos_write, data, chunk1);

		if (new_pos_write > 0)
			memcpy(mmap_buf, data + chunk1, new_pos_write);
	} else {
		memcpy(mmap_buf + rb_hdr->pos_write, data, length);
	}

	rb_hdr->pos_write = new_pos_write;
	return length;
}

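/*
 * Copy a record header plus its iovec payload into the ring buffer.
 * The write is staged through a local copy of the header (new_hdr) and
 * pos_write is published only after all chunks have been copied, so the
 * user-space reader never observes a partially written record.
 */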
static ssize_t
write_sample(struct quadd_ring_buffer *rb,
	     struct quadd_record_data *sample,
	     struct quadd_iovec *vec, int vec_count)
{
	int i;
	ssize_t err;
	size_t length_sample, fill_count;
	struct quadd_ring_buffer_hdr *rb_hdr = rb->rb_hdr, new_hdr;

	if (!rb_hdr)
		return -EIO;

	length_sample = sizeof(*sample);
	for (i = 0; i < vec_count; i++)
		length_sample += vec[i].len;

	new_hdr.size = rb_hdr->size;
	new_hdr.pos_write = rb_hdr->pos_write;
	new_hdr.pos_read = rb_hdr->pos_read;

	pr_debug("[cpu: %d] type/len: %u/%#zx, read/write pos: %#x/%#x, free: %#zx\n",
		smp_processor_id(),
		sample->record_type,
		length_sample,
		new_hdr.pos_read, new_hdr.pos_write,
		rb_get_free_space(&new_hdr));

	if (length_sample > rb_get_free_space(&new_hdr)) {
		pr_err_once("[cpu: %d] warning: buffer has been overflowed\n",
			    smp_processor_id());
		return -ENOSPC;
	}

	err = rb_write(&new_hdr, rb->buf, sample, sizeof(*sample));
	if (err < 0)
		return err;

	for (i = 0; i < vec_count; i++) {
		err = rb_write(&new_hdr, rb->buf, vec[i].base, vec[i].len);
		if (err < 0)
			return err;
	}

	fill_count = rb_get_filled_space(&new_hdr);
	if (fill_count > rb->max_fill_count) {
		rb->max_fill_count = fill_count;
		rb_hdr->max_fill_count = fill_count;
	}

	rb_hdr->pos_write = new_hdr.pos_write;
	wake_up_all(&comm_ctx.read_wait);

	return length_sample;
}

static size_t get_data_size(void)
{
	int cpu_id;
	size_t size = 0;
	struct comm_cpu_context *cc;
	struct quadd_ring_buffer *rb;

	for_each_possible_cpu(cpu_id) {
		cc = &per_cpu(cpu_ctx, cpu_id);

		rb = &cc->rb;
		if (!rb->rb_hdr)
			continue;

		size += rb_get_filled_space(rb->rb_hdr);
	}

	return size;
}

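/*
 * Entry point used by the sampling code to push a record. A negative
 * cpu_id selects the current CPU's buffer; writers on the same buffer
 * are serialized with the per-buffer spinlock.
 */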
static ssize_t
put_sample(struct quadd_record_data *data,
	   struct quadd_iovec *vec,
	   int vec_count, int cpu_id)
{
	ssize_t err = 0;
	unsigned long flags;
	struct comm_cpu_context *cc;
	struct quadd_ring_buffer *rb;
	struct quadd_ring_buffer_hdr *rb_hdr;

	if (!atomic_read(&comm_ctx.active))
		return -EIO;

	cc = cpu_id < 0 ? &__get_cpu_var(cpu_ctx) :
		&per_cpu(cpu_ctx, cpu_id);

	rb = &cc->rb;

	spin_lock_irqsave(&rb->lock, flags);

	err = write_sample(rb, data, vec, vec_count);
	if (err < 0) {
		pr_err_once("%s: error: write sample\n", __func__);
		rb->nr_skipped_samples++;

		rb_hdr = rb->rb_hdr;
		if (rb_hdr)
			rb_hdr->skipped_samples++;
	}

	spin_unlock_irqrestore(&rb->lock, flags);

	return err;
}

static void comm_reset(void)
{
	pr_debug("Comm reset\n");
}

static int is_active(void)
{
	return atomic_read(&comm_ctx.active) != 0;
}

static struct quadd_comm_data_interface comm_data = {
	.put_sample = put_sample,
	.reset = comm_reset,
	.is_active = is_active,
};

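/*
 * Non-root callers may only talk to the profiler if their fsuid matches
 * the uid of the profiled task or the debuggable application uid passed
 * in at setup time; CAP_SYS_ADMIN bypasses the check.
 */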
static int check_access_permission(void)
{
	struct task_struct *task;

	if (capable(CAP_SYS_ADMIN))
		return 0;

	if (!comm_ctx.params_ok || comm_ctx.process_pid == 0)
		return -EACCES;

	rcu_read_lock();
	task = pid_task(find_vpid(comm_ctx.process_pid), PIDTYPE_PID);
	rcu_read_unlock();
	if (!task)
		return -EACCES;

	if (current_fsuid() != task_uid(task) &&
	    task_uid(task) != comm_ctx.debug_app_uid) {
		pr_err("Permission denied, owner/task uids: %u/%u\n",
		       current_fsuid(), task_uid(task));
		return -EACCES;
	}
	return 0;
}

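/* Look up an mmap area by its vma start address; callers hold mmaps_lock. */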
static struct quadd_mmap_area *
find_mmap(unsigned long vm_start)
{
	struct quadd_mmap_area *entry;

	list_for_each_entry(entry, &comm_ctx.mmap_areas, list) {
		struct vm_area_struct *mmap_vma = entry->mmap_vma;
		if (vm_start == mmap_vma->vm_start)
			return entry;
	}

	return NULL;
}

static int device_open(struct inode *inode, struct file *file)
{
	mutex_lock(&comm_ctx.io_mutex);
	comm_ctx.nr_users++;
	mutex_unlock(&comm_ctx.io_mutex);
	return 0;
}

static int device_release(struct inode *inode, struct file *file)
{
	mutex_lock(&comm_ctx.io_mutex);
	comm_ctx.nr_users--;

	if (comm_ctx.nr_users == 0) {
		if (atomic_cmpxchg(&comm_ctx.active, 1, 0)) {
			comm_ctx.control->stop();
			pr_info("Stop profiling: daemon is closed\n");
		}
	}
	mutex_unlock(&comm_ctx.io_mutex);

	return 0;
}

static unsigned int
device_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = 0;

	poll_wait(file, &comm_ctx.read_wait, wait);

	if (get_data_size() > 0)
		mask |= POLLIN | POLLRDNORM;

	if (!atomic_read(&comm_ctx.active))
		mask |= POLLHUP;

	return mask;
}

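/*
 * Lay out a ring buffer inside an already mmap()ed area:
 * [quadd_mmap_header][quadd_ring_buffer_hdr][sample data ...]
 * The headers are filled in here and the area is bound to the per-CPU
 * ring buffer given by mmap_rb->cpu_id.
 */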
static int
init_mmap_hdr(struct quadd_mmap_rb_info *mmap_rb,
	      struct quadd_mmap_area *mmap)
{
	int cpu_id;
	size_t size;
	unsigned long flags;
	struct vm_area_struct *vma;
	struct quadd_ring_buffer *rb;
	struct quadd_ring_buffer_hdr *rb_hdr;
	struct quadd_mmap_header *mmap_hdr;
	struct comm_cpu_context *cc;

	if (mmap->type != QUADD_MMAP_TYPE_RB)
		return -EIO;

	cpu_id = mmap_rb->cpu_id;
	cc = &per_cpu(cpu_ctx, cpu_id);

	rb = &cc->rb;

	spin_lock_irqsave(&rb->lock, flags);

	mmap->rb = rb;

	rb->mmap = mmap;

	rb->max_fill_count = 0;
	rb->nr_skipped_samples = 0;

	vma = mmap->mmap_vma;

	size = vma->vm_end - vma->vm_start;
	size -= sizeof(*mmap_hdr) + sizeof(*rb_hdr);

	mmap_hdr = mmap->data;

	mmap_hdr->magic = QUADD_MMAP_HEADER_MAGIC;
	mmap_hdr->version = QUADD_MMAP_HEADER_VERSION;
	mmap_hdr->cpu_id = cpu_id;

	rb_hdr = (struct quadd_ring_buffer_hdr *)(mmap_hdr + 1);
	rb->rb_hdr = rb_hdr;

	rb_hdr->size = size;
	rb_hdr->pos_read = 0;
	rb_hdr->pos_write = 0;

	rb_hdr->max_fill_count = 0;
	rb_hdr->skipped_samples = 0;

	rb->buf = (char *)(rb_hdr + 1);

	rb_hdr->state = QUADD_RB_STATE_ACTIVE;

	spin_unlock_irqrestore(&rb->lock, flags);

	pr_info("[cpu: %d] init_mmap_hdr: vma: %#lx - %#lx, data: %p - %p\n",
		cpu_id,
		vma->vm_start, vma->vm_end,
		mmap->data, mmap->data + vma->vm_end - vma->vm_start);

	return 0;
}

static void rb_stop(void)
{
	int cpu_id;
	struct quadd_ring_buffer *rb;
	struct quadd_ring_buffer_hdr *rb_hdr;
	struct comm_cpu_context *cc;

	for_each_possible_cpu(cpu_id) {
		cc = &per_cpu(cpu_ctx, cpu_id);

		rb = &cc->rb;
		rb_hdr = rb->rb_hdr;

		if (!rb_hdr)
			continue;

		pr_info("[%d] skipped samples/max filling: %zu/%zu\n",
			cpu_id, rb->nr_skipped_samples, rb->max_fill_count);

		rb_hdr->state = QUADD_RB_STATE_STOPPED;
	}
}

static void rb_reset(struct quadd_ring_buffer *rb)
{
	unsigned long flags;

	if (!rb)
		return;

	spin_lock_irqsave(&rb->lock, flags);

	rb->mmap = NULL;
	rb->buf = NULL;
	rb->rb_hdr = NULL;

	spin_unlock_irqrestore(&rb->lock, flags);
}

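/*
 * ioctl dispatcher. Setup/capability/state/version requests are allowed
 * for any caller; everything else goes through check_access_permission().
 */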
static long
device_ioctl(struct file *file,
	     unsigned int ioctl_num,
	     unsigned long ioctl_param)
{
	int err = 0;
	struct quadd_mmap_area *mmap;
	struct quadd_parameters *user_params;
	struct quadd_comm_cap cap;
	struct quadd_module_state state;
	struct quadd_module_version versions;
	struct quadd_sections extabs;
	struct quadd_mmap_rb_info mmap_rb;

	if (ioctl_num != IOCTL_SETUP &&
	    ioctl_num != IOCTL_GET_CAP &&
	    ioctl_num != IOCTL_GET_STATE &&
	    ioctl_num != IOCTL_GET_VERSION) {
		err = check_access_permission();
		if (err)
			return err;
	}

	mutex_lock(&comm_ctx.io_mutex);

	switch (ioctl_num) {
	case IOCTL_SETUP:
		if (atomic_read(&comm_ctx.active)) {
			pr_err("error: tegra profiler is active\n");
			err = -EBUSY;
			goto error_out;
		}

		user_params = vmalloc(sizeof(*user_params));
		if (!user_params) {
			err = -ENOMEM;
			goto error_out;
		}

		if (copy_from_user(user_params, (void __user *)ioctl_param,
				   sizeof(struct quadd_parameters))) {
			pr_err("setup failed\n");
			vfree(user_params);
			err = -EFAULT;
			goto error_out;
		}

		err = comm_ctx.control->set_parameters(user_params,
						       &comm_ctx.debug_app_uid);
		if (err) {
			pr_err("error: setup failed\n");
			vfree(user_params);
			goto error_out;
		}
		comm_ctx.params_ok = 1;
		comm_ctx.process_pid = user_params->pids[0];

		if (user_params->reserved[QUADD_PARAM_IDX_SIZE_OF_RB] == 0) {
			pr_err("error: too old version of daemon\n");
			vfree(user_params);
			err = -EINVAL;
			goto error_out;
		}

		pr_info("setup success: freq/mafreq: %u/%u, backtrace: %d, pid: %d\n",
			user_params->freq,
			user_params->ma_freq,
			user_params->backtrace,
			user_params->pids[0]);

		vfree(user_params);
		break;

	case IOCTL_GET_CAP:
		comm_ctx.control->get_capabilities(&cap);
		if (copy_to_user((void __user *)ioctl_param, &cap,
				 sizeof(struct quadd_comm_cap))) {
			pr_err("error: get_capabilities failed\n");
			err = -EFAULT;
			goto error_out;
		}
		break;

	case IOCTL_GET_VERSION:
		strcpy((char *)versions.branch, QUADD_MODULE_BRANCH);
		strcpy((char *)versions.version, QUADD_MODULE_VERSION);

		versions.samples_version = QUADD_SAMPLES_VERSION;
		versions.io_version = QUADD_IO_VERSION;

		if (copy_to_user((void __user *)ioctl_param, &versions,
				 sizeof(struct quadd_module_version))) {
			pr_err("error: get version failed\n");
			err = -EFAULT;
			goto error_out;
		}
		break;

	case IOCTL_GET_STATE:
		comm_ctx.control->get_state(&state);

		state.buffer_size = 0;
		state.buffer_fill_size = get_data_size();
		state.reserved[QUADD_MOD_STATE_IDX_RB_MAX_FILL_COUNT] = 0;

		if (copy_to_user((void __user *)ioctl_param, &state,
				 sizeof(struct quadd_module_state))) {
			pr_err("error: get_state failed\n");
			err = -EFAULT;
			goto error_out;
		}
		break;

	case IOCTL_START:
		if (!atomic_cmpxchg(&comm_ctx.active, 0, 1)) {
			if (!comm_ctx.params_ok) {
				pr_err("error: params failed\n");
				atomic_set(&comm_ctx.active, 0);
				err = -EFAULT;
				goto error_out;
			}

			err = comm_ctx.control->start();
			if (err) {
				pr_err("error: start failed\n");
				atomic_set(&comm_ctx.active, 0);
				goto error_out;
			}
			pr_info("Start profiling success\n");
		}
		break;

	case IOCTL_STOP:
		if (atomic_cmpxchg(&comm_ctx.active, 1, 0)) {
			comm_ctx.control->stop();
			wake_up_all(&comm_ctx.read_wait);
			rb_stop();
			pr_info("Stop profiling success\n");
		}
		break;

	case IOCTL_SET_SECTIONS_INFO:
		if (copy_from_user(&extabs, (void __user *)ioctl_param,
				   sizeof(extabs))) {
			pr_err("error: set_sections_info failed\n");
			err = -EFAULT;
			goto error_out;
		}

		pr_debug("%s: user_mmap_start: %#llx, sections vma: %#llx - %#llx\n",
			 __func__,
			 (unsigned long long)extabs.user_mmap_start,
			 (unsigned long long)extabs.vm_start,
			 (unsigned long long)extabs.vm_end);

		spin_lock(&comm_ctx.mmaps_lock);
		mmap = find_mmap(extabs.user_mmap_start);
		if (!mmap) {
			pr_err("%s: error: mmap is not found\n", __func__);
			err = -ENXIO;
			spin_unlock(&comm_ctx.mmaps_lock);
			goto error_out;
		}

		mmap->type = QUADD_MMAP_TYPE_EXTABS;
		mmap->rb = NULL;

		err = comm_ctx.control->set_extab(&extabs, mmap);
		spin_unlock(&comm_ctx.mmaps_lock);
		if (err) {
			pr_err("error: set_sections_info\n");
			goto error_out;
		}
		break;

	case IOCTL_SET_MMAP_RB:
		if (copy_from_user(&mmap_rb, (void __user *)ioctl_param,
				   sizeof(mmap_rb))) {
			pr_err("%s: error: mmap_rb failed\n", __func__);
			err = -EFAULT;
			goto error_out;
		}

		spin_lock(&comm_ctx.mmaps_lock);
		mmap = find_mmap((unsigned long)mmap_rb.vm_start);
		spin_unlock(&comm_ctx.mmaps_lock);
		if (!mmap) {
			pr_err("%s: error: mmap is not found\n", __func__);
			err = -ENXIO;
			goto error_out;
		}
		mmap->type = QUADD_MMAP_TYPE_RB;

		err = init_mmap_hdr(&mmap_rb, mmap);
		if (err) {
			pr_err("%s: error: init_mmap_hdr\n", __func__);
			goto error_out;
		}

		break;

	default:
		pr_err("error: ioctl %u is unsupported in this version of module\n",
		       ioctl_num);
		err = -EFAULT;
		goto error_out;
	}

error_out:
	mutex_unlock(&comm_ctx.io_mutex);
	return err;
}

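/* Unlink and free an mmap area; called from mmap_close() under mmaps_lock. */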
static void
delete_mmap(struct quadd_mmap_area *mmap)
{
	struct quadd_mmap_area *entry, *next;

	list_for_each_entry_safe(entry, next, &comm_ctx.mmap_areas, list) {
		if (entry == mmap) {
			list_del(&entry->list);
			vfree(entry->data);
			kfree(entry);
			break;
		}
	}
}

static void mmap_open(struct vm_area_struct *vma)
{
	pr_debug("%s: mmap_open: vma: %#lx - %#lx\n",
		 __func__, vma->vm_start, vma->vm_end);
}

static void mmap_close(struct vm_area_struct *vma)
{
	struct quadd_mmap_area *mmap;

	pr_debug("%s: mmap_close: vma: %#lx - %#lx\n",
		 __func__, vma->vm_start, vma->vm_end);

	spin_lock(&comm_ctx.mmaps_lock);

	mmap = find_mmap(vma->vm_start);
	if (!mmap) {
		pr_err("%s: error: mmap is not found\n", __func__);
		goto out;
	}

	pr_debug("mmap_close: type: %d\n", mmap->type);

	if (mmap->type == QUADD_MMAP_TYPE_EXTABS)
		comm_ctx.control->delete_mmap(mmap);
	else if (mmap->type == QUADD_MMAP_TYPE_RB)
		rb_reset(mmap->rb);
	else
		pr_err("error: mmap area is uninitialized\n");

	delete_mmap(mmap);

out:
	spin_unlock(&comm_ctx.mmaps_lock);
}

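/*
 * Page fault handler for the profiler mappings: pages come from the
 * vmalloc_user() buffer allocated in device_mmap() and are mapped into
 * the daemon's address space lazily, one page per fault.
 */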
static int mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	void *data;
	struct quadd_mmap_area *mmap;
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;

	pr_debug("mmap_fault: vma: %#lx - %#lx, pgoff: %#lx, vaddr: %p\n",
		 vma->vm_start, vma->vm_end, vmf->pgoff, vmf->virtual_address);

	spin_lock(&comm_ctx.mmaps_lock);

	mmap = find_mmap(vma->vm_start);
	if (!mmap) {
		spin_unlock(&comm_ctx.mmaps_lock);
		return VM_FAULT_SIGBUS;
	}

	data = mmap->data;

	vmf->page = vmalloc_to_page(data + offset);
	get_page(vmf->page);

	spin_unlock(&comm_ctx.mmaps_lock);
	return 0;
}

static struct vm_operations_struct mmap_vm_ops = {
	.open	= mmap_open,
	.close	= mmap_close,
	.fault	= mmap_fault,
};

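/*
 * mmap() handler: allocate a pageable backing buffer for the requested
 * size and register it in comm_ctx.mmap_areas. The area's role (ring
 * buffer or extab/eh_frame sections) is assigned later via
 * IOCTL_SET_MMAP_RB or IOCTL_SET_SECTIONS_INFO.
 */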
static int
device_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long vma_size, nr_pages;
	struct quadd_mmap_area *entry;

	pr_debug("mmap: vma: %#lx - %#lx, pgoff: %#lx\n",
		 vma->vm_start, vma->vm_end, vma->vm_pgoff);

	if (vma->vm_pgoff != 0)
		return -EINVAL;

	vma->vm_private_data = filp->private_data;

	vma_size = vma->vm_end - vma->vm_start;
	nr_pages = vma_size / PAGE_SIZE;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->mmap_vma = vma;

	INIT_LIST_HEAD(&entry->list);
	INIT_LIST_HEAD(&entry->ex_entries);

	entry->data = vmalloc_user(nr_pages * PAGE_SIZE);
	if (!entry->data) {
		pr_err("%s: error: vmalloc_user\n", __func__);
		kfree(entry);
		return -ENOMEM;
	}

	entry->type = QUADD_MMAP_TYPE_NONE;

	pr_debug("%s: data: %p - %p (%#lx)\n",
		 __func__, entry->data, entry->data + nr_pages * PAGE_SIZE,
		 nr_pages * PAGE_SIZE);

	spin_lock(&comm_ctx.mmaps_lock);
	list_add_tail(&entry->list, &comm_ctx.mmap_areas);
	spin_unlock(&comm_ctx.mmaps_lock);

	vma->vm_ops = &mmap_vm_ops;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;

	vma->vm_ops->open(vma);

	return 0;
}

static void unregister(void)
{
	misc_deregister(comm_ctx.misc_dev);
	kfree(comm_ctx.misc_dev);
}

static const struct file_operations qm_fops = {
	.poll		= device_poll,
	.open		= device_open,
	.release	= device_release,
	.unlocked_ioctl	= device_ioctl,
	.compat_ioctl	= device_ioctl,
	.mmap		= device_mmap,
};

static int comm_init(void)
{
	int res, cpu_id;
	struct miscdevice *misc_dev;

	misc_dev = kzalloc(sizeof(*misc_dev), GFP_KERNEL);
	if (!misc_dev) {
		pr_err("Error: alloc error\n");
		return -ENOMEM;
	}

	misc_dev->minor = MISC_DYNAMIC_MINOR;
	misc_dev->name = QUADD_DEVICE_NAME;
	misc_dev->fops = &qm_fops;

	res = misc_register(misc_dev);
	if (res < 0) {
		pr_err("Error: misc_register: %d\n", res);
		kfree(misc_dev);
		return res;
	}
	comm_ctx.misc_dev = misc_dev;

	mutex_init(&comm_ctx.io_mutex);
	atomic_set(&comm_ctx.active, 0);

	comm_ctx.params_ok = 0;
	comm_ctx.process_pid = 0;
	comm_ctx.nr_users = 0;

	init_waitqueue_head(&comm_ctx.read_wait);

	INIT_LIST_HEAD(&comm_ctx.mmap_areas);
	spin_lock_init(&comm_ctx.mmaps_lock);

	for_each_possible_cpu(cpu_id) {
		struct comm_cpu_context *cc = &per_cpu(cpu_ctx, cpu_id);
		struct quadd_ring_buffer *rb = &cc->rb;

		rb->mmap = NULL;
		rb->buf = NULL;
		rb->rb_hdr = NULL;

		rb->max_fill_count = 0;
		rb->nr_skipped_samples = 0;

		spin_lock_init(&rb->lock);
	}

	return 0;
}

struct quadd_comm_data_interface *
quadd_comm_events_init(struct quadd_comm_control_interface *control)
{
	int err;

	err = comm_init();
	if (err < 0)
		return ERR_PTR(err);

	comm_ctx.control = control;
	return &comm_data;
}

void quadd_comm_events_exit(void)
{
	mutex_lock(&comm_ctx.io_mutex);
	unregister();
	mutex_unlock(&comm_ctx.io_mutex);
}