video: tegra: nvmap: handle the vma->vm_mm NULL case
[linux-3.10.git] drivers/video/tegra/nvmap/nvmap_mm.c
/*
 * drivers/video/tegra/nvmap/nvmap_mm.c
 *
 * Some MM related functionality specific to nvmap.
 *
 * Copyright (c) 2013-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <trace/events/nvmap.h>

#include <asm/pgtable.h>

#include "nvmap_priv.h"

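/*
 * Flush the entire data cache of the calling CPU. On Denver cores that
 * report support for it in ID_AFR0_EL1, the dedicated full-cache-flush
 * system register is used; otherwise fall back to __flush_dcache_all().
 */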
static inline void nvmap_flush_dcache_all(void *dummy)
{
#if defined(CONFIG_DENVER_CPU)
        u64 id_afr0;
        u64 midr;

        asm volatile ("mrs %0, MIDR_EL1" : "=r"(midr));
        /* check if current core is a Denver processor */
        if ((midr & 0xFF8FFFF0) == 0x4e0f0000) {
                asm volatile ("mrs %0, ID_AFR0_EL1" : "=r"(id_afr0));
                /* check if complete cache flush through msr is supported */
                if (likely((id_afr0 & 0xf00) == 0x100)) {
                        asm volatile ("msr s3_0_c15_c13_0, %0" : : "r" (0));
                        asm volatile ("dsb sy");
                        return;
                }
        }
#endif
        __flush_dcache_all(NULL);
}

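/*
 * Flush (clean + invalidate) the inner caches. Depending on the
 * configuration this is done by set/way from a single CPU
 * (CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU) or by running the
 * flush on every online CPU.
 */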
void inner_flush_cache_all(void)
{
#if defined(CONFIG_ARM64) && defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        nvmap_flush_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(nvmap_flush_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        v7_flush_kern_cache_all();
#else
        on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
#endif
}

extern void __clean_dcache_louis(void *);
extern void v7_clean_kern_cache_louis(void *);
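/*
 * Clean (write back without invalidating) the inner caches. With
 * CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU, every CPU cleans to
 * the level of unification and one CPU then cleans the whole cache;
 * otherwise every CPU cleans its caches by set/way.
 */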
void inner_clean_cache_all(void)
{
#if defined(CONFIG_ARM64) && \
        defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        on_each_cpu(__clean_dcache_louis, NULL, 1);
        __clean_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(__clean_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        on_each_cpu(v7_clean_kern_cache_louis, NULL, 1);
        v7_clean_kern_cache_all(NULL);
#else
        on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
#endif
}

/*
 * FIXME:
 *
 *   __clean_dcache_page() is only available on ARM64 (well, we haven't
 *   implemented it on ARMv7).
 */
#if defined(CONFIG_ARM64)
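/*
 * Clean (write back) an array of pages from the CPU data cache and
 * account the operation in the nvmap statistics and trace events.
 */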
void nvmap_clean_cache(struct page **pages, int numpages)
{
        int i;

        /* Not technically a flush but that's what nvmap knows about. */
        nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++)
                __clean_dcache_page(pages[i]);
}
#endif

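/*
 * Clean a single page from the CPU data cache. On ARMv7, where
 * __clean_dcache_page() is not implemented, the page is flushed instead.
 */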
void nvmap_clean_cache_page(struct page *page)
{
#if defined(CONFIG_ARM64)
        __clean_dcache_page(page);
#else
        __flush_dcache_page(page_mapping(page), page);
#endif
}

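/*
 * Flush (clean + invalidate) an array of pages from the CPU caches.
 * With CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS, ranges at least as large as
 * cache_maint_inner_threshold are handled with a full inner-cache flush
 * instead of per-page maintenance. On ARMv7 the outer cache is flushed
 * per page as well.
 */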
void nvmap_flush_cache(struct page **pages, int numpages)
{
        unsigned int i;
        bool flush_inner = true;
        __attribute__((unused)) unsigned long base;

        nvmap_stats_inc(NS_CFLUSH_RQ, numpages << PAGE_SHIFT);
#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
        if (numpages >= (cache_maint_inner_threshold >> PAGE_SHIFT)) {
                nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
                inner_flush_cache_all();
                flush_inner = false;
        }
#endif
        if (flush_inner)
                nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++) {
                struct page *page = nvmap_to_page(pages[i]);
#ifdef CONFIG_ARM64
                /* __flush_dcache_page() flushes inner and outer on ARM64 */
                if (flush_inner)
                        __flush_dcache_page(page);
#else
                if (flush_inner)
                        __flush_dcache_page(page_mapping(page), page);

                base = page_to_phys(page);
                outer_flush_range(base, base + PAGE_SIZE);
#endif
        }
}

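/* Internal protection operations applied by nvmap_prot_handle(). */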
enum NVMAP_PROT_OP {
        NVMAP_HANDLE_PROT_NONE = 1,
        NVMAP_HANDLE_PROT_RESTORE = 2,
};

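/*
 * Unmap the given range of a page-allocated handle from every vma that
 * currently maps it. A size of 0 means the whole handle.
 */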
void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size)
{
        struct list_head *vmas;
        struct nvmap_vma_list *vma_list;
        struct vm_area_struct *vma;

        if (!handle->heap_pgalloc)
                return;

        /* if no dirty page is present, no need to zap */
        if (nvmap_handle_track_dirty(handle) && !atomic_read(&handle->pgalloc.ndirty))
                return;

        if (!size) {
                offset = 0;
                size = handle->size;
        }

        size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

        mutex_lock(&handle->lock);
        vmas = &handle->vmas;
        list_for_each_entry(vma_list, vmas, list) {
                struct nvmap_vma_priv *priv;
                u32 vm_size = size;

                vma = vma_list->vma;
                priv = vma->vm_private_data;
                if ((offset + size) > (vma->vm_end - vma->vm_start))
                        vm_size = vma->vm_end - vma->vm_start - offset;
                if (priv->offs || vma->vm_pgoff)
                        /* The vma mapping starts in the middle of the
                         * handle's memory, so zapping needs special care;
                         * zap the entire range for now.
                         * FIXME: optimize zapping.
                         */
                        zap_page_range(vma, vma->vm_start,
                                vma->vm_end - vma->vm_start, NULL);
                else
                        zap_page_range(vma, vma->vm_start + offset,
                                vm_size, NULL);
        }
        mutex_unlock(&handle->lock);
}

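/*
 * Change the protection of every user mapping of a handle:
 * NVMAP_HANDLE_PROT_NONE drops the mappings to VM_NONE, while
 * NVMAP_HANDLE_PROT_RESTORE restores the saved vm_flags and marks the
 * handle dirty. Kernel-side vma copies (vm_mm == NULL), such as those
 * created by V4L2, are skipped.
 */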
static int nvmap_prot_handle(struct nvmap_handle *handle, u32 offset,
                u32 size, int op)
{
        struct list_head *vmas;
        struct nvmap_vma_list *vma_list;
        struct vm_area_struct *vma;
        int err = -EINVAL;

        if (!handle->heap_pgalloc)
                return err;

        if ((offset >= handle->size) || (offset > handle->size - size) ||
            (size > handle->size))
                return err;

        if (!size)
                size = handle->size;

        size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

        mutex_lock(&handle->lock);
        vmas = &handle->vmas;
        list_for_each_entry(vma_list, vmas, list) {
                struct nvmap_vma_priv *priv;
                u32 vm_size = size;
                struct vm_area_struct *prev;

                vma = vma_list->vma;
                /* vm_mm is NULL for the vma copy created by V4L2.
                 * Ignore the kernel copy of the vma; fixing the protection
                 * of the user copy of the vma is good enough.
                 */
                if (!vma->vm_mm)
                        continue;
                prev = vma->vm_prev;
                priv = vma->vm_private_data;
                if ((offset + size) > (vma->vm_end - vma->vm_start))
                        vm_size = vma->vm_end - vma->vm_start - offset;

                if ((priv->offs || vma->vm_pgoff) ||
                    (size > (vma->vm_end - vma->vm_start)))
                        vm_size = vma->vm_end - vma->vm_start;
                if (vma->vm_mm != current->mm)
                        down_write(&vma->vm_mm->mmap_sem);
                switch (op) {
                case NVMAP_HANDLE_PROT_NONE:
                        vma->vm_flags = vma_list->save_vm_flags;
                        (void)vm_set_page_prot(vma);
                        if (nvmap_handle_track_dirty(handle) &&
                            !atomic_read(&handle->pgalloc.ndirty)) {
                                err = 0;
                                break;
                        }
                        err = mprotect_fixup(vma, &prev, vma->vm_start,
                                        vma->vm_start + vm_size, VM_NONE);
                        if (err)
                                goto try_unlock;
                        vma->vm_flags = vma_list->save_vm_flags;
                        (void)vm_set_page_prot(vma);
                        break;
                case NVMAP_HANDLE_PROT_RESTORE:
                        vma->vm_flags = VM_NONE;
                        (void)vm_set_page_prot(vma);
                        err = mprotect_fixup(vma, &prev, vma->vm_start,
                                        vma->vm_start + vm_size,
                                        vma_list->save_vm_flags);
                        if (err)
                                goto try_unlock;
                        _nvmap_handle_mkdirty(handle, 0, size);
                        break;
                default:
                        BUG();
                }
try_unlock:
                if (vma->vm_mm != current->mm)
                        up_write(&vma->vm_mm->mmap_sem);
                if (err)
                        goto finish;
        }
finish:
        mutex_unlock(&handle->lock);
        return err;
}

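/*
 * Apply nvmap_prot_handle() to an array of handles while holding the
 * current task's mmap_sem for writing.
 */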
static int nvmap_prot_handles(struct nvmap_handle **handles, u32 *offsets,
                              u32 *sizes, u32 nr, int op)
{
        int i, err = 0;

        down_write(&current->mm->mmap_sem);
        for (i = 0; i < nr; i++) {
                err = nvmap_prot_handle(handles[i], offsets[i],
                                sizes[i], op);
                if (err)
                        goto finish;
        }
finish:
        up_write(&current->mm->mmap_sem);
        return err;
}

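/*
 * Reserve or unreserve the pages of the given handles. Reserving revokes
 * the user mappings via nvmap_prot_handles(); unreserving with
 * NVMAP_INSERT_PAGES_ON_UNRESERVE restores them. Only whole-handle
 * ranges are accepted, and handles created with
 * NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE additionally get the required cache
 * maintenance.
 */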
int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
                        u32 nr, u32 op)
{
        int i, err;

        for (i = 0; i < nr; i++) {
                u32 size = sizes[i] ? sizes[i] : handles[i]->size;
                u32 offset = sizes[i] ? offsets[i] : 0;

                if ((offset != 0) || (size != handles[i]->size))
                        return -EINVAL;

                if (op == NVMAP_PAGES_PROT_AND_CLEAN)
                        continue;
                /*
                 * NOTE: This unreserves the handle even when
                 * NVMAP_INSERT_PAGES_ON_UNRESERVE is called on some portion
                 * of the handle.
                 */
                atomic_set(&handles[i]->pgalloc.reserved,
                                (op == NVMAP_PAGES_RESERVE) ? 1 : 0);
        }

        if (op == NVMAP_PAGES_PROT_AND_CLEAN)
                op = NVMAP_PAGES_RESERVE;

        switch (op) {
        case NVMAP_PAGES_RESERVE:
                err = nvmap_prot_handles(handles, offsets, sizes, nr,
                                                NVMAP_HANDLE_PROT_NONE);
                if (err)
                        return err;
                break;
        case NVMAP_INSERT_PAGES_ON_UNRESERVE:
                err = nvmap_prot_handles(handles, offsets, sizes, nr,
                                                NVMAP_HANDLE_PROT_RESTORE);
                if (err)
                        return err;
                break;
        case NVMAP_PAGES_UNRESERVE:
                for (i = 0; i < nr; i++)
                        if (nvmap_handle_track_dirty(handles[i]))
                                atomic_set(&handles[i]->pgalloc.ndirty, 0);
                break;
        default:
                return -EINVAL;
        }

        if (!(handles[0]->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
                return 0;

        if (op == NVMAP_PAGES_RESERVE) {
                err = nvmap_do_cache_maint_list(handles, offsets, sizes,
                                                NVMAP_CACHE_OP_WB, nr);
                if (err)
                        return err;
                for (i = 0; i < nr; i++)
                        nvmap_handle_mkclean(handles[i], offsets[i],
                                             sizes[i] ? sizes[i] : handles[i]->size);
        } else if ((op == NVMAP_PAGES_UNRESERVE) && handles[0]->heap_pgalloc) {
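                /* no cache maintenance needed when unreserving a
                 * page-allocated handle
                 */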
        } else {
                err = nvmap_do_cache_maint_list(handles, offsets, sizes,
                                                NVMAP_CACHE_OP_WB_INV, nr);
                if (err)
                        return err;
        }
        return 0;
}