video: tegra: nvmap: Fix OOB vulnerability
linux-3.10.git: drivers/video/tegra/nvmap/nvmap_mm.c
/*
 * drivers/video/tegra/nvmap/nvmap_mm.c
 *
 * Some MM related functionality specific to nvmap.
 *
 * Copyright (c) 2013-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <trace/events/nvmap.h>

#include <asm/pgtable.h>

#include "nvmap_priv.h"

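/*
 * Flush the entire inner (CPU) data cache. On Denver cores that advertise
 * full-cache flush support in ID_AFR0_EL1, a single write to the dedicated
 * system register flushes the whole cache; otherwise fall back to
 * __flush_dcache_all().
 */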
static inline void nvmap_flush_dcache_all(void *dummy)
{
#if defined(CONFIG_DENVER_CPU)
        u64 id_afr0;
        u64 midr;

        asm volatile ("mrs %0, MIDR_EL1" : "=r"(midr));
        /* check if current core is a Denver processor */
        if ((midr & 0xFF8FFFF0) == 0x4e0f0000) {
                asm volatile ("mrs %0, ID_AFR0_EL1" : "=r"(id_afr0));
                /* check if complete cache flush through msr is supported */
                if (likely((id_afr0 & 0xf00) == 0x100)) {
                        asm volatile ("msr s3_0_c15_c13_0, %0" : : "r" (0));
                        asm volatile ("dsb sy");
                        return;
                }
        }
#endif
        __flush_dcache_all(NULL);
}

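/*
 * Flush the entire inner data cache. With
 * CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU the flush runs on the
 * calling CPU only; otherwise it is broadcast to every CPU via
 * on_each_cpu(). ARM64 uses nvmap_flush_dcache_all(), ARMv7 uses
 * v7_flush_kern_cache_all().
 */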
void inner_flush_cache_all(void)
{
#if defined(CONFIG_ARM64) && defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        nvmap_flush_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(nvmap_flush_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        v7_flush_kern_cache_all();
#else
        on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
#endif
}

extern void __clean_dcache_louis(void *);
extern void v7_clean_kern_cache_louis(void *);
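/*
 * Clean (write back without invalidating) the entire inner data cache.
 * With CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU, every CPU cleans
 * up to the Level of Unification Inner Shareable and a single CPU then
 * cleans the rest; otherwise every CPU cleans the whole cache.
 */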
void inner_clean_cache_all(void)
{
#if defined(CONFIG_ARM64) && \
        defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        on_each_cpu(__clean_dcache_louis, NULL, 1);
        __clean_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(__clean_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        on_each_cpu(v7_clean_kern_cache_louis, NULL, 1);
        v7_clean_kern_cache_all(NULL);
#else
        on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
#endif
}

/*
 * FIXME:
 *
 *   __clean_dcache_page() is only available on ARM64 (well, we haven't
 *   implemented it on ARMv7).
 */
#if defined(CONFIG_ARM64)
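/*
 * Clean the given pages from the CPU data cache, one page at a time,
 * and account the work in the nvmap cache statistics and trace point.
 */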
void nvmap_clean_cache(struct page **pages, int numpages)
{
        int i;

        /* Not technically a flush but that's what nvmap knows about. */
        nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++)
                __clean_dcache_page(pages[i]);
}
#endif

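/*
 * Clean a single page: __clean_dcache_page() on ARM64, or a flush through
 * the page's mapping with __flush_dcache_page() on ARMv7.
 */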
void nvmap_clean_cache_page(struct page *page)
{
#if defined(CONFIG_ARM64)
        __clean_dcache_page(page);
#else
        __flush_dcache_page(page_mapping(page), page);
#endif
}

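/*
 * Flush the given pages from the CPU cache. When
 * CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS is enabled and the range reaches
 * cache_maint_inner_threshold bytes, flush the whole inner cache instead
 * of walking individual pages; on ARMv7 the outer cache is also flushed
 * per page.
 */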
void nvmap_flush_cache(struct page **pages, int numpages)
{
        unsigned int i;
        bool flush_inner = true;
        __attribute__((unused)) unsigned long base;

        nvmap_stats_inc(NS_CFLUSH_RQ, numpages << PAGE_SHIFT);
#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
        if (numpages >= (cache_maint_inner_threshold >> PAGE_SHIFT)) {
                nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
                inner_flush_cache_all();
                flush_inner = false;
        }
#endif
        if (flush_inner)
                nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++) {
                struct page *page = nvmap_to_page(pages[i]);
#ifdef CONFIG_ARM64 /* __flush_dcache_page flushes inner and outer on ARM64 */
                if (flush_inner)
                        __flush_dcache_page(page);
#else
                if (flush_inner)
                        __flush_dcache_page(page_mapping(page), page);

                base = page_to_phys(page);
                outer_flush_range(base, base + PAGE_SIZE);
#endif
        }
}

enum NVMAP_PROT_OP {
        NVMAP_HANDLE_PROT_NONE = 1,
        NVMAP_HANDLE_PROT_RESTORE = 2,
};

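/*
 * Zap the user PTEs covering [offset, offset + size) in every VMA attached
 * to the handle so that later accesses fault back into nvmap. A zero size
 * means the whole handle; handles that track dirty pages and currently
 * have none are skipped.
 */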
void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size)
{
        struct list_head *vmas;
        struct nvmap_vma_list *vma_list;
        struct vm_area_struct *vma;

        if (!handle->heap_pgalloc)
                return;

        /* if no dirty page is present, no need to zap */
        if (nvmap_handle_track_dirty(handle) && !atomic_read(&handle->pgalloc.ndirty))
                return;

        if (!size) {
                offset = 0;
                size = handle->size;
        }

        size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

        mutex_lock(&handle->lock);
        vmas = &handle->vmas;
        list_for_each_entry(vma_list, vmas, list) {
                struct nvmap_vma_priv *priv;
                u32 vm_size = size;

                vma = vma_list->vma;
                priv = vma->vm_private_data;
                if ((offset + size) > (vma->vm_end - vma->vm_start))
                        vm_size = vma->vm_end - vma->vm_start - offset;
                if (priv->offs || vma->vm_pgoff)
                        /* vma mapping starts in the middle of handle memory.
                         * Zapping needs special care; zap the entire range for now.
                         * FIXME: optimize zapping.
                         */
                        zap_page_range(vma, vma->vm_start,
                                vma->vm_end - vma->vm_start, NULL);
                else
                        zap_page_range(vma, vma->vm_start + offset,
                                vm_size, NULL);
        }
        mutex_unlock(&handle->lock);
}

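/*
 * Change the user-space protection of every VMA mapping this handle:
 * NVMAP_HANDLE_PROT_NONE revokes access via mprotect_fixup(..., VM_NONE),
 * NVMAP_HANDLE_PROT_RESTORE restores the saved vm_flags and marks the
 * handle dirty again. Only offset 0 is supported (BUG_ON otherwise), and
 * the owning mm's mmap_sem is taken when it is not current->mm.
 */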
static int nvmap_prot_handle(struct nvmap_handle *handle, u32 offset,
                u32 size, int op)
{
        struct list_head *vmas;
        struct nvmap_vma_list *vma_list;
        struct vm_area_struct *vma;
        int err = -EINVAL;

        BUG_ON(offset);

        if (!handle->heap_pgalloc)
                return err;

        if (!size)
                size = handle->size;

        size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

        mutex_lock(&handle->lock);
        vmas = &handle->vmas;
        list_for_each_entry(vma_list, vmas, list) {
                struct nvmap_vma_priv *priv;
                u32 vm_size = size;
                struct vm_area_struct *prev;

                vma = vma_list->vma;
                prev = vma->vm_prev;
                priv = vma->vm_private_data;
                if ((offset + size) > (vma->vm_end - vma->vm_start))
                        vm_size = vma->vm_end - vma->vm_start - offset;

                if ((priv->offs || vma->vm_pgoff) ||
                    (size > (vma->vm_end - vma->vm_start)))
                        vm_size = vma->vm_end - vma->vm_start;
                if (vma->vm_mm != current->mm)
                        down_write(&vma->vm_mm->mmap_sem);
                switch (op) {
                case NVMAP_HANDLE_PROT_NONE:
                        vma->vm_flags = vma_list->save_vm_flags;
                        (void)vm_set_page_prot(vma);
                        if (nvmap_handle_track_dirty(handle) &&
                            !atomic_read(&handle->pgalloc.ndirty)) {
                                err = 0;
                                break;
                        }
                        err = mprotect_fixup(vma, &prev, vma->vm_start,
                                        vma->vm_start + vm_size, VM_NONE);
                        if (err)
                                goto try_unlock;
                        vma->vm_flags = vma_list->save_vm_flags;
                        (void)vm_set_page_prot(vma);
                        break;
                case NVMAP_HANDLE_PROT_RESTORE:
                        vma->vm_flags = VM_NONE;
                        (void)vm_set_page_prot(vma);
                        err = mprotect_fixup(vma, &prev, vma->vm_start,
                                        vma->vm_start + vm_size,
                                        vma_list->save_vm_flags);
                        if (err)
                                goto try_unlock;
                        _nvmap_handle_mkdirty(handle, 0, size);
                        break;
                default:
                        BUG();
                }
try_unlock:
                if (vma->vm_mm != current->mm)
                        up_write(&vma->vm_mm->mmap_sem);
                if (err)
                        goto finish;
        }
finish:
        mutex_unlock(&handle->lock);
        return err;
}

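/*
 * Apply nvmap_prot_handle() to an array of handles under the caller's
 * mmap_sem, stopping at the first failure.
 */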
static int nvmap_prot_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr, int op)
{
        int i, err = 0;

        down_write(&current->mm->mmap_sem);
        for (i = 0; i < nr; i++) {
                err = nvmap_prot_handle(handles[i], offsets[i],
                                sizes[i], op);
                if (err)
                        goto finish;
        }
finish:
        up_write(&current->mm->mmap_sem);
        return err;
}

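/*
 * Reserve or unreserve pages of the given handles. Only whole-handle
 * operations are accepted (offset 0 and size equal to the handle size);
 * anything else is rejected with -EINVAL, which guards against
 * out-of-bounds offsets and sizes. Reserving revokes user access,
 * unreserving restores it, and cache maintenance is performed when
 * NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE is set.
 */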
int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
                        u32 nr, u32 op)
{
        int i, err;

        /* validate all page params first */
        for (i = 0; i < nr; i++) {
                u32 size = sizes[i] ? sizes[i] : handles[i]->size;
                u32 offset = sizes[i] ? offsets[i] : 0;

                if ((offset != 0) || (size != handles[i]->size))
                        return -EINVAL;
        }

        for (i = 0; i < nr; i++) {
                u32 size = sizes[i] ? sizes[i] : handles[i]->size;
                u32 offset = sizes[i] ? offsets[i] : 0;

                if ((offset != 0) || (size != handles[i]->size))
                        return -EINVAL;

                if (op == NVMAP_PAGES_PROT_AND_CLEAN)
                        continue;
                /*
                 * NOTE: This unreserves the handle even when
                 * NVMAP_PAGES_INSERT_ON_UNRESERVE is called on some portion
                 * of the handle
                 */
                atomic_set(&handles[i]->pgalloc.reserved,
                                (op == NVMAP_PAGES_RESERVE) ? 1 : 0);
        }

        if (op == NVMAP_PAGES_PROT_AND_CLEAN)
                op = NVMAP_PAGES_RESERVE;

        switch (op) {
        case NVMAP_PAGES_RESERVE:
                err = nvmap_prot_handles(handles, offsets, sizes, nr,
                                                NVMAP_HANDLE_PROT_NONE);
                if (err)
                        return err;
                break;
        case NVMAP_INSERT_PAGES_ON_UNRESERVE:
                err = nvmap_prot_handles(handles, offsets, sizes, nr,
                                                NVMAP_HANDLE_PROT_RESTORE);
                if (err)
                        return err;
                break;
        case NVMAP_PAGES_UNRESERVE:
                for (i = 0; i < nr; i++)
                        if (nvmap_handle_track_dirty(handles[i]))
                                atomic_set(&handles[i]->pgalloc.ndirty, 0);
                break;
        default:
                return -EINVAL;
        }

        if (!(handles[0]->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
                return 0;

        if (op == NVMAP_PAGES_RESERVE) {
                nvmap_do_cache_maint_list(handles, offsets, sizes,
                                          NVMAP_CACHE_OP_WB, nr);
                for (i = 0; i < nr; i++)
                        nvmap_handle_mkclean(handles[i], offsets[i],
                                             sizes[i] ? sizes[i] : handles[i]->size);
        } else if ((op == NVMAP_PAGES_UNRESERVE) && handles[0]->heap_pgalloc) {
        } else {
                nvmap_do_cache_maint_list(handles, offsets, sizes,
                                          NVMAP_CACHE_OP_WB_INV, nr);
        }
        return 0;
}