/*
 * drivers/video/tegra/nvmap/nvmap_mm.c
 *
 * Some MM related functionality specific to nvmap.
 *
 * Copyright (c) 2013-2016, NVIDIA CORPORATION. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <trace/events/nvmap.h>

#include <asm/pgtable.h>

#include "nvmap_priv.h"

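/*
 * Flush the entire data cache from the calling CPU. On Denver cores
 * that report support in ID_AFR0_EL1, a single implementation-defined
 * system register write flushes the whole cache; otherwise fall back
 * to the generic set/way flush.
 */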
static inline void nvmap_flush_dcache_all(void *dummy)
{
#if defined(CONFIG_DENVER_CPU)
        u64 id_afr0;
        u64 midr;

        asm volatile ("mrs %0, MIDR_EL1" : "=r"(midr));
        /* check if current core is a Denver processor */
        if ((midr & 0xFF8FFFF0) == 0x4e0f0000) {
                asm volatile ("mrs %0, ID_AFR0_EL1" : "=r"(id_afr0));
                /* check if complete cache flush through msr is supported */
                if (likely((id_afr0 & 0xf00) == 0x100)) {
                        asm volatile ("msr s3_0_c15_c13_0, %0" : : "r" (0));
                        asm volatile ("dsb sy");
                        return;
                }
        }
#endif
        __flush_dcache_all(NULL);
}

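/*
 * Flush the inner (CPU) data caches. Depending on configuration this
 * runs on every CPU or, with
 * CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU, on the calling CPU
 * only, using the ARM64 or ARMv7 helper as appropriate.
 */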
void inner_flush_cache_all(void)
{
#if defined(CONFIG_ARM64) && \
        defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        nvmap_flush_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(nvmap_flush_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        v7_flush_kern_cache_all();
#else
        on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
#endif
}

extern void __clean_dcache_louis(void *);
extern void v7_clean_kern_cache_louis(void *);
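/*
 * Clean (write back without invalidating) the inner data caches. In the
 * single-CPU maintenance configurations every CPU cleans to the Level
 * of Unification (LoUIS) and the calling CPU then cleans the remaining
 * levels; otherwise every CPU cleans all levels.
 */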
void inner_clean_cache_all(void)
{
#if defined(CONFIG_ARM64) && \
        defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        on_each_cpu(__clean_dcache_louis, NULL, 1);
        __clean_dcache_all(NULL);
#elif defined(CONFIG_ARM64)
        on_each_cpu(__clean_dcache_all, NULL, 1);
#elif defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU)
        on_each_cpu(v7_clean_kern_cache_louis, NULL, 1);
        v7_clean_kern_cache_all(NULL);
#else
        on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
#endif
}

/*
 * FIXME:
 *
 *   __clean_dcache_page() is only available on ARM64 (it has not been
 *   implemented on ARMv7).
 */
#if defined(CONFIG_ARM64)
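/*
 * Clean the data cache lines backing an array of pages and account the
 * operation in the nvmap cache-maintenance statistics.
 */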
void nvmap_clean_cache(struct page **pages, int numpages)
{
        int i;

        /* Not technically a flush but that's what nvmap knows about. */
        nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++)
                __clean_dcache_page(pages[i]);
}
#endif

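/*
 * Clean (ARM64) or flush (ARMv7) the data cache lines of a single page.
 */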
void nvmap_clean_cache_page(struct page *page)
{
#if defined(CONFIG_ARM64)
        __clean_dcache_page(page);
#else
        __flush_dcache_page(page_mapping(page), page);
#endif
}

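/*
 * Flush the cache lines backing an array of nvmap page pointers. With
 * CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS, ranges of at least
 * cache_maint_inner_threshold bytes flush the whole inner cache instead
 * of individual pages; the outer cache (ARMv7 only) is always flushed
 * page by page.
 */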
void nvmap_flush_cache(struct page **pages, int numpages)
{
        unsigned int i;
        bool flush_inner = true;
        __attribute__((unused)) unsigned long base;

        nvmap_stats_inc(NS_CFLUSH_RQ, numpages << PAGE_SHIFT);
#if defined(CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS)
        if (numpages >= (cache_maint_inner_threshold >> PAGE_SHIFT)) {
                nvmap_stats_inc(NS_CFLUSH_DONE, cache_maint_inner_threshold);
                inner_flush_cache_all();
                flush_inner = false;
        }
#endif
        if (flush_inner)
                nvmap_stats_inc(NS_CFLUSH_DONE, numpages << PAGE_SHIFT);
        trace_nvmap_cache_flush(numpages << PAGE_SHIFT,
                nvmap_stats_read(NS_ALLOC),
                nvmap_stats_read(NS_CFLUSH_RQ),
                nvmap_stats_read(NS_CFLUSH_DONE));

        for (i = 0; i < numpages; i++) {
                struct page *page = nvmap_to_page(pages[i]);
#ifdef CONFIG_ARM64 /* __flush_dcache_page flushes inner and outer on ARM64 */
                if (flush_inner)
                        __flush_dcache_page(page);
#else
                if (flush_inner)
                        __flush_dcache_page(page_mapping(page), page);

                base = page_to_phys(page);
                outer_flush_range(base, base + PAGE_SIZE);
#endif
        }
}

enum NVMAP_PROT_OP {
        NVMAP_HANDLE_PROT_NONE = 1,
        NVMAP_HANDLE_PROT_RESTORE = 2,
};

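/*
 * Zap the user PTEs covering [offset, offset + size) in every VMA that
 * maps this page-allocated handle, forcing subsequent accesses to
 * refault. A size of 0 means the whole handle.
 */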
void nvmap_zap_handle(struct nvmap_handle *handle, u32 offset, u32 size)
{
        struct list_head *vmas;
        struct nvmap_vma_list *vma_list;
        struct vm_area_struct *vma;

        if (!handle->heap_pgalloc)
                return;

        /* If no page is dirty, there is nothing to zap */
        if (nvmap_handle_track_dirty(handle) && !atomic_read(&handle->pgalloc.ndirty))
                return;

        if (!size) {
                offset = 0;
                size = handle->size;
        }

        size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

        mutex_lock(&handle->lock);
        vmas = &handle->vmas;
        list_for_each_entry(vma_list, vmas, list) {
                struct nvmap_vma_priv *priv;
                u32 vm_size = size;

                vma = vma_list->vma;
                priv = vma->vm_private_data;
                if ((offset + size) > (vma->vm_end - vma->vm_start))
                        vm_size = vma->vm_end - vma->vm_start - offset;
                if (priv->offs || vma->vm_pgoff)
                        /*
                         * The vma mapping starts in the middle of the handle
                         * memory, so zapping needs special care. Zap the
                         * entire range for now.
                         * FIXME: optimize zapping.
                         */
                        zap_page_range(vma, vma->vm_start,
                                vma->vm_end - vma->vm_start, NULL);
                else
                        zap_page_range(vma, vma->vm_start + offset,
                                vm_size, NULL);
        }
        mutex_unlock(&handle->lock);
}

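/*
 * Change the protection of every user mapping of a handle: either
 * revoke all access (NVMAP_HANDLE_PROT_NONE) or restore the vm_flags
 * saved at mmap time (NVMAP_HANDLE_PROT_RESTORE). The caller is
 * expected to hold current->mm->mmap_sem for write; the mmap_sem of
 * any other mm is taken here.
 */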
static int nvmap_prot_handle(struct nvmap_handle *handle, u32 offset,
                u32 size, int op)
{
        struct list_head *vmas;
        struct nvmap_vma_list *vma_list;
        struct vm_area_struct *vma;
        int err = -EINVAL;

        if (!handle->heap_pgalloc)
                return err;

        if ((offset >= handle->size) || (offset > handle->size - size) ||
            (size > handle->size))
                return err;

        if (!size)
                size = handle->size;

        size = PAGE_ALIGN((offset & ~PAGE_MASK) + size);

        mutex_lock(&handle->lock);
        vmas = &handle->vmas;
        list_for_each_entry(vma_list, vmas, list) {
                struct nvmap_vma_priv *priv;
                u32 vm_size = size;
                struct vm_area_struct *prev;

                vma = vma_list->vma;
                prev = vma->vm_prev;
                priv = vma->vm_private_data;
                if ((offset + size) > (vma->vm_end - vma->vm_start))
                        vm_size = vma->vm_end - vma->vm_start - offset;

                if ((priv->offs || vma->vm_pgoff) ||
                    (size > (vma->vm_end - vma->vm_start)))
                        vm_size = vma->vm_end - vma->vm_start;
                if (vma->vm_mm != current->mm)
                        down_write(&vma->vm_mm->mmap_sem);
                switch (op) {
                case NVMAP_HANDLE_PROT_NONE:
                        vma->vm_flags = vma_list->save_vm_flags;
                        (void)vm_set_page_prot(vma);
                        if (nvmap_handle_track_dirty(handle) &&
                            !atomic_read(&handle->pgalloc.ndirty)) {
                                err = 0;
                                break;
                        }
                        err = mprotect_fixup(vma, &prev, vma->vm_start,
                                        vma->vm_start + vm_size, VM_NONE);
                        if (err)
                                goto try_unlock;
                        vma->vm_flags = vma_list->save_vm_flags;
                        (void)vm_set_page_prot(vma);
                        break;
                case NVMAP_HANDLE_PROT_RESTORE:
                        vma->vm_flags = VM_NONE;
                        (void)vm_set_page_prot(vma);
                        err = mprotect_fixup(vma, &prev, vma->vm_start,
                                        vma->vm_start + vm_size,
                                        vma_list->save_vm_flags);
                        if (err)
                                goto try_unlock;
                        _nvmap_handle_mkdirty(handle, 0, size);
                        break;
                default:
                        BUG();
                }
try_unlock:
                if (vma->vm_mm != current->mm)
                        up_write(&vma->vm_mm->mmap_sem);
                if (err)
                        goto finish;
        }
finish:
        mutex_unlock(&handle->lock);
        return err;
}

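/*
 * Apply nvmap_prot_handle() to an array of handles under the current
 * process's mmap_sem, stopping at the first failure.
 */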
static int nvmap_prot_handles(struct nvmap_handle **handles, u32 *offsets,
                       u32 *sizes, u32 nr, int op)
{
        int i, err = 0;

        down_write(&current->mm->mmap_sem);
        for (i = 0; i < nr; i++) {
                err = nvmap_prot_handle(handles[i], offsets[i],
                                sizes[i], op);
                if (err)
                        goto finish;
        }
finish:
        up_write(&current->mm->mmap_sem);
        return err;
}

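/*
 * Reserve or unreserve the pages of a set of handles. Only whole-handle
 * requests (offset 0, full size) are accepted. Reserving revokes user
 * access to existing mappings, NVMAP_INSERT_PAGES_ON_UNRESERVE restores
 * the saved protections, and a plain unreserve only clears the dirty
 * count. Cache writeback (and invalidate where needed) follows for
 * handles created with NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE.
 */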
int nvmap_reserve_pages(struct nvmap_handle **handles, u32 *offsets, u32 *sizes,
                        u32 nr, u32 op)
{
        int i, err;

        for (i = 0; i < nr; i++) {
                u32 size = sizes[i] ? sizes[i] : handles[i]->size;
                u32 offset = sizes[i] ? offsets[i] : 0;

                if ((offset != 0) || (size != handles[i]->size))
                        return -EINVAL;

                if (op == NVMAP_PAGES_PROT_AND_CLEAN)
                        continue;
                /*
                 * NOTE: This unreserves the handle even when
                 * NVMAP_INSERT_PAGES_ON_UNRESERVE is called on some portion
                 * of the handle
                 */
                atomic_set(&handles[i]->pgalloc.reserved,
                                (op == NVMAP_PAGES_RESERVE) ? 1 : 0);
        }

        if (op == NVMAP_PAGES_PROT_AND_CLEAN)
                op = NVMAP_PAGES_RESERVE;

        switch (op) {
        case NVMAP_PAGES_RESERVE:
                err = nvmap_prot_handles(handles, offsets, sizes, nr,
                                                NVMAP_HANDLE_PROT_NONE);
                if (err)
                        return err;
                break;
        case NVMAP_INSERT_PAGES_ON_UNRESERVE:
                err = nvmap_prot_handles(handles, offsets, sizes, nr,
                                                NVMAP_HANDLE_PROT_RESTORE);
                if (err)
                        return err;
                break;
        case NVMAP_PAGES_UNRESERVE:
                for (i = 0; i < nr; i++)
                        if (nvmap_handle_track_dirty(handles[i]))
                                atomic_set(&handles[i]->pgalloc.ndirty, 0);
                break;
        default:
                return -EINVAL;
        }

        if (!(handles[0]->userflags & NVMAP_HANDLE_CACHE_SYNC_AT_RESERVE))
                return 0;

        if (op == NVMAP_PAGES_RESERVE) {
                err = nvmap_do_cache_maint_list(handles, offsets, sizes,
                                          NVMAP_CACHE_OP_WB, nr);
                if (err)
                        return err;
                for (i = 0; i < nr; i++)
                        nvmap_handle_mkclean(handles[i], offsets[i],
                                             sizes[i] ? sizes[i] : handles[i]->size);
        } else if ((op == NVMAP_PAGES_UNRESERVE) && handles[0]->heap_pgalloc) {
                /* Skip cache maintenance when unreserving page-allocated
                 * handles. */
        } else {
                err = nvmap_do_cache_maint_list(handles, offsets, sizes,
                                          NVMAP_CACHE_OP_WB_INV, nr);
                if (err)
                        return err;
        }
        return 0;
}