// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/lib/uaccess_with_memcpy.c
 *
 *  Written by: Lennert Buytenhek and Nicolas Pitre
 *  Copyright (C) 2009 Marvell Semiconductor
 */

#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/rwsem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/hardirq.h> /* for in_atomic() */
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <asm/current.h>
#include <asm/page.h>

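/*
 * Try to pin down the page containing user address @_addr so that it
 * can be written to directly with memcpy().  On success, returns 1
 * with the relevant page table lock held via *ptlp and, for a normal
 * page, the pte mapped via *ptep; for a huge/THP page, *ptep is set
 * to NULL.  Returns 0 if the page is absent or would fault on write,
 * in which case the caller must fault it in and retry.
 */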
static int
pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
{
	unsigned long addr = (unsigned long)_addr;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	pud_t *pud;
	spinlock_t *ptl;

	pgd = pgd_offset(current->mm, addr);
	if (unlikely(pgd_none(*pgd) || pgd_bad(*pgd)))
		return 0;

	pud = pud_offset(pgd, addr);
	if (unlikely(pud_none(*pud) || pud_bad(*pud)))
		return 0;

	pmd = pmd_offset(pud, addr);
	if (unlikely(pmd_none(*pmd)))
		return 0;

	/*
	 * A pmd can be bad if it refers to a HugeTLB or THP page.
	 *
	 * Both THP and HugeTLB pages have the same pmd layout
	 * and should not be manipulated by the pte functions.
	 *
	 * Lock the page table for the destination and check
	 * to see that it's still huge and whether or not we will
	 * need to fault on write.
	 */
	if (unlikely(pmd_thp_or_huge(*pmd))) {
		ptl = &current->mm->page_table_lock;
		spin_lock(ptl);
		if (unlikely(!pmd_thp_or_huge(*pmd)
			|| pmd_hugewillfault(*pmd))) {
			spin_unlock(ptl);
			return 0;
		}

		*ptep = NULL;
		*ptlp = ptl;
		return 1;
	}

	if (unlikely(pmd_bad(*pmd)))
		return 0;

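	/*
	 * The destination pte must be present, young, writable and
	 * dirty: ARM maintains the young and dirty bits in software,
	 * so a store to a page missing any of these could still
	 * fault.  In that case bail out and let __put_user() take
	 * the fault instead.
	 */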
	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
	if (unlikely(!pte_present(*pte) || !pte_young(*pte) ||
	    !pte_write(*pte) || !pte_dirty(*pte))) {
		pte_unmap_unlock(pte, ptl);
		return 0;
	}

	*ptep = pte;
	*ptlp = ptl;

	return 1;
}

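/*
 * Copy to user space by pinning each destination page and running a
 * plain memcpy() on it while the page table lock is held.  Pages that
 * cannot be pinned are faulted in by writing a zero byte with
 * __put_user().  Used by arm_copy_to_user() below for copies of 64
 * bytes or more; returns the number of bytes left uncopied.
 */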
static unsigned long noinline
__copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
{
	unsigned long ua_flags;
	int atomic;

	if (uaccess_kernel()) {
		memcpy((void *)to, from, n);
		return 0;
	}

	/* the mmap semaphore is taken only if not in an atomic context */
	atomic = faulthandler_disabled();

	if (!atomic)
		down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

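		/*
		 * If the page cannot be pinned, drop the mmap
		 * semaphore and write a zero byte with __put_user()
		 * to fault the page in writable, then retry.  A
		 * failing __put_user() means a genuine EFAULT.
		 */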
		while (!pin_page_for_write(to, &pte, &ptl)) {
			if (!atomic)
				up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)to))
				goto out;
			if (!atomic)
				down_read(&current->mm->mmap_sem);
		}

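		/*
		 * Copy at most up to the end of the current page:
		 * ~to & ~PAGE_MASK is the offset to the last byte of
		 * the page, so +1 gives the bytes remaining in it
		 * (e.g. with 4K pages, to == ...ffa leaves 6 bytes).
		 */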
		tocopy = (~(unsigned long)to & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

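		/*
		 * Temporarily open up user space access around the
		 * raw memcpy(); on CPUs using domains or software
		 * PAN, this is what makes the user mapping writable
		 * from kernel context.
		 */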
		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	if (!atomic)
		up_read(&current->mm->mmap_sem);

out:
	return n;
}

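/*
 * Arch backend for copy_to_user().  Small copies take the standard
 * assembly routine; larger ones take the memcpy-based path above.
 * Returns the number of bytes that could not be copied.
 */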
unsigned long
arm_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	/*
	 * This test is stubbed out of the main function above to keep
	 * the overhead for small copies low by avoiding a large
	 * register dump on the stack just to reload them right away.
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __copy_to_user_std(to, from, n);
		uaccess_restore(ua_flags);
	} else {
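		/*
		 * uaccess_mask_range_ptr() sanitises the user pointer
		 * against out-of-range speculation (Spectre variant 1)
		 * before the memcpy path dereferences it.
		 */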
		n = __copy_to_user_memcpy(uaccess_mask_range_ptr(to, n),
					  from, n);
	}
	return n;
}

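/*
 * memset()-based counterpart of __copy_to_user_memcpy() above, used
 * to zero user memory a page at a time.  There is no atomic-context
 * special case here: the mmap semaphore is taken unconditionally.
 * Returns the number of bytes left unwritten.
 */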
static unsigned long noinline
__clear_user_memset(void __user *addr, unsigned long n)
{
	unsigned long ua_flags;

	if (uaccess_kernel()) {
		memset((void *)addr, 0, n);
		return 0;
	}

	down_read(&current->mm->mmap_sem);
	while (n) {
		pte_t *pte;
		spinlock_t *ptl;
		int tocopy;

		while (!pin_page_for_write(addr, &pte, &ptl)) {
			up_read(&current->mm->mmap_sem);
			if (__put_user(0, (char __user *)addr))
				goto out;
			down_read(&current->mm->mmap_sem);
		}

		tocopy = (~(unsigned long)addr & ~PAGE_MASK) + 1;
		if (tocopy > n)
			tocopy = n;

		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

		if (pte)
			pte_unmap_unlock(pte, ptl);
		else
			spin_unlock(ptl);
	}
	up_read(&current->mm->mmap_sem);

out:
	return n;
}

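/*
 * Arch backend for clear_user(), with the same small/large split as
 * arm_copy_to_user() above.  Returns the number of bytes not cleared.
 */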
unsigned long arm_clear_user(void __user *addr, unsigned long n)
{
	/* See the rationale for this in arm_copy_to_user() above. */
	if (n < 64) {
		unsigned long ua_flags = uaccess_save_and_enable();
		n = __clear_user_std(addr, n);
		uaccess_restore(ua_flags);
	} else {
		n = __clear_user_memset(addr, n);
	}
	return n;
}

#if 0

/*
 * This code is disabled by default, but kept around in case the chosen
 * thresholds need to be revalidated.  A runtime-determined threshold
 * would imply some overhead (small, but still overhead), and so far
 * measurements on the targets concerned haven't shown a worthwhile
 * variation.
 *
 * Note that a fairly precise sched_clock() implementation is needed
 * for the results to make sense.
 */

#include <linux/vmalloc.h>

static int __init test_size_threshold(void)
{
	struct page *src_page, *dst_page;
	void *user_ptr, *kernel_ptr;
	unsigned long long t0, t1, t2;
	int size, ret;

	ret = -ENOMEM;
	src_page = alloc_page(GFP_KERNEL);
	if (!src_page)
		goto no_src;
	dst_page = alloc_page(GFP_KERNEL);
	if (!dst_page)
		goto no_dst;
	kernel_ptr = page_address(src_page);
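	/*
	 * vmap() the destination page with user-style write-only
	 * protection bits (__P010) so it can stand in for a userspace
	 * buffer when timing the memcpy-based paths against the
	 * standard ones.
	 */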
	user_ptr = vmap(&dst_page, 1, VM_IOREMAP, __pgprot(__P010));
	if (!user_ptr)
		goto no_vmap;

	/* warm up the src page dcache */
	ret = __copy_to_user_memcpy(user_ptr, kernel_ptr, PAGE_SIZE);

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __copy_to_user_memcpy(user_ptr, kernel_ptr, size);
		t1 = sched_clock();
		ret |= __copy_to_user_std(user_ptr, kernel_ptr, size);
		t2 = sched_clock();
		printk("copy_to_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	for (size = PAGE_SIZE; size >= 4; size /= 2) {
		t0 = sched_clock();
		ret |= __clear_user_memset(user_ptr, size);
		t1 = sched_clock();
		ret |= __clear_user_std(user_ptr, size);
		t2 = sched_clock();
		printk("clear_user: %d %llu %llu\n", size, t1 - t0, t2 - t1);
	}

	if (ret)
		ret = -EFAULT;

	vunmap(user_ptr);
no_vmap:
	put_page(dst_page);
no_dst:
	put_page(src_page);
no_src:
	return ret;
}

subsys_initcall(test_size_threshold);

#endif