mm: kill vma flag VM_CAN_NONLINEAR
/*
 *      linux/mm/filemap_xip.c
 *
 * Copyright (C) 2005 IBM Corporation
 * Author: Carsten Otte <cotte@de.ibm.com>
 *
 * derived from linux/mm/filemap.c - Copyright (C) Linus Torvalds
 *
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/mmu_notifier.h>
#include <linux/sched.h>
#include <linux/seqlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * We do use our own empty page to avoid interference with other users
 * of ZERO_PAGE(), such as /dev/zero
 */
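/*
 * Locking: xip_sparse_mutex serializes instantiation of __xip_sparse_page
 * and allocation of new blocks (get_xip_mem with create == 1), while
 * xip_sparse_seq lets __xip_unmap detect a fault that raced with it and
 * may have mapped the sparse page while the unmap walk was in progress.
 */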
static DEFINE_MUTEX(xip_sparse_mutex);
static seqcount_t xip_sparse_seq = SEQCNT_ZERO;
static struct page *__xip_sparse_page;

/* called under xip_sparse_mutex */
static struct page *xip_sparse_page(void)
{
        if (!__xip_sparse_page) {
                struct page *page = alloc_page(GFP_HIGHUSER | __GFP_ZERO);

                if (page)
                        __xip_sparse_page = page;
        }
        return __xip_sparse_page;
}

/*
 * This is a file read routine for execute in place files, and uses
 * the mapping->a_ops->get_xip_mem() function for the actual low-level
 * access.
 *
 * Note the struct file* is not used at all.  It may be NULL.
 */
static ssize_t
do_xip_mapping_read(struct address_space *mapping,
                    struct file_ra_state *_ra,
                    struct file *filp,
                    char __user *buf,
                    size_t len,
                    loff_t *ppos)
{
        struct inode *inode = mapping->host;
        pgoff_t index, end_index;
        unsigned long offset;
        loff_t isize, pos;
        size_t copied = 0, error = 0;

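        /*
         * ->get_xip_mem(mapping, pgoff, create, &xip_mem, &xip_pfn) returns
         * both the kernel address and the pfn of the memory backing the
         * given page offset; -ENODATA signals a hole (sparse block).
         */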
        BUG_ON(!mapping->a_ops->get_xip_mem);

        pos = *ppos;
        index = pos >> PAGE_CACHE_SHIFT;
        offset = pos & ~PAGE_CACHE_MASK;

        isize = i_size_read(inode);
        if (!isize)
                goto out;

        end_index = (isize - 1) >> PAGE_CACHE_SHIFT;
        do {
                unsigned long nr, left;
                void *xip_mem;
                unsigned long xip_pfn;
                int zero = 0;

                /* nr is the maximum number of bytes to copy from this page */
                nr = PAGE_CACHE_SIZE;
                if (index >= end_index) {
                        if (index > end_index)
                                goto out;
                        nr = ((isize - 1) & ~PAGE_CACHE_MASK) + 1;
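                        /*
                         * nr is now the number of valid bytes in the last
                         * page; e.g. with 4K pages and isize == 5000,
                         * end_index == 1 and nr == ((5000 - 1) & 4095) + 1
                         * == 904.
                         */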
                        if (nr <= offset)
                                goto out;
                }
                nr = nr - offset;
                if (nr > len - copied)
                        nr = len - copied;

                error = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                                        &xip_mem, &xip_pfn);
                if (unlikely(error)) {
                        if (error == -ENODATA) {
                                /* sparse */
                                zero = 1;
                        } else
                                goto out;
                }

                /* If users can be writing to this page using arbitrary
                 * virtual addresses, take care about potential aliasing
                 * before reading the page on the kernel side.
                 */
                if (mapping_writably_mapped(mapping))
                        /* address based flush */ ;
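                /*
                 * No flush is actually done here: XIP memory has no struct
                 * page to hand to flush_dcache_page(), so an address-based
                 * flush would be needed on architectures with aliasing
                 * D-caches.
                 */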

                /*
                 * Ok, we have the mem, so now we can copy it to user space...
                 *
                 * The actor routine returns how many bytes were actually used..
                 * NOTE! This may not be the same as how much of a user buffer
                 * we filled up (we may be padding etc), so we can only update
                 * "pos" here (the actor routine has to update the user buffer
                 * pointers and the remaining count).
                 */
                if (!zero)
                        left = __copy_to_user(buf + copied, xip_mem + offset, nr);
                else
                        left = __clear_user(buf + copied, nr);

                if (left) {
                        error = -EFAULT;
                        goto out;
                }

                copied += (nr - left);
                offset += (nr - left);
                index += offset >> PAGE_CACHE_SHIFT;
                offset &= ~PAGE_CACHE_MASK;
        } while (copied < len);

out:
        *ppos = pos + copied;
        if (filp)
                file_accessed(filp);

        return (copied ? copied : error);
}

ssize_t
xip_file_read(struct file *filp, char __user *buf, size_t len, loff_t *ppos)
{
        if (!access_ok(VERIFY_WRITE, buf, len))
                return -EFAULT;

        return do_xip_mapping_read(filp->f_mapping, &filp->f_ra, filp,
                            buf, len, ppos);
}
EXPORT_SYMBOL_GPL(xip_file_read);

/*
 * __xip_unmap is invoked from xip_file_fault and
 * __xip_file_write.
 *
 * This function walks all vmas of the address_space and unmaps the
 * __xip_sparse_page when found at pgoff.
 */
static void
__xip_unmap(struct address_space *mapping,
                     unsigned long pgoff)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        struct prio_tree_iter iter;
        unsigned long address;
        pte_t *pte;
        pte_t pteval;
        spinlock_t *ptl;
        struct page *page;
        unsigned count;
        int locked = 0;

        count = read_seqcount_begin(&xip_sparse_seq);

        page = __xip_sparse_page;
        if (!page)
                return;

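        /*
         * Walk the vmas optimistically, without xip_sparse_mutex; if the
         * seqcount shows that a fault raced with us (and may have mapped
         * the sparse page again behind our back), redo the walk with the
         * mutex held to lock out further insertions.
         */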
retry:
        mutex_lock(&mapping->i_mmap_mutex);
        vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                mm = vma->vm_mm;
                address = vma->vm_start +
                        ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
                BUG_ON(address < vma->vm_start || address >= vma->vm_end);
                pte = page_check_address(page, mm, address, &ptl, 1);
                if (pte) {
                        /* Nuke the page table entry. */
                        flush_cache_page(vma, address, pte_pfn(*pte));
                        pteval = ptep_clear_flush_notify(vma, address, pte);
                        page_remove_rmap(page);
                        dec_mm_counter(mm, MM_FILEPAGES);
                        BUG_ON(pte_dirty(pteval));
                        pte_unmap_unlock(pte, ptl);
                        page_cache_release(page);
                }
        }
        mutex_unlock(&mapping->i_mmap_mutex);

        if (locked) {
                mutex_unlock(&xip_sparse_mutex);
        } else if (read_seqcount_retry(&xip_sparse_seq, count)) {
                mutex_lock(&xip_sparse_mutex);
                locked = 1;
                goto retry;
        }
}

/*
 * xip_file_fault() is invoked via the vma operations vector for a
 * mapped memory region to read in file data during a page fault.
 *
 * This function is derived from filemap_fault, but used for execute in place
 */
static int xip_file_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        pgoff_t size;
        void *xip_mem;
        unsigned long xip_pfn;
        struct page *page;
        int error;

        /* XXX: are VM_FAULT_ codes OK? */
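        /*
         * Three cases are handled below: the block is present (map its pfn
         * directly); the block is a hole in a shared writable mapping
         * (allocate it, then unmap the sparse page everywhere else); or it
         * is a hole in a read-only or private mapping (map the shared
         * sparse zero page).
         */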
again:
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (vmf->pgoff >= size)
                return VM_FAULT_SIGBUS;

        error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
                                                &xip_mem, &xip_pfn);
        if (likely(!error))
                goto found;
        if (error != -ENODATA)
                return VM_FAULT_OOM;

        /* sparse block */
        if ((vma->vm_flags & (VM_WRITE | VM_MAYWRITE)) &&
            (vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) &&
            (!(mapping->host->i_sb->s_flags & MS_RDONLY))) {
                int err;

                /* maybe shared writable, allocate new block */
                mutex_lock(&xip_sparse_mutex);
                error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 1,
                                                        &xip_mem, &xip_pfn);
                mutex_unlock(&xip_sparse_mutex);
                if (error)
                        return VM_FAULT_SIGBUS;
                /* unmap sparse mappings at pgoff from all other vmas */
                __xip_unmap(mapping, vmf->pgoff);

found:
                err = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address,
                                                        xip_pfn);
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
                /*
                 * err == -EBUSY is fine, we've raced against another thread
                 * that faulted-in the same page
                 */
                if (err != -EBUSY)
                        BUG_ON(err);
                return VM_FAULT_NOPAGE;
        } else {
                int err, ret = VM_FAULT_OOM;

                mutex_lock(&xip_sparse_mutex);
                write_seqcount_begin(&xip_sparse_seq);
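                /*
                 * Recheck under the mutex: a concurrent fault or write may
                 * have allocated the block since the lookup above; if so,
                 * restart the fault and take the found: path instead.
                 */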
                error = mapping->a_ops->get_xip_mem(mapping, vmf->pgoff, 0,
                                                        &xip_mem, &xip_pfn);
                if (unlikely(!error)) {
                        write_seqcount_end(&xip_sparse_seq);
                        mutex_unlock(&xip_sparse_mutex);
                        goto again;
                }
                if (error != -ENODATA)
                        goto out;
                /* not shared and writable, use xip_sparse_page() */
                page = xip_sparse_page();
                if (!page)
                        goto out;
                err = vm_insert_page(vma, (unsigned long)vmf->virtual_address,
                                                        page);
                if (err == -ENOMEM)
                        goto out;

                ret = VM_FAULT_NOPAGE;
out:
                write_seqcount_end(&xip_sparse_seq);
                mutex_unlock(&xip_sparse_mutex);

                return ret;
        }
}

static const struct vm_operations_struct xip_file_vm_ops = {
        .fault  = xip_file_fault,
        .page_mkwrite   = filemap_page_mkwrite,
        .remap_pages = generic_file_remap_pages,
};

int xip_file_mmap(struct file *file, struct vm_area_struct *vma)
{
        BUG_ON(!file->f_mapping->a_ops->get_xip_mem);

        file_accessed(file);
        vma->vm_ops = &xip_file_vm_ops;
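        /*
         * XIP memory normally has no struct page behind it; VM_MIXEDMAP
         * allows the raw-pfn mappings created by vm_insert_mixed() in
         * xip_file_fault().
         */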
        vma->vm_flags |= VM_MIXEDMAP;
        return 0;
}
EXPORT_SYMBOL_GPL(xip_file_mmap);
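
/*
 * Example wiring (a sketch modelled on ext2's XIP support): a filesystem
 * implements get_xip_mem() in its address_space_operations and points its
 * file operations at the xip_* helpers:
 *
 *      const struct file_operations example_xip_file_operations = {
 *              .llseek         = generic_file_llseek,
 *              .read           = xip_file_read,
 *              .write          = xip_file_write,
 *              .mmap           = xip_file_mmap,
 *      };
 *
 * example_xip_file_operations is an illustrative name, not an identifier
 * from this tree.
 */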

static ssize_t
__xip_file_write(struct file *filp, const char __user *buf,
                  size_t count, loff_t pos, loff_t *ppos)
{
        struct address_space *mapping = filp->f_mapping;
        const struct address_space_operations *a_ops = mapping->a_ops;
        struct inode    *inode = mapping->host;
        long            status = 0;
        size_t          bytes;
        ssize_t         written = 0;

        BUG_ON(!mapping->a_ops->get_xip_mem);

        do {
                unsigned long index;
                unsigned long offset;
                size_t copied;
                void *xip_mem;
                unsigned long xip_pfn;

                offset = (pos & (PAGE_CACHE_SIZE - 1)); /* Within page */
                index = pos >> PAGE_CACHE_SHIFT;
                bytes = PAGE_CACHE_SIZE - offset;
                if (bytes > count)
                        bytes = count;

                status = a_ops->get_xip_mem(mapping, index, 0,
                                                &xip_mem, &xip_pfn);
                if (status == -ENODATA) {
                        /* hole: allocate a new block and unmap the sparse page */
                        mutex_lock(&xip_sparse_mutex);
                        status = a_ops->get_xip_mem(mapping, index, 1,
                                                        &xip_mem, &xip_pfn);
                        mutex_unlock(&xip_sparse_mutex);
                        if (!status)
                                /* unmap page at pgoff from all other vmas */
                                __xip_unmap(mapping, index);
                }

                if (status)
                        break;

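                /*
                 * Copy straight into the backing memory; the _nocache
                 * variant uses non-temporal stores where the architecture
                 * provides them, so the copy does not pollute the CPU
                 * cache with data that will not be read back soon.
                 */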
                copied = bytes -
                        __copy_from_user_nocache(xip_mem + offset, buf, bytes);

                if (likely(copied > 0)) {
                        status = copied;

                        if (status >= 0) {
                                written += status;
                                count -= status;
                                pos += status;
                                buf += status;
                        }
                }
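                /*
                 * A short copy means the user buffer faulted part way
                 * through; report -EFAULT unless an error is already
                 * pending.
                 */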
                if (unlikely(copied != bytes))
                        if (status >= 0)
                                status = -EFAULT;
                if (status < 0)
                        break;
        } while (count);
        *ppos = pos;
        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold i_mutex.
         */
        if (pos > inode->i_size) {
                i_size_write(inode, pos);
                mark_inode_dirty(inode);
        }

        return written ? written : status;
}

ssize_t
xip_file_write(struct file *filp, const char __user *buf, size_t len,
               loff_t *ppos)
{
        struct address_space *mapping = filp->f_mapping;
        struct inode *inode = mapping->host;
        size_t count;
        loff_t pos;
        ssize_t ret;

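        /* freeze protection (sb_start_write) nests outside i_mutex */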
        sb_start_write(inode->i_sb);

        mutex_lock(&inode->i_mutex);

        if (!access_ok(VERIFY_READ, buf, len)) {
                ret = -EFAULT;
                goto out_up;
        }

        pos = *ppos;
        count = len;

        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;

        ret = generic_write_checks(filp, &pos, &count, S_ISBLK(inode->i_mode));
        if (ret)
                goto out_backing;
        if (count == 0)
                goto out_backing;

        ret = file_remove_suid(filp);
        if (ret)
                goto out_backing;

        ret = file_update_time(filp);
        if (ret)
                goto out_backing;

        ret = __xip_file_write(filp, buf, count, pos, ppos);

 out_backing:
        current->backing_dev_info = NULL;
 out_up:
        mutex_unlock(&inode->i_mutex);
        sb_end_write(inode->i_sb);
        return ret;
}
EXPORT_SYMBOL_GPL(xip_file_write);

/*
 * truncate a page used for execute in place
 * functionality is analogous to block_truncate_page(), but uses
 * get_xip_mem() to get at the data instead of the page cache
 */
int
xip_truncate_page(struct address_space *mapping, loff_t from)
{
        pgoff_t index = from >> PAGE_CACHE_SHIFT;
        unsigned offset = from & (PAGE_CACHE_SIZE - 1);
        unsigned blocksize;
        unsigned length;
        void *xip_mem;
        unsigned long xip_pfn;
        int err;

        BUG_ON(!mapping->a_ops->get_xip_mem);

        blocksize = 1 << mapping->host->i_blkbits;
        length = offset & (blocksize - 1);

        /* Block boundary? Nothing to do */
        if (!length)
                return 0;

        length = blocksize - length;
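        /*
         * e.g. with 4K pages, a 1K block size and from == 5000: offset ==
         * 904 within the page and length == 1024 - (904 & 1023) == 120,
         * so bytes 904..1023 of the page get cleared below.
         */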

        err = mapping->a_ops->get_xip_mem(mapping, index, 0,
                                                &xip_mem, &xip_pfn);
        if (unlikely(err)) {
                if (err == -ENODATA)
                        /* Hole? No need to truncate */
                        return 0;
                else
                        return err;
        }
        memset(xip_mem + offset, 0, length);
        return 0;
}
EXPORT_SYMBOL_GPL(xip_truncate_page);