/*
 * Copyright (C) 2015 Imagination Technologies
 * Author: Alex Smith <alex.smith@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/binfmts.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/abi.h>
#include <asm/vdso.h>

/* Kernel-provided data used by the VDSO. */
static union mips_vdso_data vdso_data __page_aligned_data;

/*
 * Mapping for the VDSO data pages. The real pages are mapped manually, as
 * what we map and where within the area they are mapped is determined at
 * runtime.
 */
static struct page *no_pages[] = { NULL };
static struct vm_special_mapping vdso_vvar_mapping = {
	.name = "[vvar]",
	.pages = no_pages,
};

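/*
 * Fill in the mapping's page array with the struct pages backing the
 * statically linked VDSO image, so the generic special-mapping code can
 * insert them into each process that maps the image.
 */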
static void __init init_vdso_image(struct mips_vdso_image *image)
{
	unsigned long num_pages, i;

	BUG_ON(!PAGE_ALIGNED(image->data));
	BUG_ON(!PAGE_ALIGNED(image->size));

	num_pages = image->size / PAGE_SIZE;

	for (i = 0; i < num_pages; i++) {
		image->mapping.pages[i] =
			virt_to_page(image->data + (i * PAGE_SIZE));
	}
}

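/*
 * Build the page arrays for every VDSO image that may be used: the native
 * one, plus the o32/n32 compat images when the corresponding ABIs are
 * enabled. Runs once at boot via subsys_initcall().
 */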
static int __init init_vdso(void)
{
	init_vdso_image(&vdso_image);

#ifdef CONFIG_MIPS32_O32
	init_vdso_image(&vdso_image_o32);
#endif

#ifdef CONFIG_MIPS32_N32
	init_vdso_image(&vdso_image_n32);
#endif

	return 0;
}
subsys_initcall(init_vdso);

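/*
 * Called by the ELF loader when a new program is exec'd, to map the VDSO
 * into its address space. The layout is a single page of kernel data
 * ([vvar]) at 'base', immediately followed by the VDSO image itself
 * ([vdso]) at 'base + PAGE_SIZE'.
 */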
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mips_vdso_image *image = current->thread.abi->vdso;
	struct mm_struct *mm = current->mm;
	unsigned long base, vdso_addr;
	struct vm_area_struct *vma;
	int ret;

	down_write(&mm->mmap_sem);

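	/*
	 * Reserve one contiguous region large enough for the data page
	 * plus the VDSO image, so the two mappings end up adjacent.
	 */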
	base = get_unmapped_area(NULL, 0, PAGE_SIZE + image->size, 0, 0);
	if (IS_ERR_VALUE(base)) {
		ret = base;
		goto out;
	}

	vdso_addr = base + PAGE_SIZE;

	vma = _install_special_mapping(mm, base, PAGE_SIZE,
				       VM_READ | VM_MAYREAD,
				       &vdso_vvar_mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* Map data page. */
	ret = remap_pfn_range(vma, base,
			      virt_to_phys(&vdso_data) >> PAGE_SHIFT,
			      PAGE_SIZE, PAGE_READONLY);
	if (ret)
		goto out;

	/* Map VDSO image. */
	vma = _install_special_mapping(mm, vdso_addr, image->size,
				       VM_READ | VM_EXEC |
				       VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
				       &image->mapping);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

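	/*
	 * Record where the image was mapped; the ELF loader is expected to
	 * report this address to userspace (typically via the
	 * AT_SYSINFO_EHDR auxiliary vector entry) so the C library can
	 * locate the VDSO.
	 */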
	mm->context.vdso = (void *)vdso_addr;
	ret = 0;

out:
	up_write(&mm->mmap_sem);
	return ret;
}