// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/copypage-v6.c
 *
 *  Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved.
 */
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/shmparam.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>

#include "mm.h"

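/*
 * The scratch windows used below provide one page slot per cache
 * colour.  With 4K pages, an SHMLBA of 16K means at most four
 * colours, which is what this build-time check enforces.
 */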
#if SHMLBA > 16384
#error FIX ME
#endif

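/*
 * Serialises use of the colour-indexed scratch mappings set up by
 * the aliasing copy and clear routines below.
 */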
static DEFINE_RAW_SPINLOCK(v6_lock);

/*
 * Copy the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of these pages.
 */
static void v6_copy_user_highpage_nonaliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto, *kfrom;

	kfrom = kmap_atomic(from);
	kto = kmap_atomic(to);
	copy_page(kto, kfrom);
	/* Atomic kmaps nest like a stack: unmap in reverse order. */
	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

/*
 * Clear the user page.  No aliasing to deal with so we can just
 * attack the kernel's existing mapping of this page.
 */
static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page);
	clear_page(kaddr);
	kunmap_atomic(kaddr);
}

/*
 * Discard data in the kernel mapping for the new page.
 * FIXME: this needs MCRR to be supported.
 */
static void discard_old_kernel_data(void *kto)
{
	/*
	 * Invalidate the D-cache by virtual address over
	 * [kto, kto + PAGE_SIZE) (CP15 c6 range operation), so that no
	 * stale lines get written back over the data placed there next.
	 */
	__asm__("mcrr	p15, 0, %1, %0, c6	@ 0xec401f06"
	   :
	   : "r" (kto),
	     "r" ((unsigned long)kto + PAGE_SIZE - 1)
	   : "cc");
}

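/*
 * On a VIPT aliasing cache, two virtual mappings of the same physical
 * page only hit the same cache lines when they share a cache colour
 * (the SHMLBA-sized low bits of the virtual address).  The aliasing
 * routines below therefore remap each page at a kernel address with
 * the same colour as the user-space address before touching it.
 */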
/*
 * Copy the page, taking account of the cache colour.
 */
static void v6_copy_user_highpage_aliasing(struct page *to,
	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
{
	unsigned int offset = CACHE_COLOUR(vaddr);
	unsigned long kfrom, kto;

	/*
	 * If the source page's D-cache state is unknown, write back any
	 * dirty lines first so the coloured alias reads current data.
	 */
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping_file(from), from);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(to));

	/*
	 * Now copy the page using the same cache colour as the
	 * page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	kfrom = COPYPAGE_V6_FROM + (offset << PAGE_SHIFT);
	kto   = COPYPAGE_V6_TO + (offset << PAGE_SHIFT);

	set_top_pte(kfrom, mk_pte(from, PAGE_KERNEL));
	set_top_pte(kto, mk_pte(to, PAGE_KERNEL));

	copy_page((void *)kto, (void *)kfrom);

	raw_spin_unlock(&v6_lock);
}

/*
 * Clear the user page.  We need to deal with the aliasing issues,
 * so remap the kernel page into the same cache colour as the
 * user page.
 */
static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr)
{
	unsigned long to = COPYPAGE_V6_TO + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);

	/* FIXME: not highmem safe */
	discard_old_kernel_data(page_address(page));

	/*
	 * Now clear the page using the same cache colour as
	 * the page's ultimate destination.
	 */
	raw_spin_lock(&v6_lock);

	set_top_pte(to, mk_pte(page, PAGE_KERNEL));
	clear_page((void *)to);

	raw_spin_unlock(&v6_lock);
}

struct cpu_user_fns v6_user_fns __initdata = {
	.cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing,
	.cpu_copy_user_highpage	= v6_copy_user_highpage_nonaliasing,
};

/*
 * The table above defaults to the non-aliasing routines; switch to
 * the colour-aware variants at boot when the cache is VIPT aliasing.
 */
static int __init v6_userpage_init(void)
{
	if (cache_is_vipt_aliasing()) {
		cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing;
		cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing;
	}

	return 0;
}

core_initcall(v6_userpage_init);