/*
 * MMU context allocation for 64-bit kernels.
 *
 * Copyright (C) 2004 Anton Blanchard, IBM Corp. <anton@samba.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
| 12 | |
Paul Mackerras | 14cf11a | 2005-09-26 16:04:21 +1000 | [diff] [blame] | 13 | #include <linux/sched.h> |
| 14 | #include <linux/kernel.h> |
| 15 | #include <linux/errno.h> |
| 16 | #include <linux/string.h> |
| 17 | #include <linux/types.h> |
| 18 | #include <linux/mm.h> |
| 19 | #include <linux/spinlock.h> |
| 20 | #include <linux/idr.h> |
| 21 | |
| 22 | #include <asm/mmu_context.h> |
| 23 | |
/* Serializes all modifications of mmu_context_idr below. */
static DEFINE_SPINLOCK(mmu_context_lock);
/* Allocator handing out MMU context ids; ids start at 1 (see
 * idr_get_new_above() in init_new_context()), 0 means "no context". */
static DEFINE_IDR(mmu_context_idr);
| 26 | |
/*
 * Allocate an MMU context id for @mm and initialize its page-size state.
 *
 * Called on mm creation and on fork.  Returns 0 on success, -ENOMEM if
 * no id could be reserved or the id space is exhausted, or another
 * negative errno from the idr layer.
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	int index;
	int err;
	/* Sample this before we assign mm->context.id below: id == 0 means
	 * this mm never had a context, i.e. a brand-new address space
	 * rather than a fork of an existing one. */
	int new_context = (mm->context.id == 0);

again:
	/* Preload idr memory with GFP_KERNEL outside the spinlock, since
	 * the allocation may sleep; the lock only covers the id grab. */
	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
		return -ENOMEM;

	spin_lock(&mmu_context_lock);
	/* Allocate the lowest free id >= 1 (0 is reserved as "no context"). */
	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
	spin_unlock(&mmu_context_lock);

	/* -EAGAIN means another CPU consumed our preloaded memory between
	 * idr_pre_get() and idr_get_new_above(); preload and retry. */
	if (err == -EAGAIN)
		goto again;
	else if (err)
		return err;

	/* The idr can hand out ids beyond what the hardware context space
	 * supports; give the id back under the lock and fail. */
	if (index > MAX_CONTEXT) {
		spin_lock(&mmu_context_lock);
		idr_remove(&mmu_context_idr, index);
		spin_unlock(&mmu_context_lock);
		return -ENOMEM;
	}

	mm->context.id = index;
#ifdef CONFIG_PPC_MM_SLICES
	/* The old code would re-promote on fork, we don't do that
	 * when using slices as it could cause problem promoting slices
	 * that have been forced down to 4K
	 */
	if (new_context)
		slice_set_user_psize(mm, mmu_virtual_psize);
#else
	/* No slices: unconditionally reset to the base user page size and
	 * the matching SLB encoding, including on fork. */
	mm->context.user_psize = mmu_virtual_psize;
	mm->context.sllp = SLB_VSID_USER |
		mmu_psize_defs[mmu_virtual_psize].sllp;
#endif

	return 0;
}
| 69 | |
| 70 | void destroy_context(struct mm_struct *mm) |
| 71 | { |
| 72 | spin_lock(&mmu_context_lock); |
| 73 | idr_remove(&mmu_context_idr, mm->context.id); |
| 74 | spin_unlock(&mmu_context_lock); |
| 75 | |
| 76 | mm->context.id = NO_CONTEXT; |
| 77 | } |