/*
 * drivers/video/tegra/nvmap/nvmap.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2010-2012, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <mach/nvmap.h>
#include "nvmap_heap.h"

struct nvmap_device;
struct page;
struct tegra_iovmm_area;

#if defined(CONFIG_TEGRA_NVMAP)
#define nvmap_err(_client, _fmt, ...)				\
	dev_err(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_warn(_client, _fmt, ...)				\
	dev_warn(nvmap_client_to_device(_client),		\
		 "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_debug(_client, _fmt, ...)				\
	dev_dbg(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)
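
/*
 * Illustrative use of the logging helpers above (sketch; 'client' and
 * 'size' are caller-supplied):
 *
 *	nvmap_err(client, "allocation of %zu bytes failed\n", size);
 */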

#define nvmap_ref_to_id(_ref)		((unsigned long)(_ref)->handle)

/* handles allocated using shared system memory (either IOVMM or high-order
 * page allocations) */
struct nvmap_pgalloc {
	struct page **pages;
	struct tegra_iovmm_area *area;
	struct list_head mru_list;	/* MRU entry for IOVMM reclamation */
	bool contig;			/* contiguous system memory */
	bool dirty;			/* area is invalid and needs mapping */
	u32 iovm_addr;	/* non-zero if the client needs a specific IOVA mapping */
};

struct nvmap_handle {
	struct rb_node node;	/* entry on global handle tree */
	atomic_t ref;		/* reference count (i.e., # of duplications) */
	atomic_t pin;		/* pin count */
	unsigned int usecount;	/* number of active uses */
	unsigned long flags;
	size_t size;		/* padded (as-allocated) size */
	size_t orig_size;	/* original (as-requested) size */
	size_t align;
	struct nvmap_client *owner;
	struct nvmap_device *dev;
	union {
		struct nvmap_pgalloc pgalloc;
		struct nvmap_heap_block *carveout;
	};
	bool global;		/* handle may be duplicated by other clients */
	bool secure;		/* zap IOVMM area on unpin */
	bool heap_pgalloc;	/* handle is page allocated (sysmem / iovmm) */
	bool alloc;		/* handle has memory allocated */
	unsigned int userflags;	/* flags passed from userspace */
	struct mutex lock;
};
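
/*
 * The union above is discriminated by heap_pgalloc: pgalloc describes
 * page-allocated (sysmem/IOVMM) handles, carveout describes carveout
 * handles.
 *
 * Illustrative lifecycle against the API declared below (sketch; error
 * handling omitted, heap_mask/align/flags are caller-specific):
 *
 *	struct nvmap_handle_ref *ref = nvmap_create_handle(client, size);
 *	nvmap_alloc_handle_id(client, nvmap_ref_to_id(ref),
 *			      heap_mask, align, flags);
 *	...
 *	nvmap_free_handle_id(client, nvmap_ref_to_id(ref));
 */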

#ifdef CONFIG_NVMAP_PAGE_POOLS
#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
#define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
#define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)
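
/*
 * The pool indices alias the NVMAP_HANDLE_* cacheability values, so a
 * handle's cache flag can select a pool directly; NVMAP_NUM_POOLS is sized
 * off the largest value (NVMAP_HANDLE_CACHEABLE). The union in nvmap_share
 * below relies on the same ordering.
 */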

struct nvmap_page_pool {
	struct mutex lock;
	int npages;
	struct page **page_array;
	struct page **shrink_array;
	int max_pages;
	int flags;
};

int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags);
#endif

struct nvmap_share {
	struct tegra_iovmm_client *iovmm;
	wait_queue_head_t pin_wait;
	struct mutex pin_lock;
#ifdef CONFIG_NVMAP_PAGE_POOLS
	union {
		struct nvmap_page_pool pools[NVMAP_NUM_POOLS];
		struct {
			struct nvmap_page_pool uc_pool;
			struct nvmap_page_pool wc_pool;
			struct nvmap_page_pool iwb_pool;
			struct nvmap_page_pool wb_pool;
		};
	};
#endif
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
	struct mutex mru_lock;
	struct list_head *mru_lists;
	int nr_mru;
#endif
};

struct nvmap_carveout_commit {
	size_t commit;
	struct list_head list;
};

struct nvmap_client {
	const char			*name;
	struct nvmap_device		*dev;
	struct nvmap_share		*share;
	struct rb_root			handle_refs;
	atomic_t			iovm_commit;
	size_t				iovm_limit;
	struct mutex			ref_lock;
	bool				super;
	atomic_t			count;
	struct task_struct		*task;
	struct list_head		list;
	struct nvmap_carveout_commit	carveout_commit[0];
};
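
/*
 * carveout_commit[0] is a zero-length trailing array: one
 * nvmap_carveout_commit per carveout heap is presumably allocated along
 * with the client, and updated through nvmap_carveout_commit_add()/
 * nvmap_carveout_commit_subtract() below.
 */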

struct nvmap_vma_priv {
	struct nvmap_handle *handle;
	size_t		offs;
	atomic_t	count;	/* number of processes cloning the VMA */
};

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
	mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
	mutex_unlock(&priv->ref_lock);
}
#endif /* CONFIG_TEGRA_NVMAP */

struct device *nvmap_client_to_device(struct nvmap_client *client);

pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);

pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);

void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);

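/* bump/drop a handle's in-use count (struct nvmap_handle::usecount) */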
void nvmap_usecount_inc(struct nvmap_handle *h);
void nvmap_usecount_dec(struct nvmap_handle *h);

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *client,
					      struct nvmap_handle *handle,
					      unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
				   struct nvmap_heap_block *b);

struct nvmap_carveout_node;
void nvmap_carveout_commit_add(struct nvmap_client *client,
			       struct nvmap_carveout_node *node, size_t len);

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
				    struct nvmap_carveout_node *node,
				    size_t len);

struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);

struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
					unsigned long handle);

struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
						   unsigned long id);
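
/*
 * The _locked suffix indicates the caller must already hold the client's
 * ref_lock, e.g. (illustrative):
 *
 *	nvmap_ref_lock(client);
 *	ref = _nvmap_validate_id_locked(client, id);
 *	nvmap_ref_unlock(client);
 */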

struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
					 unsigned long id);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
					     size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
						   unsigned long id);

int nvmap_alloc_handle_id(struct nvmap_client *client,
			  unsigned long id, unsigned int heap_mask,
			  size_t align, unsigned int flags);

void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);

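/* pin/unpin arrays of handle IDs: while pinned, a handle's backing memory
 * stays resident and (for IOVMM handles) IOVA-mapped; compare the pin
 * count and the secure ("zap IOVMM area on unpin") flag in
 * struct nvmap_handle above */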
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
		     unsigned int nr, const unsigned long *ids);

void _nvmap_handle_free(struct nvmap_handle *h);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

#if defined(CONFIG_TEGRA_NVMAP)
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
	/* a post-increment count of one or below means the count was
	 * already zero or negative, i.e. the handle has been (or is being)
	 * freed: complain, and fail the get if it is still non-positive */
	if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
		pr_err("%s: %s getting a freed handle\n",
			__func__, current->group_leader->comm);
		if (atomic_read(&h->ref) <= 0)
			return NULL;
	}
	return h;
}

static inline void nvmap_handle_put(struct nvmap_handle *h)
{
	int cnt = atomic_dec_return(&h->ref);

	if (WARN_ON(cnt < 0)) {
		pr_err("%s: %s put to negative references\n",
			__func__, current->comm);
	} else if (cnt == 0)
		_nvmap_handle_free(h);
}

static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
	if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
		return pgprot_noncached(prot);
	else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
		return pgprot_writecombine(prot);
	else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
		return pgprot_inner_writeback(prot);
	return prot;
}
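
/*
 * Illustrative use (sketch): apply a handle's cacheability when mapping it,
 * e.g. from an mmap() path:
 *
 *	vma->vm_page_prot = nvmap_pgprot(h, vma->vm_page_prot);
 */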
#else /* CONFIG_TEGRA_NVMAP */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
void nvmap_handle_put(struct nvmap_handle *h);
pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);
#endif /* !CONFIG_TEGRA_NVMAP */

int is_nvmap_vma(struct vm_area_struct *vma);

struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
	size_t size, size_t align, unsigned int flags, unsigned int iova_start);

void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r);

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */