/*
 * drivers/video/tegra/nvmap/nvmap_priv.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/dma-buf.h>
#include <linux/nvmap.h>
#include <linux/workqueue.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include "nvmap_heap.h"

struct nvmap_device;
struct page;
struct tegra_iovmm_area;

extern const struct file_operations nvmap_fd_fops;
void _nvmap_handle_free(struct nvmap_handle *h);

#if defined(CONFIG_TEGRA_NVMAP)
#define nvmap_err(_client, _fmt, ...)                           \
        dev_err(nvmap_client_to_device(_client),                \
                "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_warn(_client, _fmt, ...)                          \
        dev_warn(nvmap_client_to_device(_client),               \
                 "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_debug(_client, _fmt, ...)                         \
        dev_dbg(nvmap_client_to_device(_client),                \
                "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_ref_to_id(_ref)           ((unsigned long)(_ref)->handle)

/*
 * Bookkeeping for deferred cache maintenance: a list of pending
 * operations plus counters tracking how much inner/outer maintenance
 * has been requested and how much has actually been flushed.
 */
struct nvmap_deferred_ops {
        struct list_head ops_list;
        spinlock_t deferred_ops_lock;
        bool enable_deferred_cache_maintenance;
        u64 deferred_maint_inner_requested;
        u64 deferred_maint_inner_flushed;
        u64 deferred_maint_outer_requested;
        u64 deferred_maint_outer_flushed;
};

/* handles allocated using shared system memory (either IOVMM or
 * high-order page allocations) */
struct nvmap_pgalloc {
        struct page **pages;
        struct tegra_iovmm_area *area;
        struct list_head mru_list;      /* MRU entry for IOVMM reclamation */
        bool contig;                    /* contiguous system memory */
        bool dirty;                     /* area is invalid and needs mapping */
        u32 iovm_addr;  /* non-zero if the client needs a specific iova mapping */
};

struct nvmap_handle {
        struct rb_node node;    /* entry on global handle tree */
        atomic_t ref;           /* reference count (i.e., # of duplications) */
        atomic_t pin;           /* pin count */
        unsigned long flags;
        size_t size;            /* padded (as-allocated) size */
        size_t orig_size;       /* original (as-requested) size */
        size_t align;
        u8 kind;                /* memory kind (0=pitch, !0 -> blocklinear) */
        void *map_resources;    /* mapping resources associated with the
                                   buffer */
        struct nvmap_client *owner;
        struct nvmap_handle_ref *owner_ref; /* use this ref to avoid spending
                        time on validation in some cases.
                        If the handle was duplicated by another client and
                        the original client destroys its ref, this field
                        must be set to zero; the ref should then be
                        obtained through validation. */
        struct nvmap_device *dev;
        union {
                struct nvmap_pgalloc pgalloc;
                struct nvmap_heap_block *carveout;
        };
        bool global;            /* handle may be duplicated by other clients */
        bool secure;            /* zap IOVMM area on unpin */
        bool heap_pgalloc;      /* handle is page allocated (sysmem / iovmm) */
        bool alloc;             /* handle has memory allocated */
        unsigned int userflags; /* flags passed from userspace */
        struct mutex lock;
        void *nvhost_priv;      /* nvhost private data */
        void (*nvhost_priv_delete)(void *priv);
};

/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * terminates */
struct nvmap_handle_ref {
        struct nvmap_handle *handle;
        struct rb_node  node;
        atomic_t        dupes;  /* number of times to free on file close */
        atomic_t        pin;    /* number of times to unpin on free */
};

#ifdef CONFIG_NVMAP_PAGE_POOLS
#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
#define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
#define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)

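/* A pool of spare pages with a single cacheability attribute (@flags is
 * one of the *_POOL values above): @page_array holds @npages entries, up
 * to @max_pages, and @lock serializes pool access. */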
struct nvmap_page_pool {
        struct mutex lock;
        int npages;
        struct page **page_array;
        struct page **shrink_array;
        int max_pages;
        int flags;
};

int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags);
#endif

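/* State shared by all clients of an nvmap device: the IOVMM client and
 * pin synchronization, the optional per-cacheability page pools, and the
 * MRU lists used to reclaim unpinned IOVMM areas. */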
struct nvmap_share {
        struct tegra_iovmm_client *iovmm;
        wait_queue_head_t pin_wait;
        struct mutex pin_lock;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        union {
                struct nvmap_page_pool pools[NVMAP_NUM_POOLS];
                struct {
                        struct nvmap_page_pool uc_pool;
                        struct nvmap_page_pool wc_pool;
                        struct nvmap_page_pool iwb_pool;
                        struct nvmap_page_pool wb_pool;
                };
        };
#endif
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
        struct mutex mru_lock;
        struct list_head *mru_lists;
        int nr_mru;
#endif
};

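/* Per-client record of how much memory has been committed to a carveout
 * heap; updated via nvmap_carveout_commit_add()/subtract() below. */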
struct nvmap_carveout_commit {
        size_t commit;
        struct list_head list;
};

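/* Per-client state: the rb-tree of handle_refs owned by the client
 * (protected by @ref_lock, taken via nvmap_ref_lock()), IOVMM commit
 * accounting, and a trailing variable-length array of per-carveout
 * commit records. */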
struct nvmap_client {
        const char                      *name;
        struct nvmap_device             *dev;
        struct nvmap_share              *share;
        struct rb_root                  handle_refs;
        atomic_t                        iovm_commit;
        size_t                          iovm_limit;
        struct mutex                    ref_lock;
        bool                            super;
        atomic_t                        count;
        struct task_struct              *task;
        struct list_head                list;
        struct nvmap_carveout_commit    carveout_commit[0];
};

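/* Private data attached to nvmap-owned VMAs: the backing handle, the
 * offset of the mapping within it, and the number of processes sharing
 * the VMA. */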
struct nvmap_vma_priv {
        struct nvmap_handle *handle;
        size_t          offs;
        atomic_t        count;  /* number of processes cloning the VMA */
};

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
        mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
        mutex_unlock(&priv->ref_lock);
}

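/* Take a reference on @h. If the handle appears to have been freed
 * already (its count was not positive before the increment), a warning
 * is printed, and NULL is returned when the count is still non-positive
 * afterwards. */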
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
        if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
                pr_err("%s: %s getting a freed handle\n",
                        __func__, current->group_leader->comm);
                if (atomic_read(&h->ref) <= 0)
                        return NULL;
        }
        return h;
}

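/* Translate a handle's cacheability flags into page protection bits:
 * uncacheable buffers become non-cached, write-combined buffers become
 * write-combining, and (except on LPAE) inner-cacheable buffers become
 * inner write-back; fully cacheable buffers keep @prot unchanged. */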
static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
                return pgprot_noncached(prot);
        else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                return pgprot_writecombine(prot);
#ifndef CONFIG_ARM_LPAE /* !!!FIXME!!! BUG 892578 */
        else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                return pgprot_inner_writeback(prot);
#endif
        return prot;
}

#else /* CONFIG_TEGRA_NVMAP */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
void nvmap_handle_put(struct nvmap_handle *h);
pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);

#endif /* !CONFIG_TEGRA_NVMAP */

struct device *nvmap_client_to_device(struct nvmap_client *client);

pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);

pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);

void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);

pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr);

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
                                   struct nvmap_heap_block *b);

struct nvmap_carveout_node;
void nvmap_carveout_commit_add(struct nvmap_client *client,
                               struct nvmap_carveout_node *node, size_t len);

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
                                    struct nvmap_carveout_node *node,
                                    size_t len);

struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);

void nvmap_cache_maint_ops_flush(struct nvmap_device *dev,
                struct nvmap_handle *h);

struct nvmap_deferred_ops *nvmap_get_deferred_ops_from_dev(
                struct nvmap_device *dev);

int nvmap_find_cache_maint_op(struct nvmap_device *dev,
                struct nvmap_handle *h);

struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
                                        unsigned long handle, bool skip_val);

struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
                                         unsigned long id);

void nvmap_handle_put(struct nvmap_handle *h);

struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
                                                   unsigned long id);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
                                        unsigned long id, bool skip_val);

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd);

int nvmap_alloc_handle_id(struct nvmap_client *client,
                          unsigned long id, unsigned int heap_mask,
                          size_t align, u8 kind,
                          unsigned int flags);

void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);

void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);

int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, const unsigned long *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
                     unsigned int nr, const unsigned long *ids);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

int _nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r,
               phys_addr_t *phys);

void nvmap_unpin_handles(struct nvmap_client *client,
                         struct nvmap_handle **h, int nr);

#ifdef CONFIG_DMA_SHARED_BUFFER
/* dma-buf exporter */
struct dma_buf *nvmap_share_dmabuf(struct nvmap_client *client, ulong id);
#else
static inline struct dma_buf *nvmap_share_dmabuf(struct nvmap_client *client,
                                                 ulong id)
{
        return NULL;
}
#endif  /* !CONFIG_DMA_SHARED_BUFFER */

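/* Conversions between the handle/id values exchanged with userspace and
 * the kernel's internal representation; with CONFIG_COMPAT the
 * user-visible handles are fixed-width 32-bit values. */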
#ifdef CONFIG_COMPAT
ulong unmarshal_user_handle(__u32 handle);
__u32 marshal_kernel_handle(ulong handle);
ulong unmarshal_user_id(u32 id);
#else
ulong unmarshal_user_handle(struct nvmap_handle *handle);
struct nvmap_handle *marshal_kernel_handle(ulong handle);
ulong unmarshal_user_id(ulong id);
#endif

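/* Flush a single kernel TLB entry, using the erratum-798181-safe variant
 * when CONFIG_ARM_ERRATA_798181 is enabled. */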
static inline void nvmap_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifdef CONFIG_ARM_ERRATA_798181
        flush_tlb_kernel_page_skip_errata_798181(kaddr);
#else
        flush_tlb_kernel_page(kaddr);
#endif
}

extern void v7_clean_kern_cache_all(void *);

extern size_t cache_maint_outer_threshold;

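/* Flush or clean the entire inner cache by set/ways: with
 * CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU the maintenance runs
 * only on the calling CPU, otherwise it is broadcast to every online CPU
 * via on_each_cpu(). */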
static inline void inner_flush_cache_all(void)
{
#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU
        v7_flush_kern_cache_all();
#else
        on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
#endif
}

static inline void inner_clean_cache_all(void)
{
#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU
        v7_clean_kern_cache_all(NULL);
#else
        on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
#endif
}

extern void __flush_dcache_page(struct address_space *, struct page *);

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */