video: tegra: nvmap: use dmabuf fd to share handles
[linux-3.10.git] / drivers / video / tegra / nvmap / nvmap_priv.h
/*
 * drivers/video/tegra/nvmap/nvmap_priv.h
 *
 * GPU memory management driver for Tegra
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
#define __VIDEO_TEGRA_NVMAP_NVMAP_H

#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/dma-buf.h>
#include <linux/syscalls.h>
#include <linux/nvmap.h>
#include "nvmap_heap.h"
#include <linux/workqueue.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

struct nvmap_device;
struct page;
struct tegra_iovmm_area;

extern const struct file_operations nvmap_fd_fops;
void _nvmap_handle_free(struct nvmap_handle *h);

#if defined(CONFIG_TEGRA_NVMAP)
#define nvmap_err(_client, _fmt, ...)                           \
        dev_err(nvmap_client_to_device(_client),                \
                "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_warn(_client, _fmt, ...)                          \
        dev_warn(nvmap_client_to_device(_client),               \
                 "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_debug(_client, _fmt, ...)                         \
        dev_dbg(nvmap_client_to_device(_client),                \
                "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_ref_to_id(_ref)           ((unsigned long)(_ref)->handle)

/* bookkeeping for deferred cache maintenance operations */
struct nvmap_deferred_ops {
        struct list_head ops_list;
        spinlock_t deferred_ops_lock;
        bool enable_deferred_cache_maintenance;
        u64 deferred_maint_inner_requested;
        u64 deferred_maint_inner_flushed;
        u64 deferred_maint_outer_requested;
        u64 deferred_maint_outer_flushed;
};

/* handles allocated using shared system memory (either IOVMM or high-order
 * page allocations) */
struct nvmap_pgalloc {
        struct page **pages;
        struct tegra_iovmm_area *area;
        struct list_head mru_list;      /* MRU entry for IOVMM reclamation */
        bool contig;                    /* contiguous system memory */
        bool dirty;                     /* area is invalid and needs mapping */
        u32 iovm_addr;  /* non-zero if the client needs a specific IOVA mapping */
};

struct nvmap_handle {
        struct rb_node node;    /* entry on global handle tree */
        atomic_t ref;           /* reference count (i.e., # of duplications) */
        atomic_t pin;           /* pin count */
        unsigned long flags;
        size_t size;            /* padded (as-allocated) size */
        size_t orig_size;       /* original (as-requested) size */
        size_t align;
        u8 kind;                /* memory kind (0 = pitch, non-zero = block linear) */
        void *map_resources;    /* mapping resources associated with the
                                   buffer */
        struct nvmap_client *owner;
        struct nvmap_handle_ref *owner_ref; /* use this ref to avoid spending
                        time on validation in some cases.
                        If the handle was duplicated by another client and
                        the original client destroys its ref, this field
                        must be set to NULL; the ref must then be
                        obtained through validation. */
        struct nvmap_device *dev;
        union {
                struct nvmap_pgalloc pgalloc;
                struct nvmap_heap_block *carveout;
        };
        bool global;            /* handle may be duplicated by other clients */
        bool secure;            /* zap IOVMM area on unpin */
        bool heap_pgalloc;      /* handle is page allocated (sysmem / iovmm) */
        bool alloc;             /* handle has memory allocated */
        unsigned int userflags; /* flags passed from userspace */
        struct mutex lock;
        void *nvhost_priv;      /* nvhost private data */
        void (*nvhost_priv_delete)(void *priv);
};

/* handle_ref objects are client-local references to an nvmap_handle;
 * they are distinct objects so that handles can be unpinned and
 * unreferenced the correct number of times when a client abnormally
 * terminates */
struct nvmap_handle_ref {
        struct nvmap_handle *handle;
        struct rb_node  node;
        atomic_t        dupes;  /* number of times to free on file close */
        atomic_t        pin;    /* number of times to unpin on free */
};
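
/*
 * Teardown sketch (illustrative, not a quote of the implementation):
 * when a client's file is closed, each of its handle_refs is walked and
 * the underlying handle is unpinned 'pin' times and unreferenced 'dupes'
 * times, so a client that terminates abnormally cannot leak pins or
 * references it accumulated.
 */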

#ifdef CONFIG_NVMAP_PAGE_POOLS
#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
#define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
#define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)

struct nvmap_page_pool {
        struct mutex lock;
        int npages;
        struct page **page_array;
        struct page **shrink_array;
        int max_pages;
        int flags;
};

int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags);
#endif
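
/*
 * Illustrative note (an assumption from the defines above, not a
 * statement of the implementation): one pool exists per cacheability
 * type, indexed by the corresponding NVMAP_HANDLE_* flag value (hence
 * NVMAP_NUM_POOLS), presumably so freed pages whose mapping attributes
 * already match a request can be recycled cheaply.
 */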

struct nvmap_share {
        struct tegra_iovmm_client *iovmm;
        wait_queue_head_t pin_wait;
        struct mutex pin_lock;
#ifdef CONFIG_NVMAP_PAGE_POOLS
        union {
                struct nvmap_page_pool pools[NVMAP_NUM_POOLS];
                struct {
                        struct nvmap_page_pool uc_pool;
                        struct nvmap_page_pool wc_pool;
                        struct nvmap_page_pool iwb_pool;
                        struct nvmap_page_pool wb_pool;
                };
        };
#endif
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
        struct mutex mru_lock;
        struct list_head *mru_lists;
        int nr_mru;
#endif
};

struct nvmap_carveout_commit {
        size_t commit;
        struct list_head list;
};

struct nvmap_client {
        const char                      *name;
        struct nvmap_device             *dev;
        struct nvmap_share              *share;
        struct rb_root                  handle_refs;
        atomic_t                        iovm_commit;
        size_t                          iovm_limit;
        struct mutex                    ref_lock;
        bool                            super;
        atomic_t                        count;
        struct task_struct              *task;
        struct list_head                list;
        struct nvmap_carveout_commit    carveout_commit[0];
};

struct nvmap_vma_priv {
        struct nvmap_handle *handle;
        size_t          offs;
        atomic_t        count;  /* number of processes cloning the VMA */
};

static inline void nvmap_ref_lock(struct nvmap_client *priv)
{
        mutex_lock(&priv->ref_lock);
}

static inline void nvmap_ref_unlock(struct nvmap_client *priv)
{
        mutex_unlock(&priv->ref_lock);
}

static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
        if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
                pr_err("%s: %s getting a freed handle\n",
                        __func__, current->group_leader->comm);
                if (atomic_read(&h->ref) <= 0)
                        return NULL;
        }
        return h;
}
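
/*
 * Usage sketch (illustrative, not a definitive contract): a reference
 * taken with nvmap_handle_get() is expected to be dropped with
 * nvmap_handle_put(), declared below, once the caller is done:
 *
 *      h = nvmap_handle_get(h);
 *      if (!h)
 *              return -EINVAL;
 *      ...operate on h...
 *      nvmap_handle_put(h);
 */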

static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
{
        if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
                return pgprot_noncached(prot);
        else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
                return pgprot_writecombine(prot);
#ifndef CONFIG_ARM_LPAE /* !!!FIXME!!! BUG 892578 */
        else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
                return pgprot_inner_writeback(prot);
#endif
        return prot;
}
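
/*
 * Illustrative use (a sketch, not quoted from the driver): a mapping
 * path would typically filter the VMA protection bits through
 * nvmap_pgprot() before inserting pages, so uncacheable and
 * write-combined handles are never mapped with cacheable attributes:
 *
 *      vma->vm_page_prot = nvmap_pgprot(priv->handle, vma->vm_page_prot);
 */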

#else /* CONFIG_TEGRA_NVMAP */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
void nvmap_handle_put(struct nvmap_handle *h);
pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);

#endif /* !CONFIG_TEGRA_NVMAP */

struct device *nvmap_client_to_device(struct nvmap_client *client);

pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);

pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);

void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);

pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr);

struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
                                              struct nvmap_handle *handle,
                                              unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
                                   struct nvmap_heap_block *b);

struct nvmap_carveout_node;
void nvmap_carveout_commit_add(struct nvmap_client *client,
                               struct nvmap_carveout_node *node, size_t len);

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
                                    struct nvmap_carveout_node *node,
                                    size_t len);

struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);

void nvmap_cache_maint_ops_flush(struct nvmap_device *dev,
                struct nvmap_handle *h);

struct nvmap_deferred_ops *nvmap_get_deferred_ops_from_dev(
                struct nvmap_device *dev);

int nvmap_find_cache_maint_op(struct nvmap_device *dev,
                struct nvmap_handle *h);

struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
                                        unsigned long handle, bool skip_val);

struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
                                         unsigned long id);

void nvmap_handle_put(struct nvmap_handle *h);

struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
                                                   unsigned long id);

struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
                                             size_t size);

struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
                                        unsigned long id, bool skip_val);

struct nvmap_handle_ref *nvmap_create_handle_from_fd(
                        struct nvmap_client *client, int fd);

int nvmap_alloc_handle_id(struct nvmap_client *client,
                          unsigned long id, unsigned int heap_mask,
                          size_t align, u8 kind,
                          unsigned int flags);

void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);

void nvmap_free_handle_user_id(struct nvmap_client *c, unsigned long user_id);

int nvmap_pin_ids(struct nvmap_client *client,
                  unsigned int nr, const unsigned long *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
                     unsigned int nr, const unsigned long *ids);

int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

int _nvmap_pin(struct nvmap_client *c, struct nvmap_handle_ref *r,
               phys_addr_t *phys);

void nvmap_unpin_handles(struct nvmap_client *client,
                         struct nvmap_handle **h, int nr);

int nvmap_get_dmabuf_fd(struct nvmap_client *client, ulong id);
ulong nvmap_get_id_from_dmabuf_fd(struct nvmap_client *client, int fd);

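/*
 * Sharing sketch (illustrative, based only on the declarations above):
 * a producer exports a handle as a dma-buf fd with nvmap_get_dmabuf_fd()
 * and passes that fd to another process; the consumer turns the fd back
 * into a local reference with nvmap_create_handle_from_fd(), or resolves
 * the underlying id with nvmap_get_id_from_dmabuf_fd().
 */
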
#ifdef CONFIG_COMPAT
ulong unmarshal_user_handle(__u32 handle);
__u32 marshal_kernel_handle(ulong handle);
ulong unmarshal_user_id(u32 id);
#else
ulong unmarshal_user_handle(struct nvmap_handle *handle);
struct nvmap_handle *marshal_kernel_handle(ulong handle);
ulong unmarshal_user_id(ulong id);
#endif

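/*
 * Marshaling note (an inference from the prototypes above, not a quote
 * of the implementation): with CONFIG_COMPAT, user space sees fixed-width
 * __u32 ids that are marshaled to and from kernel-side values, so raw
 * kernel pointers are never handed to 32-bit user space; without it,
 * the user-visible handle maps directly to the nvmap_handle pointer.
 */
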
static inline void nvmap_flush_tlb_kernel_page(unsigned long kaddr)
{
#ifdef CONFIG_ARM_ERRATA_798181
        flush_tlb_kernel_page_skip_errata_798181(kaddr);
#else
        flush_tlb_kernel_page(kaddr);
#endif
}

extern void v7_clean_kern_cache_all(void *);

extern size_t cache_maint_outer_threshold;

static inline void inner_flush_cache_all(void)
{
#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU
        v7_flush_kern_cache_all();
#else
        on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
#endif
}

static inline void inner_clean_cache_all(void)
{
#ifdef CONFIG_NVMAP_CACHE_MAINT_BY_SET_WAYS_ON_ONE_CPU
        v7_clean_kern_cache_all(NULL);
#else
        on_each_cpu(v7_clean_kern_cache_all, NULL, 1);
#endif
}
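
/*
 * Illustrative note (an assumption from the names above, not quoted from
 * the driver): cache_maint_outer_threshold presumably marks the range
 * size beyond which a whole-cache clean/flush via the helpers above is
 * cheaper than per-line maintenance over the requested range.
 */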

extern void __flush_dcache_page(struct address_space *, struct page *);

#endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */