dda58c8015c5c52e4cdd06116ec8f901eba74f77
[linux-2.6.git] / drivers / video / tegra / nvmap / nvmap.h
1 /*
2  * drivers/video/tegra/nvmap/nvmap.h
3  *
4  * GPU memory management driver for Tegra
5  *
6  * Copyright (c) 2010-2012, NVIDIA Corporation.
7  *
8  * This program is free software; you can redistribute it and/or modify
9  * it under the terms of the GNU General Public License as published by
10  * the Free Software Foundation; either version 2 of the License, or
11  * (at your option) any later version.
 *
13  * This program is distributed in the hope that it will be useful, but WITHOUT
14  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
16  * more details.
17  *
18  * You should have received a copy of the GNU General Public License along
19  * with this program; if not, write to the Free Software Foundation, Inc.,
20  * 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
21  */
22
23 #ifndef __VIDEO_TEGRA_NVMAP_NVMAP_H
24 #define __VIDEO_TEGRA_NVMAP_NVMAP_H
25
26 #include <linux/list.h>
27 #include <linux/mm.h>
28 #include <linux/mutex.h>
29 #include <linux/rbtree.h>
30 #include <linux/sched.h>
31 #include <linux/wait.h>
32 #include <linux/atomic.h>
33 #include <linux/dma-buf.h>
34 #include <linux/nvmap.h>
35 #include "nvmap_heap.h"
36
/* opaque types referenced only through pointers in this header */
struct nvmap_device;
struct page;
struct tegra_iovmm_area;

/* final teardown of a handle; called by nvmap_handle_put() when the
 * reference count drops to zero. */
void _nvmap_handle_free(struct nvmap_handle *h);
42
43 #if defined(CONFIG_TEGRA_NVMAP)
/* client-scoped logging helpers: prefix each message with the calling
 * function and route it to the struct device behind the client (see
 * nvmap_client_to_device()). */
#define nvmap_err(_client, _fmt, ...)				\
	dev_err(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_warn(_client, _fmt, ...)				\
	dev_warn(nvmap_client_to_device(_client),		\
		 "%s: "_fmt, __func__, ##__VA_ARGS__)

#define nvmap_debug(_client, _fmt, ...)				\
	dev_dbg(nvmap_client_to_device(_client),		\
		"%s: "_fmt, __func__, ##__VA_ARGS__)

/* the id handed to user space is the handle pointer itself */
#define nvmap_ref_to_id(_ref)		((unsigned long)(_ref)->handle)
57
/* backing state for handles allocated from shared system memory (either
 * IOVMM- or high-order page allocations) */
struct nvmap_pgalloc {
	struct page **pages;		/* backing pages */
	struct tegra_iovmm_area *area;	/* IOVMM mapping, when present */
	struct list_head mru_list;	/* MRU entry for IOVMM reclamation */
	bool contig;			/* contiguous system memory */
	bool dirty;			/* area is invalid and needs mapping */
	u32 iovm_addr;	/* if non-zero, the client requested this specific
			 * iova mapping */
};
68
/*
 * One nvmap allocation.  The handle pointer doubles as the user-visible
 * id (see nvmap_ref_to_id()).  Backing store is either page-based
 * (pgalloc) or a carveout block, selected by heap_pgalloc.
 */
struct nvmap_handle {
	struct rb_node node;	/* entry on global handle tree */
	atomic_t ref;		/* reference count (i.e., # of duplications) */
	atomic_t pin;		/* pin count */
	unsigned int usecount;	/* how often is used */
	unsigned long flags;	/* NVMAP_HANDLE_* cacheability attribute,
				 * interpreted by nvmap_pgprot() */
	size_t size;		/* padded (as-allocated) size */
	size_t orig_size;	/* original (as-requested) size */
	size_t align;		/* requested alignment */
	struct nvmap_client *owner;	/* creating client */
	struct nvmap_device *dev;
	union {			/* backing store; valid member chosen by
				 * heap_pgalloc */
		struct nvmap_pgalloc pgalloc;
		struct nvmap_heap_block *carveout;
	};
	bool global;		/* handle may be duplicated by other clients */
	bool secure;		/* zap IOVMM area on unpin */
	bool heap_pgalloc;	/* handle is page allocated (sysmem / iovmm) */
	bool alloc;		/* handle has memory allocated */
	unsigned int userflags;	/* flags passed from userspace */
	struct mutex lock;
};
91
#ifdef CONFIG_NVMAP_PAGE_POOLS
/* one page pool per cacheability attribute; pool index == handle flag */
#define NVMAP_UC_POOL NVMAP_HANDLE_UNCACHEABLE
#define NVMAP_WC_POOL NVMAP_HANDLE_WRITE_COMBINE
#define NVMAP_IWB_POOL NVMAP_HANDLE_INNER_CACHEABLE
#define NVMAP_WB_POOL NVMAP_HANDLE_CACHEABLE
#define NVMAP_NUM_POOLS (NVMAP_HANDLE_CACHEABLE + 1)

/* cache of pre-allocated pages sharing one cacheability attribute */
struct nvmap_page_pool {
	struct mutex lock;		/* serializes access to the pool */
	int npages;			/* pages currently held */
	struct page **page_array;	/* the pooled pages */
	struct page **shrink_array;	/* NOTE(review): presumably scratch
					 * space for shrinking — confirm in
					 * the pool implementation */
	int max_pages;			/* pool capacity */
	int flags;			/* which NVMAP_*_POOL this is */
};

int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags);
#endif
110
/*
 * State shared across clients of one nvmap_device
 * (obtained via nvmap_get_share_from_dev()).
 */
struct nvmap_share {
	struct tegra_iovmm_client *iovmm;	/* IOVMM context */
	wait_queue_head_t pin_wait;	/* waiters blocked in pin */
	struct mutex pin_lock;		/* NOTE(review): presumably serializes
					 * pin operations — confirm */
#ifdef CONFIG_NVMAP_PAGE_POOLS
	/* per-cacheability page pools, addressable by index or by name */
	union {
		struct nvmap_page_pool pools[NVMAP_NUM_POOLS];
		struct {
			struct nvmap_page_pool uc_pool;
			struct nvmap_page_pool wc_pool;
			struct nvmap_page_pool iwb_pool;
			struct nvmap_page_pool wb_pool;
		};
	};
#endif
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
	struct mutex mru_lock;		/* guards mru_lists */
	struct list_head *mru_lists;	/* MRU lists of unpinned IOVMM areas
					 * (entries: nvmap_pgalloc.mru_list) */
	int nr_mru;			/* number of mru_lists */
#endif
};
132
/* per-client accounting of memory committed from one carveout; updated by
 * nvmap_carveout_commit_add()/_subtract(). */
struct nvmap_carveout_commit {
	size_t commit;			/* bytes committed */
	struct list_head list;
};
137
138 struct nvmap_client {
139         const char                      *name;
140         struct nvmap_device             *dev;
141         struct nvmap_share              *share;
142         struct rb_root                  handle_refs;
143         atomic_t                        iovm_commit;
144         size_t                          iovm_limit;
145         struct mutex                    ref_lock;
146         bool                            super;
147         atomic_t                        count;
148         struct task_struct              *task;
149         struct list_head                list;
150         struct nvmap_carveout_commit    carveout_commit[0];
151 };
152
/* per-VMA private data for nvmap mappings (see is_nvmap_vma()) */
struct nvmap_vma_priv {
	struct nvmap_handle *handle;	/* handle backing this mapping */
	size_t		offs;		/* presumably the mapping's offset into
					 * the handle — confirm at fault path */
	atomic_t	count;	/* number of processes cloning the VMA */
};
158
159 static inline void nvmap_ref_lock(struct nvmap_client *priv)
160 {
161         mutex_lock(&priv->ref_lock);
162 }
163
164 static inline void nvmap_ref_unlock(struct nvmap_client *priv)
165 {
166         mutex_unlock(&priv->ref_lock);
167 }
168
/*
 * Take a reference on @h.  Returns @h on success, or NULL if the handle
 * was already freed (its refcount had reached zero before the increment).
 */
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
	/* post-increment value <= 1 means the old count was <= 0, i.e. the
	 * caller is trying to resurrect a freed handle */
	if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
		pr_err("%s: %s getting a freed handle\n",
			__func__, current->group_leader->comm);
		/* re-read: only fail if the count is still non-positive */
		if (atomic_read(&h->ref) <= 0)
			return NULL;
	}
	return h;
}
179
180 static inline void nvmap_handle_put(struct nvmap_handle *h)
181 {
182         int cnt = atomic_dec_return(&h->ref);
183
184         if (WARN_ON(cnt < 0)) {
185                 pr_err("%s: %s put to negative references\n",
186                         __func__, current->comm);
187         } else if (cnt == 0)
188                 _nvmap_handle_free(h);
189 }
190
191 static inline pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot)
192 {
193         if (h->flags == NVMAP_HANDLE_UNCACHEABLE)
194                 return pgprot_noncached(prot);
195         else if (h->flags == NVMAP_HANDLE_WRITE_COMBINE)
196                 return pgprot_writecombine(prot);
197 #ifndef CONFIG_ARM_LPAE /* !!!FIXME!!! BUG 892578 */
198         else if (h->flags == NVMAP_HANDLE_INNER_CACHEABLE)
199                 return pgprot_inner_writeback(prot);
200 #endif
201         return prot;
202 }
203
204 #else /* CONFIG_TEGRA_NVMAP */
/* without CONFIG_TEGRA_NVMAP these are plain declarations, defined
 * out of line elsewhere */
struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
void nvmap_handle_put(struct nvmap_handle *h);
pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);
208
209 #endif /* !CONFIG_TEGRA_NVMAP */
210
struct device *nvmap_client_to_device(struct nvmap_client *client);

/* scratch PTE helpers: reserve a kernel PTE slot (and its virtual
 * address, returned via @vaddr) for temporary CPU mappings */
pte_t **nvmap_alloc_pte(struct nvmap_device *dev, void **vaddr);

/* NOTE(review): presumably the variant callable from irq/atomic
 * context — confirm in implementation */
pte_t **nvmap_alloc_pte_irq(struct nvmap_device *dev, void **vaddr);

void nvmap_free_pte(struct nvmap_device *dev, pte_t **pte);

pte_t **nvmap_vaddr_to_pte(struct nvmap_device *dev, unsigned long vaddr);

/* adjust nvmap_handle.usecount */
void nvmap_usecount_inc(struct nvmap_handle *h);
void nvmap_usecount_dec(struct nvmap_handle *h);

/* allocate a carveout heap block of the given @type for @handle */
struct nvmap_heap_block *nvmap_carveout_alloc(struct nvmap_client *dev,
					      struct nvmap_handle *handle,
					      unsigned long type);

unsigned long nvmap_carveout_usage(struct nvmap_client *c,
				   struct nvmap_heap_block *b);

/* per-client carveout accounting (see struct nvmap_carveout_commit) */
struct nvmap_carveout_node;
void nvmap_carveout_commit_add(struct nvmap_client *client,
			       struct nvmap_carveout_node *node, size_t len);

void nvmap_carveout_commit_subtract(struct nvmap_client *client,
				    struct nvmap_carveout_node *node,
				    size_t len);

struct nvmap_share *nvmap_get_share_from_dev(struct nvmap_device *dev);

/* id -> handle lookups; ids are handle pointers (see nvmap_ref_to_id()) */
struct nvmap_handle *nvmap_validate_get(struct nvmap_client *client,
					unsigned long handle);

/* _locked suffix: caller is expected to hold the client's ref_lock
 * (nvmap_ref_lock()) — confirm at call sites */
struct nvmap_handle_ref *_nvmap_validate_id_locked(struct nvmap_client *priv,
						   unsigned long id);

struct nvmap_handle *nvmap_get_handle_id(struct nvmap_client *client,
					 unsigned long id);

/* handle lifecycle: create, allocate backing memory, free */
struct nvmap_handle_ref *nvmap_create_handle(struct nvmap_client *client,
					     size_t size);

int nvmap_alloc_handle_id(struct nvmap_client *client,
			  unsigned long id, unsigned int heap_mask,
			  size_t align, unsigned int flags);

void nvmap_free_handle_id(struct nvmap_client *c, unsigned long id);

/* pin/unpin groups of handles by id */
int nvmap_pin_ids(struct nvmap_client *client,
		  unsigned int nr, const unsigned long *ids);

void nvmap_unpin_ids(struct nvmap_client *priv,
		     unsigned int nr, const unsigned long *ids);

/* add/remove a handle on the device's global handle tree */
int nvmap_handle_remove(struct nvmap_device *dev, struct nvmap_handle *h);

void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);

int is_nvmap_vma(struct vm_area_struct *vma);

/* create + allocate an IOVMM-backed handle in one step, and its release */
struct nvmap_handle_ref *nvmap_alloc_iovm(struct nvmap_client *client,
	size_t size, size_t align, unsigned int flags, unsigned int iova_start);

void nvmap_free_iovm(struct nvmap_client *client, struct nvmap_handle_ref *r);
276 #endif /* __VIDEO_TEGRA_NVMAP_NVMAP_H */