*
* GPU memory management driver for Tegra
*
- * Copyright (c) 2010-2011, NVIDIA Corporation.
+ * Copyright (c) 2010-2012, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/wait.h>
-
#include <linux/atomic.h>
-
#include <mach/nvmap.h>
-
#include "nvmap_heap.h"
+struct nvmap_device;
+struct page;
+struct tegra_iovmm_area;
+
+#if defined(CONFIG_TEGRA_NVMAP)
#define nvmap_err(_client, _fmt, ...) \
dev_err(nvmap_client_to_device(_client), \
"%s: "_fmt, __func__, ##__VA_ARGS__)
#define nvmap_ref_to_id(_ref) ((unsigned long)(_ref)->handle)
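/*
 * Example (illustrative, not part of this patch): the logging macro
 * prepends the calling function name and routes through the client's
 * device, e.g.:
 *
 *	nvmap_err(client, "pin failed (err=%d)\n", err);
 *
 * nvmap_ref_to_id() yields the opaque id (the handle pointer cast to
 * unsigned long) that crosses the user/kernel interface.
 */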
-struct nvmap_device;
-struct page;
-struct tegra_iovmm_area;
-
/* handles allocated using shared system memory (either IOVMM- or
 * high-order page allocations) */
struct nvmap_pgalloc {
bool secure; /* zap IOVMM area on unpin */
bool heap_pgalloc; /* handle is page allocated (sysmem / iovmm) */
bool alloc; /* handle has memory allocated */
+ unsigned int userflags; /* flags passed from userspace */
struct mutex lock;
};
+#define NVMAP_DEFAULT_PAGE_POOL_SIZE 8192
+#define NVMAP_NUM_POOLS 2
+#define NVMAP_UC_POOL 0
+#define NVMAP_WC_POOL 1
+
+struct nvmap_page_pool {
+ spinlock_t lock;
+ int npages;
+ struct page **page_array;
+ int max_pages;
+};
+
+int nvmap_page_pool_init(struct nvmap_page_pool *pool, int flags);
+struct page *nvmap_page_pool_alloc(struct nvmap_page_pool *pool);
+bool nvmap_page_pool_release(struct nvmap_page_pool *pool, struct page *page);
+int nvmap_page_pool_get_free_count(struct nvmap_page_pool *pool);
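/*
 * Sketch of the fast paths implied by the declarations above
 * (illustrative only; the real nvmap_page_pool_alloc() and
 * nvmap_page_pool_release() live in the driver sources, and the
 * example_pool_* names here are hypothetical): the pool is a LIFO
 * stack of pre-allocated pages guarded by the spinlock.
 */
static inline struct page *example_pool_alloc(struct nvmap_page_pool *pool)
{
	struct page *page = NULL;

	spin_lock(&pool->lock);
	if (pool->npages > 0)
		page = pool->page_array[--pool->npages];
	spin_unlock(&pool->lock);
	return page;
}

static inline bool example_pool_release(struct nvmap_page_pool *pool,
					struct page *page)
{
	bool stored = false;

	spin_lock(&pool->lock);
	if (pool->npages < pool->max_pages) {
		pool->page_array[pool->npages++] = page;
		stored = true;
	}
	spin_unlock(&pool->lock);
	return stored;	/* false: caller must free the page itself */
}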
+
struct nvmap_share {
struct tegra_iovmm_client *iovmm;
wait_queue_head_t pin_wait;
struct mutex pin_lock;
+ union {
+ struct nvmap_page_pool pools[NVMAP_NUM_POOLS];
+ struct {
+ struct nvmap_page_pool uc_pool;
+ struct nvmap_page_pool wc_pool;
+ };
+ };
#ifdef CONFIG_NVMAP_RECLAIM_UNPINNED_VM
struct mutex mru_lock;
struct list_head *mru_lists;
struct nvmap_carveout_commit carveout_commit[0];
};
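/*
 * Example usage (sketch, not part of this patch): the anonymous union
 * above lets the per-share pools be addressed either by index or by
 * name, so share->pools[NVMAP_WC_POOL] and share->wc_pool refer to the
 * same pool.  nvmap_alloc_wc_page() below is a hypothetical helper
 * showing a pool-first allocation with a fallback to the page
 * allocator; real code would also need to fix up cache attributes for
 * the fallback page, which is elided here.
 */
static inline struct page *nvmap_alloc_wc_page(struct nvmap_share *share)
{
	struct page *page;

	page = nvmap_page_pool_alloc(&share->wc_pool);
	if (!page)
		page = alloc_page(GFP_KERNEL);
	return page;
}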
-/* handle_ref objects are client-local references to an nvmap_handle;
- * they are distinct objects so that handles can be unpinned and
- * unreferenced the correct number of times when a client abnormally
- * terminates */
-struct nvmap_handle_ref {
- struct nvmap_handle *handle;
- struct rb_node node;
- atomic_t dupes; /* number of times to free on file close */
- atomic_t pin; /* number of times to unpin on free */
-};
-
struct nvmap_vma_priv {
struct nvmap_handle *handle;
size_t offs;
{
mutex_unlock(&priv->ref_lock);
}
+#endif /* CONFIG_TEGRA_NVMAP */
struct device *nvmap_client_to_device(struct nvmap_client *client);
struct nvmap_handle_ref *nvmap_duplicate_handle_id(struct nvmap_client *client,
unsigned long id);
-
int nvmap_alloc_handle_id(struct nvmap_client *client,
unsigned long id, unsigned int heap_mask,
size_t align, unsigned int flags);
void nvmap_handle_add(struct nvmap_device *dev, struct nvmap_handle *h);
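/*
 * Illustrative call sequence (sketch, not part of this patch):
 * duplicating an existing handle into a client, then allocating
 * backing memory for it.  NVMAP_HEAP_SYSMEM, the PAGE_SIZE alignment,
 * the zero flags value, and the ERR_PTR/NULL error convention are all
 * assumptions for illustration only.
 */
static inline int nvmap_example_alloc(struct nvmap_client *client,
				      unsigned long id)
{
	struct nvmap_handle_ref *ref;

	ref = nvmap_duplicate_handle_id(client, id);
	if (IS_ERR_OR_NULL(ref))
		return -EINVAL;

	/* allocate page-aligned system-memory backing for the handle */
	return nvmap_alloc_handle_id(client, nvmap_ref_to_id(ref),
				     NVMAP_HEAP_SYSMEM, PAGE_SIZE, 0);
}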
+#if defined(CONFIG_TEGRA_NVMAP)
static inline struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h)
{
if (unlikely(atomic_inc_return(&h->ref) <= 1)) {
return pgprot_inner_writeback(prot);
return prot;
}
+#else /* CONFIG_TEGRA_NVMAP */
+struct nvmap_handle *nvmap_handle_get(struct nvmap_handle *h);
+void nvmap_handle_put(struct nvmap_handle *h);
+pgprot_t nvmap_pgprot(struct nvmap_handle *h, pgprot_t prot);
+#endif /* !CONFIG_TEGRA_NVMAP */
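/*
 * Reference-count usage pattern (sketch): nvmap_handle_get() is
 * assumed to fail, returning NULL, when the handle's refcount has
 * already dropped to zero (the <= 1 check above), so callers pin the
 * handle across an operation and drop the pin with
 * nvmap_handle_put().  nvmap_with_handle() is a hypothetical caller.
 */
static inline int nvmap_with_handle(struct nvmap_handle *h)
{
	h = nvmap_handle_get(h);
	if (!h)
		return -EINVAL;
	/* ... operate on the pinned handle ... */
	nvmap_handle_put(h);
	return 0;
}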
int is_nvmap_vma(struct vm_area_struct *vma);